From 90da9ca89613a3bdf1d3baea634741ab871e5766 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 27 Nov 2020 11:26:26 +0100 Subject: [PATCH 001/198] fix(resolve): improving bits --- pype/hosts/resolve/plugin.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index 72eec04896..4be17870cc 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -7,6 +7,7 @@ from pype.api import config from Qt import QtWidgets, QtCore + class CreatorWidget(QtWidgets.QDialog): # output items @@ -132,8 +133,8 @@ class CreatorWidget(QtWidgets.QDialog): return item def add_presets_to_layout(self, content_layout, data): - for k, v in data.items(): - if isinstance(v, dict): + for _key, _val in data.items(): + if isinstance(_val, dict): # adding spacer between sections self.content_widget.append(QtWidgets.QWidget(self)) devider = QtWidgets.QVBoxLayout(self.content_widget[-1]) @@ -147,18 +148,19 @@ class CreatorWidget(QtWidgets.QDialog): nested_content_layout.setObjectName("NestedContentLayout") # add nested key as label - self.create_row(nested_content_layout, "QLabel", k) - data[k] = self.add_presets_to_layout(nested_content_layout, v) - elif isinstance(v, str): - print(f"layout.str: {k}") - print(f"content_layout: {content_layout}") - data[k] = self.create_row( - content_layout, "QLineEdit", k, setText=v) - elif isinstance(v, int): - print(f"layout.int: {k}") - print(f"content_layout: {content_layout}") - data[k] = self.create_row( - content_layout, "QSpinBox", k, setValue=v) + self.create_row(nested_content_layout, "QLabel", _key) + data[_key] = self.add_presets_to_layout( + nested_content_layout, _val) + elif isinstance(_val, str): + log.debug("layout.str: {}".format(_key)) + log.debug("content_layout: {}".format(content_layout)) + data[_key] = self.create_row( + content_layout, "QLineEdit", _key, setText=_val) + elif isinstance(_val, int): + log.debug("layout.int: {}".format(_key)) + log.debug("content_layout: {}".format(content_layout)) + data[_key] = self.create_row( + content_layout, "QSpinBox", _key, setValue=_val) return data From 8eed57908d477e894443da85f780454a64d2e585 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 27 Nov 2020 14:03:09 +0100 Subject: [PATCH 002/198] feat(resolve): adding otio and sync util script --- .../resolve/utility_scripts/OTIO_export.py | 185 ++++++++++++++++++ .../utility_scripts/PYPE_sync_util_scripts.py | 16 ++ 2 files changed, 201 insertions(+) create mode 100644 pype/hosts/resolve/utility_scripts/OTIO_export.py create mode 100644 pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py diff --git a/pype/hosts/resolve/utility_scripts/OTIO_export.py b/pype/hosts/resolve/utility_scripts/OTIO_export.py new file mode 100644 index 0000000000..a0c8e80bc7 --- /dev/null +++ b/pype/hosts/resolve/utility_scripts/OTIO_export.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +import os +import sys +import opentimelineio as otio +print(otio) +resolve = bmd.scriptapp("Resolve") +fu = resolve.Fusion() + +ui = fu.UIManager +disp = bmd.UIDispatcher(fu.UIManager) + +TRACK_TYPES = { + "video": otio.schema.TrackKind.Video, + "audio": otio.schema.TrackKind.Audio +} + +print(resolve) + +def _create_rational_time(frame, fps): + return otio.opentime.RationalTime( + float(frame), + float(fps) + ) + + +def _create_time_range(start, duration, fps): + return otio.opentime.TimeRange( + start_time=_create_rational_time(start, fps), + 
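+        # start and duration are frame counts wrapped as RationalTime at the same fps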
duration=_create_rational_time(duration, fps) + ) + + +def _create_reference(mp_item): + return otio.schema.ExternalReference( + target_url=mp_item.GetClipProperty("File Path").get("File Path"), + available_range=_create_time_range( + mp_item.GetClipProperty("Start").get("Start"), + mp_item.GetClipProperty("Frames").get("Frames"), + mp_item.GetClipProperty("FPS").get("FPS") + ) + ) + + +def _create_markers(tl_item, frame_rate): + tl_markers = tl_item.GetMarkers() + markers = [] + for m_frame in tl_markers: + markers.append( + otio.schema.Marker( + name=tl_markers[m_frame]["name"], + marked_range=_create_time_range( + m_frame, + tl_markers[m_frame]["duration"], + frame_rate + ), + color=tl_markers[m_frame]["color"].upper(), + metadata={"Resolve": {"note": tl_markers[m_frame]["note"]}} + ) + ) + return markers + + +def _create_clip(tl_item): + mp_item = tl_item.GetMediaPoolItem() + frame_rate = mp_item.GetClipProperty("FPS").get("FPS") + clip = otio.schema.Clip( + name=tl_item.GetName(), + source_range=_create_time_range( + tl_item.GetLeftOffset(), + tl_item.GetDuration(), + frame_rate + ), + media_reference=_create_reference(mp_item) + ) + for marker in _create_markers(tl_item, frame_rate): + clip.markers.append(marker) + return clip + + +def _create_gap(gap_start, clip_start, tl_start_frame, frame_rate): + return otio.schema.Gap( + source_range=_create_time_range( + gap_start, + (clip_start - tl_start_frame) - gap_start, + frame_rate + ) + ) + + +def _create_ot_timeline(output_path): + if not output_path: + return + project_manager = resolve.GetProjectManager() + current_project = project_manager.GetCurrentProject() + dr_timeline = current_project.GetCurrentTimeline() + ot_timeline = otio.schema.Timeline(name=dr_timeline.GetName()) + for track_type in list(TRACK_TYPES.keys()): + track_count = dr_timeline.GetTrackCount(track_type) + for track_index in range(1, int(track_count) + 1): + ot_track = otio.schema.Track( + name="{}{}".format(track_type[0].upper(), track_index), + kind=TRACK_TYPES[track_type] + ) + tl_items = dr_timeline.GetItemListInTrack(track_type, track_index) + for tl_item in tl_items: + if tl_item.GetMediaPoolItem() is None: + continue + clip_start = tl_item.GetStart() - dr_timeline.GetStartFrame() + if clip_start > ot_track.available_range().duration.value: + ot_track.append( + _create_gap( + ot_track.available_range().duration.value, + tl_item.GetStart(), + dr_timeline.GetStartFrame(), + current_project.GetSetting("timelineFrameRate") + ) + ) + ot_track.append(_create_clip(tl_item)) + ot_timeline.tracks.append(ot_track) + otio.adapters.write_to_file( + ot_timeline, "{}/{}.otio".format(output_path, dr_timeline.GetName())) + + +title_font = ui.Font({"PixelSize": 18}) +dlg = disp.AddWindow( + { + "WindowTitle": "Export OTIO", + "ID": "OTIOwin", + "Geometry": [250, 250, 250, 100], + "Spacing": 0, + "Margin": 10 + }, + [ + ui.VGroup( + { + "Spacing": 2 + }, + [ + ui.Button( + { + "ID": "exportfilebttn", + "Text": "Select Destination", + "Weight": 1.25, + "ToolTip": "Choose where to save the otio", + "Flat": False + } + ), + ui.VGap(), + ui.Button( + { + "ID": "exportbttn", + "Text": "Export", + "Weight": 2, + "ToolTip": "Export the current timeline", + "Flat": False + } + ) + ] + ) + ] +) + +itm = dlg.GetItems() + + +def _close_window(event): + disp.ExitLoop() + + +def _export_button(event): + _create_ot_timeline(itm["exportfilebttn"].Text) + _close_window(None) + + +def _export_file_pressed(event): + selectedPath = fu.RequestDir(os.path.expanduser("~/Documents")) + 
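+    # RequestDir opens a directory picker and returns the selected path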
itm["exportfilebttn"].Text = selectedPath + + +dlg.On.OTIOwin.Close = _close_window +dlg.On.exportfilebttn.Clicked = _export_file_pressed +dlg.On.exportbttn.Clicked = _export_button +dlg.Show() +disp.RunLoop() +dlg.Hide() diff --git a/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py b/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py new file mode 100644 index 0000000000..ee4905033b --- /dev/null +++ b/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +import os +import sys +import pype +import pype.hosts.resolve as bmdvr + + +def main(env): + # Registers pype's Global pyblish plugins + pype.install() + bmdvr.setup(env) + + +if __name__ == "__main__": + result = main(os.environ) + sys.exit(not bool(result)) From 09f175afab35473ee0d63a3a1a5ea8bd80acb66e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 27 Nov 2020 15:00:30 +0100 Subject: [PATCH 003/198] fix(resolve): code improvemenets --- pype/hosts/resolve/plugin.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index 4be17870cc..c5d4c1d3e5 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -7,7 +7,6 @@ from pype.api import config from Qt import QtWidgets, QtCore - class CreatorWidget(QtWidgets.QDialog): # output items @@ -87,12 +86,10 @@ class CreatorWidget(QtWidgets.QDialog): data[k] = self.value(v) elif getattr(v, "value", None): print(f"normal int: {k}") - result = v.value() - data[k] = result() + data[k] = v.value() else: print(f"normal text: {k}") - result = v.text() - data[k] = result() + data[k] = v.text() return data def camel_case_split(self, text): @@ -152,13 +149,13 @@ class CreatorWidget(QtWidgets.QDialog): data[_key] = self.add_presets_to_layout( nested_content_layout, _val) elif isinstance(_val, str): - log.debug("layout.str: {}".format(_key)) - log.debug("content_layout: {}".format(content_layout)) + print("layout.str: {}".format(_key)) + print("content_layout: {}".format(content_layout)) data[_key] = self.create_row( content_layout, "QLineEdit", _key, setText=_val) elif isinstance(_val, int): - log.debug("layout.int: {}".format(_key)) - log.debug("content_layout: {}".format(content_layout)) + print("layout.int: {}".format(_key)) + print("content_layout: {}".format(content_layout)) data[_key] = self.create_row( content_layout, "QSpinBox", _key, setValue=_val) return data From 6f9c56d03188bb02a0775acd9f06c198f5cdfe2b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 27 Nov 2020 17:01:04 +0100 Subject: [PATCH 004/198] feat(resolve): wip new creator --- pype/hosts/resolve/__init__.py | 2 + pype/hosts/resolve/lib.py | 16 ++ pype/hosts/resolve/plugin.py | 187 +++++++++---- .../utility_scripts/PYPE_sync_util_scripts.py | 2 +- .../utility_scripts/resolve_dev_scriping.py | 22 ++ .../resolve/create/create_shot_clip_new.py | 252 ++++++++++++++++++ 6 files changed, 427 insertions(+), 54 deletions(-) create mode 100644 pype/hosts/resolve/utility_scripts/resolve_dev_scriping.py create mode 100644 pype/plugins/resolve/create/create_shot_clip_new.py diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py index c8f45259ff..8b0ca774b5 100644 --- a/pype/hosts/resolve/__init__.py +++ b/pype/hosts/resolve/__init__.py @@ -17,6 +17,7 @@ from .lib import ( get_project_manager, get_current_project, get_current_sequence, + get_video_track_names, get_current_track_items, create_current_sequence_media_bin, 
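+    # compound-clip and media-bin helpers used by the new clip Creator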
create_compound_clip, @@ -60,6 +61,7 @@ __all__ = [ "get_project_manager", "get_current_project", "get_current_sequence", + "get_video_track_names", "get_current_track_items", "create_current_sequence_media_bin", "create_compound_clip", diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index deb4fa6339..e36dc1bb15 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -35,6 +35,22 @@ def get_current_sequence(): return project.GetCurrentTimeline() +def get_video_track_names(): + tracks = list() + track_type = "video" + sequence = get_current_sequence() + + # get all tracks count filtered by track type + selected_track_count = sequence.GetTrackCount(track_type) + + # loop all tracks and get items + for track_index in range(1, (int(selected_track_count) + 1)): + track_name = sequence.GetTrackName("video", track_index) + tracks.append(track_name) + + return tracks + + def get_current_track_items( filter=False, track_type=None, diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index c5d4c1d3e5..a652fbfe64 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -12,7 +12,7 @@ class CreatorWidget(QtWidgets.QDialog): # output items items = dict() - def __init__(self, name, info, presets, parent=None): + def __init__(self, name, info, ui_inputs, parent=None): super(CreatorWidget, self).__init__(parent) self.setObjectName(name) @@ -25,6 +25,7 @@ class CreatorWidget(QtWidgets.QDialog): | QtCore.Qt.WindowStaysOnTopHint ) self.setWindowTitle(name or "Pype Creator Input") + self.resize(500, 700) # Where inputs and labels are set self.content_widget = [QtWidgets.QWidget(self)] @@ -35,14 +36,25 @@ class CreatorWidget(QtWidgets.QDialog): # first add widget tag line top_layout.addWidget(QtWidgets.QLabel(info)) - top_layout.addWidget(Spacer(5, self)) - # main dynamic layout - self.content_widget.append(QtWidgets.QWidget(self)) - content_layout = QtWidgets.QFormLayout(self.content_widget[-1]) + self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAsNeeded) + self.scroll_area.setVerticalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOn) + self.scroll_area.setHorizontalScrollBarPolicy( + QtCore.Qt.ScrollBarAlwaysOff) + self.scroll_area.setWidgetResizable(True) + + self.content_widget.append(self.scroll_area) + + scroll_widget = QtWidgets.QWidget(self) + in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget) + self.content_layout = [in_scroll_area] # add preset data into input widget layout - self.items = self.add_presets_to_layout(content_layout, presets) + self.items = self.populate_widgets(ui_inputs) + self.scroll_area.setWidget(scroll_widget) # Confirmation buttons btns_widget = QtWidgets.QWidget(self) @@ -79,18 +91,33 @@ class CreatorWidget(QtWidgets.QDialog): self.result = None self.close() - def value(self, data): + def value(self, data, new_data=None): + new_data = new_data or dict() for k, v in data.items(): - if isinstance(v, dict): - print(f"nested: {k}") - data[k] = self.value(v) - elif getattr(v, "value", None): - print(f"normal int: {k}") - data[k] = v.value() - else: - print(f"normal text: {k}") - data[k] = v.text() - return data + new_data[k] = { + "target": None, + "value": None + } + if v["type"] == "dict": + new_data[k]["target"] = v["target"] + new_data[k]["value"] = self.value(v["value"]) + if v["type"] == "section": + new_data.pop(k) + new_data = self.value(v["value"], new_data) + elif getattr(v["value"], "currentText", None): + 
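+                # a currentText attribute means the widget is a QComboBox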
new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].currentText() + elif getattr(v["value"], "isChecked", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].isChecked() + elif getattr(v["value"], "value", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].value() + elif getattr(v["value"], "text", None): + new_data[k]["target"] = v["target"] + new_data[k]["value"] = v["value"].text() + + return new_data def camel_case_split(self, text): matches = re.finditer( @@ -129,35 +156,103 @@ class CreatorWidget(QtWidgets.QDialog): return item - def add_presets_to_layout(self, content_layout, data): - for _key, _val in data.items(): - if isinstance(_val, dict): + def populate_widgets(self, data, content_layout=None): + """ + Populate widget from input dict. + + Each plugin has its own set of widget rows defined in dictionary + each row values should have following keys: `type`, `target`, + `label`, `order`, `value` and optionally also `toolTip`. + + Args: + data (dict): widget rows or organized groups defined + by types `dict` or `section` + content_layout (QtWidgets.QFormLayout)[optional]: used when nesting + + Returns: + dict: redefined data dict updated with created widgets + + """ + + content_layout = content_layout or self.content_layout[-1] + # fix order of process by defined order value + ordered_keys = list(data.keys()) + for k, v in data.items(): + try: + # try removing a key from index which should + # be filled with new + ordered_keys.pop(v["order"]) + except IndexError: + pass + # add key into correct order + ordered_keys.insert(v["order"], k) + + # process ordered + for k in ordered_keys: + v = data[k] + tool_tip = v.get("toolTip", "") + if v["type"] == "dict": # adding spacer between sections - self.content_widget.append(QtWidgets.QWidget(self)) - devider = QtWidgets.QVBoxLayout(self.content_widget[-1]) - devider.addWidget(Spacer(5, self)) - devider.setObjectName("Devider") + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) # adding nested layout with label - self.content_widget.append(QtWidgets.QWidget(self)) + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + nested_content_layout = QtWidgets.QFormLayout( - self.content_widget[-1]) + self.content_layout[-1]) nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) # add nested key as label - self.create_row(nested_content_layout, "QLabel", _key) - data[_key] = self.add_presets_to_layout( - nested_content_layout, _val) - elif isinstance(_val, str): - print("layout.str: {}".format(_key)) - print("content_layout: {}".format(content_layout)) - data[_key] = self.create_row( - content_layout, "QLineEdit", _key, setText=_val) - elif isinstance(_val, int): - print("layout.int: {}".format(_key)) - print("content_layout: {}".format(content_layout)) - data[_key] = self.create_row( - content_layout, "QSpinBox", _key, setValue=_val) + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + if v["type"] == "section": + # adding spacer between sections + self.content_layout.append(QtWidgets.QWidget(self)) + content_layout.addWidget(self.content_layout[-1]) + 
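+                # "section" mirrors the "dict" branch above: a headline plus a nested form layout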
self.content_layout[-1].setObjectName("sectionHeadline") + + headline = QtWidgets.QVBoxLayout(self.content_layout[-1]) + headline.addWidget(Spacer(20, self)) + headline.addWidget(QtWidgets.QLabel(v["label"])) + + # adding nested layout with label + self.content_layout.append(QtWidgets.QWidget(self)) + self.content_layout[-1].setObjectName("sectionContent") + + nested_content_layout = QtWidgets.QFormLayout( + self.content_layout[-1]) + nested_content_layout.setObjectName("NestedContentLayout") + content_layout.addWidget(self.content_layout[-1]) + + # add nested key as label + data[k]["value"] = self.populate_widgets( + v["value"], nested_content_layout) + + elif v["type"] == "QLineEdit": + data[k]["value"] = self.create_row( + content_layout, "QLineEdit", v["label"], + setText=v["value"], setToolTip=tool_tip) + elif v["type"] == "QComboBox": + data[k]["value"] = self.create_row( + content_layout, "QComboBox", v["label"], + addItems=v["value"], setToolTip=tool_tip) + elif v["type"] == "QCheckBox": + data[k]["value"] = self.create_row( + content_layout, "QCheckBox", v["label"], + setChecked=v["value"], setToolTip=tool_tip) + elif v["type"] == "QSpinBox": + data[k]["value"] = self.create_row( + content_layout, "QSpinBox", v["label"], + setValue=v["value"], setMaximum=10000, setToolTip=tool_tip) return data @@ -178,20 +273,6 @@ class Spacer(QtWidgets.QWidget): self.setLayout(layout) -def get_reference_node_parents(ref): - """Return all parent reference nodes of reference node - - Args: - ref (str): reference node. - - Returns: - list: The upstream parent reference nodes. - - """ - parents = [] - return parents - - class SequenceLoader(api.Loader): """A basic SequenceLoader for Resolve diff --git a/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py b/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py index ee4905033b..753bddc1da 100644 --- a/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py +++ b/pype/hosts/resolve/utility_scripts/PYPE_sync_util_scripts.py @@ -2,10 +2,10 @@ import os import sys import pype -import pype.hosts.resolve as bmdvr def main(env): + import pype.hosts.resolve as bmdvr # Registers pype's Global pyblish plugins pype.install() bmdvr.setup(env) diff --git a/pype/hosts/resolve/utility_scripts/resolve_dev_scriping.py b/pype/hosts/resolve/utility_scripts/resolve_dev_scriping.py new file mode 100644 index 0000000000..bd9fe593e0 --- /dev/null +++ b/pype/hosts/resolve/utility_scripts/resolve_dev_scriping.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + + +def main(): + import pype.hosts.resolve as bmdvr + bmdvr.utils.get_resolve_module() + + tracks = list() + track_type = "video" + sequence = bmdvr.get_current_sequence() + + # get all tracks count filtered by track type + selected_track_count = sequence.GetTrackCount(track_type) + + # loop all tracks and get items + for track_index in range(1, (int(selected_track_count) + 1)): + track_name = sequence.GetTrackName("video", track_index) + tracks.append(track_name) + + +if __name__ == "__main__": + main() diff --git a/pype/plugins/resolve/create/create_shot_clip_new.py b/pype/plugins/resolve/create/create_shot_clip_new.py new file mode 100644 index 0000000000..03a3041089 --- /dev/null +++ b/pype/plugins/resolve/create/create_shot_clip_new.py @@ -0,0 +1,252 @@ +from pprint import pformat +from pype.hosts import resolve +from pype.hosts.resolve import lib + + +class CreateShotClipNew(resolve.Creator): + """Publishable clip""" + + label = "Create Publishable Clip [New]" + family = "clip" + icon = "film" + 
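+    # default subset name variants offered in the creator tool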
defaults = ["Main"] + + gui_tracks = resolve.get_video_track_names() + gui_name = "Pype publish attributes creator" + gui_info = "Define sequential rename and fill hierarchy data." + gui_inputs = { + "renameHierarchy": { + "type": "section", + "label": "Shot Hierarchy And Rename Settings", + "target": "ui", + "order": 0, + "value": { + "hierarchy": { + "value": "{folder}/{sequence}", + "type": "QLineEdit", + "label": "Shot Parent Hierarchy", + "target": "tag", + "toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa + "order": 0}, + "clipRename": { + "value": False, + "type": "QCheckBox", + "label": "Rename clips", + "target": "ui", + "toolTip": "Renaming selected clips on fly", # noqa + "order": 1}, + "clipName": { + "value": "{sequence}{shot}", + "type": "QLineEdit", + "label": "Clip Name Template", + "target": "ui", + "toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa + "order": 2}, + "countFrom": { + "value": 10, + "type": "QSpinBox", + "label": "Count sequence from", + "target": "ui", + "toolTip": "Set when the sequence number stafrom", # noqa + "order": 3}, + "countSteps": { + "value": 10, + "type": "QSpinBox", + "label": "Stepping number", + "target": "ui", + "toolTip": "What number is adding every new step", # noqa + "order": 4}, + } + }, + "hierarchyData": { + "type": "dict", + "label": "Shot Template Keywords", + "target": "tag", + "order": 1, + "value": { + "folder": { + "value": "shots", + "type": "QLineEdit", + "label": "{folder}", + "target": "tag", + "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 0}, + "episode": { + "value": "ep01", + "type": "QLineEdit", + "label": "{episode}", + "target": "tag", + "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 1}, + "sequence": { + "value": "sq01", + "type": "QLineEdit", + "label": "{sequence}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 2}, + "track": { + "value": "{_track_}", + "type": "QLineEdit", + "label": "{track}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 3}, + "shot": { + "value": "sh###", + "type": "QLineEdit", + "label": "{shot}", + "target": "tag", + "toolTip": "Name of shot. `#` is converted to paded number. 
\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 4} + } + }, + "verticalSync": { + "type": "section", + "label": "Vertical Synchronization Of Attributes", + "target": "ui", + "order": 2, + "value": { + "vSyncOn": { + "value": True, + "type": "QCheckBox", + "label": "Enable Vertical Sync", + "target": "ui", + "toolTip": "Switch on if you want clips above each other to share its attributes", # noqa + "order": 0}, + "vSyncTrack": { + "value": gui_tracks, # noqa + "type": "QComboBox", + "label": "Master track", + "target": "ui", + "toolTip": "Select driving track name which should be mastering all others", # noqa + "order": 1} + } + }, + "publishSettings": { + "type": "section", + "label": "Publish Settings", + "target": "ui", + "order": 3, + "value": { + "subsetName": { + "value": ["", "main", "bg", "fg", "bg", + "animatic"], + "type": "QComboBox", + "label": "Subset Name", + "target": "ui", + "toolTip": "chose subset name patern, if is selected, name of track layer will be used", # noqa + "order": 0}, + "subsetFamily": { + "value": ["plate", "take"], + "type": "QComboBox", + "label": "Subset Family", + "target": "ui", "toolTip": "What use of this subset is for", # noqa + "order": 1}, + "reviewTrack": { + "value": ["< none >"] + gui_tracks, + "type": "QComboBox", + "label": "Use Review Track", + "target": "ui", + "toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa + "order": 2}, + "audio": { + "value": False, + "type": "QCheckBox", + "label": "Include audio", + "target": "tag", + "toolTip": "Process subsets with corresponding audio", # noqa + "order": 3}, + "sourceResolution": { + "value": False, + "type": "QCheckBox", + "label": "Source resolution", + "target": "tag", + "toolTip": "Is resloution taken from timeline or source?", # noqa + "order": 4}, + } + }, + "frameRangeAttr": { + "type": "section", + "label": "Shot Attributes", + "target": "ui", + "order": 4, + "value": { + "workfileFrameStart": { + "value": 1001, + "type": "QSpinBox", + "label": "Workfiles Start Frame", + "target": "tag", + "toolTip": "Set workfile starting frame number", # noqa + "order": 0}, + "handleStart": { + "value": 0, + "type": "QSpinBox", + "label": "Handle Start", + "target": "tag", + "toolTip": "Handle at start of clip", # noqa + "order": 1}, + "handleEnd": { + "value": 0, + "type": "QSpinBox", + "label": "Handle End", + "target": "tag", + "toolTip": "Handle at end of clip", # noqa + "order": 2}, + } + } + } + + presets = None + + def process(self): + # get key pares from presets and match it on ui inputs + for k, v in self.gui_inputs.items(): + if v["type"] in ("dict", "section"): + # nested dictionary (only one level allowed + # for sections and dict) + for _k, _v in v["value"].items(): + if self.presets.get(_k): + self.gui_inputs[k][ + "value"][_k]["value"] = self.presets[_k] + if self.presets.get(k): + self.gui_inputs[k]["value"] = self.presets[k] + + print(pformat(self.gui_inputs)) + # open widget for plugins inputs + widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs) + widget.exec_() + + if len(self.selected) < 1: + return + + if not widget.result: + print("Operation aborted") + return + + self.rename_add = 0 + + # get ui output for track name for vertical sync + v_sync_track = widget.result["vSyncTrack"]["value"] + + # sort selected trackItems by + sorted_selected_track_items = list() + 
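+        # clips on the chosen master (vSync) track go first so vertical sync can match the rest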
unsorted_selected_track_items = list() + for _ti in self.selected: + if _ti.parent().name() in v_sync_track: + sorted_selected_track_items.append(_ti) + else: + unsorted_selected_track_items.append(_ti) + + sorted_selected_track_items.extend(unsorted_selected_track_items) + + kwargs = { + "ui_inputs": widget.result, + "avalon": self.data + } + + for i, track_item in enumerate(sorted_selected_track_items): + self.rename_index = i + + # convert track item to timeline media pool item + phiero.PublishClip(self, track_item, **kwargs).convert() From 6448380312eea365b376774f09c458e739a5c0b5 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 27 Nov 2020 18:29:29 +0100 Subject: [PATCH 005/198] feat(hiero): wip new crator --- pype/hosts/resolve/__init__.py | 16 +- pype/hosts/resolve/lib.py | 123 +++++++- pype/hosts/resolve/pipeline.py | 100 +++++- pype/hosts/resolve/plugin.py | 293 ++++++++++++++++++ .../resolve/create/create_shot_clip_new.py | 31 +- 5 files changed, 538 insertions(+), 25 deletions(-) diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py index 8b0ca774b5..b8457438c6 100644 --- a/pype/hosts/resolve/__init__.py +++ b/pype/hosts/resolve/__init__.py @@ -19,6 +19,11 @@ from .lib import ( get_current_sequence, get_video_track_names, get_current_track_items, + get_track_item_pype_tag, + set_track_item_pype_tag, + imprint, + set_publish_attribute, + get_publish_attribute, create_current_sequence_media_bin, create_compound_clip, swap_clips, @@ -28,7 +33,10 @@ from .lib import ( from .menu import launch_pype_menu -from .plugin import Creator +from .plugin import ( + Creator, + PublishClip +) from .workio import ( open_file, @@ -63,6 +71,11 @@ __all__ = [ "get_current_sequence", "get_video_track_names", "get_current_track_items", + "get_track_item_pype_tag", + "set_track_item_pype_tag", + "imprint", + "set_publish_attribute", + "get_publish_attribute", "create_current_sequence_media_bin", "create_compound_clip", "swap_clips", @@ -74,6 +87,7 @@ __all__ = [ # plugin "Creator", + "PublishClip", # workio "open_file", diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index e36dc1bb15..4052fa74fd 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -1,5 +1,6 @@ import sys import json +import ast from opentimelineio import opentime from pprint import pformat @@ -11,7 +12,7 @@ self = sys.modules[__name__] self.pm = None self.rename_index = 0 self.rename_add = 0 -self.pype_metadata_key = "VFX Notes" +self.pype_tag_name = "VFX Notes" def get_project_manager(): @@ -93,13 +94,121 @@ def get_current_track_items( if filter is True: if selecting_color in ti_color: selected_clips.append(data) - # ti.ClearClipColor() else: selected_clips.append(data) return selected_clips +def get_track_item_pype_tag(track_item): + """ + Get pype track item tag created by creator or loader plugin. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + return_tag = None + # get all tags from track item + _tags = track_item.GetMetadata() + if not _tags: + return None + for key, data in _tags.items(): + # return only correct tag defined by global name + if key in self.pype_tag_name: + return_tag = json.loads(data) + + return return_tag + + +def set_track_item_pype_tag(track_item, data=None): + """ + Set pype track item tag to input track_item. 
+
+    Attributes:
+        trackItem (hiero.core.TrackItem): hiero object
+
+    Returns:
+        hiero.core.Tag
+    """
+    data = data or dict()
+
+    # basic tag attributes
+    tag_data = {
+        "editable": "0",
+        "note": "Pype data holder",
+        "icon": "pype_icon.png",
+        "metadata": {k: v for k, v in data.items()}
+    }
+    # get available pype tag if any
+    _tag = get_track_item_pype_tag(track_item)
+
+    if _tag:
+        # a pype tag already exists, update it with the input data
+        _tag.update(tag_data)
+        track_item.SetMetadata(self.pype_tag_name, json.dumps(_tag))
+        return _tag
+    else:
+        # no tag yet, create one and add it to the input track item
+        track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))
+        return tag_data
+
+
+def imprint(track_item, data=None):
+    """
+    Adding `Avalon data` into a hiero track item tag.
+
+    Also including publish attribute into tag.
+
+    Arguments:
+        track_item (hiero.core.TrackItem): hiero track item object
+        data (dict): Any data which needs to be imprinted
+
+    Examples:
+        data = {
+            'asset': 'sq020sh0280',
+            'family': 'render',
+            'subset': 'subsetMain'
+        }
+    """
+    data = data or {}
+
+    set_track_item_pype_tag(track_item, data)
+
+    # add publish attribute
+    set_publish_attribute(track_item, True)
+
+
+def set_publish_attribute(track_item, value):
+    """ Set Publish attribute in input Tag object
+
+    Attributes:
+        track_item (hiero.core.TrackItem): a track item object
+        value (bool): True or False
+    """
+    tag_data = get_track_item_pype_tag(track_item)
+    tag_data["publish"] = str(value)
+    # set data to the publish attribute
+    set_track_item_pype_tag(track_item, tag_data)
+
+
+def get_publish_attribute(track_item):
+    """ Get Publish attribute from input Tag object
+
+    Attributes:
+        track_item (hiero.core.TrackItem): a track item object
+
+    Returns:
+        bool: True or False
+    """
+    tag_data = get_track_item_pype_tag(track_item)
+    value = tag_data["publish"]
+
+    # return value converted to bool; a string is stored in the tag
+ return ast.literal_eval(value) + + def create_current_sequence_media_bin(sequence): seq_name = sequence.GetName() media_pool = get_current_project().GetMediaPool() @@ -299,9 +408,9 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs): project.SetCurrentTimeline(sq_origin) # Add collected metadata and attributes to the comound clip: - if mp_item.GetMetadata(self.pype_metadata_key): - clip_attributes[self.pype_metadata_key] = mp_item.GetMetadata( - self.pype_metadata_key)[self.pype_metadata_key] + if mp_item.GetMetadata(self.pype_tag_name): + clip_attributes[self.pype_tag_name] = mp_item.GetMetadata( + self.pype_tag_name)[self.pype_tag_name] # stringify clip_attributes = json.dumps(clip_attributes) @@ -311,7 +420,7 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs): cct.SetMetadata(k, v) # add metadata to cct - cct.SetMetadata(self.pype_metadata_key, clip_attributes) + cct.SetMetadata(self.pype_tag_name, clip_attributes) # reset start timecode of the compound clip cct.SetClipProperty("Start TC", mp_props["Start TC"]) @@ -389,7 +498,7 @@ def get_pype_clip_metadata(clip): mp_item = clip.GetMediaPoolItem() metadata = mp_item.GetMetadata() - return metadata.get(self.pype_metadata_key) + return metadata.get(self.pype_tag_name) def get_clip_attributes(clip): diff --git a/pype/hosts/resolve/pipeline.py b/pype/hosts/resolve/pipeline.py index 92bef2e13b..22437980e7 100644 --- a/pype/hosts/resolve/pipeline.py +++ b/pype/hosts/resolve/pipeline.py @@ -3,11 +3,15 @@ Basic avalon integration """ import os import contextlib +from collections import OrderedDict from avalon.tools import workfiles from avalon import api as avalon +from avalon import schema +from avalon.pipeline import AVALON_CONTAINER_ID from pyblish import api as pyblish import pype from pype.api import Logger +from . import lib log = Logger().get_logger(__name__, "resolve") @@ -80,29 +84,46 @@ def uninstall(): avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH) -def containerise(obj, +def containerise(track_item, name, namespace, context, loader=None, data=None): - """Bundle Resolve's object into an assembly and imprint it with metadata + """Bundle Hiero's object into an assembly and imprint it with metadata Containerisation enables a tracking of version, author and origin for loaded assets. Arguments: - obj (obj): Resolve's object to imprint as container + track_item (hiero.core.TrackItem): object to imprint as container name (str): Name of resulting assembly namespace (str): Namespace under which to host container context (dict): Asset information loader (str, optional): Name of node used to produce this container. 
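+        data (dict, optional): Additional data to imprint into the tag.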
Returns: - obj (obj): containerised object + track_item (hiero.core.TrackItem): containerised object """ - pass + + data_imprint = OrderedDict({ + "schema": "avalon-core:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + }) + + if data: + for k, v in data.items(): + data_imprint.update({k: v}) + + print("_ data_imprint: {}".format(data_imprint)) + lib.set_track_item_pype_tag(track_item, data_imprint) + + return track_item def ls(): @@ -115,20 +136,77 @@ def ls(): See the `container.json` schema for details on how it should look, and the Maya equivalent, which is in `avalon.maya.pipeline` """ - pass + + # get all track items from current timeline + all_track_items = lib.get_current_track_items(filter=False) + + for track_item_data in all_track_items: + track_item = track_item_data["clip"]["item"] + container = parse_container(track_item) + if container: + yield container -def parse_container(container): - """Return the container node's full container data. +def parse_container(track_item, validate=True): + """Return container data from track_item's pype tag. Args: - container (str): A container node name. + track_item (hiero.core.TrackItem): A containerised track item. + validate (bool)[optional]: validating with avalon scheme Returns: - dict: The container schema data for this container node. + dict: The container schema data for input containerized track item. """ - pass + # convert tag metadata to normal keys names + data = lib.get_track_item_pype_tag(track_item) + + if validate and data and data.get("schema"): + schema.validate(data) + + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + + if not all(key in data for key in required): + return + + container = {key: data[key] for key in required} + + container["objectName"] = track_item.name() + + # Store reference to the node object + container["_track_item"] = track_item + + return container + + +def update_container(track_item, data=None): + """Update container data to input track_item's pype tag. + + Args: + track_item (hiero.core.TrackItem): A containerised track item. + data (dict)[optional]: dictionery with data to be updated + + Returns: + bool: True if container was updated correctly + + """ + data = data or dict() + + container = lib.get_track_item_pype_tag(track_item) + + for _key, _value in container.items(): + try: + container[_key] = data[_key] + except KeyError: + pass + + log.info("Updating container: `{}`".format(track_item)) + return bool(lib.set_track_item_pype_tag(track_item, container)) def launch_workfiles_app(*args): diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index a652fbfe64..b465d77950 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -3,6 +3,7 @@ from avalon import api from pype.hosts import resolve from avalon.vendor import qargparse from pype.api import config +from . 
import lib from Qt import QtWidgets, QtCore @@ -351,3 +352,295 @@ class Creator(api.Creator): self.selected = resolve.get_current_track_items(filter=False) self.widget = CreatorWidget + + +class PublishClip: + """ + Convert a track item to publishable instance + + Args: + track_item (hiero.core.TrackItem): hiero track item object + kwargs (optional): additional data needed for rename=True (presets) + + Returns: + hiero.core.TrackItem: hiero track item object with pype tag + """ + vertical_clip_match = dict() + tag_data = dict() + types = { + "shot": "shot", + "folder": "folder", + "episode": "episode", + "sequence": "sequence", + "track": "sequence", + } + + # parents search patern + parents_search_patern = r"\{([a-z]*?)\}" + + # default templates for non-ui use + rename_default = False + hierarchy_default = "{_folder_}/{_sequence_}/{_track_}" + clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}" + subset_name_default = "" + review_track_default = "< none >" + subset_family_default = "plate" + count_from_default = 10 + count_steps_default = 10 + vertical_sync_default = False + driving_layer_default = "" + + def __init__(self, cls, track_item, **kwargs): + # populate input cls attribute onto self.[attr] + self.__dict__.update(cls.__dict__) + + # get main parent objects + self.track_item = track_item["clip"]["item"] + sequence_name = track_item["sequence"].GetName() + self.sequence_name = str(sequence_name).replace(" ", "_") + + # track item (clip) main attributes + self.ti_name = self.track_item.GetName() + self.ti_index = int(track_item["clip"]["index"]) + + # get track name and index + track_name = track_item["track"]["name"] + self.track_name = str(track_name).replace(" ", "_") + self.track_index = int(track_item["track"]["index"]) + + # adding tag.family into tag + if kwargs.get("avalon"): + self.tag_data.update(kwargs["avalon"]) + + # adding ui inputs if any + self.ui_inputs = kwargs.get("ui_inputs", {}) + + # populate default data before we get other attributes + self._populate_track_item_default_data() + + # use all populated default data to create all important attributes + self._populate_attributes() + + # create parents with correct types + self._create_parents() + + def convert(self): + # solve track item data and add them to tag data + self._convert_to_tag_data() + + # if track name is in review track name and also if driving track name + # is not in review track name: skip tag creation + if (self.track_name in self.review_layer) and ( + self.driving_layer not in self.review_layer): + return + + # deal with clip name + new_name = self.tag_data.pop("newClipName") + + if self.rename: + # rename track item + self.track_item.setName(new_name) + self.tag_data["asset"] = new_name + else: + self.tag_data["asset"] = self.ti_name + + # create pype tag on track_item and add data + lib.imprint(self.track_item, self.tag_data) + + return self.track_item + + def _populate_track_item_default_data(self): + """ Populate default formating data from track item. """ + + self.track_item_default_data = { + "_folder_": "shots", + "_sequence_": self.sequence_name, + "_track_": self.track_name, + "_clip_": self.ti_name, + "_trackIndex_": self.track_index, + "_clipIndex_": self.ti_index + } + + def _populate_attributes(self): + """ Populate main object attributes. 
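+        (values come from ui_inputs when the dialog ran, otherwise the class defaults)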
""" + # track item frame range and parent track name for vertical sync check + self.clip_in = int(self.track_item.GetStart()) + self.clip_out = int(self.track_item.GetEnd()) + + # define ui inputs if non gui mode was used + self.shot_num = self.ti_index + print( + "____ self.shot_num: {}".format(self.shot_num)) + + # ui_inputs data or default values if gui was not used + self.rename = self.ui_inputs.get( + "clipRename", {}).get("value") or self.rename_default + self.clip_name = self.ui_inputs.get( + "clipName", {}).get("value") or self.clip_name_default + self.hierarchy = self.ui_inputs.get( + "hierarchy", {}).get("value") or self.hierarchy_default + self.hierarchy_data = self.ui_inputs.get( + "hierarchyData", {}).get("value") or \ + self.track_item_default_data.copy() + self.count_from = self.ui_inputs.get( + "countFrom", {}).get("value") or self.count_from_default + self.count_steps = self.ui_inputs.get( + "countSteps", {}).get("value") or self.count_steps_default + self.subset_name = self.ui_inputs.get( + "subsetName", {}).get("value") or self.subset_name_default + self.subset_family = self.ui_inputs.get( + "subsetFamily", {}).get("value") or self.subset_family_default + self.vertical_sync = self.ui_inputs.get( + "vSyncOn", {}).get("value") or self.vertical_sync_default + self.driving_layer = self.ui_inputs.get( + "vSyncTrack", {}).get("value") or self.driving_layer_default + self.review_track = self.ui_inputs.get( + "reviewTrack", {}).get("value") or self.review_track_default + + # build subset name from layer name + if self.subset_name == "": + self.subset_name = self.track_name + + # create subset for publishing + self.subset = self.subset_family + self.subset_name.capitalize() + + def _replace_hash_to_expression(self, name, text): + """ Replace hash with number in correct padding. """ + _spl = text.split("#") + _len = (len(_spl) - 1) + _repl = "{{{0}:0>{1}}}".format(name, _len) + new_text = text.replace(("#" * _len), _repl) + return new_text + + def _convert_to_tag_data(self): + """ Convert internal data to tag data. 
+ + Populating the tag data into internal variable self.tag_data + """ + # define vertical sync attributes + master_layer = True + self.review_layer = "" + if self.vertical_sync: + # check if track name is not in driving layer + if self.track_name not in self.driving_layer: + # if it is not then define vertical sync as None + master_layer = False + + # increasing steps by index of rename iteration + self.count_steps *= self.rename_index + + hierarchy_formating_data = dict() + _data = self.track_item_default_data.copy() + if self.ui_inputs: + # adding tag metadata from ui + for _k, _v in self.ui_inputs.items(): + if _v["target"] == "tag": + self.tag_data[_k] = _v["value"] + + # driving layer is set as positive match + if master_layer or self.vertical_sync: + # mark review layer + if self.review_track and ( + self.review_track not in self.review_track_default): + # if review layer is defined and not the same as defalut + self.review_layer = self.review_track + # shot num calculate + if self.rename_index == 0: + self.shot_num = self.count_from + else: + self.shot_num = self.count_from + self.count_steps + + # clip name sequence number + _data.update({"shot": self.shot_num}) + + # solve # in test to pythonic expression + for _k, _v in self.hierarchy_data.items(): + if "#" not in _v["value"]: + continue + self.hierarchy_data[ + _k]["value"] = self._replace_hash_to_expression( + _k, _v["value"]) + + # fill up pythonic expresisons in hierarchy data + for k, _v in self.hierarchy_data.items(): + hierarchy_formating_data[k] = _v["value"].format(**_data) + else: + # if no gui mode then just pass default data + hierarchy_formating_data = self.hierarchy_data + + tag_hierarchy_data = self._solve_tag_hierarchy_data( + hierarchy_formating_data + ) + + if master_layer and self.vertical_sync: + tag_hierarchy_data.update({"masterLayer": True}) + self.vertical_clip_match.update({ + (self.clip_in, self.clip_out): tag_hierarchy_data + }) + + if not master_layer and self.vertical_sync: + # driving layer is set as negative match + for (_in, _out), master_data in self.vertical_clip_match.items(): + master_data.update({"masterLayer": False}) + if _in == self.clip_in and _out == self.clip_out: + data_subset = master_data["subset"] + # add track index in case duplicity of names in master data + if self.subset in data_subset: + master_data["subset"] = self.subset + str( + self.track_index) + # in case track name and subset name is the same then add + if self.subset_name == self.track_name: + master_data["subset"] = self.subset + # assing data to return hierarchy data to tag + tag_hierarchy_data = master_data + + # add data to return data dict + self.tag_data.update(tag_hierarchy_data) + + if master_layer and self.review_layer: + self.tag_data.update({"review": self.review_layer}) + else: + self.tag_data.update({"review": False}) + + def _solve_tag_hierarchy_data(self, hierarchy_formating_data): + """ Solve tag data from hierarchy data and templates. """ + # fill up clip name and hierarchy keys + hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) + clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + + return { + "newClipName": clip_name_filled, + "hierarchy": hierarchy_filled, + "parents": self.parents, + "hierarchyData": hierarchy_formating_data, + "subset": self.subset, + "families": [self.subset_family] + } + + def _convert_to_entity(self, key): + """ Converting input key to key with type. 
""" + # convert to entity type + entity_type = self.types.get(key, None) + + assert entity_type, "Missing entity type for `{}`".format( + key + ) + + return { + "entity_type": entity_type, + "entity_name": self.hierarchy_data[key]["value"].format( + **self.track_item_default_data + ) + } + + def _create_parents(self): + """ Create parents and return it in list. """ + self.parents = list() + + patern = re.compile(self.parents_search_patern) + par_split = [patern.findall(t).pop() + for t in self.hierarchy.split("/")] + + for key in par_split: + parent = self._convert_to_entity(key) + self.parents.append(parent) diff --git a/pype/plugins/resolve/create/create_shot_clip_new.py b/pype/plugins/resolve/create/create_shot_clip_new.py index 03a3041089..5d6c0a2e79 100644 --- a/pype/plugins/resolve/create/create_shot_clip_new.py +++ b/pype/plugins/resolve/create/create_shot_clip_new.py @@ -232,11 +232,11 @@ class CreateShotClipNew(resolve.Creator): # sort selected trackItems by sorted_selected_track_items = list() unsorted_selected_track_items = list() - for _ti in self.selected: - if _ti.parent().name() in v_sync_track: - sorted_selected_track_items.append(_ti) + for track_item_data in self.selected: + if track_item_data["track"]["name"] in v_sync_track: + sorted_selected_track_items.append(track_item_data) else: - unsorted_selected_track_items.append(_ti) + unsorted_selected_track_items.append(track_item_data) sorted_selected_track_items.extend(unsorted_selected_track_items) @@ -245,8 +245,27 @@ class CreateShotClipNew(resolve.Creator): "avalon": self.data } - for i, track_item in enumerate(sorted_selected_track_items): + # sequence attrs + sq_frame_start = self.sequence.GetStartFrame() + sq_markers = self.sequence.GetMarkers() + + # create media bin for compound clips (trackItems) + mp_folder = resolve.create_current_sequence_media_bin(self.sequence) + + for i, track_item_data in enumerate(sorted_selected_track_items): self.rename_index = i # convert track item to timeline media pool item - phiero.PublishClip(self, track_item, **kwargs).convert() + resolve.PublishClip(self, track_item_data, **kwargs).convert() + + # clear color after it is done + track_item_data["clip"]["item"].ClearClipColor() + + # convert track item to timeline media pool item + resolve.create_compound_clip( + track_item_data, + mp_folder, + rename=True, + **dict( + {"presets": widget.result}) + ) From f59f7bb3985458fa557be3d303fc548522d39ea8 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 30 Nov 2020 12:02:07 +0100 Subject: [PATCH 006/198] feat(resolve): refactory Creator for clips --- pype/hosts/resolve/lib.py | 27 ++----------------- pype/hosts/resolve/menu_style.qss | 11 ++++++++ pype/hosts/resolve/plugin.py | 24 +++++++++++------ .../resolve/create/create_shot_clip_new.py | 22 ++++++--------- 4 files changed, 37 insertions(+), 47 deletions(-) diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 4052fa74fd..74f105a130 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -303,7 +303,7 @@ def get_name_with_data(clip_data, presets): }) -def create_compound_clip(clip_data, folder, rename=False, **kwargs): +def create_compound_clip(clip_data, name, folder): """ Convert timeline object into nested timeline object @@ -311,8 +311,7 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs): clip_data (dict): timeline item object packed into dict with project, timeline (sequence) folder (resolve.MediaPool.Folder): media pool folder object, - rename (bool)[optional]: renaming 
in sequence or not - kwargs (optional): additional data needed for rename=True (presets) + name (str): name for compound clip Returns: resolve.MediaPoolItem: media pool item with compound clip timeline(cct) @@ -324,34 +323,12 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs): # get details of objects clip_item = clip["item"] - track = clip_data["track"] mp = project.GetMediaPool() # get clip attributes clip_attributes = get_clip_attributes(clip_item) - print(f"_ clip_attributes: {pformat(clip_attributes)}") - if rename: - presets = kwargs.get("presets") - if presets: - name, data = get_name_with_data(clip_data, presets) - # add hirarchy data to clip attributes - clip_attributes.update(data) - else: - name = "{:0>3}_{:0>4}".format( - int(track["index"]), int(clip["index"])) - else: - # build name - clip_name_split = clip_item.GetName().split(".") - name = "_".join([ - track["name"], - str(track["index"]), - clip_name_split[0], - str(clip["index"])] - ) - - # get metadata mp_item = clip_item.GetMediaPoolItem() mp_props = mp_item.GetClipProperty() diff --git a/pype/hosts/resolve/menu_style.qss b/pype/hosts/resolve/menu_style.qss index ea11c4ca2e..5a1d39fe79 100644 --- a/pype/hosts/resolve/menu_style.qss +++ b/pype/hosts/resolve/menu_style.qss @@ -4,6 +4,17 @@ QWidget { font-size: 13px; } +QComboBox { + border: 1px solid #090909; + background-color: #201f1f; + color: #ffffff; +} + +QComboBox QAbstractItemView +{ + color: white; +} + QPushButton { border: 1px solid #090909; background-color: #201f1f; diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index b465d77950..c816735be2 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -390,23 +390,24 @@ class PublishClip: vertical_sync_default = False driving_layer_default = "" - def __init__(self, cls, track_item, **kwargs): + def __init__(self, cls, track_item_data, **kwargs): # populate input cls attribute onto self.[attr] self.__dict__.update(cls.__dict__) # get main parent objects - self.track_item = track_item["clip"]["item"] - sequence_name = track_item["sequence"].GetName() + self.track_item_data = track_item_data + self.track_item = track_item_data["clip"]["item"] + sequence_name = track_item_data["sequence"].GetName() self.sequence_name = str(sequence_name).replace(" ", "_") # track item (clip) main attributes self.ti_name = self.track_item.GetName() - self.ti_index = int(track_item["clip"]["index"]) + self.ti_index = int(track_item_data["clip"]["index"]) # get track name and index - track_name = track_item["track"]["name"] + track_name = track_item_data["track"]["name"] self.track_name = str(track_name).replace(" ", "_") - self.track_index = int(track_item["track"]["index"]) + self.track_index = int(track_item_data["track"]["index"]) # adding tag.family into tag if kwargs.get("avalon"): @@ -415,6 +416,9 @@ class PublishClip: # adding ui inputs if any self.ui_inputs = kwargs.get("ui_inputs", {}) + # adding media pool folder if any + self.mp_folder = kwargs.get("mp_folder") + # populate default data before we get other attributes self._populate_track_item_default_data() @@ -438,12 +442,16 @@ class PublishClip: new_name = self.tag_data.pop("newClipName") if self.rename: - # rename track item - self.track_item.setName(new_name) self.tag_data["asset"] = new_name else: self.tag_data["asset"] = self.ti_name + self.track_item = lib.create_compound_clip( + self.track_item_data, + self.tag_data["asset"], + self.mp_folder + ) + # create pype tag on track_item and add data 
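+        # imprint() also switches the publish attribute on via set_publish_attribute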
lib.imprint(self.track_item, self.tag_data) diff --git a/pype/plugins/resolve/create/create_shot_clip_new.py b/pype/plugins/resolve/create/create_shot_clip_new.py index 5d6c0a2e79..a94e30ed73 100644 --- a/pype/plugins/resolve/create/create_shot_clip_new.py +++ b/pype/plugins/resolve/create/create_shot_clip_new.py @@ -240,11 +240,6 @@ class CreateShotClipNew(resolve.Creator): sorted_selected_track_items.extend(unsorted_selected_track_items) - kwargs = { - "ui_inputs": widget.result, - "avalon": self.data - } - # sequence attrs sq_frame_start = self.sequence.GetStartFrame() sq_markers = self.sequence.GetMarkers() @@ -252,6 +247,14 @@ class CreateShotClipNew(resolve.Creator): # create media bin for compound clips (trackItems) mp_folder = resolve.create_current_sequence_media_bin(self.sequence) + kwargs = { + "ui_inputs": widget.result, + "avalon": self.data, + "mp_folder": mp_folder, + "sq_frame_start": sq_frame_start, + "sq_markers": sq_markers + } + for i, track_item_data in enumerate(sorted_selected_track_items): self.rename_index = i @@ -260,12 +263,3 @@ class CreateShotClipNew(resolve.Creator): # clear color after it is done track_item_data["clip"]["item"].ClearClipColor() - - # convert track item to timeline media pool item - resolve.create_compound_clip( - track_item_data, - mp_folder, - rename=True, - **dict( - {"presets": widget.result}) - ) From 1f65c27c1b0489d35ea088448ad2239757aadc86 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 30 Nov 2020 18:17:53 +0100 Subject: [PATCH 007/198] feat(resolve): publishing wip --- pype/hosts/resolve/lib.py | 27 +++--- pype/hosts/resolve/plugin.py | 7 +- .../{publish => _publish}/collect_clips.py | 0 .../resolve/create/collect_clip_resolution.py | 38 ++++++++ .../resolve/publish/collect_instances.py | 97 +++++++++++++++++++ .../resolve/publish/collect_project.py | 29 ------ .../resolve/publish/collect_workfile.py | 58 +++++++++++ 7 files changed, 211 insertions(+), 45 deletions(-) rename pype/plugins/resolve/{publish => _publish}/collect_clips.py (100%) create mode 100644 pype/plugins/resolve/create/collect_clip_resolution.py create mode 100644 pype/plugins/resolve/publish/collect_instances.py delete mode 100644 pype/plugins/resolve/publish/collect_project.py create mode 100644 pype/plugins/resolve/publish/collect_workfile.py diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 74f105a130..8dd9566c44 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -105,14 +105,16 @@ def get_track_item_pype_tag(track_item): Get pype track item tag created by creator or loader plugin. 
     Attributes:
-        trackItem (hiero.core.TrackItem): hiero object
+        trackItem (resolve.TimelineItem): resolve object

     Returns:
         hiero.core.Tag: hierarchy, orig clip attributes
     """
     return_tag = None
+    media_pool_item = track_item.GetMediaPoolItem()
+
     # get all tags from track item
-    _tags = track_item.GetMetadata()
+    _tags = media_pool_item.GetMetadata()
     if not _tags:
         return None
     for key, data in _tags.items():
         # return only correct tag defined by global name
@@ -135,22 +137,17 @@ def set_track_item_pype_tag(track_item, data=None):
     """
     data = data or dict()

-    # basic Tag's attribute
-    tag_data = {
-        "editable": "0",
-        "note": "Pype data holder",
-        "icon": "pype_icon.png",
-        "metadata": {k: v for k, v in data.items()}
-    }
     # get available pype tag if any
-    _tag = get_track_item_pype_tag(track_item)
+    tag_data = get_track_item_pype_tag(track_item)

-    if _tag:
+    if tag_data:
+        media_pool_item = track_item.GetMediaPoolItem()
         # it not tag then create one
-        _tag.update(tag_data)
-        track_item.SetMetadata(self.pype_tag_name, json.dumps(_tag))
-        return _tag
+        tag_data.update(data)
+        media_pool_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))
+        return tag_data
     else:
+        tag_data = data
         # if pype tag available then update with input data
         # add it to the input track item
         track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))
@@ -416,7 +413,7 @@ def swap_clips(from_clip, to_clip, to_clip_name, to_in_frame, to_out_frame):
     It will add take and activate it to the frame range which is inputted

     Args:
-        from_clip (resolve.mediaPoolItem)
+        from_clip (resolve.TimelineItem)
         to_clip (resolve.mediaPoolItem)
         to_clip_name (str): name of to_clip
         to_in_frame (float): cut in frame, usually `GetLeftOffset()`
diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py
index c816735be2..95097434f8 100644
--- a/pype/hosts/resolve/plugin.py
+++ b/pype/hosts/resolve/plugin.py
@@ -446,12 +446,17 @@ class PublishClip:
         else:
             self.tag_data["asset"] = self.ti_name

-        self.track_item = lib.create_compound_clip(
+        lib.create_compound_clip(
             self.track_item_data,
             self.tag_data["asset"],
             self.mp_folder
         )

+        # add track_item_data selection to tag
+        self.tag_data.update({
+            "track_data": self.track_item_data["track"]
+        })
+
         # create pype tag on track_item and add data
         lib.imprint(self.track_item, self.tag_data)

diff --git a/pype/plugins/resolve/publish/collect_clips.py b/pype/plugins/resolve/_publish/collect_clips.py
similarity index 100%
rename from pype/plugins/resolve/publish/collect_clips.py
rename to pype/plugins/resolve/_publish/collect_clips.py
diff --git a/pype/plugins/resolve/create/collect_clip_resolution.py b/pype/plugins/resolve/create/collect_clip_resolution.py
new file mode 100644
index 0000000000..3bea68c677
--- /dev/null
+++ b/pype/plugins/resolve/create/collect_clip_resolution.py
@@ -0,0 +1,38 @@
+import pyblish.api
+
+
+class CollectClipResolution(pyblish.api.InstancePlugin):
+    """Collect clip geometry resolution"""
+
+    order = pyblish.api.CollectorOrder - 0.1
+    label = "Collect Clip Resolution"
+    hosts = ["resolve"]
+    families = ["clip"]
+
+    def process(self, instance):
+        sequence = instance.context.data['activeSequence']
+        item = instance.data["item"]
+        source_resolution = instance.data.get("sourceResolution", None)
+
+        resolution_width = int(sequence.format().width())
+        resolution_height = int(sequence.format().height())
+        pixel_aspect = sequence.format().pixelAspect()
+
+        # source exception
+        if source_resolution:
+            resolution_width = int(item.source().mediaSource().width())
+            resolution_height = int(item.source().mediaSource().height())
+
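# Aside: a collector like CollectClipResolution above is never called directly;
# pyblish discovers it from a registered plugin path and runs it by `order`.
# A minimal sketch, assuming pyblish-base; the path below is hypothetical.
import pyblish.api
import pyblish.util

pyblish.api.register_host("resolve")
pyblish.api.register_plugin_path("/path/to/pype/plugins/resolve/publish")

# runs collectors, validators, extractors and integrators in order
context = pyblish.util.publish()
for instance in context:
    print(instance, instance.data.get("resolutionWidth"))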
pixel_aspect = item.source().mediaSource().pixelAspect() + + resolution_data = { + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "pixelAspect": pixel_aspect + } + # add to instacne data + instance.data.update(resolution_data) + + self.log.info("Resolution of instance '{}' is: {}".format( + instance, + resolution_data + )) diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py new file mode 100644 index 0000000000..a2c7fea0e0 --- /dev/null +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -0,0 +1,97 @@ +import os +import pyblish +from pype.hosts import resolve + +# # developer reload modules +from pprint import pformat + + +class CollectInstances(pyblish.api.ContextPlugin): + """Collect all Track items selection.""" + + order = pyblish.api.CollectorOrder - 0.5 + label = "Collect Instances" + hosts = ["resolve"] + + def process(self, context): + selected_track_items = resolve.get_current_track_items( + filter=True, selecting_color="Pink") + + self.log.info( + "Processing enabled track items: {}".format( + len(selected_track_items))) + + for track_item_data in selected_track_items: + self.log.debug(pformat(track_item_data)) + data = dict() + track_item = track_item_data["clip"]["item"] + self.log.debug(track_item) + # get pype tag data + tag_parsed_data = resolve.get_track_item_pype_tag(track_item) + self.log.debug(pformat(tag_parsed_data)) + + if not tag_parsed_data: + continue + + if tag_parsed_data.get("id") != "pyblish.avalon.instance": + continue + + compound_source_prop = tag_parsed_data["sourceProperties"] + self.log.debug(f"compound_source_prop: {compound_source_prop}") + + # source = track_item_data.GetMediaPoolItem() + + source_path = os.path.normpath( + compound_source_prop["File Path"]) + source_name = compound_source_prop["File Name"] + source_id = tag_parsed_data["sourceId"] + self.log.debug(f"source_path: {source_path}") + self.log.debug(f"source_name: {source_name}") + self.log.debug(f"source_id: {source_id}") + + # add tag data to instance data + data.update({ + k: v for k, v in tag_parsed_data.items() + if k not in ("id", "applieswhole", "label") + }) + + asset = tag_parsed_data["asset"] + subset = tag_parsed_data["subset"] + review = tag_parsed_data["review"] + + # insert family into families + family = tag_parsed_data["family"] + families = [str(f) for f in tag_parsed_data["families"]] + families.insert(0, str(family)) + + track = tag_parsed_data["track_data"]["name"] + base_name = os.path.basename(source_path) + file_head = os.path.splitext(base_name)[0] + # source_first_frame = int(file_info.startFrame()) + + # apply only for feview and master track instance + if review: + families += ["review", "ftrack"] + + data.update({ + "name": "{} {} {}".format(asset, subset, families), + "asset": asset, + "item": track_item, + "families": families, + + # tags + "tags": tag_parsed_data, + + # track item attributes + "track": track, + + # source attribute + "source": source_path, + "sourcePath": source_path, + "sourceFileHead": file_head, + # "sourceFirst": source_first_frame, + }) + + instance = context.create_instance(**data) + + self.log.info("Creating instance: {}".format(instance)) diff --git a/pype/plugins/resolve/publish/collect_project.py b/pype/plugins/resolve/publish/collect_project.py deleted file mode 100644 index aa57f93619..0000000000 --- a/pype/plugins/resolve/publish/collect_project.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -import pyblish.api -from 
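# Aside: the family/families bookkeeping used by the instance collector above,
# distilled into a runnable example; the main family is prepended so downstream
# plugins can match on either field (values here are illustrative only).
family = "clip"
families = ["ftrack"]
families.insert(0, str(family))

review = True
if review:
    families += ["review"]

assert families == ["clip", "ftrack", "review"]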
pype.hosts.resolve.utils import get_resolve_module - - -class CollectProject(pyblish.api.ContextPlugin): - """Collect Project object""" - - order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Project" - hosts = ["resolve"] - - def process(self, context): - exported_projet_ext = ".drp" - current_dir = os.getenv("AVALON_WORKDIR") - resolve = get_resolve_module() - PM = resolve.GetProjectManager() - P = PM.GetCurrentProject() - name = P.GetName() - - fname = name + exported_projet_ext - current_file = os.path.join(current_dir, fname) - normalised = os.path.normpath(current_file) - - context.data["project"] = P - context.data["currentFile"] = normalised - - self.log.info(name) - self.log.debug(normalised) diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py new file mode 100644 index 0000000000..a8b09573db --- /dev/null +++ b/pype/plugins/resolve/publish/collect_workfile.py @@ -0,0 +1,58 @@ +import os +import pyblish.api +from pype.hosts import resolve +from avalon import api as avalon + + +class CollectWorkfile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + label = "Collect Workfile" + order = pyblish.api.CollectorOrder - 0.501 + + def process(self, context): + exported_projet_ext = ".drp" + asset = avalon.Session["AVALON_ASSET"] + staging_dir = os.getenv("AVALON_WORKDIR") + subset = "workfile" + + project = resolve.get_current_project() + name = project.GetName() + + base_name = name + exported_projet_ext + current_file = os.path.join(staging_dir, base_name) + current_file = os.path.normpath(current_file) + + active_sequence = resolve.get_current_sequence() + video_tracks = resolve.get_video_track_names() + + # set main project attributes to context + context.data["activeProject"] = project + context.data["activeSequence"] = active_sequence + context.data["videoTracks"] = video_tracks + context.data["currentFile"] = current_file + + self.log.info("currentFile: {}".format(current_file)) + + # creating workfile representation + representation = { + 'name': 'hrox', + 'ext': 'hrox', + 'files': base_name, + "stagingDir": staging_dir, + } + + instance_data = { + "name": "{}_{}".format(asset, subset), + "asset": asset, + "subset": "{}{}".format(asset, subset.capitalize()), + "item": project, + "family": "workfile", + + # source attribute + "sourcePath": current_file, + "representations": [representation] + } + + instance = context.create_instance(**instance_data) + self.log.info("Creating instance: {}".format(instance)) From dbf0438f36e26ba57ef403f015e61d78fb30373f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 30 Nov 2020 18:33:19 +0100 Subject: [PATCH 008/198] feat(resolve): activating publish on instances --- pype/hosts/resolve/pipeline.py | 21 ++++++++++++ .../resolve/publish/collect_instances.py | 34 +++++++++---------- 2 files changed, 38 insertions(+), 17 deletions(-) diff --git a/pype/hosts/resolve/pipeline.py b/pype/hosts/resolve/pipeline.py index 22437980e7..23cf042a13 100644 --- a/pype/hosts/resolve/pipeline.py +++ b/pype/hosts/resolve/pipeline.py @@ -61,6 +61,9 @@ def install(): avalon.register_plugin_path(avalon.Creator, CREATE_PATH) avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) + # register callback for switching publishable + pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) + get_resolve_module() @@ -83,6 +86,9 @@ def uninstall(): avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) 
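# Aside: the "instanceToggled" contract registered above, sketched standalone.
# pyblish emits (instance, old_value, new_value) when the publish checkbox is
# flipped in the UI; assuming pyblish-base, with an illustrative callback body.
import pyblish.api

def on_instance_toggled(instance, old_value, new_value):
    # keep any persisted publish flag in sync with the UI toggle
    print("{}: {} -> {}".format(instance, old_value, new_value))

pyblish.api.register_callback("instanceToggled", on_instance_toggled)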
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH) + # register callback for switching publishable + pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) + def containerise(track_item, name, @@ -241,3 +247,18 @@ def reset_selection(): """Deselect all selected nodes """ pass + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + + log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) + + from pype.hosts.resolve import ( + set_publish_attribute + ) + + # Whether instances should be passthrough based on new value + track_item = instance.data["item"] + set_publish_attribute(track_item, new_value) diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py index a2c7fea0e0..b556117e65 100644 --- a/pype/plugins/resolve/publish/collect_instances.py +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -22,21 +22,21 @@ class CollectInstances(pyblish.api.ContextPlugin): len(selected_track_items))) for track_item_data in selected_track_items: - self.log.debug(pformat(track_item_data)) + data = dict() track_item = track_item_data["clip"]["item"] - self.log.debug(track_item) + # get pype tag data - tag_parsed_data = resolve.get_track_item_pype_tag(track_item) - self.log.debug(pformat(tag_parsed_data)) + tag_data = resolve.get_track_item_pype_tag(track_item) + self.log.debug(f"__ tag_data: {pformat(tag_data)}") - if not tag_parsed_data: + if not tag_data: continue - if tag_parsed_data.get("id") != "pyblish.avalon.instance": + if tag_data.get("id") != "pyblish.avalon.instance": continue - compound_source_prop = tag_parsed_data["sourceProperties"] + compound_source_prop = tag_data["sourceProperties"] self.log.debug(f"compound_source_prop: {compound_source_prop}") # source = track_item_data.GetMediaPoolItem() @@ -44,27 +44,27 @@ class CollectInstances(pyblish.api.ContextPlugin): source_path = os.path.normpath( compound_source_prop["File Path"]) source_name = compound_source_prop["File Name"] - source_id = tag_parsed_data["sourceId"] + source_id = tag_data["sourceId"] self.log.debug(f"source_path: {source_path}") self.log.debug(f"source_name: {source_name}") self.log.debug(f"source_id: {source_id}") # add tag data to instance data data.update({ - k: v for k, v in tag_parsed_data.items() + k: v for k, v in tag_data.items() if k not in ("id", "applieswhole", "label") }) - asset = tag_parsed_data["asset"] - subset = tag_parsed_data["subset"] - review = tag_parsed_data["review"] + asset = tag_data["asset"] + subset = tag_data["subset"] + review = tag_data["review"] # insert family into families - family = tag_parsed_data["family"] - families = [str(f) for f in tag_parsed_data["families"]] + family = tag_data["family"] + families = [str(f) for f in tag_data["families"]] families.insert(0, str(family)) - track = tag_parsed_data["track_data"]["name"] + track = tag_data["track_data"]["name"] base_name = os.path.basename(source_path) file_head = os.path.splitext(base_name)[0] # source_first_frame = int(file_info.startFrame()) @@ -78,9 +78,9 @@ class CollectInstances(pyblish.api.ContextPlugin): "asset": asset, "item": track_item, "families": families, - + "publish": resolve.get_publish_attribute(track_item), # tags - "tags": tag_parsed_data, + "tags": tag_data, # track item attributes "track": track, From 727ce8b0f7319957e56405000aa84d9901e4a1bd Mon Sep 17 00:00:00 2001 From: Jakub Jezek 
Date: Tue, 1 Dec 2020 14:55:59 +0100
Subject: [PATCH 009/198] feat(resolve): otio host module for publishing and
 ingesting; now only works for exporting from resolve

---
 pype/hosts/resolve/otio.py | 192 +++++++++++++++++++++++++++++++++++++
 1 file changed, 192 insertions(+)
 create mode 100644 pype/hosts/resolve/otio.py

diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py
new file mode 100644
index 0000000000..3edb6b08c2
--- /dev/null
+++ b/pype/hosts/resolve/otio.py
@@ -0,0 +1,192 @@
+import opentimelineio as otio
+
+
+TRACK_TYPES = {
+    "video": otio.schema.TrackKind.Video,
+    "audio": otio.schema.TrackKind.Audio
+}
+
+
+def create_rational_time(frame, fps):
+    return otio.opentime.RationalTime(
+        float(frame),
+        float(fps)
+    )
+
+
+def create_time_range(start_frame, frame_duration, fps):
+    return otio.opentime.TimeRange(
+        start_time=create_rational_time(start_frame, fps),
+        duration=create_rational_time(frame_duration, fps)
+    )
+
+
+def create_reference(media_pool_item):
+    return otio.schema.ExternalReference(
+        target_url=media_pool_item.GetClipProperty(
+            "File Path").get("File Path"),
+        available_range=create_time_range(
+            media_pool_item.GetClipProperty("Start").get("Start"),
+            media_pool_item.GetClipProperty("Frames").get("Frames"),
+            media_pool_item.GetClipProperty("FPS").get("FPS")
+        )
+    )
+
+
+def create_markers(track_item, frame_rate):
+    track_item_markers = track_item.GetMarkers()
+    markers = []
+    for m_frame in track_item_markers:
+        markers.append(
+            otio.schema.Marker(
+                name=track_item_markers[m_frame]["name"],
+                marked_range=create_time_range(
+                    m_frame,
+                    track_item_markers[m_frame]["duration"],
+                    frame_rate
+                ),
+                color=track_item_markers[m_frame]["color"].upper(),
+                metadata={
+                    "Resolve": {
+                        "note": track_item_markers[m_frame]["note"]
+                    }
+                }
+            )
+        )
+    return markers
+
+
+def create_clip(track_item):
+    media_pool_item = track_item.GetMediaPoolItem()
+    frame_rate = media_pool_item.GetClipProperty("FPS").get("FPS")
+    clip = otio.schema.Clip(
+        name=track_item.GetName(),
+        source_range=create_time_range(
+            track_item.GetLeftOffset(),
+            track_item.GetDuration(),
+            frame_rate
+        ),
+        media_reference=create_reference(media_pool_item)
+    )
+    for marker in create_markers(track_item, frame_rate):
+        clip.markers.append(marker)
+    return clip
+
+
+def create_gap(gap_start, clip_start, tl_start_frame, frame_rate):
+    return otio.schema.Gap(
+        source_range=create_time_range(
+            gap_start,
+            (clip_start - tl_start_frame) - gap_start,
+            frame_rate
+        )
+    )
+
+
+def create_timeline(timeline):
+    return otio.schema.Timeline(name=timeline.GetName())
+
+
+def create_track(track_type, track_name):
+    return otio.schema.Track(
+        name=track_name,
+        kind=TRACK_TYPES[track_type]
+    )
+
+
+def create_complete_otio_timeline(project):
+    # get current timeline
+    timeline = project.GetCurrentTimeline()
+
+    # convert timeline to otio
+    otio_timeline = create_timeline(timeline)
+
+    # loop all defined track types
+    for track_type in list(TRACK_TYPES.keys()):
+        # get total track count
+        track_count = timeline.GetTrackCount(track_type)
+
+        # loop all tracks by track indexes
+        for track_index in range(1, int(track_count) + 1):
+            # get current track name
+            track_name = timeline.GetTrackName(track_type, track_index)
+
+            # convert track to otio
+            otio_track = create_track(
+                track_type, "{}{}".format(track_name, track_index))
+
+            # get all track items in current track
+            current_track_items = timeline.GetItemListInTrack(
+                track_type, track_index)
+
+            # loop available track items in current track items
+
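# Aside: the OTIO building blocks composed by the module above, shown as one
# self-contained script (assuming opentimelineio is importable); running it
# writes a single-clip timeline to disk, with illustrative names throughout.
import opentimelineio as otio

fps = 24.0
clip = otio.schema.Clip(
    name="shot010",
    source_range=otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(0, fps),
        duration=otio.opentime.RationalTime(48, fps)
    )
)
track = otio.schema.Track(name="V1", kind=otio.schema.TrackKind.Video)
track.append(clip)
timeline = otio.schema.Timeline(name="example")
timeline.tracks.append(track)
otio.adapters.write_to_file(timeline, "example.otio")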
for track_item in current_track_items: + # skip offline track items + if track_item.GetMediaPoolItem() is None: + continue + + # calculate real clip start + clip_start = track_item.GetStart() - timeline.GetStartFrame() + + # if gap between track start and clip start + if clip_start > otio_track.available_range().duration.value: + # create gap and add it to track + otio_track.append( + create_gap( + otio_track.available_range().duration.value, + track_item.GetStart(), + timeline.GetStartFrame(), + project.GetSetting("timelineFrameRate") + ) + ) + + # create otio clip and add it to track + otio_track.append(create_clip(track_item)) + + # add track to otio timeline + otio_timeline.tracks.append(otio_track) + + +def get_clip_with_parents(track_item_data): + """ + Return otio objects for timeline, track and clip + + Args: + track_item_data (dict): track_item_data from list returned by + resolve.get_current_track_items() + + Returns: + dict: otio clip with parent objects + + """ + + track_item = track_item_data["clip"]["item"] + timeline = track_item_data["timeline"] + track_type = track_item_data["track"]["type"] + track_name = track_item_data["track"]["name"] + track_index = track_item_data["track"]["index"] + + # convert timeline to otio + otio_timeline = create_timeline(timeline) + # convert track to otio + otio_track = create_track( + track_type, "{}{}".format(track_name, track_index)) + + # create otio clip + otio_clip = create_clip(track_item) + + # add it to track + otio_track.append(otio_clip) + + # add track to otio timeline + otio_timeline.tracks.append(otio_track) + + return { + "otioTimeline": otio_timeline, + "otioTrack": otio_track, + "otioClip": otio_clip + } + + +def save(otio_timeline, path): + otio.adapters.write_to_file(otio_timeline, path) From 60213ecc5dd4e384170da38a7735fd4efdb4826e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 1 Dec 2020 15:03:06 +0100 Subject: [PATCH 010/198] feat(resolve): collect workfile --- .../resolve/publish/collect_workfile.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py index a8b09573db..d1b45117c9 100644 --- a/pype/plugins/resolve/publish/collect_workfile.py +++ b/pype/plugins/resolve/publish/collect_workfile.py @@ -2,6 +2,7 @@ import os import pyblish.api from pype.hosts import resolve from avalon import api as avalon +from pprint import pformat class CollectWorkfile(pyblish.api.ContextPlugin): @@ -18,6 +19,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): project = resolve.get_current_project() name = project.GetName() + fps = project.GetSetting("timelineFrameRate") base_name = name + exported_projet_ext current_file = os.path.join(staging_dir, base_name) @@ -27,17 +29,18 @@ class CollectWorkfile(pyblish.api.ContextPlugin): video_tracks = resolve.get_video_track_names() # set main project attributes to context - context.data["activeProject"] = project - context.data["activeSequence"] = active_sequence - context.data["videoTracks"] = video_tracks - context.data["currentFile"] = current_file - - self.log.info("currentFile: {}".format(current_file)) + context.data.update({ + "activeProject": project, + "activeSequence": active_sequence, + "videoTracks": video_tracks, + "currentFile": current_file, + "fps": fps, + }) # creating workfile representation representation = { - 'name': 'hrox', - 'ext': 'hrox', + 'name': exported_projet_ext[1:], + 'ext': exported_projet_ext[1:], 'files': base_name, 
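# Aside: the gap arithmetic used above when a clip does not start at the head
# of its track, worked through with concrete numbers (pure Python, runnable).
tl_start_frame = 1000   # timeline.GetStartFrame()
clip_start = 1036       # track_item.GetStart()
gap_start = 12          # otio_track.available_range().duration.value

# the gap must cover local frames 12..35, i.e. 24 frames
gap_duration = (clip_start - tl_start_frame) - gap_start
assert gap_duration == 24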
"stagingDir": staging_dir, } @@ -56,3 +59,4 @@ class CollectWorkfile(pyblish.api.ContextPlugin): instance = context.create_instance(**instance_data) self.log.info("Creating instance: {}".format(instance)) + self.log.debug("__ instance.data: {}".format(pformat(instance.data))) From 4d6b9fd33ff21cdb891a0000b4f7f7b0c51c29d1 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 1 Dec 2020 15:04:03 +0100 Subject: [PATCH 011/198] fix(resolve): moving file to correct folder --- .../resolve/{create => _publish}/collect_clip_resolution.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pype/plugins/resolve/{create => _publish}/collect_clip_resolution.py (100%) diff --git a/pype/plugins/resolve/create/collect_clip_resolution.py b/pype/plugins/resolve/_publish/collect_clip_resolution.py similarity index 100% rename from pype/plugins/resolve/create/collect_clip_resolution.py rename to pype/plugins/resolve/_publish/collect_clip_resolution.py From 78946d04baa08b7b4a9c781a07a66177f9469351 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 2 Dec 2020 11:28:14 +0100 Subject: [PATCH 012/198] feat(resolve): create with markers and publishing via markers --- pype/hosts/resolve/__init__.py | 2 + pype/hosts/resolve/lib.py | 136 +++++++++++++----- pype/hosts/resolve/otio.py | 12 +- pype/hosts/resolve/plugin.py | 20 +-- .../resolve/create/create_shot_clip_new.py | 7 +- .../resolve/publish/collect_instances.py | 24 ++-- 6 files changed, 132 insertions(+), 69 deletions(-) diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py index b8457438c6..83f8e3a720 100644 --- a/pype/hosts/resolve/__init__.py +++ b/pype/hosts/resolve/__init__.py @@ -14,6 +14,7 @@ from .pipeline import ( ) from .lib import ( + publish_clip_color, get_project_manager, get_current_project, get_current_sequence, @@ -66,6 +67,7 @@ __all__ = [ "get_resolve_module", # lib + "publish_clip_color", "get_project_manager", "get_current_project", "get_current_sequence", diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 8dd9566c44..2ade558d89 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -2,31 +2,41 @@ import sys import json import ast from opentimelineio import opentime -from pprint import pformat - from pype.api import Logger log = Logger().get_logger(__name__, "resolve") self = sys.modules[__name__] -self.pm = None +self.project_manager = None + +# Pype sequencial rename variables self.rename_index = 0 self.rename_add = 0 + +self.publish_clip_color = "Pink" +self.pype_marker_workflow = True + +# Pype compound clip workflow variable self.pype_tag_name = "VFX Notes" +# Pype marker workflow variables +self.pype_marker_name = "PYPEDATA" +self.pype_marker_duration = 1 +self.pype_marker_color = "Mint" +self.temp_marker_frame = None def get_project_manager(): from . 
import bmdvr - if not self.pm: - self.pm = bmdvr.GetProjectManager() - return self.pm + if not self.project_manager: + self.project_manager = bmdvr.GetProjectManager() + return self.project_manager def get_current_project(): # initialize project manager get_project_manager() - return self.pm.GetCurrentProject() + return self.project_manager.GetCurrentProject() def get_current_sequence(): @@ -111,16 +121,20 @@ def get_track_item_pype_tag(track_item): hiero.core.Tag: hierarchy, orig clip attributes """ return_tag = None - media_pool_item = track_item.GetMediaPoolItem() - # get all tags from track item - _tags = media_pool_item.GetMetadata() - if not _tags: - return None - for key, data in _tags.items(): - # return only correct tag defined by global name - if key in self.pype_tag_name: - return_tag = json.loads(data) + if self.pype_marker_workflow: + return_tag = get_pype_marker(track_item) + else: + media_pool_item = track_item.GetMediaPoolItem() + + # get all tags from track item + _tags = media_pool_item.GetMetadata() + if not _tags: + return None + for key, data in _tags.items(): + # return only correct tag defined by global name + if key in self.pype_tag_name: + return_tag = json.loads(data) return return_tag @@ -130,28 +144,37 @@ def set_track_item_pype_tag(track_item, data=None): Set pype track item tag to input track_item. Attributes: - trackItem (hiero.core.TrackItem): hiero object + trackItem (resolve.TimelineItem): resolve api object Returns: - hiero.core.Tag + dict: json loaded data """ data = data or dict() # get available pype tag if any tag_data = get_track_item_pype_tag(track_item) - if tag_data: - media_pool_item = track_item.GetMediaPoolItem() - # it not tag then create one + if self.pype_marker_workflow: + # delete tag as it is not updatable + if tag_data: + delete_pype_marker(track_item) + tag_data.update(data) - media_pool_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data)) - return tag_data + set_pype_marker(track_item, tag_data) else: - tag_data = data - # if pype tag available then update with input data - # add it to the input track item - track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data)) - return tag_data + if tag_data: + media_pool_item = track_item.GetMediaPoolItem() + # it not tag then create one + tag_data.update(data) + media_pool_item.SetMetadata( + self.pype_tag_name, json.dumps(tag_data)) + else: + tag_data = data + # if pype tag available then update with input data + # add it to the input track item + track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data)) + + return tag_data def imprint(track_item, data=None): @@ -187,7 +210,7 @@ def set_publish_attribute(track_item, value): value (bool): True or False """ tag_data = get_track_item_pype_tag(track_item) - tag_data["publish"] = str(value) + tag_data["publish"] = value # set data to the publish attribute set_track_item_pype_tag(track_item, tag_data) @@ -200,10 +223,47 @@ def get_publish_attribute(track_item): value (bool): True or False """ tag_data = get_track_item_pype_tag(track_item) - value = tag_data["publish"] + return tag_data["publish"] - # return value converted to bool value. Atring is stored in tag. 
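# Aside: the `self = sys.modules[__name__]` assignments above are the
# module-as-singleton idiom: module attributes act as shared mutable state
# without a class or `global` declarations. Standalone sketch:
import sys

self = sys.modules[__name__]   # the module object itself
self.counter = 0               # equivalent to a module-level global

def bump():
    # reads `self` from module scope, then mutates an attribute on it
    self.counter += 1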
- return ast.literal_eval(value) + +def set_pype_marker(track_item, tag_data): + source_start = track_item.GetLeftOffset() + item_duration = track_item.GetDuration() + frame = int(source_start + (item_duration / 2)) + + # marker attributes + frameId = (frame / 10) * 10 + color = self.pype_marker_color + name = self.pype_marker_name + note = json.dumps(tag_data) + duration = (self.pype_marker_duration / 10) * 10 + + track_item.AddMarker( + frameId, + color, + name, + note, + duration + ) + + +def get_pype_marker(track_item): + track_item_markers = track_item.GetMarkers() + for marker_frame in track_item_markers: + note = track_item_markers[marker_frame]["note"] + color = track_item_markers[marker_frame]["color"] + name = track_item_markers[marker_frame]["name"] + print(f"_ marker data: {marker_frame} | {name} | {color} | {note}") + if name == self.pype_marker_name and color == self.pype_marker_color: + self.temp_marker_frame = marker_frame + return json.loads(note) + + return dict() + + +def delete_pype_marker(track_item): + track_item.DeleteMarkerAtFrame(self.temp_marker_frame) + self.temp_marker_frame = None def create_current_sequence_media_bin(sequence): @@ -523,16 +583,16 @@ def set_project_manager_to_folder_name(folder_name): set_folder = False # go back to root folder - if self.pm.GotoRootFolder(): + if self.project_manager.GotoRootFolder(): log.info(f"Testing existing folder: {folder_name}") folders = convert_resolve_list_type( - self.pm.GetFoldersInCurrentFolder()) + self.project_manager.GetFoldersInCurrentFolder()) log.info(f"Testing existing folders: {folders}") # get me first available folder object # with the same name as in `folder_name` else return False if next((f for f in folders if f in folder_name), False): log.info(f"Found existing folder: {folder_name}") - set_folder = self.pm.OpenFolder(folder_name) + set_folder = self.project_manager.OpenFolder(folder_name) if set_folder: return True @@ -540,11 +600,11 @@ def set_project_manager_to_folder_name(folder_name): # if folder by name is not existent then create one # go back to root folder log.info(f"Folder `{folder_name}` not found and will be created") - if self.pm.GotoRootFolder(): + if self.project_manager.GotoRootFolder(): try: # create folder by given name - self.pm.CreateFolder(folder_name) - self.pm.OpenFolder(folder_name) + self.project_manager.CreateFolder(folder_name) + self.project_manager.OpenFolder(folder_name) return True except NameError as e: log.error((f"Folder with name `{folder_name}` cannot be created!" 
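# Aside: the marker round-trip implemented above, sketched with only the
# Resolve calls the patch itself uses (AddMarker, GetMarkers). One caveat
# worth double-checking: in Python 3, `(frame / 10) * 10` is float math and
# does NOT snap to a multiple of ten; `(frame // 10) * 10` would.
import json

MARKER_NAME = "PYPEDATA"    # mirrors pype_marker_name above
MARKER_COLOR = "Mint"

def store(timeline_item, data, frame):
    timeline_item.AddMarker(frame, MARKER_COLOR, MARKER_NAME,
                            json.dumps(data), 1)

def load(timeline_item):
    for frame, marker in timeline_item.GetMarkers().items():
        if marker["name"] == MARKER_NAME and marker["color"] == MARKER_COLOR:
            return json.loads(marker["note"])
    return {}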
diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py index 3edb6b08c2..7a6d142a10 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio.py @@ -36,19 +36,19 @@ def create_reference(media_pool_item): def create_markers(track_item, frame_rate): track_item_markers = track_item.GetMarkers() markers = [] - for m_frame in track_item_markers: + for marker_frame in track_item_markers: markers.append( otio.schema.Marker( - name=track_item_markers[m_frame]["name"], + name=track_item_markers[marker_frame]["name"], marked_range=create_time_range( - m_frame, - track_item_markers[m_frame]["duration"], + marker_frame, + track_item_markers[marker_frame]["duration"], frame_rate ), - color=track_item_markers[m_frame]["color"].upper(), + color=track_item_markers[marker_frame]["color"].upper(), metadata={ "Resolve": { - "note": track_item_markers[m_frame]["note"] + "note": track_item_markers[marker_frame]["note"] } } ) diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index 95097434f8..be666358ae 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -446,16 +446,18 @@ class PublishClip: else: self.tag_data["asset"] = self.ti_name - lib.create_compound_clip( - self.track_item_data, - self.tag_data["asset"], - self.mp_folder - ) + if not lib.pype_marker_workflow: + # create compound clip workflow + lib.create_compound_clip( + self.track_item_data, + self.tag_data["asset"], + self.mp_folder + ) - # add track_item_data selection to tag - self.tag_data.update({ - "track_data": self.track_item_data["track"] - }) + # add track_item_data selection to tag + self.tag_data.update({ + "track_data": self.track_item_data["track"] + }) # create pype tag on track_item and add data lib.imprint(self.track_item, self.tag_data) diff --git a/pype/plugins/resolve/create/create_shot_clip_new.py b/pype/plugins/resolve/create/create_shot_clip_new.py index a94e30ed73..5f6790394b 100644 --- a/pype/plugins/resolve/create/create_shot_clip_new.py +++ b/pype/plugins/resolve/create/create_shot_clip_new.py @@ -259,7 +259,6 @@ class CreateShotClipNew(resolve.Creator): self.rename_index = i # convert track item to timeline media pool item - resolve.PublishClip(self, track_item_data, **kwargs).convert() - - # clear color after it is done - track_item_data["clip"]["item"].ClearClipColor() + track_item = resolve.PublishClip( + self, track_item_data, **kwargs).convert() + track_item.SetClipColor(lib.publish_clip_color) diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py index b556117e65..b8c929f3d6 100644 --- a/pype/plugins/resolve/publish/collect_instances.py +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -15,7 +15,7 @@ class CollectInstances(pyblish.api.ContextPlugin): def process(self, context): selected_track_items = resolve.get_current_track_items( - filter=True, selecting_color="Pink") + filter=True, selecting_color=resolve.publish_clip_color) self.log.info( "Processing enabled track items: {}".format( @@ -36,18 +36,15 @@ class CollectInstances(pyblish.api.ContextPlugin): if tag_data.get("id") != "pyblish.avalon.instance": continue - compound_source_prop = tag_data["sourceProperties"] - self.log.debug(f"compound_source_prop: {compound_source_prop}") - - # source = track_item_data.GetMediaPoolItem() + media_pool_item = track_item.GetMediaPoolItem() + clip_property = media_pool_item.GetClipProperty() + self.log.debug(f"clip_property: {clip_property}") source_path = os.path.normpath( - 
compound_source_prop["File Path"]) - source_name = compound_source_prop["File Name"] - source_id = tag_data["sourceId"] + clip_property["File Path"]) + source_name = clip_property["File Name"] self.log.debug(f"source_path: {source_path}") self.log.debug(f"source_name: {source_name}") - self.log.debug(f"source_id: {source_id}") # add tag data to instance data data.update({ @@ -64,10 +61,13 @@ class CollectInstances(pyblish.api.ContextPlugin): families = [str(f) for f in tag_data["families"]] families.insert(0, str(family)) - track = tag_data["track_data"]["name"] + track = track_item_data["track"]["name"] base_name = os.path.basename(source_path) file_head = os.path.splitext(base_name)[0] - # source_first_frame = int(file_info.startFrame()) + source_first_frame = int( + track_item.GetStart() + - track_item.GetLeftOffset() + ) # apply only for feview and master track instance if review: @@ -89,7 +89,7 @@ class CollectInstances(pyblish.api.ContextPlugin): "source": source_path, "sourcePath": source_path, "sourceFileHead": file_head, - # "sourceFirst": source_first_frame, + "sourceFirst": source_first_frame, }) instance = context.create_instance(**data) From c4864b11e3451901c8a4d045028712927525619d Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 3 Dec 2020 13:00:44 +0100 Subject: [PATCH 013/198] feat(resolve): otio publishing wip --- pype/hosts/resolve/__init__.py | 17 +- pype/hosts/resolve/lib.py | 28 +- pype/hosts/resolve/lib_hiero.py | 838 ++++++++++++++++++ pype/hosts/resolve/otio.py | 112 ++- pype/hosts/resolve/pipeline_hiero.py | 302 +++++++ pype/hosts/resolve/plugin.py | 9 +- .../resolve/publish/collect_instances.py | 36 +- .../StartupUI/otioimporter/OTIOImport.py | 24 +- 8 files changed, 1286 insertions(+), 80 deletions(-) create mode 100644 pype/hosts/resolve/lib_hiero.py create mode 100644 pype/hosts/resolve/pipeline_hiero.py diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py index 83f8e3a720..45aa5502cc 100644 --- a/pype/hosts/resolve/__init__.py +++ b/pype/hosts/resolve/__init__.py @@ -29,7 +29,8 @@ from .lib import ( create_compound_clip, swap_clips, get_pype_clip_metadata, - set_project_manager_to_folder_name + set_project_manager_to_folder_name, + get_reformated_path ) from .menu import launch_pype_menu @@ -48,6 +49,12 @@ from .workio import ( work_root ) +from .otio import ( + get_otio_clip_instance_data, + get_otio_complete_timeline, + save_otio +) + bmdvr = None bmdvf = None @@ -83,6 +90,7 @@ __all__ = [ "swap_clips", "get_pype_clip_metadata", "set_project_manager_to_folder_name", + "get_reformated_path", # menu "launch_pype_menu", @@ -101,5 +109,10 @@ __all__ = [ # singleton with black magic resolve module "bmdvr", - "bmdvf" + "bmdvf", + + # open color io integration + "get_otio_clip_instance_data", + "get_otio_complete_timeline", + "save_otio" ] diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 2ade558d89..777cae0eb2 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -1,6 +1,6 @@ import sys import json -import ast +import re from opentimelineio import opentime from pype.api import Logger @@ -25,6 +25,7 @@ self.pype_marker_duration = 1 self.pype_marker_color = "Mint" self.temp_marker_frame = None + def get_project_manager(): from . 
import bmdvr if not self.project_manager: @@ -621,3 +622,28 @@ def convert_resolve_list_type(resolve_list): "Input argument should be dict() type") return [resolve_list[i] for i in sorted(resolve_list.keys())] + + +def get_reformated_path(path, padded=True): + """ + Return fixed python expression path + + Args: + path (str): path url or simple file name + + Returns: + type: string with reformated path + + Example: + get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr + + """ + num_pattern = "(\\[\\d+\\-\\d+\\])" + padding_pattern = "(\\d+)(?=-)" + if "[" in path: + padding = len(re.findall(padding_pattern, path).pop()) + if padded: + path = re.sub(num_pattern, f"%0{padding}d", path) + else: + path = re.sub(num_pattern, f"%d", path) + return path diff --git a/pype/hosts/resolve/lib_hiero.py b/pype/hosts/resolve/lib_hiero.py new file mode 100644 index 0000000000..891ca3905c --- /dev/null +++ b/pype/hosts/resolve/lib_hiero.py @@ -0,0 +1,838 @@ +""" +Host specific functions where host api is connected +""" +import os +import re +import sys +import ast +import hiero +import avalon.api as avalon +import avalon.io +from avalon.vendor.Qt import QtWidgets +from pype.api import (Logger, Anatomy, config) +from . import tags +import shutil +from compiler.ast import flatten + +try: + from PySide.QtCore import QFile, QTextStream + from PySide.QtXml import QDomDocument +except ImportError: + from PySide2.QtCore import QFile, QTextStream + from PySide2.QtXml import QDomDocument + +# from opentimelineio import opentime +# from pprint import pformat + +log = Logger().get_logger(__name__, "hiero") + +self = sys.modules[__name__] +self._has_been_setup = False +self._has_menu = False +self._registered_gui = None +self.pype_tag_name = "Pype Data" +self.default_sequence_name = "PypeSequence" +self.default_bin_name = "PypeBin" + +AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") + + +def get_current_project(remove_untitled=False): + projects = flatten(hiero.core.projects()) + if not remove_untitled: + return next(iter(projects)) + + # if remove_untitled + for proj in projects: + if "Untitled" in proj.name(): + proj.close() + else: + return proj + + +def get_current_sequence(name=None, new=False): + """ + Get current sequence in context of active project. + + Args: + name (str)[optional]: name of sequence we want to return + new (bool)[optional]: if we want to create new one + + Returns: + hiero.core.Sequence: the sequence object + """ + sequence = None + project = get_current_project() + root_bin = project.clipsBin() + + if new: + # create new + name = name or self.default_sequence_name + sequence = hiero.core.Sequence(name) + root_bin.addItem(hiero.core.BinItem(sequence)) + elif name: + # look for sequence by name + sequences = project.sequences() + for _sequence in sequences: + if _sequence.name() == name: + sequence = _sequence + if not sequence: + # if nothing found create new with input name + sequence = get_current_sequence(name, True) + elif not name and not new: + # if name is none and new is False then return current open sequence + sequence = hiero.ui.activeSequence() + + return sequence + + +def get_current_track(sequence, name, audio=False): + """ + Get current track in context of active project. + + Creates new if none is found. 
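# Aside: the frame-range substitution above reduced to a runnable stdlib demo;
# the bracketed range token is replaced with a printf-style padding pattern.
import re

path = "plate.[0001-1008].exr"
padding = len(re.findall(r"(\d+)(?=-)", path).pop())              # -> 4
padded = re.sub(r"(\[\d+\-\d+\])", "%0{}d".format(padding), path)
unpadded = re.sub(r"(\[\d+\-\d+\])", "%d", path)
assert padded == "plate.%04d.exr"
assert unpadded == "plate.%d.exr"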
+ + Args: + sequence (hiero.core.Sequence): hiero sequene object + name (str): name of track we want to return + audio (bool)[optional]: switch to AudioTrack + + Returns: + hiero.core.Track: the track object + """ + tracks = sequence.videoTracks() + + if audio: + tracks = sequence.audioTracks() + + # get track by name + track = None + for _track in tracks: + if _track.name() in name: + track = _track + + if not track: + if not audio: + track = hiero.core.VideoTrack(name) + else: + track = hiero.core.AudioTrack(name) + sequence.addTrack(track) + + return track + + +def get_track_items( + selected=False, + sequence_name=None, + track_item_name=None, + track_name=None, + track_type=None, + check_enabled=True, + check_locked=True, + check_tagged=False): + """Get all available current timeline track items. + + Attribute: + selected (bool)[optional]: return only selected items on timeline + sequence_name (str)[optional]: return only clips from input sequence + track_item_name (str)[optional]: return only item with input name + track_name (str)[optional]: return only items from track name + track_type (str)[optional]: return only items of given type + (`audio` or `video`) default is `video` + check_enabled (bool)[optional]: ignore disabled if True + check_locked (bool)[optional]: ignore locked if True + + Return: + list or hiero.core.TrackItem: list of track items or single track item + """ + return_list = list() + track_items = list() + + # get selected track items or all in active sequence + if selected: + selected_items = list(hiero.selection) + for item in selected_items: + if track_name and track_name in item.parent().name(): + # filter only items fitting input track name + track_items.append(item) + elif not track_name: + # or add all if no track_name was defined + track_items.append(item) + else: + sequence = get_current_sequence(name=sequence_name) + # get all available tracks from sequence + tracks = list(sequence.audioTracks()) + list(sequence.videoTracks()) + # loop all tracks + for track in tracks: + if check_locked and track.isLocked(): + continue + if check_enabled and not track.isEnabled(): + continue + # and all items in track + for item in track.items(): + if check_tagged and not item.tags(): + continue + + # check if track item is enabled + if check_enabled: + if not item.isEnabled(): + continue + if track_item_name: + if item.name() in track_item_name: + return item + # make sure only track items with correct track names are added + if track_name and track_name in track.name(): + # filter out only defined track_name items + track_items.append(item) + elif not track_name: + # or add all if no track_name is defined + track_items.append(item) + + # filter out only track items with defined track_type + for track_item in track_items: + if track_type and track_type == "video" and isinstance( + track_item.parent(), hiero.core.VideoTrack): + # only video track items are allowed + return_list.append(track_item) + elif track_type and track_type == "audio" and isinstance( + track_item.parent(), hiero.core.AudioTrack): + # only audio track items are allowed + return_list.append(track_item) + elif not track_type: + # add all if no track_type is defined + return_list.append(track_item) + + return return_list + + +def get_track_item_pype_tag(track_item): + """ + Get pype track item tag created by creator or loader plugin. 
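# Aside: the track item filtering above in miniature, assuming the hiero.core
# calls the file itself uses (videoTracks, isLocked, isEnabled, items);
# illustrative only.
def enabled_video_items(sequence):
    items = []
    for track in sequence.videoTracks():
        if track.isLocked() or not track.isEnabled():
            continue
        items.extend(item for item in track.items() if item.isEnabled())
    return items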
+ + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag: hierarchy, orig clip attributes + """ + # get all tags from track item + _tags = track_item.tags() + if not _tags: + return None + for tag in _tags: + # return only correct tag defined by global name + if tag.name() in self.pype_tag_name: + return tag + + +def set_track_item_pype_tag(track_item, data=None): + """ + Set pype track item tag to input track_item. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + hiero.core.Tag + """ + data = data or dict() + + # basic Tag's attribute + tag_data = { + "editable": "0", + "note": "Pype data holder", + "icon": "pype_icon.png", + "metadata": {k: v for k, v in data.items()} + } + # get available pype tag if any + _tag = get_track_item_pype_tag(track_item) + + if _tag: + # it not tag then create one + tag = tags.update_tag(_tag, tag_data) + else: + # if pype tag available then update with input data + tag = tags.create_tag(self.pype_tag_name, tag_data) + # add it to the input track item + track_item.addTag(tag) + + return tag + + +def get_track_item_pype_data(track_item): + """ + Get track item's pype tag data. + + Attributes: + trackItem (hiero.core.TrackItem): hiero object + + Returns: + dict: data found on pype tag + """ + data = dict() + # get pype data tag from track item + tag = get_track_item_pype_tag(track_item) + + if not tag: + return None + + # get tag metadata attribut + tag_data = tag.metadata() + # convert tag metadata to normal keys names and values to correct types + for k, v in dict(tag_data).items(): + key = k.replace("tag.", "") + + try: + # capture exceptions which are related to strings only + value = ast.literal_eval(v) + except (ValueError, SyntaxError): + value = v + + data.update({key: value}) + + return data + + +def imprint(track_item, data=None): + """ + Adding `Avalon data` into a hiero track item tag. + + Also including publish attribute into tag. + + Arguments: + track_item (hiero.core.TrackItem): hiero track item object + data (dict): Any data which needst to be imprinted + + Examples: + data = { + 'asset': 'sq020sh0280', + 'family': 'render', + 'subset': 'subsetMain' + } + """ + data = data or {} + + tag = set_track_item_pype_tag(track_item, data) + + # add publish attribute + set_publish_attribute(tag, True) + + +def set_publish_attribute(tag, value): + """ Set Publish attribute in input Tag object + + Attribute: + tag (hiero.core.Tag): a tag object + value (bool): True or False + """ + tag_data = tag.metadata() + # set data to the publish attribute + tag_data.setValue("tag.publish", str(value)) + + +def get_publish_attribute(tag): + """ Get Publish attribute from input Tag object + + Attribute: + tag (hiero.core.Tag): a tag object + value (bool): True or False + """ + tag_data = tag.metadata() + # get data to the publish attribute + value = tag_data.value("tag.publish") + # return value converted to bool value. Atring is stored in tag. 
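# Aside: the ast.literal_eval conversion above, demonstrated standalone: tag
# metadata comes back as strings, and literal_eval safely restores ints, lists
# and dicts while arbitrary text falls through unchanged (stdlib, runnable).
import ast

def coerce(value):
    try:
        return ast.literal_eval(value)   # "25" -> 25, "['a']" -> ['a']
    except (ValueError, SyntaxError):
        return value                     # plain strings stay strings

assert coerce("25") == 25
assert coerce("['clip', 'review']") == ["clip", "review"]
assert coerce("sq020sh0280") == "sq020sh0280"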
+ return ast.literal_eval(value) + + +def sync_avalon_data_to_workfile(): + # import session to get project dir + project_name = avalon.Session["AVALON_PROJECT"] + + anatomy = Anatomy(project_name) + work_template = anatomy.templates["work"]["path"] + work_root = anatomy.root_value_for_template(work_template) + active_project_root = ( + os.path.join(work_root, project_name) + ).replace("\\", "/") + # getting project + project = get_current_project() + + if "Tag Presets" in project.name(): + return + + log.debug("Synchronizing Pype metadata to project: {}".format( + project.name())) + + # set project root with backward compatibility + try: + project.setProjectDirectory(active_project_root) + except Exception: + # old way of seting it + project.setProjectRoot(active_project_root) + + # get project data from avalon db + project_doc = avalon.io.find_one({"type": "project"}) + project_data = project_doc["data"] + + log.debug("project_data: {}".format(project_data)) + + # get format and fps property from avalon db on project + width = project_data["resolutionWidth"] + height = project_data["resolutionHeight"] + pixel_aspect = project_data["pixelAspect"] + fps = project_data['fps'] + format_name = project_data['code'] + + # create new format in hiero project + format = hiero.core.Format(width, height, pixel_aspect, format_name) + project.setOutputFormat(format) + + # set fps to hiero project + project.setFramerate(fps) + + # TODO: add auto colorspace set from project drop + log.info("Project property has been synchronised with Avalon db") + + +def launch_workfiles_app(event): + """ + Event for launching workfiles after hiero start + + Args: + event (obj): required but unused + """ + from . import launch_workfiles_app + launch_workfiles_app() + + +def setup(console=False, port=None, menu=True): + """Setup integration + + Registers Pyblish for Hiero plug-ins and appends an item to the File-menu + + Arguments: + console (bool): Display console with GUI + port (int, optional): Port from which to start looking for an + available port to connect with Pyblish QML, default + provided by Pyblish Integration. + menu (bool, optional): Display file menu in Hiero. + """ + + if self._has_been_setup: + teardown() + + add_submission() + + if menu: + add_to_filemenu() + self._has_menu = True + + self._has_been_setup = True + log.debug("pyblish: Loaded successfully.") + + +def teardown(): + """Remove integration""" + if not self._has_been_setup: + return + + if self._has_menu: + remove_from_filemenu() + self._has_menu = False + + self._has_been_setup = False + log.debug("pyblish: Integration torn down successfully") + + +def remove_from_filemenu(): + raise NotImplementedError("Implement me please.") + + +def add_to_filemenu(): + PublishAction() + + +class PyblishSubmission(hiero.exporters.FnSubmission.Submission): + + def __init__(self): + hiero.exporters.FnSubmission.Submission.__init__(self) + + def addToQueue(self): + from . import publish + # Add submission to Hiero module for retrieval in plugins. 
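# Aside: the PublishAction above in miniature, a QAction wired to a slot with
# a keyboard shortcut; assumes a Qt binding such as PySide2, and omits the
# host-specific menu/event wiring.
from PySide2 import QtWidgets

def make_publish_action(parent=None):
    action = QtWidgets.QAction("Publish", parent)
    action.triggered.connect(lambda: print("publish requested"))
    action.setShortcut("Ctrl+Alt+P")
    return action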
+ hiero.submission = self + publish() + + +def add_submission(): + registry = hiero.core.taskRegistry + registry.addSubmission("Pyblish", PyblishSubmission) + + +class PublishAction(QtWidgets.QAction): + """ + Action with is showing as menu item + """ + + def __init__(self): + QtWidgets.QAction.__init__(self, "Publish", None) + self.triggered.connect(self.publish) + + for interest in ["kShowContextMenu/kTimeline", + "kShowContextMenukBin", + "kShowContextMenu/kSpreadsheet"]: + hiero.core.events.registerInterest(interest, self.eventHandler) + + self.setShortcut("Ctrl+Alt+P") + + def publish(self): + from . import publish + # Removing "submission" attribute from hiero module, to prevent tasks + # from getting picked up when not using the "Export" dialog. + if hasattr(hiero, "submission"): + del hiero.submission + publish() + + def eventHandler(self, event): + # Add the Menu to the right-click menu + event.menu.addAction(self) + + +# def CreateNukeWorkfile(nodes=None, +# nodes_effects=None, +# to_timeline=False, +# **kwargs): +# ''' Creating nuke workfile with particular version with given nodes +# Also it is creating timeline track items as precomps. +# +# Arguments: +# nodes(list of dict): each key in dict is knob order is important +# to_timeline(type): will build trackItem with metadata +# +# Returns: +# bool: True if done +# +# Raises: +# Exception: with traceback +# +# ''' +# import hiero.core +# from avalon.nuke import imprint +# from pype.hosts.nuke import ( +# lib as nklib +# ) +# +# # check if the file exists if does then Raise "File exists!" +# if os.path.exists(filepath): +# raise FileExistsError("File already exists: `{}`".format(filepath)) +# +# # if no representations matching then +# # Raise "no representations to be build" +# if len(representations) == 0: +# raise AttributeError("Missing list of `representations`") +# +# # check nodes input +# if len(nodes) == 0: +# log.warning("Missing list of `nodes`") +# +# # create temp nk file +# nuke_script = hiero.core.nuke.ScriptWriter() +# +# # create root node and save all metadata +# root_node = hiero.core.nuke.RootNode() +# +# anatomy = Anatomy(os.environ["AVALON_PROJECT"]) +# work_template = anatomy.templates["work"]["path"] +# root_path = anatomy.root_value_for_template(work_template) +# +# nuke_script.addNode(root_node) +# +# # here to call pype.hosts.nuke.lib.BuildWorkfile +# script_builder = nklib.BuildWorkfile( +# root_node=root_node, +# root_path=root_path, +# nodes=nuke_script.getNodes(), +# **kwargs +# ) + + +def create_nuke_workfile_clips(nuke_workfiles, seq=None): + ''' + nuke_workfiles is list of dictionaries like: + [{ + 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', + 'name': 'test', + 'handleStart': 15, # added asymetrically to handles + 'handleEnd': 10, # added asymetrically to handles + "clipIn": 16, + "frameStart": 991, + "frameEnd": 1023, + 'task': 'Comp-tracking', + 'work_dir': 'VFX_PR', + 'shot': '00010' + }] + ''' + + proj = hiero.core.projects()[-1] + root = proj.clipsBin() + + if not seq: + seq = hiero.core.Sequence('NewSequences') + root.addItem(hiero.core.BinItem(seq)) + # todo will ned to define this better + # track = seq[1] # lazy example to get a destination# track + clips_lst = [] + for nk in nuke_workfiles: + task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) + bin = create_bin(task_path, proj) + + if nk['task'] not in seq.videoTracks(): + track = hiero.core.VideoTrack(nk['task']) + seq.addTrack(track) + else: + track = seq.tracks(nk['task']) + + # create clip media + media = 
hiero.core.MediaSource(nk['path']) + media_in = int(media.startTime() or 0) + media_duration = int(media.duration() or 0) + + handle_start = nk.get("handleStart") + handle_end = nk.get("handleEnd") + + if media_in: + source_in = media_in + handle_start + else: + source_in = nk["frameStart"] + handle_start + + if media_duration: + source_out = (media_in + media_duration - 1) - handle_end + else: + source_out = nk["frameEnd"] - handle_end + + source = hiero.core.Clip(media) + + name = os.path.basename(os.path.splitext(nk['path'])[0]) + split_name = split_by_client_version(name)[0] or name + + # add to bin as clip item + items_in_bin = [b.name() for b in bin.items()] + if split_name not in items_in_bin: + binItem = hiero.core.BinItem(source) + bin.addItem(binItem) + + new_source = [ + item for item in bin.items() if split_name in item.name() + ][0].items()[0].item() + + # add to track as clip item + trackItem = hiero.core.TrackItem( + split_name, hiero.core.TrackItem.kVideo) + trackItem.setSource(new_source) + trackItem.setSourceIn(source_in) + trackItem.setSourceOut(source_out) + trackItem.setTimelineIn(nk["clipIn"]) + trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) + track.addTrackItem(trackItem) + clips_lst.append(trackItem) + + return clips_lst + + +def create_bin(path=None, project=None): + ''' + Create bin in project. + If the path is "bin1/bin2/bin3" it will create whole depth + and return `bin3` + + ''' + # get the first loaded project + project = project or get_current_project() + + path = path or self.default_bin_name + + path = path.replace("\\", "/").split("/") + + root_bin = project.clipsBin() + + done_bin_lst = [] + for i, b in enumerate(path): + if i == 0 and len(path) > 1: + if b in [bin.name() for bin in root_bin.bins()]: + bin = [bin for bin in root_bin.bins() if b in bin.name()][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + root_bin.addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i >= 1 and i < len(path) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i == len(path) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + + return done_bin_lst[-1] + + +def split_by_client_version(string): + regex = r"[/_.]v\d+" + try: + matches = re.findall(regex, string, re.IGNORECASE) + return string.split(matches[0]) + except Exception as error: + log.error(error) + return None + + +def get_selected_track_items(sequence=None): + _sequence = sequence or get_current_sequence() + + # Getting selection + timeline_editor = hiero.ui.getTimelineEditor(_sequence) + return timeline_editor.selection() + + +def set_selected_track_items(track_items_list, sequence=None): + _sequence = sequence or get_current_sequence() + + # Getting selection + timeline_editor = hiero.ui.getTimelineEditor(_sequence) + return timeline_editor.setSelection(track_items_list) + + +def _read_doc_from_path(path): + # reading QDomDocument from HROX path + hrox_file = QFile(path) + if not hrox_file.open(QFile.ReadOnly): + raise RuntimeError("Failed to open file for reading") + doc = 
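# Aside: the split_by_client_version() regex above, demonstrated standalone
# (stdlib, runnable): it splits a name on the first version token such as
# "_v01", ".v012" or "/v3".
import re

name = "test_v01"
matches = re.findall(r"[/_.]v\d+", name, re.IGNORECASE)
assert matches == ["_v01"]
assert name.split(matches[0]) == ["test", ""]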
QDomDocument() + doc.setContent(hrox_file) + hrox_file.close() + return doc + + +def _write_doc_to_path(doc, path): + # write QDomDocument to path as HROX + hrox_file = QFile(path) + if not hrox_file.open(QFile.WriteOnly): + raise RuntimeError("Failed to open file for writing") + stream = QTextStream(hrox_file) + doc.save(stream, 1) + hrox_file.close() + + +def _set_hrox_project_knobs(doc, **knobs): + # set attributes to Project Tag + proj_elem = doc.documentElement().firstChildElement("Project") + for k, v in knobs.items(): + proj_elem.setAttribute(k, v) + + +def apply_colorspace_project(): + # get path the the active projects + project = get_current_project(remove_untitled=True) + current_file = project.path() + + # close the active project + project.close() + + # get presets for hiero + presets = config.get_init_presets() + colorspace = presets["colorspace"] + hiero_project_clrs = colorspace.get("hiero", {}).get("project", {}) + + # save the workfile as subversion "comment:_colorspaceChange" + split_current_file = os.path.splitext(current_file) + copy_current_file = current_file + + if "_colorspaceChange" not in current_file: + copy_current_file = ( + split_current_file[0] + + "_colorspaceChange" + + split_current_file[1] + ) + + try: + # duplicate the file so the changes are applied only to the copy + shutil.copyfile(current_file, copy_current_file) + except shutil.Error: + # in case the file already exists and it want to copy to the + # same filewe need to do this trick + # TEMP file name change + copy_current_file_tmp = copy_current_file + "_tmp" + # create TEMP file + shutil.copyfile(current_file, copy_current_file_tmp) + # remove original file + os.remove(current_file) + # copy TEMP back to original name + shutil.copyfile(copy_current_file_tmp, copy_current_file) + # remove the TEMP file as we dont need it + os.remove(copy_current_file_tmp) + + # use the code from bellow for changing xml hrox Attributes + hiero_project_clrs.update({"name": os.path.basename(copy_current_file)}) + + # read HROX in as QDomSocument + doc = _read_doc_from_path(copy_current_file) + + # apply project colorspace properties + _set_hrox_project_knobs(doc, **hiero_project_clrs) + + # write QDomSocument back as HROX + _write_doc_to_path(doc, copy_current_file) + + # open the file as current project + hiero.core.openProject(copy_current_file) + + +def apply_colorspace_clips(): + project = get_current_project(remove_untitled=True) + clips = project.clips() + + # get presets for hiero + presets = config.get_init_presets() + colorspace = presets["colorspace"] + hiero_clips_clrs = colorspace.get("hiero", {}).get("clips", {}) + + for clip in clips: + clip_media_source_path = clip.mediaSource().firstpath() + clip_name = clip.name() + clip_colorspace = clip.sourceMediaColourTransform() + + if "default" in clip_colorspace: + continue + + # check if any colorspace presets for read is mathing + preset_clrsp = next((hiero_clips_clrs[k] + for k in hiero_clips_clrs + if bool(re.search(k, clip_media_source_path))), + None) + + if preset_clrsp: + log.debug("Changing clip.path: {}".format(clip_media_source_path)) + log.info("Changing clip `{}` colorspace {} to {}".format( + clip_name, clip_colorspace, preset_clrsp)) + # set the found preset to the clip + clip.setSourceMediaColourTransform(preset_clrsp) + + # save project after all is changed + project.save() diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py index 7a6d142a10..782775253e 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio.py 
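# Aside: the copy workaround above, standalone: shutil.copyfile refuses when
# source and destination are the same file, so the code routes the copy
# through a temporary name. Runnable sketch with illustrative paths.
import os
import shutil

def safe_copy(src, dst):
    try:
        shutil.copyfile(src, dst)
    except shutil.SameFileError:        # subclass of shutil.Error
        tmp = dst + "_tmp"
        shutil.copyfile(src, tmp)       # duplicate under a temp name
        os.remove(src)                  # drop the original
        shutil.copyfile(tmp, dst)       # restore under the target name
        os.remove(tmp)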
@@ -1,5 +1,6 @@ +import json import opentimelineio as otio - +from . import lib TRACK_TYPES = { "video": otio.schema.TrackKind.Video, @@ -7,75 +8,85 @@ TRACK_TYPES = { } -def create_rational_time(frame, fps): +def create_otio_rational_time(frame, fps): return otio.opentime.RationalTime( float(frame), float(fps) ) -def create_time_range(start_frame, frame_duration, fps): +def create_otio_time_range(start_frame, frame_duration, fps): return otio.opentime.TimeRange( - start_time=create_rational_time(start_frame, fps), - duration=create_rational_time(frame_duration, fps) + start_time=create_otio_rational_time(start_frame, fps), + duration=create_otio_rational_time(frame_duration, fps) ) -def create_reference(media_pool_item): +def create_otio_reference(media_pool_item): + path = media_pool_item.GetClipProperty( + "File Path").get("File Path") + reformat_path = lib.get_reformated_path(path, padded=False) + frame_start = int(media_pool_item.GetClipProperty( + "Start").get("Start")) + frame_duration = int(media_pool_item.GetClipProperty( + "Frames").get("Frames")) + fps = media_pool_item.GetClipProperty("FPS").get("FPS") + return otio.schema.ExternalReference( - target_url=media_pool_item.GetClipProperty( - "File Path").get("File Path"), - available_range=create_time_range( - media_pool_item.GetClipProperty("Start").get("Start"), - media_pool_item.GetClipProperty("Frames").get("Frames"), - media_pool_item.GetClipProperty("FPS").get("FPS") + target_url=reformat_path, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps ) ) -def create_markers(track_item, frame_rate): +def create_otio_markers(track_item, frame_rate): track_item_markers = track_item.GetMarkers() markers = [] for marker_frame in track_item_markers: + note = track_item_markers[marker_frame]["note"] + if "{" in note and "}" in note: + metadata = json.loads(note) + else: + metadata = {"note": note} markers.append( otio.schema.Marker( name=track_item_markers[marker_frame]["name"], - marked_range=create_time_range( + marked_range=create_otio_time_range( marker_frame, track_item_markers[marker_frame]["duration"], frame_rate ), color=track_item_markers[marker_frame]["color"].upper(), - metadata={ - "Resolve": { - "note": track_item_markers[marker_frame]["note"] - } - } + metadata=metadata ) ) return markers -def create_clip(track_item): +def create_otio_clip(track_item): media_pool_item = track_item.GetMediaPoolItem() frame_rate = media_pool_item.GetClipProperty("FPS").get("FPS") + name = lib.get_reformated_path(track_item.GetName()) clip = otio.schema.Clip( - name=track_item.GetName(), - source_range=create_time_range( - track_item.GetLeftOffset(), - track_item.GetDuration(), + name=name, + source_range=create_otio_time_range( + int(track_item.GetLeftOffset()), + int(track_item.GetDuration()), frame_rate ), - media_reference=create_reference(media_pool_item) + media_reference=create_otio_reference(media_pool_item) ) - for marker in create_markers(track_item, frame_rate): + for marker in create_otio_markers(track_item, frame_rate): clip.markers.append(marker) return clip -def create_gap(gap_start, clip_start, tl_start_frame, frame_rate): +def create_otio_gap(gap_start, clip_start, tl_start_frame, frame_rate): return otio.schema.Gap( - source_range=create_time_range( + source_range=create_otio_time_range( gap_start, (clip_start - tl_start_frame) - gap_start, frame_rate @@ -83,23 +94,30 @@ def create_gap(gap_start, clip_start, tl_start_frame, frame_rate): ) -def create_timeline(timeline): - return 
otio.schema.Timeline(name=timeline.GetName()) +def create_otio_timeline(timeline, fps): + start_time = create_otio_rational_time( + timeline.GetStartFrame(), fps) + otio_timeline = otio.schema.Timeline( + name=timeline.GetName(), + global_start_time=start_time + ) + return otio_timeline -def create_track(track_type, track_name): +def create_otio_track(track_type, track_name): return otio.schema.Track( name=track_name, kind=TRACK_TYPES[track_type] ) -def create_complete_otio_timeline(project): +def get_otio_complete_timeline(project): # get current timeline timeline = project.GetCurrentTimeline() + fps = project.GetSetting("timelineFrameRate") # convert timeline to otio - otio_timeline = create_timeline(timeline) + otio_timeline = create_otio_timeline(timeline, fps) # loop all defined track types for track_type in list(TRACK_TYPES.keys()): @@ -112,7 +130,7 @@ def create_complete_otio_timeline(project): track_name = timeline.GetTrackName(track_type, track_index) # convert track to otio - otio_track = create_track( + otio_track = create_otio_track( track_type, "{}{}".format(track_name, track_index)) # get all track items in current track @@ -132,7 +150,7 @@ def create_complete_otio_timeline(project): if clip_start > otio_track.available_range().duration.value: # create gap and add it to track otio_track.append( - create_gap( + create_otio_gap( otio_track.available_range().duration.value, track_item.GetStart(), timeline.GetStartFrame(), @@ -141,13 +159,13 @@ def create_complete_otio_timeline(project): ) # create otio clip and add it to track - otio_track.append(create_clip(track_item)) + otio_track.append(create_otio_clip(track_item)) # add track to otio timeline otio_timeline.tracks.append(otio_track) -def get_clip_with_parents(track_item_data): +def get_otio_clip_instance_data(track_item_data): """ Return otio objects for timeline, track and clip @@ -161,19 +179,26 @@ def get_clip_with_parents(track_item_data): """ track_item = track_item_data["clip"]["item"] - timeline = track_item_data["timeline"] + project = track_item_data["project"] + timeline = track_item_data["sequence"] track_type = track_item_data["track"]["type"] track_name = track_item_data["track"]["name"] track_index = track_item_data["track"]["index"] + frame_start = track_item.GetStart() + frame_duration = track_item.GetDuration() + project_fps = project.GetSetting("timelineFrameRate") + + otio_clip_range = create_otio_time_range( + frame_start, frame_duration, project_fps) # convert timeline to otio - otio_timeline = create_timeline(timeline) + otio_timeline = create_otio_timeline(timeline, project_fps) # convert track to otio - otio_track = create_track( + otio_track = create_otio_track( track_type, "{}{}".format(track_name, track_index)) # create otio clip - otio_clip = create_clip(track_item) + otio_clip = create_otio_clip(track_item) # add it to track otio_track.append(otio_clip) @@ -184,9 +209,10 @@ def get_clip_with_parents(track_item_data): return { "otioTimeline": otio_timeline, "otioTrack": otio_track, - "otioClip": otio_clip + "otioClip": otio_clip, + "otioClipRange": otio_clip_range } -def save(otio_timeline, path): +def save_otio(otio_timeline, path): otio.adapters.write_to_file(otio_timeline, path) diff --git a/pype/hosts/resolve/pipeline_hiero.py b/pype/hosts/resolve/pipeline_hiero.py new file mode 100644 index 0000000000..73025e790f --- /dev/null +++ b/pype/hosts/resolve/pipeline_hiero.py @@ -0,0 +1,302 @@ +""" +Basic avalon integration +""" +import os +import contextlib +from collections import OrderedDict 
+from avalon.tools import ( + workfiles, + publish as _publish +) +from avalon.pipeline import AVALON_CONTAINER_ID +from avalon import api as avalon +from avalon import schema +from pyblish import api as pyblish +import pype +from pype.api import Logger + +from . import lib, menu, events + +log = Logger().get_logger(__name__, "hiero") + +AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") + +# plugin paths +LOAD_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "load") +CREATE_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "create") +INVENTORY_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "inventory") + +PUBLISH_PATH = os.path.join( + pype.PLUGINS_DIR, "hiero", "publish" +).replace("\\", "/") + +AVALON_CONTAINERS = ":AVALON_CONTAINERS" + + +def install(): + """ + Installing Hiero integration for avalon + + Args: + config (obj): avalon config module `pype` in our case, it is not + used but required by avalon.api.install() + + """ + + # adding all events + events.register_events() + + log.info("Registering Hiero plug-ins..") + pyblish.register_host("hiero") + pyblish.register_plugin_path(PUBLISH_PATH) + avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + avalon.register_plugin_path(avalon.Creator, CREATE_PATH) + avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) + + # register callback for switching publishable + pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) + + # Disable all families except for the ones we explicitly want to see + family_states = [ + "write", + "review", + "plate" + ] + + avalon.data["familiesStateDefault"] = False + avalon.data["familiesStateToggled"] = family_states + + # install menu + menu.menu_install() + + # register hiero events + events.register_hiero_events() + + +def uninstall(): + """ + Uninstalling Hiero integration for avalon + + """ + log.info("Deregistering Hiero plug-ins..") + pyblish.deregister_host("hiero") + pyblish.deregister_plugin_path(PUBLISH_PATH) + avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) + avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) + + # register callback for switching publishable + pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) + + +def containerise(track_item, + name, + namespace, + context, + loader=None, + data=None): + """Bundle Hiero's object into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + track_item (hiero.core.TrackItem): object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + context (dict): Asset information + loader (str, optional): Name of node used to produce this container. + + Returns: + track_item (hiero.core.TrackItem): containerised object + + """ + + data_imprint = OrderedDict({ + "schema": "avalon-core:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": str(name), + "namespace": str(namespace), + "loader": str(loader), + "representation": str(context["representation"]["_id"]), + }) + + if data: + for k, v in data.items(): + data_imprint.update({k: v}) + + log.debug("_ data_imprint: {}".format(data_imprint)) + lib.set_track_item_pype_tag(track_item, data_imprint) + + return track_item + + +def ls(): + """List available containers. + + This function is used by the Container Manager in Nuke. You'll + need to implement a for-loop that then *yields* one Container at + a time. 
+ + See the `container.json` schema for details on how it should look, + and the Maya equivalent, which is in `avalon.maya.pipeline` + """ + + # get all track items from current timeline + all_track_items = lib.get_track_items() + + for track_item in all_track_items: + container = parse_container(track_item) + if container: + yield container + + +def parse_container(track_item, validate=True): + """Return container data from track_item's pype tag. + + Args: + track_item (hiero.core.TrackItem): A containerised track item. + validate (bool)[optional]: validating with avalon scheme + + Returns: + dict: The container schema data for input containerized track item. + + """ + # convert tag metadata to normal keys names + data = lib.get_track_item_pype_data(track_item) + + if validate and data and data.get("schema"): + schema.validate(data) + + if not isinstance(data, dict): + return + + # If not all required data return the empty container + required = ['schema', 'id', 'name', + 'namespace', 'loader', 'representation'] + + if not all(key in data for key in required): + return + + container = {key: data[key] for key in required} + + container["objectName"] = track_item.name() + + # Store reference to the node object + container["_track_item"] = track_item + + return container + + +def update_container(track_item, data=None): + """Update container data to input track_item's pype tag. + + Args: + track_item (hiero.core.TrackItem): A containerised track item. + data (dict)[optional]: dictionery with data to be updated + + Returns: + bool: True if container was updated correctly + + """ + data = data or dict() + + container = lib.get_track_item_pype_data(track_item) + + for _key, _value in container.items(): + try: + container[_key] = data[_key] + except KeyError: + pass + + log.info("Updating container: `{}`".format(track_item.name())) + return bool(lib.set_track_item_pype_tag(track_item, container)) + + +def launch_workfiles_app(*args): + ''' Wrapping function for workfiles launcher ''' + + workdir = os.environ["AVALON_WORKDIR"] + + # show workfile gui + workfiles.show(workdir) + + +def publish(parent): + """Shorthand to publish from within host""" + return _publish.show(parent) + + +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context + + Example: + >>> with maintained_selection(): + ... for track_item in track_items: + ... < do some stuff > + """ + from .lib import ( + set_selected_track_items, + get_selected_track_items + ) + previous_selection = get_selected_track_items() + reset_selection() + try: + # do the operation + yield + finally: + reset_selection() + set_selected_track_items(previous_selection) + + +def reset_selection(): + """Deselect all selected nodes + """ + from .lib import set_selected_track_items + set_selected_track_items([]) + + +def reload_config(): + """Attempt to reload pipeline at run-time. + + CAUTION: This is primarily for development and debugging purposes. 
+ + """ + import importlib + + for module in ( + "avalon", + "avalon.lib", + "avalon.pipeline", + "pyblish", + "pypeapp", + "{}.api".format(AVALON_CONFIG), + "{}.hosts.hiero.lib".format(AVALON_CONFIG), + "{}.hosts.hiero.menu".format(AVALON_CONFIG), + "{}.hosts.hiero.tags".format(AVALON_CONFIG) + ): + log.info("Reloading module: {}...".format(module)) + try: + module = importlib.import_module(module) + import imp + imp.reload(module) + except Exception as e: + log.warning("Cannot reload module: {}".format(e)) + importlib.reload(module) + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + + log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) + + from pype.hosts.hiero import ( + get_track_item_pype_tag, + set_publish_attribute + ) + + # Whether instances should be passthrough based on new value + track_item = instance.data["item"] + tag = get_track_item_pype_tag(track_item) + set_publish_attribute(tag, new_value) diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index be666358ae..1b7e6fc051 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -150,7 +150,10 @@ class CreatorWidget(QtWidgets.QDialog): for func, val in kwargs.items(): if getattr(item, func): func_attr = getattr(item, func) - func_attr(val) + if isinstance(val, tuple): + func_attr(*val) + else: + func_attr(val) # add to layout layout.addRow(label, item) @@ -253,7 +256,9 @@ class CreatorWidget(QtWidgets.QDialog): elif v["type"] == "QSpinBox": data[k]["value"] = self.create_row( content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMaximum=10000, setToolTip=tool_tip) + setRange=(1, 99999), + setValue=v["value"], + setToolTip=tool_tip) return data diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py index b8c929f3d6..7f874a3281 100644 --- a/pype/plugins/resolve/publish/collect_instances.py +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -40,12 +40,6 @@ class CollectInstances(pyblish.api.ContextPlugin): clip_property = media_pool_item.GetClipProperty() self.log.debug(f"clip_property: {clip_property}") - source_path = os.path.normpath( - clip_property["File Path"]) - source_name = clip_property["File Name"] - self.log.debug(f"source_path: {source_path}") - self.log.debug(f"source_name: {source_name}") - # add tag data to instance data data.update({ k: v for k, v in tag_data.items() @@ -61,14 +55,6 @@ class CollectInstances(pyblish.api.ContextPlugin): families = [str(f) for f in tag_data["families"]] families.insert(0, str(family)) - track = track_item_data["track"]["name"] - base_name = os.path.basename(source_path) - file_head = os.path.splitext(base_name)[0] - source_first_frame = int( - track_item.GetStart() - - track_item.GetLeftOffset() - ) - # apply only for feview and master track instance if review: families += ["review", "ftrack"] @@ -81,17 +67,21 @@ class CollectInstances(pyblish.api.ContextPlugin): "publish": resolve.get_publish_attribute(track_item), # tags "tags": tag_data, - - # track item attributes - "track": track, - - # source attribute - "source": source_path, - "sourcePath": source_path, - "sourceFileHead": file_head, - "sourceFirst": source_first_frame, }) + # otio + otio_data = resolve.get_otio_clip_instance_data(track_item_data) + data.update(otio_data) + + file_name = "".join([asset, "_", subset, ".otio"]) + file_dir = 
os.path.dirname(context.data["currentFile"]) + file_path = os.path.join(file_dir, "otio", file_name) + + resolve.save_otio(otio_data["otioTimeline"], file_path) + + # create instance instance = context.create_instance(**data) self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) diff --git a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py index f506333a67..ddb57def00 100644 --- a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py +++ b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py @@ -202,7 +202,8 @@ marker_color_map = { "PURPLE": "Magenta", "MAGENTA": "Magenta", "BLACK": "Blue", - "WHITE": "Green" + "WHITE": "Green", + "MINT": "Cyan" } @@ -259,7 +260,7 @@ def add_markers(otio_item, hiero_item, tagsbin): marker.marked_range.duration.value ) - tag = hiero_item.addTagToRange(_tag, start, end) + tag = hiero_item.addTag(_tag) tag.setName(marker.name or marker_color_map[marker_color]) # Add metadata @@ -285,7 +286,7 @@ def create_track(otio_track, tracknum, track_kind): return track -def create_clip(otio_clip, tagsbin): +def create_clip(otio_clip): # Create MediaSource otio_media = otio_clip.media_reference if isinstance(otio_media, otio.schema.ExternalReference): @@ -300,13 +301,10 @@ def create_clip(otio_clip, tagsbin): # Create Clip clip = hiero.core.Clip(media) - # Add markers - add_markers(otio_clip, clip, tagsbin) - return clip -def create_trackitem(playhead, track, otio_clip, clip): +def create_trackitem(playhead, track, otio_clip, clip, tagsbin): source_range = otio_clip.source_range trackitem = track.createTrackItem(otio_clip.name) @@ -352,6 +350,9 @@ def create_trackitem(playhead, track, otio_clip, clip): trackitem.setTimelineIn(timeline_in) trackitem.setTimelineOut(timeline_out) + # Add markers + add_markers(otio_clip, trackitem, tagsbin) + return trackitem @@ -362,6 +363,10 @@ def build_sequence(otio_timeline, project=None, track_kind=None): # Create a Sequence sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') + global_start_time = otio_timeline.global_start_time.value + global_rate = otio_timeline.global_start_time.rate + sequence.setFramerate(global_rate) + sequence.setTimecodeStart(global_start_time) # Create a Bin to hold clips projectbin = project.clipsBin() @@ -403,7 +408,7 @@ def build_sequence(otio_timeline, project=None, track_kind=None): elif isinstance(otio_clip, otio.schema.Clip): # Create a Clip - clip = create_clip(otio_clip, tagsbin) + clip = create_clip(otio_clip) # Add Clip to a Bin sequencebin.addItem(hiero.core.BinItem(clip)) @@ -413,7 +418,8 @@ def build_sequence(otio_timeline, project=None, track_kind=None): playhead, track, otio_clip, - clip + clip, + tagsbin ) # Add trackitem to track From 9b8d34ed177962199bd445082d34929690df29f7 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 3 Dec 2020 13:00:44 +0100 Subject: [PATCH 014/198] feat(resolve): otio publishing wip hiero otio import fixes --- pype/hosts/resolve/__init__.py | 17 +- pype/hosts/resolve/lib.py | 28 +- pype/hosts/resolve/lib_hiero.py | 838 ++++++++++++++++++ pype/hosts/resolve/otio.py | 112 ++- pype/hosts/resolve/pipeline_hiero.py | 302 +++++++ pype/hosts/resolve/plugin.py | 9 +- .../resolve/publish/collect_instances.py | 36 +- .../StartupUI/otioimporter/OTIOImport.py | 24 +- 8 files changed, 1286 insertions(+), 80 deletions(-) create mode 100644 
pype/hosts/resolve/lib_hiero.py
 create mode 100644 pype/hosts/resolve/pipeline_hiero.py

diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py
index 83f8e3a720..45aa5502cc 100644
--- a/pype/hosts/resolve/__init__.py
+++ b/pype/hosts/resolve/__init__.py
@@ -29,7 +29,8 @@ from .lib import (
     create_compound_clip,
     swap_clips,
     get_pype_clip_metadata,
-    set_project_manager_to_folder_name
+    set_project_manager_to_folder_name,
+    get_reformated_path
 )

 from .menu import launch_pype_menu
@@ -48,6 +49,12 @@ from .workio import (
     work_root
 )

+from .otio import (
+    get_otio_clip_instance_data,
+    get_otio_complete_timeline,
+    save_otio
+)
+
 bmdvr = None
 bmdvf = None

@@ -83,6 +90,7 @@ __all__ = [
     "swap_clips",
     "get_pype_clip_metadata",
     "set_project_manager_to_folder_name",
+    "get_reformated_path",

     # menu
     "launch_pype_menu",
@@ -101,5 +109,10 @@ __all__ = [

     # singleton with black magic resolve module
     "bmdvr",
-    "bmdvf"
+    "bmdvf",
+
+    # open timeline io integration
+    "get_otio_clip_instance_data",
+    "get_otio_complete_timeline",
+    "save_otio"
 ]
diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py
index 2ade558d89..777cae0eb2 100644
--- a/pype/hosts/resolve/lib.py
+++ b/pype/hosts/resolve/lib.py
@@ -1,6 +1,6 @@
 import sys
 import json
-import ast
+import re

 from opentimelineio import opentime
 from pype.api import Logger
@@ -25,6 +25,7 @@ self.pype_marker_duration = 1
 self.pype_marker_color = "Mint"
 self.temp_marker_frame = None

+
 def get_project_manager():
     from . import bmdvr
     if not self.project_manager:
@@ -621,3 +622,28 @@ def convert_resolve_list_type(resolve_list):
             "Input argument should be dict() type")

     return [resolve_list[i] for i in sorted(resolve_list.keys())]
+
+
+def get_reformated_path(path, padded=True):
+    """
+    Return path with a frame-range token reformatted to a padding expression
+
+    Args:
+        path (str): path url or simple file name
+
+    Returns:
+        str: string with reformatted path
+
+    Example:
+        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
+
+    """
+    num_pattern = "(\\[\\d+\\-\\d+\\])"
+    padding_pattern = "(\\d+)(?=-)"
+    if "[" in path:
+        padding = len(re.findall(padding_pattern, path).pop())
+        if padded:
+            path = re.sub(num_pattern, f"%0{padding}d", path)
+        else:
+            path = re.sub(num_pattern, "%d", path)
+    return path
diff --git a/pype/hosts/resolve/lib_hiero.py b/pype/hosts/resolve/lib_hiero.py
new file mode 100644
index 0000000000..891ca3905c
--- /dev/null
+++ b/pype/hosts/resolve/lib_hiero.py
@@ -0,0 +1,838 @@
+"""
+Host specific functions where host api is connected
+"""
+import os
+import re
+import sys
+import ast
+import hiero
+import avalon.api as avalon
+import avalon.io
+from avalon.vendor.Qt import QtWidgets
+from pype.api import (Logger, Anatomy, config)
+from . import tags
+import shutil
+from compiler.ast import flatten
+
+try:
+    from PySide.QtCore import QFile, QTextStream
+    from PySide.QtXml import QDomDocument
+except ImportError:
+    from PySide2.QtCore import QFile, QTextStream
+    from PySide2.QtXml import QDomDocument
+
+# from opentimelineio import opentime
+# from pprint import pformat
+
+log = Logger().get_logger(__name__, "hiero")
+
+self = sys.modules[__name__]
+self._has_been_setup = False
+self._has_menu = False
+self._registered_gui = None
+self.pype_tag_name = "Pype Data"
+self.default_sequence_name = "PypeSequence"
+self.default_bin_name = "PypeBin"
+
+AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
+
+
+def get_current_project(remove_untitled=False):
+    projects = flatten(hiero.core.projects())
+    if not remove_untitled:
+        return next(iter(projects))
+
+    # if remove_untitled
+    for proj in projects:
+        if "Untitled" in proj.name():
+            proj.close()
+        else:
+            return proj
+
+
+def get_current_sequence(name=None, new=False):
+    """
+    Get current sequence in context of active project.
+
+    Args:
+        name (str)[optional]: name of sequence we want to return
+        new (bool)[optional]: if we want to create a new one
+
+    Returns:
+        hiero.core.Sequence: the sequence object
+    """
+    sequence = None
+    project = get_current_project()
+    root_bin = project.clipsBin()
+
+    if new:
+        # create new
+        name = name or self.default_sequence_name
+        sequence = hiero.core.Sequence(name)
+        root_bin.addItem(hiero.core.BinItem(sequence))
+    elif name:
+        # look for sequence by name
+        sequences = project.sequences()
+        for _sequence in sequences:
+            if _sequence.name() == name:
+                sequence = _sequence
+        if not sequence:
+            # if nothing found create new with input name
+            sequence = get_current_sequence(name, True)
+    elif not name and not new:
+        # if name is none and new is False then return current open sequence
+        sequence = hiero.ui.activeSequence()
+
+    return sequence
+
+
+def get_current_track(sequence, name, audio=False):
+    """
+    Get current track in context of active project.
+
+    Creates a new one if none is found.
+
+    Args:
+        sequence (hiero.core.Sequence): hiero sequence object
+        name (str): name of track we want to return
+        audio (bool)[optional]: switch to AudioTrack
+
+    Returns:
+        hiero.core.Track: the track object
+    """
+    tracks = sequence.videoTracks()
+
+    if audio:
+        tracks = sequence.audioTracks()
+
+    # get track by name
+    track = None
+    for _track in tracks:
+        if _track.name() in name:
+            track = _track
+
+    if not track:
+        if not audio:
+            track = hiero.core.VideoTrack(name)
+        else:
+            track = hiero.core.AudioTrack(name)
+        sequence.addTrack(track)
+
+    return track
+
+
+def get_track_items(
+        selected=False,
+        sequence_name=None,
+        track_item_name=None,
+        track_name=None,
+        track_type=None,
+        check_enabled=True,
+        check_locked=True,
+        check_tagged=False):
+    """Get all available current timeline track items.
+
+    Attributes:
+        selected (bool)[optional]: return only selected items on timeline
+        sequence_name (str)[optional]: return only clips from input sequence
+        track_item_name (str)[optional]: return only item with input name
+        track_name (str)[optional]: return only items from track name
+        track_type (str)[optional]: return only items of given type
+                                    (`audio` or `video`) default is `video`
+        check_enabled (bool)[optional]: ignore disabled if True
+        check_locked (bool)[optional]: ignore locked if True
+        check_tagged (bool)[optional]: ignore items without tags if True
+
+    Return:
+        list or hiero.core.TrackItem: list of track items or single track item
+    """
+    return_list = list()
+    track_items = list()
+
+    # get selected track items or all in active sequence
+    if selected:
+        selected_items = list(hiero.selection)
+        for item in selected_items:
+            if track_name and track_name in item.parent().name():
+                # filter only items fitting input track name
+                track_items.append(item)
+            elif not track_name:
+                # or add all if no track_name was defined
+                track_items.append(item)
+    else:
+        sequence = get_current_sequence(name=sequence_name)
+        # get all available tracks from sequence
+        tracks = list(sequence.audioTracks()) + list(sequence.videoTracks())
+        # loop all tracks
+        for track in tracks:
+            if check_locked and track.isLocked():
+                continue
+            if check_enabled and not track.isEnabled():
+                continue
+            # and all items in track
+            for item in track.items():
+                if check_tagged and not item.tags():
+                    continue
+
+                # check if track item is enabled
+                if check_enabled:
+                    if not item.isEnabled():
+                        continue
+                if track_item_name:
+                    if item.name() in track_item_name:
+                        return item
+                # make sure only track items with correct track names are added
+                if track_name and track_name in track.name():
+                    # filter out only defined track_name items
+                    track_items.append(item)
+                elif not track_name:
+                    # or add all if no track_name is defined
+                    track_items.append(item)
+
+    # filter out only track items with defined track_type
+    for track_item in track_items:
+        if track_type and track_type == "video" and isinstance(
+                track_item.parent(), hiero.core.VideoTrack):
+            # only video track items are allowed
+            return_list.append(track_item)
+        elif track_type and track_type == "audio" and isinstance(
+                track_item.parent(), hiero.core.AudioTrack):
+            # only audio track items are allowed
+            return_list.append(track_item)
+        elif not track_type:
+            # add all if no track_type is defined
+            return_list.append(track_item)
+
+    return return_list
+
+
+def get_track_item_pype_tag(track_item):
+    """
+    Get pype track item tag created by creator or loader plugin.
+
+    Attributes:
+        trackItem (hiero.core.TrackItem): hiero object
+
+    Returns:
+        hiero.core.Tag: hierarchy, orig clip attributes
+    """
+    # get all tags from track item
+    _tags = track_item.tags()
+    if not _tags:
+        return None
+    for tag in _tags:
+        # return only correct tag defined by global name
+        if tag.name() in self.pype_tag_name:
+            return tag
+
+
+def set_track_item_pype_tag(track_item, data=None):
+    """
+    Set pype track item tag to input track_item.
+
+    Attributes:
+        trackItem (hiero.core.TrackItem): hiero object
+
+    Returns:
+        hiero.core.Tag
+    """
+    data = data or dict()
+
+    # basic Tag's attribute
+    tag_data = {
+        "editable": "0",
+        "note": "Pype data holder",
+        "icon": "pype_icon.png",
+        "metadata": {k: v for k, v in data.items()}
+    }
+    # get available pype tag if any
+    _tag = get_track_item_pype_tag(track_item)
+
+    if _tag:
+        # if a pype tag is available then update it with input data
+        tag = tags.update_tag(_tag, tag_data)
+    else:
+        # if no tag is found then create one
+        tag = tags.create_tag(self.pype_tag_name, tag_data)
+        # add it to the input track item
+        track_item.addTag(tag)
+
+    return tag
+
+
+def get_track_item_pype_data(track_item):
+    """
+    Get track item's pype tag data.
+
+    Attributes:
+        trackItem (hiero.core.TrackItem): hiero object
+
+    Returns:
+        dict: data found on pype tag
+    """
+    data = dict()
+    # get pype data tag from track item
+    tag = get_track_item_pype_tag(track_item)
+
+    if not tag:
+        return None
+
+    # get tag metadata attribute
+    tag_data = tag.metadata()
+    # convert tag metadata to normal key names and values to correct types
+    for k, v in dict(tag_data).items():
+        key = k.replace("tag.", "")
+
+        try:
+            # capture exceptions which are related to strings only
+            value = ast.literal_eval(v)
+        except (ValueError, SyntaxError):
+            value = v
+
+        data.update({key: value})
+
+    return data
+
+
+def imprint(track_item, data=None):
+    """
+    Adding `Avalon data` into a hiero track item tag.
+
+    Also including publish attribute into tag.
+
+    Arguments:
+        track_item (hiero.core.TrackItem): hiero track item object
+        data (dict): Any data which needs to be imprinted
+
+    Examples:
+        data = {
+            'asset': 'sq020sh0280',
+            'family': 'render',
+            'subset': 'subsetMain'
+        }
+    """
+    data = data or {}
+
+    tag = set_track_item_pype_tag(track_item, data)
+
+    # add publish attribute
+    set_publish_attribute(tag, True)
+
+
+def set_publish_attribute(tag, value):
+    """ Set Publish attribute in input Tag object
+
+    Attribute:
+        tag (hiero.core.Tag): a tag object
+        value (bool): True or False
+    """
+    tag_data = tag.metadata()
+    # set data to the publish attribute
+    tag_data.setValue("tag.publish", str(value))
+
+
+def get_publish_attribute(tag):
+    """ Get Publish attribute from input Tag object
+
+    Attribute:
+        tag (hiero.core.Tag): a tag object
+
+    Returns:
+        bool: the stored publish state
+    """
+    tag_data = tag.metadata()
+    # get data from the publish attribute
+    value = tag_data.value("tag.publish")
+    # return value converted to bool. A string is stored in the tag.
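+    # e.g. ast.literal_eval("True") -> True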
+    return ast.literal_eval(value)
+
+
+def sync_avalon_data_to_workfile():
+    # import session to get project dir
+    project_name = avalon.Session["AVALON_PROJECT"]
+
+    anatomy = Anatomy(project_name)
+    work_template = anatomy.templates["work"]["path"]
+    work_root = anatomy.root_value_for_template(work_template)
+    active_project_root = (
+        os.path.join(work_root, project_name)
+    ).replace("\\", "/")
+    # getting project
+    project = get_current_project()
+
+    if "Tag Presets" in project.name():
+        return
+
+    log.debug("Synchronizing Pype metadata to project: {}".format(
+        project.name()))
+
+    # set project root with backward compatibility
+    try:
+        project.setProjectDirectory(active_project_root)
+    except Exception:
+        # old way of setting it
+        project.setProjectRoot(active_project_root)
+
+    # get project data from avalon db
+    project_doc = avalon.io.find_one({"type": "project"})
+    project_data = project_doc["data"]
+
+    log.debug("project_data: {}".format(project_data))
+
+    # get format and fps property from avalon db on project
+    width = project_data["resolutionWidth"]
+    height = project_data["resolutionHeight"]
+    pixel_aspect = project_data["pixelAspect"]
+    fps = project_data['fps']
+    format_name = project_data['code']
+
+    # create new format in hiero project
+    format = hiero.core.Format(width, height, pixel_aspect, format_name)
+    project.setOutputFormat(format)
+
+    # set fps to hiero project
+    project.setFramerate(fps)
+
+    # TODO: add auto colorspace set from project drop
+    log.info("Project property has been synchronised with Avalon db")
+
+
+def launch_workfiles_app(event):
+    """
+    Event for launching workfiles after hiero start
+
+    Args:
+        event (obj): required but unused
+    """
+    from . import launch_workfiles_app
+    launch_workfiles_app()
+
+
+def setup(console=False, port=None, menu=True):
+    """Setup integration
+
+    Registers Pyblish for Hiero plug-ins and appends an item to the File-menu
+
+    Arguments:
+        console (bool): Display console with GUI
+        port (int, optional): Port from which to start looking for an
+            available port to connect with Pyblish QML, default
+            provided by Pyblish Integration.
+        menu (bool, optional): Display file menu in Hiero.
+    """
+
+    if self._has_been_setup:
+        teardown()
+
+    add_submission()
+
+    if menu:
+        add_to_filemenu()
+        self._has_menu = True
+
+    self._has_been_setup = True
+    log.debug("pyblish: Loaded successfully.")
+
+
+def teardown():
+    """Remove integration"""
+    if not self._has_been_setup:
+        return
+
+    if self._has_menu:
+        remove_from_filemenu()
+        self._has_menu = False
+
+    self._has_been_setup = False
+    log.debug("pyblish: Integration torn down successfully")
+
+
+def remove_from_filemenu():
+    raise NotImplementedError("Implement me please.")
+
+
+def add_to_filemenu():
+    PublishAction()
+
+
+class PyblishSubmission(hiero.exporters.FnSubmission.Submission):
+
+    def __init__(self):
+        hiero.exporters.FnSubmission.Submission.__init__(self)
+
+    def addToQueue(self):
+        from . import publish
+        # Add submission to Hiero module for retrieval in plugins.
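+        # plugins can test `hasattr(hiero, "submission")` to know the
+        # publish was triggered from Hiero's export dialog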
+        hiero.submission = self
+        publish()
+
+
+def add_submission():
+    registry = hiero.core.taskRegistry
+    registry.addSubmission("Pyblish", PyblishSubmission)
+
+
+class PublishAction(QtWidgets.QAction):
+    """
+    Action which is shown as a menu item
+    """
+
+    def __init__(self):
+        QtWidgets.QAction.__init__(self, "Publish", None)
+        self.triggered.connect(self.publish)
+
+        for interest in ["kShowContextMenu/kTimeline",
+                         "kShowContextMenu/kBin",
+                         "kShowContextMenu/kSpreadsheet"]:
+            hiero.core.events.registerInterest(interest, self.eventHandler)
+
+        self.setShortcut("Ctrl+Alt+P")
+
+    def publish(self):
+        from . import publish
+        # Removing "submission" attribute from hiero module, to prevent tasks
+        # from getting picked up when not using the "Export" dialog.
+        if hasattr(hiero, "submission"):
+            del hiero.submission
+        publish()
+
+    def eventHandler(self, event):
+        # Add the Menu to the right-click menu
+        event.menu.addAction(self)
+
+
+# def CreateNukeWorkfile(nodes=None,
+#                        nodes_effects=None,
+#                        to_timeline=False,
+#                        **kwargs):
+#     ''' Creating nuke workfile with particular version with given nodes
+#     Also it is creating timeline track items as precomps.
+#
+#     Arguments:
+#         nodes(list of dict): each key in dict is knob order is important
+#         to_timeline(type): will build trackItem with metadata
+#
+#     Returns:
+#         bool: True if done
+#
+#     Raises:
+#         Exception: with traceback
+#
+#     '''
+#     import hiero.core
+#     from avalon.nuke import imprint
+#     from pype.hosts.nuke import (
+#         lib as nklib
+#     )
+#
+#     # check if the file exists if does then Raise "File exists!"
+#     if os.path.exists(filepath):
+#         raise FileExistsError("File already exists: `{}`".format(filepath))
+#
+#     # if no representations matching then
+#     # Raise "no representations to be build"
+#     if len(representations) == 0:
+#         raise AttributeError("Missing list of `representations`")
+#
+#     # check nodes input
+#     if len(nodes) == 0:
+#         log.warning("Missing list of `nodes`")
+#
+#     # create temp nk file
+#     nuke_script = hiero.core.nuke.ScriptWriter()
+#
+#     # create root node and save all metadata
+#     root_node = hiero.core.nuke.RootNode()
+#
+#     anatomy = Anatomy(os.environ["AVALON_PROJECT"])
+#     work_template = anatomy.templates["work"]["path"]
+#     root_path = anatomy.root_value_for_template(work_template)
+#
+#     nuke_script.addNode(root_node)
+#
+#     # here to call pype.hosts.nuke.lib.BuildWorkfile
+#     script_builder = nklib.BuildWorkfile(
+#         root_node=root_node,
+#         root_path=root_path,
+#         nodes=nuke_script.getNodes(),
+#         **kwargs
+#     )
+
+
+def create_nuke_workfile_clips(nuke_workfiles, seq=None):
+    '''
+    nuke_workfiles is a list of dictionaries like:
+    [{
+        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
+        'name': 'test',
+        'handleStart': 15, # added asymmetrically to handles
+        'handleEnd': 10, # added asymmetrically to handles
+        "clipIn": 16,
+        "frameStart": 991,
+        "frameEnd": 1023,
+        'task': 'Comp-tracking',
+        'work_dir': 'VFX_PR',
+        'shot': '00010'
+    }]
+    '''
+
+    proj = hiero.core.projects()[-1]
+    root = proj.clipsBin()
+
+    if not seq:
+        seq = hiero.core.Sequence('NewSequences')
+        root.addItem(hiero.core.BinItem(seq))
+    # todo: will need to define this better
+    # track = seq[1]  # lazy example to get a destination track
+    clips_lst = []
+    for nk in nuke_workfiles:
+        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
+        bin = create_bin(task_path, proj)
+
+        if nk['task'] not in seq.videoTracks():
+            track = hiero.core.VideoTrack(nk['task'])
+            seq.addTrack(track)
+        else:
+            track = seq.tracks(nk['task'])
+
+        # create clip media
+        media = hiero.core.MediaSource(nk['path'])
+        media_in = int(media.startTime() or 0)
+        media_duration = int(media.duration() or 0)
+
+        handle_start = nk.get("handleStart")
+        handle_end = nk.get("handleEnd")
+
+        if media_in:
+            source_in = media_in + handle_start
+        else:
+            source_in = nk["frameStart"] + handle_start
+
+        if media_duration:
+            source_out = (media_in + media_duration - 1) - handle_end
+        else:
+            source_out = nk["frameEnd"] - handle_end
+
+        source = hiero.core.Clip(media)
+
+        name = os.path.basename(os.path.splitext(nk['path'])[0])
+        split_name = split_by_client_version(name)[0] or name
+
+        # add to bin as clip item
+        items_in_bin = [b.name() for b in bin.items()]
+        if split_name not in items_in_bin:
+            binItem = hiero.core.BinItem(source)
+            bin.addItem(binItem)
+
+        new_source = [
+            item for item in bin.items() if split_name in item.name()
+        ][0].items()[0].item()
+
+        # add to track as clip item
+        trackItem = hiero.core.TrackItem(
+            split_name, hiero.core.TrackItem.kVideo)
+        trackItem.setSource(new_source)
+        trackItem.setSourceIn(source_in)
+        trackItem.setSourceOut(source_out)
+        trackItem.setTimelineIn(nk["clipIn"])
+        trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
+        track.addTrackItem(trackItem)
+        clips_lst.append(trackItem)
+
+    return clips_lst
+
+
+def create_bin(path=None, project=None):
+    '''
+    Create bin in project.
+    If the path is "bin1/bin2/bin3" it will create the whole hierarchy
+    and return `bin3`
+
+    '''
+    # get the first loaded project
+    project = project or get_current_project()
+
+    path = path or self.default_bin_name
+
+    path = path.replace("\\", "/").split("/")
+
+    root_bin = project.clipsBin()
+
+    done_bin_lst = []
+    for i, b in enumerate(path):
+        if i == 0 and len(path) > 1:
+            if b in [bin.name() for bin in root_bin.bins()]:
+                bin = [bin for bin in root_bin.bins() if b in bin.name()][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                root_bin.addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+        elif i >= 1 and i < len(path) - 1:
+            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
+                bin = [
+                    bin for bin in done_bin_lst[i - 1].bins()
+                    if b in bin.name()
+                ][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                done_bin_lst[i - 1].addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+        elif i == len(path) - 1:
+            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
+                bin = [
+                    bin for bin in done_bin_lst[i - 1].bins()
+                    if b in bin.name()
+                ][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                done_bin_lst[i - 1].addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+    return done_bin_lst[-1]
+
+
+def split_by_client_version(string):
+    regex = r"[/_.]v\d+"
+    try:
+        matches = re.findall(regex, string, re.IGNORECASE)
+        return string.split(matches[0])
+    except Exception as error:
+        log.error(error)
+        return None
+
+
+def get_selected_track_items(sequence=None):
+    _sequence = sequence or get_current_sequence()
+
+    # Getting selection
+    timeline_editor = hiero.ui.getTimelineEditor(_sequence)
+    return timeline_editor.selection()
+
+
+def set_selected_track_items(track_items_list, sequence=None):
+    _sequence = sequence or get_current_sequence()
+
+    # Setting selection
+    timeline_editor = hiero.ui.getTimelineEditor(_sequence)
+    return timeline_editor.setSelection(track_items_list)
+
+
+def _read_doc_from_path(path):
+    # reading QDomDocument from HROX path
+    hrox_file = QFile(path)
+    if not hrox_file.open(QFile.ReadOnly):
+        raise RuntimeError("Failed to open file for reading")
+    doc = QDomDocument()
+    doc.setContent(hrox_file)
+    hrox_file.close()
+    return doc
+
+
+def _write_doc_to_path(doc, path):
+    # write QDomDocument to path as HROX
+    hrox_file = QFile(path)
+    if not hrox_file.open(QFile.WriteOnly):
+        raise RuntimeError("Failed to open file for writing")
+    stream = QTextStream(hrox_file)
+    doc.save(stream, 1)
+    hrox_file.close()
+
+
+def _set_hrox_project_knobs(doc, **knobs):
+    # set attributes to Project Tag
+    proj_elem = doc.documentElement().firstChildElement("Project")
+    for k, v in knobs.items():
+        proj_elem.setAttribute(k, v)
+
+
+def apply_colorspace_project():
+    # get path to the active project
+    project = get_current_project(remove_untitled=True)
+    current_file = project.path()
+
+    # close the active project
+    project.close()
+
+    # get presets for hiero
+    presets = config.get_init_presets()
+    colorspace = presets["colorspace"]
+    hiero_project_clrs = colorspace.get("hiero", {}).get("project", {})
+
+    # save the workfile as subversion "comment:_colorspaceChange"
+    split_current_file = os.path.splitext(current_file)
+    copy_current_file = current_file
+
+    if "_colorspaceChange" not in current_file:
+        copy_current_file = (
+            split_current_file[0]
+            + "_colorspaceChange"
+            + split_current_file[1]
+        )
+
+    try:
+        # duplicate the file so the changes are applied only to the copy
+        shutil.copyfile(current_file, copy_current_file)
+    except shutil.Error:
+        # in case the file already exists and we want to copy to the
+        # same file we need to do this trick
+        # TEMP file name change
+        copy_current_file_tmp = copy_current_file + "_tmp"
+        # create TEMP file
+        shutil.copyfile(current_file, copy_current_file_tmp)
+        # remove original file
+        os.remove(current_file)
+        # copy TEMP back to original name
+        shutil.copyfile(copy_current_file_tmp, copy_current_file)
+        # remove the TEMP file as we don't need it
+        os.remove(copy_current_file_tmp)
+
+    # use the code below for changing xml hrox attributes
+    hiero_project_clrs.update({"name": os.path.basename(copy_current_file)})
+
+    # read HROX in as QDomDocument
+    doc = _read_doc_from_path(copy_current_file)
+
+    # apply project colorspace properties
+    _set_hrox_project_knobs(doc, **hiero_project_clrs)
+
+    # write QDomDocument back as HROX
+    _write_doc_to_path(doc, copy_current_file)
+
+    # open the file as current project
+    hiero.core.openProject(copy_current_file)
+
+
+def apply_colorspace_clips():
+    project = get_current_project(remove_untitled=True)
+    clips = project.clips()
+
+    # get presets for hiero
+    presets = config.get_init_presets()
+    colorspace = presets["colorspace"]
+    hiero_clips_clrs = colorspace.get("hiero", {}).get("clips", {})
+
+    for clip in clips:
+        clip_media_source_path = clip.mediaSource().firstpath()
+        clip_name = clip.name()
+        clip_colorspace = clip.sourceMediaColourTransform()
+
+        if "default" in clip_colorspace:
+            continue
+
+        # check if any colorspace preset for read is matching
+        preset_clrsp = next((hiero_clips_clrs[k]
+                             for k in hiero_clips_clrs
+                             if bool(re.search(k, clip_media_source_path))),
+                            None)
+
+        if preset_clrsp:
+            log.debug("Changing clip.path: {}".format(clip_media_source_path))
+            log.info("Changing clip `{}` colorspace {} to {}".format(
+                clip_name, clip_colorspace, preset_clrsp))
+            # set the found preset to the clip
+            clip.setSourceMediaColourTransform(preset_clrsp)
+
+    # save project after all is changed
+    project.save()
diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py
index 7a6d142a10..782775253e 100644
--- a/pype/hosts/resolve/otio.py
+++ b/pype/hosts/resolve/otio.py
@@ -1,5 +1,6 @@ +import json import opentimelineio as otio - +from . import lib TRACK_TYPES = { "video": otio.schema.TrackKind.Video, @@ -7,75 +8,85 @@ TRACK_TYPES = { } -def create_rational_time(frame, fps): +def create_otio_rational_time(frame, fps): return otio.opentime.RationalTime( float(frame), float(fps) ) -def create_time_range(start_frame, frame_duration, fps): +def create_otio_time_range(start_frame, frame_duration, fps): return otio.opentime.TimeRange( - start_time=create_rational_time(start_frame, fps), - duration=create_rational_time(frame_duration, fps) + start_time=create_otio_rational_time(start_frame, fps), + duration=create_otio_rational_time(frame_duration, fps) ) -def create_reference(media_pool_item): +def create_otio_reference(media_pool_item): + path = media_pool_item.GetClipProperty( + "File Path").get("File Path") + reformat_path = lib.get_reformated_path(path, padded=False) + frame_start = int(media_pool_item.GetClipProperty( + "Start").get("Start")) + frame_duration = int(media_pool_item.GetClipProperty( + "Frames").get("Frames")) + fps = media_pool_item.GetClipProperty("FPS").get("FPS") + return otio.schema.ExternalReference( - target_url=media_pool_item.GetClipProperty( - "File Path").get("File Path"), - available_range=create_time_range( - media_pool_item.GetClipProperty("Start").get("Start"), - media_pool_item.GetClipProperty("Frames").get("Frames"), - media_pool_item.GetClipProperty("FPS").get("FPS") + target_url=reformat_path, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps ) ) -def create_markers(track_item, frame_rate): +def create_otio_markers(track_item, frame_rate): track_item_markers = track_item.GetMarkers() markers = [] for marker_frame in track_item_markers: + note = track_item_markers[marker_frame]["note"] + if "{" in note and "}" in note: + metadata = json.loads(note) + else: + metadata = {"note": note} markers.append( otio.schema.Marker( name=track_item_markers[marker_frame]["name"], - marked_range=create_time_range( + marked_range=create_otio_time_range( marker_frame, track_item_markers[marker_frame]["duration"], frame_rate ), color=track_item_markers[marker_frame]["color"].upper(), - metadata={ - "Resolve": { - "note": track_item_markers[marker_frame]["note"] - } - } + metadata=metadata ) ) return markers -def create_clip(track_item): +def create_otio_clip(track_item): media_pool_item = track_item.GetMediaPoolItem() frame_rate = media_pool_item.GetClipProperty("FPS").get("FPS") + name = lib.get_reformated_path(track_item.GetName()) clip = otio.schema.Clip( - name=track_item.GetName(), - source_range=create_time_range( - track_item.GetLeftOffset(), - track_item.GetDuration(), + name=name, + source_range=create_otio_time_range( + int(track_item.GetLeftOffset()), + int(track_item.GetDuration()), frame_rate ), - media_reference=create_reference(media_pool_item) + media_reference=create_otio_reference(media_pool_item) ) - for marker in create_markers(track_item, frame_rate): + for marker in create_otio_markers(track_item, frame_rate): clip.markers.append(marker) return clip -def create_gap(gap_start, clip_start, tl_start_frame, frame_rate): +def create_otio_gap(gap_start, clip_start, tl_start_frame, frame_rate): return otio.schema.Gap( - source_range=create_time_range( + source_range=create_otio_time_range( gap_start, (clip_start - tl_start_frame) - gap_start, frame_rate @@ -83,23 +94,30 @@ def create_gap(gap_start, clip_start, tl_start_frame, frame_rate): ) -def create_timeline(timeline): - return 
otio.schema.Timeline(name=timeline.GetName()) +def create_otio_timeline(timeline, fps): + start_time = create_otio_rational_time( + timeline.GetStartFrame(), fps) + otio_timeline = otio.schema.Timeline( + name=timeline.GetName(), + global_start_time=start_time + ) + return otio_timeline -def create_track(track_type, track_name): +def create_otio_track(track_type, track_name): return otio.schema.Track( name=track_name, kind=TRACK_TYPES[track_type] ) -def create_complete_otio_timeline(project): +def get_otio_complete_timeline(project): # get current timeline timeline = project.GetCurrentTimeline() + fps = project.GetSetting("timelineFrameRate") # convert timeline to otio - otio_timeline = create_timeline(timeline) + otio_timeline = create_otio_timeline(timeline, fps) # loop all defined track types for track_type in list(TRACK_TYPES.keys()): @@ -112,7 +130,7 @@ def create_complete_otio_timeline(project): track_name = timeline.GetTrackName(track_type, track_index) # convert track to otio - otio_track = create_track( + otio_track = create_otio_track( track_type, "{}{}".format(track_name, track_index)) # get all track items in current track @@ -132,7 +150,7 @@ def create_complete_otio_timeline(project): if clip_start > otio_track.available_range().duration.value: # create gap and add it to track otio_track.append( - create_gap( + create_otio_gap( otio_track.available_range().duration.value, track_item.GetStart(), timeline.GetStartFrame(), @@ -141,13 +159,13 @@ def create_complete_otio_timeline(project): ) # create otio clip and add it to track - otio_track.append(create_clip(track_item)) + otio_track.append(create_otio_clip(track_item)) # add track to otio timeline otio_timeline.tracks.append(otio_track) -def get_clip_with_parents(track_item_data): +def get_otio_clip_instance_data(track_item_data): """ Return otio objects for timeline, track and clip @@ -161,19 +179,26 @@ def get_clip_with_parents(track_item_data): """ track_item = track_item_data["clip"]["item"] - timeline = track_item_data["timeline"] + project = track_item_data["project"] + timeline = track_item_data["sequence"] track_type = track_item_data["track"]["type"] track_name = track_item_data["track"]["name"] track_index = track_item_data["track"]["index"] + frame_start = track_item.GetStart() + frame_duration = track_item.GetDuration() + project_fps = project.GetSetting("timelineFrameRate") + + otio_clip_range = create_otio_time_range( + frame_start, frame_duration, project_fps) # convert timeline to otio - otio_timeline = create_timeline(timeline) + otio_timeline = create_otio_timeline(timeline, project_fps) # convert track to otio - otio_track = create_track( + otio_track = create_otio_track( track_type, "{}{}".format(track_name, track_index)) # create otio clip - otio_clip = create_clip(track_item) + otio_clip = create_otio_clip(track_item) # add it to track otio_track.append(otio_clip) @@ -184,9 +209,10 @@ def get_clip_with_parents(track_item_data): return { "otioTimeline": otio_timeline, "otioTrack": otio_track, - "otioClip": otio_clip + "otioClip": otio_clip, + "otioClipRange": otio_clip_range } -def save(otio_timeline, path): +def save_otio(otio_timeline, path): otio.adapters.write_to_file(otio_timeline, path) diff --git a/pype/hosts/resolve/pipeline_hiero.py b/pype/hosts/resolve/pipeline_hiero.py new file mode 100644 index 0000000000..73025e790f --- /dev/null +++ b/pype/hosts/resolve/pipeline_hiero.py @@ -0,0 +1,302 @@ +""" +Basic avalon integration +""" +import os +import contextlib +from collections import OrderedDict 
+from avalon.tools import (
+    workfiles,
+    publish as _publish
+)
+from avalon.pipeline import AVALON_CONTAINER_ID
+from avalon import api as avalon
+from avalon import schema
+from pyblish import api as pyblish
+import pype
+from pype.api import Logger
+
+from . import lib, menu, events
+
+log = Logger().get_logger(__name__, "hiero")
+
+AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
+
+# plugin paths
+LOAD_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "load")
+CREATE_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "create")
+INVENTORY_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "inventory")
+
+PUBLISH_PATH = os.path.join(
+    pype.PLUGINS_DIR, "hiero", "publish"
+).replace("\\", "/")
+
+AVALON_CONTAINERS = ":AVALON_CONTAINERS"
+
+
+def install():
+    """
+    Installing Hiero integration for avalon
+
+    Args:
+        config (obj): avalon config module `pype` in our case; it is not
+            used but is required by avalon.api.install()
+
+    """
+
+    # adding all events
+    events.register_events()
+
+    log.info("Registering Hiero plug-ins..")
+    pyblish.register_host("hiero")
+    pyblish.register_plugin_path(PUBLISH_PATH)
+    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
+    avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
+    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
+
+    # register callback for switching publishable
+    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
+
+    # Disable all families except for the ones we explicitly want to see
+    family_states = [
+        "write",
+        "review",
+        "plate"
+    ]
+
+    avalon.data["familiesStateDefault"] = False
+    avalon.data["familiesStateToggled"] = family_states
+
+    # install menu
+    menu.menu_install()
+
+    # register hiero events
+    events.register_hiero_events()
+
+
+def uninstall():
+    """
+    Uninstalling Hiero integration for avalon
+
+    """
+    log.info("Deregistering Hiero plug-ins..")
+    pyblish.deregister_host("hiero")
+    pyblish.deregister_plugin_path(PUBLISH_PATH)
+    avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
+    avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
+
+    # deregister callback for switching publishable
+    pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
+
+
+def containerise(track_item,
+                 name,
+                 namespace,
+                 context,
+                 loader=None,
+                 data=None):
+    """Bundle Hiero's object into an assembly and imprint it with metadata
+
+    Containerisation enables tracking of version, author and origin
+    for loaded assets.
+
+    Arguments:
+        track_item (hiero.core.TrackItem): object to imprint as container
+        name (str): Name of resulting assembly
+        namespace (str): Namespace under which to host container
+        context (dict): Asset information
+        loader (str, optional): Name of node used to produce this container.
+
+    Returns:
+        track_item (hiero.core.TrackItem): containerised object
+
+    """
+
+    data_imprint = OrderedDict({
+        "schema": "avalon-core:container-2.0",
+        "id": AVALON_CONTAINER_ID,
+        "name": str(name),
+        "namespace": str(namespace),
+        "loader": str(loader),
+        "representation": str(context["representation"]["_id"]),
+    })
+
+    if data:
+        for k, v in data.items():
+            data_imprint.update({k: v})
+
+    log.debug("_ data_imprint: {}".format(data_imprint))
+    lib.set_track_item_pype_tag(track_item, data_imprint)
+
+    return track_item
+
+
+def ls():
+    """List available containers.
+
+    This function is used by the Container Manager in Hiero. You'll
+    need to implement a for-loop that then *yields* one Container at
+    a time.
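+
+    Example (illustrative):
+        >>> for container in ls():
+        ...     print(container["name"], container["representation"])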
+
+    See the `container.json` schema for details on how it should look,
+    and the Maya equivalent, which is in `avalon.maya.pipeline`
+    """
+
+    # get all track items from current timeline
+    all_track_items = lib.get_track_items()
+
+    for track_item in all_track_items:
+        container = parse_container(track_item)
+        if container:
+            yield container
+
+
+def parse_container(track_item, validate=True):
+    """Return container data from track_item's pype tag.
+
+    Args:
+        track_item (hiero.core.TrackItem): A containerised track item.
+        validate (bool)[optional]: validate against the avalon schema
+
+    Returns:
+        dict: The container schema data for input containerized track item.
+
+    """
+    # convert tag metadata to normal key names
+    data = lib.get_track_item_pype_data(track_item)
+
+    if validate and data and data.get("schema"):
+        schema.validate(data)
+
+    if not isinstance(data, dict):
+        return
+
+    # if any required key is missing then return None (not a container)
+    required = ['schema', 'id', 'name',
+                'namespace', 'loader', 'representation']
+
+    if not all(key in data for key in required):
+        return
+
+    container = {key: data[key] for key in required}
+
+    container["objectName"] = track_item.name()
+
+    # Store reference to the node object
+    container["_track_item"] = track_item
+
+    return container
+
+
+def update_container(track_item, data=None):
+    """Update container data to input track_item's pype tag.
+
+    Args:
+        track_item (hiero.core.TrackItem): A containerised track item.
+        data (dict)[optional]: dictionary with data to be updated
+
+    Returns:
+        bool: True if container was updated correctly
+
+    """
+    data = data or dict()
+
+    container = lib.get_track_item_pype_data(track_item)
+
+    for _key, _value in container.items():
+        try:
+            container[_key] = data[_key]
+        except KeyError:
+            pass
+
+    log.info("Updating container: `{}`".format(track_item.name()))
+    return bool(lib.set_track_item_pype_tag(track_item, container))
+
+
+def launch_workfiles_app(*args):
+    ''' Wrapping function for workfiles launcher '''
+
+    workdir = os.environ["AVALON_WORKDIR"]
+
+    # show workfile gui
+    workfiles.show(workdir)
+
+
+def publish(parent):
+    """Shorthand to publish from within host"""
+    return _publish.show(parent)
+
+
+@contextlib.contextmanager
+def maintained_selection():
+    """Maintain selection during context
+
+    Example:
+        >>> with maintained_selection():
+        ...     for track_item in track_items:
+        ...         < do some stuff >
+    """
+    from .lib import (
+        set_selected_track_items,
+        get_selected_track_items
+    )
+    previous_selection = get_selected_track_items()
+    reset_selection()
+    try:
+        # do the operation
+        yield
+    finally:
+        reset_selection()
+        set_selected_track_items(previous_selection)
+
+
+def reset_selection():
+    """Deselect all selected nodes
+    """
+    from .lib import set_selected_track_items
+    set_selected_track_items([])
+
+
+def reload_config():
+    """Attempt to reload pipeline at run-time.
+
+    CAUTION: This is primarily for development and debugging purposes.
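+
+    Usage (illustrative):
+        >>> reload_config()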
    """
    import importlib

    for module in (
        "avalon",
        "avalon.lib",
        "avalon.pipeline",
        "pyblish",
        "pypeapp",
        "{}.api".format(AVALON_CONFIG),
        "{}.hosts.hiero.lib".format(AVALON_CONFIG),
        "{}.hosts.hiero.menu".format(AVALON_CONFIG),
        "{}.hosts.hiero.tags".format(AVALON_CONFIG)
    ):
        log.info("Reloading module: {}...".format(module))
        try:
            # import the module fresh, then reload it in place
            module = importlib.import_module(module)
            importlib.reload(module)
        except Exception as e:
            log.warning("Cannot reload module: {}".format(e))


def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value: {}".format(
        instance, old_value, new_value))

    from pype.hosts.hiero import (
        get_track_item_pype_tag,
        set_publish_attribute
    )

    # whether instances should be passthrough based on new value
    track_item = instance.data["item"]
    tag = get_track_item_pype_tag(track_item)
    set_publish_attribute(tag, new_value)
diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py
index be666358ae..1b7e6fc051 100644
--- a/pype/hosts/resolve/plugin.py
+++ b/pype/hosts/resolve/plugin.py
@@ -150,7 +150,10 @@ class CreatorWidget(QtWidgets.QDialog):
         for func, val in kwargs.items():
             if getattr(item, func):
                 func_attr = getattr(item, func)
-                func_attr(val)
+                if isinstance(val, tuple):
+                    func_attr(*val)
+                else:
+                    func_attr(val)
 
         # add to layout
         layout.addRow(label, item)
@@ -253,7 +256,9 @@ class CreatorWidget(QtWidgets.QDialog):
             elif v["type"] == "QSpinBox":
                 data[k]["value"] = self.create_row(
                     content_layout, "QSpinBox", v["label"],
-                    setValue=v["value"], setMaximum=10000, setToolTip=tool_tip)
+                    setRange=(1, 99999),
+                    setValue=v["value"],
+                    setToolTip=tool_tip)
         return data
 
 
diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py
index b8c929f3d6..7f874a3281 100644
--- a/pype/plugins/resolve/publish/collect_instances.py
+++ b/pype/plugins/resolve/publish/collect_instances.py
@@ -40,12 +40,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
             clip_property = media_pool_item.GetClipProperty()
             self.log.debug(f"clip_property: {clip_property}")
 
-            source_path = os.path.normpath(
-                clip_property["File Path"])
-            source_name = clip_property["File Name"]
-            self.log.debug(f"source_path: {source_path}")
-            self.log.debug(f"source_name: {source_name}")
-
             # add tag data to instance data
             data.update({
                 k: v for k, v in tag_data.items()
@@ -61,14 +55,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
             families = [str(f) for f in tag_data["families"]]
             families.insert(0, str(family))
 
-            track = track_item_data["track"]["name"]
-            base_name = os.path.basename(source_path)
-            file_head = os.path.splitext(base_name)[0]
-            source_first_frame = int(
-                track_item.GetStart() -
-                track_item.GetLeftOffset()
-            )
-
             # apply only for review and master track instance
             if review:
                 families += ["review", "ftrack"]
@@ -81,17 +67,21 @@ class CollectInstances(pyblish.api.ContextPlugin):
                 "publish": resolve.get_publish_attribute(track_item),
                 # tags
                 "tags": tag_data,
-
-                # track item attributes
-                "track": track,
-
-                # source attribute
-                "source": source_path,
-                "sourcePath": source_path,
-                "sourceFileHead": file_head,
-                "sourceFirst": source_first_frame,
             })
 
+            # otio
+            otio_data = resolve.get_otio_clip_instance_data(track_item_data)
+            data.update(otio_data)
+
+            file_name = "".join([asset, "_", subset, ".otio"])
+            file_dir = 
os.path.dirname(context.data["currentFile"]) + file_path = os.path.join(file_dir, "otio", file_name) + + resolve.save_otio(otio_data["otioTimeline"], file_path) + + # create instance instance = context.create_instance(**data) self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) diff --git a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py index f506333a67..ddb57def00 100644 --- a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py +++ b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py @@ -202,7 +202,8 @@ marker_color_map = { "PURPLE": "Magenta", "MAGENTA": "Magenta", "BLACK": "Blue", - "WHITE": "Green" + "WHITE": "Green", + "MINT": "Cyan" } @@ -259,7 +260,7 @@ def add_markers(otio_item, hiero_item, tagsbin): marker.marked_range.duration.value ) - tag = hiero_item.addTagToRange(_tag, start, end) + tag = hiero_item.addTag(_tag) tag.setName(marker.name or marker_color_map[marker_color]) # Add metadata @@ -285,7 +286,7 @@ def create_track(otio_track, tracknum, track_kind): return track -def create_clip(otio_clip, tagsbin): +def create_clip(otio_clip): # Create MediaSource otio_media = otio_clip.media_reference if isinstance(otio_media, otio.schema.ExternalReference): @@ -300,13 +301,10 @@ def create_clip(otio_clip, tagsbin): # Create Clip clip = hiero.core.Clip(media) - # Add markers - add_markers(otio_clip, clip, tagsbin) - return clip -def create_trackitem(playhead, track, otio_clip, clip): +def create_trackitem(playhead, track, otio_clip, clip, tagsbin): source_range = otio_clip.source_range trackitem = track.createTrackItem(otio_clip.name) @@ -352,6 +350,9 @@ def create_trackitem(playhead, track, otio_clip, clip): trackitem.setTimelineIn(timeline_in) trackitem.setTimelineOut(timeline_out) + # Add markers + add_markers(otio_clip, trackitem, tagsbin) + return trackitem @@ -362,6 +363,10 @@ def build_sequence(otio_timeline, project=None, track_kind=None): # Create a Sequence sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') + global_start_time = otio_timeline.global_start_time.value + global_rate = otio_timeline.global_start_time.rate + sequence.setFramerate(global_rate) + sequence.setTimecodeStart(global_start_time) # Create a Bin to hold clips projectbin = project.clipsBin() @@ -403,7 +408,7 @@ def build_sequence(otio_timeline, project=None, track_kind=None): elif isinstance(otio_clip, otio.schema.Clip): # Create a Clip - clip = create_clip(otio_clip, tagsbin) + clip = create_clip(otio_clip) # Add Clip to a Bin sequencebin.addItem(hiero.core.BinItem(clip)) @@ -413,7 +418,8 @@ def build_sequence(otio_timeline, project=None, track_kind=None): playhead, track, otio_clip, - clip + clip, + tagsbin ) # Add trackitem to track From 6e618c6f10f9070384ea023e87f8939f41217fec Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 3 Dec 2020 13:16:22 +0100 Subject: [PATCH 015/198] feat(resolve): adding before if timeline start lower then clip start --- pype/hosts/resolve/otio.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py index 782775253e..71cdbcc97b 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio.py @@ -185,6 +185,7 @@ def get_otio_clip_instance_data(track_item_data): track_name = track_item_data["track"]["name"] track_index = track_item_data["track"]["index"] + 
timeline_start = timeline.GetStartFrame() frame_start = track_item.GetStart() frame_duration = track_item.GetDuration() project_fps = project.GetSetting("timelineFrameRate") @@ -197,6 +198,19 @@ def get_otio_clip_instance_data(track_item_data): otio_track = create_otio_track( track_type, "{}{}".format(track_name, track_index)) + # add gap if track item is not starting from timeline start + # if gap between track start and clip start + if frame_start > timeline_start: + # create gap and add it to track + otio_track.append( + create_otio_gap( + 0, + frame_start, + timeline_start, + project_fps + ) + ) + # create otio clip otio_clip = create_otio_clip(track_item) From e94523f1c192da9a69257c2fd54ab59aa92db82b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 3 Dec 2020 18:15:11 +0100 Subject: [PATCH 016/198] feat(resolve): otio export full timeline fix(heiro): import otio timeline start and rate --- pype/hosts/resolve/otio.py | 141 +++++++++++++----- .../resolve/publish/collect_workfile.py | 19 ++- .../StartupUI/otioimporter/OTIOImport.py | 10 +- 3 files changed, 122 insertions(+), 48 deletions(-) diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py index 71cdbcc97b..c1dab6defe 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio.py @@ -8,6 +8,17 @@ TRACK_TYPES = { } +def timecode_to_frames(timecode, framerate): + parts = zip(( + 3600 * framerate, + 60 * framerate, + framerate, 1 + ), timecode.split(":")) + return sum( + f * int(t) for f, t in parts + ) + + def create_otio_rational_time(frame, fps): return otio.opentime.RationalTime( float(frame), @@ -23,14 +34,21 @@ def create_otio_time_range(start_frame, frame_duration, fps): def create_otio_reference(media_pool_item): - path = media_pool_item.GetClipProperty( - "File Path").get("File Path") + mp_clip_property = media_pool_item.GetClipProperty() + path = mp_clip_property["File Path"] reformat_path = lib.get_reformated_path(path, padded=False) - frame_start = int(media_pool_item.GetClipProperty( - "Start").get("Start")) - frame_duration = int(media_pool_item.GetClipProperty( - "Frames").get("Frames")) - fps = media_pool_item.GetClipProperty("FPS").get("FPS") + + # get clip property regarding to type + mp_clip_property = media_pool_item.GetClipProperty() + fps = mp_clip_property["FPS"] + if mp_clip_property["Type"] == "Video": + frame_start = int(mp_clip_property["Start"]) + frame_duration = int(mp_clip_property["Frames"]) + else: + audio_duration = str(mp_clip_property["Duration"]) + frame_start = 0 + frame_duration = int(timecode_to_frames( + audio_duration, float(fps))) return otio.schema.ExternalReference( target_url=reformat_path, @@ -42,7 +60,7 @@ def create_otio_reference(media_pool_item): ) -def create_otio_markers(track_item, frame_rate): +def create_otio_markers(track_item, fps): track_item_markers = track_item.GetMarkers() markers = [] for marker_frame in track_item_markers: @@ -57,7 +75,7 @@ def create_otio_markers(track_item, frame_rate): marked_range=create_otio_time_range( marker_frame, track_item_markers[marker_frame]["duration"], - frame_rate + fps ), color=track_item_markers[marker_frame]["color"].upper(), metadata=metadata @@ -68,28 +86,48 @@ def create_otio_markers(track_item, frame_rate): def create_otio_clip(track_item): media_pool_item = track_item.GetMediaPoolItem() - frame_rate = media_pool_item.GetClipProperty("FPS").get("FPS") + mp_clip_property = media_pool_item.GetClipProperty() + fps = mp_clip_property["FPS"] name = lib.get_reformated_path(track_item.GetName()) - clip = 
otio.schema.Clip( - name=name, - source_range=create_otio_time_range( - int(track_item.GetLeftOffset()), - int(track_item.GetDuration()), - frame_rate - ), - media_reference=create_otio_reference(media_pool_item) + + media_reference = create_otio_reference(media_pool_item) + source_range = create_otio_time_range( + int(track_item.GetLeftOffset()), + int(track_item.GetDuration()), + fps ) - for marker in create_otio_markers(track_item, frame_rate): - clip.markers.append(marker) - return clip + + if mp_clip_property["Type"] == "Audio": + return_clips = list() + audio_chanels = mp_clip_property["Audio Ch"] + for channel in range(0, int(audio_chanels)): + clip = otio.schema.Clip( + name=f"{name}_{channel}", + source_range=source_range, + media_reference=media_reference + ) + for marker in create_otio_markers(track_item, fps): + clip.markers.append(marker) + return_clips.append(clip) + return return_clips + else: + clip = otio.schema.Clip( + name=name, + source_range=source_range, + media_reference=media_reference + ) + for marker in create_otio_markers(track_item, fps): + clip.markers.append(marker) + + return clip -def create_otio_gap(gap_start, clip_start, tl_start_frame, frame_rate): +def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): return otio.schema.Gap( source_range=create_otio_time_range( gap_start, (clip_start - tl_start_frame) - gap_start, - frame_rate + fps ) ) @@ -111,6 +149,20 @@ def create_otio_track(track_type, track_name): ) +def add_otio_gap(clip_start, otio_track, track_item, timeline, project): + # if gap between track start and clip start + if clip_start > otio_track.available_range().duration.value: + # create gap and add it to track + otio_track.append( + create_otio_gap( + otio_track.available_range().duration.value, + track_item.GetStart(), + timeline.GetStartFrame(), + project.GetSetting("timelineFrameRate") + ) + ) + + def get_otio_complete_timeline(project): # get current timeline timeline = project.GetCurrentTimeline() @@ -131,7 +183,7 @@ def get_otio_complete_timeline(project): # convert track to otio otio_track = create_otio_track( - track_type, "{}{}".format(track_name, track_index)) + track_type, track_name) # get all track items in current track current_track_items = timeline.GetItemListInTrack( @@ -146,24 +198,34 @@ def get_otio_complete_timeline(project): # calculate real clip start clip_start = track_item.GetStart() - timeline.GetStartFrame() - # if gap between track start and clip start - if clip_start > otio_track.available_range().duration.value: - # create gap and add it to track - otio_track.append( - create_otio_gap( - otio_track.available_range().duration.value, - track_item.GetStart(), - timeline.GetStartFrame(), - project.GetSetting("timelineFrameRate") - ) - ) + add_otio_gap( + clip_start, otio_track, track_item, timeline, project) # create otio clip and add it to track - otio_track.append(create_otio_clip(track_item)) + otio_clip = create_otio_clip(track_item) + + if not isinstance(otio_clip, list): + otio_track.append(otio_clip) + else: + for index, clip in enumerate(otio_clip): + if index == 0: + otio_track.append(clip) + else: + # add previouse otio track to timeline + otio_timeline.tracks.append(otio_track) + # convert track to otio + otio_track = create_otio_track( + track_type, track_name) + add_otio_gap( + clip_start, otio_track, + track_item, timeline, project) + otio_track.append(clip) # add track to otio timeline otio_timeline.tracks.append(otio_track) + return otio_timeline + def 
get_otio_clip_instance_data(track_item_data): """ @@ -211,19 +273,16 @@ def get_otio_clip_instance_data(track_item_data): ) ) - # create otio clip + # create otio clip and add it to track otio_clip = create_otio_clip(track_item) - # add it to track - otio_track.append(otio_clip) - # add track to otio timeline otio_timeline.tracks.append(otio_track) return { "otioTimeline": otio_timeline, "otioTrack": otio_track, - "otioClip": otio_clip, + "otioClips": otio_clip, "otioClipRange": otio_clip_range } diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py index d1b45117c9..cbbb1936c6 100644 --- a/pype/plugins/resolve/publish/collect_workfile.py +++ b/pype/plugins/resolve/publish/collect_workfile.py @@ -4,6 +4,11 @@ from pype.hosts import resolve from avalon import api as avalon from pprint import pformat +# dev +from importlib import reload +from pype.hosts.resolve import otio +reload(otio) + class CollectWorkfile(pyblish.api.ContextPlugin): """Inject the current working file into context""" @@ -21,6 +26,9 @@ class CollectWorkfile(pyblish.api.ContextPlugin): name = project.GetName() fps = project.GetSetting("timelineFrameRate") + # adding otio timeline to context + otio_timeline = resolve.get_otio_complete_timeline(project) + base_name = name + exported_projet_ext current_file = os.path.join(staging_dir, base_name) current_file = os.path.normpath(current_file) @@ -29,13 +37,16 @@ class CollectWorkfile(pyblish.api.ContextPlugin): video_tracks = resolve.get_video_track_names() # set main project attributes to context - context.data.update({ + context_data = { "activeProject": project, "activeSequence": active_sequence, + "otioTimeline": otio_timeline, "videoTracks": video_tracks, "currentFile": current_file, "fps": fps, - }) + } + self.log.debug("__ context_data: {}".format(pformat(context_data))) + context.data.update(context_data) # creating workfile representation representation = { @@ -60,3 +71,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): instance = context.create_instance(**instance_data) self.log.info("Creating instance: {}".format(instance)) self.log.debug("__ instance.data: {}".format(pformat(instance.data))) + + file_name = "".join([asset, "_", subset, ".otio"]) + file_path = os.path.join(staging_dir, file_name) + resolve.save_otio(otio_timeline, file_path) diff --git a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py index ddb57def00..8884ecf806 100644 --- a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py +++ b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py @@ -363,10 +363,6 @@ def build_sequence(otio_timeline, project=None, track_kind=None): # Create a Sequence sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') - global_start_time = otio_timeline.global_start_time.value - global_rate = otio_timeline.global_start_time.rate - sequence.setFramerate(global_rate) - sequence.setTimecodeStart(global_start_time) # Create a Bin to hold clips projectbin = project.clipsBin() @@ -380,7 +376,11 @@ def build_sequence(otio_timeline, project=None, track_kind=None): # Add timeline markers add_markers(otio_timeline, sequence, tagsbin) - # TODO: Set sequence settings from otio timeline if available + # add sequence attributes form otio timeline + if otio_timeline.global_start_time: + sequence.setFramerate(otio_timeline.global_start_time.rate) + 
sequence.setTimecodeStart(otio_timeline.global_start_time.value) + if isinstance(otio_timeline, otio.schema.Timeline): tracks = otio_timeline.tracks From adcbf74f542be7bcd047cc7d71b3298464818911 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 3 Dec 2020 18:15:22 +0100 Subject: [PATCH 017/198] feat(resolve): wip rendering --- pype/hosts/resolve/rendering.py | 111 ++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 pype/hosts/resolve/rendering.py diff --git a/pype/hosts/resolve/rendering.py b/pype/hosts/resolve/rendering.py new file mode 100644 index 0000000000..e38466e5d4 --- /dev/null +++ b/pype/hosts/resolve/rendering.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +""" +Example DaVinci Resolve script: +Load a still from DRX file, apply the still to all clips in all timelines. Set render format and codec, add render jobs for all timelines, render to specified path and wait for rendering completion. +Once render is complete, delete all jobs +""" + +from python_get_resolve import GetResolve +import sys +import time + +def AddTimelineToRender( project, timeline, presetName, targetDirectory, renderFormat, renderCodec ): + project.SetCurrentTimeline(timeline) + project.LoadRenderPreset(presetName) + + if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec): + return False + + project.SetRenderSettings({"SelectAllFrames" : 1, "TargetDir" : targetDirectory}) + return project.AddRenderJob() + +def RenderAllTimelines( resolve, presetName, targetDirectory, renderFormat, renderCodec ): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + if not project: + return False + + resolve.OpenPage("Deliver") + timelineCount = project.GetTimelineCount() + + for index in range (0, int(timelineCount)): + if not AddTimelineToRender(project, project.GetTimelineByIndex(index + 1), presetName, targetDirectory, renderFormat, renderCodec): + return False + return project.StartRendering() + +def IsRenderingInProgress( resolve ): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + if not project: + return False + + return project.IsRenderingInProgress() + +def WaitForRenderingCompletion( resolve ): + while IsRenderingInProgress(resolve): + time.sleep(1) + return + +def ApplyDRXToAllTimelineClips( timeline, path, gradeMode = 0 ): + trackCount = timeline.GetTrackCount("video") + + clips = {} + for index in range (1, int(trackCount) + 1): + clips.update( timeline.GetItemsInTrack("video", index) ) + return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips) + +def ApplyDRXToAllTimelines( resolve, path, gradeMode = 0 ): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + if not project: + return False + timelineCount = project.GetTimelineCount() + + for index in range (0, int(timelineCount)): + timeline = project.GetTimelineByIndex(index + 1) + project.SetCurrentTimeline( timeline ) + if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode): + return False + return True + +def DeleteAllRenderJobs( resolve ): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + project.DeleteAllRenderJobs() + return + +# Inputs: +# - DRX file to import grade still and apply it for clips +# - grade mode (0, 1 or 2) +# - preset name for rendering +# - render path +# - render format +# - render codec +if len(sys.argv) < 7: + print("input parameters for scripts are [drx file path] [grade mode] [render preset 
name] [render path] [render format] [render codec]") + sys.exit() + +drxPath = sys.argv[1] +gradeMode = sys.argv[2] +renderPresetName = sys.argv[3] +renderPath = sys.argv[4] +renderFormat = sys.argv[5] +renderCodec = sys.argv[6] + +# Get currently open project +resolve = GetResolve() + +if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode): + print("Unable to apply a still from drx file to all timelines") + sys.exit() + +if not RenderAllTimelines(resolve, renderPresetName, renderPath, renderFormat, renderCodec): + print("Unable to set all timelines for rendering") + sys.exit() + +WaitForRenderingCompletion(resolve) + +DeleteAllRenderJobs(resolve) + +print("Rendering is completed.") From 027f19f9f6524a4898083b704b6422b47585396f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 3 Dec 2020 18:19:53 +0100 Subject: [PATCH 018/198] fix(resolve): clip was not added to track --- pype/hosts/resolve/otio.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py index c1dab6defe..c4de1160c6 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio.py @@ -275,6 +275,7 @@ def get_otio_clip_instance_data(track_item_data): # create otio clip and add it to track otio_clip = create_otio_clip(track_item) + otio_track.append(otio_clip) # add track to otio timeline otio_timeline.tracks.append(otio_track) From 7dfc6a56d8a6343762fc6144e145c11b8ff11fad Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Dec 2020 13:04:33 +0100 Subject: [PATCH 019/198] feat(hiero): update otio export import --- .../Startup/otioexporter/OTIOExportTask.py | 87 ++++++++++++------- .../StartupUI/otioimporter/OTIOImport.py | 45 ++++++---- 2 files changed, 83 insertions(+), 49 deletions(-) diff --git a/setup/hiero/hiero_plugin_path/Python/Startup/otioexporter/OTIOExportTask.py b/setup/hiero/hiero_plugin_path/Python/Startup/otioexporter/OTIOExportTask.py index 77dc9c45b3..90504ccd18 100644 --- a/setup/hiero/hiero_plugin_path/Python/Startup/otioexporter/OTIOExportTask.py +++ b/setup/hiero/hiero_plugin_path/Python/Startup/otioexporter/OTIOExportTask.py @@ -49,6 +49,9 @@ class OTIOExportTask(hiero.core.TaskBase): return str(type(self)) def get_rate(self, item): + if not hasattr(item, 'framerate'): + item = item.sequence() + num, den = item.framerate().toRational() rate = float(num) / float(den) @@ -58,12 +61,12 @@ class OTIOExportTask(hiero.core.TaskBase): return round(rate, 2) def get_clip_ranges(self, trackitem): - # Is clip an audio file? 
Use sequence frame rate - if not trackitem.source().mediaSource().hasVideo(): - rate_item = trackitem.sequence() + # Get rate from source or sequence + if trackitem.source().mediaSource().hasVideo(): + rate_item = trackitem.source() else: - rate_item = trackitem.source() + rate_item = trackitem.sequence() source_rate = self.get_rate(rate_item) @@ -88,9 +91,10 @@ class OTIOExportTask(hiero.core.TaskBase): duration=source_duration ) - available_range = None hiero_clip = trackitem.source() - if not hiero_clip.mediaSource().isOffline(): + + available_range = None + if hiero_clip.mediaSource().isMediaPresent(): start_time = otio.opentime.RationalTime( hiero_clip.mediaSource().startTime(), source_rate @@ -123,7 +127,7 @@ class OTIOExportTask(hiero.core.TaskBase): def get_marker_color(self, tag): icon = tag.icon() - pat = 'icons:Tag(?P\w+)\.\w+' + pat = r'icons:Tag(?P\w+)\.\w+' res = re.search(pat, icon) if res: @@ -155,13 +159,17 @@ class OTIOExportTask(hiero.core.TaskBase): ) ) + metadata = dict( + Hiero=tag.metadata().dict() + ) + # Store the source item for future import assignment + metadata['Hiero']['source_type'] = hiero_item.__class__.__name__ + marker = otio.schema.Marker( name=tag.name(), color=self.get_marker_color(tag), marked_range=marked_range, - metadata={ - 'Hiero': tag.metadata().dict() - } + metadata=metadata ) otio_item.markers.append(marker) @@ -170,37 +178,44 @@ class OTIOExportTask(hiero.core.TaskBase): hiero_clip = trackitem.source() # Add Gap if needed - prev_item = ( - itemindex and trackitem.parent().items()[itemindex - 1] or - trackitem - ) + if itemindex == 0: + prev_item = trackitem - if prev_item == trackitem and trackitem.timelineIn() > 0: + else: + prev_item = trackitem.parent().items()[itemindex - 1] + + clip_diff = trackitem.timelineIn() - prev_item.timelineOut() + + if itemindex == 0 and trackitem.timelineIn() > 0: self.add_gap(trackitem, otio_track, 0) - elif ( - prev_item != trackitem and - prev_item.timelineOut() != trackitem.timelineIn() - ): + elif itemindex and clip_diff != 1: self.add_gap(trackitem, otio_track, prev_item.timelineOut()) # Create Clip source_range, available_range = self.get_clip_ranges(trackitem) - otio_clip = otio.schema.Clip() - otio_clip.name = trackitem.name() - otio_clip.source_range = source_range + otio_clip = otio.schema.Clip( + name=trackitem.name(), + source_range=source_range + ) # Add media reference media_reference = otio.schema.MissingReference() - if not hiero_clip.mediaSource().isOffline(): + if hiero_clip.mediaSource().isMediaPresent(): source = hiero_clip.mediaSource() - media_reference = otio.schema.ExternalReference() - media_reference.available_range = available_range + first_file = source.fileinfos()[0] + path = first_file.filename() - path, name = os.path.split(source.fileinfos()[0].filename()) - media_reference.target_url = os.path.join(path, name) - media_reference.name = name + if "%" in path: + path = re.sub(r"%\d+d", "%d", path) + if "#" in path: + path = re.sub(r"#+", "%d", path) + + media_reference = otio.schema.ExternalReference( + target_url=u'{}'.format(path), + available_range=available_range + ) otio_clip.media_reference = media_reference @@ -218,6 +233,7 @@ class OTIOExportTask(hiero.core.TaskBase): # Add tags as markers if self._preset.properties()["includeTags"]: + self.add_markers(trackitem, otio_clip) self.add_markers(trackitem.source(), otio_clip) otio_track.append(otio_clip) @@ -273,16 +289,16 @@ class OTIOExportTask(hiero.core.TaskBase): name=alignment, # Consider placing Hiero name in metadata 
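             # assumption, per the OTIO schema: in_offset covers the part
             # of the dissolve before the cut point and out_offset the part
             # after it, which is why a fade-in is inserted ahead of its
             # clip below while a fade-out is appended after it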
transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve, in_offset=in_time, - out_offset=out_time, - metadata={} + out_offset=out_time ) if alignment == 'kFadeIn': - otio_track.insert(-2, otio_transition) + otio_track.insert(-1, otio_transition) else: otio_track.append(otio_transition) + def add_tracks(self): for track in self._sequence.items(): if isinstance(track, hiero.core.AudioTrack): @@ -291,8 +307,7 @@ class OTIOExportTask(hiero.core.TaskBase): else: kind = otio.schema.TrackKind.Video - otio_track = otio.schema.Track(kind=kind) - otio_track.name = track.name() + otio_track = otio.schema.Track(name=track.name(), kind=kind) for itemindex, trackitem in enumerate(track): if isinstance(trackitem.source(), hiero.core.Clip): @@ -306,6 +321,12 @@ class OTIOExportTask(hiero.core.TaskBase): def create_OTIO(self): self.otio_timeline = otio.schema.Timeline() + + # Set global start time based on sequence + self.otio_timeline.global_start_time = otio.opentime.RationalTime( + self._sequence.timecodeStart(), + self._sequence.framerate().toFloat() + ) self.otio_timeline.name = self._sequence.name() self.add_tracks() diff --git a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py index 8884ecf806..7efb352ed2 100644 --- a/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py +++ b/setup/hiero/hiero_plugin_path/Python/StartupUI/otioimporter/OTIOImport.py @@ -356,19 +356,38 @@ def create_trackitem(playhead, track, otio_clip, clip, tagsbin): return trackitem -def build_sequence(otio_timeline, project=None, track_kind=None): +def build_sequence( + otio_timeline, project=None, sequence=None, track_kind=None): + if project is None: - # TODO: Find a proper way for active project - project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1] + if sequence: + project = sequence.project() - # Create a Sequence - sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') + else: + # Per version 12.1v2 there is no way of getting active project + project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1] - # Create a Bin to hold clips projectbin = project.clipsBin() - projectbin.addItem(hiero.core.BinItem(sequence)) - sequencebin = hiero.core.Bin(sequence.name()) - projectbin.addItem(sequencebin) + + if not sequence: + # Create a Sequence + sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') + + # Set sequence settings from otio timeline if available + if hasattr(otio_timeline, 'global_start_time'): + if otio_timeline.global_start_time: + start_time = otio_timeline.global_start_time + sequence.setFramerate(start_time.rate) + sequence.setTimecodeStart(start_time.value) + + # Create a Bin to hold clips + projectbin.addItem(hiero.core.BinItem(sequence)) + + sequencebin = hiero.core.Bin(sequence.name()) + projectbin.addItem(sequencebin) + + else: + sequencebin = projectbin # Get tagsBin tagsbin = hiero.core.project("Tag Presets").tagsBin() @@ -376,17 +395,11 @@ def build_sequence(otio_timeline, project=None, track_kind=None): # Add timeline markers add_markers(otio_timeline, sequence, tagsbin) - # add sequence attributes form otio timeline - if otio_timeline.global_start_time: - sequence.setFramerate(otio_timeline.global_start_time.rate) - sequence.setTimecodeStart(otio_timeline.global_start_time.value) - if isinstance(otio_timeline, otio.schema.Timeline): tracks = otio_timeline.tracks else: - # otio.schema.Stack - tracks = otio_timeline + tracks = 
[otio_timeline] for tracknum, otio_track in enumerate(tracks): playhead = 0 From 724c2e902324c609a9eb6bbbb38f46ac5f70a181 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Dec 2020 14:57:54 +0100 Subject: [PATCH 020/198] feat(global): extract otio timeline file --- .../global/publish/extract_otio_file.py | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 pype/plugins/global/publish/extract_otio_file.py diff --git a/pype/plugins/global/publish/extract_otio_file.py b/pype/plugins/global/publish/extract_otio_file.py new file mode 100644 index 0000000000..c93cf34c79 --- /dev/null +++ b/pype/plugins/global/publish/extract_otio_file.py @@ -0,0 +1,41 @@ +import os +import pyblish.api +import pype.api +import opentimelineio as otio + + +class ExtractOTIOFile(pype.api.Extractor): + """ + Extractor export OTIO file + """ + + label = "Extract OTIO file" + order = pyblish.api.ExtractorOrder + families = ["workfile"] + hosts = ["resolve"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + name = instance.data["name"] + staging_dir = self.staging_dir(instance) + + otio_timeline = instance.context.data["otioTimeline"] + # create otio timeline representation + otio_file_name = name + ".otio" + otio_file_path = os.path.join(staging_dir, otio_file_name) + otio.adapters.write_to_file(otio_timeline, otio_file_path) + + representation_otio = { + 'name': "otio", + 'ext': "otio", + 'files': otio_file_name, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(representation_otio) + + self.log.info("Added OTIO file representation: {}".format( + representation_otio)) From eeb635dab57a2a2a88c5964b6475ff16b3dd30cb Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Dec 2020 14:58:34 +0100 Subject: [PATCH 021/198] feat(resove): wip publishing otio editorial --- .../resolve/publish/collect_instances.py | 6 -- .../resolve/publish/collect_workfile.py | 55 ++++++------------- .../resolve/publish/extract_workfile.py | 49 +++++++++++++++++ 3 files changed, 66 insertions(+), 44 deletions(-) create mode 100644 pype/plugins/resolve/publish/extract_workfile.py diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py index 7f874a3281..d8dac70a8f 100644 --- a/pype/plugins/resolve/publish/collect_instances.py +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -73,12 +73,6 @@ class CollectInstances(pyblish.api.ContextPlugin): otio_data = resolve.get_otio_clip_instance_data(track_item_data) data.update(otio_data) - file_name = "".join([asset, "_", subset, ".otio"]) - file_dir = os.path.dirname(context.data["currentFile"]) - file_path = os.path.join(file_dir, "otio", file_name) - - resolve.save_otio(otio_data["otioTimeline"], file_path) - # create instance instance = context.create_instance(**data) diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py index cbbb1936c6..0bd1a24a46 100644 --- a/pype/plugins/resolve/publish/collect_workfile.py +++ b/pype/plugins/resolve/publish/collect_workfile.py @@ -17,61 +17,40 @@ class CollectWorkfile(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.501 def process(self, context): - exported_projet_ext = ".drp" - asset = avalon.Session["AVALON_ASSET"] - staging_dir = os.getenv("AVALON_WORKDIR") - subset = "workfile" + asset = avalon.Session["AVALON_ASSET"] + subset = "workfile" project = 
resolve.get_current_project() - name = project.GetName() fps = project.GetSetting("timelineFrameRate") # adding otio timeline to context otio_timeline = resolve.get_otio_complete_timeline(project) - base_name = name + exported_projet_ext - current_file = os.path.join(staging_dir, base_name) - current_file = os.path.normpath(current_file) - active_sequence = resolve.get_current_sequence() video_tracks = resolve.get_video_track_names() - # set main project attributes to context - context_data = { - "activeProject": project, - "activeSequence": active_sequence, - "otioTimeline": otio_timeline, - "videoTracks": video_tracks, - "currentFile": current_file, - "fps": fps, - } - self.log.debug("__ context_data: {}".format(pformat(context_data))) - context.data.update(context_data) - - # creating workfile representation - representation = { - 'name': exported_projet_ext[1:], - 'ext': exported_projet_ext[1:], - 'files': base_name, - "stagingDir": staging_dir, - } - instance_data = { "name": "{}_{}".format(asset, subset), "asset": asset, "subset": "{}{}".format(asset, subset.capitalize()), "item": project, - "family": "workfile", - - # source attribute - "sourcePath": current_file, - "representations": [representation] + "family": "workfile" } + # create instance with workfile instance = context.create_instance(**instance_data) + + # update context with main project attributes + context_data = { + "activeProject": project, + "activeSequence": active_sequence, + "otioTimeline": otio_timeline, + "videoTracks": video_tracks, + "currentFile": project.GetName(), + "fps": fps, + } + context.data.update(context_data) + self.log.info("Creating instance: {}".format(instance)) self.log.debug("__ instance.data: {}".format(pformat(instance.data))) - - file_name = "".join([asset, "_", subset, ".otio"]) - file_path = os.path.join(staging_dir, file_name) - resolve.save_otio(otio_timeline, file_path) + self.log.debug("__ context_data: {}".format(pformat(context_data))) diff --git a/pype/plugins/resolve/publish/extract_workfile.py b/pype/plugins/resolve/publish/extract_workfile.py new file mode 100644 index 0000000000..a88794841b --- /dev/null +++ b/pype/plugins/resolve/publish/extract_workfile.py @@ -0,0 +1,49 @@ +import os +import pyblish.api +import pype.api +from pype.hosts import resolve + +class ExtractWorkfile(pype.api.Extractor): + """ + Extractor export DRP workfile file representation + """ + + label = "Extract Workfile" + order = pyblish.api.ExtractorOrder + families = ["workfile"] + hosts = ["resolve"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + name = instance.data["name"] + project = instance.context.data["activeProject"] + staging_dir = self.staging_dir(instance) + + resolve_workfile_ext = ".drp" + drp_file_name = name + resolve_workfile_ext + drp_file_path = os.path.normpath( + os.path.join(staging_dir, drp_file_name)) + + # write out the drp workfile + resolve.get_project_manager().ExportProject( + project.GetName(), drp_file_path) + + # create drp workfile representation + representation_drp = { + 'name': resolve_workfile_ext[1:], + 'ext': resolve_workfile_ext[1:], + 'files': drp_file_name, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(representation_drp) + + # add sourcePath attribute to instance + if not instance.data.get("sourcePath"): + instance.data["sourcePath"] = drp_file_path + + self.log.info("Added Resolve file representation: {}".format( + 
representation_drp)) From 882183356a13d567893467ee4e46e8094fd392c1 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Dec 2020 17:47:59 +0100 Subject: [PATCH 022/198] fix(resolve, otio): source range with project fps rather then source --- pype/hosts/resolve/otio.py | 39 +++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio.py index c4de1160c6..acb669196f 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio.py @@ -1,11 +1,15 @@ +import sys import json import opentimelineio as otio from . import lib -TRACK_TYPES = { + +self = sys.modules[__name__] +self.track_types = { "video": otio.schema.TrackKind.Video, "audio": otio.schema.TrackKind.Audio } +self.project_fps = None def timecode_to_frames(timecode, framerate): @@ -87,7 +91,12 @@ def create_otio_markers(track_item, fps): def create_otio_clip(track_item): media_pool_item = track_item.GetMediaPoolItem() mp_clip_property = media_pool_item.GetClipProperty() - fps = mp_clip_property["FPS"] + + if not self.project_fps: + fps = mp_clip_property["FPS"] + else: + fps = self.project_fps + name = lib.get_reformated_path(track_item.GetName()) media_reference = create_otio_reference(media_pool_item) @@ -145,11 +154,11 @@ def create_otio_timeline(timeline, fps): def create_otio_track(track_type, track_name): return otio.schema.Track( name=track_name, - kind=TRACK_TYPES[track_type] + kind=self.track_types[track_type] ) -def add_otio_gap(clip_start, otio_track, track_item, timeline, project): +def add_otio_gap(clip_start, otio_track, track_item, timeline): # if gap between track start and clip start if clip_start > otio_track.available_range().duration.value: # create gap and add it to track @@ -158,7 +167,7 @@ def add_otio_gap(clip_start, otio_track, track_item, timeline, project): otio_track.available_range().duration.value, track_item.GetStart(), timeline.GetStartFrame(), - project.GetSetting("timelineFrameRate") + self.project_fps ) ) @@ -166,13 +175,13 @@ def add_otio_gap(clip_start, otio_track, track_item, timeline, project): def get_otio_complete_timeline(project): # get current timeline timeline = project.GetCurrentTimeline() - fps = project.GetSetting("timelineFrameRate") + self.project_fps = project.GetSetting("timelineFrameRate") # convert timeline to otio - otio_timeline = create_otio_timeline(timeline, fps) + otio_timeline = create_otio_timeline(timeline, self.project_fps) # loop all defined track types - for track_type in list(TRACK_TYPES.keys()): + for track_type in list(self.track_types.keys()): # get total track count track_count = timeline.GetTrackCount(track_type) @@ -199,7 +208,7 @@ def get_otio_complete_timeline(project): clip_start = track_item.GetStart() - timeline.GetStartFrame() add_otio_gap( - clip_start, otio_track, track_item, timeline, project) + clip_start, otio_track, track_item, timeline) # create otio clip and add it to track otio_clip = create_otio_clip(track_item) @@ -218,7 +227,7 @@ def get_otio_complete_timeline(project): track_type, track_name) add_otio_gap( clip_start, otio_track, - track_item, timeline, project) + track_item, timeline) otio_track.append(clip) # add track to otio timeline @@ -250,12 +259,12 @@ def get_otio_clip_instance_data(track_item_data): timeline_start = timeline.GetStartFrame() frame_start = track_item.GetStart() frame_duration = track_item.GetDuration() - project_fps = project.GetSetting("timelineFrameRate") + self.project_fps = project.GetSetting("timelineFrameRate") 
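+    # `self` here is the module itself (set via sys.modules[__name__]);
+    # caching the timeline rate lets create_otio_clip prefer it over each
+    # media pool item's own FPS when building source ranges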
otio_clip_range = create_otio_time_range( - frame_start, frame_duration, project_fps) + frame_start, frame_duration, self.project_fps) # convert timeline to otio - otio_timeline = create_otio_timeline(timeline, project_fps) + otio_timeline = create_otio_timeline(timeline, self.project_fps) # convert track to otio otio_track = create_otio_track( track_type, "{}{}".format(track_name, track_index)) @@ -269,7 +278,7 @@ def get_otio_clip_instance_data(track_item_data): 0, frame_start, timeline_start, - project_fps + self.project_fps ) ) @@ -283,7 +292,7 @@ def get_otio_clip_instance_data(track_item_data): return { "otioTimeline": otio_timeline, "otioTrack": otio_track, - "otioClips": otio_clip, + "otioClip": otio_clip, "otioClipRange": otio_clip_range } From ee46ca58338b3426bf74ef92a52cd72cde2fad99 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Dec 2020 17:48:30 +0100 Subject: [PATCH 023/198] clean(resolve): removing os import --- pype/plugins/resolve/publish/collect_instances.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py index d8dac70a8f..561b1b6198 100644 --- a/pype/plugins/resolve/publish/collect_instances.py +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -1,4 +1,3 @@ -import os import pyblish from pype.hosts import resolve From 70e760981ec510fdcefa8d5345de453540bf56f2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Dec 2020 17:54:25 +0100 Subject: [PATCH 024/198] feat(global): otio review clip collector --- .../global/publish/collect_otio_review.py | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 pype/plugins/global/publish/collect_otio_review.py diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py new file mode 100644 index 0000000000..2943cc9ba5 --- /dev/null +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -0,0 +1,89 @@ +""" +Requires: + otioTimeline -> context data attribute + review -> instance data attribute + masterLayer -> instance data attribute + otioClip -> instance data attribute + otioClipRange -> instance data attribute +""" +import opentimelineio as otio +from opentimelineio.opentime import to_frames +import pyblish.api + + +class CollectOcioReview(pyblish.api.InstancePlugin): + """Get matching otio from defined review layer""" + + label = "Collect OTIO review" + order = pyblish.api.CollectorOrder + families = ["clip"] + hosts = ["resolve"] + + def process(self, instance): + # get basic variables + review_track_name = instance.data["review"] + master_layer = instance.data["masterLayer"] + otio_timeline_context = instance.context.data.get("otioTimeline") + otio_clip = instance.data["otioClip"] + otio_clip_range = instance.data["otioClipRange"] + + # skip if master layer is False + if not master_layer: + return + + # get timeline time values + start_time = otio_timeline_context.global_start_time + timeline_fps = start_time.rate + playhead = start_time.value + + # get matching review track as defined in instance data `review` + review_otio_track = None + for track in otio_timeline_context.video_tracks(): + if track.name == review_track_name: + review_otio_track = track + + frame_start = to_frames( + otio_clip_range.start_time, timeline_fps) + frame_duration = to_frames( + otio_clip_range.duration, timeline_fps) + self.log.debug( + ("name: {} | " + "timeline_in: {} | timeline_out: {}").format( + otio_clip.name, frame_start, + (frame_start + 
frame_duration - 1))) + + orwc_fps = timeline_fps + for clip_index, otio_rw_clip in enumerate(review_otio_track): + if isinstance(otio_rw_clip, otio.schema.Clip): + orwc_source_range = otio_rw_clip.source_range + orwc_fps = orwc_source_range.start_time.rate + orwc_start = to_frames(orwc_source_range.start_time, orwc_fps) + orwc_duration = to_frames(orwc_source_range.duration, orwc_fps) + source_in = orwc_start + source_out = (orwc_start + orwc_duration) - 1 + timeline_in = playhead + timeline_out = (timeline_in + orwc_duration) - 1 + self.log.debug( + ("name: {} | source_in: {} | source_out: {} | " + "timeline_in: {} | timeline_out: {} " + "| orwc_fps: {}").format( + otio_rw_clip.name, source_in, source_out, + timeline_in, timeline_out, orwc_fps)) + + # move plyhead to next available frame + playhead = timeline_out + 1 + + elif isinstance(otio_rw_clip, otio.schema.Gap): + gap_source_range = otio_rw_clip.source_range + gap_fps = gap_source_range.start_time.rate + gap_start = to_frames( + gap_source_range.start_time, gap_fps) + gap_duration = to_frames( + gap_source_range.duration, gap_fps) + if gap_fps != orwc_fps: + gap_duration += 1 + self.log.debug( + ("name: Gap | gap_start: {} | gap_fps: {}" + "| gap_duration: {} | timeline_fps: {}").format( + gap_start, gap_fps, gap_duration, timeline_fps)) + playhead += gap_duration From a3b11ad30925ec5072b7f961f5b387aef7b34017 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Dec 2020 12:32:49 +0100 Subject: [PATCH 025/198] feat(resolve): wip otio import and publishing --- pype/hosts/resolve/__init__.py | 17 +--- pype/hosts/resolve/lib.py | 33 +++++++ pype/hosts/resolve/otio/__init__.py | 0 .../{otio.py => otio/davinci_export.py} | 92 ++----------------- pype/hosts/resolve/otio/davinci_import.py | 48 ++++++++++ pype/hosts/resolve/otio/utils.py | 37 ++++++++ .../resolve/utility_scripts/OTIO_export.py | 2 +- .../resolve/utility_scripts/OTIO_import.py | 73 +++++++++++++++ .../resolve/publish/collect_workfile.py | 12 +-- 9 files changed, 211 insertions(+), 103 deletions(-) create mode 100644 pype/hosts/resolve/otio/__init__.py rename pype/hosts/resolve/{otio.py => otio/davinci_export.py} (72%) create mode 100644 pype/hosts/resolve/otio/davinci_import.py create mode 100644 pype/hosts/resolve/otio/utils.py create mode 100644 pype/hosts/resolve/utility_scripts/OTIO_import.py diff --git a/pype/hosts/resolve/__init__.py b/pype/hosts/resolve/__init__.py index 45aa5502cc..b6c43a58c2 100644 --- a/pype/hosts/resolve/__init__.py +++ b/pype/hosts/resolve/__init__.py @@ -30,7 +30,8 @@ from .lib import ( swap_clips, get_pype_clip_metadata, set_project_manager_to_folder_name, - get_reformated_path + get_reformated_path, + get_otio_clip_instance_data ) from .menu import launch_pype_menu @@ -49,12 +50,6 @@ from .workio import ( work_root ) -from .otio import ( - get_otio_clip_instance_data, - get_otio_complete_timeline, - save_otio -) - bmdvr = None bmdvf = None @@ -91,6 +86,7 @@ __all__ = [ "get_pype_clip_metadata", "set_project_manager_to_folder_name", "get_reformated_path", + "get_otio_clip_instance_data", # menu "launch_pype_menu", @@ -109,10 +105,5 @@ __all__ = [ # singleton with black magic resolve module "bmdvr", - "bmdvf", - - # open color io integration - "get_otio_clip_instance_data", - "get_otio_complete_timeline", - "save_otio" + "bmdvf" ] diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 777cae0eb2..6b44f97172 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -647,3 +647,36 @@ def 
get_reformated_path(path, padded=True): else: path = re.sub(num_pattern, f"%d", path) return path + + +def get_otio_clip_instance_data(track_item_data): + """ + Return otio objects for timeline, track and clip + + Args: + track_item_data (dict): track_item_data from list returned by + resolve.get_current_track_items() + + Returns: + dict: otio clip with parent objects + + """ + from .otio import davinci_export as otio_export + + track_item = track_item_data["clip"]["item"] + project = track_item_data["project"] + + frame_start = track_item.GetStart() + frame_duration = track_item.GetDuration() + self.project_fps = project.GetSetting("timelineFrameRate") + + otio_clip_range = otio_export.create_otio_time_range( + frame_start, frame_duration, self.project_fps) + + # create otio clip and add it to track + otio_clip = otio_export.create_otio_clip(track_item) + + return { + "otioClip": otio_clip, + "otioClipRange": otio_clip_range + } diff --git a/pype/hosts/resolve/otio/__init__.py b/pype/hosts/resolve/otio/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/hosts/resolve/otio.py b/pype/hosts/resolve/otio/davinci_export.py similarity index 72% rename from pype/hosts/resolve/otio.py rename to pype/hosts/resolve/otio/davinci_export.py index acb669196f..25c578e0a7 100644 --- a/pype/hosts/resolve/otio.py +++ b/pype/hosts/resolve/otio/davinci_export.py @@ -1,8 +1,7 @@ import sys import json import opentimelineio as otio -from . import lib - +from . import utils self = sys.modules[__name__] self.track_types = { @@ -12,17 +11,6 @@ self.track_types = { self.project_fps = None -def timecode_to_frames(timecode, framerate): - parts = zip(( - 3600 * framerate, - 60 * framerate, - framerate, 1 - ), timecode.split(":")) - return sum( - f * int(t) for f, t in parts - ) - - def create_otio_rational_time(frame, fps): return otio.opentime.RationalTime( float(frame), @@ -40,7 +28,7 @@ def create_otio_time_range(start_frame, frame_duration, fps): def create_otio_reference(media_pool_item): mp_clip_property = media_pool_item.GetClipProperty() path = mp_clip_property["File Path"] - reformat_path = lib.get_reformated_path(path, padded=False) + reformat_path = utils.get_reformated_path(path, padded=False) # get clip property regarding to type mp_clip_property = media_pool_item.GetClipProperty() @@ -51,7 +39,7 @@ def create_otio_reference(media_pool_item): else: audio_duration = str(mp_clip_property["Duration"]) frame_start = 0 - frame_duration = int(timecode_to_frames( + frame_duration = int(utils.timecode_to_frames( audio_duration, float(fps))) return otio.schema.ExternalReference( @@ -97,7 +85,7 @@ def create_otio_clip(track_item): else: fps = self.project_fps - name = lib.get_reformated_path(track_item.GetName()) + name = utils.get_reformated_path(track_item.GetName()) media_reference = create_otio_reference(media_pool_item) source_range = create_otio_time_range( @@ -141,7 +129,7 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): ) -def create_otio_timeline(timeline, fps): +def _create_otio_timeline(timeline, fps): start_time = create_otio_rational_time( timeline.GetStartFrame(), fps) otio_timeline = otio.schema.Timeline( @@ -172,13 +160,12 @@ def add_otio_gap(clip_start, otio_track, track_item, timeline): ) -def get_otio_complete_timeline(project): +def create_otio_timeline(timeline, fps): # get current timeline - timeline = project.GetCurrentTimeline() - self.project_fps = project.GetSetting("timelineFrameRate") + self.project_fps = fps # convert timeline to otio - 
otio_timeline = create_otio_timeline(timeline, self.project_fps) + otio_timeline = _create_otio_timeline(timeline, self.project_fps) # loop all defined track types for track_type in list(self.track_types.keys()): @@ -236,66 +223,5 @@ def get_otio_complete_timeline(project): return otio_timeline -def get_otio_clip_instance_data(track_item_data): - """ - Return otio objects for timeline, track and clip - - Args: - track_item_data (dict): track_item_data from list returned by - resolve.get_current_track_items() - - Returns: - dict: otio clip with parent objects - - """ - - track_item = track_item_data["clip"]["item"] - project = track_item_data["project"] - timeline = track_item_data["sequence"] - track_type = track_item_data["track"]["type"] - track_name = track_item_data["track"]["name"] - track_index = track_item_data["track"]["index"] - - timeline_start = timeline.GetStartFrame() - frame_start = track_item.GetStart() - frame_duration = track_item.GetDuration() - self.project_fps = project.GetSetting("timelineFrameRate") - - otio_clip_range = create_otio_time_range( - frame_start, frame_duration, self.project_fps) - # convert timeline to otio - otio_timeline = create_otio_timeline(timeline, self.project_fps) - # convert track to otio - otio_track = create_otio_track( - track_type, "{}{}".format(track_name, track_index)) - - # add gap if track item is not starting from timeline start - # if gap between track start and clip start - if frame_start > timeline_start: - # create gap and add it to track - otio_track.append( - create_otio_gap( - 0, - frame_start, - timeline_start, - self.project_fps - ) - ) - - # create otio clip and add it to track - otio_clip = create_otio_clip(track_item) - otio_track.append(otio_clip) - - # add track to otio timeline - otio_timeline.tracks.append(otio_track) - - return { - "otioTimeline": otio_timeline, - "otioTrack": otio_track, - "otioClip": otio_clip, - "otioClipRange": otio_clip_range - } - - -def save_otio(otio_timeline, path): +def write_to_file(otio_timeline, path): otio.adapters.write_to_file(otio_timeline, path) diff --git a/pype/hosts/resolve/otio/davinci_import.py b/pype/hosts/resolve/otio/davinci_import.py new file mode 100644 index 0000000000..19133279bb --- /dev/null +++ b/pype/hosts/resolve/otio/davinci_import.py @@ -0,0 +1,48 @@ +import sys +import DaVinciResolveScript +import opentimelineio as otio + + +self = sys.modules[__name__] +self.resolve = DaVinciResolveScript.scriptapp('Resolve') +self.fusion = DaVinciResolveScript.scriptapp('Fusion') +self.project_manager = self.resolve.GetProjectManager() +self.current_project = self.project_manager.GetCurrentProject() +self.media_pool = self.current_project.GetMediaPool() +self.track_types = { + "video": otio.schema.TrackKind.Video, + "audio": otio.schema.TrackKind.Audio +} +self.project_fps = None + + +def build_timeline(otio_timeline): + for clip in otio_timeline.each_clip(): + print(clip.name) + print(clip.parent().name) + print(clip.range_in_parent()) + + +def _build_track(otio_track): + pass + + +def _build_media_pool_item(otio_media_reference): + pass + + +def _build_track_item(otio_clip): + pass + + +def _build_gap(otio_clip): + pass + + +def _build_marker(otio_marker): + pass + + +def read_from_file(otio_file): + otio_timeline = otio.adapters.read_from_file(otio_file) + build_timeline(otio_timeline) diff --git a/pype/hosts/resolve/otio/utils.py b/pype/hosts/resolve/otio/utils.py new file mode 100644 index 0000000000..22619d4172 --- /dev/null +++ b/pype/hosts/resolve/otio/utils.py @@ -0,0 
+1,37 @@ +import re + + +def timecode_to_frames(timecode, framerate): + parts = zip(( + 3600 * framerate, + 60 * framerate, + framerate, 1 + ), timecode.split(":")) + return sum( + f * int(t) for f, t in parts + ) + + +def get_reformated_path(path, padded=True): + """ + Return fixed python expression path + + Args: + path (str): path url or simple file name + + Returns: + type: string with reformated path + + Example: + get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr + + """ + num_pattern = "(\\[\\d+\\-\\d+\\])" + padding_pattern = "(\\d+)(?=-)" + if "[" in path: + padding = len(re.findall(padding_pattern, path).pop()) + if padded: + path = re.sub(num_pattern, f"%0{padding}d", path) + else: + path = re.sub(num_pattern, f"%d", path) + return path diff --git a/pype/hosts/resolve/utility_scripts/OTIO_export.py b/pype/hosts/resolve/utility_scripts/OTIO_export.py index a0c8e80bc7..7569ba4c42 100644 --- a/pype/hosts/resolve/utility_scripts/OTIO_export.py +++ b/pype/hosts/resolve/utility_scripts/OTIO_export.py @@ -2,7 +2,7 @@ import os import sys import opentimelineio as otio -print(otio) + resolve = bmd.scriptapp("Resolve") fu = resolve.Fusion() diff --git a/pype/hosts/resolve/utility_scripts/OTIO_import.py b/pype/hosts/resolve/utility_scripts/OTIO_import.py new file mode 100644 index 0000000000..2266fd4b2b --- /dev/null +++ b/pype/hosts/resolve/utility_scripts/OTIO_import.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +import os +import sys +from pype.hosts.resolve.otio import davinci_resolve_import as otio_import + +resolve = bmd.scriptapp("Resolve") +fu = resolve.Fusion() +ui = fu.UIManager +disp = bmd.UIDispatcher(fu.UIManager) + + +title_font = ui.Font({"PixelSize": 18}) +dlg = disp.AddWindow( + { + "WindowTitle": "Import OTIO", + "ID": "OTIOwin", + "Geometry": [250, 250, 250, 100], + "Spacing": 0, + "Margin": 10 + }, + [ + ui.VGroup( + { + "Spacing": 2 + }, + [ + ui.Button( + { + "ID": "importOTIOfileButton", + "Text": "Select OTIO File Path", + "Weight": 1.25, + "ToolTip": "Choose otio file to import from", + "Flat": False + } + ), + ui.VGap(), + ui.Button( + { + "ID": "importButton", + "Text": "Import", + "Weight": 2, + "ToolTip": "Import otio to new timeline", + "Flat": False + } + ) + ] + ) + ] +) + +itm = dlg.GetItems() + + +def _close_window(event): + disp.ExitLoop() + + +def _import_button(event): + otio_import.read_from_file(itm["importOTIOfileButton"].Text) + _close_window(None) + + +def _import_file_pressed(event): + selected_path = fu.RequestFile(os.path.expanduser("~/Documents")) + itm["importOTIOfileButton"].Text = selected_path + + +dlg.On.OTIOwin.Close = _close_window +dlg.On.importOTIOfileButton.Clicked = _import_file_pressed +dlg.On.importButton.Clicked = _import_button +dlg.Show() +disp.RunLoop() +dlg.Hide() diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py index 0bd1a24a46..9873e1ca97 100644 --- a/pype/plugins/resolve/publish/collect_workfile.py +++ b/pype/plugins/resolve/publish/collect_workfile.py @@ -1,4 +1,3 @@ -import os import pyblish.api from pype.hosts import resolve from avalon import api as avalon @@ -6,8 +5,8 @@ from pprint import pformat # dev from importlib import reload -from pype.hosts.resolve import otio -reload(otio) +from pype.hosts.resolve.otio import davinci_export +reload(davinci_export) class CollectWorkfile(pyblish.api.ContextPlugin): @@ -23,12 +22,13 @@ class CollectWorkfile(pyblish.api.ContextPlugin): project = resolve.get_current_project() fps = 
project.GetSetting("timelineFrameRate") - # adding otio timeline to context - otio_timeline = resolve.get_otio_complete_timeline(project) - active_sequence = resolve.get_current_sequence() video_tracks = resolve.get_video_track_names() + # adding otio timeline to context + otio_timeline = davinci_export.create_otio_timeline( + active_sequence, fps) + instance_data = { "name": "{}_{}".format(asset, subset), "asset": asset, From cf89dbab48cec08dcfd446eb6cbd222602590175 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Dec 2020 12:38:06 +0100 Subject: [PATCH 026/198] feat(global): wip collect_otio_review --- .../global/publish/collect_otio_review.py | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py index 2943cc9ba5..a7097b84d0 100644 --- a/pype/plugins/global/publish/collect_otio_review.py +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -36,12 +36,6 @@ class CollectOcioReview(pyblish.api.InstancePlugin): timeline_fps = start_time.rate playhead = start_time.value - # get matching review track as defined in instance data `review` - review_otio_track = None - for track in otio_timeline_context.video_tracks(): - if track.name == review_track_name: - review_otio_track = track - frame_start = to_frames( otio_clip_range.start_time, timeline_fps) frame_duration = to_frames( @@ -53,9 +47,12 @@ class CollectOcioReview(pyblish.api.InstancePlugin): (frame_start + frame_duration - 1))) orwc_fps = timeline_fps - for clip_index, otio_rw_clip in enumerate(review_otio_track): - if isinstance(otio_rw_clip, otio.schema.Clip): - orwc_source_range = otio_rw_clip.source_range + for otio_clip in otio_timeline_context.each_clip(): + track_name = otio_clip.parent().name + if track_name not in review_track_name: + continue + if isinstance(otio_clip, otio.schema.Clip): + orwc_source_range = otio_clip.source_range orwc_fps = orwc_source_range.start_time.rate orwc_start = to_frames(orwc_source_range.start_time, orwc_fps) orwc_duration = to_frames(orwc_source_range.duration, orwc_fps) @@ -67,14 +64,14 @@ class CollectOcioReview(pyblish.api.InstancePlugin): ("name: {} | source_in: {} | source_out: {} | " "timeline_in: {} | timeline_out: {} " "| orwc_fps: {}").format( - otio_rw_clip.name, source_in, source_out, + otio_clip.name, source_in, source_out, timeline_in, timeline_out, orwc_fps)) # move plyhead to next available frame playhead = timeline_out + 1 - elif isinstance(otio_rw_clip, otio.schema.Gap): - gap_source_range = otio_rw_clip.source_range + elif isinstance(otio_clip, otio.schema.Gap): + gap_source_range = otio_clip.source_range gap_fps = gap_source_range.start_time.rate gap_start = to_frames( gap_source_range.start_time, gap_fps) From f7f7b657bb838ba2fdd7f2f312b0eb90cf422ece Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Dec 2020 17:10:03 +0100 Subject: [PATCH 027/198] feat(global, resolve): otio publishing wip --- pype/hosts/resolve/lib.py | 6 +- pype/hosts/resolve/otio/davinci_export.py | 28 ++++++- pype/hosts/resolve/otio/utils.py | 21 +++++ pype/lib/__init__.py | 10 ++- pype/lib/editorial.py | 36 +++++++++ .../global/publish/collect_otio_review.py | 76 ++++++------------- 6 files changed, 121 insertions(+), 56 deletions(-) create mode 100644 pype/lib/editorial.py diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 6b44f97172..5f186b7a98 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -665,9 +665,11 
@@ def get_otio_clip_instance_data(track_item_data): track_item = track_item_data["clip"]["item"] project = track_item_data["project"] + timeline = track_item_data["sequence"] + timeline_start = timeline.GetStartFrame() - frame_start = track_item.GetStart() - frame_duration = track_item.GetDuration() + frame_start = int(track_item.GetStart() - timeline_start) + frame_duration = int(track_item.GetDuration()) self.project_fps = project.GetSetting("timelineFrameRate") otio_clip_range = otio_export.create_otio_time_range( diff --git a/pype/hosts/resolve/otio/davinci_export.py b/pype/hosts/resolve/otio/davinci_export.py index 25c578e0a7..cffb58f960 100644 --- a/pype/hosts/resolve/otio/davinci_export.py +++ b/pype/hosts/resolve/otio/davinci_export.py @@ -26,9 +26,17 @@ def create_otio_time_range(start_frame, frame_duration, fps): def create_otio_reference(media_pool_item): + metadata = dict() mp_clip_property = media_pool_item.GetClipProperty() path = mp_clip_property["File Path"] reformat_path = utils.get_reformated_path(path, padded=False) + padding = utils.get_padding_from_path(path) + + if padding: + metadata.update({ + "isSequence": True, + "padding": padding + }) # get clip property regarding to type mp_clip_property = media_pool_item.GetClipProperty() @@ -42,7 +50,7 @@ def create_otio_reference(media_pool_item): frame_duration = int(utils.timecode_to_frames( audio_duration, float(fps))) - return otio.schema.ExternalReference( + otio_ex_ref_item = otio.schema.ExternalReference( target_url=reformat_path, available_range=create_otio_time_range( frame_start, @@ -51,6 +59,11 @@ def create_otio_reference(media_pool_item): ) ) + # add metadata to otio item + add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata) + + return otio_ex_ref_item + def create_otio_markers(track_item, fps): track_item_markers = track_item.GetMarkers() @@ -85,7 +98,7 @@ def create_otio_clip(track_item): else: fps = self.project_fps - name = utils.get_reformated_path(track_item.GetName()) + name = track_item.GetName() media_reference = create_otio_reference(media_pool_item) source_range = create_otio_time_range( @@ -160,6 +173,17 @@ def add_otio_gap(clip_start, otio_track, track_item, timeline): ) +def add_otio_metadata(otio_item, media_pool_item, **kwargs): + mp_metadata = media_pool_item.GetMetadata() + # add additional metadata from kwargs + if kwargs: + mp_metadata.update(kwargs) + + # add metadata to otio item metadata + for key, value in mp_metadata.items(): + otio_item.metadata.update({key: value}) + + def create_otio_timeline(timeline, fps): # get current timeline self.project_fps = fps diff --git a/pype/hosts/resolve/otio/utils.py b/pype/hosts/resolve/otio/utils.py index 22619d4172..88e0b3d3b4 100644 --- a/pype/hosts/resolve/otio/utils.py +++ b/pype/hosts/resolve/otio/utils.py @@ -35,3 +35,24 @@ def get_reformated_path(path, padded=True): else: path = re.sub(num_pattern, f"%d", path) return path + + +def get_padding_from_path(path): + """ + Return padding number from DaVinci Resolve sequence path style + + Args: + path (str): path url or simple file name + + Returns: + int: padding number + + Example: + get_padding_from_path("plate.[0001-1008].exr") > 4 + + """ + padding_pattern = "(\\d+)(?=-)" + if "[" in path: + return len(re.findall(padding_pattern, path).pop()) + + return None diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 188dd68039..8cc0384032 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -46,6 +46,11 @@ from .ffmpeg_utils import ( ffprobe_streams ) +from 
.editorial import ( + is_overlapping, + convert_otio_range_to_frame_range +) + __all__ = [ "get_avalon_database", "set_io_database", @@ -81,5 +86,8 @@ __all__ = [ "get_ffmpeg_tool_path", "source_hash", - "_subprocess" + "_subprocess", + + "is_overlapping", + "convert_otio_range_to_frame_range" ] diff --git a/pype/lib/editorial.py b/pype/lib/editorial.py new file mode 100644 index 0000000000..41a92165c3 --- /dev/null +++ b/pype/lib/editorial.py @@ -0,0 +1,36 @@ +from opentimelineio.opentime import to_frames + + +def convert_otio_range_to_frame_range(otio_range): + start = to_frames( + otio_range.start_time, otio_range.start_time.rate) + end = start + to_frames( + otio_range.duration, otio_range.duration.rate) - 1 + return start, end + + +def is_overlapping(test_range, main_range, strict=False): + test_start, test_end = convert_otio_range_to_frame_range(test_range) + main_start, main_end = convert_otio_range_to_frame_range(main_range) + covering_exp = bool( + (test_start <= main_start) and (test_end >= main_end) + ) + inside_exp = bool( + (test_start >= main_start) and (test_end <= main_end) + ) + overlaying_right_exp = bool( + (test_start < main_end) and (test_end >= main_end) + ) + overlaying_left_exp = bool( + (test_end > main_start) and (test_start <= main_start) + ) + + if not strict: + return any(( + covering_exp, + inside_exp, + overlaying_right_exp, + overlaying_left_exp + )) + else: + return covering_exp diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py index a7097b84d0..9daea4d30f 100644 --- a/pype/plugins/global/publish/collect_otio_review.py +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -3,12 +3,14 @@ Requires: otioTimeline -> context data attribute review -> instance data attribute masterLayer -> instance data attribute - otioClip -> instance data attribute otioClipRange -> instance data attribute """ import opentimelineio as otio -from opentimelineio.opentime import to_frames import pyblish.api +from pype.lib import ( + is_overlapping, + convert_otio_range_to_frame_range +) class CollectOcioReview(pyblish.api.InstancePlugin): @@ -23,64 +25,36 @@ class CollectOcioReview(pyblish.api.InstancePlugin): # get basic variables review_track_name = instance.data["review"] master_layer = instance.data["masterLayer"] - otio_timeline_context = instance.context.data.get("otioTimeline") - otio_clip = instance.data["otioClip"] + otio_timeline_context = instance.context.data["otioTimeline"] otio_clip_range = instance.data["otioClipRange"] # skip if master layer is False if not master_layer: return - # get timeline time values - start_time = otio_timeline_context.global_start_time - timeline_fps = start_time.rate - playhead = start_time.value - - frame_start = to_frames( - otio_clip_range.start_time, timeline_fps) - frame_duration = to_frames( - otio_clip_range.duration, timeline_fps) - self.log.debug( - ("name: {} | " - "timeline_in: {} | timeline_out: {}").format( - otio_clip.name, frame_start, - (frame_start + frame_duration - 1))) - - orwc_fps = timeline_fps for otio_clip in otio_timeline_context.each_clip(): track_name = otio_clip.parent().name + parent_range = otio_clip.range_in_parent() if track_name not in review_track_name: continue if isinstance(otio_clip, otio.schema.Clip): - orwc_source_range = otio_clip.source_range - orwc_fps = orwc_source_range.start_time.rate - orwc_start = to_frames(orwc_source_range.start_time, orwc_fps) - orwc_duration = to_frames(orwc_source_range.duration, orwc_fps) - source_in 
= orwc_start - source_out = (orwc_start + orwc_duration) - 1 - timeline_in = playhead - timeline_out = (timeline_in + orwc_duration) - 1 - self.log.debug( - ("name: {} | source_in: {} | source_out: {} | " - "timeline_in: {} | timeline_out: {} " - "| orwc_fps: {}").format( - otio_clip.name, source_in, source_out, - timeline_in, timeline_out, orwc_fps)) + if is_overlapping(parent_range, otio_clip_range, strict=False): + self.create_representation( + otio_clip, otio_clip_range, instance) - # move plyhead to next available frame - playhead = timeline_out + 1 - - elif isinstance(otio_clip, otio.schema.Gap): - gap_source_range = otio_clip.source_range - gap_fps = gap_source_range.start_time.rate - gap_start = to_frames( - gap_source_range.start_time, gap_fps) - gap_duration = to_frames( - gap_source_range.duration, gap_fps) - if gap_fps != orwc_fps: - gap_duration += 1 - self.log.debug( - ("name: Gap | gap_start: {} | gap_fps: {}" - "| gap_duration: {} | timeline_fps: {}").format( - gap_start, gap_fps, gap_duration, timeline_fps)) - playhead += gap_duration + def create_representation(self, otio_clip, to_otio_range, instance): + to_timeline_start, to_timeline_end = convert_otio_range_to_frame_range( + to_otio_range) + timeline_start, timeline_end = convert_otio_range_to_frame_range( + otio_clip.range_in_parent()) + source_start, source_end = convert_otio_range_to_frame_range( + otio_clip.source_range) + media_reference = otio_clip.media_reference + available_start, available_end = convert_otio_range_to_frame_range( + media_reference.available_range) + path = media_reference.target_url + self.log.debug(path) + self.log.debug((available_start, available_end)) + self.log.debug((source_start, source_end)) + self.log.debug((timeline_start, timeline_end)) + self.log.debug((to_timeline_start, to_timeline_end)) From eeeef0d33d7683a48fbd4ed5adc2d0e478659878 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Dec 2020 17:53:43 +0100 Subject: [PATCH 028/198] feat(global, resolve): otio publishing wip --- pype/hosts/resolve/lib.py | 62 ++++++++++++------- pype/lib/__init__.py | 4 +- pype/lib/editorial.py | 6 +- .../global/publish/collect_otio_review.py | 21 ++++--- .../resolve/publish/collect_instances.py | 10 ++- 5 files changed, 63 insertions(+), 40 deletions(-) diff --git a/pype/hosts/resolve/lib.py b/pype/hosts/resolve/lib.py index 5f186b7a98..5324f868d6 100644 --- a/pype/hosts/resolve/lib.py +++ b/pype/hosts/resolve/lib.py @@ -2,6 +2,10 @@ import sys import json import re from opentimelineio import opentime +import pype + +from .otio import davinci_export as otio_export + from pype.api import Logger log = Logger().get_logger(__name__, "resolve") @@ -649,20 +653,7 @@ def get_reformated_path(path, padded=True): return path -def get_otio_clip_instance_data(track_item_data): - """ - Return otio objects for timeline, track and clip - - Args: - track_item_data (dict): track_item_data from list returned by - resolve.get_current_track_items() - - Returns: - dict: otio clip with parent objects - - """ - from .otio import davinci_export as otio_export - +def create_otio_time_range_from_track_item_data(track_item_data): track_item = track_item_data["clip"]["item"] project = track_item_data["project"] timeline = track_item_data["sequence"] @@ -670,15 +661,40 @@ def get_otio_clip_instance_data(track_item_data): frame_start = int(track_item.GetStart() - timeline_start) frame_duration = int(track_item.GetDuration()) - self.project_fps = project.GetSetting("timelineFrameRate") + fps = 
project.GetSetting("timelineFrameRate")
 
-    otio_clip_range = otio_export.create_otio_time_range(
-        frame_start, frame_duration, self.project_fps)
+    return otio_export.create_otio_time_range(
+        frame_start, frame_duration, fps)
 
-    # create otio clip and add it to track
-    otio_clip = otio_export.create_otio_clip(track_item)
 
-    return {
-        "otioClip": otio_clip,
-        "otioClipRange": otio_clip_range
-    }
+
+def get_otio_clip_instance_data(otio_timeline, track_item_data):
+    """
+    Return otio clip object from otio timeline matching given track item
+
+    Args:
+        track_item_data (dict): track_item_data from list returned by
+            resolve.get_current_track_items()
+        otio_timeline (otio.schema.Timeline): otio object
+
+    Returns:
+        dict: otio clip object
+
+    """
+
+    track_item = track_item_data["clip"]["item"]
+    track_name = track_item_data["track"]["name"]
+    timeline_range = create_otio_time_range_from_track_item_data(
+        track_item_data)
+
+    for otio_clip in otio_timeline.each_clip():
+        clip_track_name = otio_clip.parent().name
+        parent_range = otio_clip.range_in_parent()
+        if track_name not in clip_track_name:
+            continue
+        if otio_clip.name not in track_item.GetName():
+            continue
+        if pype.lib.is_overlapping_otio_ranges(
+                parent_range, timeline_range, strict=True):
+            return {"otioClip": otio_clip}
+
+    return None
diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py
index 8cc0384032..fc66504456 100644
--- a/pype/lib/__init__.py
+++ b/pype/lib/__init__.py
@@ -47,7 +47,7 @@ from .ffmpeg_utils import (
 )
 
 from .editorial import (
-    is_overlapping,
+    is_overlapping_otio_ranges,
     convert_otio_range_to_frame_range
 )
 
@@ -88,6 +88,6 @@ __all__ = [
     "source_hash",
     "_subprocess",
 
-    "is_overlapping",
+    "is_overlapping_otio_ranges",
     "convert_otio_range_to_frame_range"
 ]
diff --git a/pype/lib/editorial.py b/pype/lib/editorial.py
index 41a92165c3..89f534b143 100644
--- a/pype/lib/editorial.py
+++ b/pype/lib/editorial.py
@@ -9,9 +9,9 @@ def convert_otio_range_to_frame_range(otio_range):
     return start, end
 
 
-def is_overlapping(test_range, main_range, strict=False):
-    test_start, test_end = convert_otio_range_to_frame_range(test_range)
-    main_start, main_end = convert_otio_range_to_frame_range(main_range)
+def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
+    test_start, test_end = convert_otio_range_to_frame_range(test_otio_range)
+    main_start, main_end = convert_otio_range_to_frame_range(main_otio_range)
     covering_exp = bool(
         (test_start <= main_start) and (test_end >= main_end)
     )
diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py
index 9daea4d30f..0ab3cf8b8b 100644
--- a/pype/plugins/global/publish/collect_otio_review.py
+++ b/pype/plugins/global/publish/collect_otio_review.py
@@ -8,7 +8,7 @@ Requires:
 import opentimelineio as otio
 import pyblish.api
 from pype.lib import (
-    is_overlapping,
+    is_overlapping_otio_ranges,
     convert_otio_range_to_frame_range
 )
 
@@ -26,21 +26,22 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
         # get basic variables
         review_track_name = instance.data["review"]
         master_layer = instance.data["masterLayer"]
         otio_timeline_context = instance.context.data["otioTimeline"]
-        otio_clip_range = instance.data["otioClipRange"]
-
+        otio_clip = instance.data["otioClip"]
+        otio_clip_range = otio_clip.range_in_parent()
         # skip if master layer is False
         if not master_layer:
             return
 
-        for otio_clip in otio_timeline_context.each_clip():
-            track_name = otio_clip.parent().name
+        for _otio_clip in otio_timeline_context.each_clip():
+            
track_name = _otio_clip.parent().name + parent_range = _otio_clip.range_in_parent() if track_name not in review_track_name: continue - if isinstance(otio_clip, otio.schema.Clip): - if is_overlapping(parent_range, otio_clip_range, strict=False): + if isinstance(_otio_clip, otio.schema.Clip): + if is_overlapping_otio_ranges( + parent_range, otio_clip_range, strict=False): self.create_representation( - otio_clip, otio_clip_range, instance) + _otio_clip, otio_clip_range, instance) def create_representation(self, otio_clip, to_otio_range, instance): to_timeline_start, to_timeline_end = convert_otio_range_to_frame_range( @@ -50,10 +51,12 @@ class CollectOcioReview(pyblish.api.InstancePlugin): source_start, source_end = convert_otio_range_to_frame_range( otio_clip.source_range) media_reference = otio_clip.media_reference + metadata = media_reference.metadata available_start, available_end = convert_otio_range_to_frame_range( media_reference.available_range) path = media_reference.target_url self.log.debug(path) + self.log.debug(metadata) self.log.debug((available_start, available_end)) self.log.debug((source_start, source_end)) self.log.debug((timeline_start, timeline_end)) diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py index 561b1b6198..9283a7b1a6 100644 --- a/pype/plugins/resolve/publish/collect_instances.py +++ b/pype/plugins/resolve/publish/collect_instances.py @@ -13,6 +13,7 @@ class CollectInstances(pyblish.api.ContextPlugin): hosts = ["resolve"] def process(self, context): + otio_timeline = context.data["otioTimeline"] selected_track_items = resolve.get_current_track_items( filter=True, selecting_color=resolve.publish_clip_color) @@ -68,9 +69,12 @@ class CollectInstances(pyblish.api.ContextPlugin): "tags": tag_data, }) - # otio - otio_data = resolve.get_otio_clip_instance_data(track_item_data) - data.update(otio_data) + # otio clip data + otio_data = resolve.get_otio_clip_instance_data( + otio_timeline, track_item_data) + + if otio_data: + data.update(otio_data) # create instance instance = context.create_instance(**data) From e22aceab4b4a19ea6df0a0bf330b07fae009bf49 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Dec 2020 18:47:39 +0100 Subject: [PATCH 029/198] feat(global): review otio add representation --- pype/lib/__init__.py | 6 +- pype/lib/editorial.py | 21 +++++++ .../global/publish/collect_otio_review.py | 55 ++++++++++++++----- 3 files changed, 65 insertions(+), 17 deletions(-) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 6f8434d43e..93799d0232 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -61,7 +61,8 @@ from .ffmpeg_utils import ( from .editorial import ( is_overlapping_otio_ranges, - convert_otio_range_to_frame_range + convert_otio_range_to_frame_range, + convert_to_padded_path ) __all__ = [ @@ -110,5 +111,6 @@ __all__ = [ "_subprocess", "is_overlapping_otio_ranges", - "convert_otio_range_to_frame_range" + "convert_otio_range_to_frame_range", + "convert_to_padded_path" ] diff --git a/pype/lib/editorial.py b/pype/lib/editorial.py index 89f534b143..2381d4b518 100644 --- a/pype/lib/editorial.py +++ b/pype/lib/editorial.py @@ -1,3 +1,4 @@ +import re from opentimelineio.opentime import to_frames @@ -34,3 +35,23 @@ def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False): )) else: return covering_exp + + +def convert_to_padded_path(path, padding): + """ + Return correct padding in sequence string + + Args: + path (str): path url or simple file name + 
padding (int): number of padding + + Returns: + type: string with reformated path + + Example: + convert_to_padded_path("plate.%d.exr") > plate.%04d.exr + + """ + if "%d" in path: + path = re.sub("%d", "%0{padding}d".format(padding=padding), path) + return path diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py index 0ab3cf8b8b..cf80445f8d 100644 --- a/pype/plugins/global/publish/collect_otio_review.py +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -5,12 +5,10 @@ Requires: masterLayer -> instance data attribute otioClipRange -> instance data attribute """ +import os import opentimelineio as otio import pyblish.api -from pype.lib import ( - is_overlapping_otio_ranges, - convert_otio_range_to_frame_range -) +import pype.lib class CollectOcioReview(pyblish.api.InstancePlugin): @@ -38,26 +36,53 @@ class CollectOcioReview(pyblish.api.InstancePlugin): if track_name not in review_track_name: continue if isinstance(_otio_clip, otio.schema.Clip): - if is_overlapping_otio_ranges( + if pype.lib.is_overlapping_otio_ranges( parent_range, otio_clip_range, strict=False): self.create_representation( _otio_clip, otio_clip_range, instance) def create_representation(self, otio_clip, to_otio_range, instance): - to_timeline_start, to_timeline_end = convert_otio_range_to_frame_range( + to_tl_start, to_tl_end = pype.lib.convert_otio_range_to_frame_range( to_otio_range) - timeline_start, timeline_end = convert_otio_range_to_frame_range( + tl_start, tl_end = pype.lib.convert_otio_range_to_frame_range( otio_clip.range_in_parent()) - source_start, source_end = convert_otio_range_to_frame_range( + source_start, source_end = pype.lib.convert_otio_range_to_frame_range( otio_clip.source_range) media_reference = otio_clip.media_reference metadata = media_reference.metadata - available_start, available_end = convert_otio_range_to_frame_range( + mr_start, mr_end = pype.lib.convert_otio_range_to_frame_range( media_reference.available_range) path = media_reference.target_url - self.log.debug(path) - self.log.debug(metadata) - self.log.debug((available_start, available_end)) - self.log.debug((source_start, source_end)) - self.log.debug((timeline_start, timeline_end)) - self.log.debug((to_timeline_start, to_timeline_end)) + reference_frame_start = (mr_start + source_start) + ( + to_tl_start - tl_start) + reference_frame_end = (mr_start + source_end) - ( + tl_end - to_tl_end) + + base_name = os.path.basename(path) + staging_dir = os.path.dirname(path) + ext = os.path.splitext(base_name)[1][1:] + + if metadata.get("isSequence"): + files = list() + padding = metadata["padding"] + base_name = pype.lib.convert_to_padded_path(base_name, padding) + for index in range( + reference_frame_start, (reference_frame_end + 1)): + file_name = base_name % index + path_test = os.path.join(staging_dir, file_name) + if os.path.exists(path_test): + files.append(file_name) + + self.log.debug(files) + else: + files = base_name + + representation = { + "ext": ext, + "name": ext, + "files": files, + "frameStart": reference_frame_start, + "frameEnd": reference_frame_end, + "stagingDir": staging_dir + } + self.log.debug(representation) From 042a4e643b36cae6f30e7017d57458a00a75f111 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 8 Dec 2020 16:03:33 +0100 Subject: [PATCH 030/198] feat(global, resolve): publishing with otio wip --- pype/hosts/resolve/otio/utils.py | 8 + pype/lib/__init__.py | 6 +- pype/lib/editorial.py | 27 +- .../publish/collect_otio_frame_ranges.py | 72 
++++ .../global/publish/collect_otio_review.py | 63 +-- .../global/publish/extract_otio_review.py | 403 ++++++++++++++++++ .../resolve/publish/collect_instances.py | 6 +- .../resolve/publish/collect_workfile.py | 2 +- 8 files changed, 526 insertions(+), 61 deletions(-) create mode 100644 pype/plugins/global/publish/collect_otio_frame_ranges.py create mode 100644 pype/plugins/global/publish/extract_otio_review.py diff --git a/pype/hosts/resolve/otio/utils.py b/pype/hosts/resolve/otio/utils.py index 88e0b3d3b4..54a052bb56 100644 --- a/pype/hosts/resolve/otio/utils.py +++ b/pype/hosts/resolve/otio/utils.py @@ -12,6 +12,14 @@ def timecode_to_frames(timecode, framerate): ) +def frames_to_timecode(frames, framerate): + return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format( + int(frames / (3600 * framerate)), + int(frames / (60 * framerate) % 60), + int(frames / framerate % 60), + int(frames % framerate)) + + def get_reformated_path(path, padded=True): """ Return fixed python expression path diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 93799d0232..cfc94ec97d 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -61,7 +61,8 @@ from .ffmpeg_utils import ( from .editorial import ( is_overlapping_otio_ranges, - convert_otio_range_to_frame_range, + otio_range_to_frame_range, + otio_range_with_handles, convert_to_padded_path ) @@ -111,6 +112,7 @@ __all__ = [ "_subprocess", "is_overlapping_otio_ranges", - "convert_otio_range_to_frame_range", + "otio_range_to_frame_range", + "otio_range_with_handles", "convert_to_padded_path" ] diff --git a/pype/lib/editorial.py b/pype/lib/editorial.py index 2381d4b518..c0ad4ace00 100644 --- a/pype/lib/editorial.py +++ b/pype/lib/editorial.py @@ -1,8 +1,9 @@ import re -from opentimelineio.opentime import to_frames +from opentimelineio.opentime import ( + to_frames, RationalTime, TimeRange) -def convert_otio_range_to_frame_range(otio_range): +def otio_range_to_frame_range(otio_range): start = to_frames( otio_range.start_time, otio_range.start_time.rate) end = start + to_frames( @@ -10,9 +11,23 @@ def convert_otio_range_to_frame_range(otio_range): return start, end +def otio_range_with_handles(otio_range, instance): + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + handles_duration = handle_start + handle_end + fps = float(otio_range.start_time.rate) + start = to_frames(otio_range.start_time, fps) + duration = to_frames(otio_range.duration, fps) + + return TimeRange( + start_time=RationalTime((start - handle_start), fps), + duration=RationalTime((duration + handles_duration), fps) + ) + + def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False): - test_start, test_end = convert_otio_range_to_frame_range(test_otio_range) - main_start, main_end = convert_otio_range_to_frame_range(main_otio_range) + test_start, test_end = otio_range_to_frame_range(test_otio_range) + main_start, main_end = otio_range_to_frame_range(main_otio_range) covering_exp = bool( (test_start <= main_start) and (test_end >= main_end) ) @@ -20,10 +35,10 @@ def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False): (test_start >= main_start) and (test_end <= main_end) ) overlaying_right_exp = bool( - (test_start < main_end) and (test_end >= main_end) + (test_start <= main_end) and (test_end >= main_end) ) overlaying_left_exp = bool( - (test_end > main_start) and (test_start <= main_start) + (test_end >= main_start) and (test_start <= main_start) ) if not strict: diff --git 
a/pype/plugins/global/publish/collect_otio_frame_ranges.py b/pype/plugins/global/publish/collect_otio_frame_ranges.py new file mode 100644 index 0000000000..5d1370850f --- /dev/null +++ b/pype/plugins/global/publish/collect_otio_frame_ranges.py @@ -0,0 +1,72 @@ +""" +Requires: + otioTimeline -> context data attribute + review -> instance data attribute + masterLayer -> instance data attribute + otioClipRange -> instance data attribute +""" +# import os +import opentimelineio as otio +import pyblish.api +import pype.lib +from pprint import pformat + + +class CollectOcioFrameRanges(pyblish.api.InstancePlugin): + """Getting otio ranges from otio_clip + + Adding timeline and source ranges to instance data""" + + label = "Collect OTIO Frame Ranges" + order = pyblish.api.CollectorOrder - 0.58 + families = ["clip"] + hosts = ["resolve"] + + def process(self, instance): + # get basic variables + otio_clip = instance.data["otioClip"] + workfile_start = instance.data["workfileFrameStart"] + + # get ranges + otio_tl_range = otio_clip.range_in_parent() + self.log.debug(otio_tl_range) + otio_src_range = otio_clip.source_range + otio_avalable_range = otio_clip.available_range() + self.log.debug(otio_avalable_range) + otio_tl_range_handles = pype.lib.otio_range_with_handles( + otio_tl_range, instance) + self.log.debug(otio_tl_range_handles) + otio_src_range_handles = pype.lib.otio_range_with_handles( + otio_src_range, instance) + + # get source avalable start frame + src_starting_from = otio.opentime.to_frames( + otio_avalable_range.start_time, + otio_avalable_range.start_time.rate) + # convert to frames + range_convert = pype.lib.otio_range_to_frame_range + tl_start, tl_end = range_convert(otio_tl_range) + tl_start_h, tl_end_h = range_convert(otio_tl_range_handles) + src_start, src_end = range_convert(otio_src_range) + src_start_h, src_end_h = range_convert(otio_src_range_handles) + frame_start = workfile_start + frame_end = frame_start + otio.opentime.to_frames( + otio_tl_range.duration, otio_tl_range.duration.rate) - 1 + + data = { + "frameStart": frame_start, + "frameEnd": frame_end, + "clipStart": tl_start, + "clipEnd": tl_end, + "clipStartH": tl_start_h, + "clipEndH": tl_end_h, + "sourceStart": src_starting_from + src_start, + "sourceEnd": src_starting_from + src_end, + "sourceStartH": src_starting_from + src_start_h, + "sourceEndH": src_starting_from + src_end_h, + } + instance.data.update(data) + self.log.debug( + "_ data: {}".format(pformat(data))) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py index cf80445f8d..86ef469b71 100644 --- a/pype/plugins/global/publish/collect_otio_review.py +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -5,21 +5,23 @@ Requires: masterLayer -> instance data attribute otioClipRange -> instance data attribute """ -import os +# import os import opentimelineio as otio import pyblish.api import pype.lib +from pprint import pformat class CollectOcioReview(pyblish.api.InstancePlugin): """Get matching otio from defined review layer""" label = "Collect OTIO review" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.57 families = ["clip"] hosts = ["resolve"] def process(self, instance): + otio_review_clips = list() # get basic variables review_track_name = instance.data["review"] master_layer = instance.data["masterLayer"] @@ -36,53 +38,18 @@ class CollectOcioReview(pyblish.api.InstancePlugin): if 
track_name not in review_track_name: continue if isinstance(_otio_clip, otio.schema.Clip): + test_start, test_end = pype.lib.otio_range_to_frame_range( + parent_range) + main_start, main_end = pype.lib.otio_range_to_frame_range( + otio_clip_range) if pype.lib.is_overlapping_otio_ranges( parent_range, otio_clip_range, strict=False): - self.create_representation( - _otio_clip, otio_clip_range, instance) + # add found clips to list + otio_review_clips.append(_otio_clip) - def create_representation(self, otio_clip, to_otio_range, instance): - to_tl_start, to_tl_end = pype.lib.convert_otio_range_to_frame_range( - to_otio_range) - tl_start, tl_end = pype.lib.convert_otio_range_to_frame_range( - otio_clip.range_in_parent()) - source_start, source_end = pype.lib.convert_otio_range_to_frame_range( - otio_clip.source_range) - media_reference = otio_clip.media_reference - metadata = media_reference.metadata - mr_start, mr_end = pype.lib.convert_otio_range_to_frame_range( - media_reference.available_range) - path = media_reference.target_url - reference_frame_start = (mr_start + source_start) + ( - to_tl_start - tl_start) - reference_frame_end = (mr_start + source_end) - ( - tl_end - to_tl_end) + instance.data["otioReviewClip"] = otio_review_clips + self.log.debug( + "_ otio_review_clips: {}".format(otio_review_clips)) - base_name = os.path.basename(path) - staging_dir = os.path.dirname(path) - ext = os.path.splitext(base_name)[1][1:] - - if metadata.get("isSequence"): - files = list() - padding = metadata["padding"] - base_name = pype.lib.convert_to_padded_path(base_name, padding) - for index in range( - reference_frame_start, (reference_frame_end + 1)): - file_name = base_name % index - path_test = os.path.join(staging_dir, file_name) - if os.path.exists(path_test): - files.append(file_name) - - self.log.debug(files) - else: - files = base_name - - representation = { - "ext": ext, - "name": ext, - "files": files, - "frameStart": reference_frame_start, - "frameEnd": reference_frame_end, - "stagingDir": staging_dir - } - self.log.debug(representation) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py new file mode 100644 index 0000000000..9d43e9a9a8 --- /dev/null +++ b/pype/plugins/global/publish/extract_otio_review.py @@ -0,0 +1,403 @@ +import os +import sys +import six +import errno +from pyblish import api +import pype +import clique +from avalon.vendor import filelink + + +class ExtractOTIOReview(pype.api.Extractor): + """Extract OTIO timeline into one concuted video file""" + + # order = api.ExtractorOrder + order = api.CollectorOrder + 0.1023 + label = "Extract OTIO review" + hosts = ["resolve"] + families = ["review_otio"] + + # presets + tags_addition = [] + + def process(self, instance): + # self.create_representation( + # _otio_clip, otio_clip_range, instance) + """" + Expecting (instance.data): + otioClip (otio.schema.clip): clip from otio timeline + otioReviewClips (list): list with instances of otio.schema.clip + or otio.schema.gap + + Process description: + Comparing `otioClip` parent range with `otioReviewClip` parent range will result in frame range witch is the trimmed cut. In case more otio clips or otio gaps are found in otioReviewClips then ffmpeg will generate multiple clips and those are then concuted together to one video file or image sequence. Resulting files are then added to instance as representation ready for review family plugins. 
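+
+        Example (hypothetical frame numbers): an `otioClip` spanning
+        timeline frames 100-150 against a review clip spanning frames
+        90-160 would yield a trimmed cut covering frames 100-150 of the
+        review clip's media.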
+ """" + + + + inst_data = instance.data + asset = inst_data['asset'] + item = inst_data['item'] + event_number = int(item.eventNumber()) + + # get representation and loop them + representations = inst_data["representations"] + + # check if sequence + is_sequence = inst_data["isSequence"] + + # get resolution default + resolution_width = inst_data["resolutionWidth"] + resolution_height = inst_data["resolutionHeight"] + + # frame range data + media_duration = inst_data["mediaDuration"] + + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") + + # filter out mov and img sequences + representations_new = representations[:] + for repre in representations: + input_args = list() + output_args = list() + + tags = repre.get("tags", []) + + # check if supported tags are in representation for activation + filter_tag = False + for tag in ["_cut-bigger", "_cut-smaller"]: + if tag in tags: + filter_tag = True + break + if not filter_tag: + continue + + self.log.debug("__ repre: {}".format(repre)) + + files = repre.get("files") + staging_dir = repre.get("stagingDir") + fps = repre.get("fps") + ext = repre.get("ext") + + # make paths + full_output_dir = os.path.join( + staging_dir, "cuts") + + if is_sequence: + new_files = list() + + # frame range delivery included handles + frame_start = ( + inst_data["frameStart"] - inst_data["handleStart"]) + frame_end = ( + inst_data["frameEnd"] + inst_data["handleEnd"]) + self.log.debug("_ frame_start: {}".format(frame_start)) + self.log.debug("_ frame_end: {}".format(frame_end)) + + # make collection from input files list + collections, remainder = clique.assemble(files) + collection = collections.pop() + self.log.debug("_ collection: {}".format(collection)) + + # name components + head = collection.format("{head}") + padding = collection.format("{padding}") + tail = collection.format("{tail}") + self.log.debug("_ head: {}".format(head)) + self.log.debug("_ padding: {}".format(padding)) + self.log.debug("_ tail: {}".format(tail)) + + # make destination file with instance data + # frame start and end range + index = 0 + for image in collection: + dst_file_num = frame_start + index + dst_file_name = "".join([ + str(event_number), + head, + str(padding % dst_file_num), + tail + ]) + src = os.path.join(staging_dir, image) + dst = os.path.join(full_output_dir, dst_file_name) + self.log.info("Creating temp hardlinks: {}".format(dst)) + self.hardlink_file(src, dst) + new_files.append(dst_file_name) + index += 1 + + self.log.debug("_ new_files: {}".format(new_files)) + + else: + # ffmpeg when single file + new_files = "{}_{}".format(asset, files) + + # frame range + frame_start = repre.get("frameStart") + frame_end = repre.get("frameEnd") + + full_input_path = os.path.join( + staging_dir, files) + + os.path.isdir(full_output_dir) or os.makedirs(full_output_dir) + + full_output_path = os.path.join( + full_output_dir, new_files) + + self.log.debug( + "__ full_input_path: {}".format(full_input_path)) + self.log.debug( + "__ full_output_path: {}".format(full_output_path)) + + # check if audio stream is in input video file + ffprob_cmd = ( + "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams" + " -select_streams a -loglevel error" + ).format(**locals()) + + self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) + audio_check_output = pype.api.subprocess(ffprob_cmd) + self.log.debug( + "audio_check_output: {}".format(audio_check_output)) + + # Fix one frame difference + """ TODO: this is just work-around for issue: 
+
+            # Fix one frame difference
+            """ TODO: this is just work-around for issue:
+            https://github.com/pypeclub/pype/issues/659
+            """
+            frame_duration_extend = 1
+            if audio_check_output:
+                frame_duration_extend = 0
+
+            # translate frame to sec
+            start_sec = float(frame_start) / fps
+            duration_sec = float(
+                (frame_end - frame_start) + frame_duration_extend) / fps
+
+            empty_add = None
+
+            # check if not missing frames at start
+            if (start_sec < 0) or (media_duration < frame_end):
+                # for later switching off `-c:v copy` output arg
+                empty_add = True
+
+                # init empty variables
+                video_empty_start = video_layer_start = ""
+                audio_empty_start = audio_layer_start = ""
+                video_empty_end = video_layer_end = ""
+                audio_empty_end = audio_layer_end = ""
+                audio_input = audio_output = ""
+                v_inp_idx = 0
+                concat_n = 1
+
+                # try to get video native resolution data
+                try:
+                    resolution_output = pype.api.subprocess((
+                        "\"{ffprobe_path}\" -i \"{full_input_path}\""
+                        " -v error "
+                        "-select_streams v:0 -show_entries "
+                        "stream=width,height -of csv=s=x:p=0"
+                    ).format(**locals()))
+
+                    x, y = resolution_output.split("x")
+                    resolution_width = int(x)
+                    resolution_height = int(y)
+                except Exception as _ex:
+                    self.log.warning(
+                        "Video native resolution is untraceable: {}".format(
+                            _ex))
+
+                if audio_check_output:
+                    # adding input for empty audio
+                    input_args.append("-f lavfi -i anullsrc")
+
+                    # define audio empty concat variables
+                    audio_input = "[1:a]"
+                    audio_output = ":a=1"
+                    v_inp_idx = 1
+
+                # adding input for video black frame
+                input_args.append((
+                    "-f lavfi -i \"color=c=black:"
+                    "s={resolution_width}x{resolution_height}:r={fps}\""
+                ).format(**locals()))
+
+                if (start_sec < 0):
+                    # recalculate input video timing
+                    empty_start_dur = abs(start_sec)
+                    start_sec = 0
+                    duration_sec = float(frame_end - (
+                        frame_start + (empty_start_dur * fps)) + 1) / fps
+
+                    # define starting empty video concat variables
+                    video_empty_start = (
+                        "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];"  # noqa
+                    ).format(**locals())
+                    video_layer_start = "[gv0]"
+
+                    if audio_check_output:
+                        # define starting empty audio concat variables
+                        audio_empty_start = (
+                            "[0]atrim=duration={empty_start_dur}[ga0];"
+                        ).format(**locals())
+                        audio_layer_start = "[ga0]"
+
+                    # alter concat number of clips
+                    concat_n += 1
+
+                # check if not missing frames at the end
+                if (media_duration < frame_end):
+                    # recalculate timing
+                    empty_end_dur = float(
+                        frame_end - media_duration + 1) / fps
+                    duration_sec = float(
+                        media_duration - frame_start) / fps
+
+                    # define ending empty video concat variables
+                    video_empty_end = (
+                        "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];"
+                    ).format(**locals())
+                    video_layer_end = "[gv1]"
+
+                    if audio_check_output:
+                        # define ending empty audio concat variables
+                        audio_empty_end = (
+                            "[0]atrim=duration={empty_end_dur}[ga1];"
+                        ).format(**locals())
+                        audio_layer_end = "[ga0]"
+
+                    # alter concat number of clips
+                    concat_n += 1
+
+                # concatenating black frames together
+                output_args.append((
+                    "-filter_complex \""
+                    "{audio_empty_start}"
+                    "{video_empty_start}"
+                    "{audio_empty_end}"
+                    "{video_empty_end}"
+                    "{video_layer_start}{audio_layer_start}[1:v]{audio_input}"  # noqa
+                    "{video_layer_end}{audio_layer_end}"
+                    "concat=n={concat_n}:v=1{audio_output}\""
+                ).format(**locals()))
+
+            # append ffmpeg input video clip
+            input_args.append("-ss {:0.2f}".format(start_sec))
+            input_args.append("-t {:0.2f}".format(duration_sec))
+            input_args.append("-i \"{}\"".format(full_input_path))
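+
+            # NOTE: `-ss`/`-t` are given before `-i`, so ffmpeg seeks on the
+            # input side; this is fast, but it is only frame-accurate when
+            # the output is re-encoded rather than stream-copied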
+
+            # add copy audio video codec if only shortening clip
+            if ("_cut-bigger" in tags) and (not empty_add):
+                output_args.append("-c:v copy")
+
+            # make sure there is no frame-to-frame compression
+            output_args.append("-intra")
+
+            # output filename
+            output_args.append("-y \"{}\"".format(full_output_path))
+
+            mov_args = [
+                "\"{}\"".format(ffmpeg_path),
+                " ".join(input_args),
+                " ".join(output_args)
+            ]
+            subprcs_cmd = " ".join(mov_args)
+
+            # run subprocess
+            self.log.debug("Executing: {}".format(subprcs_cmd))
+            output = pype.api.subprocess(subprcs_cmd)
+            self.log.debug("Output: {}".format(output))
+
+            repre_new = {
+                "files": new_files,
+                "stagingDir": full_output_dir,
+                "frameStart": frame_start,
+                "frameEnd": frame_end,
+                "frameStartFtrack": frame_start,
+                "frameEndFtrack": frame_end,
+                "step": 1,
+                "fps": fps,
+                "name": "cut_up_preview",
+                "tags": ["review"] + self.tags_addition,
+                "ext": ext,
+                "anatomy_template": "publish"
+            }
+
+            representations_new.append(repre_new)
+
+        for repre in representations_new:
+            if ("delete" in repre.get("tags", [])) and (
+                    "cut_up_preview" not in repre["name"]):
+                representations_new.remove(repre)
+
+        self.log.debug(
+            "Representations: {}".format(representations_new))
+        instance.data["representations"] = representations_new
+
+    def hardlink_file(self, src, dst):
+        dirname = os.path.dirname(dst)
+
+        # make sure the destination folder exists
+        try:
+            os.makedirs(dirname)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                self.log.critical("An unexpected error occurred.")
+                six.reraise(*sys.exc_info())
+
+        # create hardlinked file
+        try:
+            filelink.create(src, dst, filelink.HARDLINK)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                self.log.critical("An unexpected error occurred.")
+                six.reraise(*sys.exc_info())
+
+    def create_representation(self, otio_clip, to_otio_range, instance):
+        to_tl_start, to_tl_end = pype.lib.otio_range_to_frame_range(
+            to_otio_range)
+        tl_start, tl_end = pype.lib.otio_range_to_frame_range(
+            otio_clip.range_in_parent())
+        source_start, source_end = pype.lib.otio_range_to_frame_range(
+            otio_clip.source_range)
+        media_reference = otio_clip.media_reference
+        metadata = media_reference.metadata
+        mr_start, mr_end = pype.lib.otio_range_to_frame_range(
+            media_reference.available_range)
+        path = media_reference.target_url
+        reference_frame_start = (mr_start + source_start) + (
+            to_tl_start - tl_start)
+        reference_frame_end = (mr_start + source_end) - (
+            tl_end - to_tl_end)
+
+        base_name = os.path.basename(path)
+        staging_dir = os.path.dirname(path)
+        ext = os.path.splitext(base_name)[1][1:]
+
+        if metadata.get("isSequence"):
+            files = list()
+            padding = metadata["padding"]
+            base_name = pype.lib.convert_to_padded_path(base_name, padding)
+            for index in range(
+                    reference_frame_start, (reference_frame_end + 1)):
+                file_name = base_name % index
+                path_test = os.path.join(staging_dir, file_name)
+                if os.path.exists(path_test):
+                    files.append(file_name)
+
+            self.log.debug(files)
+        else:
+            files = base_name
+
+        representation = {
+            "ext": ext,
+            "name": ext,
+            "files": files,
+            "frameStart": reference_frame_start,
+            "frameEnd": reference_frame_end,
+            "stagingDir": staging_dir
+        }
+        self.log.debug(representation)
diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py
index 9283a7b1a6..ee32eac09e 100644
--- a/pype/plugins/resolve/publish/collect_instances.py
+++ b/pype/plugins/resolve/publish/collect_instances.py
@@ -8,7 +8,7 @@ from pprint import pformat
 class CollectInstances(pyblish.api.ContextPlugin):
     """Collect all Track items 
selection.""" - order = pyblish.api.CollectorOrder - 0.5 + order = pyblish.api.CollectorOrder - 0.59 label = "Collect Instances" hosts = ["resolve"] @@ -64,9 +64,7 @@ class CollectInstances(pyblish.api.ContextPlugin): "asset": asset, "item": track_item, "families": families, - "publish": resolve.get_publish_attribute(track_item), - # tags - "tags": tag_data, + "publish": resolve.get_publish_attribute(track_item) }) # otio clip data diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py index 9873e1ca97..1c6d682f3f 100644 --- a/pype/plugins/resolve/publish/collect_workfile.py +++ b/pype/plugins/resolve/publish/collect_workfile.py @@ -13,7 +13,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): """Inject the current working file into context""" label = "Collect Workfile" - order = pyblish.api.CollectorOrder - 0.501 + order = pyblish.api.CollectorOrder - 0.6 def process(self, context): From 7c0a0de6d173ab6380f7c3289d64ef667514e5ad Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 8 Dec 2020 16:34:22 +0100 Subject: [PATCH 031/198] feat(resolve): fixing export import otio --- .../resolve/utility_scripts/OTIO_export.py | 192 +++++------------- .../resolve/utility_scripts/OTIO_import.py | 2 +- 2 files changed, 47 insertions(+), 147 deletions(-) diff --git a/pype/hosts/resolve/utility_scripts/OTIO_export.py b/pype/hosts/resolve/utility_scripts/OTIO_export.py index 7569ba4c42..3e08cb370d 100644 --- a/pype/hosts/resolve/utility_scripts/OTIO_export.py +++ b/pype/hosts/resolve/utility_scripts/OTIO_export.py @@ -1,7 +1,7 @@ #!/usr/bin/env python import os import sys -import opentimelineio as otio +from pype.hosts.resolve.otio import davinci_export as otio_export resolve = bmd.scriptapp("Resolve") fu = resolve.Fusion() @@ -9,155 +9,44 @@ fu = resolve.Fusion() ui = fu.UIManager disp = bmd.UIDispatcher(fu.UIManager) -TRACK_TYPES = { - "video": otio.schema.TrackKind.Video, - "audio": otio.schema.TrackKind.Audio -} - -print(resolve) - -def _create_rational_time(frame, fps): - return otio.opentime.RationalTime( - float(frame), - float(fps) - ) - - -def _create_time_range(start, duration, fps): - return otio.opentime.TimeRange( - start_time=_create_rational_time(start, fps), - duration=_create_rational_time(duration, fps) - ) - - -def _create_reference(mp_item): - return otio.schema.ExternalReference( - target_url=mp_item.GetClipProperty("File Path").get("File Path"), - available_range=_create_time_range( - mp_item.GetClipProperty("Start").get("Start"), - mp_item.GetClipProperty("Frames").get("Frames"), - mp_item.GetClipProperty("FPS").get("FPS") - ) - ) - - -def _create_markers(tl_item, frame_rate): - tl_markers = tl_item.GetMarkers() - markers = [] - for m_frame in tl_markers: - markers.append( - otio.schema.Marker( - name=tl_markers[m_frame]["name"], - marked_range=_create_time_range( - m_frame, - tl_markers[m_frame]["duration"], - frame_rate - ), - color=tl_markers[m_frame]["color"].upper(), - metadata={"Resolve": {"note": tl_markers[m_frame]["note"]}} - ) - ) - return markers - - -def _create_clip(tl_item): - mp_item = tl_item.GetMediaPoolItem() - frame_rate = mp_item.GetClipProperty("FPS").get("FPS") - clip = otio.schema.Clip( - name=tl_item.GetName(), - source_range=_create_time_range( - tl_item.GetLeftOffset(), - tl_item.GetDuration(), - frame_rate - ), - media_reference=_create_reference(mp_item) - ) - for marker in _create_markers(tl_item, frame_rate): - clip.markers.append(marker) - return clip - - -def _create_gap(gap_start, 
clip_start, tl_start_frame, frame_rate): - return otio.schema.Gap( - source_range=_create_time_range( - gap_start, - (clip_start - tl_start_frame) - gap_start, - frame_rate - ) - ) - - -def _create_ot_timeline(output_path): - if not output_path: - return - project_manager = resolve.GetProjectManager() - current_project = project_manager.GetCurrentProject() - dr_timeline = current_project.GetCurrentTimeline() - ot_timeline = otio.schema.Timeline(name=dr_timeline.GetName()) - for track_type in list(TRACK_TYPES.keys()): - track_count = dr_timeline.GetTrackCount(track_type) - for track_index in range(1, int(track_count) + 1): - ot_track = otio.schema.Track( - name="{}{}".format(track_type[0].upper(), track_index), - kind=TRACK_TYPES[track_type] - ) - tl_items = dr_timeline.GetItemListInTrack(track_type, track_index) - for tl_item in tl_items: - if tl_item.GetMediaPoolItem() is None: - continue - clip_start = tl_item.GetStart() - dr_timeline.GetStartFrame() - if clip_start > ot_track.available_range().duration.value: - ot_track.append( - _create_gap( - ot_track.available_range().duration.value, - tl_item.GetStart(), - dr_timeline.GetStartFrame(), - current_project.GetSetting("timelineFrameRate") - ) - ) - ot_track.append(_create_clip(tl_item)) - ot_timeline.tracks.append(ot_track) - otio.adapters.write_to_file( - ot_timeline, "{}/{}.otio".format(output_path, dr_timeline.GetName())) - title_font = ui.Font({"PixelSize": 18}) dlg = disp.AddWindow( - { - "WindowTitle": "Export OTIO", - "ID": "OTIOwin", - "Geometry": [250, 250, 250, 100], - "Spacing": 0, - "Margin": 10 - }, - [ - ui.VGroup( - { - "Spacing": 2 - }, - [ - ui.Button( + { + "WindowTitle": "Export OTIO", + "ID": "OTIOwin", + "Geometry": [250, 250, 250, 100], + "Spacing": 0, + "Margin": 10 + }, + [ + ui.VGroup( { - "ID": "exportfilebttn", - "Text": "Select Destination", - "Weight": 1.25, - "ToolTip": "Choose where to save the otio", - "Flat": False - } - ), - ui.VGap(), - ui.Button( - { - "ID": "exportbttn", - "Text": "Export", - "Weight": 2, - "ToolTip": "Export the current timeline", - "Flat": False - } + "Spacing": 2 + }, + [ + ui.Button( + { + "ID": "exportfilebttn", + "Text": "Select Destination", + "Weight": 1.25, + "ToolTip": "Choose where to save the otio", + "Flat": False + } + ), + ui.VGap(), + ui.Button( + { + "ID": "exportbttn", + "Text": "Export", + "Weight": 2, + "ToolTip": "Export the current timeline", + "Flat": False + } + ) + ] ) - ] - ) - ] + ] ) itm = dlg.GetItems() @@ -168,7 +57,18 @@ def _close_window(event): def _export_button(event): - _create_ot_timeline(itm["exportfilebttn"].Text) + pm = resolve.GetProjectManager() + project = pm.GetCurrentProject() + fps = project.GetSetting("timelineFrameRate") + timeline = project.GetCurrentTimeline() + otio_timeline = otio_export.create_otio_timeline(timeline, fps) + otio_path = os.path.join( + itm["exportfilebttn"].Text, + timeline.GetName() + ".otio") + print(otio_path) + otio_export.write_to_file( + otio_timeline, + otio_path) _close_window(None) diff --git a/pype/hosts/resolve/utility_scripts/OTIO_import.py b/pype/hosts/resolve/utility_scripts/OTIO_import.py index 2266fd4b2b..879f7eb0b5 100644 --- a/pype/hosts/resolve/utility_scripts/OTIO_import.py +++ b/pype/hosts/resolve/utility_scripts/OTIO_import.py @@ -1,7 +1,7 @@ #!/usr/bin/env python import os import sys -from pype.hosts.resolve.otio import davinci_resolve_import as otio_import +from pype.hosts.resolve.otio import davinci_import as otio_import resolve = bmd.scriptapp("Resolve") fu = resolve.Fusion() From 
57c595371f1fd851dec25116acce1ea7689fd4a4 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Wed, 9 Dec 2020 17:20:37 +0100
Subject: [PATCH 032/198] feat(resolve): update otio export module

- refactor timeline creation from project
- add metadata, particularly width and height

---
 pype/hosts/resolve/otio/davinci_export.py | 43 +++++++++++++++++++----
 1 file changed, 37 insertions(+), 6 deletions(-)

diff --git a/pype/hosts/resolve/otio/davinci_export.py b/pype/hosts/resolve/otio/davinci_export.py
index cffb58f960..7244544183 100644
--- a/pype/hosts/resolve/otio/davinci_export.py
+++ b/pype/hosts/resolve/otio/davinci_export.py
@@ -26,7 +26,7 @@ def create_otio_time_range(start_frame, frame_duration, fps):
 
 
 def create_otio_reference(media_pool_item):
-    metadata = dict()
+    metadata = _get_metadata_media_pool_item(media_pool_item)
     mp_clip_property = media_pool_item.GetClipProperty()
     path = mp_clip_property["File Path"]
     reformat_path = utils.get_reformated_path(path, padded=False)
@@ -142,16 +142,44 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
     )
 
 
-def _create_otio_timeline(timeline, fps):
+def _create_otio_timeline(project, timeline, fps):
+    metadata = _get_timeline_metadata(project, timeline)
     start_time = create_otio_rational_time(
         timeline.GetStartFrame(), fps)
     otio_timeline = otio.schema.Timeline(
         name=timeline.GetName(),
-        global_start_time=start_time
+        global_start_time=start_time,
+        metadata=metadata
     )
     return otio_timeline
 
 
+def _get_timeline_metadata(project, timeline):
+    media_pool = project.GetMediaPool()
+    root_folder = media_pool.GetRootFolder()
+    ls_folder = root_folder.GetClipList()
+    timeline = project.GetCurrentTimeline()
+    timeline_name = timeline.GetName()
+    for tl in ls_folder:
+        if tl.GetName() not in timeline_name:
+            continue
+        return _get_metadata_media_pool_item(tl)
+
+
+def _get_metadata_media_pool_item(media_pool_item):
+    data = dict()
+    data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
+    clip_property = media_pool_item.GetClipProperty() or {}
+    for name, value in clip_property.items():
+        if "Resolution" in name and "" != value:
+            width, height = value.split("x")
+            data.update({
+                "width": int(width),
+                "height": int(height)
+            })
+    return data
+
+
 def create_otio_track(track_type, track_name):
     return otio.schema.Track(
         name=track_name,
@@ -184,12 +212,15 @@ def add_otio_metadata(otio_item, media_pool_item, **kwargs):
         otio_item.metadata.update({key: value})
 
 
-def create_otio_timeline(timeline, fps):
+def create_otio_timeline(resolve_project):
+
     # get current timeline
-    self.project_fps = fps
+    self.project_fps = resolve_project.GetSetting("timelineFrameRate")
+    timeline = resolve_project.GetCurrentTimeline()
 
     # convert timeline to otio
-    otio_timeline = _create_otio_timeline(timeline, self.project_fps)
+    otio_timeline = _create_otio_timeline(
+        resolve_project, timeline, self.project_fps)
 
     # loop all defined track types
     for track_type in list(self.track_types.keys()):

From 7ea9080eb82f5828cc7e1359c7638f09a77f8032 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Wed, 9 Dec 2020 17:25:53 +0100
Subject: [PATCH 033/198] feat(resolve): publishing otio wip

---
 .../{Pype_menu.py => __PYPE__MENU__.py}       |   0
 pype/hosts/resolve/utility_scripts/test.py    |  25 +-
 .../global/publish/collect_otio_review.py     |   2 +-
 .../global/publish/extract_otio_review.py     | 761 +++++++++---------
 .../resolve/publish/collect_instances.py      |  25 +-
 .../resolve/publish/collect_workfile.py       |   3 +-
 6 files changed, 420 insertions(+), 396 deletions(-)
 rename 
pype/hosts/resolve/utility_scripts/{Pype_menu.py => __PYPE__MENU__.py} (100%) diff --git a/pype/hosts/resolve/utility_scripts/Pype_menu.py b/pype/hosts/resolve/utility_scripts/__PYPE__MENU__.py similarity index 100% rename from pype/hosts/resolve/utility_scripts/Pype_menu.py rename to pype/hosts/resolve/utility_scripts/__PYPE__MENU__.py diff --git a/pype/hosts/resolve/utility_scripts/test.py b/pype/hosts/resolve/utility_scripts/test.py index 69dc4768bd..a76e4dc501 100644 --- a/pype/hosts/resolve/utility_scripts/test.py +++ b/pype/hosts/resolve/utility_scripts/test.py @@ -1,19 +1,24 @@ #! python3 import sys -from pype.api import Logger import DaVinciResolveScript as bmdvr -log = Logger().get_logger(__name__) - - def main(): - import pype.hosts.resolve as bmdvr - bm = bmdvr.utils.get_resolve_module() - log.info(f"blackmagicmodule: {bm}") - - -print(f"_>> bmdvr.scriptapp(Resolve): {bmdvr.scriptapp('Resolve')}") + resolve = bmdvr.scriptapp('Resolve') + print(f"resolve: {resolve}") + project_manager = resolve.GetProjectManager() + project = project_manager.GetCurrentProject() + media_pool = project.GetMediaPool() + root_folder = media_pool.GetRootFolder() + ls_folder = root_folder.GetClipList() + timeline = project.GetCurrentTimeline() + timeline_name = timeline.GetName() + for tl in ls_folder: + if tl.GetName() not in timeline_name: + continue + print(tl.GetName()) + print(tl.GetMetadata()) + print(tl.GetClipProperty()) if __name__ == "__main__": diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py index 86ef469b71..30240f456e 100644 --- a/pype/plugins/global/publish/collect_otio_review.py +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -47,7 +47,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin): # add found clips to list otio_review_clips.append(_otio_clip) - instance.data["otioReviewClip"] = otio_review_clips + instance.data["otioReviewClips"] = otio_review_clips self.log.debug( "_ otio_review_clips: {}".format(otio_review_clips)) diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py index 9d43e9a9a8..f829659dff 100644 --- a/pype/plugins/global/publish/extract_otio_review.py +++ b/pype/plugins/global/publish/extract_otio_review.py @@ -12,392 +12,393 @@ class ExtractOTIOReview(pype.api.Extractor): """Extract OTIO timeline into one concuted video file""" # order = api.ExtractorOrder - order = api.CollectorOrder + 0.1023 + order = api.CollectorOrder label = "Extract OTIO review" hosts = ["resolve"] - families = ["review_otio"] - - # presets - tags_addition = [] + families = ["review"] def process(self, instance): # self.create_representation( # _otio_clip, otio_clip_range, instance) - """" - Expecting (instance.data): - otioClip (otio.schema.clip): clip from otio timeline - otioReviewClips (list): list with instances of otio.schema.clip - or otio.schema.gap + # """ + # Expecting (instance.data): + # otioClip (otio.schema.clip): clip from otio timeline + # otioReviewClips (list): list with instances of otio.schema.clip + # or otio.schema.gap + # + # Process description: + # Comparing `otioClip` parent range with `otioReviewClip` parent range will result in frame range witch is the trimmed cut. In case more otio clips or otio gaps are found in otioReviewClips then ffmpeg will generate multiple clips and those are then concuted together to one video file or image sequence. 
Resulting files are then added to instance as representation ready for review family plugins. + # """" - Process description: - Comparing `otioClip` parent range with `otioReviewClip` parent range will result in frame range witch is the trimmed cut. In case more otio clips or otio gaps are found in otioReviewClips then ffmpeg will generate multiple clips and those are then concuted together to one video file or image sequence. Resulting files are then added to instance as representation ready for review family plugins. - """" - - - - inst_data = instance.data - asset = inst_data['asset'] - item = inst_data['item'] - event_number = int(item.eventNumber()) - - # get representation and loop them - representations = inst_data["representations"] - - # check if sequence - is_sequence = inst_data["isSequence"] - - # get resolution default - resolution_width = inst_data["resolutionWidth"] - resolution_height = inst_data["resolutionHeight"] - - # frame range data - media_duration = inst_data["mediaDuration"] - - ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") - - # filter out mov and img sequences - representations_new = representations[:] - for repre in representations: - input_args = list() - output_args = list() - - tags = repre.get("tags", []) - - # check if supported tags are in representation for activation - filter_tag = False - for tag in ["_cut-bigger", "_cut-smaller"]: - if tag in tags: - filter_tag = True - break - if not filter_tag: - continue - - self.log.debug("__ repre: {}".format(repre)) - - files = repre.get("files") - staging_dir = repre.get("stagingDir") - fps = repre.get("fps") - ext = repre.get("ext") - - # make paths - full_output_dir = os.path.join( - staging_dir, "cuts") - - if is_sequence: - new_files = list() - - # frame range delivery included handles - frame_start = ( - inst_data["frameStart"] - inst_data["handleStart"]) - frame_end = ( - inst_data["frameEnd"] + inst_data["handleEnd"]) - self.log.debug("_ frame_start: {}".format(frame_start)) - self.log.debug("_ frame_end: {}".format(frame_end)) - - # make collection from input files list - collections, remainder = clique.assemble(files) - collection = collections.pop() - self.log.debug("_ collection: {}".format(collection)) - - # name components - head = collection.format("{head}") - padding = collection.format("{padding}") - tail = collection.format("{tail}") - self.log.debug("_ head: {}".format(head)) - self.log.debug("_ padding: {}".format(padding)) - self.log.debug("_ tail: {}".format(tail)) - - # make destination file with instance data - # frame start and end range - index = 0 - for image in collection: - dst_file_num = frame_start + index - dst_file_name = "".join([ - str(event_number), - head, - str(padding % dst_file_num), - tail - ]) - src = os.path.join(staging_dir, image) - dst = os.path.join(full_output_dir, dst_file_name) - self.log.info("Creating temp hardlinks: {}".format(dst)) - self.hardlink_file(src, dst) - new_files.append(dst_file_name) - index += 1 - - self.log.debug("_ new_files: {}".format(new_files)) - - else: - # ffmpeg when single file - new_files = "{}_{}".format(asset, files) - - # frame range - frame_start = repre.get("frameStart") - frame_end = repre.get("frameEnd") - - full_input_path = os.path.join( - staging_dir, files) - - os.path.isdir(full_output_dir) or os.makedirs(full_output_dir) - - full_output_path = os.path.join( - full_output_dir, new_files) - - self.log.debug( - "__ full_input_path: {}".format(full_input_path)) - 
self.log.debug( - "__ full_output_path: {}".format(full_output_path)) - - # check if audio stream is in input video file - ffprob_cmd = ( - "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams" - " -select_streams a -loglevel error" - ).format(**locals()) - - self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) - audio_check_output = pype.api.subprocess(ffprob_cmd) - self.log.debug( - "audio_check_output: {}".format(audio_check_output)) - - # Fix one frame difference - """ TODO: this is just work-around for issue: - https://github.com/pypeclub/pype/issues/659 - """ - frame_duration_extend = 1 - if audio_check_output: - frame_duration_extend = 0 - - # translate frame to sec - start_sec = float(frame_start) / fps - duration_sec = float( - (frame_end - frame_start) + frame_duration_extend) / fps - - empty_add = None - - # check if not missing frames at start - if (start_sec < 0) or (media_duration < frame_end): - # for later swithing off `-c:v copy` output arg - empty_add = True - - # init empty variables - video_empty_start = video_layer_start = "" - audio_empty_start = audio_layer_start = "" - video_empty_end = video_layer_end = "" - audio_empty_end = audio_layer_end = "" - audio_input = audio_output = "" - v_inp_idx = 0 - concat_n = 1 - - # try to get video native resolution data - try: - resolution_output = pype.api.subprocess(( - "\"{ffprobe_path}\" -i \"{full_input_path}\"" - " -v error " - "-select_streams v:0 -show_entries " - "stream=width,height -of csv=s=x:p=0" - ).format(**locals())) - - x, y = resolution_output.split("x") - resolution_width = int(x) - resolution_height = int(y) - except Exception as _ex: - self.log.warning( - "Video native resolution is untracable: {}".format( - _ex)) - - if audio_check_output: - # adding input for empty audio - input_args.append("-f lavfi -i anullsrc") - - # define audio empty concat variables - audio_input = "[1:a]" - audio_output = ":a=1" - v_inp_idx = 1 - - # adding input for video black frame - input_args.append(( - "-f lavfi -i \"color=c=black:" - "s={resolution_width}x{resolution_height}:r={fps}\"" - ).format(**locals())) - - if (start_sec < 0): - # recalculate input video timing - empty_start_dur = abs(start_sec) - start_sec = 0 - duration_sec = float(frame_end - ( - frame_start + (empty_start_dur * fps)) + 1) / fps - - # define starting empty video concat variables - video_empty_start = ( - "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" # noqa - ).format(**locals()) - video_layer_start = "[gv0]" - - if audio_check_output: - # define starting empty audio concat variables - audio_empty_start = ( - "[0]atrim=duration={empty_start_dur}[ga0];" - ).format(**locals()) - audio_layer_start = "[ga0]" - - # alter concat number of clips - concat_n += 1 - - # check if not missing frames at the end - if (media_duration < frame_end): - # recalculate timing - empty_end_dur = float( - frame_end - media_duration + 1) / fps - duration_sec = float( - media_duration - frame_start) / fps - - # define ending empty video concat variables - video_empty_end = ( - "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];" - ).format(**locals()) - video_layer_end = "[gv1]" - - if audio_check_output: - # define ending empty audio concat variables - audio_empty_end = ( - "[0]atrim=duration={empty_end_dur}[ga1];" - ).format(**locals()) - audio_layer_end = "[ga0]" - - # alter concat number of clips - concat_n += 1 - - # concatting black frame togather - output_args.append(( - "-filter_complex \"" - "{audio_empty_start}" - "{video_empty_start}" - "{audio_empty_end}" 
- "{video_empty_end}" - "{video_layer_start}{audio_layer_start}[1:v]{audio_input}" # noqa - "{video_layer_end}{audio_layer_end}" - "concat=n={concat_n}:v=1{audio_output}\"" - ).format(**locals())) - - # append ffmpeg input video clip - input_args.append("-ss {:0.2f}".format(start_sec)) - input_args.append("-t {:0.2f}".format(duration_sec)) - input_args.append("-i \"{}\"".format(full_input_path)) - - # add copy audio video codec if only shortening clip - if ("_cut-bigger" in tags) and (not empty_add): - output_args.append("-c:v copy") - - # make sure it is having no frame to frame comprassion - output_args.append("-intra") - - # output filename - output_args.append("-y \"{}\"".format(full_output_path)) - - mov_args = [ - "\"{}\"".format(ffmpeg_path), - " ".join(input_args), - " ".join(output_args) - ] - subprcs_cmd = " ".join(mov_args) - - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) - self.log.debug("Output: {}".format(output)) - - repre_new = { - "files": new_files, - "stagingDir": full_output_dir, - "frameStart": frame_start, - "frameEnd": frame_end, - "frameStartFtrack": frame_start, - "frameEndFtrack": frame_end, - "step": 1, - "fps": fps, - "name": "cut_up_preview", - "tags": ["review"] + self.tags_addition, - "ext": ext, - "anatomy_template": "publish" - } - - representations_new.append(repre_new) - - for repre in representations_new: - if ("delete" in repre.get("tags", [])) and ( - "cut_up_preview" not in repre["name"]): - representations_new.remove(repre) - - self.log.debug( - "Representations: {}".format(representations_new)) - instance.data["representations"] = representations_new - - def hardlink_file(self, src, dst): - dirname = os.path.dirname(dst) - - # make sure the destination folder exist - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - six.reraise(*sys.exc_info()) - - # create hardlined file - try: - filelink.create(src, dst, filelink.HARDLINK) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - six.reraise(*sys.exc_info()) - - def create_representation(self, otio_clip, to_otio_range, instance): - to_tl_start, to_tl_end = pype.lib.otio_range_to_frame_range( - to_otio_range) - tl_start, tl_end = pype.lib.otio_range_to_frame_range( - otio_clip.range_in_parent()) - source_start, source_end = pype.lib.otio_range_to_frame_range( - otio_clip.source_range) + otio_clip = instance.data["otioClip"] media_reference = otio_clip.media_reference - metadata = media_reference.metadata - mr_start, mr_end = pype.lib.otio_range_to_frame_range( - media_reference.available_range) - path = media_reference.target_url - reference_frame_start = (mr_start + source_start) + ( - to_tl_start - tl_start) - reference_frame_end = (mr_start + source_end) - ( - tl_end - to_tl_end) + self.log.debug(media_reference.metadata) + otio_review_clips = instance.data["otioReviewClips"] + self.log.debug(otio_review_clips) - base_name = os.path.basename(path) - staging_dir = os.path.dirname(path) - ext = os.path.splitext(base_name)[1][1:] - - if metadata.get("isSequence"): - files = list() - padding = metadata["padding"] - base_name = pype.lib.convert_to_padded_path(base_name, padding) - for index in range( - reference_frame_start, (reference_frame_end + 1)): - file_name = base_name % index - path_test = os.path.join(staging_dir, file_name) - if os.path.exists(path_test): - 
files.append(file_name) - - self.log.debug(files) - else: - files = base_name - - representation = { - "ext": ext, - "name": ext, - "files": files, - "frameStart": reference_frame_start, - "frameEnd": reference_frame_end, - "stagingDir": staging_dir - } - self.log.debug(representation) + # inst_data = instance.data + # asset = inst_data['asset'] + # item = inst_data['item'] + # event_number = int(item.eventNumber()) + # + # # get representation and loop them + # representations = inst_data["representations"] + # + # # check if sequence + # is_sequence = inst_data["isSequence"] + # + # # get resolution default + # resolution_width = inst_data["resolutionWidth"] + # resolution_height = inst_data["resolutionHeight"] + # + # # frame range data + # media_duration = inst_data["mediaDuration"] + # + # ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + # ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") + # + # # filter out mov and img sequences + # representations_new = representations[:] + # for repre in representations: + # input_args = list() + # output_args = list() + # + # tags = repre.get("tags", []) + # + # # check if supported tags are in representation for activation + # filter_tag = False + # for tag in ["_cut-bigger", "_cut-smaller"]: + # if tag in tags: + # filter_tag = True + # break + # if not filter_tag: + # continue + # + # self.log.debug("__ repre: {}".format(repre)) + # + # files = repre.get("files") + # staging_dir = repre.get("stagingDir") + # fps = repre.get("fps") + # ext = repre.get("ext") + # + # # make paths + # full_output_dir = os.path.join( + # staging_dir, "cuts") + # + # if is_sequence: + # new_files = list() + # + # # frame range delivery included handles + # frame_start = ( + # inst_data["frameStart"] - inst_data["handleStart"]) + # frame_end = ( + # inst_data["frameEnd"] + inst_data["handleEnd"]) + # self.log.debug("_ frame_start: {}".format(frame_start)) + # self.log.debug("_ frame_end: {}".format(frame_end)) + # + # # make collection from input files list + # collections, remainder = clique.assemble(files) + # collection = collections.pop() + # self.log.debug("_ collection: {}".format(collection)) + # + # # name components + # head = collection.format("{head}") + # padding = collection.format("{padding}") + # tail = collection.format("{tail}") + # self.log.debug("_ head: {}".format(head)) + # self.log.debug("_ padding: {}".format(padding)) + # self.log.debug("_ tail: {}".format(tail)) + # + # # make destination file with instance data + # # frame start and end range + # index = 0 + # for image in collection: + # dst_file_num = frame_start + index + # dst_file_name = "".join([ + # str(event_number), + # head, + # str(padding % dst_file_num), + # tail + # ]) + # src = os.path.join(staging_dir, image) + # dst = os.path.join(full_output_dir, dst_file_name) + # self.log.info("Creating temp hardlinks: {}".format(dst)) + # self.hardlink_file(src, dst) + # new_files.append(dst_file_name) + # index += 1 + # + # self.log.debug("_ new_files: {}".format(new_files)) + # + # else: + # # ffmpeg when single file + # new_files = "{}_{}".format(asset, files) + # + # # frame range + # frame_start = repre.get("frameStart") + # frame_end = repre.get("frameEnd") + # + # full_input_path = os.path.join( + # staging_dir, files) + # + # os.path.isdir(full_output_dir) or os.makedirs(full_output_dir) + # + # full_output_path = os.path.join( + # full_output_dir, new_files) + # + # self.log.debug( + # "__ full_input_path: {}".format(full_input_path)) + # self.log.debug( + # "__ 
full_output_path: {}".format(full_output_path)) + # + # # check if audio stream is in input video file + # ffprob_cmd = ( + # "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams" + # " -select_streams a -loglevel error" + # ).format(**locals()) + # + # self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) + # audio_check_output = pype.api.subprocess(ffprob_cmd) + # self.log.debug( + # "audio_check_output: {}".format(audio_check_output)) + # + # # Fix one frame difference + # """ TODO: this is just work-around for issue: + # https://github.com/pypeclub/pype/issues/659 + # """ + # frame_duration_extend = 1 + # if audio_check_output: + # frame_duration_extend = 0 + # + # # translate frame to sec + # start_sec = float(frame_start) / fps + # duration_sec = float( + # (frame_end - frame_start) + frame_duration_extend) / fps + # + # empty_add = None + # + # # check if not missing frames at start + # if (start_sec < 0) or (media_duration < frame_end): + # # for later swithing off `-c:v copy` output arg + # empty_add = True + # + # # init empty variables + # video_empty_start = video_layer_start = "" + # audio_empty_start = audio_layer_start = "" + # video_empty_end = video_layer_end = "" + # audio_empty_end = audio_layer_end = "" + # audio_input = audio_output = "" + # v_inp_idx = 0 + # concat_n = 1 + # + # # try to get video native resolution data + # try: + # resolution_output = pype.api.subprocess(( + # "\"{ffprobe_path}\" -i \"{full_input_path}\"" + # " -v error " + # "-select_streams v:0 -show_entries " + # "stream=width,height -of csv=s=x:p=0" + # ).format(**locals())) + # + # x, y = resolution_output.split("x") + # resolution_width = int(x) + # resolution_height = int(y) + # except Exception as _ex: + # self.log.warning( + # "Video native resolution is untracable: {}".format( + # _ex)) + # + # if audio_check_output: + # # adding input for empty audio + # input_args.append("-f lavfi -i anullsrc") + # + # # define audio empty concat variables + # audio_input = "[1:a]" + # audio_output = ":a=1" + # v_inp_idx = 1 + # + # # adding input for video black frame + # input_args.append(( + # "-f lavfi -i \"color=c=black:" + # "s={resolution_width}x{resolution_height}:r={fps}\"" + # ).format(**locals())) + # + # if (start_sec < 0): + # # recalculate input video timing + # empty_start_dur = abs(start_sec) + # start_sec = 0 + # duration_sec = float(frame_end - ( + # frame_start + (empty_start_dur * fps)) + 1) / fps + # + # # define starting empty video concat variables + # video_empty_start = ( + # "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" # noqa + # ).format(**locals()) + # video_layer_start = "[gv0]" + # + # if audio_check_output: + # # define starting empty audio concat variables + # audio_empty_start = ( + # "[0]atrim=duration={empty_start_dur}[ga0];" + # ).format(**locals()) + # audio_layer_start = "[ga0]" + # + # # alter concat number of clips + # concat_n += 1 + # + # # check if not missing frames at the end + # if (media_duration < frame_end): + # # recalculate timing + # empty_end_dur = float( + # frame_end - media_duration + 1) / fps + # duration_sec = float( + # media_duration - frame_start) / fps + # + # # define ending empty video concat variables + # video_empty_end = ( + # "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];" + # ).format(**locals()) + # video_layer_end = "[gv1]" + # + # if audio_check_output: + # # define ending empty audio concat variables + # audio_empty_end = ( + # "[0]atrim=duration={empty_end_dur}[ga1];" + # ).format(**locals()) + # audio_layer_end = 
"[ga0]" + # + # # alter concat number of clips + # concat_n += 1 + # + # # concatting black frame togather + # output_args.append(( + # "-filter_complex \"" + # "{audio_empty_start}" + # "{video_empty_start}" + # "{audio_empty_end}" + # "{video_empty_end}" + # "{video_layer_start}{audio_layer_start}[1:v]{audio_input}" # noqa + # "{video_layer_end}{audio_layer_end}" + # "concat=n={concat_n}:v=1{audio_output}\"" + # ).format(**locals())) + # + # # append ffmpeg input video clip + # input_args.append("-ss {:0.2f}".format(start_sec)) + # input_args.append("-t {:0.2f}".format(duration_sec)) + # input_args.append("-i \"{}\"".format(full_input_path)) + # + # # add copy audio video codec if only shortening clip + # if ("_cut-bigger" in tags) and (not empty_add): + # output_args.append("-c:v copy") + # + # # make sure it is having no frame to frame comprassion + # output_args.append("-intra") + # + # # output filename + # output_args.append("-y \"{}\"".format(full_output_path)) + # + # mov_args = [ + # "\"{}\"".format(ffmpeg_path), + # " ".join(input_args), + # " ".join(output_args) + # ] + # subprcs_cmd = " ".join(mov_args) + # + # # run subprocess + # self.log.debug("Executing: {}".format(subprcs_cmd)) + # output = pype.api.subprocess(subprcs_cmd) + # self.log.debug("Output: {}".format(output)) + # + # repre_new = { + # "files": new_files, + # "stagingDir": full_output_dir, + # "frameStart": frame_start, + # "frameEnd": frame_end, + # "frameStartFtrack": frame_start, + # "frameEndFtrack": frame_end, + # "step": 1, + # "fps": fps, + # "name": "cut_up_preview", + # "tags": ["review"] + self.tags_addition, + # "ext": ext, + # "anatomy_template": "publish" + # } + # + # representations_new.append(repre_new) + # + # for repre in representations_new: + # if ("delete" in repre.get("tags", [])) and ( + # "cut_up_preview" not in repre["name"]): + # representations_new.remove(repre) + # + # self.log.debug( + # "Representations: {}".format(representations_new)) + # instance.data["representations"] = representations_new + # + # def hardlink_file(self, src, dst): + # dirname = os.path.dirname(dst) + # + # # make sure the destination folder exist + # try: + # os.makedirs(dirname) + # except OSError as e: + # if e.errno == errno.EEXIST: + # pass + # else: + # self.log.critical("An unexpected error occurred.") + # six.reraise(*sys.exc_info()) + # + # # create hardlined file + # try: + # filelink.create(src, dst, filelink.HARDLINK) + # except OSError as e: + # if e.errno == errno.EEXIST: + # pass + # else: + # self.log.critical("An unexpected error occurred.") + # six.reraise(*sys.exc_info()) + # + # def create_representation(self, otio_clip, to_otio_range, instance): + # to_tl_start, to_tl_end = pype.lib.otio_range_to_frame_range( + # to_otio_range) + # tl_start, tl_end = pype.lib.otio_range_to_frame_range( + # otio_clip.range_in_parent()) + # source_start, source_end = pype.lib.otio_range_to_frame_range( + # otio_clip.source_range) + # media_reference = otio_clip.media_reference + # metadata = media_reference.metadata + # mr_start, mr_end = pype.lib.otio_range_to_frame_range( + # media_reference.available_range) + # path = media_reference.target_url + # reference_frame_start = (mr_start + source_start) + ( + # to_tl_start - tl_start) + # reference_frame_end = (mr_start + source_end) - ( + # tl_end - to_tl_end) + # + # base_name = os.path.basename(path) + # staging_dir = os.path.dirname(path) + # ext = os.path.splitext(base_name)[1][1:] + # + # if metadata.get("isSequence"): + # files = list() + # padding = 
metadata["padding"]
+    #         base_name = pype.lib.convert_to_padded_path(base_name, padding)
+    #         for index in range(
+    #                 reference_frame_start, (reference_frame_end + 1)):
+    #             file_name = base_name % index
+    #             path_test = os.path.join(staging_dir, file_name)
+    #             if os.path.exists(path_test):
+    #                 files.append(file_name)
+    #
+    #         self.log.debug(files)
+    #     else:
+    #         files = base_name
+    #
+    #     representation = {
+    #         "ext": ext,
+    #         "name": ext,
+    #         "files": files,
+    #         "frameStart": reference_frame_start,
+    #         "frameEnd": reference_frame_end,
+    #         "stagingDir": staging_dir
+    #     }
+    #     self.log.debug(representation)
diff --git a/pype/plugins/resolve/publish/collect_instances.py b/pype/plugins/resolve/publish/collect_instances.py
index ee32eac09e..4693b94e4b 100644
--- a/pype/plugins/resolve/publish/collect_instances.py
+++ b/pype/plugins/resolve/publish/collect_instances.py
@@ -69,10 +69,11 @@ class CollectInstances(pyblish.api.ContextPlugin):
 
             # otio clip data
             otio_data = resolve.get_otio_clip_instance_data(
-                otio_timeline, track_item_data)
+                otio_timeline, track_item_data) or {}
+            data.update(otio_data)
 
-            if otio_data:
-                data.update(otio_data)
+            # add resolution
+            self.get_resolution_to_data(data, context)
 
             # create instance
             instance = context.create_instance(**data)
@@ -80,3 +81,21 @@
             self.log.info("Creating instance: {}".format(instance))
             self.log.debug(
                 "_ instance.data: {}".format(pformat(instance.data)))
+
+    def get_resolution_to_data(self, data, context):
+        assert data.get("otioClip"), "Missing `otioClip` data"
+
+        # solve source resolution option
+        if data.get("sourceResolution", None):
+            otio_clip_metadata = data[
+                "otioClip"].media_reference.metadata
+            data.update({
+                "resolutionWidth": otio_clip_metadata["width"],
+                "resolutionHeight": otio_clip_metadata["height"]
+            })
+        else:
+            otio_tl_metadata = context.data["otioTimeline"].metadata
+            data.update({
+                "resolutionWidth": otio_tl_metadata["width"],
+                "resolutionHeight": otio_tl_metadata["height"]
+            })
diff --git a/pype/plugins/resolve/publish/collect_workfile.py b/pype/plugins/resolve/publish/collect_workfile.py
index 1c6d682f3f..8c8e2b66c8 100644
--- a/pype/plugins/resolve/publish/collect_workfile.py
+++ b/pype/plugins/resolve/publish/collect_workfile.py
@@ -26,8 +26,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
         video_tracks = resolve.get_video_track_names()
 
         # adding otio timeline to context
-        otio_timeline = davinci_export.create_otio_timeline(
-            active_sequence, fps)
+        otio_timeline = davinci_export.create_otio_timeline(project)
 
         instance_data = {
             "name": "{}_{}".format(asset, subset),

From b0b785ac80914576d75c458986b1a03275bfc2aa Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 10 Dec 2020 21:05:53 +0100
Subject: [PATCH 034/198] feat(global): wip editorial otio

---
 pype/hosts/resolve/otio/utils.py              |  23 +-
 .../global/publish/collect_otio_review.py     |  24 +-
 .../global/publish/extract_otio_review.py     | 232 ++++++++++++++++--
 3 files changed, 234 insertions(+), 45 deletions(-)

diff --git a/pype/hosts/resolve/otio/utils.py b/pype/hosts/resolve/otio/utils.py
index 54a052bb56..ec514289f5 100644
--- a/pype/hosts/resolve/otio/utils.py
+++ b/pype/hosts/resolve/otio/utils.py
@@ -1,23 +1,20 @@
 import re
+import opentimelineio as otio
 
 
 def timecode_to_frames(timecode, framerate):
-    parts = zip((
-        3600 * framerate,
-        60 * framerate,
-        framerate, 1
-    ), timecode.split(":"))
-    return sum(
-        f * int(t) for f, t in parts
-    )
+    rt = otio.opentime.from_timecode(timecode, framerate)
+    return int(otio.opentime.to_frames(rt))
 
 
 def frames_to_timecode(frames, framerate):
-    return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format(
-        int(frames / (3600 * framerate)),
-        int(frames / (60 * framerate) % 60),
-        int(frames / framerate % 60),
-        int(frames % framerate))
+    rt = otio.opentime.from_frames(frames, framerate)
+    return otio.opentime.to_timecode(rt)
+
+
+def frames_to_secons(frames, framerate):
+    rt = otio.opentime.from_frames(frames, framerate)
+    return otio.opentime.to_seconds(rt)
 
 
 def get_reformated_path(path, padded=True):
diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py
index 30240f456e..97f6552c51 100644
--- a/pype/plugins/global/publish/collect_otio_review.py
+++ b/pype/plugins/global/publish/collect_otio_review.py
@@ -25,27 +25,21 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
         # get basic variables
         review_track_name = instance.data["review"]
         master_layer = instance.data["masterLayer"]
-        otio_timeline_context = instance.context.data["otioTimeline"]
+        otio_timeline = instance.context.data["otioTimeline"]
         otio_clip = instance.data["otioClip"]
-        otio_clip_range = otio_clip.range_in_parent()
+        otio_tl_range = otio_clip.range_in_parent()
 
+        # skip if master layer is False
         if not master_layer:
             return
 
-        for _otio_clip in otio_timeline_context.each_clip():
-            track_name = _otio_clip.parent().name
-            parent_range = _otio_clip.range_in_parent()
-            if track_name not in review_track_name:
+        for track in otio_timeline.tracks:
+            if review_track_name not in track.name:
                 continue
-            if isinstance(_otio_clip, otio.schema.Clip):
-                test_start, test_end = pype.lib.otio_range_to_frame_range(
-                    parent_range)
-                main_start, main_end = pype.lib.otio_range_to_frame_range(
-                    otio_clip_range)
-                if pype.lib.is_overlapping_otio_ranges(
-                        parent_range, otio_clip_range, strict=False):
-                    # add found clips to list
-                    otio_review_clips.append(_otio_clip)
+            otio_review_clips = otio.algorithms.track_trimmed_to_range(
+                track,
+                otio_tl_range
+            )
 
         instance.data["otioReviewClips"] = otio_review_clips
         self.log.debug(
diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py
index f829659dff..bbb6f7097e 100644
--- a/pype/plugins/global/publish/extract_otio_review.py
+++ b/pype/plugins/global/publish/extract_otio_review.py
@@ -2,14 +2,30 @@ import os
 import sys
 import six
 import errno
-from pyblish import api
-import pype
 import clique
 from avalon.vendor import filelink
 
+import opentimelineio as otio
+from pyblish import api
+import pype
+
 
 class ExtractOTIOReview(pype.api.Extractor):
-    """Extract OTIO timeline into one concuted video file"""
+    """ Extract OTIO timeline into one concatenated video file.
+
+    Expecting (instance.data):
+        otioClip (otio.schema.clip): clip from otio timeline
+        otioReviewClips (list): list with instances of otio.schema.clip
+            or otio.schema.gap
+
+    Process description:
+    Comparing `otioClip` parent range with `otioReviewClip` parent range
+    will result in the frame range which is the trimmed cut. In case more
+    otio clips or otio gaps are found in otioReviewClips then ffmpeg will
+    generate multiple clips and those are then concatenated together to one
+    video file or image sequence. Resulting files are then added to
+    instance as representation ready for review family plugins.
+ """ # order = api.ExtractorOrder order = api.CollectorOrder @@ -17,24 +33,206 @@ class ExtractOTIOReview(pype.api.Extractor): hosts = ["resolve"] families = ["review"] + collections = list() + sequence_workflow = False + + def _trim_available_range(self, avl_range, start, duration, fps): + avl_start = int(avl_range.start_time.value) + avl_durtation = int(avl_range.duration.value) + src_start = int(avl_start + start) + + self.log.debug(f"_ avl_start: {avl_start}") + self.log.debug(f"_ avl_durtation: {avl_durtation}") + self.log.debug(f"_ src_start: {src_start}") + # it only trims to source if + if src_start < avl_start: + if self.sequence_workflow: + gap_range = list(range(src_start, avl_start)) + _collection = self.create_gap_collection( + self.sequence_workflow, -1, _range=gap_range) + self.collections.append(_collection) + start = 0 + # if duration < avl_durtation: + # end = int(start + duration - 1) + # av_end = avl_start + avl_durtation - 1 + # self.collections.append(range(av_end, end)) + # duration = avl_durtation + return self._trim_media_range( + avl_range, self._range_from_frames(start, duration, fps) + ) + def process(self, instance): # self.create_representation( # _otio_clip, otio_clip_range, instance) - # """ - # Expecting (instance.data): - # otioClip (otio.schema.clip): clip from otio timeline - # otioReviewClips (list): list with instances of otio.schema.clip - # or otio.schema.gap - # - # Process description: - # Comparing `otioClip` parent range with `otioReviewClip` parent range will result in frame range witch is the trimmed cut. In case more otio clips or otio gaps are found in otioReviewClips then ffmpeg will generate multiple clips and those are then concuted together to one video file or image sequence. Resulting files are then added to instance as representation ready for review family plugins. 
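
Note: `_trim_media_range`, used above, offsets the clip's source range into
the media's available range. A minimal worked sketch of that math, with
assumed values only (media starting at frame 100 and 100 frames long, a clip
using 10 frames from 5 frames in, 24 fps); this is not code from the patch:

    import opentimelineio as otio

    media_range = otio.opentime.TimeRange(
        otio.opentime.RationalTime(100, 24),  # media starts at frame 100
        otio.opentime.RationalTime(100, 24),  # media is 100 frames long
    )
    source_range = otio.opentime.TimeRange(
        otio.opentime.RationalTime(5, 24),    # clip starts 5 frames in
        otio.opentime.RationalTime(10, 24),   # clip uses 10 frames
    )
    trimmed = otio.opentime.TimeRange(
        otio.opentime.RationalTime(
            media_range.start_time.value + source_range.start_time.value,
            media_range.start_time.rate),
        otio.opentime.RationalTime(
            source_range.duration.value,
            media_range.duration.rate),
    )
    assert trimmed.start_time.value == 105  # 100 + 5
    assert trimmed.duration.value == 10
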
- # """" - - otio_clip = instance.data["otioClip"] - media_reference = otio_clip.media_reference - self.log.debug(media_reference.metadata) + # get ranges and other time info from instance clip + staging_dir = self.staging_dir(instance) + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] otio_review_clips = instance.data["otioReviewClips"] - self.log.debug(otio_review_clips) + + # in case of more than one clip check if second clip is sequence + # this will define what ffmpeg workflow will be used + # test first clip if it is not gap + test_clip = otio_review_clips[0] + if not isinstance(test_clip, otio.schema.Clip): + # if first was gap then test second + test_clip = otio_review_clips[1] + + # make sure second clip is not gap + if isinstance(test_clip, otio.schema.Clip): + metadata = test_clip.media_reference.metadata + is_sequence = metadata.get("isSequence") + if is_sequence: + path = test_clip.media_reference.target_url + available_range = self._trim_media_range( + test_clip.available_range(), + test_clip.source_range + ) + collection = self._make_collection( + path, available_range, metadata) + self.sequence_workflow = collection + + # loop all otio review clips + for index, r_otio_cl in enumerate(otio_review_clips): + self.log.debug(f">>> r_otio_cl: {r_otio_cl}") + src_range = r_otio_cl.source_range + start = src_range.start_time.value + duration = src_range.duration.value + available_range = None + fps = src_range.duration.rate + + # add available range only if not gap + if isinstance(r_otio_cl, otio.schema.Clip): + available_range = r_otio_cl.available_range() + fps = available_range.duration.rate + + # reframing handles conditions + if (len(otio_review_clips) > 1) and (index == 0): + # more clips | first clip reframing with handle + start -= handle_start + duration += handle_start + elif len(otio_review_clips) > 1 \ + and (index == len(otio_review_clips) - 1): + # more clips | last clip reframing with handle + duration += handle_end + elif len(otio_review_clips) == 1: + # one clip | add both handles + start -= handle_start + duration += (handle_start + handle_end) + + if available_range: + available_range = self._trim_available_range( + available_range, start, duration, fps) + + first, last = pype.lib.otio_range_to_frame_range( + available_range) + self.log.debug(f"_ first, last: {first}-{last}") + + # media source info + if isinstance(r_otio_cl, otio.schema.Clip): + path = r_otio_cl.media_reference.target_url + metadata = r_otio_cl.media_reference.metadata + + if self.sequence_workflow: + _collection = self._make_collection( + path, available_range, metadata) + self.collections.append(_collection) + self.sequence_workflow = _collection + + # create seconds values + start_sec = self._frames_to_secons( + start, + src_range.start_time.rate) + duration_sec = self._frames_to_secons( + duration, + src_range.duration.rate) + else: + # create seconds values + start_sec = 0 + duration_sec = self._frames_to_secons( + duration, + src_range.duration.rate) + + # if sequence workflow + if self.sequence_workflow: + _collection = self.create_gap_collection( + self.sequence_workflow, index, duration=duration + ) + self.collections.append(_collection) + self.sequence_workflow = _collection + + self.log.debug(f"_ start_sec: {start_sec}") + self.log.debug(f"_ duration_sec: {duration_sec}") + + self.log.debug(f"_ self.sequence_workflow: {self.sequence_workflow}") + self.log.debug(f"_ self.collections: {self.collections}") + + @staticmethod + def 
_frames_to_secons(frames, framerate): + rt = otio.opentime.from_frames(frames, framerate) + return otio.opentime.to_seconds(rt) + + @staticmethod + def _make_collection(path, otio_range, metadata): + if "%" not in path: + return None + basename = os.path.basename(path) + head = basename.split("%")[0] + tail = os.path.splitext(basename)[-1] + first, last = pype.lib.otio_range_to_frame_range(otio_range) + collection = clique.Collection( + head=head, tail=tail, padding=metadata["padding"]) + collection.indexes.update([i for i in range(first, (last + 1))]) + return collection + + @staticmethod + def _trim_media_range(media_range, source_range): + rw_media_start = otio.opentime.RationalTime( + media_range.start_time.value + source_range.start_time.value, + media_range.start_time.rate + ) + rw_media_duration = otio.opentime.RationalTime( + source_range.duration.value, + media_range.duration.rate + ) + return otio.opentime.TimeRange( + rw_media_start, rw_media_duration) + + @staticmethod + def _range_from_frames(start, duration, fps): + return otio.opentime.TimeRange( + otio.opentime.RationalTime(start, fps), + otio.opentime.RationalTime(duration, fps) + ) + + @staticmethod + def create_gap_collection(collection, index, duration=None, _range=None): + head = "gap" + collection.head[-1] + tail = collection.tail + padding = collection.padding + first_frame = min(collection.indexes) + last_frame = max(collection.indexes) + 1 + + if _range: + new_range = _range + if duration: + if index == 0: + new_range = range( + int(first_frame - duration), first_frame) + else: + new_range = range( + last_frame, int(last_frame + duration)) + + return clique.Collection( + head, tail, padding, indexes=set(new_range)) + + # otio_src_range_handles = pype.lib.otio_range_with_handles( + # otio_src_range, instance) + # self.log.debug(otio_src_range_handles) + # range_convert = pype.lib.otio_range_to_frame_range + # tl_start, tl_end = range_convert(otio_tl_range) + # self.log.debug((tl_start, tl_end)) + # inst_data = instance.data # asset = inst_data['asset'] From eea9a699d807e1451fef131f7154261e9100a130 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 11 Dec 2020 18:45:39 +0100 Subject: [PATCH 035/198] feat(global): otio review sequence extract - known bug of sequence numbering --- .../global/publish/extract_otio_review.py | 391 ++++++++++++------ 1 file changed, 264 insertions(+), 127 deletions(-) diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py index bbb6f7097e..57bc82cf22 100644 --- a/pype/plugins/global/publish/extract_otio_review.py +++ b/pype/plugins/global/publish/extract_otio_review.py @@ -2,9 +2,9 @@ import os import sys import six import errno -import clique -from avalon.vendor import filelink +import clique +import shutil import opentimelineio as otio from pyblish import api import pype @@ -33,78 +33,82 @@ class ExtractOTIOReview(pype.api.Extractor): hosts = ["resolve"] families = ["review"] - collections = list() + # plugin default attributes + temp_file_head = "tempFile." 
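
Note: `_make_collection` above builds a clique collection from a "%04d"
style sequence path. A minimal sketch of the same idea with made-up values
(head "plate.", frames 1001-1004):

    import clique

    collection = clique.Collection(head="plate.", tail=".exr", padding=4)
    collection.indexes.update(range(1001, 1005))

    # "{head}{padding}{tail}" formats back to a printf-style pattern
    print(collection.format("{head}{padding}{tail}"))  # plate.%04d.exr
    # iterating yields the member file names
    print(list(collection))  # ['plate.1001.exr', ..., 'plate.1004.exr']
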
+ padding = "%08d" + next_sequence_frame = 0 + to_width = 800 + to_height = 600 + representation_files = list() sequence_workflow = False - def _trim_available_range(self, avl_range, start, duration, fps): - avl_start = int(avl_range.start_time.value) - avl_durtation = int(avl_range.duration.value) - src_start = int(avl_start + start) - - self.log.debug(f"_ avl_start: {avl_start}") - self.log.debug(f"_ avl_durtation: {avl_durtation}") - self.log.debug(f"_ src_start: {src_start}") - # it only trims to source if - if src_start < avl_start: - if self.sequence_workflow: - gap_range = list(range(src_start, avl_start)) - _collection = self.create_gap_collection( - self.sequence_workflow, -1, _range=gap_range) - self.collections.append(_collection) - start = 0 - # if duration < avl_durtation: - # end = int(start + duration - 1) - # av_end = avl_start + avl_durtation - 1 - # self.collections.append(range(av_end, end)) - # duration = avl_durtation - return self._trim_media_range( - avl_range, self._range_from_frames(start, duration, fps) - ) - def process(self, instance): - # self.create_representation( - # _otio_clip, otio_clip_range, instance) - # get ranges and other time info from instance clip - staging_dir = self.staging_dir(instance) + # reset to empty list > for some reason it is inheriting data + # from previouse instances + if self.representation_files: + self.representation_files = list() + + # get otio clip and other time info from instance clip handle_start = instance.data["handleStart"] handle_end = instance.data["handleEnd"] otio_review_clips = instance.data["otioReviewClips"] + # skip instance if no reviewable data available + if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \ + and (len(otio_review_clips) == 1): + self.log.warning( + "Instance `{}` has nothing to process".format(instance)) + return + else: + self.staging_dir = self.staging_dir(instance) + if not instance.data.get("representations"): + instance.data["representations"] = list() + # in case of more than one clip check if second clip is sequence # this will define what ffmpeg workflow will be used # test first clip if it is not gap test_clip = otio_review_clips[0] - if not isinstance(test_clip, otio.schema.Clip): - # if first was gap then test second + if (not isinstance(test_clip, otio.schema.Clip)) \ + and (len(otio_review_clips) > 1): + # if first was gap then test second in case there are more test_clip = otio_review_clips[1] # make sure second clip is not gap if isinstance(test_clip, otio.schema.Clip): metadata = test_clip.media_reference.metadata + + # get resolution data from metadata if they are available + self.to_width = metadata.get("width") or self.to_width + self.to_height = metadata.get("height") or self.to_height + self.actual_fps = test_clip.source_range.start_time.rate + + # define future workflow sequencial or movie is_sequence = metadata.get("isSequence") + if is_sequence: path = test_clip.media_reference.target_url available_range = self._trim_media_range( test_clip.available_range(), test_clip.source_range ) - collection = self._make_collection( + _dir_path, collection = self._make_sequence_collection( path, available_range, metadata) + self.padding = collection.format("{padding}") + self.next_sequence_frame = 1001 self.sequence_workflow = collection # loop all otio review clips for index, r_otio_cl in enumerate(otio_review_clips): - self.log.debug(f">>> r_otio_cl: {r_otio_cl}") src_range = r_otio_cl.source_range start = src_range.start_time.value duration = src_range.duration.value 
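
Note: the reframing conditions just below extend the first and last review
segments by the clip handles. A worked example with assumed values (10 frame
handles, a single review clip starting at frame 50 with a 100 frame
duration):

    handle_start, handle_end = 10, 10
    start, duration = 50, 100

    # single clip case: both handles are added around the source range
    start -= handle_start                  # start: 40
    duration += handle_start + handle_end  # duration: 120
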
available_range = None - fps = src_range.duration.rate + self.actual_fps = src_range.duration.rate # add available range only if not gap if isinstance(r_otio_cl, otio.schema.Clip): available_range = r_otio_cl.available_range() - fps = available_range.duration.rate + self.actual_fps = available_range.duration.rate # reframing handles conditions if (len(otio_review_clips) > 1) and (index == 0): @@ -122,11 +126,10 @@ class ExtractOTIOReview(pype.api.Extractor): if available_range: available_range = self._trim_available_range( - available_range, start, duration, fps) + available_range, start, duration, self.actual_fps) first, last = pype.lib.otio_range_to_frame_range( available_range) - self.log.debug(f"_ first, last: {first}-{last}") # media source info if isinstance(r_otio_cl, otio.schema.Clip): @@ -134,10 +137,20 @@ class ExtractOTIOReview(pype.api.Extractor): metadata = r_otio_cl.media_reference.metadata if self.sequence_workflow: - _collection = self._make_collection( + dir_path, collection = self._make_sequence_collection( path, available_range, metadata) - self.collections.append(_collection) - self.sequence_workflow = _collection + + # to preserve future sequence numbering + # if index <= 1: + # self.next_sequence_frame = max(collection.indexes) + + # render segment + self._render_sequence_seqment( + collection, + input_dir=dir_path + ) + self.representation_files.extend([f for f in collection]) + self.sequence_workflow = collection # create seconds values start_sec = self._frames_to_secons( @@ -155,17 +168,168 @@ class ExtractOTIOReview(pype.api.Extractor): # if sequence workflow if self.sequence_workflow: - _collection = self.create_gap_collection( - self.sequence_workflow, index, duration=duration + collection = self._create_gap_collection( + self.sequence_workflow, **{ + "eventNumber": index, + "duration": duration + } ) - self.collections.append(_collection) - self.sequence_workflow = _collection + self.representation_files.extend([f for f in collection]) + self.sequence_workflow = collection self.log.debug(f"_ start_sec: {start_sec}") self.log.debug(f"_ duration_sec: {duration_sec}") - self.log.debug(f"_ self.sequence_workflow: {self.sequence_workflow}") - self.log.debug(f"_ self.collections: {self.collections}") + # creating and registering representation + representation = self.create_representation(start, duration) + instance.data["representations"].append(representation) + self.log.info(f"Adding representation: {representation}") + + def create_representation(self, start, duration): + end = start + duration + files = self.representation_files.pop() + ext = os.path.splitext(files)[-1] + + # create default representation data + representation_data = { + "ext": ext[1:], + "name": ext[1:], + "files": files, + "frameStart": start, + "frameEnd": end, + "stagingDir": self.staging_dir, + "tags": ["review", "ftrackreview", "delete"] + } + + # update data if sequence workflow + if self.sequence_workflow: + collections, _rem = clique.assemble(self.representation_files) + collection = collections.pop() + start = min(collection.indexes) + end = max(collection.indexes) + files = self.representation_files + representation_data.update({ + "files": files, + "frameStart": start, + "frameEnd": end, + }) + return representation_data + + def _trim_available_range(self, avl_range, start, duration, fps): + avl_start = int(avl_range.start_time.value) + src_start = int(avl_start + start) + avl_durtation = int(avl_range.duration.value - start) + + # it only trims to source if + if src_start < 
avl_start: + if self.sequence_workflow: + start_gap_range = list(range(src_start, avl_start)) + collection = self._create_gap_collection( + self.sequence_workflow, **{"range": start_gap_range}) + self.representation_files.extend([f for f in collection]) + start = 0 + duration -= len(start_gap_range) + if duration > avl_durtation: + if self.sequence_workflow: + gap_start = int(src_start + avl_durtation) + gap_end = int(src_start + duration) + end_gap_range = list(range(gap_start, gap_end)) + collection = self._create_gap_collection( + self.sequence_workflow, **{"range": end_gap_range}) + self.representation_files.extend([f for f in collection]) + duration = avl_durtation + return self._trim_media_range( + avl_range, self._range_from_frames(start, duration, fps) + ) + + def _render_sequence_seqment(self, collection, input_dir=None): + # get rendering app path + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + + if input_dir: + # copying files to temp folder + for indx, file_item in enumerate(collection): + seq_number = self.padding % ( + self.next_sequence_frame + indx) + # create path to source + output_file = "{}{}{}".format( + self.temp_file_head, + seq_number, + collection.format("{tail}")) + input_path = os.path.join(input_dir, file_item) + output_path = os.path.join(self.staging_dir, output_file) + try: + shutil.copyfile(input_path, output_path) + except OSError as e: + self.log.critical( + "Cannot copy {} to {}".format(input_path, output_path)) + self.log.critical(e) + six.reraise(*sys.exc_info()) + self.next_sequence_frame = int(seq_number) + 1 + else: + # generating gap files + file = "{}{}".format(self.temp_file_head, + collection.format("{padding}{tail}")) + frame_start = min(collection.indexes) + frame_duration = len(collection.indexes) + sec_duration = frame_duration / self.actual_fps + + # create path to destination + output_path = os.path.join(self.staging_dir, file) + # form command for rendering gap files + gap_cmd = " ".join([ + ffmpeg_path, + "-t {secDuration} -r {frameRate}", + "-f lavfi -i color=c=black:s={width}x{height}", + "-tune stillimage", + "-start_number {frameStart}", + output_path + ]).format( + secDuration=sec_duration, + frameRate=self.actual_fps, + frameStart=frame_start, + width=self.to_width, + height=self.to_height + ) + # execute + self.log.debug("Executing: {}".format(gap_cmd)) + output = pype.api.subprocess(gap_cmd, shell=True) + self.log.debug("Output: {}".format(output)) + + def _create_gap_collection(self, collection, **kwargs): + head = collection.head + tail = collection.tail + padding = collection.padding + first_frame = self.next_sequence_frame + last_frame = first_frame + len(collection.indexes) + + new_range = kwargs.get("range") + + if not new_range: + duration = kwargs.get("duration") + event_number = kwargs.get("eventNumber") + + # validate kwards + e_msg = ("Missing required kargs `duration` or `eventNumber`" + "kwargs: `{}`").format(kwargs) + assert duration, e_msg + assert event_number is not None, e_msg + + # create new range + if event_number == 0: + new_range = range(first_frame, (last_frame + 1)) + else: + new_range = range(first_frame, (last_frame - 1)) + + # create collection + collection = clique.Collection( + head, tail, padding, indexes=set(new_range)) + + # render segment + self._render_sequence_seqment(collection) + self.next_sequence_frame = max(collection.indexes) + 1 + + return collection @staticmethod def _frames_to_secons(frames, framerate): @@ -173,17 +337,18 @@ class ExtractOTIOReview(pype.api.Extractor): return 
otio.opentime.to_seconds(rt) @staticmethod - def _make_collection(path, otio_range, metadata): + def _make_sequence_collection(path, otio_range, metadata): if "%" not in path: return None - basename = os.path.basename(path) - head = basename.split("%")[0] - tail = os.path.splitext(basename)[-1] + file_name = os.path.basename(path) + dir_path = os.path.dirname(path) + head = file_name.split("%")[0] + tail = os.path.splitext(file_name)[-1] first, last = pype.lib.otio_range_to_frame_range(otio_range) collection = clique.Collection( head=head, tail=tail, padding=metadata["padding"]) collection.indexes.update([i for i in range(first, (last + 1))]) - return collection + return dir_path, collection @staticmethod def _trim_media_range(media_range, source_range): @@ -205,34 +370,6 @@ class ExtractOTIOReview(pype.api.Extractor): otio.opentime.RationalTime(duration, fps) ) - @staticmethod - def create_gap_collection(collection, index, duration=None, _range=None): - head = "gap" + collection.head[-1] - tail = collection.tail - padding = collection.padding - first_frame = min(collection.indexes) - last_frame = max(collection.indexes) + 1 - - if _range: - new_range = _range - if duration: - if index == 0: - new_range = range( - int(first_frame - duration), first_frame) - else: - new_range = range( - last_frame, int(last_frame + duration)) - - return clique.Collection( - head, tail, padding, indexes=set(new_range)) - - # otio_src_range_handles = pype.lib.otio_range_with_handles( - # otio_src_range, instance) - # self.log.debug(otio_src_range_handles) - # range_convert = pype.lib.otio_range_to_frame_range - # tl_start, tl_end = range_convert(otio_tl_range) - # self.log.debug((tl_start, tl_end)) - # inst_data = instance.data # asset = inst_data['asset'] @@ -555,48 +692,48 @@ class ExtractOTIOReview(pype.api.Extractor): # self.log.critical("An unexpected error occurred.") # six.reraise(*sys.exc_info()) # - # def create_representation(self, otio_clip, to_otio_range, instance): - # to_tl_start, to_tl_end = pype.lib.otio_range_to_frame_range( - # to_otio_range) - # tl_start, tl_end = pype.lib.otio_range_to_frame_range( - # otio_clip.range_in_parent()) - # source_start, source_end = pype.lib.otio_range_to_frame_range( - # otio_clip.source_range) - # media_reference = otio_clip.media_reference - # metadata = media_reference.metadata - # mr_start, mr_end = pype.lib.otio_range_to_frame_range( - # media_reference.available_range) - # path = media_reference.target_url - # reference_frame_start = (mr_start + source_start) + ( - # to_tl_start - tl_start) - # reference_frame_end = (mr_start + source_end) - ( - # tl_end - to_tl_end) - # - # base_name = os.path.basename(path) - # staging_dir = os.path.dirname(path) - # ext = os.path.splitext(base_name)[1][1:] - # - # if metadata.get("isSequence"): - # files = list() - # padding = metadata["padding"] - # base_name = pype.lib.convert_to_padded_path(base_name, padding) - # for index in range( - # reference_frame_start, (reference_frame_end + 1)): - # file_name = base_name % index - # path_test = os.path.join(staging_dir, file_name) - # if os.path.exists(path_test): - # files.append(file_name) - # - # self.log.debug(files) - # else: - # files = base_name - # - # representation = { - # "ext": ext, - # "name": ext, - # "files": files, - # "frameStart": reference_frame_start, - # "frameEnd": reference_frame_end, - # "stagingDir": staging_dir - # } - # self.log.debug(representation) +# def create_representation(self, otio_clip, to_otio_range, instance): +# to_tl_start, to_tl_end 
= pype.lib.otio_range_to_frame_range( +# to_otio_range) +# tl_start, tl_end = pype.lib.otio_range_to_frame_range( +# otio_clip.range_in_parent()) +# source_start, source_end = pype.lib.otio_range_to_frame_range( +# otio_clip.source_range) +# media_reference = otio_clip.media_reference +# metadata = media_reference.metadata +# mr_start, mr_end = pype.lib.otio_range_to_frame_range( +# media_reference.available_range) +# path = media_reference.target_url +# reference_frame_start = (mr_start + source_start) + ( +# to_tl_start - tl_start) +# reference_frame_end = (mr_start + source_end) - ( +# tl_end - to_tl_end) +# +# base_name = os.path.basename(path) +# staging_dir = os.path.dirname(path) +# ext = os.path.splitext(base_name)[1][1:] +# +# if metadata.get("isSequence"): +# files = list() +# padding = metadata["padding"] +# base_name = pype.lib.convert_to_padded_path(base_name, padding) +# for index in range( +# reference_frame_start, (reference_frame_end + 1)): +# file_name = base_name % index +# path_test = os.path.join(staging_dir, file_name) +# if os.path.exists(path_test): +# files.append(file_name) +# +# self.log.debug(files) +# else: +# files = base_name +# +# representation = { +# "ext": ext, +# "name": ext, +# "files": files, +# "frameStart": reference_frame_start, +# "frameEnd": reference_frame_end, +# "stagingDir": staging_dir +# } +# self.log.debug(representation) From 8d8c6f4a044c1e8616fd1239e7601f2c8f47053c Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 15 Dec 2020 11:14:26 +0100 Subject: [PATCH 036/198] feat(resolve): extract review sequence wip --- .../global/publish/extract_otio_review.py | 173 +++++++++--------- 1 file changed, 82 insertions(+), 91 deletions(-) diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py index 57bc82cf22..aad9b63830 100644 --- a/pype/plugins/global/publish/extract_otio_review.py +++ b/pype/plugins/global/publish/extract_otio_review.py @@ -35,18 +35,19 @@ class ExtractOTIOReview(pype.api.Extractor): # plugin default attributes temp_file_head = "tempFile." 
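
Note: the hunk below replaces the fixed "%08d" padding with a padding derived
from the workfile start frame. A minimal sketch of that derivation, assuming
a start frame of 1001:

    workfile_start = 1001
    padding = "%0{}d".format(len(str(workfile_start)))  # "%04d"
    assert padding % 25 == "0025"
    assert padding % workfile_start == "1001"
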
- padding = "%08d" - next_sequence_frame = 0 to_width = 800 to_height = 600 - representation_files = list() sequence_workflow = False + sequence_ext = ".jpg" def process(self, instance): - # reset to empty list > for some reason it is inheriting data - # from previouse instances - if self.representation_files: - self.representation_files = list() + self.representation_files = list() + self.used_frames = list() + self.workfile_start = int(instance.data.get( + "workfileFrameStart", 1001)) + self.padding = "%0{}d".format(len(str(self.workfile_start))) + self.used_frames.append(self.workfile_start) + self.log.debug(f"_ self.used_frames-0: {self.used_frames}") # get otio clip and other time info from instance clip handle_start = instance.data["handleStart"] @@ -93,9 +94,8 @@ class ExtractOTIOReview(pype.api.Extractor): ) _dir_path, collection = self._make_sequence_collection( path, available_range, metadata) - self.padding = collection.format("{padding}") - self.next_sequence_frame = 1001 self.sequence_workflow = collection + self.sequence_ext = collection.format("{tail}") # loop all otio review clips for index, r_otio_cl in enumerate(otio_review_clips): @@ -130,6 +130,7 @@ class ExtractOTIOReview(pype.api.Extractor): first, last = pype.lib.otio_range_to_frame_range( available_range) + self.log.debug(f"_ first, last: {first}-{last}") # media source info if isinstance(r_otio_cl, otio.schema.Clip): @@ -140,17 +141,11 @@ class ExtractOTIOReview(pype.api.Extractor): dir_path, collection = self._make_sequence_collection( path, available_range, metadata) - # to preserve future sequence numbering - # if index <= 1: - # self.next_sequence_frame = max(collection.indexes) - # render segment self._render_sequence_seqment( - collection, + collection=collection, input_dir=dir_path ) - self.representation_files.extend([f for f in collection]) - self.sequence_workflow = collection # create seconds values start_sec = self._frames_to_secons( @@ -168,14 +163,7 @@ class ExtractOTIOReview(pype.api.Extractor): # if sequence workflow if self.sequence_workflow: - collection = self._create_gap_collection( - self.sequence_workflow, **{ - "eventNumber": index, - "duration": duration - } - ) - self.representation_files.extend([f for f in collection]) - self.sequence_workflow = collection + self._render_sequence_seqment(gap=duration) self.log.debug(f"_ start_sec: {start_sec}") self.log.debug(f"_ duration_sec: {duration_sec}") @@ -187,14 +175,9 @@ class ExtractOTIOReview(pype.api.Extractor): def create_representation(self, start, duration): end = start + duration - files = self.representation_files.pop() - ext = os.path.splitext(files)[-1] # create default representation data representation_data = { - "ext": ext[1:], - "name": ext[1:], - "files": files, "frameStart": start, "frameEnd": end, "stagingDir": self.staging_dir, @@ -203,12 +186,20 @@ class ExtractOTIOReview(pype.api.Extractor): # update data if sequence workflow if self.sequence_workflow: - collections, _rem = clique.assemble(self.representation_files) - collection = collections.pop() + collection = clique.Collection( + self.temp_file_head, + tail=self.sequence_ext, + padding=len(str(self.workfile_start)), + indexes=set(self.used_frames) + ) start = min(collection.indexes) end = max(collection.indexes) - files = self.representation_files + self.log.debug(collection) + files = [f for f in collection] + ext = collection.format("{tail}") representation_data.update({ + "name": ext[1:], + "ext": ext[1:], "files": files, "frameStart": start, "frameEnd": end, @@ -220,42 
+211,59 @@ class ExtractOTIOReview(pype.api.Extractor): src_start = int(avl_start + start) avl_durtation = int(avl_range.duration.value - start) - # it only trims to source if + # if media start is les then clip requires if src_start < avl_start: + # calculate gap + gap_duration = src_start - avl_start + + # create gap data to disk if self.sequence_workflow: - start_gap_range = list(range(src_start, avl_start)) - collection = self._create_gap_collection( - self.sequence_workflow, **{"range": start_gap_range}) - self.representation_files.extend([f for f in collection]) + self._render_sequence_seqment(gap=gap_duration) + self.log.debug(f"_ self.used_frames-1: {self.used_frames}") + # fix start and end to correct values start = 0 - duration -= len(start_gap_range) + duration -= len(gap_duration) + + # if media duration is shorter then clip requirement if duration > avl_durtation: + # calculate gap + gap_start = int(src_start + avl_durtation) + gap_end = int(src_start + duration) + gap_duration = gap_start - gap_end + + # create gap data to disk if self.sequence_workflow: - gap_start = int(src_start + avl_durtation) - gap_end = int(src_start + duration) - end_gap_range = list(range(gap_start, gap_end)) - collection = self._create_gap_collection( - self.sequence_workflow, **{"range": end_gap_range}) - self.representation_files.extend([f for f in collection]) + self._render_sequence_seqment(gap=gap_duration) + self.log.debug(f"_ self.used_frames-2: {self.used_frames}") + + # fix duration lenght duration = avl_durtation + + # return correct trimmed range return self._trim_media_range( avl_range, self._range_from_frames(start, duration, fps) ) - def _render_sequence_seqment(self, collection, input_dir=None): + def _render_sequence_seqment(self, + collection=None, input_dir=None, gap=None): # get rendering app path ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - - if input_dir: + if input_dir and collection: # copying files to temp folder - for indx, file_item in enumerate(collection): - seq_number = self.padding % ( - self.next_sequence_frame + indx) + for file_item in collection: + if self.used_frames[-1] == self.workfile_start: + seq_number = self.padding % (self.used_frames[-1]) + self.workfile_start -= 1 + else: + seq_number = self.padding % ( + self.used_frames[-1] + 1) + self.used_frames.append(int(seq_number)) # create path to source output_file = "{}{}{}".format( self.temp_file_head, seq_number, - collection.format("{tail}")) + self.sequence_ext + ) input_path = os.path.join(input_dir, file_item) output_path = os.path.join(self.staging_dir, output_file) try: @@ -265,14 +273,21 @@ class ExtractOTIOReview(pype.api.Extractor): "Cannot copy {} to {}".format(input_path, output_path)) self.log.critical(e) six.reraise(*sys.exc_info()) - self.next_sequence_frame = int(seq_number) + 1 + self.log.debug(f"_ self.used_frames-2: {self.used_frames}") else: + self.log.debug(f"_ gap: {gap}") # generating gap files - file = "{}{}".format(self.temp_file_head, - collection.format("{padding}{tail}")) - frame_start = min(collection.indexes) - frame_duration = len(collection.indexes) - sec_duration = frame_duration / self.actual_fps + file = "{}{}{}".format( + self.temp_file_head, + self.padding, + self.sequence_ext + ) + frame_start = self.used_frames[-1] + 1 + + if self.used_frames[-1] == self.workfile_start: + frame_start = self.used_frames[-1] + + sec_duration = self._frames_to_secons(gap, self.actual_fps) # create path to destination output_path = os.path.join(self.staging_dir, file) @@ -296,40 
@@ -296,40 +311,16 @@ class ExtractOTIOReview(pype.api.Extractor):
             output = pype.api.subprocess(gap_cmd, shell=True)
             self.log.debug("Output: {}".format(output))

-    def _create_gap_collection(self, collection, **kwargs):
-        head = collection.head
-        tail = collection.tail
-        padding = collection.padding
-        first_frame = self.next_sequence_frame
-        last_frame = first_frame + len(collection.indexes)
-
-        new_range = kwargs.get("range")
-
-        if not new_range:
-            duration = kwargs.get("duration")
-            event_number = kwargs.get("eventNumber")
-
-            # validate kwards
-            e_msg = ("Missing required kargs `duration` or `eventNumber`"
-                     "kwargs: `{}`").format(kwargs)
-            assert duration, e_msg
-            assert event_number is not None, e_msg
-
-            # create new range
-            if event_number == 0:
-                new_range = range(first_frame, (last_frame + 1))
-            else:
-                new_range = range(first_frame, (last_frame - 1))
-
-        # create collection
-        collection = clique.Collection(
-            head, tail, padding, indexes=set(new_range))
-
-        # render segment
-        self._render_sequence_seqment(collection)
-        self.next_sequence_frame = max(collection.indexes) + 1
-
-        return collection
+            if output:
+                # generate used frames
+                for _i in range(1, (int(gap) + 1)):
+                    if self.used_frames[-1] == self.workfile_start:
+                        seq_number = self.padding % (self.used_frames[-1])
+                        self.workfile_start -= 1
+                    else:
+                        seq_number = self.padding % (
+                            self.used_frames[-1] + 1)
+                    self.used_frames.append(int(seq_number))

     @staticmethod
     def _frames_to_secons(frames, framerate):
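The used-frames counter that this patch introduces can be exercised on its
own. A minimal standalone sketch of its behaviour, with hypothetical values
(the duplicate first entry is intentional; the plugin later deduplicates via
a set):

    # standalone sketch of the used_frames counter (hypothetical values)
    workfile_start = 1001
    used_frames = [workfile_start]

    def next_frame():
        # the first call reuses the seeded start frame, later calls increment
        global workfile_start
        if used_frames[-1] == workfile_start:
            number = used_frames[-1]
            workfile_start -= 1      # disarm the "first frame" branch
        else:
            number = used_frames[-1] + 1
        used_frames.append(number)
        return number

    print([next_frame() for _ in range(3)])  # [1001, 1002, 1003]
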
From 6ad0c22553cbbfe998c73c829daa42009e06abad Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 15 Dec 2020 12:08:42 +0100
Subject: [PATCH 037/198] feat(resolve): wip sequence rendering otio extract

---
 .../global/publish/extract_otio_review.py | 100 +++++++++++-------
 1 file changed, 63 insertions(+), 37 deletions(-)

diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py
index aad9b63830..d9066093b3 100644
--- a/pype/plugins/global/publish/extract_otio_review.py
+++ b/pype/plugins/global/publish/extract_otio_review.py
@@ -245,12 +245,33 @@ class ExtractOTIOReview(pype.api.Extractor):
         )

     def _render_sequence_seqment(self,
-                                 collection=None, input_dir=None, gap=None):
+                                 collection=None, input_dir=None,
+                                 video_path=None, gap=None):
         # get rendering app path
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
+
         if input_dir and collection:
-            # copying files to temp folder
-            for file_item in collection:
+            output_file = "{}{}{}".format(
+                self.temp_file_head,
+                self.padding,
+                self.sequence_ext
+            )
+            # create path to destination
+            output_path = os.path.join(self.staging_dir, output_file)
+
+            # generate frame start
+            out_frame_start = self.used_frames[-1] + 1
+            if self.used_frames[-1] == self.workfile_start:
+                out_frame_start = self.used_frames[-1]
+
+            in_frame_start = min(collection.indexes)
+
+            # converting image sequence to image sequence
+            input_file = collection.format("{head}{padding}{tail}")
+            input_path = os.path.join(input_dir, input_file)
+
+            # generate used frames
+            for _i in collection:
                 if self.used_frames[-1] == self.workfile_start:
                     seq_number = self.padding % (self.used_frames[-1])
                     self.workfile_start -= 1
@@ -258,24 +279,28 @@ class ExtractOTIOReview(pype.api.Extractor):
                     seq_number = self.padding % (
                         self.used_frames[-1] + 1)
                 self.used_frames.append(int(seq_number))
-                # create path to source
-                output_file = "{}{}{}".format(
-                    self.temp_file_head,
-                    seq_number,
-                    self.sequence_ext
-                )
-                input_path = os.path.join(input_dir, file_item)
-                output_path = os.path.join(self.staging_dir, output_file)
-                try:
-                    shutil.copyfile(input_path, output_path)
-                except OSError as e:
-                    self.log.critical(
-                        "Cannot copy {} to {}".format(input_path, output_path))
-                    self.log.critical(e)
-                    six.reraise(*sys.exc_info())
-            self.log.debug(f"_ self.used_frames-2: {self.used_frames}")
-        else:
-            self.log.debug(f"_ gap: {gap}")
+
+            # form command for converting image sequence
+            command = " ".join([
+                ffmpeg_path,
+                "-start_number {inFrameStart}",
+                "-i {inputPath}",
+                "-start_number {outFrameStart}",
+                output_path
+            ]).format(
+                inputPath=input_path,
+                inFrameStart=in_frame_start,
+                outFrameStart=out_frame_start,
+                # TODO: reformatting to output resolution
+                width=self.to_width,
+                height=self.to_height
+            )
+        elif video_path:
+            # TODO: when input is video file
+            # and want to convert to image sequence
+            pass
+        elif gap:
+            # TODO: function to create default output file and out frame start
             # generating gap files
             file = "{}{}{}".format(
                 self.temp_file_head,
@@ -287,16 +312,28 @@ class ExtractOTIOReview(pype.api.Extractor):
             if self.used_frames[-1] == self.workfile_start:
                 frame_start = self.used_frames[-1]

+            # TODO: function for adding used frames with input frame duration
+            # generate used frames
+            for _i in range(1, (int(gap) + 1)):
+                if self.used_frames[-1] == self.workfile_start:
+                    seq_number = self.padding % (self.used_frames[-1])
+                    self.workfile_start -= 1
+                else:
+                    seq_number = self.padding % (
+                        self.used_frames[-1] + 1)
+                self.used_frames.append(int(seq_number))
+
             sec_duration = self._frames_to_secons(gap, self.actual_fps)

             # create path to destination
             output_path = os.path.join(self.staging_dir, file)

             # form command for rendering gap files
-            gap_cmd = " ".join([
+            command = " ".join([
                 ffmpeg_path,
                 "-t {secDuration} -r {frameRate}",
                 "-f lavfi -i color=c=black:s={width}x{height}",
                 "-tune stillimage",
+                # TODO: add this with function for output file path framestart
                 "-start_number {frameStart}",
                 output_path
             ]).format(
@@ -306,21 +343,10 @@ class ExtractOTIOReview(pype.api.Extractor):
                 width=self.to_width,
                 height=self.to_height
             )
-            # execute
-            self.log.debug("Executing: {}".format(gap_cmd))
-            output = pype.api.subprocess(gap_cmd, shell=True)
-            self.log.debug("Output: {}".format(output))
-
-            if output:
-                # generate used frames
-                for _i in range(1, (int(gap) + 1)):
-                    if self.used_frames[-1] == self.workfile_start:
-                        seq_number = self.padding % (self.used_frames[-1])
-                        self.workfile_start -= 1
-                    else:
-                        seq_number = self.padding % (
-                            self.used_frames[-1] + 1)
-                    self.used_frames.append(int(seq_number))
+        # execute
+        self.log.debug("Executing: {}".format(command))
+        output = pype.api.subprocess(command, shell=True)
+        self.log.debug("Output: {}".format(output))

     @staticmethod
     def _frames_to_secons(frames, framerate):
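Patch 037 replaces per-file copying with a single ffmpeg run that renumbers
the whole image sequence. A hedged sketch of the command shape it assembles;
the paths and frame numbers below are made up, not taken from the patch:

    # sketch of the sequence-renumbering command (hypothetical values)
    import subprocess

    cmd = (
        "ffmpeg"
        " -start_number 997"               # first frame of the input sequence
        " -i /input/dir/plate.%04d.jpg"    # input image sequence pattern
        " -start_number 1001"              # first frame written to staging
        " /staging/dir/tempFile.%04d.jpg"
    )
    subprocess.check_call(cmd.split())
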
From 900ccedc325ac3575102bcb50d326b9b7fa39394 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 15 Dec 2020 19:06:55 +0100
Subject: [PATCH 038/198] feat(resolve): wip extract otio review adding video
 file to sequence workflow

---
 .../global/publish/extract_otio_review.py | 37 ++++++++++++-------
 1 file changed, 23 insertions(+), 14 deletions(-)

diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py
index d9066093b3..41b72b131d 100644
--- a/pype/plugins/global/publish/extract_otio_review.py
+++ b/pype/plugins/global/publish/extract_otio_review.py
@@ -138,14 +138,20 @@ class ExtractOTIOReview(pype.api.Extractor):
                 metadata = r_otio_cl.media_reference.metadata

                 if self.sequence_workflow:
-                    dir_path, collection = self._make_sequence_collection(
-                        path, available_range, metadata)
+                    if metadata.get("padding"):
+                        # render image sequence to sequence
+                        dir_path, collection = self._make_sequence_collection(
+                            path, available_range, metadata)

-                    # render segment
-                    self._render_sequence_seqment(
-                        collection=collection,
-                        input_dir=dir_path
-                    )
+                        # render segment
+                        self._render_sequence_seqment(
+                            sequence=[dir_path, collection]
+                        )
+                    else:
+                        # render video file to sequence
+                        self._render_sequence_seqment(
+                            video=[path, available_range]
+                        )

                 # create seconds values
                 start_sec = self._frames_to_secons(
@@ -226,6 +232,7 @@ class ExtractOTIOReview(pype.api.Extractor):

         # if media duration is shorter than the clip requires
         if duration > avl_durtation:
+            # TODO: this renders the missing frames before the footage instead of after it; fix it so the rendered gap frames are numbered after the footage.
             # calculate gap
             gap_start = int(src_start + avl_durtation)
             gap_end = int(src_start + duration)
@@ -245,12 +252,14 @@ class ExtractOTIOReview(pype.api.Extractor):
         )

     def _render_sequence_seqment(self,
-                                 collection=None, input_dir=None,
-                                 video_path=None, gap=None):
+                                 sequence=None,
+                                 video=None,
+                                 gap=None):
         # get rendering app path
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

-        if input_dir and collection:
+        if sequence:
+            input_dir, collection = sequence
             output_file = "{}{}{}".format(
                 self.temp_file_head,
                 self.padding,
                 self.sequence_ext
             )
@@ -295,10 +304,10 @@ class ExtractOTIOReview(pype.api.Extractor):
                 width=self.to_width,
                 height=self.to_height
             )
-        elif video_path:
-            # TODO: when input is video file
-            # and want to convert to image sequence
-            pass
+        elif video:
+            video_path, otio_range = video
+            self.log.debug(
+                f">> video_path, otio_range: {video_path},{otio_range}")
         elif gap:
             # TODO: function to create default output file and out frame start
             # generating gap files
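Patch 038 routes image sequences, video files, and gaps through the same
method by packing the inputs into small lists. A standalone sketch of that
dispatch shape, with a hypothetical stand-in for the plugin method and made-up
values:

    # standalone sketch of the dispatch introduced above (hypothetical)
    def render_segment(sequence=None, video=None, gap=None):
        if sequence:
            input_dir, collection = sequence
            return "sequence from {}".format(input_dir)
        elif video:
            video_path, otio_range = video
            return "video from {}".format(video_path)
        elif gap:
            return "black frames for {} frames".format(gap)

    print(render_segment(sequence=["/mnt/plates/sh010", "collection"]))
    print(render_segment(video=["/mnt/plates/sh010.mov", "range"]))
    print(render_segment(gap=25))
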
From eb91b46fc6096d958e58854eca0b798fa358f947 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 15 Dec 2020 20:55:57 +0100
Subject: [PATCH 039/198] feat(global): improving code wip

---
 .../global/publish/extract_otio_review.py | 156 ++++++++----------
 1 file changed, 73 insertions(+), 83 deletions(-)

diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py
index 41b72b131d..f4108bad49 100644
--- a/pype/plugins/global/publish/extract_otio_review.py
+++ b/pype/plugins/global/publish/extract_otio_review.py
@@ -258,105 +258,95 @@ class ExtractOTIOReview(pype.api.Extractor):
         # get rendering app path
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

+        # create path and frame start to destination
+        output_path, out_frame_start = self.add_sequence_output()
+
+        # start command list
+        command = [ffmpeg_path]
+
         if sequence:
             input_dir, collection = sequence
-            output_file = "{}{}{}".format(
-                self.temp_file_head,
-                self.padding,
-                self.sequence_ext
-            )
-            # create path to destination
-            output_path = os.path.join(self.staging_dir, output_file)
-
-            # generate frame start
-            out_frame_start = self.used_frames[-1] + 1
-            if self.used_frames[-1] == self.workfile_start:
-                out_frame_start = self.used_frames[-1]
-
+            frame_duration = len(collection.indexes)
             in_frame_start = min(collection.indexes)

             # converting image sequence to image sequence
             input_file = collection.format("{head}{padding}{tail}")
             input_path = os.path.join(input_dir, input_file)

-            # generate used frames
-            for _i in collection:
-                if self.used_frames[-1] == self.workfile_start:
-                    seq_number = self.padding % (self.used_frames[-1])
-                    self.workfile_start -= 1
-                else:
-                    seq_number = self.padding % (
-                        self.used_frames[-1] + 1)
-                self.used_frames.append(int(seq_number))
-
-            # form command for converting image sequence
-            command = " ".join([
-                ffmpeg_path,
-                "-start_number {inFrameStart}",
-                "-i {inputPath}",
-                "-start_number {outFrameStart}",
-                output_path
-            ]).format(
-                inputPath=input_path,
-                inFrameStart=in_frame_start,
-                outFrameStart=out_frame_start,
-                # TODO: reformatting to output resolution
-                width=self.to_width,
-                height=self.to_height
-            )
+            # form command for rendering sequence files
+            command.extend([
+                "-start_number {}".format(in_frame_start),
+                "-i {}".format(input_path)
+            ])

         elif video:
             video_path, otio_range = video
+            frame_start = otio_range.start_time.value
+            input_fps = otio_range.start_time.rate
+            frame_duration = otio_range.duration.value
+            sec_start = self._frames_to_secons(frame_start, input_fps)
+            sec_duration = self._frames_to_secons(frame_duration, input_fps)
-            self.log.debug(
-                f">> video_path, otio_range: {video_path},{otio_range}")

+            # form command for rendering video files
+            command.extend([
+                "-ss {}".format(sec_start),
+                "-t {}".format(sec_duration),
+                "-i {}".format(video_path)
+            ])
+
         elif gap:
+            frame_duration = gap
+            sec_duration = self._frames_to_secons(
+                frame_duration, self.actual_fps)
+
+            # form command for rendering gap files
+            command.extend([
+                "-t {} -r {}".format(sec_duration, self.actual_fps),
+                "-f lavfi",
+                "-i color=c=black:s={}x{}".format(self.to_width,
+                                                  self.to_height),
+                "-tune stillimage"
+            ])
-            # TODO: function to create default output file and out frame start
-            # generating gap files
-            file = "{}{}{}".format(
-                self.temp_file_head,
-                self.padding,
-                self.sequence_ext
-            )
-            frame_start = self.used_frames[-1] + 1
-
-            if self.used_frames[-1] == self.workfile_start:
-                frame_start = self.used_frames[-1]
-
-            # TODO: function for adding used frames with input frame duration
-            # generate used frames
-            for _i in range(1, (int(gap) + 1)):
-                if self.used_frames[-1] == self.workfile_start:
-                    seq_number = self.padding % (self.used_frames[-1])
-                    self.workfile_start -= 1
-                else:
-                    seq_number = self.padding % (
-                        self.used_frames[-1] + 1)
-                self.used_frames.append(int(seq_number))
-
-            sec_duration = self._frames_to_secons(gap, self.actual_fps)
-
-            # create path to destination
-            output_path = os.path.join(self.staging_dir, file)
-
-            # form command for rendering gap files
-            command = " ".join([
-                ffmpeg_path,
-                "-t {secDuration} -r {frameRate}",
-                "-f lavfi -i color=c=black:s={width}x{height}",
-                "-tune stillimage",
-                # TODO: add this with function for output file path framestart
-                "-start_number {frameStart}",
-                output_path
-            ]).format(
-                secDuration=sec_duration,
-                frameRate=self.actual_fps,
-                frameStart=frame_start,
-                width=self.to_width,
-                height=self.to_height
-            )
+
+        # add output attributes
+        command.extend([
+            "-start_number {}".format(out_frame_start),
+            output_path
+        ])

         # execute
-        self.log.debug("Executing: {}".format(command))
-        output = pype.api.subprocess(command, shell=True)
+        self.log.debug("Executing: {}".format(" ".join(command)))
+        output = pype.api.subprocess(" ".join(command), shell=True)
         self.log.debug("Output: {}".format(output))

+        # generate used frames
+        self.generate_used_frames(frame_duration)
+
+    def generate_used_frames(self, duration):
+        for _i in range(1, (int(duration) + 1)):
+            if self.used_frames[-1] == self.workfile_start:
+                seq_number = self.padding % (self.used_frames[-1])
+                self.workfile_start -= 1
+            else:
+                seq_number = self.padding % (
+                    self.used_frames[-1] + 1)
+            self.used_frames.append(int(seq_number))
+
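For gaps the refactor above feeds ffmpeg a synthetic black source instead of
real footage. A minimal sketch of that `lavfi` color input, assuming a 25 fps
timeline and an 800x600 output (all values hypothetical):

    # sketch of the black-gap render above (hypothetical values)
    import subprocess

    sec_duration = 12 / 25.0   # 12 gap frames at 25 fps
    cmd = [
        "ffmpeg",
        "-t", str(sec_duration), "-r", "25",
        "-f", "lavfi", "-i", "color=c=black:s=800x600",
        "-tune", "stillimage",
        "-start_number", "1013",
        "/staging/dir/tempFile.%04d.jpg",
    ]
    subprocess.check_call(cmd)
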
+    def add_sequence_output(self):
+        output_file = "{}{}{}".format(
+            self.temp_file_head,
+            self.padding,
+            self.sequence_ext
+        )
+        # create path to destination
+        output_path = os.path.join(self.staging_dir, output_file)
+
+        # generate frame start
+        out_frame_start = self.used_frames[-1] + 1
+        if self.used_frames[-1] == self.workfile_start:
+            out_frame_start = self.used_frames[-1]
+
+        return output_path, out_frame_start
+
     @staticmethod
     def _frames_to_secons(frames, framerate):
         rt = otio.opentime.from_frames(frames, framerate)
         return otio.opentime.to_seconds(rt)
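The `_frames_to_secons` helper kept by this patch is a thin wrapper over
OpenTimelineIO's opentime module. A small sketch of the same conversion,
with hypothetical values:

    # sketch of the frames-to-seconds conversion used above
    import opentimelineio as otio

    rt = otio.opentime.from_frames(50, 25.0)   # 50 frames at 25 fps
    print(otio.opentime.to_seconds(rt))        # 2.0
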
From 9497fc621e11423afa5629f899ca3dda4f15986f Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 15 Dec 2020 21:36:01 +0100
Subject: [PATCH 040/198] feat(global): clean code and finalizing

---
 .../global/publish/extract_otio_review.py | 536 ++----------------
 1 file changed, 54 insertions(+), 482 deletions(-)

diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py
index f4108bad49..07f75e3312 100644
--- a/pype/plugins/global/publish/extract_otio_review.py
+++ b/pype/plugins/global/publish/extract_otio_review.py
@@ -37,17 +37,20 @@ class ExtractOTIOReview(pype.api.Extractor):
     temp_file_head = "tempFile."
     to_width = 800
     to_height = 600
-    sequence_workflow = False
-    sequence_ext = ".jpg"
+    output_ext = ".jpg"

     def process(self, instance):
         self.representation_files = list()
         self.used_frames = list()
         self.workfile_start = int(instance.data.get(
             "workfileFrameStart", 1001))
-        self.padding = "%0{}d".format(len(str(self.workfile_start)))
+        self.padding = len(str(self.workfile_start))
         self.used_frames.append(self.workfile_start)
-        self.log.debug(f"_ self.used_frames-0: {self.used_frames}")
+        self.to_width = instance.data.get(
+            "resolutionWidth") or self.to_width
+        self.to_height = instance.data.get(
+            "resolutionHeight") or self.to_height

         # get otio clip and other time info from instance clip
         handle_start = instance.data["handleStart"]
@@ -65,39 +68,6 @@ class ExtractOTIOReview(pype.api.Extractor):
         if not instance.data.get("representations"):
             instance.data["representations"] = list()

-        # in case of more than one clip check if second clip is sequence
-        # this will define what ffmpeg workflow will be used
-        # test first clip if it is not gap
-        test_clip = otio_review_clips[0]
-        if (not isinstance(test_clip, otio.schema.Clip)) \
-                and (len(otio_review_clips) > 1):
-            # if first was gap then test second in case there are more
-            test_clip = otio_review_clips[1]
-
-        # make sure second clip is not gap
-        if isinstance(test_clip, otio.schema.Clip):
-            metadata = test_clip.media_reference.metadata
-
-            # get resolution data from metadata if they are available
-            self.to_width = metadata.get("width") or self.to_width
-            self.to_height = metadata.get("height") or self.to_height
-            self.actual_fps = test_clip.source_range.start_time.rate
-
-            # define future workflow sequencial or movie
-            is_sequence = metadata.get("isSequence")
-
-            if is_sequence:
-                path = test_clip.media_reference.target_url
-                available_range = self._trim_media_range(
-                    test_clip.available_range(),
-                    test_clip.source_range
-                )
-                _dir_path, collection = self._make_sequence_collection(
-                    path, available_range, metadata)
-                self.sequence_workflow = collection
-                self.sequence_ext = collection.format("{tail}")
-
-        # loop all otio review clips
         for index, r_otio_cl in enumerate(otio_review_clips):
             src_range = r_otio_cl.source_range
             start = src_range.start_time.value
             duration = src_range.duration.value

             available_range = self._trim_available_range(
                 available_range, start, duration, self.actual_fps)

-            first, last = pype.lib.otio_range_to_frame_range(
-                available_range)
-            self.log.debug(f"_ first, last: {first}-{last}")
-
             # media source info
             if isinstance(r_otio_cl, otio.schema.Clip):
                 path = r_otio_cl.media_reference.target_url
                 metadata = r_otio_cl.media_reference.metadata

-                if self.sequence_workflow:
-                    if metadata.get("padding"):
-                        # render image sequence to sequence
-                        dir_path, collection = self._make_sequence_collection(
-                            path, available_range, metadata)
+                if metadata.get("padding"):
+                    # render image sequence to sequence
+                    dir_path, collection = self._make_sequence_collection(
+                        path, available_range, metadata)

-                        # render segment
-                        self._render_sequence_seqment(
-                            sequence=[dir_path, collection]
-                        )
-                    else:
-                        # render video file to sequence
-                        self._render_sequence_seqment(
-                            video=[path, available_range]
-                        )
+                    # render segment
+                    self._render_seqment(
+                        sequence=[dir_path, collection])
+                else:
+                    # render video file to sequence
+                    self._render_seqment(
+                        video=[path, available_range])

-                # create seconds values
-                start_sec = self._frames_to_secons(
-                    start,
-                    src_range.start_time.rate)
-                duration_sec = self._frames_to_secons(
-                    duration,
-                    src_range.duration.rate)
             else:
-                # create seconds values
-                start_sec = 0
-                duration_sec = self._frames_to_secons(
-                    duration,
-                    src_range.duration.rate)
-
-                # if sequence workflow
-                if self.sequence_workflow:
-                    self._render_sequence_seqment(gap=duration)
-
-            self.log.debug(f"_ start_sec: {start_sec}")
-            self.log.debug(f"_ duration_sec: {duration_sec}")
+                self._render_seqment(gap=duration)

             # creating and registering representation
-            representation = self.create_representation(start, duration)
+            representation = self._create_representation(start, duration)
             instance.data["representations"].append(representation)
             self.log.info(f"Adding representation: {representation}")

-    def create_representation(self, start, duration):
+    def _create_representation(self, start, duration):
         end = start + duration

         # create default representation data
@@ -190,26 +137,24 @@ class ExtractOTIOReview(pype.api.Extractor):
             "tags": ["review", "ftrackreview", "delete"]
         }

-        # update data if sequence workflow
-        if self.sequence_workflow:
-            collection = clique.Collection(
-                self.temp_file_head,
-                tail=self.sequence_ext,
-                padding=len(str(self.workfile_start)),
-                indexes=set(self.used_frames)
-            )
+        collection = clique.Collection(
+            self.temp_file_head,
+            tail=self.output_ext,
+            padding=self.padding,
+            indexes=set(self.used_frames)
+        )
         start = min(collection.indexes)
         end = max(collection.indexes)
-        self.log.debug(collection)
+
         files = [f for f in collection]
         ext = collection.format("{tail}")
         representation_data.update({
             "name": ext[1:],
             "ext": ext[1:],
             "files": files,
             "frameStart": start,
             "frameEnd": end,
         })
         return representation_data
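The representation above is assembled from a `clique.Collection` built
directly from the recorded frame numbers. A sketch of what that produces,
with hypothetical frames:

    # sketch of the collection assembly above (hypothetical frames)
    import clique

    collection = clique.Collection(
        "tempFile.", tail=".jpg", padding=4, indexes=set([1001, 1002, 1003])
    )
    print([f for f in collection])      # ['tempFile.1001.jpg', ...]
    print(collection.format("{tail}"))  # '.jpg'
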
{self.used_frames}") # fix start and end to correct values start = 0 duration -= len(gap_duration) @@ -239,9 +183,8 @@ class ExtractOTIOReview(pype.api.Extractor): gap_duration = gap_start - gap_end # create gap data to disk - if self.sequence_workflow: - self._render_sequence_seqment(gap=gap_duration) - self.log.debug(f"_ self.used_frames-2: {self.used_frames}") + self._render_seqment(gap=gap_duration) + self.log.debug(f"_ self.used_frames-2: {self.used_frames}") # fix duration lenght duration = avl_durtation @@ -251,15 +194,12 @@ class ExtractOTIOReview(pype.api.Extractor): avl_range, self._range_from_frames(start, duration, fps) ) - def _render_sequence_seqment(self, - sequence=None, - video=None, - gap=None): + def _render_seqment(self, sequence=None, video=None, gap=None): # get rendering app path ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") # create path and frame start to destination - output_path, out_frame_start = self.add_sequence_output() + output_path, out_frame_start = self._add_ffmpeg_output() # start command list command = [ffmpeg_path] @@ -319,23 +259,23 @@ class ExtractOTIOReview(pype.api.Extractor): self.log.debug("Output: {}".format(output)) # generate used frames - self.generate_used_frames(frame_duration) + self._generate_used_frames(frame_duration) - def generate_used_frames(self, duration): + def _generate_used_frames(self, duration): + padding = "{{:0{}d}}".format(self.padding) for _i in range(1, (int(duration) + 1)): if self.used_frames[-1] == self.workfile_start: - seq_number = self.padding % (self.used_frames[-1]) + seq_number = padding.format(self.used_frames[-1]) self.workfile_start -= 1 else: - seq_number = self.padding % ( - self.used_frames[-1] + 1) + seq_number = padding.format(self.used_frames[-1] + 1) self.used_frames.append(int(seq_number)) - def add_sequence_output(self): + def _add_ffmpeg_output(self): output_file = "{}{}{}".format( self.temp_file_head, - self.padding, - self.sequence_ext + "%0{}d".format(self.padding), + self.output_ext ) # create path to destination output_path = os.path.join(self.staging_dir, output_file) @@ -385,371 +325,3 @@ class ExtractOTIOReview(pype.api.Extractor): otio.opentime.RationalTime(start, fps), otio.opentime.RationalTime(duration, fps) ) - - - # inst_data = instance.data - # asset = inst_data['asset'] - # item = inst_data['item'] - # event_number = int(item.eventNumber()) - # - # # get representation and loop them - # representations = inst_data["representations"] - # - # # check if sequence - # is_sequence = inst_data["isSequence"] - # - # # get resolution default - # resolution_width = inst_data["resolutionWidth"] - # resolution_height = inst_data["resolutionHeight"] - # - # # frame range data - # media_duration = inst_data["mediaDuration"] - # - # ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - # ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") - # - # # filter out mov and img sequences - # representations_new = representations[:] - # for repre in representations: - # input_args = list() - # output_args = list() - # - # tags = repre.get("tags", []) - # - # # check if supported tags are in representation for activation - # filter_tag = False - # for tag in ["_cut-bigger", "_cut-smaller"]: - # if tag in tags: - # filter_tag = True - # break - # if not filter_tag: - # continue - # - # self.log.debug("__ repre: {}".format(repre)) - # - # files = repre.get("files") - # staging_dir = repre.get("stagingDir") - # fps = repre.get("fps") - # ext = repre.get("ext") - # - # # make paths - # 
full_output_dir = os.path.join( - # staging_dir, "cuts") - # - # if is_sequence: - # new_files = list() - # - # # frame range delivery included handles - # frame_start = ( - # inst_data["frameStart"] - inst_data["handleStart"]) - # frame_end = ( - # inst_data["frameEnd"] + inst_data["handleEnd"]) - # self.log.debug("_ frame_start: {}".format(frame_start)) - # self.log.debug("_ frame_end: {}".format(frame_end)) - # - # # make collection from input files list - # collections, remainder = clique.assemble(files) - # collection = collections.pop() - # self.log.debug("_ collection: {}".format(collection)) - # - # # name components - # head = collection.format("{head}") - # padding = collection.format("{padding}") - # tail = collection.format("{tail}") - # self.log.debug("_ head: {}".format(head)) - # self.log.debug("_ padding: {}".format(padding)) - # self.log.debug("_ tail: {}".format(tail)) - # - # # make destination file with instance data - # # frame start and end range - # index = 0 - # for image in collection: - # dst_file_num = frame_start + index - # dst_file_name = "".join([ - # str(event_number), - # head, - # str(padding % dst_file_num), - # tail - # ]) - # src = os.path.join(staging_dir, image) - # dst = os.path.join(full_output_dir, dst_file_name) - # self.log.info("Creating temp hardlinks: {}".format(dst)) - # self.hardlink_file(src, dst) - # new_files.append(dst_file_name) - # index += 1 - # - # self.log.debug("_ new_files: {}".format(new_files)) - # - # else: - # # ffmpeg when single file - # new_files = "{}_{}".format(asset, files) - # - # # frame range - # frame_start = repre.get("frameStart") - # frame_end = repre.get("frameEnd") - # - # full_input_path = os.path.join( - # staging_dir, files) - # - # os.path.isdir(full_output_dir) or os.makedirs(full_output_dir) - # - # full_output_path = os.path.join( - # full_output_dir, new_files) - # - # self.log.debug( - # "__ full_input_path: {}".format(full_input_path)) - # self.log.debug( - # "__ full_output_path: {}".format(full_output_path)) - # - # # check if audio stream is in input video file - # ffprob_cmd = ( - # "\"{ffprobe_path}\" -i \"{full_input_path}\" -show_streams" - # " -select_streams a -loglevel error" - # ).format(**locals()) - # - # self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) - # audio_check_output = pype.api.subprocess(ffprob_cmd) - # self.log.debug( - # "audio_check_output: {}".format(audio_check_output)) - # - # # Fix one frame difference - # """ TODO: this is just work-around for issue: - # https://github.com/pypeclub/pype/issues/659 - # """ - # frame_duration_extend = 1 - # if audio_check_output: - # frame_duration_extend = 0 - # - # # translate frame to sec - # start_sec = float(frame_start) / fps - # duration_sec = float( - # (frame_end - frame_start) + frame_duration_extend) / fps - # - # empty_add = None - # - # # check if not missing frames at start - # if (start_sec < 0) or (media_duration < frame_end): - # # for later swithing off `-c:v copy` output arg - # empty_add = True - # - # # init empty variables - # video_empty_start = video_layer_start = "" - # audio_empty_start = audio_layer_start = "" - # video_empty_end = video_layer_end = "" - # audio_empty_end = audio_layer_end = "" - # audio_input = audio_output = "" - # v_inp_idx = 0 - # concat_n = 1 - # - # # try to get video native resolution data - # try: - # resolution_output = pype.api.subprocess(( - # "\"{ffprobe_path}\" -i \"{full_input_path}\"" - # " -v error " - # "-select_streams v:0 -show_entries " - # "stream=width,height -of 
csv=s=x:p=0" - # ).format(**locals())) - # - # x, y = resolution_output.split("x") - # resolution_width = int(x) - # resolution_height = int(y) - # except Exception as _ex: - # self.log.warning( - # "Video native resolution is untracable: {}".format( - # _ex)) - # - # if audio_check_output: - # # adding input for empty audio - # input_args.append("-f lavfi -i anullsrc") - # - # # define audio empty concat variables - # audio_input = "[1:a]" - # audio_output = ":a=1" - # v_inp_idx = 1 - # - # # adding input for video black frame - # input_args.append(( - # "-f lavfi -i \"color=c=black:" - # "s={resolution_width}x{resolution_height}:r={fps}\"" - # ).format(**locals())) - # - # if (start_sec < 0): - # # recalculate input video timing - # empty_start_dur = abs(start_sec) - # start_sec = 0 - # duration_sec = float(frame_end - ( - # frame_start + (empty_start_dur * fps)) + 1) / fps - # - # # define starting empty video concat variables - # video_empty_start = ( - # "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" # noqa - # ).format(**locals()) - # video_layer_start = "[gv0]" - # - # if audio_check_output: - # # define starting empty audio concat variables - # audio_empty_start = ( - # "[0]atrim=duration={empty_start_dur}[ga0];" - # ).format(**locals()) - # audio_layer_start = "[ga0]" - # - # # alter concat number of clips - # concat_n += 1 - # - # # check if not missing frames at the end - # if (media_duration < frame_end): - # # recalculate timing - # empty_end_dur = float( - # frame_end - media_duration + 1) / fps - # duration_sec = float( - # media_duration - frame_start) / fps - # - # # define ending empty video concat variables - # video_empty_end = ( - # "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];" - # ).format(**locals()) - # video_layer_end = "[gv1]" - # - # if audio_check_output: - # # define ending empty audio concat variables - # audio_empty_end = ( - # "[0]atrim=duration={empty_end_dur}[ga1];" - # ).format(**locals()) - # audio_layer_end = "[ga0]" - # - # # alter concat number of clips - # concat_n += 1 - # - # # concatting black frame togather - # output_args.append(( - # "-filter_complex \"" - # "{audio_empty_start}" - # "{video_empty_start}" - # "{audio_empty_end}" - # "{video_empty_end}" - # "{video_layer_start}{audio_layer_start}[1:v]{audio_input}" # noqa - # "{video_layer_end}{audio_layer_end}" - # "concat=n={concat_n}:v=1{audio_output}\"" - # ).format(**locals())) - # - # # append ffmpeg input video clip - # input_args.append("-ss {:0.2f}".format(start_sec)) - # input_args.append("-t {:0.2f}".format(duration_sec)) - # input_args.append("-i \"{}\"".format(full_input_path)) - # - # # add copy audio video codec if only shortening clip - # if ("_cut-bigger" in tags) and (not empty_add): - # output_args.append("-c:v copy") - # - # # make sure it is having no frame to frame comprassion - # output_args.append("-intra") - # - # # output filename - # output_args.append("-y \"{}\"".format(full_output_path)) - # - # mov_args = [ - # "\"{}\"".format(ffmpeg_path), - # " ".join(input_args), - # " ".join(output_args) - # ] - # subprcs_cmd = " ".join(mov_args) - # - # # run subprocess - # self.log.debug("Executing: {}".format(subprcs_cmd)) - # output = pype.api.subprocess(subprcs_cmd) - # self.log.debug("Output: {}".format(output)) - # - # repre_new = { - # "files": new_files, - # "stagingDir": full_output_dir, - # "frameStart": frame_start, - # "frameEnd": frame_end, - # "frameStartFtrack": frame_start, - # "frameEndFtrack": frame_end, - # "step": 1, - # "fps": fps, - # "name": 
"cut_up_preview", - # "tags": ["review"] + self.tags_addition, - # "ext": ext, - # "anatomy_template": "publish" - # } - # - # representations_new.append(repre_new) - # - # for repre in representations_new: - # if ("delete" in repre.get("tags", [])) and ( - # "cut_up_preview" not in repre["name"]): - # representations_new.remove(repre) - # - # self.log.debug( - # "Representations: {}".format(representations_new)) - # instance.data["representations"] = representations_new - # - # def hardlink_file(self, src, dst): - # dirname = os.path.dirname(dst) - # - # # make sure the destination folder exist - # try: - # os.makedirs(dirname) - # except OSError as e: - # if e.errno == errno.EEXIST: - # pass - # else: - # self.log.critical("An unexpected error occurred.") - # six.reraise(*sys.exc_info()) - # - # # create hardlined file - # try: - # filelink.create(src, dst, filelink.HARDLINK) - # except OSError as e: - # if e.errno == errno.EEXIST: - # pass - # else: - # self.log.critical("An unexpected error occurred.") - # six.reraise(*sys.exc_info()) - # -# def create_representation(self, otio_clip, to_otio_range, instance): -# to_tl_start, to_tl_end = pype.lib.otio_range_to_frame_range( -# to_otio_range) -# tl_start, tl_end = pype.lib.otio_range_to_frame_range( -# otio_clip.range_in_parent()) -# source_start, source_end = pype.lib.otio_range_to_frame_range( -# otio_clip.source_range) -# media_reference = otio_clip.media_reference -# metadata = media_reference.metadata -# mr_start, mr_end = pype.lib.otio_range_to_frame_range( -# media_reference.available_range) -# path = media_reference.target_url -# reference_frame_start = (mr_start + source_start) + ( -# to_tl_start - tl_start) -# reference_frame_end = (mr_start + source_end) - ( -# tl_end - to_tl_end) -# -# base_name = os.path.basename(path) -# staging_dir = os.path.dirname(path) -# ext = os.path.splitext(base_name)[1][1:] -# -# if metadata.get("isSequence"): -# files = list() -# padding = metadata["padding"] -# base_name = pype.lib.convert_to_padded_path(base_name, padding) -# for index in range( -# reference_frame_start, (reference_frame_end + 1)): -# file_name = base_name % index -# path_test = os.path.join(staging_dir, file_name) -# if os.path.exists(path_test): -# files.append(file_name) -# -# self.log.debug(files) -# else: -# files = base_name -# -# representation = { -# "ext": ext, -# "name": ext, -# "files": files, -# "frameStart": reference_frame_start, -# "frameEnd": reference_frame_end, -# "stagingDir": staging_dir -# } -# self.log.debug(representation) From 8c94caf330f14f4da74cd7122840fab824bc2dba Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 16 Dec 2020 17:01:45 +0100 Subject: [PATCH 041/198] feat(global): finalized collect and extract otio review plugin adding docstirngs and comments --- .../global/publish/collect_otio_review.py | 55 +++- .../global/publish/extract_otio_review.py | 238 ++++++++++++++---- 2 files changed, 238 insertions(+), 55 deletions(-) diff --git a/pype/plugins/global/publish/collect_otio_review.py b/pype/plugins/global/publish/collect_otio_review.py index 97f6552c51..c197e0066d 100644 --- a/pype/plugins/global/publish/collect_otio_review.py +++ b/pype/plugins/global/publish/collect_otio_review.py @@ -1,19 +1,21 @@ """ Requires: - otioTimeline -> context data attribute - review -> instance data attribute - masterLayer -> instance data attribute - otioClipRange -> instance data attribute + instance -> review + instance -> masterLayer + instance -> otioClip + context -> otioTimeline + +Provides: + instance -> 
 """
-# import os
+
 import opentimelineio as otio
 import pyblish.api
-import pype.lib
 from pprint import pformat


 class CollectOcioReview(pyblish.api.InstancePlugin):
-    """Get matching otio from defined review layer"""
+    """Get matching otio track from defined review layer"""

     label = "Collect OTIO review"
     order = pyblish.api.CollectorOrder - 0.57
@@ -27,8 +29,14 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
         master_layer = instance.data["masterLayer"]
         otio_timeline = instance.context.data["otioTimeline"]
         otio_clip = instance.data["otioClip"]
+
+        # generate range in parent
         otio_tl_range = otio_clip.range_in_parent()

+        # calculate real timeline end needed for the clip
+        clip_end_frame = int(
+            otio_tl_range.start_time.value + otio_tl_range.duration.value)
+
         # skip if master layer is False
         if not master_layer:
             return
@@ -36,10 +44,43 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
         for track in otio_timeline.tracks:
             if review_track_name not in track.name:
                 continue
+
+            # process correct track
+            otio_gap = None
+
+            # get track parent range
+            track_rip = track.range_in_parent()
+
+            # calculate real track end frame
+            track_end_frame = int(
+                track_rip.start_time.value + track_rip.duration.value)
+
+            # check if the end of track is not lower than the clip requires
+            if clip_end_frame > track_end_frame:
+                # calculate difference duration
+                gap_duration = clip_end_frame - track_end_frame
+                # create rational time range for gap
+                otio_gap_range = otio.opentime.TimeRange(
+                    start_time=otio.opentime.RationalTime(
+                        float(0),
+                        track_rip.start_time.rate
+                    ),
+                    duration=otio.opentime.RationalTime(
+                        float(gap_duration),
+                        track_rip.start_time.rate
+                    )
+                )
+                # create gap
+                otio_gap = otio.schema.Gap(source_range=otio_gap_range)
+
+            # trim available clips from defined track as reviewable source
             otio_review_clips = otio.algorithms.track_trimmed_to_range(
                 track,
                 otio_tl_range
             )
+            # add gap at the end if track end is shorter than needed
+            if otio_gap:
+                otio_review_clips.append(otio_gap)

             instance.data["otioReviewClips"] = otio_review_clips
             self.log.debug(
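The gap handling added to the collector can be exercised on its own. A small
sketch that builds the same kind of end gap, with hypothetical numbers:

    # sketch of the end-gap construction above (hypothetical values)
    import opentimelineio as otio

    gap_duration = 12   # clip needs 12 more frames than the track has
    otio_gap = otio.schema.Gap(
        source_range=otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(0.0, 25.0),
            duration=otio.opentime.RationalTime(float(gap_duration), 25.0),
        )
    )
    print(otio_gap.duration())  # RationalTime(12, 25)
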
At the end all Gaps are converted + to black frames and available material is converted to image sequence + frames. At the end representation is created and added to the instance. + + At the moment only image sequence output is supported - Process description: - Comparing `otioClip` parent range with `otioReviewClip` parent range - will result in frame range witch is the trimmed cut. In case more otio - clips or otio gaps are found in otioReviewClips then ffmpeg will - generate multiple clips and those are then concuted together to one - video file or image sequence. Resulting files are then added to - instance as representation ready for review family plugins. """ # order = api.ExtractorOrder @@ -40,23 +51,26 @@ class ExtractOTIOReview(pype.api.Extractor): output_ext = ".jpg" def process(self, instance): + # TODO: convert resulting image sequence to mp4 + # TODO: add oudio ouput to the mp4 if audio in review is on. + + # get otio clip and other time info from instance clip + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + otio_review_clips = instance.data["otioReviewClips"] + + # add plugin wide attributes self.representation_files = list() self.used_frames = list() self.workfile_start = int(instance.data.get( "workfileFrameStart", 1001)) self.padding = len(str(self.workfile_start)) self.used_frames.append(self.workfile_start) - self.log.debug(f"_ self.used_frames-0: {self.used_frames}") self.to_width = instance.data.get( "resolutionWidth") or self.to_width self.to_height = instance.data.get( "resolutionHeight") or self.to_height - # get otio clip and other time info from instance clip - handle_start = instance.data["handleStart"] - handle_end = instance.data["handleEnd"] - otio_review_clips = instance.data["otioReviewClips"] - # skip instance if no reviewable data available if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \ and (len(otio_review_clips) == 1): @@ -68,7 +82,9 @@ class ExtractOTIOReview(pype.api.Extractor): if not instance.data.get("representations"): instance.data["representations"] = list() + # loop available clips in otio track for index, r_otio_cl in enumerate(otio_review_clips): + # get frame range values src_range = r_otio_cl.source_range start = src_range.start_time.value duration = src_range.duration.value @@ -110,16 +126,22 @@ class ExtractOTIOReview(pype.api.Extractor): # render segment self._render_seqment( - sequence=[dir_path, collection] - ) + sequence=[dir_path, collection]) + # generate used frames + self._generate_used_frames( + len(collection.indexes)) else: # render video file to sequence self._render_seqment( - video=[path, available_range] - ) + video=[path, available_range]) + # generate used frames + self._generate_used_frames( + available_range.duration.value) else: self._render_seqment(gap=duration) + # generate used frames + self._generate_used_frames(duration) # creating and registering representation representation = self._create_representation(start, duration) @@ -127,6 +149,17 @@ class ExtractOTIOReview(pype.api.Extractor): self.log.info(f"Adding representation: {representation}") def _create_representation(self, start, duration): + """ + Creating representation data. 
+
+        Args:
+            start (int): start frame
+            duration (int): duration frames
+
+        Returns:
+            dict: representation data
+        """
+
         end = start + duration

         # create default representation data
@@ -158,6 +191,21 @@ class ExtractOTIOReview(pype.api.Extractor):
         return representation_data

     def _trim_available_range(self, avl_range, start, duration, fps):
+        """
+        Trim available media range to source range.
+
+        If missing media range is detected it will convert it into
+        black frames gaps.
+
+        Args:
+            avl_range (otio.time.TimeRange): media available time range
+            start (int): start frame
+            duration (int): duration frames
+            fps (float): frame rate
+
+        Returns:
+            otio.time.TimeRange: trimmed available range
+        """
         avl_start = int(avl_range.start_time.value)
         src_start = int(avl_start + start)
         avl_durtation = int(avl_range.duration.value - start)
@@ -169,22 +217,24 @@ class ExtractOTIOReview(pype.api.Extractor):
             gap_duration = src_start - avl_start

             # create gap data to disk
             self._render_seqment(gap=gap_duration)
+            # generate used frames
+            self._generate_used_frames(gap_duration)
+
             # fix start and end to correct values
             start = 0
             duration -= len(gap_duration)

         # if media duration is shorter than the clip requires
         if duration > avl_durtation:
-            # TODO: this renders the missing frames before the footage instead of after it; fix it so the rendered gap frames are numbered after the footage.
             # calculate gap
             gap_start = int(src_start + avl_durtation)
             gap_end = int(src_start + duration)
-            gap_duration = gap_start - gap_end
+            gap_duration = gap_end - gap_start

             # create gap data to disk
-            self._render_seqment(gap=gap_duration)
+            self._render_seqment(gap=gap_duration, end_offset=avl_durtation)
+            # generate used frames
+            self._generate_used_frames(gap_duration, end_offset=avl_durtation)

             # fix duration length
             duration = avl_durtation
@@ -194,19 +244,37 @@ class ExtractOTIOReview(pype.api.Extractor):
             avl_range,
             self._range_from_frames(start, duration, fps)
         )
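The `end_offset` branch above numbers tail-gap frames after the footage
instead of before it. A worked sketch of the offset arithmetic, with
hypothetical values:

    # worked sketch of the end_offset frame numbering (hypothetical values)
    used_frames = [1001]   # last frame issued so far
    end_offset = 3         # available footage length in frames
    duration = 2           # tail gap frames to add
    start_frame = used_frames[-1]
    new_frames = [
        start_frame + index
        for index in range(end_offset + 1, end_offset + duration + 1)
    ]
    print(new_frames)      # [1005, 1006] -> numbered after the footage
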

     def _render_seqment(self, sequence=None,
-                        video=None, gap=None):
+                        video=None, gap=None, end_offset=None):
+        """
+        Render segment into image sequence frames.
+
+        Using ffmpeg to convert compatible video and image source
+        to defined image sequence format.
+
+        Args:
+            sequence (list): input dir path string, collection object in list
+            video (list)[optional]: video_path string, otio_range in list
+            gap (int)[optional]: gap duration
+            end_offset (int)[optional]: offset gap frame start in frames
+
+        Returns:
+            otio.time.TimeRange: trimmed available range
+        """
         # get rendering app path
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

         # create path and frame start to destination
-        output_path, out_frame_start = self._add_ffmpeg_output()
+        output_path, out_frame_start = self._get_ffmpeg_output()
+
+        if end_offset:
+            out_frame_start += end_offset

         # start command list
         command = [ffmpeg_path]

         if sequence:
             input_dir, collection = sequence
-            frame_duration = len(collection.indexes)
             in_frame_start = min(collection.indexes)

             # converting image sequence to image sequence
         elif gap:
-            frame_duration = gap
             sec_duration = self._frames_to_secons(
-                frame_duration, self.actual_fps)
+                gap, self.actual_fps)

             # form command for rendering gap files
             command.extend([
         self.log.debug("Output: {}".format(output))

-        # generate used frames
-        self._generate_used_frames(frame_duration)
-
-    def _generate_used_frames(self, duration):
+    def _generate_used_frames(self, duration, end_offset=None):
+        """
+        Generating used frames into plugin argument `used_frames`.
+
+        The argument `used_frames` is used for checking next available
+        frame to start with during rendering sequence segments.
+
+        Args:
+            duration (int): duration of frames needed to be generated
+            end_offset (int)[optional]: in case frames need to be offset
+
+        """

         padding = "{{:0{}d}}".format(self.padding)
+
+        if end_offset:
+            new_frames = list()
+            start_frame = self.used_frames[-1]
+            for index in range((end_offset + 1),
+                               (int(end_offset + duration) + 1)):
+                seq_number = padding.format(start_frame + index)
+                self.log.debug(
+                    f"index: `{index}` | seq_number: `{seq_number}`")
+                new_frames.append(int(seq_number))
+            new_frames += self.used_frames
+            self.used_frames = new_frames
+        else:
             for _i in range(1, (int(duration) + 1)):
                 if self.used_frames[-1] == self.workfile_start:
                     seq_number = padding.format(self.used_frames[-1])
                     self.workfile_start -= 1
                 else:
                     seq_number = padding.format(self.used_frames[-1] + 1)
                 self.used_frames.append(int(seq_number))

-    def _add_ffmpeg_output(self):
+    def _get_ffmpeg_output(self):
+        """
+        Returning ffmpeg output command arguments.
+
+        Returns:
+            str: output_path is path for image sequence output
+            int: out_frame_start is starting sequence frame
+
+        """
         output_file = "{}{}{}".format(
             self.temp_file_head,
             "%0{}d".format(self.padding),
             self.output_ext
         )

     @staticmethod
     def _frames_to_secons(frames, framerate):
+        """
+        Returning seconds.
+
+        Args:
+            frames (int): frame
+            framerate (float): frame rate
+
+        Returns:
+            float: second value
+
+        """
         rt = otio.opentime.from_frames(frames, framerate)
         return otio.opentime.to_seconds(rt)
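`_make_sequence_collection` below leans on `clique.parse` to turn a
`%d`-style file name into a collection object. A sketch of that call, with a
hypothetical file name:

    # sketch of sequence parsing with clique (hypothetical file name)
    import clique

    collection = clique.parse("plate.%04d.jpg [1001-1003]")
    print(list(collection))
    # ['plate.1001.jpg', 'plate.1002.jpg', 'plate.1003.jpg']
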

     @staticmethod
     def _make_sequence_collection(path, otio_range, metadata):
+        """
+        Make collection from path otio range and otio metadata.
+
+        Args:
+            path (str): path to image sequence with `%d`
+            otio_range (otio.opentime.TimeRange): range to be used
+            metadata (dict): data where padding value can be found
+
+        Returns:
+            list: dir_path (str): path to sequence, collection object
+
+        """
         if "%" not in path:
             return None
         file_name = os.path.basename(path)
@@ -308,6 +427,17 @@ class ExtractOTIOReview(pype.api.Extractor):

     @staticmethod
     def _trim_media_range(media_range, source_range):
+        """
+        Trim input media range with clip source range.
+
+        Args:
+            media_range (otio.opentime.TimeRange): available range of media
+            source_range (otio.opentime.TimeRange): clip required range
+
+        Returns:
+            otio.opentime.TimeRange: trimmed media range
+
+        """
         rw_media_start = otio.opentime.RationalTime(
             media_range.start_time.value + source_range.start_time.value,
             media_range.start_time.rate
@@ -321,6 +451,18 @@ class ExtractOTIOReview(pype.api.Extractor):

     @staticmethod
     def _range_from_frames(start, duration, fps):
+        """
+        Returns otio time range.
+
+        Args:
+            start (int): frame start
+            duration (int): frame duration
+            fps (float): frame rate
+
+        Returns:
+            otio.opentime.TimeRange: created range
+
+        """
         return otio.opentime.TimeRange(
             otio.opentime.RationalTime(start, fps),
             otio.opentime.RationalTime(duration, fps)
         )

From c04093e2b8b08c7c3a92cee66a015a9a1daa38f6 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 18 Dec 2020 14:37:36 +0100
Subject: [PATCH 042/198] feat(resolve): moving resolve to pype3

---
 pype/hooks/global/pre_with_windows_shell.py          |  2 +-
 pype/hooks/resolve/pre_resolve_setup.py              | 13 +++++++------
 .../defaults/system_settings/applications.json       | 11 +++++++----
 pype/settings/defaults/system_settings/modules.json  |  2 +-
 requirements.txt                                     |  2 +-
 5 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/pype/hooks/global/pre_with_windows_shell.py b/pype/hooks/global/pre_with_windows_shell.py
index 918c0d63fd..d675c9bf5b 100644
--- a/pype/hooks/global/pre_with_windows_shell.py
+++ b/pype/hooks/global/pre_with_windows_shell.py
@@ -11,7 +11,7 @@ class LaunchWithWindowsShell(PreLaunchHook):
     """

     order = 10
-    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
+    app_groups = ["resolve", "nuke", "nukex", "hiero", "nukestudio"]
     platforms = ["windows"]

     def execute(self):
diff --git a/pype/hooks/resolve/pre_resolve_setup.py b/pype/hooks/resolve/pre_resolve_setup.py
index 4f6d33c6eb..3799e227ff 100644
--- a/pype/hooks/resolve/pre_resolve_setup.py
+++ b/pype/hooks/resolve/pre_resolve_setup.py
@@ -15,7 +15,8 @@ class ResolvePrelaunch(PreLaunchHook):

     def execute(self):
         # making sure python 3.6 is installed at provided path
-        py36_dir = os.path.normpath(self.env.get("PYTHON36_RESOLVE", ""))
+        py36_dir = os.path.normpath(
+            self.launch_context.env.get("PYTHON36_RESOLVE", ""))
         assert os.path.isdir(py36_dir), (
             "Python 3.6 is not installed at the provided folder path. Either "
             "make sure the `environments\resolve.json` is having correctly "
             f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`"
         )
         self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...")
-        self.env["PYTHON36_RESOLVE"] = py36_dir

         # setting utility scripts dir for scripts syncing
         us_dir = os.path.normpath(
-            self.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
+            self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
         )
         assert os.path.isdir(us_dir), (
             "Resolve utility script dir does not exists.
Either make sure " @@ -38,8 +38,9 @@ class ResolvePrelaunch(PreLaunchHook): self.log.debug(f"-- us_dir: `{us_dir}`") # correctly format path for pre python script - pre_py_sc = os.path.normpath(self.env.get("PRE_PYTHON_SCRIPT", "")) - self.env["PRE_PYTHON_SCRIPT"] = pre_py_sc + pre_py_sc = os.path.normpath( + self.launch_context.env.get("PRE_PYTHON_SCRIPT", "")) + self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...") try: __import__("pype.hosts.resolve") @@ -55,4 +56,4 @@ class ResolvePrelaunch(PreLaunchHook): # Resolve Setup integration importlib.reload(utils) self.log.debug(f"-- utils.__file__: `{utils.__file__}`") - utils.setup(self.env) + utils.setup(self.launch_context.env) diff --git a/pype/settings/defaults/system_settings/applications.json b/pype/settings/defaults/system_settings/applications.json index 79d39c94f9..639b52e423 100644 --- a/pype/settings/defaults/system_settings/applications.json +++ b/pype/settings/defaults/system_settings/applications.json @@ -788,9 +788,7 @@ "RESOLVE_DEV" ] }, - "RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [ - "{STUDIO_SOFT}/davinci_resolve/scripts/python" - ], + "RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [], "RESOLVE_SCRIPT_API": { "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting", "darvin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting", @@ -834,7 +832,12 @@ "variant_label": "16", "icon": "", "executables": { - "windows": [], + "windows": [ + [ + "C:/Program Files/Blackmagic Design/DaVinci Resolve/Resolve.exe", + "" + ] + ], "darwin": [], "linux": [] }, diff --git a/pype/settings/defaults/system_settings/modules.json b/pype/settings/defaults/system_settings/modules.json index 0f4b0b37f3..488cb91827 100644 --- a/pype/settings/defaults/system_settings/modules.json +++ b/pype/settings/defaults/system_settings/modules.json @@ -153,4 +153,4 @@ "idle_manager": { "enabled": true } -} +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 658405e2fb..c719b06b9c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ google-api-python-client jsonschema keyring log4mongo -OpenTimelineIO +OpenTimelineIO==0.11.0 pathlib2 Pillow pynput From 5edb912a8d0bddcb160bde961c4823e676b89678 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 18 Dec 2020 15:53:55 +0100 Subject: [PATCH 043/198] fix(global): do `blessed` exception in terminal.py --- pype/lib/terminal.py | 83 +++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/pype/lib/terminal.py b/pype/lib/terminal.py index afaca8241a..461d13f84a 100644 --- a/pype/lib/terminal.py +++ b/pype/lib/terminal.py @@ -13,10 +13,12 @@ import re import os import sys -import blessed +try: + import blessed + term = blessed.Terminal() - -term = blessed.Terminal() +except Exception: + term = None class Terminal: @@ -29,44 +31,45 @@ class Terminal: """ # shortcuts for colorama codes + _sdict = {} + if term: + _SB = term.bold + _RST = "" + _LR = term.tomato2 + _LG = term.aquamarine3 + _LB = term.turquoise2 + _LM = term.slateblue2 + _LY = term.gold + _R = term.red + _G = term.green + _B = term.blue + _C = term.cyan + _Y = term.yellow + _W = term.white - _SB = term.bold - _RST = "" - _LR = term.tomato2 - _LG = term.aquamarine3 - _LB = term.turquoise2 - _LM = term.slateblue2 - _LY = term.gold - _R = term.red - _G = term.green - _B = term.blue - _C = term.cyan - _Y = term.yellow - _W = term.white + # dictionary replacing 
string sequences with colorized one + _sdict = { - # dictionary replacing string sequences with colorized one - _sdict = { - - r">>> ": _SB + _LG + r">>> " + _RST, - r"!!!(?!\sCRI|\sERR)": _SB + _R + r"!!! " + _RST, - r"\-\-\- ": _SB + _C + r"--- " + _RST, - r"\*\*\*(?!\sWRN)": _SB + _LY + r"***" + _RST, - r"\*\*\* WRN": _SB + _LY + r"*** WRN" + _RST, - r" \- ": _SB + _LY + r" - " + _RST, - r"\[ ": _SB + _LG + r"[ " + _RST, - r"\]": _SB + _LG + r"]" + _RST, - r"{": _LG + r"{", - r"}": r"}" + _RST, - r"\(": _LY + r"(", - r"\)": r")" + _RST, - r"^\.\.\. ": _SB + _LR + r"... " + _RST, - r"!!! ERR: ": - _SB + _LR + r"!!! ERR: " + _RST, - r"!!! CRI: ": - _SB + _R + r"!!! CRI: " + _RST, - r"(?i)failed": _SB + _LR + "FAILED" + _RST, - r"(?i)error": _SB + _LR + "ERROR" + _RST - } + r">>> ": _SB + _LG + r">>> " + _RST, + r"!!!(?!\sCRI|\sERR)": _SB + _R + r"!!! " + _RST, + r"\-\-\- ": _SB + _C + r"--- " + _RST, + r"\*\*\*(?!\sWRN)": _SB + _LY + r"***" + _RST, + r"\*\*\* WRN": _SB + _LY + r"*** WRN" + _RST, + r" \- ": _SB + _LY + r" - " + _RST, + r"\[ ": _SB + _LG + r"[ " + _RST, + r"\]": _SB + _LG + r"]" + _RST, + r"{": _LG + r"{", + r"}": r"}" + _RST, + r"\(": _LY + r"(", + r"\)": r")" + _RST, + r"^\.\.\. ": _SB + _LR + r"... " + _RST, + r"!!! ERR: ": + _SB + _LR + r"!!! ERR: " + _RST, + r"!!! CRI: ": + _SB + _R + r"!!! CRI: " + _RST, + r"(?i)failed": _SB + _LR + "FAILED" + _RST, + r"(?i)error": _SB + _LR + "ERROR" + _RST + } def __init__(self): pass @@ -124,7 +127,7 @@ class Terminal: """ T = Terminal # if we dont want colors, just print raw message - if os.environ.get('PYPE_LOG_NO_COLORS'): + if not T._sdict or os.environ.get('PYPE_LOG_NO_COLORS'): return message else: message = re.sub(r'\[(.*)\]', '[ ' + T._SB + T._W + From abc60f3aae8e7d2ab82be483e2b20a0e0c2f72d4 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 18 Dec 2020 17:11:00 +0100 Subject: [PATCH 044/198] hound fixes and other --- pype/hosts/resolve/lib_hiero.py | 838 --------------------------- pype/hosts/resolve/pipeline_hiero.py | 302 ---------- pype/hosts/resolve/rendering.py | 111 ---- pype/hosts/resolve/todo-rendering.py | 135 +++++ 4 files changed, 135 insertions(+), 1251 deletions(-) delete mode 100644 pype/hosts/resolve/lib_hiero.py delete mode 100644 pype/hosts/resolve/pipeline_hiero.py delete mode 100644 pype/hosts/resolve/rendering.py create mode 100644 pype/hosts/resolve/todo-rendering.py diff --git a/pype/hosts/resolve/lib_hiero.py b/pype/hosts/resolve/lib_hiero.py deleted file mode 100644 index 891ca3905c..0000000000 --- a/pype/hosts/resolve/lib_hiero.py +++ /dev/null @@ -1,838 +0,0 @@ -""" -Host specific functions where host api is connected -""" -import os -import re -import sys -import ast -import hiero -import avalon.api as avalon -import avalon.io -from avalon.vendor.Qt import QtWidgets -from pype.api import (Logger, Anatomy, config) -from . 
import tags -import shutil -from compiler.ast import flatten - -try: - from PySide.QtCore import QFile, QTextStream - from PySide.QtXml import QDomDocument -except ImportError: - from PySide2.QtCore import QFile, QTextStream - from PySide2.QtXml import QDomDocument - -# from opentimelineio import opentime -# from pprint import pformat - -log = Logger().get_logger(__name__, "hiero") - -self = sys.modules[__name__] -self._has_been_setup = False -self._has_menu = False -self._registered_gui = None -self.pype_tag_name = "Pype Data" -self.default_sequence_name = "PypeSequence" -self.default_bin_name = "PypeBin" - -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") - - -def get_current_project(remove_untitled=False): - projects = flatten(hiero.core.projects()) - if not remove_untitled: - return next(iter(projects)) - - # if remove_untitled - for proj in projects: - if "Untitled" in proj.name(): - proj.close() - else: - return proj - - -def get_current_sequence(name=None, new=False): - """ - Get current sequence in context of active project. - - Args: - name (str)[optional]: name of sequence we want to return - new (bool)[optional]: if we want to create new one - - Returns: - hiero.core.Sequence: the sequence object - """ - sequence = None - project = get_current_project() - root_bin = project.clipsBin() - - if new: - # create new - name = name or self.default_sequence_name - sequence = hiero.core.Sequence(name) - root_bin.addItem(hiero.core.BinItem(sequence)) - elif name: - # look for sequence by name - sequences = project.sequences() - for _sequence in sequences: - if _sequence.name() == name: - sequence = _sequence - if not sequence: - # if nothing found create new with input name - sequence = get_current_sequence(name, True) - elif not name and not new: - # if name is none and new is False then return current open sequence - sequence = hiero.ui.activeSequence() - - return sequence - - -def get_current_track(sequence, name, audio=False): - """ - Get current track in context of active project. - - Creates new if none is found. - - Args: - sequence (hiero.core.Sequence): hiero sequene object - name (str): name of track we want to return - audio (bool)[optional]: switch to AudioTrack - - Returns: - hiero.core.Track: the track object - """ - tracks = sequence.videoTracks() - - if audio: - tracks = sequence.audioTracks() - - # get track by name - track = None - for _track in tracks: - if _track.name() in name: - track = _track - - if not track: - if not audio: - track = hiero.core.VideoTrack(name) - else: - track = hiero.core.AudioTrack(name) - sequence.addTrack(track) - - return track - - -def get_track_items( - selected=False, - sequence_name=None, - track_item_name=None, - track_name=None, - track_type=None, - check_enabled=True, - check_locked=True, - check_tagged=False): - """Get all available current timeline track items. 
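A minimal call-site sketch for this removed helper, assuming a running Hiero session with an active sequence; the import path is hypothetical and mirrors this module's location before the removal:

    # hypothetical usage; only functions and methods used in this module appear
    from pype.hosts.resolve.lib_hiero import get_track_items

    # enabled, unlocked video items on tracks whose name contains "main"
    for item in get_track_items(track_name="main", track_type="video"):
        print(item.name(), item.parent().name())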
- - Attribute: - selected (bool)[optional]: return only selected items on timeline - sequence_name (str)[optional]: return only clips from input sequence - track_item_name (str)[optional]: return only item with input name - track_name (str)[optional]: return only items from track name - track_type (str)[optional]: return only items of given type - (`audio` or `video`) default is `video` - check_enabled (bool)[optional]: ignore disabled if True - check_locked (bool)[optional]: ignore locked if True - - Return: - list or hiero.core.TrackItem: list of track items or single track item - """ - return_list = list() - track_items = list() - - # get selected track items or all in active sequence - if selected: - selected_items = list(hiero.selection) - for item in selected_items: - if track_name and track_name in item.parent().name(): - # filter only items fitting input track name - track_items.append(item) - elif not track_name: - # or add all if no track_name was defined - track_items.append(item) - else: - sequence = get_current_sequence(name=sequence_name) - # get all available tracks from sequence - tracks = list(sequence.audioTracks()) + list(sequence.videoTracks()) - # loop all tracks - for track in tracks: - if check_locked and track.isLocked(): - continue - if check_enabled and not track.isEnabled(): - continue - # and all items in track - for item in track.items(): - if check_tagged and not item.tags(): - continue - - # check if track item is enabled - if check_enabled: - if not item.isEnabled(): - continue - if track_item_name: - if item.name() in track_item_name: - return item - # make sure only track items with correct track names are added - if track_name and track_name in track.name(): - # filter out only defined track_name items - track_items.append(item) - elif not track_name: - # or add all if no track_name is defined - track_items.append(item) - - # filter out only track items with defined track_type - for track_item in track_items: - if track_type and track_type == "video" and isinstance( - track_item.parent(), hiero.core.VideoTrack): - # only video track items are allowed - return_list.append(track_item) - elif track_type and track_type == "audio" and isinstance( - track_item.parent(), hiero.core.AudioTrack): - # only audio track items are allowed - return_list.append(track_item) - elif not track_type: - # add all if no track_type is defined - return_list.append(track_item) - - return return_list - - -def get_track_item_pype_tag(track_item): - """ - Get pype track item tag created by creator or loader plugin. - - Attributes: - trackItem (hiero.core.TrackItem): hiero object - - Returns: - hiero.core.Tag: hierarchy, orig clip attributes - """ - # get all tags from track item - _tags = track_item.tags() - if not _tags: - return None - for tag in _tags: - # return only correct tag defined by global name - if tag.name() in self.pype_tag_name: - return tag - - -def set_track_item_pype_tag(track_item, data=None): - """ - Set pype track item tag to input track_item. 
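This setter and the get_track_item_pype_data reader defined below form a round-trip; a short sketch, assuming `track_item` is a live hiero.core.TrackItem:

    # hypothetical round-trip; the data keys mirror the imprint() example below
    data = {"asset": "sq020sh0280", "family": "render", "subset": "subsetMain"}
    set_track_item_pype_tag(track_item, data)  # writes the "Pype Data" tag
    assert get_track_item_pype_data(track_item)["family"] == "render"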
- - Attributes: - trackItem (hiero.core.TrackItem): hiero object - - Returns: - hiero.core.Tag - """ - data = data or dict() - - # basic Tag's attribute - tag_data = { - "editable": "0", - "note": "Pype data holder", - "icon": "pype_icon.png", - "metadata": {k: v for k, v in data.items()} - } - # get available pype tag if any - _tag = get_track_item_pype_tag(track_item) - - if _tag: - # it not tag then create one - tag = tags.update_tag(_tag, tag_data) - else: - # if pype tag available then update with input data - tag = tags.create_tag(self.pype_tag_name, tag_data) - # add it to the input track item - track_item.addTag(tag) - - return tag - - -def get_track_item_pype_data(track_item): - """ - Get track item's pype tag data. - - Attributes: - trackItem (hiero.core.TrackItem): hiero object - - Returns: - dict: data found on pype tag - """ - data = dict() - # get pype data tag from track item - tag = get_track_item_pype_tag(track_item) - - if not tag: - return None - - # get tag metadata attribut - tag_data = tag.metadata() - # convert tag metadata to normal keys names and values to correct types - for k, v in dict(tag_data).items(): - key = k.replace("tag.", "") - - try: - # capture exceptions which are related to strings only - value = ast.literal_eval(v) - except (ValueError, SyntaxError): - value = v - - data.update({key: value}) - - return data - - -def imprint(track_item, data=None): - """ - Adding `Avalon data` into a hiero track item tag. - - Also including publish attribute into tag. - - Arguments: - track_item (hiero.core.TrackItem): hiero track item object - data (dict): Any data which needst to be imprinted - - Examples: - data = { - 'asset': 'sq020sh0280', - 'family': 'render', - 'subset': 'subsetMain' - } - """ - data = data or {} - - tag = set_track_item_pype_tag(track_item, data) - - # add publish attribute - set_publish_attribute(tag, True) - - -def set_publish_attribute(tag, value): - """ Set Publish attribute in input Tag object - - Attribute: - tag (hiero.core.Tag): a tag object - value (bool): True or False - """ - tag_data = tag.metadata() - # set data to the publish attribute - tag_data.setValue("tag.publish", str(value)) - - -def get_publish_attribute(tag): - """ Get Publish attribute from input Tag object - - Attribute: - tag (hiero.core.Tag): a tag object - value (bool): True or False - """ - tag_data = tag.metadata() - # get data to the publish attribute - value = tag_data.value("tag.publish") - # return value converted to bool value. Atring is stored in tag. 
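Because tag metadata can hold only strings, the reverse conversion leans on ast.literal_eval; a self-contained illustration of that behaviour:

    import ast

    stored = str(True)                   # the form set_publish_attribute writes
    assert ast.literal_eval(stored) is True
    assert ast.literal_eval("42") == 42  # numbers and dicts round-trip the same way

    try:
        ast.literal_eval("not a literal")
    except (ValueError, SyntaxError):
        # free-form text raises, hence the fallback to the raw string
        # in get_track_item_pype_data above
        pass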
- return ast.literal_eval(value) - - -def sync_avalon_data_to_workfile(): - # import session to get project dir - project_name = avalon.Session["AVALON_PROJECT"] - - anatomy = Anatomy(project_name) - work_template = anatomy.templates["work"]["path"] - work_root = anatomy.root_value_for_template(work_template) - active_project_root = ( - os.path.join(work_root, project_name) - ).replace("\\", "/") - # getting project - project = get_current_project() - - if "Tag Presets" in project.name(): - return - - log.debug("Synchronizing Pype metadata to project: {}".format( - project.name())) - - # set project root with backward compatibility - try: - project.setProjectDirectory(active_project_root) - except Exception: - # old way of seting it - project.setProjectRoot(active_project_root) - - # get project data from avalon db - project_doc = avalon.io.find_one({"type": "project"}) - project_data = project_doc["data"] - - log.debug("project_data: {}".format(project_data)) - - # get format and fps property from avalon db on project - width = project_data["resolutionWidth"] - height = project_data["resolutionHeight"] - pixel_aspect = project_data["pixelAspect"] - fps = project_data['fps'] - format_name = project_data['code'] - - # create new format in hiero project - format = hiero.core.Format(width, height, pixel_aspect, format_name) - project.setOutputFormat(format) - - # set fps to hiero project - project.setFramerate(fps) - - # TODO: add auto colorspace set from project drop - log.info("Project property has been synchronised with Avalon db") - - -def launch_workfiles_app(event): - """ - Event for launching workfiles after hiero start - - Args: - event (obj): required but unused - """ - from . import launch_workfiles_app - launch_workfiles_app() - - -def setup(console=False, port=None, menu=True): - """Setup integration - - Registers Pyblish for Hiero plug-ins and appends an item to the File-menu - - Arguments: - console (bool): Display console with GUI - port (int, optional): Port from which to start looking for an - available port to connect with Pyblish QML, default - provided by Pyblish Integration. - menu (bool, optional): Display file menu in Hiero. - """ - - if self._has_been_setup: - teardown() - - add_submission() - - if menu: - add_to_filemenu() - self._has_menu = True - - self._has_been_setup = True - log.debug("pyblish: Loaded successfully.") - - -def teardown(): - """Remove integration""" - if not self._has_been_setup: - return - - if self._has_menu: - remove_from_filemenu() - self._has_menu = False - - self._has_been_setup = False - log.debug("pyblish: Integration torn down successfully") - - -def remove_from_filemenu(): - raise NotImplementedError("Implement me please.") - - -def add_to_filemenu(): - PublishAction() - - -class PyblishSubmission(hiero.exporters.FnSubmission.Submission): - - def __init__(self): - hiero.exporters.FnSubmission.Submission.__init__(self) - - def addToQueue(self): - from . import publish - # Add submission to Hiero module for retrieval in plugins. 
- hiero.submission = self - publish() - - -def add_submission(): - registry = hiero.core.taskRegistry - registry.addSubmission("Pyblish", PyblishSubmission) - - -class PublishAction(QtWidgets.QAction): - """ - Action with is showing as menu item - """ - - def __init__(self): - QtWidgets.QAction.__init__(self, "Publish", None) - self.triggered.connect(self.publish) - - for interest in ["kShowContextMenu/kTimeline", - "kShowContextMenukBin", - "kShowContextMenu/kSpreadsheet"]: - hiero.core.events.registerInterest(interest, self.eventHandler) - - self.setShortcut("Ctrl+Alt+P") - - def publish(self): - from . import publish - # Removing "submission" attribute from hiero module, to prevent tasks - # from getting picked up when not using the "Export" dialog. - if hasattr(hiero, "submission"): - del hiero.submission - publish() - - def eventHandler(self, event): - # Add the Menu to the right-click menu - event.menu.addAction(self) - - -# def CreateNukeWorkfile(nodes=None, -# nodes_effects=None, -# to_timeline=False, -# **kwargs): -# ''' Creating nuke workfile with particular version with given nodes -# Also it is creating timeline track items as precomps. -# -# Arguments: -# nodes(list of dict): each key in dict is knob order is important -# to_timeline(type): will build trackItem with metadata -# -# Returns: -# bool: True if done -# -# Raises: -# Exception: with traceback -# -# ''' -# import hiero.core -# from avalon.nuke import imprint -# from pype.hosts.nuke import ( -# lib as nklib -# ) -# -# # check if the file exists if does then Raise "File exists!" -# if os.path.exists(filepath): -# raise FileExistsError("File already exists: `{}`".format(filepath)) -# -# # if no representations matching then -# # Raise "no representations to be build" -# if len(representations) == 0: -# raise AttributeError("Missing list of `representations`") -# -# # check nodes input -# if len(nodes) == 0: -# log.warning("Missing list of `nodes`") -# -# # create temp nk file -# nuke_script = hiero.core.nuke.ScriptWriter() -# -# # create root node and save all metadata -# root_node = hiero.core.nuke.RootNode() -# -# anatomy = Anatomy(os.environ["AVALON_PROJECT"]) -# work_template = anatomy.templates["work"]["path"] -# root_path = anatomy.root_value_for_template(work_template) -# -# nuke_script.addNode(root_node) -# -# # here to call pype.hosts.nuke.lib.BuildWorkfile -# script_builder = nklib.BuildWorkfile( -# root_node=root_node, -# root_path=root_path, -# nodes=nuke_script.getNodes(), -# **kwargs -# ) - - -def create_nuke_workfile_clips(nuke_workfiles, seq=None): - ''' - nuke_workfiles is list of dictionaries like: - [{ - 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', - 'name': 'test', - 'handleStart': 15, # added asymetrically to handles - 'handleEnd': 10, # added asymetrically to handles - "clipIn": 16, - "frameStart": 991, - "frameEnd": 1023, - 'task': 'Comp-tracking', - 'work_dir': 'VFX_PR', - 'shot': '00010' - }] - ''' - - proj = hiero.core.projects()[-1] - root = proj.clipsBin() - - if not seq: - seq = hiero.core.Sequence('NewSequences') - root.addItem(hiero.core.BinItem(seq)) - # todo will ned to define this better - # track = seq[1] # lazy example to get a destination# track - clips_lst = [] - for nk in nuke_workfiles: - task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) - bin = create_bin(task_path, proj) - - if nk['task'] not in seq.videoTracks(): - track = hiero.core.VideoTrack(nk['task']) - seq.addTrack(track) - else: - track = seq.tracks(nk['task']) - - # create clip media - media = 
hiero.core.MediaSource(nk['path']) - media_in = int(media.startTime() or 0) - media_duration = int(media.duration() or 0) - - handle_start = nk.get("handleStart") - handle_end = nk.get("handleEnd") - - if media_in: - source_in = media_in + handle_start - else: - source_in = nk["frameStart"] + handle_start - - if media_duration: - source_out = (media_in + media_duration - 1) - handle_end - else: - source_out = nk["frameEnd"] - handle_end - - source = hiero.core.Clip(media) - - name = os.path.basename(os.path.splitext(nk['path'])[0]) - split_name = split_by_client_version(name)[0] or name - - # add to bin as clip item - items_in_bin = [b.name() for b in bin.items()] - if split_name not in items_in_bin: - binItem = hiero.core.BinItem(source) - bin.addItem(binItem) - - new_source = [ - item for item in bin.items() if split_name in item.name() - ][0].items()[0].item() - - # add to track as clip item - trackItem = hiero.core.TrackItem( - split_name, hiero.core.TrackItem.kVideo) - trackItem.setSource(new_source) - trackItem.setSourceIn(source_in) - trackItem.setSourceOut(source_out) - trackItem.setTimelineIn(nk["clipIn"]) - trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) - track.addTrackItem(trackItem) - clips_lst.append(trackItem) - - return clips_lst - - -def create_bin(path=None, project=None): - ''' - Create bin in project. - If the path is "bin1/bin2/bin3" it will create whole depth - and return `bin3` - - ''' - # get the first loaded project - project = project or get_current_project() - - path = path or self.default_bin_name - - path = path.replace("\\", "/").split("/") - - root_bin = project.clipsBin() - - done_bin_lst = [] - for i, b in enumerate(path): - if i == 0 and len(path) > 1: - if b in [bin.name() for bin in root_bin.bins()]: - bin = [bin for bin in root_bin.bins() if b in bin.name()][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - root_bin.addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i >= 1 and i < len(path) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i == len(path) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - return done_bin_lst[-1] - - -def split_by_client_version(string): - regex = r"[/_.]v\d+" - try: - matches = re.findall(regex, string, re.IGNORECASE) - return string.split(matches[0]) - except Exception as error: - log.error(error) - return None - - -def get_selected_track_items(sequence=None): - _sequence = sequence or get_current_sequence() - - # Getting selection - timeline_editor = hiero.ui.getTimelineEditor(_sequence) - return timeline_editor.selection() - - -def set_selected_track_items(track_items_list, sequence=None): - _sequence = sequence or get_current_sequence() - - # Getting selection - timeline_editor = hiero.ui.getTimelineEditor(_sequence) - return timeline_editor.setSelection(track_items_list) - - -def _read_doc_from_path(path): - # reading QDomDocument from HROX path - hrox_file = QFile(path) - if not hrox_file.open(QFile.ReadOnly): - raise RuntimeError("Failed to open file for reading") - doc = 
QDomDocument() - doc.setContent(hrox_file) - hrox_file.close() - return doc - - -def _write_doc_to_path(doc, path): - # write QDomDocument to path as HROX - hrox_file = QFile(path) - if not hrox_file.open(QFile.WriteOnly): - raise RuntimeError("Failed to open file for writing") - stream = QTextStream(hrox_file) - doc.save(stream, 1) - hrox_file.close() - - -def _set_hrox_project_knobs(doc, **knobs): - # set attributes to Project Tag - proj_elem = doc.documentElement().firstChildElement("Project") - for k, v in knobs.items(): - proj_elem.setAttribute(k, v) - - -def apply_colorspace_project(): - # get path the the active projects - project = get_current_project(remove_untitled=True) - current_file = project.path() - - # close the active project - project.close() - - # get presets for hiero - presets = config.get_init_presets() - colorspace = presets["colorspace"] - hiero_project_clrs = colorspace.get("hiero", {}).get("project", {}) - - # save the workfile as subversion "comment:_colorspaceChange" - split_current_file = os.path.splitext(current_file) - copy_current_file = current_file - - if "_colorspaceChange" not in current_file: - copy_current_file = ( - split_current_file[0] - + "_colorspaceChange" - + split_current_file[1] - ) - - try: - # duplicate the file so the changes are applied only to the copy - shutil.copyfile(current_file, copy_current_file) - except shutil.Error: - # in case the file already exists and it want to copy to the - # same filewe need to do this trick - # TEMP file name change - copy_current_file_tmp = copy_current_file + "_tmp" - # create TEMP file - shutil.copyfile(current_file, copy_current_file_tmp) - # remove original file - os.remove(current_file) - # copy TEMP back to original name - shutil.copyfile(copy_current_file_tmp, copy_current_file) - # remove the TEMP file as we dont need it - os.remove(copy_current_file_tmp) - - # use the code from bellow for changing xml hrox Attributes - hiero_project_clrs.update({"name": os.path.basename(copy_current_file)}) - - # read HROX in as QDomSocument - doc = _read_doc_from_path(copy_current_file) - - # apply project colorspace properties - _set_hrox_project_knobs(doc, **hiero_project_clrs) - - # write QDomSocument back as HROX - _write_doc_to_path(doc, copy_current_file) - - # open the file as current project - hiero.core.openProject(copy_current_file) - - -def apply_colorspace_clips(): - project = get_current_project(remove_untitled=True) - clips = project.clips() - - # get presets for hiero - presets = config.get_init_presets() - colorspace = presets["colorspace"] - hiero_clips_clrs = colorspace.get("hiero", {}).get("clips", {}) - - for clip in clips: - clip_media_source_path = clip.mediaSource().firstpath() - clip_name = clip.name() - clip_colorspace = clip.sourceMediaColourTransform() - - if "default" in clip_colorspace: - continue - - # check if any colorspace presets for read is mathing - preset_clrsp = next((hiero_clips_clrs[k] - for k in hiero_clips_clrs - if bool(re.search(k, clip_media_source_path))), - None) - - if preset_clrsp: - log.debug("Changing clip.path: {}".format(clip_media_source_path)) - log.info("Changing clip `{}` colorspace {} to {}".format( - clip_name, clip_colorspace, preset_clrsp)) - # set the found preset to the clip - clip.setSourceMediaColourTransform(preset_clrsp) - - # save project after all is changed - project.save() diff --git a/pype/hosts/resolve/pipeline_hiero.py b/pype/hosts/resolve/pipeline_hiero.py deleted file mode 100644 index 73025e790f..0000000000 --- 
a/pype/hosts/resolve/pipeline_hiero.py +++ /dev/null @@ -1,302 +0,0 @@ -""" -Basic avalon integration -""" -import os -import contextlib -from collections import OrderedDict -from avalon.tools import ( - workfiles, - publish as _publish -) -from avalon.pipeline import AVALON_CONTAINER_ID -from avalon import api as avalon -from avalon import schema -from pyblish import api as pyblish -import pype -from pype.api import Logger - -from . import lib, menu, events - -log = Logger().get_logger(__name__, "hiero") - -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") - -# plugin paths -LOAD_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "load") -CREATE_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "create") -INVENTORY_PATH = os.path.join(pype.PLUGINS_DIR, "hiero", "inventory") - -PUBLISH_PATH = os.path.join( - pype.PLUGINS_DIR, "hiero", "publish" -).replace("\\", "/") - -AVALON_CONTAINERS = ":AVALON_CONTAINERS" - - -def install(): - """ - Installing Hiero integration for avalon - - Args: - config (obj): avalon config module `pype` in our case, it is not - used but required by avalon.api.install() - - """ - - # adding all events - events.register_events() - - log.info("Registering Hiero plug-ins..") - pyblish.register_host("hiero") - pyblish.register_plugin_path(PUBLISH_PATH) - avalon.register_plugin_path(avalon.Loader, LOAD_PATH) - avalon.register_plugin_path(avalon.Creator, CREATE_PATH) - avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) - - # register callback for switching publishable - pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) - - # Disable all families except for the ones we explicitly want to see - family_states = [ - "write", - "review", - "plate" - ] - - avalon.data["familiesStateDefault"] = False - avalon.data["familiesStateToggled"] = family_states - - # install menu - menu.menu_install() - - # register hiero events - events.register_hiero_events() - - -def uninstall(): - """ - Uninstalling Hiero integration for avalon - - """ - log.info("Deregistering Hiero plug-ins..") - pyblish.deregister_host("hiero") - pyblish.deregister_plugin_path(PUBLISH_PATH) - avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH) - avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH) - - # register callback for switching publishable - pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) - - -def containerise(track_item, - name, - namespace, - context, - loader=None, - data=None): - """Bundle Hiero's object into an assembly and imprint it with metadata - - Containerisation enables a tracking of version, author and origin - for loaded assets. - - Arguments: - track_item (hiero.core.TrackItem): object to imprint as container - name (str): Name of resulting assembly - namespace (str): Namespace under which to host container - context (dict): Asset information - loader (str, optional): Name of node used to produce this container. - - Returns: - track_item (hiero.core.TrackItem): containerised object - - """ - - data_imprint = OrderedDict({ - "schema": "avalon-core:container-2.0", - "id": AVALON_CONTAINER_ID, - "name": str(name), - "namespace": str(namespace), - "loader": str(loader), - "representation": str(context["representation"]["_id"]), - }) - - if data: - for k, v in data.items(): - data_imprint.update({k: v}) - - log.debug("_ data_imprint: {}".format(data_imprint)) - lib.set_track_item_pype_tag(track_item, data_imprint) - - return track_item - - -def ls(): - """List available containers. 
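A sketch of how a host tool consumes this generator, assuming the pipeline is installed and the timeline contains tagged track items:

    # hypothetical: containers are yielded lazily, one per tagged track item
    for container in ls():
        print(container["name"], container["representation"])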
- - This function is used by the Container Manager in Nuke. You'll - need to implement a for-loop that then *yields* one Container at - a time. - - See the `container.json` schema for details on how it should look, - and the Maya equivalent, which is in `avalon.maya.pipeline` - """ - - # get all track items from current timeline - all_track_items = lib.get_track_items() - - for track_item in all_track_items: - container = parse_container(track_item) - if container: - yield container - - -def parse_container(track_item, validate=True): - """Return container data from track_item's pype tag. - - Args: - track_item (hiero.core.TrackItem): A containerised track item. - validate (bool)[optional]: validating with avalon scheme - - Returns: - dict: The container schema data for input containerized track item. - - """ - # convert tag metadata to normal keys names - data = lib.get_track_item_pype_data(track_item) - - if validate and data and data.get("schema"): - schema.validate(data) - - if not isinstance(data, dict): - return - - # If not all required data return the empty container - required = ['schema', 'id', 'name', - 'namespace', 'loader', 'representation'] - - if not all(key in data for key in required): - return - - container = {key: data[key] for key in required} - - container["objectName"] = track_item.name() - - # Store reference to the node object - container["_track_item"] = track_item - - return container - - -def update_container(track_item, data=None): - """Update container data to input track_item's pype tag. - - Args: - track_item (hiero.core.TrackItem): A containerised track item. - data (dict)[optional]: dictionery with data to be updated - - Returns: - bool: True if container was updated correctly - - """ - data = data or dict() - - container = lib.get_track_item_pype_data(track_item) - - for _key, _value in container.items(): - try: - container[_key] = data[_key] - except KeyError: - pass - - log.info("Updating container: `{}`".format(track_item.name())) - return bool(lib.set_track_item_pype_tag(track_item, container)) - - -def launch_workfiles_app(*args): - ''' Wrapping function for workfiles launcher ''' - - workdir = os.environ["AVALON_WORKDIR"] - - # show workfile gui - workfiles.show(workdir) - - -def publish(parent): - """Shorthand to publish from within host""" - return _publish.show(parent) - - -@contextlib.contextmanager -def maintained_selection(): - """Maintain selection during context - - Example: - >>> with maintained_selection(): - ... for track_item in track_items: - ... < do some stuff > - """ - from .lib import ( - set_selected_track_items, - get_selected_track_items - ) - previous_selection = get_selected_track_items() - reset_selection() - try: - # do the operation - yield - finally: - reset_selection() - set_selected_track_items(previous_selection) - - -def reset_selection(): - """Deselect all selected nodes - """ - from .lib import set_selected_track_items - set_selected_track_items([]) - - -def reload_config(): - """Attempt to reload pipeline at run-time. - - CAUTION: This is primarily for development and debugging purposes. 
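The mechanism it relies on can be shown in isolation with a module that is guaranteed to exist:

    import importlib
    import json

    reloaded = importlib.reload(json)  # re-executes the module body in place
    assert reloaded is json           # the same module object is returned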
- - """ - import importlib - - for module in ( - "avalon", - "avalon.lib", - "avalon.pipeline", - "pyblish", - "pypeapp", - "{}.api".format(AVALON_CONFIG), - "{}.hosts.hiero.lib".format(AVALON_CONFIG), - "{}.hosts.hiero.menu".format(AVALON_CONFIG), - "{}.hosts.hiero.tags".format(AVALON_CONFIG) - ): - log.info("Reloading module: {}...".format(module)) - try: - module = importlib.import_module(module) - import imp - imp.reload(module) - except Exception as e: - log.warning("Cannot reload module: {}".format(e)) - importlib.reload(module) - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node passthrough states on instance toggles.""" - - log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( - instance, old_value, new_value)) - - from pype.hosts.hiero import ( - get_track_item_pype_tag, - set_publish_attribute - ) - - # Whether instances should be passthrough based on new value - track_item = instance.data["item"] - tag = get_track_item_pype_tag(track_item) - set_publish_attribute(tag, new_value) diff --git a/pype/hosts/resolve/rendering.py b/pype/hosts/resolve/rendering.py deleted file mode 100644 index e38466e5d4..0000000000 --- a/pype/hosts/resolve/rendering.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python - -""" -Example DaVinci Resolve script: -Load a still from DRX file, apply the still to all clips in all timelines. Set render format and codec, add render jobs for all timelines, render to specified path and wait for rendering completion. -Once render is complete, delete all jobs -""" - -from python_get_resolve import GetResolve -import sys -import time - -def AddTimelineToRender( project, timeline, presetName, targetDirectory, renderFormat, renderCodec ): - project.SetCurrentTimeline(timeline) - project.LoadRenderPreset(presetName) - - if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec): - return False - - project.SetRenderSettings({"SelectAllFrames" : 1, "TargetDir" : targetDirectory}) - return project.AddRenderJob() - -def RenderAllTimelines( resolve, presetName, targetDirectory, renderFormat, renderCodec ): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - if not project: - return False - - resolve.OpenPage("Deliver") - timelineCount = project.GetTimelineCount() - - for index in range (0, int(timelineCount)): - if not AddTimelineToRender(project, project.GetTimelineByIndex(index + 1), presetName, targetDirectory, renderFormat, renderCodec): - return False - return project.StartRendering() - -def IsRenderingInProgress( resolve ): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - if not project: - return False - - return project.IsRenderingInProgress() - -def WaitForRenderingCompletion( resolve ): - while IsRenderingInProgress(resolve): - time.sleep(1) - return - -def ApplyDRXToAllTimelineClips( timeline, path, gradeMode = 0 ): - trackCount = timeline.GetTrackCount("video") - - clips = {} - for index in range (1, int(trackCount) + 1): - clips.update( timeline.GetItemsInTrack("video", index) ) - return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips) - -def ApplyDRXToAllTimelines( resolve, path, gradeMode = 0 ): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - if not project: - return False - timelineCount = project.GetTimelineCount() - - for index in range (0, int(timelineCount)): - timeline = project.GetTimelineByIndex(index + 1) - project.SetCurrentTimeline( 
timeline ) - if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode): - return False - return True - -def DeleteAllRenderJobs( resolve ): - projectManager = resolve.GetProjectManager() - project = projectManager.GetCurrentProject() - project.DeleteAllRenderJobs() - return - -# Inputs: -# - DRX file to import grade still and apply it for clips -# - grade mode (0, 1 or 2) -# - preset name for rendering -# - render path -# - render format -# - render codec -if len(sys.argv) < 7: - print("input parameters for scripts are [drx file path] [grade mode] [render preset name] [render path] [render format] [render codec]") - sys.exit() - -drxPath = sys.argv[1] -gradeMode = sys.argv[2] -renderPresetName = sys.argv[3] -renderPath = sys.argv[4] -renderFormat = sys.argv[5] -renderCodec = sys.argv[6] - -# Get currently open project -resolve = GetResolve() - -if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode): - print("Unable to apply a still from drx file to all timelines") - sys.exit() - -if not RenderAllTimelines(resolve, renderPresetName, renderPath, renderFormat, renderCodec): - print("Unable to set all timelines for rendering") - sys.exit() - -WaitForRenderingCompletion(resolve) - -DeleteAllRenderJobs(resolve) - -print("Rendering is completed.") diff --git a/pype/hosts/resolve/todo-rendering.py b/pype/hosts/resolve/todo-rendering.py new file mode 100644 index 0000000000..cff9eebead --- /dev/null +++ b/pype/hosts/resolve/todo-rendering.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# TODO: convert this script to be usable with PYPE +""" +Example DaVinci Resolve script: +Load a still from DRX file, apply the still to all clips in all timelines. +Set render format and codec, add render jobs for all timelines, render +to specified path and wait for rendering completion. 
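Condensed, the control flow is queue, start, poll, clean up; a sketch using only Resolve API calls that appear in this script (python_get_resolve is the helper module shipped with Resolve's scripting examples):

    import time
    from python_get_resolve import GetResolve

    project = GetResolve().GetProjectManager().GetCurrentProject()
    if project and project.AddRenderJob() and project.StartRendering():
        while project.IsRenderingInProgress():  # poll once per second
            time.sleep(1)
        project.DeleteAllRenderJobs()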
+Once render is complete, delete all jobs + +clonned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py +""" + +from python_get_resolve import GetResolve +import sys +import time + + +def AddTimelineToRender(project, timeline, presetName, + targetDirectory, renderFormat, renderCodec): + project.SetCurrentTimeline(timeline) + project.LoadRenderPreset(presetName) + + if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec): + return False + + project.SetRenderSettings( + {"SelectAllFrames": 1, "TargetDir": targetDirectory}) + return project.AddRenderJob() + + +def RenderAllTimelines(resolve, presetName, targetDirectory, + renderFormat, renderCodec): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + if not project: + return False + + resolve.OpenPage("Deliver") + timelineCount = project.GetTimelineCount() + + for index in range(0, int(timelineCount)): + if not AddTimelineToRender( + project, + project.GetTimelineByIndex(index + 1), + presetName, + targetDirectory, + renderFormat, + renderCodec): + return False + return project.StartRendering() + + +def IsRenderingInProgress(resolve): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + if not project: + return False + + return project.IsRenderingInProgress() + + +def WaitForRenderingCompletion(resolve): + while IsRenderingInProgress(resolve): + time.sleep(1) + return + + +def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0): + trackCount = timeline.GetTrackCount("video") + + clips = {} + for index in range(1, int(trackCount) + 1): + clips.update(timeline.GetItemsInTrack("video", index)) + return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips) + + +def ApplyDRXToAllTimelines(resolve, path, gradeMode=0): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + if not project: + return False + timelineCount = project.GetTimelineCount() + + for index in range(0, int(timelineCount)): + timeline = project.GetTimelineByIndex(index + 1) + project.SetCurrentTimeline(timeline) + if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode): + return False + return True + + +def DeleteAllRenderJobs(resolve): + projectManager = resolve.GetProjectManager() + project = projectManager.GetCurrentProject() + project.DeleteAllRenderJobs() + return + + +# Inputs: +# - DRX file to import grade still and apply it for clips +# - grade mode (0, 1 or 2) +# - preset name for rendering +# - render path +# - render format +# - render codec +if len(sys.argv) < 7: + print( + "input parameters for scripts are [drx file path] [grade mode] " + "[render preset name] [render path] [render format] [render codec]") + sys.exit() + +drxPath = sys.argv[1] +gradeMode = sys.argv[2] +renderPresetName = sys.argv[3] +renderPath = sys.argv[4] +renderFormat = sys.argv[5] +renderCodec = sys.argv[6] + +# Get currently open project +resolve = GetResolve() + +if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode): + print("Unable to apply a still from drx file to all timelines") + sys.exit() + +if not RenderAllTimelines(resolve, renderPresetName, renderPath, + renderFormat, renderCodec): + print("Unable to set all timelines for rendering") + sys.exit() + +WaitForRenderingCompletion(resolve) + +DeleteAllRenderJobs(resolve) + +print("Rendering is completed.") From ae608efdbc8c2c0aa15ec5efd141669ae2ed6afe Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: 
Fri, 18 Dec 2020 17:25:23 +0100
Subject: [PATCH 045/198] hound fixes

---
 pype/hosts/resolve/todo-rendering.py              | 3 +--
 pype/hosts/resolve/utility_scripts/OTIO_export.py | 5 ++---
 pype/hosts/resolve/utility_scripts/OTIO_import.py | 5 ++---
 pype/lib/__init__.py                              | 6 +++---
 pype/plugins/resolve/publish/extract_workfile.py  | 1 +
 5 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/pype/hosts/resolve/todo-rendering.py b/pype/hosts/resolve/todo-rendering.py
index cff9eebead..87b04dd98f 100644
--- a/pype/hosts/resolve/todo-rendering.py
+++ b/pype/hosts/resolve/todo-rendering.py
@@ -6,9 +6,8 @@ Load a still from DRX file, apply the still to all clips in all timelines.
 Set render format and codec, add render jobs for all timelines, render
 to specified path and wait for rendering completion.
 Once render is complete, delete all jobs
-
-clonned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py
 """
+# cloned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py # noqa

 from python_get_resolve import GetResolve
 import sys
diff --git a/pype/hosts/resolve/utility_scripts/OTIO_export.py b/pype/hosts/resolve/utility_scripts/OTIO_export.py
index 3e08cb370d..a1142f56dd 100644
--- a/pype/hosts/resolve/utility_scripts/OTIO_export.py
+++ b/pype/hosts/resolve/utility_scripts/OTIO_export.py
@@ -1,13 +1,12 @@
 #!/usr/bin/env python
 import os
-import sys
 from pype.hosts.resolve.otio import davinci_export as otio_export

-resolve = bmd.scriptapp("Resolve")
+resolve = bmd.scriptapp("Resolve") # noqa
 fu = resolve.Fusion()

 ui = fu.UIManager
-disp = bmd.UIDispatcher(fu.UIManager)
+disp = bmd.UIDispatcher(fu.UIManager) # noqa

 title_font = ui.Font({"PixelSize": 18})

diff --git a/pype/hosts/resolve/utility_scripts/OTIO_import.py b/pype/hosts/resolve/utility_scripts/OTIO_import.py
index 879f7eb0b5..5719ec3e3c 100644
--- a/pype/hosts/resolve/utility_scripts/OTIO_import.py
+++ b/pype/hosts/resolve/utility_scripts/OTIO_import.py
@@ -1,12 +1,11 @@
 #!/usr/bin/env python
 import os
-import sys
 from pype.hosts.resolve.otio import davinci_import as otio_import

-resolve = bmd.scriptapp("Resolve")
+resolve = bmd.scriptapp("Resolve") # noqa
 fu = resolve.Fusion()
 ui = fu.UIManager
-disp = bmd.UIDispatcher(fu.UIManager)
+disp = bmd.UIDispatcher(fu.UIManager) # noqa

 title_font = ui.Font({"PixelSize": 18})

diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py
index 7992815a75..598dd757b8 100644
--- a/pype/lib/__init__.py
+++ b/pype/lib/__init__.py
@@ -143,10 +143,10 @@ __all__ = [
     "IniSettingRegistry",
     "JSONSettingRegistry",
     "PypeSettingsRegistry",
-    "timeit"
+    "timeit",
     "is_overlapping_otio_ranges",
     "otio_range_with_handles",
-    "convert_to_padded_path"
-    "otio_range_to_frame_range",
+    "convert_to_padded_path",
+    "otio_range_to_frame_range"
 ]
diff --git a/pype/plugins/resolve/publish/extract_workfile.py b/pype/plugins/resolve/publish/extract_workfile.py
index a88794841b..e52e829ee4 100644
--- a/pype/plugins/resolve/publish/extract_workfile.py
+++ b/pype/plugins/resolve/publish/extract_workfile.py
@@ -3,6 +3,7 @@ import pyblish.api
 import pype.api
 from pype.hosts import resolve

+
 class ExtractWorkfile(pype.api.Extractor):
     """
     Extractor export DRP workfile file representation

From a9a2d983b95dba615c4ac919a1421845770f6ee2 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 18 Dec 2020 17:25:37 +0100
Subject: [PATCH 046/198] davinci otio import wip

---
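Note on this WIP: the import side reads OTIO markers back into Resolve via _build_marker. For orientation, the kind of marker it consumes can be built with plain OTIO, assuming a version compatible with the 0.11 pin introduced earlier in this series:

    import opentimelineio as otio

    marker = otio.schema.Marker(
        name="review",
        marked_range=otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(100, 24),
            duration=otio.opentime.RationalTime(10, 24),
        ),
        color=otio.schema.MarkerColor.RED,
        metadata={"note": "check grade"},
    )
    # _build_marker maps these onto AddMarker's frameId/color/name/note/duration
    print(marker.marked_range.start_time.value)  # 100.0
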
pype/hosts/resolve/otio/davinci_import.py | 64 ++++++++++++++++++++++- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/pype/hosts/resolve/otio/davinci_import.py b/pype/hosts/resolve/otio/davinci_import.py index 19133279bb..3bbb007b25 100644 --- a/pype/hosts/resolve/otio/davinci_import.py +++ b/pype/hosts/resolve/otio/davinci_import.py @@ -1,4 +1,5 @@ import sys +import json import DaVinciResolveScript import opentimelineio as otio @@ -17,30 +18,89 @@ self.project_fps = None def build_timeline(otio_timeline): + # TODO: build timeline in mediapool `otioImport` folder + # TODO: loop otio tracks and build them in the new timeline for clip in otio_timeline.each_clip(): + # TODO: create track item print(clip.name) print(clip.parent().name) print(clip.range_in_parent()) def _build_track(otio_track): + # TODO: _build_track pass def _build_media_pool_item(otio_media_reference): + # TODO: _build_media_pool_item pass def _build_track_item(otio_clip): + # TODO: _build_track_item pass def _build_gap(otio_clip): + # TODO: _build_gap pass -def _build_marker(otio_marker): - pass +def _build_marker(track_item, otio_marker): + frame_start = otio_marker.marked_range.start_time.value + frame_duration = otio_marker.marked_range.duration.value + + # marker attributes + frameId = (frame_start / 10) * 10 + color = otio_marker.color + name = otio_marker.name + note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata) + duration = (frame_duration / 10) * 10 + + track_item.AddMarker( + frameId, + color, + name, + note, + duration + ) + + +def _build_media_pool_folder(name): + """ + Returns folder with input name and sets it as current folder. + + It will create new media bin if none is found in root media bin + + Args: + name (str): name of bin + + Returns: + resolve.api.MediaPool.Folder: description + + """ + + root_folder = self.media_pool.GetRootFolder() + sub_folders = root_folder.GetSubFolderList() + testing_names = list() + + for subfolder in sub_folders: + subf_name = subfolder.GetName() + if name in subf_name: + testing_names.append(subfolder) + else: + testing_names.append(False) + + matching = next((f for f in testing_names if f is not False), None) + + if not matching: + new_folder = self.media_pool.AddSubFolder(root_folder, name) + self.media_pool.SetCurrentFolder(new_folder) + else: + self.media_pool.SetCurrentFolder(matching) + + return self.media_pool.GetCurrentFolder() def read_from_file(otio_file): From 566c1fc2a965350b3be006bb740f32e2ac8da98e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 18 Dec 2020 18:06:55 +0100 Subject: [PATCH 047/198] resolve plugin fix --- pype/hosts/resolve/plugin.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pype/hosts/resolve/plugin.py b/pype/hosts/resolve/plugin.py index 1b7e6fc051..e5ba39c535 100644 --- a/pype/hosts/resolve/plugin.py +++ b/pype/hosts/resolve/plugin.py @@ -2,7 +2,6 @@ import re from avalon import api from pype.hosts import resolve from avalon.vendor import qargparse -from pype.api import config from . 
import lib from Qt import QtWidgets, QtCore @@ -344,8 +343,12 @@ class Creator(api.Creator): def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) - self.presets = config.get_presets()['plugins']["resolve"][ - "create"].get(self.__class__.__name__, {}) + from pype.api import get_current_project_settings + resolve_p_settings = get_current_project_settings().get("resolve") + self.presets = dict() + if resolve_p_settings: + self.presets = resolve_p_settings["create"].get( + self.__class__.__name__, {}) # adding basic current context resolve objects self.project = resolve.get_current_project() From 6dab474ba89632e96ba85808476ab38aaa394ce2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 21 Dec 2020 12:51:58 +0100 Subject: [PATCH 048/198] next task update use partially settings --- .../ftrack/events/event_next_task_update.py | 302 +++++++++--------- 1 file changed, 146 insertions(+), 156 deletions(-) diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index deb789f981..ac50a7e0aa 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -1,19 +1,28 @@ -import operator import collections from pype.modules.ftrack import BaseEvent class NextTaskUpdate(BaseEvent): - def filter_entities_info(self, session, event): + def launch(self, session, event): + '''Propagates status from version to task when changed''' + + filtered_entities_info = self.filter_entities_info(event) + if not filtered_entities_info: + return + + for project_id, entities_info in filtered_entities_info.items(): + self.process_by_project(session, event, project_id, entities_info) + + def filter_entities_info(self, event): # Filter if event contain relevant data entities_info = event["data"].get("entities") if not entities_info: return - first_filtered_entities = [] + filtered_entities_info = {} for entity_info in entities_info: - # Care only about tasks - if entity_info.get("entityType") != "task": + # Care only about Task `entity_type` + if entity_info.get("entity_type") != "Task": continue # Care only about changes of status @@ -25,190 +34,159 @@ class NextTaskUpdate(BaseEvent): ): continue - first_filtered_entities.append(entity_info) + project_id = None + for parent_info in reversed(entity_info["parents"]): + if parent_info["entityType"] == "show": + project_id = parent_info["entityId"] + break - if not first_filtered_entities: - return first_filtered_entities + if project_id: + filtered_entities_info[project_id].append(entity_info) + return filtered_entities_info - status_ids = [ - entity_info["changes"]["statusid"]["new"] - for entity_info in first_filtered_entities - ] - statuses_by_id = self.get_statuses_by_id( - session, status_ids=status_ids + def process_by_project(self, session, event, project_id, _entities_info): + project_entity = self.get_project_entity_from_event( + session, event, project_id + ) + project_settings = self.get_settings_for_project( + session, event, project_entity=project_entity ) - # Make sure `entity_type` is "Task" - task_object_type = session.query( - "select id, name from ObjectType where name is \"Task\"" - ).one() - # Care only about tasks having status with state `Done` - filtered_entities = [] - for entity_info in first_filtered_entities: - if entity_info["objectTypeId"] != task_object_type["id"]: - continue - status_id = entity_info["changes"]["statusid"]["new"] - status_entity = statuses_by_id[status_id] - if 
status_entity["state"]["name"].lower() == "done": - filtered_entities.append(entity_info) + project_name = project_entity["full_name"] - return filtered_entities + # Load status mapping from presets + event_settings = ( + project_settings["ftrack"]["events"]["next_task_update"] + ) + if not event_settings["enabled"]: + self.log.debug("Project \"{}\" has disabled {}.".format( + project_name, self.__class__.__name__ + )) + return - def get_parents_by_id(self, session, entities_info): - parent_ids = [ - "\"{}\"".format(entity_info["parentId"]) - for entity_info in entities_info - ] - parent_entities = session.query( - "TypedContext where id in ({})".format(", ".join(parent_ids)) - ).all() + statuses = session.query("Status").all() - return { - entity["id"]: entity - for entity in parent_entities - } - - def get_tasks_by_id(self, session, parent_ids): - joined_parent_ids = ",".join([ - "\"{}\"".format(parent_id) - for parent_id in parent_ids - ]) - task_entities = session.query( - "Task where parent_id in ({})".format(joined_parent_ids) - ).all() - - return { - entity["id"]: entity - for entity in task_entities - } - - def get_statuses_by_id(self, session, task_entities=None, status_ids=None): - if task_entities is None and status_ids is None: - return {} - - if status_ids is None: - status_ids = [] - for task_entity in task_entities: - status_ids.append(task_entity["status_id"]) - - if not status_ids: - return {} - - status_entities = session.query( - "Status where id in ({})".format(", ".join(status_ids)) - ).all() - - return { - entity["id"]: entity - for entity in status_entities - } - - def get_sorted_task_types(self, session): - data = { - _type: _type.get("sort") - for _type in session.query("Type").all() - if _type.get("sort") is not None - } - - return [ - item[0] - for item in sorted(data.items(), key=operator.itemgetter(1)) - ] - - def launch(self, session, event): - '''Propagates status from version to task when changed''' - - entities_info = self.filter_entities_info(session, event) + entities_info = self.filter_by_status_state(_entities_info, statuses) if not entities_info: return - parents_by_id = self.get_parents_by_id(session, entities_info) - tasks_by_id = self.get_tasks_by_id( - session, tuple(parents_by_id.keys()) + parent_ids = set() + event_task_ids_by_parent_id = collections.defaultdict(list) + for entity_info in entities_info: + parent_id = entity_info["parentId"] + entity_id = entity_info["entityId"] + parent_ids.add(parent_id) + event_task_ids_by_parent_id[parent_id].append(entity_id) + + # From now it doesn't matter what was in event data + task_entities = session.query( + ( + "select id, type_id, status_id, parent_id, link from Task" + " where parent_id in ({})" + ).format(self.join_query_keys(parent_ids)) + ).all() + + tasks_by_parent_id = collections.defaultdict(list) + for task_entity in task_entities: + tasks_by_parent_id[task_entity["parent_id"]].append(task_entity) + + self.set_next_task_statuses( + session, + tasks_by_parent_id, + event_task_ids_by_parent_id, + statuses ) - tasks_to_parent_id = collections.defaultdict(list) - for task_entity in tasks_by_id.values(): - tasks_to_parent_id[task_entity["parent_id"]].append(task_entity) + def filter_by_status_state(self, entities_info, statuses): + statuses_by_id = { + status["id"]: status + for status in statuses + } - statuses_by_id = self.get_statuses_by_id(session, tasks_by_id.values()) + # Care only about tasks having status with state `Done` + filtered_entities_info = [] + for entity_info in entities_info: + 
status_id = entity_info["changes"]["statusid"]["new"] + status_entity = statuses_by_id[status_id] + if status_entity["state"]["name"].lower() == "done": + filtered_entities_info.append(entity_info) + return filtered_entities_info + def set_next_task_statuses( + self, + session, + tasks_by_parent_id, + event_task_ids_by_parent_id, + statuses + ): + statuses_by_low_name = { + status["name"].lower(): status + for status in statuses + } next_status_name = "Ready" - next_status = session.query( - "Status where name is \"{}\"".format(next_status_name) - ).first() + next_status = statuses_by_low_name.get(next_status_name.lower()) if not next_status: self.log.warning("Couldn't find status with name \"{}\"".format( next_status_name )) return - for entity_info in entities_info: - parent_id = entity_info["parentId"] - task_id = entity_info["entityId"] - task_entity = tasks_by_id[task_id] + statuses_by_id = { + status["id"].lower(): status + for status in statuses + } - all_same_type_taks_done = True - for parents_task in tasks_to_parent_id[parent_id]: - if ( - parents_task["id"] == task_id - or parents_task["type_id"] != task_entity["type_id"] - ): + sorted_task_type_ids = self.get_sorted_task_type_ids(session) + + for parent_id, _task_entities in tasks_by_parent_id.items(): + task_entities_by_type_id = collections.defaultdict(list) + for _task_entity in _task_entities: + type_id = _task_entity["type_id"] + task_entities_by_type_id[type_id].append(_task_entity) + + event_ids = set(event_task_ids_by_parent_id[parent_id]) + next_tasks = [] + for type_id in sorted_task_type_ids: + if type_id not in task_entities_by_type_id: continue - parents_task_status = statuses_by_id[parents_task["status_id"]] - low_status_name = parents_task_status["name"].lower() - # Skip if task's status name "Omitted" - if low_status_name == "omitted": - continue - - low_state_name = parents_task_status["state"]["name"].lower() - if low_state_name != "done": - all_same_type_taks_done = False + all_in_type_done = True + task_entities = task_entities_by_type_id[type_id] + if not event_ids: + next_tasks = task_entities break - if not all_same_type_taks_done: + for task_entity in task_entities: + task_id = task_entity["id"] + if task_id in event_ids: + event_ids.remove(task_id) + + task_status = statuses_by_id[task_entity["status_id"]] + low_status_name = task_status["name"].lower() + if low_status_name == "omitted": + continue + + low_state_name = task_status["state"]["name"].lower() + if low_state_name != "done": + all_in_type_done = False + break + + if not all_in_type_done: + break + + if not next_tasks: continue - # Prepare all task types - sorted_task_types = self.get_sorted_task_types(session) - sorted_task_types_len = len(sorted_task_types) - - from_idx = None - for idx, task_type in enumerate(sorted_task_types): - if task_type["id"] == task_entity["type_id"]: - from_idx = idx + 1 - break - - # Current task type is last in order - if from_idx is None or from_idx >= sorted_task_types_len: - continue - - next_task_type_id = None - next_task_type_tasks = [] - for idx in range(from_idx, sorted_task_types_len): - next_task_type = sorted_task_types[idx] - for parents_task in tasks_to_parent_id[parent_id]: - if next_task_type_id is None: - if parents_task["type_id"] != next_task_type["id"]: - continue - next_task_type_id = next_task_type["id"] - - if parents_task["type_id"] == next_task_type_id: - next_task_type_tasks.append(parents_task) - - if next_task_type_id is not None: - break - - for next_task_entity in 
next_task_type_tasks: - if next_task_entity["status"]["name"].lower() != "not ready": + for task_entity in next_tasks: + task_status = statuses_by_id[task_entity["status_id"]] + if task_status["name"].lower() != "not ready": continue ent_path = "/".join( - [ent["name"] for ent in next_task_entity["link"]] + [ent["name"] for ent in task_entity["link"]] ) try: - next_task_entity["status"] = next_status + task_entity["status_id"] = next_status["id"] session.commit() self.log.info( "\"{}\" updated status to \"{}\"".format( @@ -224,6 +202,18 @@ class NextTaskUpdate(BaseEvent): exc_info=True ) + def get_sorted_task_type_ids(self, session): + types_by_order = collections.defaultdict(list) + for _type in session.query("Type").all(): + sort_oder = _type.get("sort") + if sort_oder is not None: + types_by_order[sort_oder].append(_type["id"]) + + types = [] + for sort_oder in sorted(types_by_order.keys()): + types.extend(types_by_order[sort_oder]) + return types + def register(session, plugins_presets): NextTaskUpdate(session, plugins_presets).register() From 7fea99738105aeda22997d3566b092693b9dd9a8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 21 Dec 2020 12:55:32 +0100 Subject: [PATCH 049/198] fix variable type --- pype/modules/ftrack/events/event_next_task_update.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index ac50a7e0aa..7eb6368964 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -19,7 +19,7 @@ class NextTaskUpdate(BaseEvent): if not entities_info: return - filtered_entities_info = {} + filtered_entities_info = collections.defaultdict(list) for entity_info in entities_info: # Care only about Task `entity_type` if entity_info.get("entity_type") != "Task": From deb8e6e7ca9d5879f60a7be2bd2020b650716df0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 21 Dec 2020 13:59:28 +0100 Subject: [PATCH 050/198] use settings for status changes --- .../ftrack/events/event_next_task_update.py | 77 ++++++++++++++----- 1 file changed, 57 insertions(+), 20 deletions(-) diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index 7eb6368964..6252985e1b 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -94,7 +94,9 @@ class NextTaskUpdate(BaseEvent): session, tasks_by_parent_id, event_task_ids_by_parent_id, - statuses + statuses, + project_entity, + event_settings ) def filter_by_status_state(self, entities_info, statuses): @@ -117,25 +119,33 @@ class NextTaskUpdate(BaseEvent): session, tasks_by_parent_id, event_task_ids_by_parent_id, - statuses + statuses, + project_entity, + event_settings ): - statuses_by_low_name = { - status["name"].lower(): status - for status in statuses - } - next_status_name = "Ready" - next_status = statuses_by_low_name.get(next_status_name.lower()) - if not next_status: - self.log.warning("Couldn't find status with name \"{}\"".format( - next_status_name - )) - return - statuses_by_id = { - status["id"].lower(): status + status["id"]: status for status in statuses } + ignored_statuses = set( + status_name.lower() + for status_name in event_settings["ignored_statuses"] + ) + mapping = { + status_from.lower(): status_to.lower() + for status_from, status_to in event_settings["mapping"].items() + } + + task_type_ids = set() + for 
task_entities in tasks_by_parent_id.values(): + for task_entity in task_entities: + task_type_ids.add(task_entity["type_id"]) + + statusese_by_obj_id = self.statuses_for_tasks( + task_type_ids, project_entity + ) + sorted_task_type_ids = self.get_sorted_task_type_ids(session) for parent_id, _task_entities in tasks_by_parent_id.items(): @@ -163,7 +173,7 @@ class NextTaskUpdate(BaseEvent): task_status = statuses_by_id[task_entity["status_id"]] low_status_name = task_status["name"].lower() - if low_status_name == "omitted": + if low_status_name in ignored_statuses: continue low_state_name = task_status["state"]["name"].lower() @@ -179,29 +189,56 @@ class NextTaskUpdate(BaseEvent): for task_entity in next_tasks: task_status = statuses_by_id[task_entity["status_id"]] - if task_status["name"].lower() != "not ready": + old_status_name = task_status["name"].lower() + new_task_name = mapping.get(old_status_name) + if not new_task_name: + self.log.debug( + "Didn't found mapping for status \"{}\".".format( + task_status["name"] + ) + ) continue ent_path = "/".join( [ent["name"] for ent in task_entity["link"]] ) + type_id = task_entity["type_id"] + new_status = statusese_by_obj_id[type_id].get(new_task_name) + if new_status is None: + self.log.warning(( + "\"{}\" does not have available status name \"{}\"" + ).format(ent_path, new_task_name)) + continue + try: - task_entity["status_id"] = next_status["id"] + task_entity["status_id"] = new_status["id"] session.commit() self.log.info( "\"{}\" updated status to \"{}\"".format( - ent_path, next_status_name + ent_path, new_status["name"] ) ) except Exception: session.rollback() self.log.warning( "\"{}\" status couldnt be set to \"{}\"".format( - ent_path, next_status_name + ent_path, new_status["name"] ), exc_info=True ) + def statuses_for_tasks(self, task_type_ids, project_entity): + project_schema = project_entity["project_schema"] + output = {} + for task_type_id in task_type_ids: + statuses = project_schema.get_statuses("Task", task_type_id) + output[task_type_id] = { + status["name"].lower(): status + for status in statuses + } + + return output + def get_sorted_task_type_ids(self, session): types_by_order = collections.defaultdict(list) for _type in session.query("Type").all(): From 5770d7cbd7486ef5ffd83c3120bb7770b754fc40 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 21 Dec 2020 14:03:38 +0100 Subject: [PATCH 051/198] added few more filterings --- pype/modules/ftrack/events/event_next_task_update.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index 6252985e1b..a26976c16d 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -188,8 +188,14 @@ class NextTaskUpdate(BaseEvent): continue for task_entity in next_tasks: + if task_entity["status"]["state"]["name"].lower() == "done": + continue + task_status = statuses_by_id[task_entity["status_id"]] old_status_name = task_status["name"].lower() + if old_status_name in ignored_statuses: + continue + new_task_name = mapping.get(old_status_name) if not new_task_name: self.log.debug( From a27d7d335dcc9bc9357d57e6635e35704bdc9bd5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 21 Dec 2020 14:06:23 +0100 Subject: [PATCH 052/198] updated settings gui and values --- .../defaults/project_settings/ftrack.json | 7 +++++-- .../projects_schema/schema_project_ftrack.json | 17 +++++++++++++++++ 2 files changed, 22 
insertions(+), 2 deletions(-) diff --git a/pype/settings/defaults/project_settings/ftrack.json b/pype/settings/defaults/project_settings/ftrack.json index 2bf11de468..941c091f5f 100644 --- a/pype/settings/defaults/project_settings/ftrack.json +++ b/pype/settings/defaults/project_settings/ftrack.json @@ -84,8 +84,11 @@ "next_task_update": { "enabled": true, "mapping": { - "Ready": "Not Ready" - } + "Not Ready": "Ready" + }, + "ignored_statuses": [ + "Omitted" + ] } }, "user_handlers": { diff --git a/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_ftrack.json b/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_ftrack.json index 1554989c55..2625e0062f 100644 --- a/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_ftrack.json +++ b/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_ftrack.json @@ -302,12 +302,29 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "label", + "label": "Change status on next task by task types order when task status state changed to \"Done\"." + }, + { + "type": "label", + "label": "Mapping of next task status changes From -> To." + }, { "type": "dict-modifiable", "key": "mapping", "object_type": { "type": "text" } + }, + { + "type": "label", + "label": "Status names that are ignored on \"Done\" check (e.g. \"Omitted\")." + }, + { + "type": "list", + "key": "ignored_statuses", + "object_type": "text" } ] } From 5a39ee349e98447be5eb3818939593ad00d4e8cc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 21 Dec 2020 14:39:50 +0100 Subject: [PATCH 053/198] settings key in class definition --- pype/modules/ftrack/events/event_next_task_update.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/modules/ftrack/events/event_next_task_update.py b/pype/modules/ftrack/events/event_next_task_update.py index a26976c16d..14847f8656 100644 --- a/pype/modules/ftrack/events/event_next_task_update.py +++ b/pype/modules/ftrack/events/event_next_task_update.py @@ -3,6 +3,8 @@ from pype.modules.ftrack import BaseEvent class NextTaskUpdate(BaseEvent): + settings_key = "next_task_update" + def launch(self, session, event): '''Propagates status from version to task when changed''' @@ -56,7 +58,7 @@ class NextTaskUpdate(BaseEvent): # Load status mapping from presets event_settings = ( - project_settings["ftrack"]["events"]["next_task_update"] + project_settings["ftrack"]["events"][self.settings_key] ) if not event_settings["enabled"]: self.log.debug("Project \"{}\" has disabled {}.".format( From e9704dace72b08a261fbf4eaab38d19f7645d819 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 21 Dec 2020 19:38:42 +0100 Subject: [PATCH 054/198] adding settings for resolve creator --- .../schema_project_resolve.json | 89 +++++++++++++++++-- 1 file changed, 80 insertions(+), 9 deletions(-) diff --git a/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_resolve.json b/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_resolve.json index fcd649db83..18f6817407 100644 --- a/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_resolve.json +++ b/pype/tools/settings/settings/gui_schemas/projects_schema/schema_project_resolve.json @@ -19,19 +19,90 @@ "is_group": true, "children": [ { - "type": "text", - "key": "clipName", - "label": "Clip name template" + "type": "collapsible-wrap", + "label": "Shot Hierarchy And Rename Settings", + "collapsable": false, + "children": [ + { + "type": "text", + "key": "hierarchy", + 
"label": "Shot parent hierarchy" + }, + { + "type": "boolean", + "key": "clipRename", + "label": "Rename clips" + }, + { + "type": "text", + "key": "clipName", + "label": "Clip name template" + }, + { + "type": "number", + "key": "countFrom", + "label": "Count sequence from" + }, + { + "type": "number", + "key": "countSteps", + "label": "Stepping number" + } + ] }, { - "type": "text", - "key": "folder", - "label": "Folder" + "type": "collapsible-wrap", + "label": "Shot Template Keywords", + "collapsable": false, + "children": [ + { + "type": "text", + "key": "folder", + "label": "{folder}" + }, + { + "type": "text", + "key": "episode", + "label": "{episode}" + }, + { + "type": "text", + "key": "sequence", + "label": "{sequence}" + }, + { + "type": "text", + "key": "track", + "label": "{track}" + }, + { + "type": "text", + "key": "shot", + "label": "{shot}" + } + ] }, { - "type": "number", - "key": "steps", - "label": "Steps" + "type": "collapsible-wrap", + "label": "Shot Attributes", + "collapsable": false, + "children": [ + { + "type": "number", + "key": "workfileFrameStart", + "label": "Workfiles Start Frame" + }, + { + "type": "number", + "key": "handleStart", + "label": "Handle start (head)" + }, + { + "type": "number", + "key": "handleEnd", + "label": "Handle end (tail)" + } + ] } ] } From 51ff88581536110793211ca8b7eebfd920510fb2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 21 Dec 2020 19:39:15 +0100 Subject: [PATCH 055/198] creator plugin rename and delete old --- .../resolve/create/create_shot_clip.py | 256 ++++++++++++++--- .../resolve/create/create_shot_clip_new.py | 264 ------------------ 2 files changed, 221 insertions(+), 299 deletions(-) delete mode 100644 pype/plugins/resolve/create/create_shot_clip_new.py diff --git a/pype/plugins/resolve/create/create_shot_clip.py b/pype/plugins/resolve/create/create_shot_clip.py index bd2e013fac..35cb30636e 100644 --- a/pype/plugins/resolve/create/create_shot_clip.py +++ b/pype/plugins/resolve/create/create_shot_clip.py @@ -6,45 +6,218 @@ from pype.hosts.resolve import lib class CreateShotClip(resolve.Creator): """Publishable clip""" - label = "Shot" + label = "Create Publishable Clip" family = "clip" icon = "film" defaults = ["Main"] - gui_name = "Pype sequencial rename with hirerarchy" - gui_info = "Define sequencial rename and fill hierarchy data." + gui_tracks = resolve.get_video_track_names() + gui_name = "Pype publish attributes creator" + gui_info = "Define sequential rename and fill hierarchy data." 
gui_inputs = { - "clipName": "{episode}{sequence}{shot}", - "hierarchy": "{folder}/{sequence}/{shot}", - "countFrom": 10, - "steps": 10, + "renameHierarchy": { + "type": "section", + "label": "Shot Hierarchy And Rename Settings", + "target": "ui", + "order": 0, + "value": { + "hierarchy": { + "value": "{folder}/{sequence}", + "type": "QLineEdit", + "label": "Shot Parent Hierarchy", + "target": "tag", + "toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa + "order": 0}, + "clipRename": { + "value": False, + "type": "QCheckBox", + "label": "Rename clips", + "target": "ui", + "toolTip": "Renaming selected clips on fly", # noqa + "order": 1}, + "clipName": { + "value": "{sequence}{shot}", + "type": "QLineEdit", + "label": "Clip Name Template", + "target": "ui", + "toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa + "order": 2}, + "countFrom": { + "value": 10, + "type": "QSpinBox", + "label": "Count sequence from", + "target": "ui", + "toolTip": "Set when the sequence number stafrom", # noqa + "order": 3}, + "countSteps": { + "value": 10, + "type": "QSpinBox", + "label": "Stepping number", + "target": "ui", + "toolTip": "What number is adding every new step", # noqa + "order": 4}, + } + }, "hierarchyData": { - "folder": "shots", - "shot": "sh####", - "track": "{track}", - "sequence": "sc010", - "episode": "ep01" + "type": "dict", + "label": "Shot Template Keywords", + "target": "tag", + "order": 1, + "value": { + "folder": { + "value": "shots", + "type": "QLineEdit", + "label": "{folder}", + "target": "tag", + "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 0}, + "episode": { + "value": "ep01", + "type": "QLineEdit", + "label": "{episode}", + "target": "tag", + "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 1}, + "sequence": { + "value": "sq01", + "type": "QLineEdit", + "label": "{sequence}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 2}, + "track": { + "value": "{_track_}", + "type": "QLineEdit", + "label": "{track}", + "target": "tag", + "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 3}, + "shot": { + "value": "sh###", + "type": "QLineEdit", + "label": "{shot}", + "target": "tag", + "toolTip": "Name of shot. `#` is converted to paded number. 
\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa + "order": 4} + } + }, + "verticalSync": { + "type": "section", + "label": "Vertical Synchronization Of Attributes", + "target": "ui", + "order": 2, + "value": { + "vSyncOn": { + "value": True, + "type": "QCheckBox", + "label": "Enable Vertical Sync", + "target": "ui", + "toolTip": "Switch on if you want clips above each other to share its attributes", # noqa + "order": 0}, + "vSyncTrack": { + "value": gui_tracks, # noqa + "type": "QComboBox", + "label": "Master track", + "target": "ui", + "toolTip": "Select driving track name which should be mastering all others", # noqa + "order": 1} + } + }, + "publishSettings": { + "type": "section", + "label": "Publish Settings", + "target": "ui", + "order": 3, + "value": { + "subsetName": { + "value": ["", "main", "bg", "fg", "bg", + "animatic"], + "type": "QComboBox", + "label": "Subset Name", + "target": "ui", + "toolTip": "chose subset name patern, if is selected, name of track layer will be used", # noqa + "order": 0}, + "subsetFamily": { + "value": ["plate", "take"], + "type": "QComboBox", + "label": "Subset Family", + "target": "ui", "toolTip": "What use of this subset is for", # noqa + "order": 1}, + "reviewTrack": { + "value": ["< none >"] + gui_tracks, + "type": "QComboBox", + "label": "Use Review Track", + "target": "ui", + "toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa + "order": 2}, + "audio": { + "value": False, + "type": "QCheckBox", + "label": "Include audio", + "target": "tag", + "toolTip": "Process subsets with corresponding audio", # noqa + "order": 3}, + "sourceResolution": { + "value": False, + "type": "QCheckBox", + "label": "Source resolution", + "target": "tag", + "toolTip": "Is resloution taken from timeline or source?", # noqa + "order": 4}, + } + }, + "shotAttr": { + "type": "section", + "label": "Shot Attributes", + "target": "ui", + "order": 4, + "value": { + "workfileFrameStart": { + "value": 1001, + "type": "QSpinBox", + "label": "Workfiles Start Frame", + "target": "tag", + "toolTip": "Set workfile starting frame number", # noqa + "order": 0}, + "handleStart": { + "value": 0, + "type": "QSpinBox", + "label": "Handle start (head)", + "target": "tag", + "toolTip": "Handle at start of clip", # noqa + "order": 1}, + "handleEnd": { + "value": 0, + "type": "QSpinBox", + "label": "Handle end (tail)", + "target": "tag", + "toolTip": "Handle at end of clip", # noqa + "order": 2}, + } } } + presets = None def process(self): - # solve gui inputs overwrites from presets - # overwrite gui inputs from presets + print("_____ presets: {}".format(pformat(self.presets))) + # get key pares from presets and match it on ui inputs for k, v in self.gui_inputs.items(): - if isinstance(v, dict): - # nested dictionary (only one level allowed) - for _k, _v in v.items(): + if v["type"] in ("dict", "section"): + # nested dictionary (only one level allowed + # for sections and dict) + for _k, _v in v["value"].items(): if self.presets.get(_k): - self.gui_inputs[k][_k] = self.presets[_k] + self.gui_inputs[k][ + "value"][_k]["value"] = self.presets[_k] if self.presets.get(k): - self.gui_inputs[k] = self.presets[k] + self.gui_inputs[k]["value"] = self.presets[k] + print(pformat(self.gui_inputs)) # open widget for plugins inputs widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs) widget.exec_() - print(f"__ 
selected_clips: {self.selected}") if len(self.selected) < 1: return @@ -52,28 +225,41 @@ class CreateShotClip(resolve.Creator): print("Operation aborted") return + self.rename_add = 0 + + # get ui output for track name for vertical sync + v_sync_track = widget.result["vSyncTrack"]["value"] + + # sort selected trackItems by + sorted_selected_track_items = list() + unsorted_selected_track_items = list() + for track_item_data in self.selected: + if track_item_data["track"]["name"] in v_sync_track: + sorted_selected_track_items.append(track_item_data) + else: + unsorted_selected_track_items.append(track_item_data) + + sorted_selected_track_items.extend(unsorted_selected_track_items) + # sequence attrs sq_frame_start = self.sequence.GetStartFrame() sq_markers = self.sequence.GetMarkers() - print(f"__ sq_frame_start: {pformat(sq_frame_start)}") - print(f"__ seq_markers: {pformat(sq_markers)}") # create media bin for compound clips (trackItems) mp_folder = resolve.create_current_sequence_media_bin(self.sequence) - print(f"_ mp_folder: {mp_folder.GetName()}") - lib.rename_add = 0 - for i, t_data in enumerate(self.selected): - lib.rename_index = i + kwargs = { + "ui_inputs": widget.result, + "avalon": self.data, + "mp_folder": mp_folder, + "sq_frame_start": sq_frame_start, + "sq_markers": sq_markers + } - # clear color after it is done - t_data["clip"]["item"].ClearClipColor() + for i, track_item_data in enumerate(sorted_selected_track_items): + self.rename_index = i # convert track item to timeline media pool item - resolve.create_compound_clip( - t_data, - mp_folder, - rename=True, - **dict( - {"presets": widget.result}) - ) + track_item = resolve.PublishClip( + self, track_item_data, **kwargs).convert() + track_item.SetClipColor(lib.publish_clip_color) diff --git a/pype/plugins/resolve/create/create_shot_clip_new.py b/pype/plugins/resolve/create/create_shot_clip_new.py deleted file mode 100644 index 5f6790394b..0000000000 --- a/pype/plugins/resolve/create/create_shot_clip_new.py +++ /dev/null @@ -1,264 +0,0 @@ -from pprint import pformat -from pype.hosts import resolve -from pype.hosts.resolve import lib - - -class CreateShotClipNew(resolve.Creator): - """Publishable clip""" - - label = "Create Publishable Clip [New]" - family = "clip" - icon = "film" - defaults = ["Main"] - - gui_tracks = resolve.get_video_track_names() - gui_name = "Pype publish attributes creator" - gui_info = "Define sequential rename and fill hierarchy data." 
- gui_inputs = { - "renameHierarchy": { - "type": "section", - "label": "Shot Hierarchy And Rename Settings", - "target": "ui", - "order": 0, - "value": { - "hierarchy": { - "value": "{folder}/{sequence}", - "type": "QLineEdit", - "label": "Shot Parent Hierarchy", - "target": "tag", - "toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa - "order": 0}, - "clipRename": { - "value": False, - "type": "QCheckBox", - "label": "Rename clips", - "target": "ui", - "toolTip": "Renaming selected clips on fly", # noqa - "order": 1}, - "clipName": { - "value": "{sequence}{shot}", - "type": "QLineEdit", - "label": "Clip Name Template", - "target": "ui", - "toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa - "order": 2}, - "countFrom": { - "value": 10, - "type": "QSpinBox", - "label": "Count sequence from", - "target": "ui", - "toolTip": "Set when the sequence number stafrom", # noqa - "order": 3}, - "countSteps": { - "value": 10, - "type": "QSpinBox", - "label": "Stepping number", - "target": "ui", - "toolTip": "What number is adding every new step", # noqa - "order": 4}, - } - }, - "hierarchyData": { - "type": "dict", - "label": "Shot Template Keywords", - "target": "tag", - "order": 1, - "value": { - "folder": { - "value": "shots", - "type": "QLineEdit", - "label": "{folder}", - "target": "tag", - "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 0}, - "episode": { - "value": "ep01", - "type": "QLineEdit", - "label": "{episode}", - "target": "tag", - "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 1}, - "sequence": { - "value": "sq01", - "type": "QLineEdit", - "label": "{sequence}", - "target": "tag", - "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 2}, - "track": { - "value": "{_track_}", - "type": "QLineEdit", - "label": "{track}", - "target": "tag", - "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 3}, - "shot": { - "value": "sh###", - "type": "QLineEdit", - "label": "{shot}", - "target": "tag", - "toolTip": "Name of shot. `#` is converted to paded number. 
\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa - "order": 4} - } - }, - "verticalSync": { - "type": "section", - "label": "Vertical Synchronization Of Attributes", - "target": "ui", - "order": 2, - "value": { - "vSyncOn": { - "value": True, - "type": "QCheckBox", - "label": "Enable Vertical Sync", - "target": "ui", - "toolTip": "Switch on if you want clips above each other to share its attributes", # noqa - "order": 0}, - "vSyncTrack": { - "value": gui_tracks, # noqa - "type": "QComboBox", - "label": "Master track", - "target": "ui", - "toolTip": "Select driving track name which should be mastering all others", # noqa - "order": 1} - } - }, - "publishSettings": { - "type": "section", - "label": "Publish Settings", - "target": "ui", - "order": 3, - "value": { - "subsetName": { - "value": ["", "main", "bg", "fg", "bg", - "animatic"], - "type": "QComboBox", - "label": "Subset Name", - "target": "ui", - "toolTip": "chose subset name patern, if is selected, name of track layer will be used", # noqa - "order": 0}, - "subsetFamily": { - "value": ["plate", "take"], - "type": "QComboBox", - "label": "Subset Family", - "target": "ui", "toolTip": "What use of this subset is for", # noqa - "order": 1}, - "reviewTrack": { - "value": ["< none >"] + gui_tracks, - "type": "QComboBox", - "label": "Use Review Track", - "target": "ui", - "toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa - "order": 2}, - "audio": { - "value": False, - "type": "QCheckBox", - "label": "Include audio", - "target": "tag", - "toolTip": "Process subsets with corresponding audio", # noqa - "order": 3}, - "sourceResolution": { - "value": False, - "type": "QCheckBox", - "label": "Source resolution", - "target": "tag", - "toolTip": "Is resloution taken from timeline or source?", # noqa - "order": 4}, - } - }, - "frameRangeAttr": { - "type": "section", - "label": "Shot Attributes", - "target": "ui", - "order": 4, - "value": { - "workfileFrameStart": { - "value": 1001, - "type": "QSpinBox", - "label": "Workfiles Start Frame", - "target": "tag", - "toolTip": "Set workfile starting frame number", # noqa - "order": 0}, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle Start", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1}, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle End", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2}, - } - } - } - - presets = None - - def process(self): - # get key pares from presets and match it on ui inputs - for k, v in self.gui_inputs.items(): - if v["type"] in ("dict", "section"): - # nested dictionary (only one level allowed - # for sections and dict) - for _k, _v in v["value"].items(): - if self.presets.get(_k): - self.gui_inputs[k][ - "value"][_k]["value"] = self.presets[_k] - if self.presets.get(k): - self.gui_inputs[k]["value"] = self.presets[k] - - print(pformat(self.gui_inputs)) - # open widget for plugins inputs - widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs) - widget.exec_() - - if len(self.selected) < 1: - return - - if not widget.result: - print("Operation aborted") - return - - self.rename_add = 0 - - # get ui output for track name for vertical sync - v_sync_track = widget.result["vSyncTrack"]["value"] - - # sort selected trackItems by - sorted_selected_track_items = list() - 
unsorted_selected_track_items = list() - for track_item_data in self.selected: - if track_item_data["track"]["name"] in v_sync_track: - sorted_selected_track_items.append(track_item_data) - else: - unsorted_selected_track_items.append(track_item_data) - - sorted_selected_track_items.extend(unsorted_selected_track_items) - - # sequence attrs - sq_frame_start = self.sequence.GetStartFrame() - sq_markers = self.sequence.GetMarkers() - - # create media bin for compound clips (trackItems) - mp_folder = resolve.create_current_sequence_media_bin(self.sequence) - - kwargs = { - "ui_inputs": widget.result, - "avalon": self.data, - "mp_folder": mp_folder, - "sq_frame_start": sq_frame_start, - "sq_markers": sq_markers - } - - for i, track_item_data in enumerate(sorted_selected_track_items): - self.rename_index = i - - # convert track item to timeline media pool item - track_item = resolve.PublishClip( - self, track_item_data, **kwargs).convert() - track_item.SetClipColor(lib.publish_clip_color) From 821631f669fb712b3436d7a122d624d6d3b4ee08 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 21 Dec 2020 19:39:46 +0100 Subject: [PATCH 056/198] resolve: moving functions to pype.lib --- pype/lib/__init__.py | 10 +- pype/lib/editorial.py | 60 ++++++++++ .../global/publish/extract_otio_review.py | 106 ++++++------------ 3 files changed, 102 insertions(+), 74 deletions(-) diff --git a/pype/lib/__init__.py b/pype/lib/__init__.py index 598dd757b8..35ae0a901a 100644 --- a/pype/lib/__init__.py +++ b/pype/lib/__init__.py @@ -80,7 +80,10 @@ from .editorial import ( is_overlapping_otio_ranges, otio_range_to_frame_range, otio_range_with_handles, - convert_to_padded_path + convert_to_padded_path, + trim_media_range, + range_from_frames, + frames_to_secons ) terminal = Terminal @@ -148,5 +151,8 @@ __all__ = [ "is_overlapping_otio_ranges", "otio_range_with_handles", "convert_to_padded_path", - "otio_range_to_frame_range" + "otio_range_to_frame_range", + "trim_media_range", + "range_from_frames", + "frames_to_secons" ] diff --git a/pype/lib/editorial.py b/pype/lib/editorial.py index c0ad4ace00..42c345b47d 100644 --- a/pype/lib/editorial.py +++ b/pype/lib/editorial.py @@ -1,4 +1,5 @@ import re +from opentimelineio import opentime from opentimelineio.opentime import ( to_frames, RationalTime, TimeRange) @@ -70,3 +71,62 @@ def convert_to_padded_path(path, padding): if "%d" in path: path = re.sub("%d", "%0{padding}d".format(padding=padding), path) return path + + +def trim_media_range(media_range, source_range): + """ + Trim input media range with clip source range. + + Args: + media_range (otio.opentime.TimeRange): available range of media + source_range (otio.opentime.TimeRange): clip required range + + Returns: + otio.opentime.TimeRange: trimmed media range + + """ + rw_media_start = RationalTime( + media_range.start_time.value + source_range.start_time.value, + media_range.start_time.rate + ) + rw_media_duration = RationalTime( + source_range.duration.value, + media_range.duration.rate + ) + return TimeRange( + rw_media_start, rw_media_duration) + + +def range_from_frames(start, duration, fps): + """ + Returns otio time range. + + Args: + start (int): frame start + duration (int): frame duration + fps (float): frame range + + Returns: + otio.opentime.TimeRange: crated range + + """ + return TimeRange( + RationalTime(start, fps), + RationalTime(duration, fps) + ) + + +def frames_to_secons(frames, framerate): + """ + Returning secons. 
+ + Args: + frames (int): frame + framerate (flaot): frame rate + + Returns: + float: second value + + """ + rt = opentime.from_frames(frames, framerate) + return opentime.to_seconds(rt) diff --git a/pype/plugins/global/publish/extract_otio_review.py b/pype/plugins/global/publish/extract_otio_review.py index 6c4dfd65ea..63b3331174 100644 --- a/pype/plugins/global/publish/extract_otio_review.py +++ b/pype/plugins/global/publish/extract_otio_review.py @@ -38,16 +38,15 @@ class ExtractOTIOReview(pype.api.Extractor): """ - # order = api.ExtractorOrder - order = api.CollectorOrder + order = api.ExtractorOrder label = "Extract OTIO review" hosts = ["resolve"] families = ["review"] # plugin default attributes temp_file_head = "tempFile." - to_width = 800 - to_height = 600 + to_width = 1280 + to_height = 720 output_ext = ".jpg" def process(self, instance): @@ -116,11 +115,32 @@ class ExtractOTIOReview(pype.api.Extractor): # media source info if isinstance(r_otio_cl, otio.schema.Clip): - path = r_otio_cl.media_reference.target_url - metadata = r_otio_cl.media_reference.metadata + media_ref = r_otio_cl.media_reference + metadata = media_ref.metadata - if metadata.get("padding"): - # render image sequence to sequence + if isinstance(media_ref, otio.schema.ImageSequenceReference): + dirname = media_ref.target_url_base + head = media_ref.name_prefix + tail = media_ref.name_suffix + first, last = pype.lib.otio_range_to_frame_range( + available_range) + collection = clique.Collection( + head=head, + tail=tail, + padding=media_ref.frame_zero_padding + ) + collection.indexes.update( + [i for i in range(first, (last + 1))]) + # render segment + self._render_seqment( + sequence=[dirname, collection]) + # generate used frames + self._generate_used_frames( + len(collection.indexes)) + elif metadata.get("padding"): + # in case it is file sequence but not new OTIO schema + # `ImageSequenceReference` + path = media_ref.target_url dir_path, collection = self._make_sequence_collection( path, available_range, metadata) @@ -131,6 +151,7 @@ class ExtractOTIOReview(pype.api.Extractor): self._generate_used_frames( len(collection.indexes)) else: + path = media_ref.target_url # render video file to sequence self._render_seqment( video=[path, available_range]) @@ -240,8 +261,8 @@ class ExtractOTIOReview(pype.api.Extractor): duration = avl_durtation # return correct trimmed range - return self._trim_media_range( - avl_range, self._range_from_frames(start, duration, fps) + return pype.lib.trim_media_range( + avl_range, pype.lib.range_from_frames(start, duration, fps) ) def _render_seqment(self, sequence=None, @@ -292,8 +313,8 @@ class ExtractOTIOReview(pype.api.Extractor): frame_start = otio_range.start_time.value input_fps = otio_range.start_time.rate frame_duration = otio_range.duration.value - sec_start = self._frames_to_secons(frame_start, input_fps) - sec_duration = self._frames_to_secons(frame_duration, input_fps) + sec_start = pype.lib.frames_to_secons(frame_start, input_fps) + sec_duration = pype.lib.frames_to_secons(frame_duration, input_fps) # form command for rendering gap files command.extend([ @@ -303,7 +324,7 @@ class ExtractOTIOReview(pype.api.Extractor): ]) elif gap: - sec_duration = self._frames_to_secons( + sec_duration = pype.lib.frames_to_secons( gap, self.actual_fps) # form command for rendering gap files @@ -383,22 +404,6 @@ class ExtractOTIOReview(pype.api.Extractor): return output_path, out_frame_start - @staticmethod - def _frames_to_secons(frames, framerate): - """ - Returning secons. 
- - Args: - frames (int): frame - framerate (flaot): frame rate - - Returns: - float: second value - - """ - rt = otio.opentime.from_frames(frames, framerate) - return otio.opentime.to_seconds(rt) - @staticmethod def _make_sequence_collection(path, otio_range, metadata): """ @@ -424,46 +429,3 @@ class ExtractOTIOReview(pype.api.Extractor): head=head, tail=tail, padding=metadata["padding"]) collection.indexes.update([i for i in range(first, (last + 1))]) return dir_path, collection - - @staticmethod - def _trim_media_range(media_range, source_range): - """ - Trim input media range with clip source range. - - Args: - media_range (otio.opentime.TimeRange): available range of media - source_range (otio.opentime.TimeRange): clip required range - - Returns: - otio.opentime.TimeRange: trimmed media range - - """ - rw_media_start = otio.opentime.RationalTime( - media_range.start_time.value + source_range.start_time.value, - media_range.start_time.rate - ) - rw_media_duration = otio.opentime.RationalTime( - source_range.duration.value, - media_range.duration.rate - ) - return otio.opentime.TimeRange( - rw_media_start, rw_media_duration) - - @staticmethod - def _range_from_frames(start, duration, fps): - """ - Returns otio time range. - - Args: - start (int): frame start - duration (int): frame duration - fps (float): frame range - - Returns: - otio.opentime.TimeRange: crated range - - """ - return otio.opentime.TimeRange( - otio.opentime.RationalTime(start, fps), - otio.opentime.RationalTime(duration, fps) - ) From 0a77c1b32e6ccb296d88f36ee04036d745012407 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 21 Dec 2020 19:41:34 +0100 Subject: [PATCH 057/198] resolve: available functions for OTIO 0.13.0 --- pype/hosts/resolve/otio/davinci_export.py | 51 +++++++++++++++++++---- 1 file changed, 42 insertions(+), 9 deletions(-) diff --git a/pype/hosts/resolve/otio/davinci_export.py b/pype/hosts/resolve/otio/davinci_export.py index 7244544183..0909bcb26a 100644 --- a/pype/hosts/resolve/otio/davinci_export.py +++ b/pype/hosts/resolve/otio/davinci_export.py @@ -1,7 +1,13 @@ +""" compatibility OpenTimelineIO 0.12.0 and older +""" + +import os +import re import sys import json import opentimelineio as otio from . 
import utils +import clique self = sys.modules[__name__] self.track_types = { @@ -29,7 +35,7 @@ def create_otio_reference(media_pool_item): metadata = _get_metadata_media_pool_item(media_pool_item) mp_clip_property = media_pool_item.GetClipProperty() path = mp_clip_property["File Path"] - reformat_path = utils.get_reformated_path(path, padded=False) + reformat_path = utils.get_reformated_path(path, padded=True) padding = utils.get_padding_from_path(path) if padding: @@ -40,7 +46,7 @@ def create_otio_reference(media_pool_item): # get clip property regarding to type mp_clip_property = media_pool_item.GetClipProperty() - fps = mp_clip_property["FPS"] + fps = float(mp_clip_property["FPS"]) if mp_clip_property["Type"] == "Video": frame_start = int(mp_clip_property["Start"]) frame_duration = int(mp_clip_property["Frames"]) @@ -50,14 +56,41 @@ def create_otio_reference(media_pool_item): frame_duration = int(utils.timecode_to_frames( audio_duration, float(fps))) - otio_ex_ref_item = otio.schema.ExternalReference( - target_url=reformat_path, - available_range=create_otio_time_range( - frame_start, - frame_duration, - fps + otio_ex_ref_item = None + + if padding: + # if it is file sequence try to create `ImageSequenceReference` + # the OTIO might not be compatible so return nothing and do it old way + try: + dirname, filename = os.path.split(path) + collection = clique.parse(filename, '{head}[{ranges}]{tail}') + padding_num = len(re.findall("(\\d+)(?=-)", filename).pop()) + otio_ex_ref_item = otio.schema.ImageSequenceReference( + target_url_base=dirname + os.sep, + name_prefix=collection.format("{head}"), + name_suffix=collection.format("{tail}"), + start_frame=frame_start, + frame_zero_padding=padding_num, + rate=fps, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps + ) + ) + except AttributeError: + pass + + if not otio_ex_ref_item: + # in case old OTIO or video file create `ExternalReference` + otio_ex_ref_item = otio.schema.ExternalReference( + target_url=reformat_path, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps + ) ) - ) # add metadata to otio item add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata) From 7a6034c11b7c07525750941cb0369bd4cf2231a3 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 21 Dec 2020 19:42:33 +0100 Subject: [PATCH 058/198] OTIO requirements for 0.13 with fixes cpp build --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c719b06b9c..c1f72f9582 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ google-api-python-client jsonschema keyring log4mongo -OpenTimelineIO==0.11.0 +git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8 pathlib2 Pillow pynput From c2069caa48577d1086761bc5b47fa8c338f92b23 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 21 Dec 2020 19:43:07 +0100 Subject: [PATCH 059/198] resolve: wip on otio subset resources plugin --- .../publish/collect_otio_frame_ranges.py | 4 +- .../publish/collect_otio_subset_resources.py | 100 ++++++++++++++++++ 2 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 pype/plugins/global/publish/collect_otio_subset_resources.py diff --git a/pype/plugins/global/publish/collect_otio_frame_ranges.py b/pype/plugins/global/publish/collect_otio_frame_ranges.py index 5d1370850f..4224abe5a4 100644 --- a/pype/plugins/global/publish/collect_otio_frame_ranges.py +++ 
b/pype/plugins/global/publish/collect_otio_frame_ranges.py @@ -29,13 +29,10 @@ class CollectOcioFrameRanges(pyblish.api.InstancePlugin): # get ranges otio_tl_range = otio_clip.range_in_parent() - self.log.debug(otio_tl_range) otio_src_range = otio_clip.source_range otio_avalable_range = otio_clip.available_range() - self.log.debug(otio_avalable_range) otio_tl_range_handles = pype.lib.otio_range_with_handles( otio_tl_range, instance) - self.log.debug(otio_tl_range_handles) otio_src_range_handles = pype.lib.otio_range_with_handles( otio_src_range, instance) @@ -43,6 +40,7 @@ class CollectOcioFrameRanges(pyblish.api.InstancePlugin): src_starting_from = otio.opentime.to_frames( otio_avalable_range.start_time, otio_avalable_range.start_time.rate) + # convert to frames range_convert = pype.lib.otio_range_to_frame_range tl_start, tl_end = range_convert(otio_tl_range) diff --git a/pype/plugins/global/publish/collect_otio_subset_resources.py b/pype/plugins/global/publish/collect_otio_subset_resources.py new file mode 100644 index 0000000000..1642c3371d --- /dev/null +++ b/pype/plugins/global/publish/collect_otio_subset_resources.py @@ -0,0 +1,100 @@ +""" +Requires: + instance -> review + instance -> masterLayer + instance -> otioClip + context -> otioTimeline + +Provides: + instance -> otioReviewClips +""" + +import clique +import opentimelineio as otio +import pyblish.api +import pype + + +class CollectOcioSubsetResources(pyblish.api.InstancePlugin): + """Get Resources for a subset version""" + + label = "Collect OTIO subset resources" + order = pyblish.api.CollectorOrder - 0.57 + families = ["clip"] + hosts = ["resolve"] + + def process(self, instance): + # get basic variables + otio_clip = instance.data["otioClip"] + + # generate range in parent + otio_src_range = otio_clip.source_range + otio_avalable_range = otio_clip.available_range() + otio_visible_range = otio_clip.visible_range() + otio_src_range_handles = pype.lib.otio_range_with_handles( + otio_src_range, instance) + trimmed_media_range = pype.lib.trim_media_range( + otio_avalable_range, otio_src_range_handles) + + self.log.debug( + "_ otio_avalable_range: {}".format(otio_avalable_range)) + self.log.debug( + "_ otio_visible_range: {}".format(otio_visible_range)) + self.log.debug( + "_ otio_src_range: {}".format(otio_src_range)) + self.log.debug( + "_ otio_src_range_handles: {}".format(otio_src_range_handles)) + self.log.debug( + "_ trimmed_media_range: {}".format(trimmed_media_range)) + + # + # media_ref = otio_clip.media_reference + # metadata = media_ref.metadata + # + # if isinstance(media_ref, otio.schema.ImageSequenceReference): + # dirname = media_ref.target_url_base + # head = media_ref.name_prefix + # tail = media_ref.name_suffix + # first, last = pype.lib.otio_range_to_frame_range( + # available_range) + # collection = clique.Collection( + # head=head, + # tail=tail, + # padding=media_ref.frame_zero_padding + # ) + # collection.indexes.update( + # [i for i in range(first, (last + 1))]) + # # render segment + # self._render_seqment( + # sequence=[dirname, collection]) + # # generate used frames + # self._generate_used_frames( + # len(collection.indexes)) + # elif metadata.get("padding"): + # # in case it is file sequence but not new OTIO schema + # # `ImageSequenceReference` + # path = media_ref.target_url + # dir_path, collection = self._make_sequence_collection( + # path, available_range, metadata) + # + # # render segment + # self._render_seqment( + # sequence=[dir_path, collection]) + # # generate used frames + # 
self._generate_used_frames( + # len(collection.indexes)) + # else: + # path = media_ref.target_url + # # render video file to sequence + # self._render_seqment( + # video=[path, available_range]) + # # generate used frames + # self._generate_used_frames( + # available_range.duration.value) + # + # instance.data["otioReviewClips"] = otio_review_clips + # self.log.debug( + # "_ otio_review_clips: {}".format(otio_review_clips)) + # + # self.log.debug( + # "_ instance.data: {}".format(pformat(instance.data))) From 925f5f92139607681ab44a39e1cd787393ea919c Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 21 Dec 2020 22:08:43 +0100 Subject: [PATCH 060/198] register deadline plugins via module manager --- pype/modules/__init__.py | 2 ++ pype/modules/base.py | 1 + pype/modules/deadline/deadline_module.py | 13 +++++++++++-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/pype/modules/__init__.py b/pype/modules/__init__.py index 4f76dc2df0..42da1ae288 100644 --- a/pype/modules/__init__.py +++ b/pype/modules/__init__.py @@ -34,6 +34,7 @@ from .ftrack import ( from .clockify import ClockifyModule from .logging import LoggingModule from .muster import MusterModule +from .deadline import DeadlineModule from .standalonepublish_action import StandAlonePublishAction from .websocket_server import WebsocketModule from .sync_server import SyncServer @@ -72,6 +73,7 @@ __all__ = ( "IdleManager", "LoggingModule", "MusterModule", + "DeadlineModule", "StandAlonePublishAction", "WebsocketModule", diff --git a/pype/modules/base.py b/pype/modules/base.py index 525320f1a7..d4d49f916d 100644 --- a/pype/modules/base.py +++ b/pype/modules/base.py @@ -428,6 +428,7 @@ class TrayModulesManager(ModulesManager): "user", "ftrack", "muster", + "deadline", "launcher_tool", "avalon", "clockify", diff --git a/pype/modules/deadline/deadline_module.py b/pype/modules/deadline/deadline_module.py index 6de68c390f..ba920f7f13 100644 --- a/pype/modules/deadline/deadline_module.py +++ b/pype/modules/deadline/deadline_module.py @@ -1,7 +1,9 @@ -from .. 
import PypeModule +import os +from pype.modules import ( + PypeModule, IPluginPaths) -class DeadlineModule(PypeModule): +class DeadlineModule(PypeModule, IPluginPaths): name = "deadline" def initialize(self, modules_settings): @@ -18,3 +20,10 @@ class DeadlineModule(PypeModule): def connect_with_modules(self, *_a, **_kw): return + + def get_plugin_paths(self): + """Deadline plugin paths.""" + current_dir = os.path.dirname(os.path.abspath(__file__)) + return { + "publish": [os.path.join(current_dir, "plugins", "publish")] + } From 4ea02c384e203b36eae327707c2f048890c85345 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 21 Dec 2020 22:08:56 +0100 Subject: [PATCH 061/198] move deadline plugins to deadline module --- .../deadline/plugins}/publish/submit_aftereffects_deadline.py | 0 .../deadline/plugins}/publish/submit_maya_deadline.py | 0 .../deadline/plugins}/publish/submit_nuke_deadline.py | 0 .../deadline/plugins}/publish/submit_publish_job.py | 0 .../deadline/plugins}/publish/validate_deadline_connection.py | 2 +- 5 files changed, 1 insertion(+), 1 deletion(-) rename pype/{plugins/aftereffects => modules/deadline/plugins}/publish/submit_aftereffects_deadline.py (100%) rename pype/{plugins/maya => modules/deadline/plugins}/publish/submit_maya_deadline.py (100%) rename pype/{plugins/nuke => modules/deadline/plugins}/publish/submit_nuke_deadline.py (100%) rename pype/{plugins/global => modules/deadline/plugins}/publish/submit_publish_job.py (100%) rename pype/{plugins/maya => modules/deadline/plugins}/publish/validate_deadline_connection.py (98%) diff --git a/pype/plugins/aftereffects/publish/submit_aftereffects_deadline.py b/pype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py similarity index 100% rename from pype/plugins/aftereffects/publish/submit_aftereffects_deadline.py rename to pype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/modules/deadline/plugins/publish/submit_maya_deadline.py similarity index 100% rename from pype/plugins/maya/publish/submit_maya_deadline.py rename to pype/modules/deadline/plugins/publish/submit_maya_deadline.py diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/modules/deadline/plugins/publish/submit_nuke_deadline.py similarity index 100% rename from pype/plugins/nuke/publish/submit_nuke_deadline.py rename to pype/modules/deadline/plugins/publish/submit_nuke_deadline.py diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/modules/deadline/plugins/publish/submit_publish_job.py similarity index 100% rename from pype/plugins/global/publish/submit_publish_job.py rename to pype/modules/deadline/plugins/publish/submit_publish_job.py diff --git a/pype/plugins/maya/publish/validate_deadline_connection.py b/pype/modules/deadline/plugins/publish/validate_deadline_connection.py similarity index 98% rename from pype/plugins/maya/publish/validate_deadline_connection.py rename to pype/modules/deadline/plugins/publish/validate_deadline_connection.py index 0733c3badf..1c49e68ee1 100644 --- a/pype/plugins/maya/publish/validate_deadline_connection.py +++ b/pype/modules/deadline/plugins/publish/validate_deadline_connection.py @@ -10,7 +10,7 @@ class ValidateDeadlineConnection(pyblish.api.ContextPlugin): label = "Validate Deadline Web Service" order = pyblish.api.ValidatorOrder - hosts = ["maya"] + hosts = ["maya", "nuke"] families = ["renderlayer"] def process(self, context): From ff09244146775a8cc5e4b8e7b104c077a136a365 Mon Sep 17 
00:00:00 2001 From: Milan Kolar Date: Mon, 21 Dec 2020 22:09:22 +0100 Subject: [PATCH 062/198] add missing scandir to requirements (uncaught from pathlib2) --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 658405e2fb..6faadba909 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,6 +24,7 @@ pytest-cov pytest-print pyqt5 Qt.py +scandir speedcopy six Sphinx From 4e521445fff0c67363d42405857725067e09e313 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 21 Dec 2020 22:09:59 +0100 Subject: [PATCH 063/198] read deadline and muster "enabled" settings in maya render creator --- pype/plugins/maya/create/create_render.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index b718079b43..2b0b0e19f7 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -127,18 +127,18 @@ class CreateRender(avalon.maya.Creator): system_settings = get_system_settings()["modules"] + deadline_enabled = system_settings["deadline"]["enabled"] + muster_enabled = system_settings["muster"]["enabled"] deadline_url = system_settings["deadline"]["DEADLINE_REST_URL"] muster_url = system_settings["muster"]["MUSTER_REST_URL"] - if deadline_url and muster_url: + if deadline_enabled and muster_enabled: self.log.error( "Both Deadline and Muster are enabled. " "Cannot support both." ) raise RuntimeError("Both Deadline and Muster are enabled") - if deadline_url is None: - self.log.warning("Deadline REST API url not found.") - else: + if deadline_enabled: argument = "{}/api/pools?NamesOnly=true".format(deadline_url) try: response = self._requests_get(argument) @@ -155,9 +155,7 @@ class CreateRender(avalon.maya.Creator): # set any secondary pools self.data["secondaryPool"] = ["-"] + pools - if muster_url is None: - self.log.warning("Muster REST API URL not found.") - else: + if muster_enabled: self.log.info(">>> Loading Muster credentials ...") self._load_credentials() self.log.info(">>> Getting pools ...") From 710acc9fc28fe46d2ac94417e880aae6e0b8a715 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 22 Dec 2020 11:37:27 +0100 Subject: [PATCH 064/198] import `FTRACK_MODULE_DIR` to ftrack's module init --- pype/modules/ftrack/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/modules/ftrack/__init__.py b/pype/modules/ftrack/__init__.py index c02b0fca19..4fb427f13a 100644 --- a/pype/modules/ftrack/__init__.py +++ b/pype/modules/ftrack/__init__.py @@ -1,6 +1,7 @@ from .ftrack_module import ( FtrackModule, - IFtrackEventHandlerPaths + IFtrackEventHandlerPaths, + FTRACK_MODULE_DIR ) from . 
import ftrack_server from .ftrack_server import FtrackServer, check_ftrack_url @@ -9,6 +10,7 @@ from .lib import BaseHandler, BaseEvent, BaseAction, ServerAction __all__ = ( "FtrackModule", "IFtrackEventHandlerPaths", + "FTRACK_MODULE_DIR", "ftrack_server", "FtrackServer", From 122d80709c2f8db3ffbf0b7270ecd60b03d4bcc1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 22 Dec 2020 11:42:19 +0100 Subject: [PATCH 065/198] added prealunch hook that will add python 2 ftrack api modules to PYTHONPATH --- .../ftrack/launch_hooks/pre_python2_vendor.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 pype/modules/ftrack/launch_hooks/pre_python2_vendor.py diff --git a/pype/modules/ftrack/launch_hooks/pre_python2_vendor.py b/pype/modules/ftrack/launch_hooks/pre_python2_vendor.py new file mode 100644 index 0000000000..8e55e29763 --- /dev/null +++ b/pype/modules/ftrack/launch_hooks/pre_python2_vendor.py @@ -0,0 +1,27 @@ +import os +from pype.lib import PreLaunchHook +from pype.modules.ftrack import FTRACK_MODULE_DIR + + +class PrePyhton2Support(PreLaunchHook): + """Add python ftrack api module for Python 2 to PYTHONPATH. + + Path to vendor modules is added to the beggining of PYTHONPATH. + """ + # There will be needed more granular filtering in future + app_groups = ["maya", "nuke", "nukex", "hiero", "nukestudio"] + + def execute(self): + # Prepare dir path + python2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor") + # Load PYTHONPATH from current launch context + python_path = self.launch_context.env.get("PYTHONPATH") + + # Just override if PYTHONPATH is not set yet + if not python_path: + python_path = python2_vendor + else: + python_path = os.pathsep.join([python2_vendor, python_path]) + + # Set new PYTHONPATH to launch context environments + self.launch_context.env["PYTHONPATH"] = python_path From 0e920a3f66d407280ba413e2a1d88527ee97f42a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 22 Dec 2020 11:53:14 +0100 Subject: [PATCH 066/198] moved ftrack_api vendor to ftrack module --- .../ftrack/python2_vendor/arrow/__init__.py | 18 + .../ftrack/python2_vendor/arrow/_version.py | 1 + .../ftrack/python2_vendor/arrow/api.py | 54 + .../ftrack/python2_vendor/arrow/arrow.py | 1584 ++++++ .../ftrack/python2_vendor/arrow/constants.py | 9 + .../ftrack/python2_vendor/arrow/factory.py | 301 ++ .../ftrack/python2_vendor/arrow/formatter.py | 139 + .../ftrack/python2_vendor/arrow/locales.py | 4267 +++++++++++++++++ .../ftrack/python2_vendor/arrow/parser.py | 596 +++ .../ftrack/python2_vendor/arrow/util.py | 115 + .../python2_vendor}/backports/__init__.py | 0 .../backports/functools_lru_cache.py | 50 +- .../python2_vendor}/builtins/__init__.py | 0 .../ftrack_api_old/__init__.py | 0 .../_centralized_storage_scenario.py | 0 .../ftrack_api_old/_python_ntpath.py | 0 .../ftrack_api_old/_version.py | 0 .../ftrack_api_old/_weakref.py | 0 .../ftrack_api_old/accessor/__init__.py | 0 .../ftrack_api_old/accessor/base.py | 0 .../ftrack_api_old/accessor/disk.py | 0 .../ftrack_api_old/accessor/server.py | 0 .../ftrack_api_old/attribute.py | 0 .../python2_vendor}/ftrack_api_old/cache.py | 0 .../ftrack_api_old/collection.py | 0 .../python2_vendor}/ftrack_api_old/data.py | 0 .../ftrack_api_old/entity/__init__.py | 0 .../ftrack_api_old/entity/asset_version.py | 0 .../ftrack_api_old/entity/base.py | 0 .../ftrack_api_old/entity/component.py | 0 .../ftrack_api_old/entity/factory.py | 0 .../ftrack_api_old/entity/job.py | 0 .../ftrack_api_old/entity/location.py | 0 
.../ftrack_api_old/entity/note.py | 0 .../ftrack_api_old/entity/project_schema.py | 0 .../ftrack_api_old/entity/user.py | 0 .../ftrack_api_old/event/__init__.py | 0 .../ftrack_api_old/event/base.py | 0 .../ftrack_api_old/event/expression.py | 0 .../ftrack_api_old/event/hub.py | 0 .../ftrack_api_old/event/subscriber.py | 0 .../ftrack_api_old/event/subscription.py | 0 .../ftrack_api_old/exception.py | 0 .../ftrack_api_old/formatter.py | 0 .../ftrack_api_old/inspection.py | 0 .../python2_vendor}/ftrack_api_old/logging.py | 0 .../ftrack_api_old/operation.py | 0 .../python2_vendor}/ftrack_api_old/plugin.py | 0 .../python2_vendor}/ftrack_api_old/query.py | 0 .../__init__.py | 0 .../resource_identifier_transformer/base.py | 0 .../python2_vendor}/ftrack_api_old/session.py | 0 .../ftrack_api_old/structure/__init__.py | 0 .../ftrack_api_old/structure/base.py | 0 .../ftrack_api_old/structure/entity_id.py | 0 .../ftrack_api_old/structure/id.py | 0 .../ftrack_api_old/structure/origin.py | 0 .../ftrack_api_old/structure/standard.py | 0 .../python2_vendor}/ftrack_api_old/symbol.py | 0 .../ftrack/python2_vendor/future/__init__.py | 93 + .../future/backports/__init__.py | 26 + .../future/backports/_markupbase.py | 422 ++ .../future/backports/datetime.py | 2152 +++++++++ .../future/backports/email/__init__.py | 78 + .../future/backports/email/_encoded_words.py | 232 + .../backports/email/_header_value_parser.py | 2965 ++++++++++++ .../future/backports/email/_parseaddr.py | 546 +++ .../future/backports/email/_policybase.py | 365 ++ .../future/backports/email/base64mime.py | 120 + .../future/backports/email/charset.py | 409 ++ .../future/backports/email/encoders.py | 90 + .../future/backports/email/errors.py | 111 + .../future/backports/email/feedparser.py | 525 ++ .../future/backports/email/generator.py | 498 ++ .../future/backports/email/header.py | 581 +++ .../future/backports/email/headerregistry.py | 592 +++ .../future/backports/email/iterators.py | 74 + .../future/backports/email/message.py | 882 ++++ .../future/backports/email/mime/__init__.py | 0 .../backports/email/mime/application.py | 39 + .../future/backports/email/mime/audio.py | 74 + .../future/backports/email/mime/base.py | 25 + .../future/backports/email/mime/image.py | 48 + .../future/backports/email/mime/message.py | 36 + .../future/backports/email/mime/multipart.py | 49 + .../backports/email/mime/nonmultipart.py | 24 + .../future/backports/email/mime/text.py | 44 + .../future/backports/email/parser.py | 135 + .../future/backports/email/policy.py | 193 + .../future/backports/email/quoprimime.py | 326 ++ .../future/backports/email/utils.py | 400 ++ .../future/backports/html/__init__.py | 27 + .../future/backports/html/entities.py | 2514 ++++++++++ .../future/backports/html/parser.py | 536 +++ .../future/backports/http/__init__.py | 0 .../future/backports/http/client.py | 1346 ++++++ .../future/backports/http/cookiejar.py | 2110 ++++++++ .../future/backports/http/cookies.py | 598 +++ .../future/backports/http/server.py | 1226 +++++ .../python2_vendor/future/backports/misc.py | 944 ++++ .../python2_vendor/future/backports/socket.py | 454 ++ .../future/backports/socketserver.py | 747 +++ .../future/backports/test/__init__.py | 9 + .../future/backports/test/badcert.pem | 36 + .../future/backports/test/badkey.pem | 40 + .../future/backports/test/dh512.pem | 9 + .../test/https_svn_python_org_root.pem | 41 + .../future/backports/test/keycert.passwd.pem | 33 + .../future/backports/test/keycert.pem | 31 + .../future/backports/test/keycert2.pem | 31 + 
.../future/backports/test/nokia.pem | 31 + .../future/backports/test/nullbytecert.pem | 90 + .../future/backports/test/nullcert.pem | 0 .../future/backports/test/pystone.py | 272 ++ .../future/backports/test/sha256.pem | 128 + .../future/backports/test/ssl_cert.pem | 15 + .../future/backports/test/ssl_key.passwd.pem | 18 + .../future/backports/test/ssl_key.pem | 16 + .../future/backports/test/ssl_servers.py | 207 + .../future/backports/test/support.py | 2048 ++++++++ .../future/backports/total_ordering.py | 38 + .../future/backports/urllib/__init__.py | 0 .../future/backports/urllib/error.py | 75 + .../future/backports/urllib/parse.py | 991 ++++ .../future/backports/urllib/request.py | 2647 ++++++++++ .../future/backports/urllib/response.py | 103 + .../future/backports/urllib/robotparser.py | 211 + .../future/backports/xmlrpc/__init__.py | 1 + .../future/backports/xmlrpc/client.py | 1496 ++++++ .../future/backports/xmlrpc/server.py | 999 ++++ .../future/builtins/__init__.py | 51 + .../future/builtins/disabled.py | 66 + .../future/builtins/iterators.py | 52 + .../python2_vendor/future/builtins/misc.py | 135 + .../future/builtins/new_min_max.py | 59 + .../python2_vendor/future/builtins/newnext.py | 70 + .../future/builtins/newround.py | 102 + .../future/builtins/newsuper.py | 114 + .../python2_vendor/future/moves/__init__.py | 8 + .../future/moves/_dummy_thread.py | 8 + .../future/moves/_markupbase.py | 8 + .../python2_vendor/future/moves/_thread.py | 8 + .../python2_vendor/future/moves/builtins.py | 10 + .../future/moves/collections.py | 18 + .../future/moves/configparser.py | 8 + .../python2_vendor/future/moves/copyreg.py | 12 + .../future/moves/dbm/__init__.py | 20 + .../python2_vendor/future/moves/dbm/dumb.py | 9 + .../python2_vendor/future/moves/dbm/gnu.py | 9 + .../python2_vendor/future/moves/dbm/ndbm.py | 9 + .../future/moves/html/__init__.py | 31 + .../future/moves/html/entities.py | 8 + .../future/moves/html/parser.py | 8 + .../future/moves/http/__init__.py | 4 + .../future/moves/http/client.py | 8 + .../future/moves/http/cookiejar.py | 8 + .../future/moves/http/cookies.py | 9 + .../future/moves/http/server.py | 20 + .../python2_vendor/future/moves/itertools.py | 8 + .../python2_vendor/future/moves/pickle.py | 11 + .../python2_vendor/future/moves/queue.py | 8 + .../python2_vendor/future/moves/reprlib.py | 8 + .../future/moves/socketserver.py | 8 + .../python2_vendor/future/moves/subprocess.py | 11 + .../ftrack/python2_vendor/future/moves/sys.py | 8 + .../future/moves/test/__init__.py | 5 + .../future/moves/test/support.py | 10 + .../future/moves/tkinter/__init__.py | 27 + .../future/moves/tkinter/colorchooser.py | 12 + .../future/moves/tkinter/commondialog.py | 12 + .../future/moves/tkinter/constants.py | 12 + .../future/moves/tkinter/dialog.py | 12 + .../future/moves/tkinter/dnd.py | 12 + .../future/moves/tkinter/filedialog.py | 12 + .../future/moves/tkinter/font.py | 12 + .../future/moves/tkinter/messagebox.py | 12 + .../future/moves/tkinter/scrolledtext.py | 12 + .../future/moves/tkinter/simpledialog.py | 12 + .../future/moves/tkinter/tix.py | 12 + .../future/moves/tkinter/ttk.py | 12 + .../future/moves/urllib/__init__.py | 5 + .../future/moves/urllib/error.py | 16 + .../future/moves/urllib/parse.py | 28 + .../future/moves/urllib/request.py | 94 + .../future/moves/urllib/response.py | 12 + .../future/moves/urllib/robotparser.py | 8 + .../python2_vendor/future/moves/winreg.py | 8 + .../future/moves/xmlrpc/__init__.py | 0 .../future/moves/xmlrpc/client.py | 7 + 
.../future/moves/xmlrpc/server.py | 7 + .../future/standard_library/__init__.py | 815 ++++ .../python2_vendor/future/tests/__init__.py | 0 .../python2_vendor/future/tests/base.py | 539 +++ .../python2_vendor/future/types/__init__.py | 257 + .../python2_vendor/future/types/newbytes.py | 460 ++ .../python2_vendor/future/types/newdict.py | 111 + .../python2_vendor/future/types/newint.py | 381 ++ .../python2_vendor/future/types/newlist.py | 95 + .../future/types/newmemoryview.py | 29 + .../python2_vendor/future/types/newobject.py | 117 + .../python2_vendor/future/types/newopen.py | 32 + .../python2_vendor/future/types/newrange.py | 170 + .../python2_vendor/future/types/newstr.py | 426 ++ .../python2_vendor/future/utils/__init__.py | 767 +++ .../future/utils/surrogateescape.py | 198 + .../ftrack/python2_vendor/reprlib/__init__.py | 9 + .../vendor/backports/configparser/__init__.py | 1390 ------ pype/vendor/backports/configparser/helpers.py | 171 - 208 files changed, 45098 insertions(+), 1580 deletions(-) create mode 100644 pype/modules/ftrack/python2_vendor/arrow/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/_version.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/api.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/arrow.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/constants.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/factory.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/formatter.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/locales.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/parser.py create mode 100644 pype/modules/ftrack/python2_vendor/arrow/util.py rename pype/{vendor => modules/ftrack/python2_vendor}/backports/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/backports/functools_lru_cache.py (83%) rename pype/{vendor => modules/ftrack/python2_vendor}/builtins/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/_centralized_storage_scenario.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/_python_ntpath.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/_version.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/_weakref.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/accessor/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/accessor/base.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/accessor/disk.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/accessor/server.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/attribute.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/cache.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/collection.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/data.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/asset_version.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/base.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/component.py (100%) 
rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/factory.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/job.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/location.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/note.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/project_schema.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/entity/user.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/event/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/event/base.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/event/expression.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/event/hub.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/event/subscriber.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/event/subscription.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/exception.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/formatter.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/inspection.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/logging.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/operation.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/plugin.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/query.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/resource_identifier_transformer/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/resource_identifier_transformer/base.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/session.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/structure/__init__.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/structure/base.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/structure/entity_id.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/structure/id.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/structure/origin.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/structure/standard.py (100%) rename pype/{vendor => modules/ftrack/python2_vendor}/ftrack_api_old/symbol.py (100%) create mode 100644 pype/modules/ftrack/python2_vendor/future/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/_markupbase.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/datetime.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/_encoded_words.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/_header_value_parser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/_parseaddr.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/_policybase.py create mode 100644 
pype/modules/ftrack/python2_vendor/future/backports/email/base64mime.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/charset.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/encoders.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/errors.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/feedparser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/generator.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/header.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/headerregistry.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/iterators.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/message.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/application.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/audio.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/base.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/image.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/message.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/multipart.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/nonmultipart.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/mime/text.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/parser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/policy.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/quoprimime.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/email/utils.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/html/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/html/entities.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/html/parser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/http/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/http/client.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/http/cookiejar.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/http/cookies.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/http/server.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/misc.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/socket.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/socketserver.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/badcert.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/badkey.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/dh512.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/https_svn_python_org_root.pem create mode 100644 
pype/modules/ftrack/python2_vendor/future/backports/test/keycert.passwd.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/keycert.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/keycert2.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/nokia.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/nullbytecert.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/nullcert.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/pystone.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/sha256.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/ssl_cert.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.passwd.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.pem create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/ssl_servers.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/test/support.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/total_ordering.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/urllib/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/urllib/error.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/urllib/parse.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/urllib/request.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/urllib/response.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/urllib/robotparser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/client.py create mode 100644 pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/server.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/disabled.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/iterators.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/misc.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/new_min_max.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/newnext.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/newround.py create mode 100644 pype/modules/ftrack/python2_vendor/future/builtins/newsuper.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/_dummy_thread.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/_markupbase.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/_thread.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/builtins.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/collections.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/configparser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/copyreg.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/dbm/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/dbm/dumb.py create mode 100644 
pype/modules/ftrack/python2_vendor/future/moves/dbm/gnu.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/dbm/ndbm.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/html/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/html/entities.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/html/parser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/http/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/http/client.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/http/cookiejar.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/http/cookies.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/http/server.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/itertools.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/pickle.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/queue.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/reprlib.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/socketserver.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/subprocess.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/sys.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/test/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/test/support.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/colorchooser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/commondialog.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/constants.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/dialog.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/dnd.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/filedialog.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/font.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/messagebox.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/scrolledtext.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/simpledialog.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/tix.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/tkinter/ttk.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/urllib/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/urllib/error.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/urllib/parse.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/urllib/request.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/urllib/response.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/urllib/robotparser.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/winreg.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/client.py create mode 100644 pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/server.py create mode 100644 
pype/modules/ftrack/python2_vendor/future/standard_library/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/tests/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/tests/base.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newbytes.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newdict.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newint.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newlist.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newmemoryview.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newobject.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newopen.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newrange.py create mode 100644 pype/modules/ftrack/python2_vendor/future/types/newstr.py create mode 100644 pype/modules/ftrack/python2_vendor/future/utils/__init__.py create mode 100644 pype/modules/ftrack/python2_vendor/future/utils/surrogateescape.py create mode 100644 pype/modules/ftrack/python2_vendor/reprlib/__init__.py delete mode 100644 pype/vendor/backports/configparser/__init__.py delete mode 100644 pype/vendor/backports/configparser/helpers.py diff --git a/pype/modules/ftrack/python2_vendor/arrow/__init__.py b/pype/modules/ftrack/python2_vendor/arrow/__init__.py new file mode 100644 index 0000000000..2883527be8 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/__init__.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +from ._version import __version__ +from .api import get, now, utcnow +from .arrow import Arrow +from .factory import ArrowFactory +from .formatter import ( + FORMAT_ATOM, + FORMAT_COOKIE, + FORMAT_RFC822, + FORMAT_RFC850, + FORMAT_RFC1036, + FORMAT_RFC1123, + FORMAT_RFC2822, + FORMAT_RFC3339, + FORMAT_RSS, + FORMAT_W3C, +) +from .parser import ParserError diff --git a/pype/modules/ftrack/python2_vendor/arrow/_version.py b/pype/modules/ftrack/python2_vendor/arrow/_version.py new file mode 100644 index 0000000000..fd86b3ee91 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/_version.py @@ -0,0 +1 @@ +__version__ = "0.17.0" diff --git a/pype/modules/ftrack/python2_vendor/arrow/api.py b/pype/modules/ftrack/python2_vendor/arrow/api.py new file mode 100644 index 0000000000..a6b7be3de2 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/api.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +""" +Provides the default implementation of :class:`ArrowFactory ` +methods for use as a module API. + +""" + +from __future__ import absolute_import + +from arrow.factory import ArrowFactory + +# internal default factory. +_factory = ArrowFactory() + + +def get(*args, **kwargs): + """Calls the default :class:`ArrowFactory ` ``get`` method.""" + + return _factory.get(*args, **kwargs) + + +get.__doc__ = _factory.get.__doc__ + + +def utcnow(): + """Calls the default :class:`ArrowFactory ` ``utcnow`` method.""" + + return _factory.utcnow() + + +utcnow.__doc__ = _factory.utcnow.__doc__ + + +def now(tz=None): + """Calls the default :class:`ArrowFactory ` ``now`` method.""" + + return _factory.now(tz) + + +now.__doc__ = _factory.now.__doc__ + + +def factory(type): + """Returns an :class:`.ArrowFactory` for the specified :class:`Arrow ` + or derived type. + + :param type: the type, :class:`Arrow ` or derived. 
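The api.py shim above exposes get(), utcnow() and now() as module-level functions that all delegate to one shared ArrowFactory. A minimal sketch of that surface, assuming the vendored directory is on sys.path so the package imports as top-level ``arrow`` (its own modules import it that way)::

    import arrow

    print(arrow.utcnow())             # "now" in UTC
    print(arrow.now("US/Pacific"))    # "now" in a named timezone
    print(arrow.get(1367992474))      # int/float timestamp -> Arrow in UTC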
+ + """ + + return ArrowFactory(type) + + +__all__ = ["get", "utcnow", "now", "factory"] diff --git a/pype/modules/ftrack/python2_vendor/arrow/arrow.py b/pype/modules/ftrack/python2_vendor/arrow/arrow.py new file mode 100644 index 0000000000..4fe9541789 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/arrow.py @@ -0,0 +1,1584 @@ +# -*- coding: utf-8 -*- +""" +Provides the :class:`Arrow ` class, an enhanced ``datetime`` +replacement. + +""" + +from __future__ import absolute_import + +import calendar +import sys +import warnings +from datetime import datetime, timedelta +from datetime import tzinfo as dt_tzinfo +from math import trunc + +from dateutil import tz as dateutil_tz +from dateutil.relativedelta import relativedelta + +from arrow import formatter, locales, parser, util + +if sys.version_info[:2] < (3, 6): # pragma: no cover + with warnings.catch_warnings(): + warnings.simplefilter("default", DeprecationWarning) + warnings.warn( + "Arrow will drop support for Python 2.7 and 3.5 in the upcoming v1.0.0 release. Please upgrade to " + "Python 3.6+ to continue receiving updates for Arrow.", + DeprecationWarning, + ) + + +class Arrow(object): + """An :class:`Arrow ` object. + + Implements the ``datetime`` interface, behaving as an aware ``datetime`` while implementing + additional functionality. + + :param year: the calendar year. + :param month: the calendar month. + :param day: the calendar day. + :param hour: (optional) the hour. Defaults to 0. + :param minute: (optional) the minute, Defaults to 0. + :param second: (optional) the second, Defaults to 0. + :param microsecond: (optional) the microsecond. Defaults to 0. + :param tzinfo: (optional) A timezone expression. Defaults to UTC. + :param fold: (optional) 0 or 1, used to disambiguate repeated times. Defaults to 0. + + .. _tz-expr: + + Recognized timezone expressions: + + - A ``tzinfo`` object. + - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. + - A ``str`` in ISO 8601 style, as in '+07:00'. + - A ``str``, one of the following: 'local', 'utc', 'UTC'. + + Usage:: + + >>> import arrow + >>> arrow.Arrow(2013, 5, 5, 12, 30, 45) + + + """ + + resolution = datetime.resolution + + _ATTRS = ["year", "month", "day", "hour", "minute", "second", "microsecond"] + _ATTRS_PLURAL = ["{}s".format(a) for a in _ATTRS] + _MONTHS_PER_QUARTER = 3 + _SECS_PER_MINUTE = float(60) + _SECS_PER_HOUR = float(60 * 60) + _SECS_PER_DAY = float(60 * 60 * 24) + _SECS_PER_WEEK = float(60 * 60 * 24 * 7) + _SECS_PER_MONTH = float(60 * 60 * 24 * 30.5) + _SECS_PER_YEAR = float(60 * 60 * 24 * 365.25) + + def __init__( + self, + year, + month, + day, + hour=0, + minute=0, + second=0, + microsecond=0, + tzinfo=None, + **kwargs + ): + if tzinfo is None: + tzinfo = dateutil_tz.tzutc() + # detect that tzinfo is a pytz object (issue #626) + elif ( + isinstance(tzinfo, dt_tzinfo) + and hasattr(tzinfo, "localize") + and hasattr(tzinfo, "zone") + and tzinfo.zone + ): + tzinfo = parser.TzinfoParser.parse(tzinfo.zone) + elif util.isstr(tzinfo): + tzinfo = parser.TzinfoParser.parse(tzinfo) + + fold = kwargs.get("fold", 0) + + # use enfold here to cover direct arrow.Arrow init on 2.7/3.5 + self._datetime = dateutil_tz.enfold( + datetime(year, month, day, hour, minute, second, microsecond, tzinfo), + fold=fold, + ) + + # factories: single object, both original and from datetime. + + @classmethod + def now(cls, tzinfo=None): + """Constructs an :class:`Arrow ` object, representing "now" in the given + timezone. 
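As the constructor above shows, ``tzinfo`` accepts a tzinfo object, a timezone name, an ISO 8601 offset string, or None (meaning UTC), and ``fold`` disambiguates repeated wall times. A small sketch under the same sys.path assumption (the Prague DST instant is an assumed example, not from the source)::

    from dateutil import tz
    import arrow

    a = arrow.Arrow(2013, 5, 5, 12, 30, 45)                          # defaults to UTC
    b = arrow.Arrow(2013, 5, 5, 12, 30, 45, tzinfo="Europe/Berlin")  # parsed name
    c = arrow.Arrow(2013, 5, 5, 12, 30, 45, tzinfo=tz.gettz("US/Pacific"))
    # fold=1 selects the second occurrence of an ambiguous (DST) wall time
    d = arrow.Arrow(2017, 10, 29, 2, 30, tzinfo="Europe/Prague", fold=1)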
+ + :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time. + + Usage:: + + >>> arrow.now('Asia/Baku') + + + """ + + if tzinfo is None: + tzinfo = dateutil_tz.tzlocal() + + dt = datetime.now(tzinfo) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def utcnow(cls): + """Constructs an :class:`Arrow ` object, representing "now" in UTC + time. + + Usage:: + + >>> arrow.utcnow() + + + """ + + dt = datetime.now(dateutil_tz.tzutc()) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromtimestamp(cls, timestamp, tzinfo=None): + """Constructs an :class:`Arrow ` object from a timestamp, converted to + the given timezone. + + :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either. + :param tzinfo: (optional) a ``tzinfo`` object. Defaults to local time. + """ + + if tzinfo is None: + tzinfo = dateutil_tz.tzlocal() + elif util.isstr(tzinfo): + tzinfo = parser.TzinfoParser.parse(tzinfo) + + if not util.is_timestamp(timestamp): + raise ValueError( + "The provided timestamp '{}' is invalid.".format(timestamp) + ) + + timestamp = util.normalize_timestamp(float(timestamp)) + dt = datetime.fromtimestamp(timestamp, tzinfo) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def utcfromtimestamp(cls, timestamp): + """Constructs an :class:`Arrow ` object from a timestamp, in UTC time. + + :param timestamp: an ``int`` or ``float`` timestamp, or a ``str`` that converts to either. + + """ + + if not util.is_timestamp(timestamp): + raise ValueError( + "The provided timestamp '{}' is invalid.".format(timestamp) + ) + + timestamp = util.normalize_timestamp(float(timestamp)) + dt = datetime.utcfromtimestamp(timestamp) + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dateutil_tz.tzutc(), + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromdatetime(cls, dt, tzinfo=None): + """Constructs an :class:`Arrow ` object from a ``datetime`` and + optional replacement timezone. + + :param dt: the ``datetime`` + :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to ``dt``'s + timezone, or UTC if naive. + + If you only want to replace the timezone of naive datetimes:: + + >>> dt + datetime.datetime(2013, 5, 5, 0, 0, tzinfo=tzutc()) + >>> arrow.Arrow.fromdatetime(dt, dt.tzinfo or 'US/Pacific') + + + """ + + if tzinfo is None: + if dt.tzinfo is None: + tzinfo = dateutil_tz.tzutc() + else: + tzinfo = dt.tzinfo + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + tzinfo, + fold=getattr(dt, "fold", 0), + ) + + @classmethod + def fromdate(cls, date, tzinfo=None): + """Constructs an :class:`Arrow ` object from a ``date`` and optional + replacement timezone. Time values are set to 0. + + :param date: the ``date`` + :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to UTC. + """ + + if tzinfo is None: + tzinfo = dateutil_tz.tzutc() + + return cls(date.year, date.month, date.day, tzinfo=tzinfo) + + @classmethod + def strptime(cls, date_str, fmt, tzinfo=None): + """Constructs an :class:`Arrow ` object from a date string and format, + in the style of ``datetime.strptime``. 
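The classmethod factories above mirror the stdlib datetime constructors. A brief sketch of the main entry points, values taken from the docstrings, same import assumption::

    from datetime import date, datetime
    import arrow

    arrow.Arrow.utcfromtimestamp(1367992474)         # timestamp, pinned to UTC
    arrow.Arrow.fromdatetime(datetime(2013, 5, 5))   # naive datetime -> assumed UTC
    arrow.Arrow.fromdate(date(2013, 5, 5))           # date -> midnight UTC
    arrow.Arrow.strptime("20-01-2019 15:49:10", "%d-%m-%Y %H:%M:%S")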
Optionally replaces the parsed timezone. + + :param date_str: the date string. + :param fmt: the format string. + :param tzinfo: (optional) A :ref:`timezone expression `. Defaults to the parsed + timezone if ``fmt`` contains a timezone directive, otherwise UTC. + + Usage:: + + >>> arrow.Arrow.strptime('20-01-2019 15:49:10', '%d-%m-%Y %H:%M:%S') + + + """ + + dt = datetime.strptime(date_str, fmt) + if tzinfo is None: + tzinfo = dt.tzinfo + + return cls( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + tzinfo, + fold=getattr(dt, "fold", 0), + ) + + # factories: ranges and spans + + @classmethod + def range(cls, frame, start, end=None, tz=None, limit=None): + """Returns an iterator of :class:`Arrow ` objects, representing + points in time between two inputs. + + :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param start: A datetime expression, the start of the range. + :param end: (optional) A datetime expression, the end of the range. + :param tz: (optional) A :ref:`timezone expression `. Defaults to + ``start``'s timezone, or UTC if ``start`` is naive. + :param limit: (optional) A maximum number of tuples to return. + + **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to + return the entire range. Call with ``limit`` alone to return a maximum # of results from + the start. Call with both to cap a range at a maximum # of results. + + **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before + iterating. As such, either call with naive objects and ``tz``, or aware objects from the + same timezone and no ``tz``. + + Supported frame values: year, quarter, month, week, day, hour, minute, second. + + Recognized datetime expressions: + + - An :class:`Arrow ` object. + - A ``datetime`` object. + + Usage:: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 17, 15) + >>> for r in arrow.Arrow.range('hour', start, end): + ... print(repr(r)) + ... + + + + + + + **NOTE**: Unlike Python's ``range``, ``end`` *may* be included in the returned iterator:: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 13, 30) + >>> for r in arrow.Arrow.range('hour', start, end): + ... print(repr(r)) + ... + + + + """ + + _, frame_relative, relative_steps = cls._get_frames(frame) + + tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz) + + start = cls._get_datetime(start).replace(tzinfo=tzinfo) + end, limit = cls._get_iteration_params(end, limit) + end = cls._get_datetime(end).replace(tzinfo=tzinfo) + + current = cls.fromdatetime(start) + original_day = start.day + day_is_clipped = False + i = 0 + + while current <= end and i < limit: + i += 1 + yield current + + values = [getattr(current, f) for f in cls._ATTRS] + current = cls(*values, tzinfo=tzinfo).shift( + **{frame_relative: relative_steps} + ) + + if frame in ["month", "quarter", "year"] and current.day < original_day: + day_is_clipped = True + + if day_is_clipped and not cls._is_last_day_of_month(current): + current = current.replace(day=original_day) + + def span(self, frame, count=1, bounds="[)"): + """Returns two new :class:`Arrow ` objects, representing the timespan + of the :class:`Arrow ` object in a given timeframe. + + :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param count: (optional) the number of frames to span. 
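range() above iterates fixed frames between two datetime expressions and must be bounded by ``end`` or ``limit``; both call shapes in one sketch::

    from datetime import datetime
    import arrow

    start = datetime(2013, 5, 5, 12, 30)
    end = datetime(2013, 5, 5, 17, 15)

    list(arrow.Arrow.range("hour", start, end))      # capped by `end`
    list(arrow.Arrow.range("hour", start, limit=3))  # capped by `limit` alone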
+ :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in the span. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '[)' is used. + + Supported frame values: year, quarter, month, week, day, hour, minute, second. + + Usage:: + + >>> arrow.utcnow() + + + >>> arrow.utcnow().span('hour') + (, ) + + >>> arrow.utcnow().span('day') + (, ) + + >>> arrow.utcnow().span('day', count=2) + (, ) + + >>> arrow.utcnow().span('day', bounds='[]') + (, ) + + """ + + util.validate_bounds(bounds) + + frame_absolute, frame_relative, relative_steps = self._get_frames(frame) + + if frame_absolute == "week": + attr = "day" + elif frame_absolute == "quarter": + attr = "month" + else: + attr = frame_absolute + + index = self._ATTRS.index(attr) + frames = self._ATTRS[: index + 1] + + values = [getattr(self, f) for f in frames] + + for _ in range(3 - len(values)): + values.append(1) + + floor = self.__class__(*values, tzinfo=self.tzinfo) + + if frame_absolute == "week": + floor = floor.shift(days=-(self.isoweekday() - 1)) + elif frame_absolute == "quarter": + floor = floor.shift(months=-((self.month - 1) % 3)) + + ceil = floor.shift(**{frame_relative: count * relative_steps}) + + if bounds[0] == "(": + floor = floor.shift(microseconds=+1) + + if bounds[1] == ")": + ceil = ceil.shift(microseconds=-1) + + return floor, ceil + + def floor(self, frame): + """Returns a new :class:`Arrow ` object, representing the "floor" + of the timespan of the :class:`Arrow ` object in a given timeframe. + Equivalent to the first element in the 2-tuple returned by + :func:`span `. + + :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). + + Usage:: + + >>> arrow.utcnow().floor('hour') + + """ + + return self.span(frame)[0] + + def ceil(self, frame): + """Returns a new :class:`Arrow ` object, representing the "ceiling" + of the timespan of the :class:`Arrow ` object in a given timeframe. + Equivalent to the second element in the 2-tuple returned by + :func:`span `. + + :param frame: the timeframe. Can be any ``datetime`` property (day, hour, minute...). + + Usage:: + + >>> arrow.utcnow().ceil('hour') + + """ + + return self.span(frame)[1] + + @classmethod + def span_range(cls, frame, start, end, tz=None, limit=None, bounds="[)"): + """Returns an iterator of tuples, each :class:`Arrow ` objects, + representing a series of timespans between two inputs. + + :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param start: A datetime expression, the start of the range. + :param end: (optional) A datetime expression, the end of the range. + :param tz: (optional) A :ref:`timezone expression `. Defaults to + ``start``'s timezone, or UTC if ``start`` is naive. + :param limit: (optional) A maximum number of tuples to return. + :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in each span in the range. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '[)' is used. + + **NOTE**: The ``end`` or ``limit`` must be provided. Call with ``end`` alone to + return the entire range. Call with ``limit`` alone to return a maximum # of results from + the start. 
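span(), floor() and ceil() above all derive from the same frame arithmetic; floor and ceil are simply the two ends of the span tuple. A short sketch::

    import arrow

    now = arrow.utcnow()
    lo, hi = now.span("hour")        # enclosing hour, default bounds '[)'
    assert now.floor("day") == now.span("day")[0]
    assert now.ceil("day") == now.span("day")[1]
    now.span("day", bounds="[]")     # include the end instant as well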
Call with both to cap a range at a maximum # of results. + + **NOTE**: ``tz`` internally **replaces** the timezones of both ``start`` and ``end`` before + iterating. As such, either call with naive objects and ``tz``, or aware objects from the + same timezone and no ``tz``. + + Supported frame values: year, quarter, month, week, day, hour, minute, second. + + Recognized datetime expressions: + + - An :class:`Arrow ` object. + - A ``datetime`` object. + + **NOTE**: Unlike Python's ``range``, ``end`` will *always* be included in the returned + iterator of timespans. + + Usage: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 17, 15) + >>> for r in arrow.Arrow.span_range('hour', start, end): + ... print(r) + ... + (, ) + (, ) + (, ) + (, ) + (, ) + (, ) + + """ + + tzinfo = cls._get_tzinfo(start.tzinfo if tz is None else tz) + start = cls.fromdatetime(start, tzinfo).span(frame)[0] + _range = cls.range(frame, start, end, tz, limit) + return (r.span(frame, bounds=bounds) for r in _range) + + @classmethod + def interval(cls, frame, start, end, interval=1, tz=None, bounds="[)"): + """Returns an iterator of tuples, each :class:`Arrow ` objects, + representing a series of intervals between two inputs. + + :param frame: The timeframe. Can be any ``datetime`` property (day, hour, minute...). + :param start: A datetime expression, the start of the range. + :param end: (optional) A datetime expression, the end of the range. + :param interval: (optional) Time interval for the given time frame. + :param tz: (optional) A timezone expression. Defaults to UTC. + :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in the intervals. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '[)' is used. + + Supported frame values: year, quarter, month, week, day, hour, minute, second + + Recognized datetime expressions: + + - An :class:`Arrow ` object. + - A ``datetime`` object. + + Recognized timezone expressions: + + - A ``tzinfo`` object. + - A ``str`` describing a timezone, similar to 'US/Pacific', or 'Europe/Berlin'. + - A ``str`` in ISO 8601 style, as in '+07:00'. + - A ``str``, one of the following: 'local', 'utc', 'UTC'. + + Usage: + + >>> start = datetime(2013, 5, 5, 12, 30) + >>> end = datetime(2013, 5, 5, 17, 15) + >>> for r in arrow.Arrow.interval('hour', start, end, 2): + ... print r + ... 
+ (, ) + (, ) + (, ) + """ + if interval < 1: + raise ValueError("interval has to be a positive integer") + + spanRange = iter(cls.span_range(frame, start, end, tz, bounds=bounds)) + while True: + try: + intvlStart, intvlEnd = next(spanRange) + for _ in range(interval - 1): + _, intvlEnd = next(spanRange) + yield intvlStart, intvlEnd + except StopIteration: + return + + # representations + + def __repr__(self): + return "<{} [{}]>".format(self.__class__.__name__, self.__str__()) + + def __str__(self): + return self._datetime.isoformat() + + def __format__(self, formatstr): + + if len(formatstr) > 0: + return self.format(formatstr) + + return str(self) + + def __hash__(self): + return self._datetime.__hash__() + + # attributes and properties + + def __getattr__(self, name): + + if name == "week": + return self.isocalendar()[1] + + if name == "quarter": + return int((self.month - 1) / self._MONTHS_PER_QUARTER) + 1 + + if not name.startswith("_"): + value = getattr(self._datetime, name, None) + + if value is not None: + return value + + return object.__getattribute__(self, name) + + @property + def tzinfo(self): + """Gets the ``tzinfo`` of the :class:`Arrow ` object. + + Usage:: + + >>> arw=arrow.utcnow() + >>> arw.tzinfo + tzutc() + + """ + + return self._datetime.tzinfo + + @tzinfo.setter + def tzinfo(self, tzinfo): + """ Sets the ``tzinfo`` of the :class:`Arrow ` object. """ + + self._datetime = self._datetime.replace(tzinfo=tzinfo) + + @property + def datetime(self): + """Returns a datetime representation of the :class:`Arrow ` object. + + Usage:: + + >>> arw=arrow.utcnow() + >>> arw.datetime + datetime.datetime(2019, 1, 24, 16, 35, 27, 276649, tzinfo=tzutc()) + + """ + + return self._datetime + + @property + def naive(self): + """Returns a naive datetime representation of the :class:`Arrow ` + object. + + Usage:: + + >>> nairobi = arrow.now('Africa/Nairobi') + >>> nairobi + + >>> nairobi.naive + datetime.datetime(2019, 1, 23, 19, 27, 12, 297999) + + """ + + return self._datetime.replace(tzinfo=None) + + @property + def timestamp(self): + """Returns a timestamp representation of the :class:`Arrow ` object, in + UTC time. + + Usage:: + + >>> arrow.utcnow().timestamp + 1548260567 + + """ + + warnings.warn( + "For compatibility with the datetime.timestamp() method this property will be replaced with a method in " + "the 1.0.0 release, please switch to the .int_timestamp property for identical behaviour as soon as " + "possible.", + DeprecationWarning, + ) + return calendar.timegm(self._datetime.utctimetuple()) + + @property + def int_timestamp(self): + """Returns a timestamp representation of the :class:`Arrow ` object, in + UTC time. + + Usage:: + + >>> arrow.utcnow().int_timestamp + 1548260567 + + """ + + return calendar.timegm(self._datetime.utctimetuple()) + + @property + def float_timestamp(self): + """Returns a floating-point representation of the :class:`Arrow ` + object, in UTC time. + + Usage:: + + >>> arrow.utcnow().float_timestamp + 1548260516.830896 + + """ + + # IDEA get rid of this in 1.0.0 and wrap datetime.timestamp() + # Or for compatibility retain this but make it call the timestamp method + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + return self.timestamp + float(self.microsecond) / 1000000 + + @property + def fold(self): + """ Returns the ``fold`` value of the :class:`Arrow ` object. 
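Past the interval iterator completed above, the attribute section resolves ``week`` and ``quarter`` lazily in __getattr__ and steers callers from the deprecated ``timestamp`` property toward ``int_timestamp``. A sketch of that read-only surface::

    import arrow

    arw = arrow.utcnow()
    arw.week             # ISO week number, computed in __getattr__
    arw.quarter          # 1-4, likewise computed on access
    arw.datetime         # the wrapped aware datetime
    arw.naive            # same instant, tzinfo stripped
    arw.int_timestamp    # preferred over the DeprecationWarning-raising .timestamp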
""" + + # in python < 3.6 _datetime will be a _DatetimeWithFold if fold=1 and a datetime with no fold attribute + # otherwise, so we need to return zero to cover the latter case + return getattr(self._datetime, "fold", 0) + + @property + def ambiguous(self): + """ Returns a boolean indicating whether the :class:`Arrow ` object is ambiguous.""" + + return dateutil_tz.datetime_ambiguous(self._datetime) + + @property + def imaginary(self): + """Indicates whether the :class: `Arrow ` object exists in the current timezone.""" + + return not dateutil_tz.datetime_exists(self._datetime) + + # mutation and duplication. + + def clone(self): + """Returns a new :class:`Arrow ` object, cloned from the current one. + + Usage: + + >>> arw = arrow.utcnow() + >>> cloned = arw.clone() + + """ + + return self.fromdatetime(self._datetime) + + def replace(self, **kwargs): + """Returns a new :class:`Arrow ` object with attributes updated + according to inputs. + + Use property names to set their value absolutely:: + + >>> import arrow + >>> arw = arrow.utcnow() + >>> arw + + >>> arw.replace(year=2014, month=6) + + + You can also replace the timezone without conversion, using a + :ref:`timezone expression `:: + + >>> arw.replace(tzinfo=tz.tzlocal()) + + + """ + + absolute_kwargs = {} + + for key, value in kwargs.items(): + + if key in self._ATTRS: + absolute_kwargs[key] = value + elif key in ["week", "quarter"]: + raise AttributeError("setting absolute {} is not supported".format(key)) + elif key not in ["tzinfo", "fold"]: + raise AttributeError('unknown attribute: "{}"'.format(key)) + + current = self._datetime.replace(**absolute_kwargs) + + tzinfo = kwargs.get("tzinfo") + + if tzinfo is not None: + tzinfo = self._get_tzinfo(tzinfo) + current = current.replace(tzinfo=tzinfo) + + fold = kwargs.get("fold") + + # TODO revisit this once we drop support for 2.7/3.5 + if fold is not None: + current = dateutil_tz.enfold(current, fold=fold) + + return self.fromdatetime(current) + + def shift(self, **kwargs): + """Returns a new :class:`Arrow ` object with attributes updated + according to inputs. + + Use pluralized property names to relatively shift their current value: + + >>> import arrow + >>> arw = arrow.utcnow() + >>> arw + + >>> arw.shift(years=1, months=-1) + + + Day-of-the-week relative shifting can use either Python's weekday numbers + (Monday = 0, Tuesday = 1 .. Sunday = 6) or using dateutil.relativedelta's + day instances (MO, TU .. SU). When using weekday numbers, the returned + date will always be greater than or equal to the starting date. + + Using the above code (which is a Saturday) and asking it to shift to Saturday: + + >>> arw.shift(weekday=5) + + + While asking for a Monday: + + >>> arw.shift(weekday=0) + + + """ + + relative_kwargs = {} + additional_attrs = ["weeks", "quarters", "weekday"] + + for key, value in kwargs.items(): + + if key in self._ATTRS_PLURAL or key in additional_attrs: + relative_kwargs[key] = value + else: + raise AttributeError( + "Invalid shift time frame. Please select one of the following: {}.".format( + ", ".join(self._ATTRS_PLURAL + additional_attrs) + ) + ) + + # core datetime does not support quarters, translate to months. 
+ relative_kwargs.setdefault("months", 0) + relative_kwargs["months"] += ( + relative_kwargs.pop("quarters", 0) * self._MONTHS_PER_QUARTER + ) + + current = self._datetime + relativedelta(**relative_kwargs) + + if not dateutil_tz.datetime_exists(current): + current = dateutil_tz.resolve_imaginary(current) + + return self.fromdatetime(current) + + def to(self, tz): + """Returns a new :class:`Arrow ` object, converted + to the target timezone. + + :param tz: A :ref:`timezone expression `. + + Usage:: + + >>> utc = arrow.utcnow() + >>> utc + + + >>> utc.to('US/Pacific') + + + >>> utc.to(tz.tzlocal()) + + + >>> utc.to('-07:00') + + + >>> utc.to('local') + + + >>> utc.to('local').to('utc') + + + """ + + if not isinstance(tz, dt_tzinfo): + tz = parser.TzinfoParser.parse(tz) + + dt = self._datetime.astimezone(tz) + + return self.__class__( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + fold=getattr(dt, "fold", 0), + ) + + # string output and formatting + + def format(self, fmt="YYYY-MM-DD HH:mm:ssZZ", locale="en_us"): + """Returns a string representation of the :class:`Arrow ` object, + formatted according to a format string. + + :param fmt: the format string. + + Usage:: + + >>> arrow.utcnow().format('YYYY-MM-DD HH:mm:ss ZZ') + '2013-05-09 03:56:47 -00:00' + + >>> arrow.utcnow().format('X') + '1368071882' + + >>> arrow.utcnow().format('MMMM DD, YYYY') + 'May 09, 2013' + + >>> arrow.utcnow().format() + '2013-05-09 03:56:47 -00:00' + + """ + + return formatter.DateTimeFormatter(locale).format(self._datetime, fmt) + + def humanize( + self, other=None, locale="en_us", only_distance=False, granularity="auto" + ): + """Returns a localized, humanized representation of a relative difference in time. + + :param other: (optional) an :class:`Arrow ` or ``datetime`` object. + Defaults to now in the current :class:`Arrow ` object's timezone. + :param locale: (optional) a ``str`` specifying a locale. Defaults to 'en_us'. + :param only_distance: (optional) returns only time difference eg: "11 seconds" without "in" or "ago" part. + :param granularity: (optional) defines the precision of the output. Set it to strings 'second', 'minute', + 'hour', 'day', 'week', 'month' or 'year' or a list of any combination of these strings + + Usage:: + + >>> earlier = arrow.utcnow().shift(hours=-2) + >>> earlier.humanize() + '2 hours ago' + + >>> later = earlier.shift(hours=4) + >>> later.humanize(earlier) + 'in 4 hours' + + """ + + locale_name = locale + locale = locales.get_locale(locale) + + if other is None: + utc = datetime.utcnow().replace(tzinfo=dateutil_tz.tzutc()) + dt = utc.astimezone(self._datetime.tzinfo) + + elif isinstance(other, Arrow): + dt = other._datetime + + elif isinstance(other, datetime): + if other.tzinfo is None: + dt = other.replace(tzinfo=self._datetime.tzinfo) + else: + dt = other.astimezone(self._datetime.tzinfo) + + else: + raise TypeError( + "Invalid 'other' argument of type '{}'. 
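replace() above sets fields absolutely (and can retag the timezone without converting), shift() moves relatively via pluralized keywords, and to() performs the actual zone conversion. A combined sketch, same import assumption::

    import arrow

    arw = arrow.utcnow()
    arw.replace(year=2014, month=6)        # absolute field replacement
    arw.replace(tzinfo="US/Pacific")       # retag the zone, keep the wall time
    arw.shift(years=1, months=-1)          # relative shift
    arw.shift(weekday=0)                   # next Monday, never earlier
    arw.to("US/Pacific")                   # real conversion between zones
    arw.format("YYYY-MM-DD HH:mm:ss ZZ")   # token-based formatting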
" + "Argument must be of type None, Arrow, or datetime.".format( + type(other).__name__ + ) + ) + + if isinstance(granularity, list) and len(granularity) == 1: + granularity = granularity[0] + + delta = int(round(util.total_seconds(self._datetime - dt))) + sign = -1 if delta < 0 else 1 + diff = abs(delta) + delta = diff + + try: + if granularity == "auto": + if diff < 10: + return locale.describe("now", only_distance=only_distance) + + if diff < 45: + seconds = sign * delta + return locale.describe( + "seconds", seconds, only_distance=only_distance + ) + + elif diff < 90: + return locale.describe("minute", sign, only_distance=only_distance) + elif diff < 2700: + minutes = sign * int(max(delta / 60, 2)) + return locale.describe( + "minutes", minutes, only_distance=only_distance + ) + + elif diff < 5400: + return locale.describe("hour", sign, only_distance=only_distance) + elif diff < 79200: + hours = sign * int(max(delta / 3600, 2)) + return locale.describe("hours", hours, only_distance=only_distance) + + # anything less than 48 hours should be 1 day + elif diff < 172800: + return locale.describe("day", sign, only_distance=only_distance) + elif diff < 554400: + days = sign * int(max(delta / 86400, 2)) + return locale.describe("days", days, only_distance=only_distance) + + elif diff < 907200: + return locale.describe("week", sign, only_distance=only_distance) + elif diff < 2419200: + weeks = sign * int(max(delta / 604800, 2)) + return locale.describe("weeks", weeks, only_distance=only_distance) + + elif diff < 3888000: + return locale.describe("month", sign, only_distance=only_distance) + elif diff < 29808000: + self_months = self._datetime.year * 12 + self._datetime.month + other_months = dt.year * 12 + dt.month + + months = sign * int(max(abs(other_months - self_months), 2)) + + return locale.describe( + "months", months, only_distance=only_distance + ) + + elif diff < 47260800: + return locale.describe("year", sign, only_distance=only_distance) + else: + years = sign * int(max(delta / 31536000, 2)) + return locale.describe("years", years, only_distance=only_distance) + + elif util.isstr(granularity): + if granularity == "second": + delta = sign * delta + if abs(delta) < 2: + return locale.describe("now", only_distance=only_distance) + elif granularity == "minute": + delta = sign * delta / self._SECS_PER_MINUTE + elif granularity == "hour": + delta = sign * delta / self._SECS_PER_HOUR + elif granularity == "day": + delta = sign * delta / self._SECS_PER_DAY + elif granularity == "week": + delta = sign * delta / self._SECS_PER_WEEK + elif granularity == "month": + delta = sign * delta / self._SECS_PER_MONTH + elif granularity == "year": + delta = sign * delta / self._SECS_PER_YEAR + else: + raise AttributeError( + "Invalid level of granularity. 
Please select between 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'" + ) + + if trunc(abs(delta)) != 1: + granularity += "s" + return locale.describe(granularity, delta, only_distance=only_distance) + + else: + timeframes = [] + if "year" in granularity: + years = sign * delta / self._SECS_PER_YEAR + delta %= self._SECS_PER_YEAR + timeframes.append(["year", years]) + + if "month" in granularity: + months = sign * delta / self._SECS_PER_MONTH + delta %= self._SECS_PER_MONTH + timeframes.append(["month", months]) + + if "week" in granularity: + weeks = sign * delta / self._SECS_PER_WEEK + delta %= self._SECS_PER_WEEK + timeframes.append(["week", weeks]) + + if "day" in granularity: + days = sign * delta / self._SECS_PER_DAY + delta %= self._SECS_PER_DAY + timeframes.append(["day", days]) + + if "hour" in granularity: + hours = sign * delta / self._SECS_PER_HOUR + delta %= self._SECS_PER_HOUR + timeframes.append(["hour", hours]) + + if "minute" in granularity: + minutes = sign * delta / self._SECS_PER_MINUTE + delta %= self._SECS_PER_MINUTE + timeframes.append(["minute", minutes]) + + if "second" in granularity: + seconds = sign * delta + timeframes.append(["second", seconds]) + + if len(timeframes) < len(granularity): + raise AttributeError( + "Invalid level of granularity. " + "Please select between 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'." + ) + + for tf in timeframes: + # Make granularity plural if the delta is not equal to 1 + if trunc(abs(tf[1])) != 1: + tf[0] += "s" + return locale.describe_multi(timeframes, only_distance=only_distance) + + except KeyError as e: + raise ValueError( + "Humanization of the {} granularity is not currently translated in the '{}' locale. " + "Please consider making a contribution to this locale.".format( + e, locale_name + ) + ) + + # query functions + + def is_between(self, start, end, bounds="()"): + """Returns a boolean denoting whether the specified date and time is between + the start and end dates and times. + + :param start: an :class:`Arrow ` object. + :param end: an :class:`Arrow ` object. + :param bounds: (optional) a ``str`` of either '()', '(]', '[)', or '[]' that specifies + whether to include or exclude the start and end values in the range. '(' excludes + the start, '[' includes the start, ')' excludes the end, and ']' includes the end. + If the bounds are not specified, the default bound '()' is used. 
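humanize() above picks a frame automatically from the threshold ladder, or honours an explicit granularity given as a single frame name or a list of frames. A sketch::

    import arrow

    earlier = arrow.utcnow().shift(hours=-2)
    earlier.humanize()                                # '2 hours ago'
    earlier.humanize(only_distance=True)              # '2 hours'
    earlier.humanize(granularity="minute")            # force minute precision
    earlier.humanize(granularity=["hour", "minute"])  # multi-frame output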
+ + Usage:: + + >>> start = arrow.get(datetime(2013, 5, 5, 12, 30, 10)) + >>> end = arrow.get(datetime(2013, 5, 5, 12, 30, 36)) + >>> arrow.get(datetime(2013, 5, 5, 12, 30, 27)).is_between(start, end) + True + + >>> start = arrow.get(datetime(2013, 5, 5)) + >>> end = arrow.get(datetime(2013, 5, 8)) + >>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[]') + True + + >>> start = arrow.get(datetime(2013, 5, 5)) + >>> end = arrow.get(datetime(2013, 5, 8)) + >>> arrow.get(datetime(2013, 5, 8)).is_between(start, end, '[)') + False + + """ + + util.validate_bounds(bounds) + + if not isinstance(start, Arrow): + raise TypeError( + "Can't parse start date argument type of '{}'".format(type(start)) + ) + + if not isinstance(end, Arrow): + raise TypeError( + "Can't parse end date argument type of '{}'".format(type(end)) + ) + + include_start = bounds[0] == "[" + include_end = bounds[1] == "]" + + target_timestamp = self.float_timestamp + start_timestamp = start.float_timestamp + end_timestamp = end.float_timestamp + + if include_start and include_end: + return ( + target_timestamp >= start_timestamp + and target_timestamp <= end_timestamp + ) + elif include_start and not include_end: + return ( + target_timestamp >= start_timestamp and target_timestamp < end_timestamp + ) + elif not include_start and include_end: + return ( + target_timestamp > start_timestamp and target_timestamp <= end_timestamp + ) + else: + return ( + target_timestamp > start_timestamp and target_timestamp < end_timestamp + ) + + # datetime methods + + def date(self): + """Returns a ``date`` object with the same year, month and day. + + Usage:: + + >>> arrow.utcnow().date() + datetime.date(2019, 1, 23) + + """ + + return self._datetime.date() + + def time(self): + """Returns a ``time`` object with the same hour, minute, second, microsecond. + + Usage:: + + >>> arrow.utcnow().time() + datetime.time(12, 15, 34, 68352) + + """ + + return self._datetime.time() + + def timetz(self): + """Returns a ``time`` object with the same hour, minute, second, microsecond and + tzinfo. + + Usage:: + + >>> arrow.utcnow().timetz() + datetime.time(12, 5, 18, 298893, tzinfo=tzutc()) + + """ + + return self._datetime.timetz() + + def astimezone(self, tz): + """Returns a ``datetime`` object, converted to the specified timezone. + + :param tz: a ``tzinfo`` object. + + Usage:: + + >>> pacific=arrow.now('US/Pacific') + >>> nyc=arrow.now('America/New_York').tzinfo + >>> pacific.astimezone(nyc) + datetime.datetime(2019, 1, 20, 10, 24, 22, 328172, tzinfo=tzfile('/usr/share/zoneinfo/America/New_York')) + + """ + + return self._datetime.astimezone(tz) + + def utcoffset(self): + """Returns a ``timedelta`` object representing the whole number of minutes difference from + UTC time. + + Usage:: + + >>> arrow.now('US/Pacific').utcoffset() + datetime.timedelta(-1, 57600) + + """ + + return self._datetime.utcoffset() + + def dst(self): + """Returns the daylight savings time adjustment. + + Usage:: + + >>> arrow.utcnow().dst() + datetime.timedelta(0) + + """ + + return self._datetime.dst() + + def timetuple(self): + """Returns a ``time.struct_time``, in the current timezone. + + Usage:: + + >>> arrow.utcnow().timetuple() + time.struct_time(tm_year=2019, tm_mon=1, tm_mday=20, tm_hour=15, tm_min=17, tm_sec=8, tm_wday=6, tm_yday=20, tm_isdst=0) + + """ + + return self._datetime.timetuple() + + def utctimetuple(self): + """Returns a ``time.struct_time``, in UTC time. 
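is_between() above compares float timestamps and takes the same '()', '(]', '[)', '[]' bounds notation as span(); note its default is the exclusive '()'. The docstring's own cases, restated as a runnable sketch::

    from datetime import datetime
    import arrow

    start = arrow.get(datetime(2013, 5, 5))
    end = arrow.get(datetime(2013, 5, 8))
    assert arrow.get(datetime(2013, 5, 8)).is_between(start, end, "[]") is True
    assert arrow.get(datetime(2013, 5, 8)).is_between(start, end, "[)") is False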
+ + Usage:: + + >>> arrow.utcnow().utctimetuple() + time.struct_time(tm_year=2019, tm_mon=1, tm_mday=19, tm_hour=21, tm_min=41, tm_sec=7, tm_wday=5, tm_yday=19, tm_isdst=0) + + """ + + return self._datetime.utctimetuple() + + def toordinal(self): + """Returns the proleptic Gregorian ordinal of the date. + + Usage:: + + >>> arrow.utcnow().toordinal() + 737078 + + """ + + return self._datetime.toordinal() + + def weekday(self): + """Returns the day of the week as an integer (0-6). + + Usage:: + + >>> arrow.utcnow().weekday() + 5 + + """ + + return self._datetime.weekday() + + def isoweekday(self): + """Returns the ISO day of the week as an integer (1-7). + + Usage:: + + >>> arrow.utcnow().isoweekday() + 6 + + """ + + return self._datetime.isoweekday() + + def isocalendar(self): + """Returns a 3-tuple, (ISO year, ISO week number, ISO weekday). + + Usage:: + + >>> arrow.utcnow().isocalendar() + (2019, 3, 6) + + """ + + return self._datetime.isocalendar() + + def isoformat(self, sep="T"): + """Returns an ISO 8601 formatted representation of the date and time. + + Usage:: + + >>> arrow.utcnow().isoformat() + '2019-01-19T18:30:52.442118+00:00' + + """ + + return self._datetime.isoformat(sep) + + def ctime(self): + """Returns a ctime formatted representation of the date and time. + + Usage:: + + >>> arrow.utcnow().ctime() + 'Sat Jan 19 18:26:50 2019' + + """ + + return self._datetime.ctime() + + def strftime(self, format): + """Formats in the style of ``datetime.strftime``. + + :param format: the format string. + + Usage:: + + >>> arrow.utcnow().strftime('%d-%m-%Y %H:%M:%S') + '23-01-2019 12:28:17' + + """ + + return self._datetime.strftime(format) + + def for_json(self): + """Serializes for the ``for_json`` protocol of simplejson. + + Usage:: + + >>> arrow.utcnow().for_json() + '2019-01-19T18:25:36.760079+00:00' + + """ + + return self.isoformat() + + # math + + def __add__(self, other): + + if isinstance(other, (timedelta, relativedelta)): + return self.fromdatetime(self._datetime + other, self._datetime.tzinfo) + + return NotImplemented + + def __radd__(self, other): + return self.__add__(other) + + def __sub__(self, other): + + if isinstance(other, (timedelta, relativedelta)): + return self.fromdatetime(self._datetime - other, self._datetime.tzinfo) + + elif isinstance(other, datetime): + return self._datetime - other + + elif isinstance(other, Arrow): + return self._datetime - other._datetime + + return NotImplemented + + def __rsub__(self, other): + + if isinstance(other, datetime): + return other - self._datetime + + return NotImplemented + + # comparisons + + def __eq__(self, other): + + if not isinstance(other, (Arrow, datetime)): + return False + + return self._datetime == self._get_datetime(other) + + def __ne__(self, other): + + if not isinstance(other, (Arrow, datetime)): + return True + + return not self.__eq__(other) + + def __gt__(self, other): + + if not isinstance(other, (Arrow, datetime)): + return NotImplemented + + return self._datetime > self._get_datetime(other) + + def __ge__(self, other): + + if not isinstance(other, (Arrow, datetime)): + return NotImplemented + + return self._datetime >= self._get_datetime(other) + + def __lt__(self, other): + + if not isinstance(other, (Arrow, datetime)): + return NotImplemented + + return self._datetime < self._get_datetime(other) + + def __le__(self, other): + + if not isinstance(other, (Arrow, datetime)): + return NotImplemented + + return self._datetime <= self._get_datetime(other) + + def __cmp__(self, other): + if 
sys.version_info[0] < 3: # pragma: no cover + if not isinstance(other, (Arrow, datetime)): + raise TypeError( + "can't compare '{}' to '{}'".format(type(self), type(other)) + ) + + # internal methods + + @staticmethod + def _get_tzinfo(tz_expr): + + if tz_expr is None: + return dateutil_tz.tzutc() + if isinstance(tz_expr, dt_tzinfo): + return tz_expr + else: + try: + return parser.TzinfoParser.parse(tz_expr) + except parser.ParserError: + raise ValueError("'{}' not recognized as a timezone".format(tz_expr)) + + @classmethod + def _get_datetime(cls, expr): + """Get datetime object for a specified expression.""" + if isinstance(expr, Arrow): + return expr.datetime + elif isinstance(expr, datetime): + return expr + elif util.is_timestamp(expr): + timestamp = float(expr) + return cls.utcfromtimestamp(timestamp).datetime + else: + raise ValueError( + "'{}' not recognized as a datetime or timestamp.".format(expr) + ) + + @classmethod + def _get_frames(cls, name): + + if name in cls._ATTRS: + return name, "{}s".format(name), 1 + elif name[-1] == "s" and name[:-1] in cls._ATTRS: + return name[:-1], name, 1 + elif name in ["week", "weeks"]: + return "week", "weeks", 1 + elif name in ["quarter", "quarters"]: + return "quarter", "months", 3 + + supported = ", ".join( + [ + "year(s)", + "month(s)", + "day(s)", + "hour(s)", + "minute(s)", + "second(s)", + "microsecond(s)", + "week(s)", + "quarter(s)", + ] + ) + raise AttributeError( + "range/span over frame {} not supported. Supported frames: {}".format( + name, supported + ) + ) + + @classmethod + def _get_iteration_params(cls, end, limit): + + if end is None: + + if limit is None: + raise ValueError("one of 'end' or 'limit' is required") + + return cls.max, limit + + else: + if limit is None: + return end, sys.maxsize + return end, limit + + @staticmethod + def _is_last_day_of_month(date): + return date.day == calendar.monthrange(date.year, date.month)[1] + + +Arrow.min = Arrow.fromdatetime(datetime.min) +Arrow.max = Arrow.fromdatetime(datetime.max) diff --git a/pype/modules/ftrack/python2_vendor/arrow/constants.py b/pype/modules/ftrack/python2_vendor/arrow/constants.py new file mode 100644 index 0000000000..81e37b26de --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/constants.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- + +# Output of time.mktime(datetime.max.timetuple()) on macOS +# This value must be hardcoded for compatibility with Windows +# Platform-independent max timestamps are hard to form +# https://stackoverflow.com/q/46133223 +MAX_TIMESTAMP = 253402318799.0 +MAX_TIMESTAMP_MS = MAX_TIMESTAMP * 1000 +MAX_TIMESTAMP_US = MAX_TIMESTAMP * 1000000 diff --git a/pype/modules/ftrack/python2_vendor/arrow/factory.py b/pype/modules/ftrack/python2_vendor/arrow/factory.py new file mode 100644 index 0000000000..05933e8151 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/factory.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +""" +Implements the :class:`ArrowFactory ` class, +providing factory methods for common :class:`Arrow ` +construction scenarios. + +""" + +from __future__ import absolute_import + +import calendar +from datetime import date, datetime +from datetime import tzinfo as dt_tzinfo +from time import struct_time + +from dateutil import tz as dateutil_tz + +from arrow import parser +from arrow.arrow import Arrow +from arrow.util import is_timestamp, iso_to_gregorian, isstr + + +class ArrowFactory(object): + """A factory for generating :class:`Arrow ` objects. 
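+
+    The module-level helpers (``arrow.get``, ``arrow.utcnow``, ``arrow.now``)
+    delegate to a default instance of this factory.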
+ + :param type: (optional) the :class:`Arrow `-based class to construct from. + Defaults to :class:`Arrow `. + + """ + + def __init__(self, type=Arrow): + self.type = type + + def get(self, *args, **kwargs): + """Returns an :class:`Arrow ` object based on flexible inputs. + + :param locale: (optional) a ``str`` specifying a locale for the parser. Defaults to 'en_us'. + :param tzinfo: (optional) a :ref:`timezone expression ` or tzinfo object. + Replaces the timezone unless using an input form that is explicitly UTC or specifies + the timezone in a positional argument. Defaults to UTC. + :param normalize_whitespace: (optional) a ``bool`` specifying whether or not to normalize + redundant whitespace (spaces, tabs, and newlines) in a datetime string before parsing. + Defaults to false. + + Usage:: + + >>> import arrow + + **No inputs** to get current UTC time:: + + >>> arrow.get() + + + **None** to also get current UTC time:: + + >>> arrow.get(None) + + + **One** :class:`Arrow ` object, to get a copy. + + >>> arw = arrow.utcnow() + >>> arrow.get(arw) + + + **One** ``float`` or ``int``, convertible to a floating-point timestamp, to get + that timestamp in UTC:: + + >>> arrow.get(1367992474.293378) + + + >>> arrow.get(1367992474) + + + **One** ISO 8601-formatted ``str``, to parse it:: + + >>> arrow.get('2013-09-29T01:26:43.830580') + + + **One** ISO 8601-formatted ``str``, in basic format, to parse it:: + + >>> arrow.get('20160413T133656.456289') + + + **One** ``tzinfo``, to get the current time **converted** to that timezone:: + + >>> arrow.get(tz.tzlocal()) + + + **One** naive ``datetime``, to get that datetime in UTC:: + + >>> arrow.get(datetime(2013, 5, 5)) + + + **One** aware ``datetime``, to get that datetime:: + + >>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal())) + + + **One** naive ``date``, to get that date in UTC:: + + >>> arrow.get(date(2013, 5, 5)) + + + **One** time.struct time:: + + >>> arrow.get(gmtime(0)) + + + **One** iso calendar ``tuple``, to get that week date in UTC:: + + >>> arrow.get((2013, 18, 7)) + + + **Two** arguments, a naive or aware ``datetime``, and a replacement + :ref:`timezone expression `:: + + >>> arrow.get(datetime(2013, 5, 5), 'US/Pacific') + + + **Two** arguments, a naive ``date``, and a replacement + :ref:`timezone expression `:: + + >>> arrow.get(date(2013, 5, 5), 'US/Pacific') + + + **Two** arguments, both ``str``, to parse the first according to the format of the second:: + + >>> arrow.get('2013-05-05 12:30:45 America/Chicago', 'YYYY-MM-DD HH:mm:ss ZZZ') + + + **Two** arguments, first a ``str`` to parse and second a ``list`` of formats to try:: + + >>> arrow.get('2013-05-05 12:30:45', ['MM/DD/YYYY', 'YYYY-MM-DD HH:mm:ss']) + + + **Three or more** arguments, as for the constructor of a ``datetime``:: + + >>> arrow.get(2013, 5, 5, 12, 30, 45) + + + """ + + arg_count = len(args) + locale = kwargs.pop("locale", "en_us") + tz = kwargs.get("tzinfo", None) + normalize_whitespace = kwargs.pop("normalize_whitespace", False) + + # if kwargs given, send to constructor unless only tzinfo provided + if len(kwargs) > 1: + arg_count = 3 + + # tzinfo kwarg is not provided + if len(kwargs) == 1 and tz is None: + arg_count = 3 + + # () -> now, @ utc. + if arg_count == 0: + if isstr(tz): + tz = parser.TzinfoParser.parse(tz) + return self.type.now(tz) + + if isinstance(tz, dt_tzinfo): + return self.type.now(tz) + + return self.type.utcnow() + + if arg_count == 1: + arg = args[0] + + # (None) -> now, @ utc. 
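+            # (note: falsy numeric inputs such as 0 are valid timestamps and
+            # are handled by the timestamp branch below, not treated as "empty")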
+ if arg is None: + return self.type.utcnow() + + # try (int, float) -> from timestamp with tz + elif not isstr(arg) and is_timestamp(arg): + if tz is None: + # set to UTC by default + tz = dateutil_tz.tzutc() + return self.type.fromtimestamp(arg, tzinfo=tz) + + # (Arrow) -> from the object's datetime. + elif isinstance(arg, Arrow): + return self.type.fromdatetime(arg.datetime) + + # (datetime) -> from datetime. + elif isinstance(arg, datetime): + return self.type.fromdatetime(arg) + + # (date) -> from date. + elif isinstance(arg, date): + return self.type.fromdate(arg) + + # (tzinfo) -> now, @ tzinfo. + elif isinstance(arg, dt_tzinfo): + return self.type.now(arg) + + # (str) -> parse. + elif isstr(arg): + dt = parser.DateTimeParser(locale).parse_iso(arg, normalize_whitespace) + return self.type.fromdatetime(dt, tz) + + # (struct_time) -> from struct_time + elif isinstance(arg, struct_time): + return self.type.utcfromtimestamp(calendar.timegm(arg)) + + # (iso calendar) -> convert then from date + elif isinstance(arg, tuple) and len(arg) == 3: + dt = iso_to_gregorian(*arg) + return self.type.fromdate(dt) + + else: + raise TypeError( + "Can't parse single argument of type '{}'".format(type(arg)) + ) + + elif arg_count == 2: + + arg_1, arg_2 = args[0], args[1] + + if isinstance(arg_1, datetime): + + # (datetime, tzinfo/str) -> fromdatetime replace tzinfo. + if isinstance(arg_2, dt_tzinfo) or isstr(arg_2): + return self.type.fromdatetime(arg_1, arg_2) + else: + raise TypeError( + "Can't parse two arguments of types 'datetime', '{}'".format( + type(arg_2) + ) + ) + + elif isinstance(arg_1, date): + + # (date, tzinfo/str) -> fromdate replace tzinfo. + if isinstance(arg_2, dt_tzinfo) or isstr(arg_2): + return self.type.fromdate(arg_1, tzinfo=arg_2) + else: + raise TypeError( + "Can't parse two arguments of types 'date', '{}'".format( + type(arg_2) + ) + ) + + # (str, format) -> parse. + elif isstr(arg_1) and (isstr(arg_2) or isinstance(arg_2, list)): + dt = parser.DateTimeParser(locale).parse( + args[0], args[1], normalize_whitespace + ) + return self.type.fromdatetime(dt, tzinfo=tz) + + else: + raise TypeError( + "Can't parse two arguments of types '{}' and '{}'".format( + type(arg_1), type(arg_2) + ) + ) + + # 3+ args -> datetime-like via constructor. + else: + return self.type(*args, **kwargs) + + def utcnow(self): + """Returns an :class:`Arrow ` object, representing "now" in UTC time. + + Usage:: + + >>> import arrow + >>> arrow.utcnow() + + """ + + return self.type.utcnow() + + def now(self, tz=None): + """Returns an :class:`Arrow ` object, representing "now" in the given + timezone. + + :param tz: (optional) A :ref:`timezone expression `. Defaults to local time. 
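+
+        String expressions are parsed with ``parser.TzinfoParser``, so the
+        same forms accepted by ``get`` (IANA names, offsets, "local") work
+        here as well.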
+ + Usage:: + + >>> import arrow + >>> arrow.now() + + + >>> arrow.now('US/Pacific') + + + >>> arrow.now('+02:00') + + + >>> arrow.now('local') + + """ + + if tz is None: + tz = dateutil_tz.tzlocal() + elif not isinstance(tz, dt_tzinfo): + tz = parser.TzinfoParser.parse(tz) + + return self.type.now(tz) diff --git a/pype/modules/ftrack/python2_vendor/arrow/formatter.py b/pype/modules/ftrack/python2_vendor/arrow/formatter.py new file mode 100644 index 0000000000..9f9d7a44da --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/formatter.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division + +import calendar +import re + +from dateutil import tz as dateutil_tz + +from arrow import locales, util + +FORMAT_ATOM = "YYYY-MM-DD HH:mm:ssZZ" +FORMAT_COOKIE = "dddd, DD-MMM-YYYY HH:mm:ss ZZZ" +FORMAT_RFC822 = "ddd, DD MMM YY HH:mm:ss Z" +FORMAT_RFC850 = "dddd, DD-MMM-YY HH:mm:ss ZZZ" +FORMAT_RFC1036 = "ddd, DD MMM YY HH:mm:ss Z" +FORMAT_RFC1123 = "ddd, DD MMM YYYY HH:mm:ss Z" +FORMAT_RFC2822 = "ddd, DD MMM YYYY HH:mm:ss Z" +FORMAT_RFC3339 = "YYYY-MM-DD HH:mm:ssZZ" +FORMAT_RSS = "ddd, DD MMM YYYY HH:mm:ss Z" +FORMAT_W3C = "YYYY-MM-DD HH:mm:ssZZ" + + +class DateTimeFormatter(object): + + # This pattern matches characters enclosed in square brackets are matched as + # an atomic group. For more info on atomic groups and how to they are + # emulated in Python's re library, see https://stackoverflow.com/a/13577411/2701578 + + _FORMAT_RE = re.compile( + r"(\[(?:(?=(?P[^]]))(?P=literal))*\]|YYY?Y?|MM?M?M?|Do|DD?D?D?|d?dd?d?|HH?|hh?|mm?|ss?|SS?S?S?S?S?|ZZ?Z?|a|A|X|x|W)" + ) + + def __init__(self, locale="en_us"): + + self.locale = locales.get_locale(locale) + + def format(cls, dt, fmt): + + return cls._FORMAT_RE.sub(lambda m: cls._format_token(dt, m.group(0)), fmt) + + def _format_token(self, dt, token): + + if token and token.startswith("[") and token.endswith("]"): + return token[1:-1] + + if token == "YYYY": + return self.locale.year_full(dt.year) + if token == "YY": + return self.locale.year_abbreviation(dt.year) + + if token == "MMMM": + return self.locale.month_name(dt.month) + if token == "MMM": + return self.locale.month_abbreviation(dt.month) + if token == "MM": + return "{:02d}".format(dt.month) + if token == "M": + return str(dt.month) + + if token == "DDDD": + return "{:03d}".format(dt.timetuple().tm_yday) + if token == "DDD": + return str(dt.timetuple().tm_yday) + if token == "DD": + return "{:02d}".format(dt.day) + if token == "D": + return str(dt.day) + + if token == "Do": + return self.locale.ordinal_number(dt.day) + + if token == "dddd": + return self.locale.day_name(dt.isoweekday()) + if token == "ddd": + return self.locale.day_abbreviation(dt.isoweekday()) + if token == "d": + return str(dt.isoweekday()) + + if token == "HH": + return "{:02d}".format(dt.hour) + if token == "H": + return str(dt.hour) + if token == "hh": + return "{:02d}".format(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12)) + if token == "h": + return str(dt.hour if 0 < dt.hour < 13 else abs(dt.hour - 12)) + + if token == "mm": + return "{:02d}".format(dt.minute) + if token == "m": + return str(dt.minute) + + if token == "ss": + return "{:02d}".format(dt.second) + if token == "s": + return str(dt.second) + + if token == "SSSSSS": + return str("{:06d}".format(int(dt.microsecond))) + if token == "SSSSS": + return str("{:05d}".format(int(dt.microsecond / 10))) + if token == "SSSS": + return str("{:04d}".format(int(dt.microsecond / 100))) + if token == "SSS": + return 
str("{:03d}".format(int(dt.microsecond / 1000))) + if token == "SS": + return str("{:02d}".format(int(dt.microsecond / 10000))) + if token == "S": + return str(int(dt.microsecond / 100000)) + + if token == "X": + # TODO: replace with a call to dt.timestamp() when we drop Python 2.7 + return str(calendar.timegm(dt.utctimetuple())) + + if token == "x": + # TODO: replace with a call to dt.timestamp() when we drop Python 2.7 + ts = calendar.timegm(dt.utctimetuple()) + (dt.microsecond / 1000000) + return str(int(ts * 1000000)) + + if token == "ZZZ": + return dt.tzname() + + if token in ["ZZ", "Z"]: + separator = ":" if token == "ZZ" else "" + tz = dateutil_tz.tzutc() if dt.tzinfo is None else dt.tzinfo + total_minutes = int(util.total_seconds(tz.utcoffset(dt)) / 60) + + sign = "+" if total_minutes >= 0 else "-" + total_minutes = abs(total_minutes) + hour, minute = divmod(total_minutes, 60) + + return "{}{:02d}{}{:02d}".format(sign, hour, separator, minute) + + if token in ("a", "A"): + return self.locale.meridian(dt.hour, token) + + if token == "W": + year, week, day = dt.isocalendar() + return "{}-W{:02d}-{}".format(year, week, day) diff --git a/pype/modules/ftrack/python2_vendor/arrow/locales.py b/pype/modules/ftrack/python2_vendor/arrow/locales.py new file mode 100644 index 0000000000..6833da5a78 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/locales.py @@ -0,0 +1,4267 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +import inspect +import sys +from math import trunc + + +def get_locale(name): + """Returns an appropriate :class:`Locale ` + corresponding to an inpute locale name. + + :param name: the name of the locale. + + """ + + locale_cls = _locales.get(name.lower()) + + if locale_cls is None: + raise ValueError("Unsupported locale '{}'".format(name)) + + return locale_cls() + + +def get_locale_by_class_name(name): + """Returns an appropriate :class:`Locale ` + corresponding to an locale class name. + + :param name: the name of the locale class. + + """ + locale_cls = globals().get(name) + + if locale_cls is None: + raise ValueError("Unsupported locale '{}'".format(name)) + + return locale_cls() + + +# base locale type. + + +class Locale(object): + """ Represents locale-specific data and functionality. """ + + names = [] + + timeframes = { + "now": "", + "second": "", + "seconds": "", + "minute": "", + "minutes": "", + "hour": "", + "hours": "", + "day": "", + "days": "", + "week": "", + "weeks": "", + "month": "", + "months": "", + "year": "", + "years": "", + } + + meridians = {"am": "", "pm": "", "AM": "", "PM": ""} + + past = None + future = None + and_word = None + + month_names = [] + month_abbreviations = [] + + day_names = [] + day_abbreviations = [] + + ordinal_day_re = r"(\d+)" + + def __init__(self): + + self._month_name_to_ordinal = None + + def describe(self, timeframe, delta=0, only_distance=False): + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. + :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + humanized = self._format_timeframe(timeframe, delta) + if not only_distance: + humanized = self._format_relative(humanized, timeframe, delta) + + return humanized + + def describe_multi(self, timeframes, only_distance=False): + """Describes a delta within multiple timeframes in plain language. 
+ + :param timeframes: a list of string, quantity pairs each representing a timeframe and delta. + :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords + """ + + humanized = "" + for index, (timeframe, delta) in enumerate(timeframes): + humanized += self._format_timeframe(timeframe, delta) + if index == len(timeframes) - 2 and self.and_word: + humanized += " " + self.and_word + " " + elif index < len(timeframes) - 1: + humanized += " " + + if not only_distance: + humanized = self._format_relative(humanized, timeframe, delta) + + return humanized + + def day_name(self, day): + """Returns the day name for a specified day of the week. + + :param day: the ``int`` day of the week (1-7). + + """ + + return self.day_names[day] + + def day_abbreviation(self, day): + """Returns the day abbreviation for a specified day of the week. + + :param day: the ``int`` day of the week (1-7). + + """ + + return self.day_abbreviations[day] + + def month_name(self, month): + """Returns the month name for a specified month of the year. + + :param month: the ``int`` month of the year (1-12). + + """ + + return self.month_names[month] + + def month_abbreviation(self, month): + """Returns the month abbreviation for a specified month of the year. + + :param month: the ``int`` month of the year (1-12). + + """ + + return self.month_abbreviations[month] + + def month_number(self, name): + """Returns the month number for a month specified by name or abbreviation. + + :param name: the month name or abbreviation. + + """ + + if self._month_name_to_ordinal is None: + self._month_name_to_ordinal = self._name_to_ordinal(self.month_names) + self._month_name_to_ordinal.update( + self._name_to_ordinal(self.month_abbreviations) + ) + + return self._month_name_to_ordinal.get(name) + + def year_full(self, year): + """Returns the year for specific locale if available + + :param name: the ``int`` year (4-digit) + """ + return "{:04d}".format(year) + + def year_abbreviation(self, year): + """Returns the year for specific locale if available + + :param name: the ``int`` year (4-digit) + """ + return "{:04d}".format(year)[2:] + + def meridian(self, hour, token): + """Returns the meridian indicator for a specified hour and format token. + + :param hour: the ``int`` hour of the day. + :param token: the format token. + """ + + if token == "a": + return self.meridians["am"] if hour < 12 else self.meridians["pm"] + if token == "A": + return self.meridians["AM"] if hour < 12 else self.meridians["PM"] + + def ordinal_number(self, n): + """Returns the ordinal format of a given integer + + :param n: an integer + """ + return self._ordinal_number(n) + + def _ordinal_number(self, n): + return "{}".format(n) + + def _name_to_ordinal(self, lst): + return dict(map(lambda i: (i[1].lower(), i[0] + 1), enumerate(lst[1:]))) + + def _format_timeframe(self, timeframe, delta): + return self.timeframes[timeframe].format(trunc(abs(delta))) + + def _format_relative(self, humanized, timeframe, delta): + + if timeframe == "now": + return humanized + + direction = self.past if delta < 0 else self.future + + return direction.format(humanized) + + +# base locale type implementations. 
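+
+# A minimal sketch of how the pieces above fit together, assuming the
+# ``_locales`` name-to-class mapping that is built at the bottom of this
+# module:
+#
+#   >>> locale = get_locale("en_us")
+#   >>> locale.describe("hours", delta=2)
+#   'in 2 hours'
+#   >>> locale.describe("hours", delta=-2, only_distance=True)
+#   '2 hours'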
+ + +class EnglishLocale(Locale): + + names = [ + "en", + "en_us", + "en_gb", + "en_au", + "en_be", + "en_jp", + "en_za", + "en_ca", + "en_ph", + ] + + past = "{0} ago" + future = "in {0}" + and_word = "and" + + timeframes = { + "now": "just now", + "second": "a second", + "seconds": "{0} seconds", + "minute": "a minute", + "minutes": "{0} minutes", + "hour": "an hour", + "hours": "{0} hours", + "day": "a day", + "days": "{0} days", + "week": "a week", + "weeks": "{0} weeks", + "month": "a month", + "months": "{0} months", + "year": "a year", + "years": "{0} years", + } + + meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"} + + month_names = [ + "", + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + + day_names = [ + "", + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + + ordinal_day_re = r"((?P[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))" + + def _ordinal_number(self, n): + if n % 100 not in (11, 12, 13): + remainder = abs(n) % 10 + if remainder == 1: + return "{}st".format(n) + elif remainder == 2: + return "{}nd".format(n) + elif remainder == 3: + return "{}rd".format(n) + return "{}th".format(n) + + def describe(self, timeframe, delta=0, only_distance=False): + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. 
+ :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + humanized = super(EnglishLocale, self).describe(timeframe, delta, only_distance) + if only_distance and timeframe == "now": + humanized = "instantly" + + return humanized + + +class ItalianLocale(Locale): + names = ["it", "it_it"] + past = "{0} fa" + future = "tra {0}" + and_word = "e" + + timeframes = { + "now": "adesso", + "second": "un secondo", + "seconds": "{0} qualche secondo", + "minute": "un minuto", + "minutes": "{0} minuti", + "hour": "un'ora", + "hours": "{0} ore", + "day": "un giorno", + "days": "{0} giorni", + "week": "una settimana,", + "weeks": "{0} settimane", + "month": "un mese", + "months": "{0} mesi", + "year": "un anno", + "years": "{0} anni", + } + + month_names = [ + "", + "gennaio", + "febbraio", + "marzo", + "aprile", + "maggio", + "giugno", + "luglio", + "agosto", + "settembre", + "ottobre", + "novembre", + "dicembre", + ] + month_abbreviations = [ + "", + "gen", + "feb", + "mar", + "apr", + "mag", + "giu", + "lug", + "ago", + "set", + "ott", + "nov", + "dic", + ] + + day_names = [ + "", + "lunedì", + "martedì", + "mercoledì", + "giovedì", + "venerdì", + "sabato", + "domenica", + ] + day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"] + + ordinal_day_re = r"((?P[1-3]?[0-9](?=[ºª]))[ºª])" + + def _ordinal_number(self, n): + return "{}º".format(n) + + +class SpanishLocale(Locale): + names = ["es", "es_es"] + past = "hace {0}" + future = "en {0}" + and_word = "y" + + timeframes = { + "now": "ahora", + "second": "un segundo", + "seconds": "{0} segundos", + "minute": "un minuto", + "minutes": "{0} minutos", + "hour": "una hora", + "hours": "{0} horas", + "day": "un día", + "days": "{0} días", + "week": "una semana", + "weeks": "{0} semanas", + "month": "un mes", + "months": "{0} meses", + "year": "un año", + "years": "{0} años", + } + + meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"} + + month_names = [ + "", + "enero", + "febrero", + "marzo", + "abril", + "mayo", + "junio", + "julio", + "agosto", + "septiembre", + "octubre", + "noviembre", + "diciembre", + ] + month_abbreviations = [ + "", + "ene", + "feb", + "mar", + "abr", + "may", + "jun", + "jul", + "ago", + "sep", + "oct", + "nov", + "dic", + ] + + day_names = [ + "", + "lunes", + "martes", + "miércoles", + "jueves", + "viernes", + "sábado", + "domingo", + ] + day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"] + + ordinal_day_re = r"((?P[1-3]?[0-9](?=[ºª]))[ºª])" + + def _ordinal_number(self, n): + return "{}º".format(n) + + +class FrenchBaseLocale(Locale): + + past = "il y a {0}" + future = "dans {0}" + and_word = "et" + + timeframes = { + "now": "maintenant", + "second": "une seconde", + "seconds": "{0} quelques secondes", + "minute": "une minute", + "minutes": "{0} minutes", + "hour": "une heure", + "hours": "{0} heures", + "day": "un jour", + "days": "{0} jours", + "week": "une semaine", + "weeks": "{0} semaines", + "month": "un mois", + "months": "{0} mois", + "year": "un an", + "years": "{0} ans", + } + + month_names = [ + "", + "janvier", + "février", + "mars", + "avril", + "mai", + "juin", + "juillet", + "août", + "septembre", + "octobre", + "novembre", + "décembre", + ] + + day_names = [ + "", + "lundi", + "mardi", + "mercredi", + "jeudi", + "vendredi", + "samedi", + "dimanche", + ] + day_abbreviations = ["", "lun", "mar", "mer", "jeu", "ven", "sam", "dim"] + + ordinal_day_re = ( + r"((?P\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)" + 
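+        # matches "1er" plus ordinals written with a plain "e" suffix
+        # (2e, 3e, 21e, ...)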
) + + def _ordinal_number(self, n): + if abs(n) == 1: + return "{}er".format(n) + return "{}e".format(n) + + +class FrenchLocale(FrenchBaseLocale, Locale): + + names = ["fr", "fr_fr"] + + month_abbreviations = [ + "", + "janv", + "févr", + "mars", + "avr", + "mai", + "juin", + "juil", + "août", + "sept", + "oct", + "nov", + "déc", + ] + + +class FrenchCanadianLocale(FrenchBaseLocale, Locale): + + names = ["fr_ca"] + + month_abbreviations = [ + "", + "janv", + "févr", + "mars", + "avr", + "mai", + "juin", + "juill", + "août", + "sept", + "oct", + "nov", + "déc", + ] + + +class GreekLocale(Locale): + + names = ["el", "el_gr"] + + past = "{0} πριν" + future = "σε {0}" + and_word = "και" + + timeframes = { + "now": "τώρα", + "second": "ένα δεύτερο", + "seconds": "{0} δευτερόλεπτα", + "minute": "ένα λεπτό", + "minutes": "{0} λεπτά", + "hour": "μία ώρα", + "hours": "{0} ώρες", + "day": "μία μέρα", + "days": "{0} μέρες", + "month": "ένα μήνα", + "months": "{0} μήνες", + "year": "ένα χρόνο", + "years": "{0} χρόνια", + } + + month_names = [ + "", + "Ιανουαρίου", + "Φεβρουαρίου", + "Μαρτίου", + "Απριλίου", + "Μαΐου", + "Ιουνίου", + "Ιουλίου", + "Αυγούστου", + "Σεπτεμβρίου", + "Οκτωβρίου", + "Νοεμβρίου", + "Δεκεμβρίου", + ] + month_abbreviations = [ + "", + "Ιαν", + "Φεβ", + "Μαρ", + "Απρ", + "Μαϊ", + "Ιον", + "Ιολ", + "Αυγ", + "Σεπ", + "Οκτ", + "Νοε", + "Δεκ", + ] + + day_names = [ + "", + "Δευτέρα", + "Τρίτη", + "Τετάρτη", + "Πέμπτη", + "Παρασκευή", + "Σάββατο", + "Κυριακή", + ] + day_abbreviations = ["", "Δευ", "Τρι", "Τετ", "Πεμ", "Παρ", "Σαβ", "Κυρ"] + + +class JapaneseLocale(Locale): + + names = ["ja", "ja_jp"] + + past = "{0}前" + future = "{0}後" + + timeframes = { + "now": "現在", + "second": "二番目の", + "seconds": "{0}数秒", + "minute": "1分", + "minutes": "{0}分", + "hour": "1時間", + "hours": "{0}時間", + "day": "1日", + "days": "{0}日", + "week": "1週間", + "weeks": "{0}週間", + "month": "1ヶ月", + "months": "{0}ヶ月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "1月", + "2月", + "3月", + "4月", + "5月", + "6月", + "7月", + "8月", + "9月", + "10月", + "11月", + "12月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日", "日曜日"] + day_abbreviations = ["", "月", "火", "水", "木", "金", "土", "日"] + + +class SwedishLocale(Locale): + + names = ["sv", "sv_se"] + + past = "för {0} sen" + future = "om {0}" + and_word = "och" + + timeframes = { + "now": "just nu", + "second": "en sekund", + "seconds": "{0} några sekunder", + "minute": "en minut", + "minutes": "{0} minuter", + "hour": "en timme", + "hours": "{0} timmar", + "day": "en dag", + "days": "{0} dagar", + "week": "en vecka", + "weeks": "{0} veckor", + "month": "en månad", + "months": "{0} månader", + "year": "ett år", + "years": "{0} år", + } + + month_names = [ + "", + "januari", + "februari", + "mars", + "april", + "maj", + "juni", + "juli", + "augusti", + "september", + "oktober", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "måndag", + "tisdag", + "onsdag", + "torsdag", + "fredag", + "lördag", + "söndag", + ] + day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"] + + +class FinnishLocale(Locale): + + names = ["fi", "fi_fi"] + + # The finnish grammar is very complex, and its hard to convert + # 1-to-1 to something like English. 
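+    # Each timeframe below is therefore a [nominative, genitive] pair;
+    # _format_relative() picks index 0 for the past and index 1 for the future.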
+ + past = "{0} sitten" + future = "{0} kuluttua" + + timeframes = { + "now": ["juuri nyt", "juuri nyt"], + "second": ["sekunti", "sekunti"], + "seconds": ["{0} muutama sekunti", "{0} muutaman sekunnin"], + "minute": ["minuutti", "minuutin"], + "minutes": ["{0} minuuttia", "{0} minuutin"], + "hour": ["tunti", "tunnin"], + "hours": ["{0} tuntia", "{0} tunnin"], + "day": ["päivä", "päivä"], + "days": ["{0} päivää", "{0} päivän"], + "month": ["kuukausi", "kuukauden"], + "months": ["{0} kuukautta", "{0} kuukauden"], + "year": ["vuosi", "vuoden"], + "years": ["{0} vuotta", "{0} vuoden"], + } + + # Months and days are lowercase in Finnish + month_names = [ + "", + "tammikuu", + "helmikuu", + "maaliskuu", + "huhtikuu", + "toukokuu", + "kesäkuu", + "heinäkuu", + "elokuu", + "syyskuu", + "lokakuu", + "marraskuu", + "joulukuu", + ] + + month_abbreviations = [ + "", + "tammi", + "helmi", + "maalis", + "huhti", + "touko", + "kesä", + "heinä", + "elo", + "syys", + "loka", + "marras", + "joulu", + ] + + day_names = [ + "", + "maanantai", + "tiistai", + "keskiviikko", + "torstai", + "perjantai", + "lauantai", + "sunnuntai", + ] + + day_abbreviations = ["", "ma", "ti", "ke", "to", "pe", "la", "su"] + + def _format_timeframe(self, timeframe, delta): + return ( + self.timeframes[timeframe][0].format(abs(delta)), + self.timeframes[timeframe][1].format(abs(delta)), + ) + + def _format_relative(self, humanized, timeframe, delta): + if timeframe == "now": + return humanized[0] + + direction = self.past if delta < 0 else self.future + which = 0 if delta < 0 else 1 + + return direction.format(humanized[which]) + + def _ordinal_number(self, n): + return "{}.".format(n) + + +class ChineseCNLocale(Locale): + + names = ["zh", "zh_cn"] + + past = "{0}前" + future = "{0}后" + + timeframes = { + "now": "刚才", + "second": "一秒", + "seconds": "{0}秒", + "minute": "1分钟", + "minutes": "{0}分钟", + "hour": "1小时", + "hours": "{0}小时", + "day": "1天", + "days": "{0}天", + "week": "一周", + "weeks": "{0}周", + "month": "1个月", + "months": "{0}个月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "一月", + "二月", + "三月", + "四月", + "五月", + "六月", + "七月", + "八月", + "九月", + "十月", + "十一月", + "十二月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"] + day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"] + + +class ChineseTWLocale(Locale): + + names = ["zh_tw"] + + past = "{0}前" + future = "{0}後" + and_word = "和" + + timeframes = { + "now": "剛才", + "second": "1秒", + "seconds": "{0}秒", + "minute": "1分鐘", + "minutes": "{0}分鐘", + "hour": "1小時", + "hours": "{0}小時", + "day": "1天", + "days": "{0}天", + "week": "1週", + "weeks": "{0}週", + "month": "1個月", + "months": "{0}個月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "1月", + "2月", + "3月", + "4月", + "5月", + "6月", + "7月", + "8月", + "9月", + "10月", + "11月", + "12月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "週一", "週二", "週三", "週四", "週五", "週六", "週日"] + day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"] + + +class HongKongLocale(Locale): + + names = ["zh_hk"] + + past = "{0}前" + future = "{0}後" + + timeframes = { + "now": "剛才", + "second": "1秒", + "seconds": "{0}秒", + "minute": "1分鐘", + "minutes": "{0}分鐘", + "hour": "1小時", + "hours": "{0}小時", + "day": "1天", + "days": "{0}天", + "week": "1星期", + 
"weeks": "{0}星期", + "month": "1個月", + "months": "{0}個月", + "year": "1年", + "years": "{0}年", + } + + month_names = [ + "", + "1月", + "2月", + "3月", + "4月", + "5月", + "6月", + "7月", + "8月", + "9月", + "10月", + "11月", + "12月", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"] + day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"] + + +class KoreanLocale(Locale): + + names = ["ko", "ko_kr"] + + past = "{0} 전" + future = "{0} 후" + + timeframes = { + "now": "지금", + "second": "1초", + "seconds": "{0}초", + "minute": "1분", + "minutes": "{0}분", + "hour": "한시간", + "hours": "{0}시간", + "day": "하루", + "days": "{0}일", + "week": "1주", + "weeks": "{0}주", + "month": "한달", + "months": "{0}개월", + "year": "1년", + "years": "{0}년", + } + + special_dayframes = { + -3: "그끄제", + -2: "그제", + -1: "어제", + 1: "내일", + 2: "모레", + 3: "글피", + 4: "그글피", + } + + special_yearframes = {-2: "제작년", -1: "작년", 1: "내년", 2: "내후년"} + + month_names = [ + "", + "1월", + "2월", + "3월", + "4월", + "5월", + "6월", + "7월", + "8월", + "9월", + "10월", + "11월", + "12월", + ] + month_abbreviations = [ + "", + " 1", + " 2", + " 3", + " 4", + " 5", + " 6", + " 7", + " 8", + " 9", + "10", + "11", + "12", + ] + + day_names = ["", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"] + day_abbreviations = ["", "월", "화", "수", "목", "금", "토", "일"] + + def _ordinal_number(self, n): + ordinals = ["0", "첫", "두", "세", "네", "다섯", "여섯", "일곱", "여덟", "아홉", "열"] + if n < len(ordinals): + return "{}번째".format(ordinals[n]) + return "{}번째".format(n) + + def _format_relative(self, humanized, timeframe, delta): + if timeframe in ("day", "days"): + special = self.special_dayframes.get(delta) + if special: + return special + elif timeframe in ("year", "years"): + special = self.special_yearframes.get(delta) + if special: + return special + + return super(KoreanLocale, self)._format_relative(humanized, timeframe, delta) + + +# derived locale types & implementations. +class DutchLocale(Locale): + + names = ["nl", "nl_nl"] + + past = "{0} geleden" + future = "over {0}" + + timeframes = { + "now": "nu", + "second": "een seconde", + "seconds": "{0} seconden", + "minute": "een minuut", + "minutes": "{0} minuten", + "hour": "een uur", + "hours": "{0} uur", + "day": "een dag", + "days": "{0} dagen", + "week": "een week", + "weeks": "{0} weken", + "month": "een maand", + "months": "{0} maanden", + "year": "een jaar", + "years": "{0} jaar", + } + + # In Dutch names of months and days are not starting with a capital letter + # like in the English language. 
+ month_names = [ + "", + "januari", + "februari", + "maart", + "april", + "mei", + "juni", + "juli", + "augustus", + "september", + "oktober", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mrt", + "apr", + "mei", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "maandag", + "dinsdag", + "woensdag", + "donderdag", + "vrijdag", + "zaterdag", + "zondag", + ] + day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"] + + +class SlavicBaseLocale(Locale): + def _format_timeframe(self, timeframe, delta): + + form = self.timeframes[timeframe] + delta = abs(delta) + + if isinstance(form, list): + + if delta % 10 == 1 and delta % 100 != 11: + form = form[0] + elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): + form = form[1] + else: + form = form[2] + + return form.format(delta) + + +class BelarusianLocale(SlavicBaseLocale): + + names = ["be", "be_by"] + + past = "{0} таму" + future = "праз {0}" + + timeframes = { + "now": "зараз", + "second": "секунду", + "seconds": "{0} некалькі секунд", + "minute": "хвіліну", + "minutes": ["{0} хвіліну", "{0} хвіліны", "{0} хвілін"], + "hour": "гадзіну", + "hours": ["{0} гадзіну", "{0} гадзіны", "{0} гадзін"], + "day": "дзень", + "days": ["{0} дзень", "{0} дні", "{0} дзён"], + "month": "месяц", + "months": ["{0} месяц", "{0} месяцы", "{0} месяцаў"], + "year": "год", + "years": ["{0} год", "{0} гады", "{0} гадоў"], + } + + month_names = [ + "", + "студзеня", + "лютага", + "сакавіка", + "красавіка", + "траўня", + "чэрвеня", + "ліпеня", + "жніўня", + "верасня", + "кастрычніка", + "лістапада", + "снежня", + ] + month_abbreviations = [ + "", + "студ", + "лют", + "сак", + "крас", + "трав", + "чэрв", + "ліп", + "жнів", + "вер", + "каст", + "ліст", + "снеж", + ] + + day_names = [ + "", + "панядзелак", + "аўторак", + "серада", + "чацвер", + "пятніца", + "субота", + "нядзеля", + ] + day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"] + + +class PolishLocale(SlavicBaseLocale): + + names = ["pl", "pl_pl"] + + past = "{0} temu" + future = "za {0}" + + # The nouns should be in genitive case (Polish: "dopełniacz") + # in order to correctly form `past` & `future` expressions. 
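+    # Plural entries are three-element lists; SlavicBaseLocale._format_timeframe()
+    # picks [0] when the delta ends in 1 (but not 11), [1] when it ends in 2-4
+    # (excluding 12-14), and [2] otherwise.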
+ timeframes = { + "now": "teraz", + "second": "sekundę", + "seconds": ["{0} sekund", "{0} sekundy", "{0} sekund"], + "minute": "minutę", + "minutes": ["{0} minut", "{0} minuty", "{0} minut"], + "hour": "godzinę", + "hours": ["{0} godzin", "{0} godziny", "{0} godzin"], + "day": "dzień", + "days": "{0} dni", + "week": "tydzień", + "weeks": ["{0} tygodni", "{0} tygodnie", "{0} tygodni"], + "month": "miesiąc", + "months": ["{0} miesięcy", "{0} miesiące", "{0} miesięcy"], + "year": "rok", + "years": ["{0} lat", "{0} lata", "{0} lat"], + } + + month_names = [ + "", + "styczeń", + "luty", + "marzec", + "kwiecień", + "maj", + "czerwiec", + "lipiec", + "sierpień", + "wrzesień", + "październik", + "listopad", + "grudzień", + ] + month_abbreviations = [ + "", + "sty", + "lut", + "mar", + "kwi", + "maj", + "cze", + "lip", + "sie", + "wrz", + "paź", + "lis", + "gru", + ] + + day_names = [ + "", + "poniedziałek", + "wtorek", + "środa", + "czwartek", + "piątek", + "sobota", + "niedziela", + ] + day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"] + + +class RussianLocale(SlavicBaseLocale): + + names = ["ru", "ru_ru"] + + past = "{0} назад" + future = "через {0}" + + timeframes = { + "now": "сейчас", + "second": "Второй", + "seconds": "{0} несколько секунд", + "minute": "минуту", + "minutes": ["{0} минуту", "{0} минуты", "{0} минут"], + "hour": "час", + "hours": ["{0} час", "{0} часа", "{0} часов"], + "day": "день", + "days": ["{0} день", "{0} дня", "{0} дней"], + "week": "неделю", + "weeks": ["{0} неделю", "{0} недели", "{0} недель"], + "month": "месяц", + "months": ["{0} месяц", "{0} месяца", "{0} месяцев"], + "year": "год", + "years": ["{0} год", "{0} года", "{0} лет"], + } + + month_names = [ + "", + "января", + "февраля", + "марта", + "апреля", + "мая", + "июня", + "июля", + "августа", + "сентября", + "октября", + "ноября", + "декабря", + ] + month_abbreviations = [ + "", + "янв", + "фев", + "мар", + "апр", + "май", + "июн", + "июл", + "авг", + "сен", + "окт", + "ноя", + "дек", + ] + + day_names = [ + "", + "понедельник", + "вторник", + "среда", + "четверг", + "пятница", + "суббота", + "воскресенье", + ] + day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "вс"] + + +class AfrikaansLocale(Locale): + + names = ["af", "af_nl"] + + past = "{0} gelede" + future = "in {0}" + + timeframes = { + "now": "nou", + "second": "n sekonde", + "seconds": "{0} sekondes", + "minute": "minuut", + "minutes": "{0} minute", + "hour": "uur", + "hours": "{0} ure", + "day": "een dag", + "days": "{0} dae", + "month": "een maand", + "months": "{0} maande", + "year": "een jaar", + "years": "{0} jaar", + } + + month_names = [ + "", + "Januarie", + "Februarie", + "Maart", + "April", + "Mei", + "Junie", + "Julie", + "Augustus", + "September", + "Oktober", + "November", + "Desember", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mrt", + "Apr", + "Mei", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Des", + ] + + day_names = [ + "", + "Maandag", + "Dinsdag", + "Woensdag", + "Donderdag", + "Vrydag", + "Saterdag", + "Sondag", + ] + day_abbreviations = ["", "Ma", "Di", "Wo", "Do", "Vr", "Za", "So"] + + +class BulgarianLocale(SlavicBaseLocale): + + names = ["bg", "bg_BG"] + + past = "{0} назад" + future = "напред {0}" + + timeframes = { + "now": "сега", + "second": "секунда", + "seconds": "{0} няколко секунди", + "minute": "минута", + "minutes": ["{0} минута", "{0} минути", "{0} минути"], + "hour": "час", + "hours": ["{0} час", "{0} часа", "{0} часа"], + "day": "ден", + "days": ["{0} ден", 
"{0} дни", "{0} дни"], + "month": "месец", + "months": ["{0} месец", "{0} месеца", "{0} месеца"], + "year": "година", + "years": ["{0} година", "{0} години", "{0} години"], + } + + month_names = [ + "", + "януари", + "февруари", + "март", + "април", + "май", + "юни", + "юли", + "август", + "септември", + "октомври", + "ноември", + "декември", + ] + month_abbreviations = [ + "", + "ян", + "февр", + "март", + "апр", + "май", + "юни", + "юли", + "авг", + "септ", + "окт", + "ноем", + "дек", + ] + + day_names = [ + "", + "понеделник", + "вторник", + "сряда", + "четвъртък", + "петък", + "събота", + "неделя", + ] + day_abbreviations = ["", "пон", "вт", "ср", "четв", "пет", "съб", "нед"] + + +class UkrainianLocale(SlavicBaseLocale): + + names = ["ua", "uk_ua"] + + past = "{0} тому" + future = "за {0}" + + timeframes = { + "now": "зараз", + "second": "секунда", + "seconds": "{0} кілька секунд", + "minute": "хвилину", + "minutes": ["{0} хвилину", "{0} хвилини", "{0} хвилин"], + "hour": "годину", + "hours": ["{0} годину", "{0} години", "{0} годин"], + "day": "день", + "days": ["{0} день", "{0} дні", "{0} днів"], + "month": "місяць", + "months": ["{0} місяць", "{0} місяці", "{0} місяців"], + "year": "рік", + "years": ["{0} рік", "{0} роки", "{0} років"], + } + + month_names = [ + "", + "січня", + "лютого", + "березня", + "квітня", + "травня", + "червня", + "липня", + "серпня", + "вересня", + "жовтня", + "листопада", + "грудня", + ] + month_abbreviations = [ + "", + "січ", + "лют", + "бер", + "квіт", + "трав", + "черв", + "лип", + "серп", + "вер", + "жовт", + "лист", + "груд", + ] + + day_names = [ + "", + "понеділок", + "вівторок", + "середа", + "четвер", + "п’ятниця", + "субота", + "неділя", + ] + day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"] + + +class MacedonianLocale(SlavicBaseLocale): + names = ["mk", "mk_mk"] + + past = "пред {0}" + future = "за {0}" + + timeframes = { + "now": "сега", + "second": "една секунда", + "seconds": ["{0} секунда", "{0} секунди", "{0} секунди"], + "minute": "една минута", + "minutes": ["{0} минута", "{0} минути", "{0} минути"], + "hour": "еден саат", + "hours": ["{0} саат", "{0} саати", "{0} саати"], + "day": "еден ден", + "days": ["{0} ден", "{0} дена", "{0} дена"], + "week": "една недела", + "weeks": ["{0} недела", "{0} недели", "{0} недели"], + "month": "еден месец", + "months": ["{0} месец", "{0} месеци", "{0} месеци"], + "year": "една година", + "years": ["{0} година", "{0} години", "{0} години"], + } + + meridians = {"am": "дп", "pm": "пп", "AM": "претпладне", "PM": "попладне"} + + month_names = [ + "", + "Јануари", + "Февруари", + "Март", + "Април", + "Мај", + "Јуни", + "Јули", + "Август", + "Септември", + "Октомври", + "Ноември", + "Декември", + ] + month_abbreviations = [ + "", + "Јан", + "Фев", + "Мар", + "Апр", + "Мај", + "Јун", + "Јул", + "Авг", + "Септ", + "Окт", + "Ноем", + "Декем", + ] + + day_names = [ + "", + "Понеделник", + "Вторник", + "Среда", + "Четврток", + "Петок", + "Сабота", + "Недела", + ] + day_abbreviations = [ + "", + "Пон", + "Вт", + "Сре", + "Чет", + "Пет", + "Саб", + "Нед", + ] + + +class GermanBaseLocale(Locale): + + past = "vor {0}" + future = "in {0}" + and_word = "und" + + timeframes = { + "now": "gerade eben", + "second": "eine Sekunde", + "seconds": "{0} Sekunden", + "minute": "einer Minute", + "minutes": "{0} Minuten", + "hour": "einer Stunde", + "hours": "{0} Stunden", + "day": "einem Tag", + "days": "{0} Tagen", + "week": "einer Woche", + "weeks": "{0} Wochen", + "month": "einem Monat", + "months": "{0} 
Monaten", + "year": "einem Jahr", + "years": "{0} Jahren", + } + + timeframes_only_distance = timeframes.copy() + timeframes_only_distance["minute"] = "eine Minute" + timeframes_only_distance["hour"] = "eine Stunde" + timeframes_only_distance["day"] = "ein Tag" + timeframes_only_distance["week"] = "eine Woche" + timeframes_only_distance["month"] = "ein Monat" + timeframes_only_distance["year"] = "ein Jahr" + + month_names = [ + "", + "Januar", + "Februar", + "März", + "April", + "Mai", + "Juni", + "Juli", + "August", + "September", + "Oktober", + "November", + "Dezember", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mär", + "Apr", + "Mai", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Dez", + ] + + day_names = [ + "", + "Montag", + "Dienstag", + "Mittwoch", + "Donnerstag", + "Freitag", + "Samstag", + "Sonntag", + ] + + day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"] + + def _ordinal_number(self, n): + return "{}.".format(n) + + def describe(self, timeframe, delta=0, only_distance=False): + """Describes a delta within a timeframe in plain language. + + :param timeframe: a string representing a timeframe. + :param delta: a quantity representing a delta in a timeframe. + :param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords + """ + + if not only_distance: + return super(GermanBaseLocale, self).describe( + timeframe, delta, only_distance + ) + + # German uses a different case without 'in' or 'ago' + humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta))) + + return humanized + + +class GermanLocale(GermanBaseLocale, Locale): + + names = ["de", "de_de"] + + +class SwissLocale(GermanBaseLocale, Locale): + + names = ["de_ch"] + + +class AustrianLocale(GermanBaseLocale, Locale): + + names = ["de_at"] + + month_names = [ + "", + "Jänner", + "Februar", + "März", + "April", + "Mai", + "Juni", + "Juli", + "August", + "September", + "Oktober", + "November", + "Dezember", + ] + + +class NorwegianLocale(Locale): + + names = ["nb", "nb_no"] + + past = "for {0} siden" + future = "om {0}" + + timeframes = { + "now": "nå nettopp", + "second": "et sekund", + "seconds": "{0} noen sekunder", + "minute": "ett minutt", + "minutes": "{0} minutter", + "hour": "en time", + "hours": "{0} timer", + "day": "en dag", + "days": "{0} dager", + "month": "en måned", + "months": "{0} måneder", + "year": "ett år", + "years": "{0} år", + } + + month_names = [ + "", + "januar", + "februar", + "mars", + "april", + "mai", + "juni", + "juli", + "august", + "september", + "oktober", + "november", + "desember", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "mai", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "des", + ] + + day_names = [ + "", + "mandag", + "tirsdag", + "onsdag", + "torsdag", + "fredag", + "lørdag", + "søndag", + ] + day_abbreviations = ["", "ma", "ti", "on", "to", "fr", "lø", "sø"] + + +class NewNorwegianLocale(Locale): + + names = ["nn", "nn_no"] + + past = "for {0} sidan" + future = "om {0}" + + timeframes = { + "now": "no nettopp", + "second": "et sekund", + "seconds": "{0} nokre sekund", + "minute": "ett minutt", + "minutes": "{0} minutt", + "hour": "ein time", + "hours": "{0} timar", + "day": "ein dag", + "days": "{0} dagar", + "month": "en månad", + "months": "{0} månader", + "year": "eit år", + "years": "{0} år", + } + + month_names = [ + "", + "januar", + "februar", + "mars", + "april", + "mai", + "juni", + "juli", + "august", + "september", + "oktober", + "november", 
+ "desember", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "mai", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "des", + ] + + day_names = [ + "", + "måndag", + "tysdag", + "onsdag", + "torsdag", + "fredag", + "laurdag", + "sundag", + ] + day_abbreviations = ["", "må", "ty", "on", "to", "fr", "la", "su"] + + +class PortugueseLocale(Locale): + names = ["pt", "pt_pt"] + + past = "há {0}" + future = "em {0}" + and_word = "e" + + timeframes = { + "now": "agora", + "second": "um segundo", + "seconds": "{0} segundos", + "minute": "um minuto", + "minutes": "{0} minutos", + "hour": "uma hora", + "hours": "{0} horas", + "day": "um dia", + "days": "{0} dias", + "week": "uma semana", + "weeks": "{0} semanas", + "month": "um mês", + "months": "{0} meses", + "year": "um ano", + "years": "{0} anos", + } + + month_names = [ + "", + "Janeiro", + "Fevereiro", + "Março", + "Abril", + "Maio", + "Junho", + "Julho", + "Agosto", + "Setembro", + "Outubro", + "Novembro", + "Dezembro", + ] + month_abbreviations = [ + "", + "Jan", + "Fev", + "Mar", + "Abr", + "Mai", + "Jun", + "Jul", + "Ago", + "Set", + "Out", + "Nov", + "Dez", + ] + + day_names = [ + "", + "Segunda-feira", + "Terça-feira", + "Quarta-feira", + "Quinta-feira", + "Sexta-feira", + "Sábado", + "Domingo", + ] + day_abbreviations = ["", "Seg", "Ter", "Qua", "Qui", "Sex", "Sab", "Dom"] + + +class BrazilianPortugueseLocale(PortugueseLocale): + names = ["pt_br"] + + past = "faz {0}" + + +class TagalogLocale(Locale): + + names = ["tl", "tl_ph"] + + past = "nakaraang {0}" + future = "{0} mula ngayon" + + timeframes = { + "now": "ngayon lang", + "second": "isang segundo", + "seconds": "{0} segundo", + "minute": "isang minuto", + "minutes": "{0} minuto", + "hour": "isang oras", + "hours": "{0} oras", + "day": "isang araw", + "days": "{0} araw", + "week": "isang linggo", + "weeks": "{0} linggo", + "month": "isang buwan", + "months": "{0} buwan", + "year": "isang taon", + "years": "{0} taon", + } + + month_names = [ + "", + "Enero", + "Pebrero", + "Marso", + "Abril", + "Mayo", + "Hunyo", + "Hulyo", + "Agosto", + "Setyembre", + "Oktubre", + "Nobyembre", + "Disyembre", + ] + month_abbreviations = [ + "", + "Ene", + "Peb", + "Mar", + "Abr", + "May", + "Hun", + "Hul", + "Ago", + "Set", + "Okt", + "Nob", + "Dis", + ] + + day_names = [ + "", + "Lunes", + "Martes", + "Miyerkules", + "Huwebes", + "Biyernes", + "Sabado", + "Linggo", + ] + day_abbreviations = ["", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab", "Lin"] + + meridians = {"am": "nu", "pm": "nh", "AM": "ng umaga", "PM": "ng hapon"} + + def _ordinal_number(self, n): + return "ika-{}".format(n) + + +class VietnameseLocale(Locale): + + names = ["vi", "vi_vn"] + + past = "{0} trước" + future = "{0} nữa" + + timeframes = { + "now": "hiện tại", + "second": "một giây", + "seconds": "{0} giây", + "minute": "một phút", + "minutes": "{0} phút", + "hour": "một giờ", + "hours": "{0} giờ", + "day": "một ngày", + "days": "{0} ngày", + "week": "một tuần", + "weeks": "{0} tuần", + "month": "một tháng", + "months": "{0} tháng", + "year": "một năm", + "years": "{0} năm", + } + + month_names = [ + "", + "Tháng Một", + "Tháng Hai", + "Tháng Ba", + "Tháng Tư", + "Tháng Năm", + "Tháng Sáu", + "Tháng Bảy", + "Tháng Tám", + "Tháng Chín", + "Tháng Mười", + "Tháng Mười Một", + "Tháng Mười Hai", + ] + month_abbreviations = [ + "", + "Tháng 1", + "Tháng 2", + "Tháng 3", + "Tháng 4", + "Tháng 5", + "Tháng 6", + "Tháng 7", + "Tháng 8", + "Tháng 9", + "Tháng 10", + "Tháng 11", + "Tháng 12", + ] + + day_names 
= [ + "", + "Thứ Hai", + "Thứ Ba", + "Thứ Tư", + "Thứ Năm", + "Thứ Sáu", + "Thứ Bảy", + "Chủ Nhật", + ] + day_abbreviations = ["", "Thứ 2", "Thứ 3", "Thứ 4", "Thứ 5", "Thứ 6", "Thứ 7", "CN"] + + +class TurkishLocale(Locale): + + names = ["tr", "tr_tr"] + + past = "{0} önce" + future = "{0} sonra" + + timeframes = { + "now": "şimdi", + "second": "bir saniye", + "seconds": "{0} saniye", + "minute": "bir dakika", + "minutes": "{0} dakika", + "hour": "bir saat", + "hours": "{0} saat", + "day": "bir gün", + "days": "{0} gün", + "month": "bir ay", + "months": "{0} ay", + "year": "yıl", + "years": "{0} yıl", + } + + month_names = [ + "", + "Ocak", + "Şubat", + "Mart", + "Nisan", + "Mayıs", + "Haziran", + "Temmuz", + "Ağustos", + "Eylül", + "Ekim", + "Kasım", + "Aralık", + ] + month_abbreviations = [ + "", + "Oca", + "Şub", + "Mar", + "Nis", + "May", + "Haz", + "Tem", + "Ağu", + "Eyl", + "Eki", + "Kas", + "Ara", + ] + + day_names = [ + "", + "Pazartesi", + "Salı", + "Çarşamba", + "Perşembe", + "Cuma", + "Cumartesi", + "Pazar", + ] + day_abbreviations = ["", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt", "Paz"] + + +class AzerbaijaniLocale(Locale): + + names = ["az", "az_az"] + + past = "{0} əvvəl" + future = "{0} sonra" + + timeframes = { + "now": "indi", + "second": "saniyə", + "seconds": "{0} saniyə", + "minute": "bir dəqiqə", + "minutes": "{0} dəqiqə", + "hour": "bir saat", + "hours": "{0} saat", + "day": "bir gün", + "days": "{0} gün", + "month": "bir ay", + "months": "{0} ay", + "year": "il", + "years": "{0} il", + } + + month_names = [ + "", + "Yanvar", + "Fevral", + "Mart", + "Aprel", + "May", + "İyun", + "İyul", + "Avqust", + "Sentyabr", + "Oktyabr", + "Noyabr", + "Dekabr", + ] + month_abbreviations = [ + "", + "Yan", + "Fev", + "Mar", + "Apr", + "May", + "İyn", + "İyl", + "Avq", + "Sen", + "Okt", + "Noy", + "Dek", + ] + + day_names = [ + "", + "Bazar ertəsi", + "Çərşənbə axşamı", + "Çərşənbə", + "Cümə axşamı", + "Cümə", + "Şənbə", + "Bazar", + ] + day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"] + + +class ArabicLocale(Locale): + names = [ + "ar", + "ar_ae", + "ar_bh", + "ar_dj", + "ar_eg", + "ar_eh", + "ar_er", + "ar_km", + "ar_kw", + "ar_ly", + "ar_om", + "ar_qa", + "ar_sa", + "ar_sd", + "ar_so", + "ar_ss", + "ar_td", + "ar_ye", + ] + + past = "منذ {0}" + future = "خلال {0}" + + timeframes = { + "now": "الآن", + "second": "ثانية", + "seconds": {"double": "ثانيتين", "ten": "{0} ثوان", "higher": "{0} ثانية"}, + "minute": "دقيقة", + "minutes": {"double": "دقيقتين", "ten": "{0} دقائق", "higher": "{0} دقيقة"}, + "hour": "ساعة", + "hours": {"double": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"}, + "day": "يوم", + "days": {"double": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"}, + "month": "شهر", + "months": {"double": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"}, + "year": "سنة", + "years": {"double": "سنتين", "ten": "{0} سنوات", "higher": "{0} سنة"}, + } + + month_names = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "مايو", + "يونيو", + "يوليو", + "أغسطس", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + month_abbreviations = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "مايو", + "يونيو", + "يوليو", + "أغسطس", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + + day_names = [ + "", + "الإثنين", + "الثلاثاء", + "الأربعاء", + "الخميس", + "الجمعة", + "السبت", + "الأحد", + ] + day_abbreviations = ["", "إثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "سبت", "أحد"] + + def _format_timeframe(self, timeframe, delta): + form = 
self.timeframes[timeframe] + delta = abs(delta) + if isinstance(form, dict): + if delta == 2: + form = form["double"] + elif delta > 2 and delta <= 10: + form = form["ten"] + else: + form = form["higher"] + + return form.format(delta) + + +class LevantArabicLocale(ArabicLocale): + names = ["ar_iq", "ar_jo", "ar_lb", "ar_ps", "ar_sy"] + month_names = [ + "", + "كانون الثاني", + "شباط", + "آذار", + "نيسان", + "أيار", + "حزيران", + "تموز", + "آب", + "أيلول", + "تشرين الأول", + "تشرين الثاني", + "كانون الأول", + ] + month_abbreviations = [ + "", + "كانون الثاني", + "شباط", + "آذار", + "نيسان", + "أيار", + "حزيران", + "تموز", + "آب", + "أيلول", + "تشرين الأول", + "تشرين الثاني", + "كانون الأول", + ] + + +class AlgeriaTunisiaArabicLocale(ArabicLocale): + names = ["ar_tn", "ar_dz"] + month_names = [ + "", + "جانفي", + "فيفري", + "مارس", + "أفريل", + "ماي", + "جوان", + "جويلية", + "أوت", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + month_abbreviations = [ + "", + "جانفي", + "فيفري", + "مارس", + "أفريل", + "ماي", + "جوان", + "جويلية", + "أوت", + "سبتمبر", + "أكتوبر", + "نوفمبر", + "ديسمبر", + ] + + +class MauritaniaArabicLocale(ArabicLocale): + names = ["ar_mr"] + month_names = [ + "", + "يناير", + "فبراير", + "مارس", + "إبريل", + "مايو", + "يونيو", + "يوليو", + "أغشت", + "شتمبر", + "أكتوبر", + "نوفمبر", + "دجمبر", + ] + month_abbreviations = [ + "", + "يناير", + "فبراير", + "مارس", + "إبريل", + "مايو", + "يونيو", + "يوليو", + "أغشت", + "شتمبر", + "أكتوبر", + "نوفمبر", + "دجمبر", + ] + + +class MoroccoArabicLocale(ArabicLocale): + names = ["ar_ma"] + month_names = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "ماي", + "يونيو", + "يوليوز", + "غشت", + "شتنبر", + "أكتوبر", + "نونبر", + "دجنبر", + ] + month_abbreviations = [ + "", + "يناير", + "فبراير", + "مارس", + "أبريل", + "ماي", + "يونيو", + "يوليوز", + "غشت", + "شتنبر", + "أكتوبر", + "نونبر", + "دجنبر", + ] + + +class IcelandicLocale(Locale): + def _format_timeframe(self, timeframe, delta): + + timeframe = self.timeframes[timeframe] + if delta < 0: + timeframe = timeframe[0] + elif delta > 0: + timeframe = timeframe[1] + + return timeframe.format(abs(delta)) + + names = ["is", "is_is"] + + past = "fyrir {0} síðan" + future = "eftir {0}" + + timeframes = { + "now": "rétt í þessu", + "second": ("sekúndu", "sekúndu"), + "seconds": ("{0} nokkrum sekúndum", "nokkrar sekúndur"), + "minute": ("einni mínútu", "eina mínútu"), + "minutes": ("{0} mínútum", "{0} mínútur"), + "hour": ("einum tíma", "einn tíma"), + "hours": ("{0} tímum", "{0} tíma"), + "day": ("einum degi", "einn dag"), + "days": ("{0} dögum", "{0} daga"), + "month": ("einum mánuði", "einn mánuð"), + "months": ("{0} mánuðum", "{0} mánuði"), + "year": ("einu ári", "eitt ár"), + "years": ("{0} árum", "{0} ár"), + } + + meridians = {"am": "f.h.", "pm": "e.h.", "AM": "f.h.", "PM": "e.h."} + + month_names = [ + "", + "janúar", + "febrúar", + "mars", + "apríl", + "maí", + "júní", + "júlí", + "ágúst", + "september", + "október", + "nóvember", + "desember", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maí", + "jún", + "júl", + "ágú", + "sep", + "okt", + "nóv", + "des", + ] + + day_names = [ + "", + "mánudagur", + "þriðjudagur", + "miðvikudagur", + "fimmtudagur", + "föstudagur", + "laugardagur", + "sunnudagur", + ] + day_abbreviations = ["", "mán", "þri", "mið", "fim", "fös", "lau", "sun"] + + +class DanishLocale(Locale): + + names = ["da", "da_dk"] + + past = "for {0} siden" + future = "efter {0}" + and_word = "og" + + timeframes = { + "now": "lige nu", + 
"second": "et sekund", + "seconds": "{0} et par sekunder", + "minute": "et minut", + "minutes": "{0} minutter", + "hour": "en time", + "hours": "{0} timer", + "day": "en dag", + "days": "{0} dage", + "month": "en måned", + "months": "{0} måneder", + "year": "et år", + "years": "{0} år", + } + + month_names = [ + "", + "januar", + "februar", + "marts", + "april", + "maj", + "juni", + "juli", + "august", + "september", + "oktober", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "mandag", + "tirsdag", + "onsdag", + "torsdag", + "fredag", + "lørdag", + "søndag", + ] + day_abbreviations = ["", "man", "tir", "ons", "tor", "fre", "lør", "søn"] + + +class MalayalamLocale(Locale): + + names = ["ml"] + + past = "{0} മുമ്പ്" + future = "{0} ശേഷം" + + timeframes = { + "now": "ഇപ്പോൾ", + "second": "ഒരു നിമിഷം", + "seconds": "{0} സെക്കന്റ്‌", + "minute": "ഒരു മിനിറ്റ്", + "minutes": "{0} മിനിറ്റ്", + "hour": "ഒരു മണിക്കൂർ", + "hours": "{0} മണിക്കൂർ", + "day": "ഒരു ദിവസം ", + "days": "{0} ദിവസം ", + "month": "ഒരു മാസം ", + "months": "{0} മാസം ", + "year": "ഒരു വർഷം ", + "years": "{0} വർഷം ", + } + + meridians = { + "am": "രാവിലെ", + "pm": "ഉച്ചക്ക് ശേഷം", + "AM": "രാവിലെ", + "PM": "ഉച്ചക്ക് ശേഷം", + } + + month_names = [ + "", + "ജനുവരി", + "ഫെബ്രുവരി", + "മാർച്ച്‌", + "ഏപ്രിൽ ", + "മെയ്‌ ", + "ജൂണ്‍", + "ജൂലൈ", + "ഓഗസ്റ്റ്‌", + "സെപ്റ്റംബർ", + "ഒക്ടോബർ", + "നവംബർ", + "ഡിസംബർ", + ] + month_abbreviations = [ + "", + "ജനു", + "ഫെബ് ", + "മാർ", + "ഏപ്രിൽ", + "മേയ്", + "ജൂണ്‍", + "ജൂലൈ", + "ഓഗസ്റ", + "സെപ്റ്റ", + "ഒക്ടോ", + "നവം", + "ഡിസം", + ] + + day_names = ["", "തിങ്കള്‍", "ചൊവ്വ", "ബുധന്‍", "വ്യാഴം", "വെള്ളി", "ശനി", "ഞായര്‍"] + day_abbreviations = [ + "", + "തിങ്കള്‍", + "ചൊവ്വ", + "ബുധന്‍", + "വ്യാഴം", + "വെള്ളി", + "ശനി", + "ഞായര്‍", + ] + + +class HindiLocale(Locale): + + names = ["hi"] + + past = "{0} पहले" + future = "{0} बाद" + + timeframes = { + "now": "अभी", + "second": "एक पल", + "seconds": "{0} सेकंड्", + "minute": "एक मिनट ", + "minutes": "{0} मिनट ", + "hour": "एक घंटा", + "hours": "{0} घंटे", + "day": "एक दिन", + "days": "{0} दिन", + "month": "एक माह ", + "months": "{0} महीने ", + "year": "एक वर्ष ", + "years": "{0} साल ", + } + + meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"} + + month_names = [ + "", + "जनवरी", + "फरवरी", + "मार्च", + "अप्रैल ", + "मई", + "जून", + "जुलाई", + "अगस्त", + "सितंबर", + "अक्टूबर", + "नवंबर", + "दिसंबर", + ] + month_abbreviations = [ + "", + "जन", + "फ़र", + "मार्च", + "अप्रै", + "मई", + "जून", + "जुलाई", + "आग", + "सित", + "अकत", + "नवे", + "दिस", + ] + + day_names = [ + "", + "सोमवार", + "मंगलवार", + "बुधवार", + "गुरुवार", + "शुक्रवार", + "शनिवार", + "रविवार", + ] + day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"] + + +class CzechLocale(Locale): + names = ["cs", "cs_cz"] + + timeframes = { + "now": "Teď", + "second": {"past": "vteřina", "future": "vteřina", "zero": "vteřina"}, + "seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekund"]}, + "minute": {"past": "minutou", "future": "minutu", "zero": "{0} minut"}, + "minutes": {"past": "{0} minutami", "future": ["{0} minuty", "{0} minut"]}, + "hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodin"}, + "hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodin"]}, + "day": {"past": "dnem", "future": "den", "zero": "{0} dnů"}, + "days": {"past": "{0} dny", "future": 
["{0} dny", "{0} dnů"]}, + "week": {"past": "týdnem", "future": "týden", "zero": "{0} týdnů"}, + "weeks": {"past": "{0} týdny", "future": ["{0} týdny", "{0} týdnů"]}, + "month": {"past": "měsícem", "future": "měsíc", "zero": "{0} měsíců"}, + "months": {"past": "{0} měsíci", "future": ["{0} měsíce", "{0} měsíců"]}, + "year": {"past": "rokem", "future": "rok", "zero": "{0} let"}, + "years": {"past": "{0} lety", "future": ["{0} roky", "{0} let"]}, + } + + past = "Před {0}" + future = "Za {0}" + + month_names = [ + "", + "leden", + "únor", + "březen", + "duben", + "květen", + "červen", + "červenec", + "srpen", + "září", + "říjen", + "listopad", + "prosinec", + ] + month_abbreviations = [ + "", + "led", + "úno", + "bře", + "dub", + "kvě", + "čvn", + "čvc", + "srp", + "zář", + "říj", + "lis", + "pro", + ] + + day_names = [ + "", + "pondělí", + "úterý", + "středa", + "čtvrtek", + "pátek", + "sobota", + "neděle", + ] + day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"] + + def _format_timeframe(self, timeframe, delta): + """Czech aware time frame format function, takes into account + the differences between past and future forms.""" + form = self.timeframes[timeframe] + if isinstance(form, dict): + if delta == 0: + form = form["zero"] # And *never* use 0 in the singular! + elif delta > 0: + form = form["future"] + else: + form = form["past"] + delta = abs(delta) + + if isinstance(form, list): + if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): + form = form[0] + else: + form = form[1] + + return form.format(delta) + + +class SlovakLocale(Locale): + names = ["sk", "sk_sk"] + + timeframes = { + "now": "Teraz", + "second": {"past": "sekundou", "future": "sekundu", "zero": "{0} sekúnd"}, + "seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekúnd"]}, + "minute": {"past": "minútou", "future": "minútu", "zero": "{0} minút"}, + "minutes": {"past": "{0} minútami", "future": ["{0} minúty", "{0} minút"]}, + "hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodín"}, + "hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodín"]}, + "day": {"past": "dňom", "future": "deň", "zero": "{0} dní"}, + "days": {"past": "{0} dňami", "future": ["{0} dni", "{0} dní"]}, + "week": {"past": "týždňom", "future": "týždeň", "zero": "{0} týždňov"}, + "weeks": {"past": "{0} týždňami", "future": ["{0} týždne", "{0} týždňov"]}, + "month": {"past": "mesiacom", "future": "mesiac", "zero": "{0} mesiacov"}, + "months": {"past": "{0} mesiacmi", "future": ["{0} mesiace", "{0} mesiacov"]}, + "year": {"past": "rokom", "future": "rok", "zero": "{0} rokov"}, + "years": {"past": "{0} rokmi", "future": ["{0} roky", "{0} rokov"]}, + } + + past = "Pred {0}" + future = "O {0}" + and_word = "a" + + month_names = [ + "", + "január", + "február", + "marec", + "apríl", + "máj", + "jún", + "júl", + "august", + "september", + "október", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "máj", + "jún", + "júl", + "aug", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "pondelok", + "utorok", + "streda", + "štvrtok", + "piatok", + "sobota", + "nedeľa", + ] + day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"] + + def _format_timeframe(self, timeframe, delta): + """Slovak aware time frame format function, takes into account + the differences between past and future forms.""" + form = self.timeframes[timeframe] + if isinstance(form, dict): + if delta == 0: + form = form["zero"] # And *never* use 0 
in the singular! + elif delta > 0: + form = form["future"] + else: + form = form["past"] + delta = abs(delta) + + if isinstance(form, list): + if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20): + form = form[0] + else: + form = form[1] + + return form.format(delta) + + +class FarsiLocale(Locale): + + names = ["fa", "fa_ir"] + + past = "{0} قبل" + future = "در {0}" + + timeframes = { + "now": "اکنون", + "second": "یک لحظه", + "seconds": "{0} ثانیه", + "minute": "یک دقیقه", + "minutes": "{0} دقیقه", + "hour": "یک ساعت", + "hours": "{0} ساعت", + "day": "یک روز", + "days": "{0} روز", + "month": "یک ماه", + "months": "{0} ماه", + "year": "یک سال", + "years": "{0} سال", + } + + meridians = { + "am": "قبل از ظهر", + "pm": "بعد از ظهر", + "AM": "قبل از ظهر", + "PM": "بعد از ظهر", + } + + month_names = [ + "", + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + + day_names = [ + "", + "دو شنبه", + "سه شنبه", + "چهارشنبه", + "پنجشنبه", + "جمعه", + "شنبه", + "یکشنبه", + ] + day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + + +class HebrewLocale(Locale): + + names = ["he", "he_IL"] + + past = "לפני {0}" + future = "בעוד {0}" + and_word = "ו" + + timeframes = { + "now": "הרגע", + "second": "שנייה", + "seconds": "{0} שניות", + "minute": "דקה", + "minutes": "{0} דקות", + "hour": "שעה", + "hours": "{0} שעות", + "2-hours": "שעתיים", + "day": "יום", + "days": "{0} ימים", + "2-days": "יומיים", + "week": "שבוע", + "weeks": "{0} שבועות", + "2-weeks": "שבועיים", + "month": "חודש", + "months": "{0} חודשים", + "2-months": "חודשיים", + "year": "שנה", + "years": "{0} שנים", + "2-years": "שנתיים", + } + + meridians = { + "am": 'לפנ"צ', + "pm": 'אחר"צ', + "AM": "לפני הצהריים", + "PM": "אחרי הצהריים", + } + + month_names = [ + "", + "ינואר", + "פברואר", + "מרץ", + "אפריל", + "מאי", + "יוני", + "יולי", + "אוגוסט", + "ספטמבר", + "אוקטובר", + "נובמבר", + "דצמבר", + ] + month_abbreviations = [ + "", + "ינו׳", + "פבר׳", + "מרץ", + "אפר׳", + "מאי", + "יוני", + "יולי", + "אוג׳", + "ספט׳", + "אוק׳", + "נוב׳", + "דצמ׳", + ] + + day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"] + day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"] + + def _format_timeframe(self, timeframe, delta): + """Hebrew couple of aware""" + couple = "2-{}".format(timeframe) + single = timeframe.rstrip("s") + if abs(delta) == 2 and couple in self.timeframes: + key = couple + elif abs(delta) == 1 and single in self.timeframes: + key = single + else: + key = timeframe + + return self.timeframes[key].format(trunc(abs(delta))) + + def describe_multi(self, timeframes, only_distance=False): + """Describes a delta within multiple timeframes in plain language. + In Hebrew, the and word behaves a bit differently. + + :param timeframes: a list of string, quantity pairs each representing a timeframe and delta. 
+ :param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords + """ + + humanized = "" + for index, (timeframe, delta) in enumerate(timeframes): + last_humanized = self._format_timeframe(timeframe, delta) + if index == 0: + humanized = last_humanized + elif index == len(timeframes) - 1: # Must have at least 2 items + humanized += " " + self.and_word + if last_humanized[0].isdecimal(): + humanized += "־" + humanized += last_humanized + else: # Don't add for the last one + humanized += ", " + last_humanized + + if not only_distance: + humanized = self._format_relative(humanized, timeframe, delta) + + return humanized + + +class MarathiLocale(Locale): + + names = ["mr"] + + past = "{0} आधी" + future = "{0} नंतर" + + timeframes = { + "now": "सद्य", + "second": "एक सेकंद", + "seconds": "{0} सेकंद", + "minute": "एक मिनिट ", + "minutes": "{0} मिनिट ", + "hour": "एक तास", + "hours": "{0} तास", + "day": "एक दिवस", + "days": "{0} दिवस", + "month": "एक महिना ", + "months": "{0} महिने ", + "year": "एक वर्ष ", + "years": "{0} वर्ष ", + } + + meridians = {"am": "सकाळ", "pm": "संध्याकाळ", "AM": "सकाळ", "PM": "संध्याकाळ"} + + month_names = [ + "", + "जानेवारी", + "फेब्रुवारी", + "मार्च", + "एप्रिल", + "मे", + "जून", + "जुलै", + "अॉगस्ट", + "सप्टेंबर", + "अॉक्टोबर", + "नोव्हेंबर", + "डिसेंबर", + ] + month_abbreviations = [ + "", + "जान", + "फेब्रु", + "मार्च", + "एप्रि", + "मे", + "जून", + "जुलै", + "अॉग", + "सप्टें", + "अॉक्टो", + "नोव्हें", + "डिसें", + ] + + day_names = [ + "", + "सोमवार", + "मंगळवार", + "बुधवार", + "गुरुवार", + "शुक्रवार", + "शनिवार", + "रविवार", + ] + day_abbreviations = ["", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि", "रवि"] + + +def _map_locales(): + + locales = {} + + for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass): + if issubclass(cls, Locale): # pragma: no branch + for name in cls.names: + locales[name.lower()] = cls + + return locales + + +class CatalanLocale(Locale): + names = ["ca", "ca_es", "ca_ad", "ca_fr", "ca_it"] + past = "Fa {0}" + future = "En {0}" + and_word = "i" + + timeframes = { + "now": "Ara mateix", + "second": "un segon", + "seconds": "{0} segons", + "minute": "1 minut", + "minutes": "{0} minuts", + "hour": "una hora", + "hours": "{0} hores", + "day": "un dia", + "days": "{0} dies", + "month": "un mes", + "months": "{0} mesos", + "year": "un any", + "years": "{0} anys", + } + + month_names = [ + "", + "gener", + "febrer", + "març", + "abril", + "maig", + "juny", + "juliol", + "agost", + "setembre", + "octubre", + "novembre", + "desembre", + ] + month_abbreviations = [ + "", + "gen.", + "febr.", + "març", + "abr.", + "maig", + "juny", + "jul.", + "ag.", + "set.", + "oct.", + "nov.", + "des.", + ] + day_names = [ + "", + "dilluns", + "dimarts", + "dimecres", + "dijous", + "divendres", + "dissabte", + "diumenge", + ] + day_abbreviations = [ + "", + "dl.", + "dt.", + "dc.", + "dj.", + "dv.", + "ds.", + "dg.", + ] + + +class BasqueLocale(Locale): + names = ["eu", "eu_eu"] + past = "duela {0}" + future = "{0}" # I don't know what's the right phrase in Basque for the future. 
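+    # Note: every Locale subclass in this module, including this one, is
+    # picked up automatically by _map_locales() above, which scans the module
+    # with inspect.getmembers and keys each class by the lowercased entries of
+    # its `names` list. A minimal, hedged sketch of the lookup this enables
+    # (assuming the package is importable as the vendored `arrow`, and that
+    # `get_locale` resolves names through the `_locales` mapping built at the
+    # bottom of this module):
+    #
+    #     >>> from arrow import locales
+    #     >>> locales.get_locale("eu").describe("hour", 1)
+    #     'ordu bat'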
+ + timeframes = { + "now": "Orain", + "second": "segundo bat", + "seconds": "{0} segundu", + "minute": "minutu bat", + "minutes": "{0} minutu", + "hour": "ordu bat", + "hours": "{0} ordu", + "day": "egun bat", + "days": "{0} egun", + "month": "hilabete bat", + "months": "{0} hilabet", + "year": "urte bat", + "years": "{0} urte", + } + + month_names = [ + "", + "urtarrilak", + "otsailak", + "martxoak", + "apirilak", + "maiatzak", + "ekainak", + "uztailak", + "abuztuak", + "irailak", + "urriak", + "azaroak", + "abenduak", + ] + month_abbreviations = [ + "", + "urt", + "ots", + "mar", + "api", + "mai", + "eka", + "uzt", + "abu", + "ira", + "urr", + "aza", + "abe", + ] + day_names = [ + "", + "astelehena", + "asteartea", + "asteazkena", + "osteguna", + "ostirala", + "larunbata", + "igandea", + ] + day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"] + + +class HungarianLocale(Locale): + + names = ["hu", "hu_hu"] + + past = "{0} ezelőtt" + future = "{0} múlva" + + timeframes = { + "now": "éppen most", + "second": {"past": "egy második", "future": "egy második"}, + "seconds": {"past": "{0} másodpercekkel", "future": "{0} pár másodperc"}, + "minute": {"past": "egy perccel", "future": "egy perc"}, + "minutes": {"past": "{0} perccel", "future": "{0} perc"}, + "hour": {"past": "egy órával", "future": "egy óra"}, + "hours": {"past": "{0} órával", "future": "{0} óra"}, + "day": {"past": "egy nappal", "future": "egy nap"}, + "days": {"past": "{0} nappal", "future": "{0} nap"}, + "month": {"past": "egy hónappal", "future": "egy hónap"}, + "months": {"past": "{0} hónappal", "future": "{0} hónap"}, + "year": {"past": "egy évvel", "future": "egy év"}, + "years": {"past": "{0} évvel", "future": "{0} év"}, + } + + month_names = [ + "", + "január", + "február", + "március", + "április", + "május", + "június", + "július", + "augusztus", + "szeptember", + "október", + "november", + "december", + ] + month_abbreviations = [ + "", + "jan", + "febr", + "márc", + "ápr", + "máj", + "jún", + "júl", + "aug", + "szept", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "hétfő", + "kedd", + "szerda", + "csütörtök", + "péntek", + "szombat", + "vasárnap", + ] + day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"] + + meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"} + + def _format_timeframe(self, timeframe, delta): + form = self.timeframes[timeframe] + + if isinstance(form, dict): + if delta > 0: + form = form["future"] + else: + form = form["past"] + + return form.format(abs(delta)) + + +class EsperantoLocale(Locale): + names = ["eo", "eo_xx"] + past = "antaŭ {0}" + future = "post {0}" + + timeframes = { + "now": "nun", + "second": "sekundo", + "seconds": "{0} kelkaj sekundoj", + "minute": "unu minuto", + "minutes": "{0} minutoj", + "hour": "un horo", + "hours": "{0} horoj", + "day": "unu tago", + "days": "{0} tagoj", + "month": "unu monato", + "months": "{0} monatoj", + "year": "unu jaro", + "years": "{0} jaroj", + } + + month_names = [ + "", + "januaro", + "februaro", + "marto", + "aprilo", + "majo", + "junio", + "julio", + "aŭgusto", + "septembro", + "oktobro", + "novembro", + "decembro", + ] + month_abbreviations = [ + "", + "jan", + "feb", + "mar", + "apr", + "maj", + "jun", + "jul", + "aŭg", + "sep", + "okt", + "nov", + "dec", + ] + + day_names = [ + "", + "lundo", + "mardo", + "merkredo", + "ĵaŭdo", + "vendredo", + "sabato", + "dimanĉo", + ] + day_abbreviations = ["", "lun", "mar", "mer", "ĵaŭ", "ven", "sab", "dim"] + + meridians = {"am": "atm", "pm": 
"ptm", "AM": "ATM", "PM": "PTM"} + + ordinal_day_re = r"((?P[1-3]?[0-9](?=a))a)" + + def _ordinal_number(self, n): + return "{}a".format(n) + + +class ThaiLocale(Locale): + + names = ["th", "th_th"] + + past = "{0}{1}ที่ผ่านมา" + future = "ในอีก{1}{0}" + + timeframes = { + "now": "ขณะนี้", + "second": "วินาที", + "seconds": "{0} ไม่กี่วินาที", + "minute": "1 นาที", + "minutes": "{0} นาที", + "hour": "1 ชั่วโมง", + "hours": "{0} ชั่วโมง", + "day": "1 วัน", + "days": "{0} วัน", + "month": "1 เดือน", + "months": "{0} เดือน", + "year": "1 ปี", + "years": "{0} ปี", + } + + month_names = [ + "", + "มกราคม", + "กุมภาพันธ์", + "มีนาคม", + "เมษายน", + "พฤษภาคม", + "มิถุนายน", + "กรกฎาคม", + "สิงหาคม", + "กันยายน", + "ตุลาคม", + "พฤศจิกายน", + "ธันวาคม", + ] + month_abbreviations = [ + "", + "ม.ค.", + "ก.พ.", + "มี.ค.", + "เม.ย.", + "พ.ค.", + "มิ.ย.", + "ก.ค.", + "ส.ค.", + "ก.ย.", + "ต.ค.", + "พ.ย.", + "ธ.ค.", + ] + + day_names = ["", "จันทร์", "อังคาร", "พุธ", "พฤหัสบดี", "ศุกร์", "เสาร์", "อาทิตย์"] + day_abbreviations = ["", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"] + + meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"} + + BE_OFFSET = 543 + + def year_full(self, year): + """Thai always use Buddhist Era (BE) which is CE + 543""" + year += self.BE_OFFSET + return "{:04d}".format(year) + + def year_abbreviation(self, year): + """Thai always use Buddhist Era (BE) which is CE + 543""" + year += self.BE_OFFSET + return "{:04d}".format(year)[2:] + + def _format_relative(self, humanized, timeframe, delta): + """Thai normally doesn't have any space between words""" + if timeframe == "now": + return humanized + space = "" if timeframe == "seconds" else " " + direction = self.past if delta < 0 else self.future + + return direction.format(humanized, space) + + +class BengaliLocale(Locale): + + names = ["bn", "bn_bd", "bn_in"] + + past = "{0} আগে" + future = "{0} পরে" + + timeframes = { + "now": "এখন", + "second": "একটি দ্বিতীয়", + "seconds": "{0} সেকেন্ড", + "minute": "এক মিনিট", + "minutes": "{0} মিনিট", + "hour": "এক ঘণ্টা", + "hours": "{0} ঘণ্টা", + "day": "এক দিন", + "days": "{0} দিন", + "month": "এক মাস", + "months": "{0} মাস ", + "year": "এক বছর", + "years": "{0} বছর", + } + + meridians = {"am": "সকাল", "pm": "বিকাল", "AM": "সকাল", "PM": "বিকাল"} + + month_names = [ + "", + "জানুয়ারি", + "ফেব্রুয়ারি", + "মার্চ", + "এপ্রিল", + "মে", + "জুন", + "জুলাই", + "আগস্ট", + "সেপ্টেম্বর", + "অক্টোবর", + "নভেম্বর", + "ডিসেম্বর", + ] + month_abbreviations = [ + "", + "জানু", + "ফেব", + "মার্চ", + "এপ্রি", + "মে", + "জুন", + "জুল", + "অগা", + "সেপ্ট", + "অক্টো", + "নভে", + "ডিসে", + ] + + day_names = [ + "", + "সোমবার", + "মঙ্গলবার", + "বুধবার", + "বৃহস্পতিবার", + "শুক্রবার", + "শনিবার", + "রবিবার", + ] + day_abbreviations = ["", "সোম", "মঙ্গল", "বুধ", "বৃহঃ", "শুক্র", "শনি", "রবি"] + + def _ordinal_number(self, n): + if n > 10 or n == 0: + return "{}তম".format(n) + if n in [1, 5, 7, 8, 9, 10]: + return "{}ম".format(n) + if n in [2, 3]: + return "{}য়".format(n) + if n == 4: + return "{}র্থ".format(n) + if n == 6: + return "{}ষ্ঠ".format(n) + + +class RomanshLocale(Locale): + + names = ["rm", "rm_ch"] + + past = "avant {0}" + future = "en {0}" + + timeframes = { + "now": "en quest mument", + "second": "in secunda", + "seconds": "{0} secundas", + "minute": "ina minuta", + "minutes": "{0} minutas", + "hour": "in'ura", + "hours": "{0} ura", + "day": "in di", + "days": "{0} dis", + "month": "in mais", + "months": "{0} mais", + "year": "in onn", + "years": "{0} onns", + } + + month_names = [ + "", + "schaner", + 
"favrer", + "mars", + "avrigl", + "matg", + "zercladur", + "fanadur", + "avust", + "settember", + "october", + "november", + "december", + ] + + month_abbreviations = [ + "", + "schan", + "fav", + "mars", + "avr", + "matg", + "zer", + "fan", + "avu", + "set", + "oct", + "nov", + "dec", + ] + + day_names = [ + "", + "glindesdi", + "mardi", + "mesemna", + "gievgia", + "venderdi", + "sonda", + "dumengia", + ] + + day_abbreviations = ["", "gli", "ma", "me", "gie", "ve", "so", "du"] + + +class RomanianLocale(Locale): + names = ["ro", "ro_ro"] + + past = "{0} în urmă" + future = "peste {0}" + and_word = "și" + + timeframes = { + "now": "acum", + "second": "o secunda", + "seconds": "{0} câteva secunde", + "minute": "un minut", + "minutes": "{0} minute", + "hour": "o oră", + "hours": "{0} ore", + "day": "o zi", + "days": "{0} zile", + "month": "o lună", + "months": "{0} luni", + "year": "un an", + "years": "{0} ani", + } + + month_names = [ + "", + "ianuarie", + "februarie", + "martie", + "aprilie", + "mai", + "iunie", + "iulie", + "august", + "septembrie", + "octombrie", + "noiembrie", + "decembrie", + ] + month_abbreviations = [ + "", + "ian", + "febr", + "mart", + "apr", + "mai", + "iun", + "iul", + "aug", + "sept", + "oct", + "nov", + "dec", + ] + + day_names = [ + "", + "luni", + "marți", + "miercuri", + "joi", + "vineri", + "sâmbătă", + "duminică", + ] + day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"] + + +class SlovenianLocale(Locale): + names = ["sl", "sl_si"] + + past = "pred {0}" + future = "čez {0}" + and_word = "in" + + timeframes = { + "now": "zdaj", + "second": "sekundo", + "seconds": "{0} sekund", + "minute": "minuta", + "minutes": "{0} minutami", + "hour": "uro", + "hours": "{0} ur", + "day": "dan", + "days": "{0} dni", + "month": "mesec", + "months": "{0} mesecev", + "year": "leto", + "years": "{0} let", + } + + meridians = {"am": "", "pm": "", "AM": "", "PM": ""} + + month_names = [ + "", + "Januar", + "Februar", + "Marec", + "April", + "Maj", + "Junij", + "Julij", + "Avgust", + "September", + "Oktober", + "November", + "December", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "Maj", + "Jun", + "Jul", + "Avg", + "Sep", + "Okt", + "Nov", + "Dec", + ] + + day_names = [ + "", + "Ponedeljek", + "Torek", + "Sreda", + "Četrtek", + "Petek", + "Sobota", + "Nedelja", + ] + + day_abbreviations = ["", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob", "Ned"] + + +class IndonesianLocale(Locale): + + names = ["id", "id_id"] + + past = "{0} yang lalu" + future = "dalam {0}" + and_word = "dan" + + timeframes = { + "now": "baru saja", + "second": "1 sebentar", + "seconds": "{0} detik", + "minute": "1 menit", + "minutes": "{0} menit", + "hour": "1 jam", + "hours": "{0} jam", + "day": "1 hari", + "days": "{0} hari", + "month": "1 bulan", + "months": "{0} bulan", + "year": "1 tahun", + "years": "{0} tahun", + } + + meridians = {"am": "", "pm": "", "AM": "", "PM": ""} + + month_names = [ + "", + "Januari", + "Februari", + "Maret", + "April", + "Mei", + "Juni", + "Juli", + "Agustus", + "September", + "Oktober", + "November", + "Desember", + ] + + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mar", + "Apr", + "Mei", + "Jun", + "Jul", + "Ags", + "Sept", + "Okt", + "Nov", + "Des", + ] + + day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"] + + day_abbreviations = [ + "", + "Senin", + "Selasa", + "Rabu", + "Kamis", + "Jumat", + "Sabtu", + "Minggu", + ] + + +class NepaliLocale(Locale): + names = ["ne", "ne_np"] + + past = 
"{0} पहिले" + future = "{0} पछी" + + timeframes = { + "now": "अहिले", + "second": "एक सेकेन्ड", + "seconds": "{0} सेकण्ड", + "minute": "मिनेट", + "minutes": "{0} मिनेट", + "hour": "एक घण्टा", + "hours": "{0} घण्टा", + "day": "एक दिन", + "days": "{0} दिन", + "month": "एक महिना", + "months": "{0} महिना", + "year": "एक बर्ष", + "years": "बर्ष", + } + + meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"} + + month_names = [ + "", + "जनवरी", + "फेब्रुअरी", + "मार्च", + "एप्रील", + "मे", + "जुन", + "जुलाई", + "अगष्ट", + "सेप्टेम्बर", + "अक्टोबर", + "नोवेम्बर", + "डिसेम्बर", + ] + month_abbreviations = [ + "", + "जन", + "फेब", + "मार्च", + "एप्रील", + "मे", + "जुन", + "जुलाई", + "अग", + "सेप", + "अक्ट", + "नोव", + "डिस", + ] + + day_names = [ + "", + "सोमवार", + "मंगलवार", + "बुधवार", + "बिहिवार", + "शुक्रवार", + "शनिवार", + "आइतवार", + ] + + day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"] + + +class EstonianLocale(Locale): + names = ["ee", "et"] + + past = "{0} tagasi" + future = "{0} pärast" + and_word = "ja" + + timeframes = { + "now": {"past": "just nüüd", "future": "just nüüd"}, + "second": {"past": "üks sekund", "future": "ühe sekundi"}, + "seconds": {"past": "{0} sekundit", "future": "{0} sekundi"}, + "minute": {"past": "üks minut", "future": "ühe minuti"}, + "minutes": {"past": "{0} minutit", "future": "{0} minuti"}, + "hour": {"past": "tund aega", "future": "tunni aja"}, + "hours": {"past": "{0} tundi", "future": "{0} tunni"}, + "day": {"past": "üks päev", "future": "ühe päeva"}, + "days": {"past": "{0} päeva", "future": "{0} päeva"}, + "month": {"past": "üks kuu", "future": "ühe kuu"}, + "months": {"past": "{0} kuud", "future": "{0} kuu"}, + "year": {"past": "üks aasta", "future": "ühe aasta"}, + "years": {"past": "{0} aastat", "future": "{0} aasta"}, + } + + month_names = [ + "", + "Jaanuar", + "Veebruar", + "Märts", + "Aprill", + "Mai", + "Juuni", + "Juuli", + "August", + "September", + "Oktoober", + "November", + "Detsember", + ] + month_abbreviations = [ + "", + "Jan", + "Veb", + "Mär", + "Apr", + "Mai", + "Jun", + "Jul", + "Aug", + "Sep", + "Okt", + "Nov", + "Dets", + ] + + day_names = [ + "", + "Esmaspäev", + "Teisipäev", + "Kolmapäev", + "Neljapäev", + "Reede", + "Laupäev", + "Pühapäev", + ] + day_abbreviations = ["", "Esm", "Teis", "Kolm", "Nelj", "Re", "Lau", "Püh"] + + def _format_timeframe(self, timeframe, delta): + form = self.timeframes[timeframe] + if delta > 0: + form = form["future"] + else: + form = form["past"] + return form.format(abs(delta)) + + +class SwahiliLocale(Locale): + + names = [ + "sw", + "sw_ke", + "sw_tz", + ] + + past = "{0} iliyopita" + future = "muda wa {0}" + and_word = "na" + + timeframes = { + "now": "sasa hivi", + "second": "sekunde", + "seconds": "sekunde {0}", + "minute": "dakika moja", + "minutes": "dakika {0}", + "hour": "saa moja", + "hours": "saa {0}", + "day": "siku moja", + "days": "siku {0}", + "week": "wiki moja", + "weeks": "wiki {0}", + "month": "mwezi moja", + "months": "miezi {0}", + "year": "mwaka moja", + "years": "miaka {0}", + } + + meridians = {"am": "asu", "pm": "mch", "AM": "ASU", "PM": "MCH"} + + month_names = [ + "", + "Januari", + "Februari", + "Machi", + "Aprili", + "Mei", + "Juni", + "Julai", + "Agosti", + "Septemba", + "Oktoba", + "Novemba", + "Desemba", + ] + month_abbreviations = [ + "", + "Jan", + "Feb", + "Mac", + "Apr", + "Mei", + "Jun", + "Jul", + "Ago", + "Sep", + "Okt", + "Nov", + "Des", + ] + + day_names = [ + "", + "Jumatatu", + "Jumanne", + 
"Jumatano", + "Alhamisi", + "Ijumaa", + "Jumamosi", + "Jumapili", + ] + day_abbreviations = [ + "", + "Jumatatu", + "Jumanne", + "Jumatano", + "Alhamisi", + "Ijumaa", + "Jumamosi", + "Jumapili", + ] + + +_locales = _map_locales() diff --git a/pype/modules/ftrack/python2_vendor/arrow/parser.py b/pype/modules/ftrack/python2_vendor/arrow/parser.py new file mode 100644 index 0000000000..243fd1721c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/parser.py @@ -0,0 +1,596 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +import re +from datetime import datetime, timedelta + +from dateutil import tz + +from arrow import locales +from arrow.util import iso_to_gregorian, next_weekday, normalize_timestamp + +try: + from functools import lru_cache +except ImportError: # pragma: no cover + from backports.functools_lru_cache import lru_cache # pragma: no cover + + +class ParserError(ValueError): + pass + + +# Allows for ParserErrors to be propagated from _build_datetime() +# when day_of_year errors occur. +# Before this, the ParserErrors were caught by the try/except in +# _parse_multiformat() and the appropriate error message was not +# transmitted to the user. +class ParserMatchError(ParserError): + pass + + +class DateTimeParser(object): + + _FORMAT_RE = re.compile( + r"(YYY?Y?|MM?M?M?|Do|DD?D?D?|d?d?d?d|HH?|hh?|mm?|ss?|S+|ZZ?Z?|a|A|x|X|W)" + ) + _ESCAPE_RE = re.compile(r"\[[^\[\]]*\]") + + _ONE_OR_TWO_DIGIT_RE = re.compile(r"\d{1,2}") + _ONE_OR_TWO_OR_THREE_DIGIT_RE = re.compile(r"\d{1,3}") + _ONE_OR_MORE_DIGIT_RE = re.compile(r"\d+") + _TWO_DIGIT_RE = re.compile(r"\d{2}") + _THREE_DIGIT_RE = re.compile(r"\d{3}") + _FOUR_DIGIT_RE = re.compile(r"\d{4}") + _TZ_Z_RE = re.compile(r"([\+\-])(\d{2})(?:(\d{2}))?|Z") + _TZ_ZZ_RE = re.compile(r"([\+\-])(\d{2})(?:\:(\d{2}))?|Z") + _TZ_NAME_RE = re.compile(r"\w[\w+\-/]+") + # NOTE: timestamps cannot be parsed from natural language strings (by removing the ^...$) because it will + # break cases like "15 Jul 2000" and a format list (see issue #447) + _TIMESTAMP_RE = re.compile(r"^\-?\d+\.?\d+$") + _TIMESTAMP_EXPANDED_RE = re.compile(r"^\-?\d+$") + _TIME_RE = re.compile(r"^(\d{2})(?:\:?(\d{2}))?(?:\:?(\d{2}))?(?:([\.\,])(\d+))?$") + _WEEK_DATE_RE = re.compile(r"(?P\d{4})[\-]?W(?P\d{2})[\-]?(?P\d)?") + + _BASE_INPUT_RE_MAP = { + "YYYY": _FOUR_DIGIT_RE, + "YY": _TWO_DIGIT_RE, + "MM": _TWO_DIGIT_RE, + "M": _ONE_OR_TWO_DIGIT_RE, + "DDDD": _THREE_DIGIT_RE, + "DDD": _ONE_OR_TWO_OR_THREE_DIGIT_RE, + "DD": _TWO_DIGIT_RE, + "D": _ONE_OR_TWO_DIGIT_RE, + "HH": _TWO_DIGIT_RE, + "H": _ONE_OR_TWO_DIGIT_RE, + "hh": _TWO_DIGIT_RE, + "h": _ONE_OR_TWO_DIGIT_RE, + "mm": _TWO_DIGIT_RE, + "m": _ONE_OR_TWO_DIGIT_RE, + "ss": _TWO_DIGIT_RE, + "s": _ONE_OR_TWO_DIGIT_RE, + "X": _TIMESTAMP_RE, + "x": _TIMESTAMP_EXPANDED_RE, + "ZZZ": _TZ_NAME_RE, + "ZZ": _TZ_ZZ_RE, + "Z": _TZ_Z_RE, + "S": _ONE_OR_MORE_DIGIT_RE, + "W": _WEEK_DATE_RE, + } + + SEPARATORS = ["-", "/", "."] + + def __init__(self, locale="en_us", cache_size=0): + + self.locale = locales.get_locale(locale) + self._input_re_map = self._BASE_INPUT_RE_MAP.copy() + self._input_re_map.update( + { + "MMMM": self._generate_choice_re( + self.locale.month_names[1:], re.IGNORECASE + ), + "MMM": self._generate_choice_re( + self.locale.month_abbreviations[1:], re.IGNORECASE + ), + "Do": re.compile(self.locale.ordinal_day_re), + "dddd": self._generate_choice_re( + self.locale.day_names[1:], re.IGNORECASE + ), + "ddd": self._generate_choice_re( + self.locale.day_abbreviations[1:], 
re.IGNORECASE + ), + "d": re.compile(r"[1-7]"), + "a": self._generate_choice_re( + (self.locale.meridians["am"], self.locale.meridians["pm"]) + ), + # note: 'A' token accepts both 'am/pm' and 'AM/PM' formats to + # ensure backwards compatibility of this token + "A": self._generate_choice_re(self.locale.meridians.values()), + } + ) + if cache_size > 0: + self._generate_pattern_re = lru_cache(maxsize=cache_size)( + self._generate_pattern_re + ) + + # TODO: since we support more than ISO 8601, we should rename this function + # IDEA: break into multiple functions + def parse_iso(self, datetime_string, normalize_whitespace=False): + + if normalize_whitespace: + datetime_string = re.sub(r"\s+", " ", datetime_string.strip()) + + has_space_divider = " " in datetime_string + has_t_divider = "T" in datetime_string + + num_spaces = datetime_string.count(" ") + if has_space_divider and num_spaces != 1 or has_t_divider and num_spaces > 0: + raise ParserError( + "Expected an ISO 8601-like string, but was given '{}'. Try passing in a format string to resolve this.".format( + datetime_string + ) + ) + + has_time = has_space_divider or has_t_divider + has_tz = False + + # date formats (ISO 8601 and others) to test against + # NOTE: YYYYMM is omitted to avoid confusion with YYMMDD (no longer part of ISO 8601, but is still often used) + formats = [ + "YYYY-MM-DD", + "YYYY-M-DD", + "YYYY-M-D", + "YYYY/MM/DD", + "YYYY/M/DD", + "YYYY/M/D", + "YYYY.MM.DD", + "YYYY.M.DD", + "YYYY.M.D", + "YYYYMMDD", + "YYYY-DDDD", + "YYYYDDDD", + "YYYY-MM", + "YYYY/MM", + "YYYY.MM", + "YYYY", + "W", + ] + + if has_time: + + if has_space_divider: + date_string, time_string = datetime_string.split(" ", 1) + else: + date_string, time_string = datetime_string.split("T", 1) + + time_parts = re.split(r"[\+\-Z]", time_string, 1, re.IGNORECASE) + + time_components = self._TIME_RE.match(time_parts[0]) + + if time_components is None: + raise ParserError( + "Invalid time component provided. Please specify a format or provide a valid time component in the basic or extended ISO 8601 time format." 
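+                    # When the time component is valid, the code below derives
+                    # a matching time format string (HH / HH:mm / HH:mm:ss,
+                    # plus subseconds and a Z or ZZ timezone token) and appends
+                    # it to every candidate date format. An illustrative
+                    # sketch, not part of the vendored sources:
+                    #
+                    #     >>> DateTimeParser().parse_iso("2020-11-27T11:26:26+01:00")
+                    #     datetime.datetime(2020, 11, 27, 11, 26, 26,
+                    #                       tzinfo=tzoffset(None, 3600))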
+ ) + + ( + hours, + minutes, + seconds, + subseconds_sep, + subseconds, + ) = time_components.groups() + + has_tz = len(time_parts) == 2 + has_minutes = minutes is not None + has_seconds = seconds is not None + has_subseconds = subseconds is not None + + is_basic_time_format = ":" not in time_parts[0] + tz_format = "Z" + + # use 'ZZ' token instead since tz offset is present in non-basic format + if has_tz and ":" in time_parts[1]: + tz_format = "ZZ" + + time_sep = "" if is_basic_time_format else ":" + + if has_subseconds: + time_string = "HH{time_sep}mm{time_sep}ss{subseconds_sep}S".format( + time_sep=time_sep, subseconds_sep=subseconds_sep + ) + elif has_seconds: + time_string = "HH{time_sep}mm{time_sep}ss".format(time_sep=time_sep) + elif has_minutes: + time_string = "HH{time_sep}mm".format(time_sep=time_sep) + else: + time_string = "HH" + + if has_space_divider: + formats = ["{} {}".format(f, time_string) for f in formats] + else: + formats = ["{}T{}".format(f, time_string) for f in formats] + + if has_time and has_tz: + # Add "Z" or "ZZ" to the format strings to indicate to + # _parse_token() that a timezone needs to be parsed + formats = ["{}{}".format(f, tz_format) for f in formats] + + return self._parse_multiformat(datetime_string, formats) + + def parse(self, datetime_string, fmt, normalize_whitespace=False): + + if normalize_whitespace: + datetime_string = re.sub(r"\s+", " ", datetime_string) + + if isinstance(fmt, list): + return self._parse_multiformat(datetime_string, fmt) + + fmt_tokens, fmt_pattern_re = self._generate_pattern_re(fmt) + + match = fmt_pattern_re.search(datetime_string) + + if match is None: + raise ParserMatchError( + "Failed to match '{}' when parsing '{}'".format(fmt, datetime_string) + ) + + parts = {} + for token in fmt_tokens: + if token == "Do": + value = match.group("value") + elif token == "W": + value = (match.group("year"), match.group("week"), match.group("day")) + else: + value = match.group(token) + self._parse_token(token, value, parts) + + return self._build_datetime(parts) + + def _generate_pattern_re(self, fmt): + + # fmt is a string of tokens like 'YYYY-MM-DD' + # we construct a new string by replacing each + # token by its pattern: + # 'YYYY-MM-DD' -> '(?P\d{4})-(?P\d{2})-(?P
\d{2})' + tokens = [] + offset = 0 + + # Escape all special RegEx chars + escaped_fmt = re.escape(fmt) + + # Extract the bracketed expressions to be reinserted later. + escaped_fmt = re.sub(self._ESCAPE_RE, "#", escaped_fmt) + + # Any number of S is the same as one. + # TODO: allow users to specify the number of digits to parse + escaped_fmt = re.sub(r"S+", "S", escaped_fmt) + + escaped_data = re.findall(self._ESCAPE_RE, fmt) + + fmt_pattern = escaped_fmt + + for m in self._FORMAT_RE.finditer(escaped_fmt): + token = m.group(0) + try: + input_re = self._input_re_map[token] + except KeyError: + raise ParserError("Unrecognized token '{}'".format(token)) + input_pattern = "(?P<{}>{})".format(token, input_re.pattern) + tokens.append(token) + # a pattern doesn't have the same length as the token + # it replaces! We keep the difference in the offset variable. + # This works because the string is scanned left-to-right and matches + # are returned in the order found by finditer. + fmt_pattern = ( + fmt_pattern[: m.start() + offset] + + input_pattern + + fmt_pattern[m.end() + offset :] + ) + offset += len(input_pattern) - (m.end() - m.start()) + + final_fmt_pattern = "" + split_fmt = fmt_pattern.split(r"\#") + + # Due to the way Python splits, 'split_fmt' will always be longer + for i in range(len(split_fmt)): + final_fmt_pattern += split_fmt[i] + if i < len(escaped_data): + final_fmt_pattern += escaped_data[i][1:-1] + + # Wrap final_fmt_pattern in a custom word boundary to strictly + # match the formatting pattern and filter out date and time formats + # that include junk such as: blah1998-09-12 blah, blah 1998-09-12blah, + # blah1998-09-12blah. The custom word boundary matches every character + # that is not a whitespace character to allow for searching for a date + # and time string in a natural language sentence. Therefore, searching + # for a string of the form YYYY-MM-DD in "blah 1998-09-12 blah" will + # work properly. + # Certain punctuation before or after the target pattern such as + # "1998-09-12," is permitted. For the full list of valid punctuation, + # see the documentation. + + starting_word_boundary = ( + r"(?\s])" # This is the list of punctuation that is ok before the pattern (i.e. "It can't not be these characters before the pattern") + r"(\b|^)" # The \b is to block cases like 1201912 but allow 201912 for pattern YYYYMM. The ^ was necessary to allow a negative number through i.e. before epoch numbers + ) + ending_word_boundary = ( + r"(?=[\,\.\;\:\?\!\"\'\`\[\]\{\}\(\)\<\>]?" 
# Positive lookahead stating that these punctuation marks can appear after the pattern at most 1 time + r"(?!\S))" # Don't allow any non-whitespace character after the punctuation + ) + bounded_fmt_pattern = r"{}{}{}".format( + starting_word_boundary, final_fmt_pattern, ending_word_boundary + ) + + return tokens, re.compile(bounded_fmt_pattern, flags=re.IGNORECASE) + + def _parse_token(self, token, value, parts): + + if token == "YYYY": + parts["year"] = int(value) + + elif token == "YY": + value = int(value) + parts["year"] = 1900 + value if value > 68 else 2000 + value + + elif token in ["MMMM", "MMM"]: + parts["month"] = self.locale.month_number(value.lower()) + + elif token in ["MM", "M"]: + parts["month"] = int(value) + + elif token in ["DDDD", "DDD"]: + parts["day_of_year"] = int(value) + + elif token in ["DD", "D"]: + parts["day"] = int(value) + + elif token == "Do": + parts["day"] = int(value) + + elif token == "dddd": + # locale day names are 1-indexed + day_of_week = [x.lower() for x in self.locale.day_names].index( + value.lower() + ) + parts["day_of_week"] = day_of_week - 1 + + elif token == "ddd": + # locale day abbreviations are 1-indexed + day_of_week = [x.lower() for x in self.locale.day_abbreviations].index( + value.lower() + ) + parts["day_of_week"] = day_of_week - 1 + + elif token.upper() in ["HH", "H"]: + parts["hour"] = int(value) + + elif token in ["mm", "m"]: + parts["minute"] = int(value) + + elif token in ["ss", "s"]: + parts["second"] = int(value) + + elif token == "S": + # We have the *most significant* digits of an arbitrary-precision integer. + # We want the six most significant digits as an integer, rounded. + # IDEA: add nanosecond support somehow? Need datetime support for it first. + value = value.ljust(7, str("0")) + + # floating-point (IEEE-754) defaults to half-to-even rounding + seventh_digit = int(value[6]) + if seventh_digit == 5: + rounding = int(value[5]) % 2 + elif seventh_digit > 5: + rounding = 1 + else: + rounding = 0 + + parts["microsecond"] = int(value[:6]) + rounding + + elif token == "X": + parts["timestamp"] = float(value) + + elif token == "x": + parts["expanded_timestamp"] = int(value) + + elif token in ["ZZZ", "ZZ", "Z"]: + parts["tzinfo"] = TzinfoParser.parse(value) + + elif token in ["a", "A"]: + if value in (self.locale.meridians["am"], self.locale.meridians["AM"]): + parts["am_pm"] = "am" + elif value in (self.locale.meridians["pm"], self.locale.meridians["PM"]): + parts["am_pm"] = "pm" + + elif token == "W": + parts["weekdate"] = value + + @staticmethod + def _build_datetime(parts): + + weekdate = parts.get("weekdate") + + if weekdate is not None: + # we can use strptime (%G, %V, %u) in python 3.6 but these tokens aren't available before that + year, week = int(weekdate[0]), int(weekdate[1]) + + if weekdate[2] is not None: + day = int(weekdate[2]) + else: + # day not given, default to 1 + day = 1 + + dt = iso_to_gregorian(year, week, day) + parts["year"] = dt.year + parts["month"] = dt.month + parts["day"] = dt.day + + timestamp = parts.get("timestamp") + + if timestamp is not None: + return datetime.fromtimestamp(timestamp, tz=tz.tzutc()) + + expanded_timestamp = parts.get("expanded_timestamp") + + if expanded_timestamp is not None: + return datetime.fromtimestamp( + normalize_timestamp(expanded_timestamp), + tz=tz.tzutc(), + ) + + day_of_year = parts.get("day_of_year") + + if day_of_year is not None: + year = parts.get("year") + month = parts.get("month") + if year is None: + raise ParserError( + "Year component is required with 
the DDD and DDDD tokens." + ) + + if month is not None: + raise ParserError( + "Month component is not allowed with the DDD and DDDD tokens." + ) + + date_string = "{}-{}".format(year, day_of_year) + try: + dt = datetime.strptime(date_string, "%Y-%j") + except ValueError: + raise ParserError( + "The provided day of year '{}' is invalid.".format(day_of_year) + ) + + parts["year"] = dt.year + parts["month"] = dt.month + parts["day"] = dt.day + + day_of_week = parts.get("day_of_week") + day = parts.get("day") + + # If day is passed, ignore day of week + if day_of_week is not None and day is None: + year = parts.get("year", 1970) + month = parts.get("month", 1) + day = 1 + + # dddd => first day of week after epoch + # dddd YYYY => first day of week in specified year + # dddd MM YYYY => first day of week in specified year and month + # dddd MM => first day after epoch in specified month + next_weekday_dt = next_weekday(datetime(year, month, day), day_of_week) + parts["year"] = next_weekday_dt.year + parts["month"] = next_weekday_dt.month + parts["day"] = next_weekday_dt.day + + am_pm = parts.get("am_pm") + hour = parts.get("hour", 0) + + if am_pm == "pm" and hour < 12: + hour += 12 + elif am_pm == "am" and hour == 12: + hour = 0 + + # Support for midnight at the end of day + if hour == 24: + if parts.get("minute", 0) != 0: + raise ParserError("Midnight at the end of day must not contain minutes") + if parts.get("second", 0) != 0: + raise ParserError("Midnight at the end of day must not contain seconds") + if parts.get("microsecond", 0) != 0: + raise ParserError( + "Midnight at the end of day must not contain microseconds" + ) + hour = 0 + day_increment = 1 + else: + day_increment = 0 + + # account for rounding up to 1000000 + microsecond = parts.get("microsecond", 0) + if microsecond == 1000000: + microsecond = 0 + second_increment = 1 + else: + second_increment = 0 + + increment = timedelta(days=day_increment, seconds=second_increment) + + return ( + datetime( + year=parts.get("year", 1), + month=parts.get("month", 1), + day=parts.get("day", 1), + hour=hour, + minute=parts.get("minute", 0), + second=parts.get("second", 0), + microsecond=microsecond, + tzinfo=parts.get("tzinfo"), + ) + + increment + ) + + def _parse_multiformat(self, string, formats): + + _datetime = None + + for fmt in formats: + try: + _datetime = self.parse(string, fmt) + break + except ParserMatchError: + pass + + if _datetime is None: + raise ParserError( + "Could not match input '{}' to any of the following formats: {}".format( + string, ", ".join(formats) + ) + ) + + return _datetime + + # generates a capture group of choices separated by an OR operator + @staticmethod + def _generate_choice_re(choices, flags=0): + return re.compile(r"({})".format("|".join(choices)), flags=flags) + + +class TzinfoParser(object): + _TZINFO_RE = re.compile(r"^([\+\-])?(\d{2})(?:\:?(\d{2}))?$") + + @classmethod + def parse(cls, tzinfo_string): + + tzinfo = None + + if tzinfo_string == "local": + tzinfo = tz.tzlocal() + + elif tzinfo_string in ["utc", "UTC", "Z"]: + tzinfo = tz.tzutc() + + else: + + iso_match = cls._TZINFO_RE.match(tzinfo_string) + + if iso_match: + sign, hours, minutes = iso_match.groups() + if minutes is None: + minutes = 0 + seconds = int(hours) * 3600 + int(minutes) * 60 + + if sign == "-": + seconds *= -1 + + tzinfo = tz.tzoffset(None, seconds) + + else: + tzinfo = tz.gettz(tzinfo_string) + + if tzinfo is None: + raise ParserError( + 'Could not parse timezone expression "{}"'.format(tzinfo_string) + ) + + return tzinfo 
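A few concrete calls make the parser above easier to review. The snippet below is an illustrative sketch rather than part of the vendored sources; it assumes the package is importable as the vendored `arrow`, and the expected values follow directly from `_parse_token()`, `_build_datetime()` and `TzinfoParser` as defined above:

    from arrow.parser import DateTimeParser, TzinfoParser

    parser = DateTimeParser(locale="en_us", cache_size=100)

    # 'S' keeps the most significant sub-second digits and rounds
    # half-to-even into microseconds: 123456|5 stays 123456.
    dt = parser.parse("2020-11-27 11:26:26.1234565", "YYYY-MM-DD HH:mm:ss.S")
    assert dt.microsecond == 123456

    # Hour 24 is accepted as midnight at the end of the day and rolls
    # the date forward by one day.
    dt = parser.parse("1998-09-12T24:00:00", "YYYY-MM-DDTHH:mm:ss")
    assert (dt.month, dt.day, dt.hour) == (9, 13, 0)

    # Timezone expressions resolve through TzinfoParser: offsets become
    # dateutil tzoffset instances, names go through tz.gettz.
    assert TzinfoParser.parse("+02:00").utcoffset(None).total_seconds() == 7200.0

Note in particular that the "S" token rounds half-to-even rather than truncating, and that 24:00:00 is only accepted when minutes, seconds and microseconds are all zero.
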
diff --git a/pype/modules/ftrack/python2_vendor/arrow/util.py b/pype/modules/ftrack/python2_vendor/arrow/util.py new file mode 100644 index 0000000000..acce8878df --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/arrow/util.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import datetime +import numbers + +from dateutil.rrule import WEEKLY, rrule + +from arrow.constants import MAX_TIMESTAMP, MAX_TIMESTAMP_MS, MAX_TIMESTAMP_US + + +def next_weekday(start_date, weekday): + """Get next weekday from the specified start date. + + :param start_date: Datetime object representing the start date. + :param weekday: Next weekday to obtain. Can be a value between 0 (Monday) and 6 (Sunday). + :return: Datetime object corresponding to the next weekday after start_date. + + Usage:: + + # Get first Monday after epoch + >>> next_weekday(datetime(1970, 1, 1), 0) + 1970-01-05 00:00:00 + + # Get first Thursday after epoch + >>> next_weekday(datetime(1970, 1, 1), 3) + 1970-01-01 00:00:00 + + # Get first Sunday after epoch + >>> next_weekday(datetime(1970, 1, 1), 6) + 1970-01-04 00:00:00 + """ + if weekday < 0 or weekday > 6: + raise ValueError("Weekday must be between 0 (Monday) and 6 (Sunday).") + return rrule(freq=WEEKLY, dtstart=start_date, byweekday=weekday, count=1)[0] + + +def total_seconds(td): + """Get total seconds for timedelta.""" + return td.total_seconds() + + +def is_timestamp(value): + """Check if value is a valid timestamp.""" + if isinstance(value, bool): + return False + if not ( + isinstance(value, numbers.Integral) + or isinstance(value, float) + or isinstance(value, str) + ): + return False + try: + float(value) + return True + except ValueError: + return False + + +def normalize_timestamp(timestamp): + """Normalize millisecond and microsecond timestamps into normal timestamps.""" + if timestamp > MAX_TIMESTAMP: + if timestamp < MAX_TIMESTAMP_MS: + timestamp /= 1e3 + elif timestamp < MAX_TIMESTAMP_US: + timestamp /= 1e6 + else: + raise ValueError( + "The specified timestamp '{}' is too large.".format(timestamp) + ) + return timestamp + + +# Credit to https://stackoverflow.com/a/1700069 +def iso_to_gregorian(iso_year, iso_week, iso_day): + """Converts an ISO week date tuple into a datetime object.""" + + if not 1 <= iso_week <= 53: + raise ValueError("ISO Calendar week value must be between 1-53.") + + if not 1 <= iso_day <= 7: + raise ValueError("ISO Calendar day value must be between 1-7") + + # The first week of the year always contains 4 Jan. + fourth_jan = datetime.date(iso_year, 1, 4) + delta = datetime.timedelta(fourth_jan.isoweekday() - 1) + year_start = fourth_jan - delta + gregorian = year_start + datetime.timedelta(days=iso_day - 1, weeks=iso_week - 1) + + return gregorian + + +def validate_bounds(bounds): + if bounds != "()" and bounds != "(]" and bounds != "[)" and bounds != "[]": + raise ValueError( + 'Invalid bounds. Please select between "()", "(]", "[)", or "[]".' + ) + + +# Python 2.7 / 3.0+ definitions for isstr function. 
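+#
+# The helpers above are pure functions, so a couple of illustrative values
+# (hedged sketch; the ISO week containing 4 Jan anchors the ISO year, and
+# over-sized timestamps are scaled from ms/us down to seconds):
+#
+#     >>> iso_to_gregorian(2020, 1, 1)        # Monday of 2020's ISO week 1
+#     datetime.date(2019, 12, 30)
+#     >>> normalize_timestamp(1606474586000)  # millisecond input
+#     1606474586.0
+#
+# The isstr() definitions follow.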
+ +try: # pragma: no cover + basestring + + def isstr(s): + return isinstance(s, basestring) # noqa: F821 + + +except NameError: # pragma: no cover + + def isstr(s): + return isinstance(s, str) + + +__all__ = ["next_weekday", "total_seconds", "is_timestamp", "isstr", "iso_to_gregorian"] diff --git a/pype/vendor/backports/__init__.py b/pype/modules/ftrack/python2_vendor/backports/__init__.py similarity index 100% rename from pype/vendor/backports/__init__.py rename to pype/modules/ftrack/python2_vendor/backports/__init__.py diff --git a/pype/vendor/backports/functools_lru_cache.py b/pype/modules/ftrack/python2_vendor/backports/functools_lru_cache.py similarity index 83% rename from pype/vendor/backports/functools_lru_cache.py rename to pype/modules/ftrack/python2_vendor/backports/functools_lru_cache.py index 707c6c766d..e0b19d951a 100644 --- a/pype/vendor/backports/functools_lru_cache.py +++ b/pype/modules/ftrack/python2_vendor/backports/functools_lru_cache.py @@ -8,10 +8,12 @@ _CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) @functools.wraps(functools.update_wrapper) -def update_wrapper(wrapper, - wrapped, - assigned = functools.WRAPPER_ASSIGNMENTS, - updated = functools.WRAPPER_UPDATES): +def update_wrapper( + wrapper, + wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES, +): """ Patch two bugs in functools.update_wrapper. """ @@ -34,10 +36,17 @@ class _HashedSeq(list): return self.hashvalue -def _make_key(args, kwds, typed, - kwd_mark=(object(),), - fasttypes=set([int, str, frozenset, type(None)]), - sorted=sorted, tuple=tuple, type=type, len=len): +def _make_key( + args, + kwds, + typed, + kwd_mark=(object(),), + fasttypes=set([int, str, frozenset, type(None)]), + sorted=sorted, + tuple=tuple, + type=type, + len=len, +): 'Make a cache key from optionally typed positional and keyword arguments' key = args if kwds: @@ -82,16 +91,16 @@ def lru_cache(maxsize=100, typed=False): def decorating_function(user_function): cache = dict() - stats = [0, 0] # make statistics updateable non-locally - HITS, MISSES = 0, 1 # names for the stats fields + stats = [0, 0] # make statistics updateable non-locally + HITS, MISSES = 0, 1 # names for the stats fields make_key = _make_key - cache_get = cache.get # bound method to lookup key or return None - _len = len # localize the global len() function - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - nonlocal_root = [root] # make updateable non-locally - PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + cache_get = cache.get # bound method to lookup key or return None + _len = len # localize the global len() function + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + nonlocal_root = [root] # make updateable non-locally + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields if maxsize == 0: @@ -106,7 +115,9 @@ def lru_cache(maxsize=100, typed=False): def wrapper(*args, **kwds): # simple caching without ordering or size limit key = make_key(args, kwds, typed) - result = cache_get(key, root) # root used here as a unique not-found sentinel + result = cache_get( + key, root + ) # root used here as a unique not-found sentinel if result is not root: stats[HITS] += 1 return result @@ -123,7 +134,8 @@ 
def lru_cache(maxsize=100, typed=False): with lock: link = cache_get(key) if link is not None: - # record recent use of the key by moving it to the front of the list + # record recent use of the key by moving it + # to the front of the list root, = nonlocal_root link_prev, link_next, key, result = link link_prev[NEXT] = link_next diff --git a/pype/vendor/builtins/__init__.py b/pype/modules/ftrack/python2_vendor/builtins/__init__.py similarity index 100% rename from pype/vendor/builtins/__init__.py rename to pype/modules/ftrack/python2_vendor/builtins/__init__.py diff --git a/pype/vendor/ftrack_api_old/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/__init__.py similarity index 100% rename from pype/vendor/ftrack_api_old/__init__.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/__init__.py diff --git a/pype/vendor/ftrack_api_old/_centralized_storage_scenario.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/_centralized_storage_scenario.py similarity index 100% rename from pype/vendor/ftrack_api_old/_centralized_storage_scenario.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/_centralized_storage_scenario.py diff --git a/pype/vendor/ftrack_api_old/_python_ntpath.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/_python_ntpath.py similarity index 100% rename from pype/vendor/ftrack_api_old/_python_ntpath.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/_python_ntpath.py diff --git a/pype/vendor/ftrack_api_old/_version.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/_version.py similarity index 100% rename from pype/vendor/ftrack_api_old/_version.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/_version.py diff --git a/pype/vendor/ftrack_api_old/_weakref.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/_weakref.py similarity index 100% rename from pype/vendor/ftrack_api_old/_weakref.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/_weakref.py diff --git a/pype/vendor/ftrack_api_old/accessor/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/__init__.py similarity index 100% rename from pype/vendor/ftrack_api_old/accessor/__init__.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/__init__.py diff --git a/pype/vendor/ftrack_api_old/accessor/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/base.py similarity index 100% rename from pype/vendor/ftrack_api_old/accessor/base.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/base.py diff --git a/pype/vendor/ftrack_api_old/accessor/disk.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/disk.py similarity index 100% rename from pype/vendor/ftrack_api_old/accessor/disk.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/disk.py diff --git a/pype/vendor/ftrack_api_old/accessor/server.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/server.py similarity index 100% rename from pype/vendor/ftrack_api_old/accessor/server.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/accessor/server.py diff --git a/pype/vendor/ftrack_api_old/attribute.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/attribute.py similarity index 100% rename from pype/vendor/ftrack_api_old/attribute.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/attribute.py diff --git a/pype/vendor/ftrack_api_old/cache.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/cache.py similarity index 100% rename from 
pype/vendor/ftrack_api_old/cache.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/cache.py diff --git a/pype/vendor/ftrack_api_old/collection.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/collection.py similarity index 100% rename from pype/vendor/ftrack_api_old/collection.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/collection.py diff --git a/pype/vendor/ftrack_api_old/data.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/data.py similarity index 100% rename from pype/vendor/ftrack_api_old/data.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/data.py diff --git a/pype/vendor/ftrack_api_old/entity/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/__init__.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/__init__.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/__init__.py diff --git a/pype/vendor/ftrack_api_old/entity/asset_version.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/asset_version.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/asset_version.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/asset_version.py diff --git a/pype/vendor/ftrack_api_old/entity/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/base.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/base.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/base.py diff --git a/pype/vendor/ftrack_api_old/entity/component.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/component.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/component.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/component.py diff --git a/pype/vendor/ftrack_api_old/entity/factory.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/factory.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/factory.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/factory.py diff --git a/pype/vendor/ftrack_api_old/entity/job.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/job.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/job.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/job.py diff --git a/pype/vendor/ftrack_api_old/entity/location.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/location.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/location.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/location.py diff --git a/pype/vendor/ftrack_api_old/entity/note.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/note.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/note.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/note.py diff --git a/pype/vendor/ftrack_api_old/entity/project_schema.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/project_schema.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/project_schema.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/project_schema.py diff --git a/pype/vendor/ftrack_api_old/entity/user.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/user.py similarity index 100% rename from pype/vendor/ftrack_api_old/entity/user.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/entity/user.py diff --git a/pype/vendor/ftrack_api_old/event/__init__.py 
b/pype/modules/ftrack/python2_vendor/ftrack_api_old/event/__init__.py similarity index 100% rename from pype/vendor/ftrack_api_old/event/__init__.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/event/__init__.py diff --git a/pype/vendor/ftrack_api_old/event/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/event/base.py similarity index 100% rename from pype/vendor/ftrack_api_old/event/base.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/event/base.py diff --git a/pype/vendor/ftrack_api_old/event/expression.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/event/expression.py similarity index 100% rename from pype/vendor/ftrack_api_old/event/expression.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/event/expression.py diff --git a/pype/vendor/ftrack_api_old/event/hub.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/event/hub.py similarity index 100% rename from pype/vendor/ftrack_api_old/event/hub.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/event/hub.py diff --git a/pype/vendor/ftrack_api_old/event/subscriber.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/event/subscriber.py similarity index 100% rename from pype/vendor/ftrack_api_old/event/subscriber.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/event/subscriber.py diff --git a/pype/vendor/ftrack_api_old/event/subscription.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/event/subscription.py similarity index 100% rename from pype/vendor/ftrack_api_old/event/subscription.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/event/subscription.py diff --git a/pype/vendor/ftrack_api_old/exception.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/exception.py similarity index 100% rename from pype/vendor/ftrack_api_old/exception.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/exception.py diff --git a/pype/vendor/ftrack_api_old/formatter.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/formatter.py similarity index 100% rename from pype/vendor/ftrack_api_old/formatter.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/formatter.py diff --git a/pype/vendor/ftrack_api_old/inspection.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/inspection.py similarity index 100% rename from pype/vendor/ftrack_api_old/inspection.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/inspection.py diff --git a/pype/vendor/ftrack_api_old/logging.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/logging.py similarity index 100% rename from pype/vendor/ftrack_api_old/logging.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/logging.py diff --git a/pype/vendor/ftrack_api_old/operation.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/operation.py similarity index 100% rename from pype/vendor/ftrack_api_old/operation.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/operation.py diff --git a/pype/vendor/ftrack_api_old/plugin.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/plugin.py similarity index 100% rename from pype/vendor/ftrack_api_old/plugin.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/plugin.py diff --git a/pype/vendor/ftrack_api_old/query.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/query.py similarity index 100% rename from pype/vendor/ftrack_api_old/query.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/query.py diff --git a/pype/vendor/ftrack_api_old/resource_identifier_transformer/__init__.py 
b/pype/modules/ftrack/python2_vendor/ftrack_api_old/resource_identifier_transformer/__init__.py similarity index 100% rename from pype/vendor/ftrack_api_old/resource_identifier_transformer/__init__.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/resource_identifier_transformer/__init__.py diff --git a/pype/vendor/ftrack_api_old/resource_identifier_transformer/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/resource_identifier_transformer/base.py similarity index 100% rename from pype/vendor/ftrack_api_old/resource_identifier_transformer/base.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/resource_identifier_transformer/base.py diff --git a/pype/vendor/ftrack_api_old/session.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/session.py similarity index 100% rename from pype/vendor/ftrack_api_old/session.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/session.py diff --git a/pype/vendor/ftrack_api_old/structure/__init__.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/__init__.py similarity index 100% rename from pype/vendor/ftrack_api_old/structure/__init__.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/__init__.py diff --git a/pype/vendor/ftrack_api_old/structure/base.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/base.py similarity index 100% rename from pype/vendor/ftrack_api_old/structure/base.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/base.py diff --git a/pype/vendor/ftrack_api_old/structure/entity_id.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/entity_id.py similarity index 100% rename from pype/vendor/ftrack_api_old/structure/entity_id.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/entity_id.py diff --git a/pype/vendor/ftrack_api_old/structure/id.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/id.py similarity index 100% rename from pype/vendor/ftrack_api_old/structure/id.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/id.py diff --git a/pype/vendor/ftrack_api_old/structure/origin.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/origin.py similarity index 100% rename from pype/vendor/ftrack_api_old/structure/origin.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/origin.py diff --git a/pype/vendor/ftrack_api_old/structure/standard.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/standard.py similarity index 100% rename from pype/vendor/ftrack_api_old/structure/standard.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/structure/standard.py diff --git a/pype/vendor/ftrack_api_old/symbol.py b/pype/modules/ftrack/python2_vendor/ftrack_api_old/symbol.py similarity index 100% rename from pype/vendor/ftrack_api_old/symbol.py rename to pype/modules/ftrack/python2_vendor/ftrack_api_old/symbol.py diff --git a/pype/modules/ftrack/python2_vendor/future/__init__.py b/pype/modules/ftrack/python2_vendor/future/__init__.py new file mode 100644 index 0000000000..ad419d67e2 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/__init__.py @@ -0,0 +1,93 @@ +""" +future: Easy, safe support for Python 2/3 compatibility +======================================================= + +``future`` is the missing compatibility layer between Python 2 and Python +3. It allows you to use a single, clean Python 3.x-compatible codebase to +support both Python 2 and Python 3 with minimal overhead. 
+ +It is designed to be used as follows:: + + from __future__ import (absolute_import, division, + print_function, unicode_literals) + from builtins import ( + bytes, dict, int, list, object, range, str, + ascii, chr, hex, input, next, oct, open, + pow, round, super, + filter, map, zip) + +followed by predominantly standard, idiomatic Python 3 code that then runs +similarly on Python 2.6/2.7 and Python 3.3+. + +The imports have no effect on Python 3. On Python 2, they shadow the +corresponding builtins, which normally have different semantics on Python 3 +versus 2, to provide their Python 3 semantics. + + +Standard library reorganization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``future`` supports the standard library reorganization (PEP 3108) through the +following Py3 interfaces: + + >>> # Top-level packages with Py3 names provided on Py2: + >>> import html.parser + >>> import queue + >>> import tkinter.dialog + >>> import xmlrpc.client + >>> # etc. + + >>> # Aliases provided for extensions to existing Py2 module names: + >>> from future.standard_library import install_aliases + >>> install_aliases() + + >>> from collections import Counter, OrderedDict # backported to Py2.6 + >>> from collections import UserDict, UserList, UserString + >>> import urllib.request + >>> from itertools import filterfalse, zip_longest + >>> from subprocess import getoutput, getstatusoutput + + +Automatic conversion +-------------------- + +An included script called `futurize +`_ aids in converting +code (from either Python 2 or Python 3) to code compatible with both +platforms. It is similar to ``python-modernize`` but goes further in +providing Python 3 compatibility through the use of the backported types +and builtin functions in ``future``. + + +Documentation +------------- + +See: http://python-future.org + + +Credits +------- + +:Author: Ed Schofield, Jordan M. Adler, et al +:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte + Ltd, Singapore. http://pythoncharmers.com +:Others: See docs/credits.rst or http://python-future.org/credits.html + + +Licensing +--------- +Copyright 2013-2019 Python Charmers Pty Ltd, Australia. +The software is distributed under an MIT licence. See LICENSE.txt. 
+ +""" + +__title__ = 'future' +__author__ = 'Ed Schofield' +__license__ = 'MIT' +__copyright__ = 'Copyright 2013-2019 Python Charmers Pty Ltd' +__ver_major__ = 0 +__ver_minor__ = 18 +__ver_patch__ = 2 +__ver_sub__ = '' +__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__, + __ver_patch__, __ver_sub__) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/__init__.py new file mode 100644 index 0000000000..c71e065354 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/__init__.py @@ -0,0 +1,26 @@ +""" +future.backports package +""" + +from __future__ import absolute_import + +import sys + +__future_module__ = True +from future.standard_library import import_top_level_modules + + +if sys.version_info[0] >= 3: + import_top_level_modules() + + +from .misc import (ceil, + OrderedDict, + Counter, + ChainMap, + check_output, + count, + recursive_repr, + _count_elements, + cmp_to_key + ) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/_markupbase.py b/pype/modules/ftrack/python2_vendor/future/backports/_markupbase.py new file mode 100644 index 0000000000..d51bfc7ef1 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/_markupbase.py @@ -0,0 +1,422 @@ +"""Shared support for scanning document type declarations in HTML and XHTML. + +Backported for python-future from Python 3.3. Reason: ParserBase is an +old-style class in the Python 2.7 source of markupbase.py, which I suspect +might be the cause of sporadic unit-test failures on travis-ci.org with +test_htmlparser.py. The test failures look like this: + + ====================================================================== + +ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase) + +---------------------------------------------------------------------- + +Traceback (most recent call last): + File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement + [("starttag", "a", [("b", "&><\"'")])]) + File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check + collector = self.get_collector() + File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector + return EventCollector(strict=True) + File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__ + html.parser.HTMLParser.__init__(self, *args, **kw) + File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__ + self.reset() + File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset + _markupbase.ParserBase.reset(self) + +TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead) + +This module is used as a foundation for the html.parser module. It has no +documented public API and should not be used directly. 
+ +""" + +import re + +_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match +_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match +_commentclose = re.compile(r'--\s*>') +_markedsectionclose = re.compile(r']\s*]\s*>') + +# An analysis of the MS-Word extensions is available at +# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf + +_msmarkedsectionclose = re.compile(r']\s*>') + +del re + + +class ParserBase(object): + """Parser base class which provides some common support methods used + by the SGML/HTML and XHTML parsers.""" + + def __init__(self): + if self.__class__ is ParserBase: + raise RuntimeError( + "_markupbase.ParserBase must be subclassed") + + def error(self, message): + raise NotImplementedError( + "subclasses of ParserBase must override error()") + + def reset(self): + self.lineno = 1 + self.offset = 0 + + def getpos(self): + """Return current line number and offset.""" + return self.lineno, self.offset + + # Internal -- update line number and offset. This should be + # called for each piece of data exactly once, in order -- in other + # words the concatenation of all the input strings to this + # function should be exactly the entire input. + def updatepos(self, i, j): + if i >= j: + return j + rawdata = self.rawdata + nlines = rawdata.count("\n", i, j) + if nlines: + self.lineno = self.lineno + nlines + pos = rawdata.rindex("\n", i, j) # Should not fail + self.offset = j-(pos+1) + else: + self.offset = self.offset + j-i + return j + + _decl_otherchars = '' + + # Internal -- parse declaration (for use by subclasses). + def parse_declaration(self, i): + # This is some sort of declaration; in "HTML as + # deployed," this should only be the document type + # declaration (""). + # ISO 8879:1986, however, has more complex + # declaration syntax for elements in , including: + # --comment-- + # [marked section] + # name in the following list: ENTITY, DOCTYPE, ELEMENT, + # ATTLIST, NOTATION, SHORTREF, USEMAP, + # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM + rawdata = self.rawdata + j = i + 2 + assert rawdata[i:j] == "": + # the empty comment + return j + 1 + if rawdata[j:j+1] in ("-", ""): + # Start of comment followed by buffer boundary, + # or just a buffer boundary. + return -1 + # A simple, practical version could look like: ((name|stringlit) S*) + '>' + n = len(rawdata) + if rawdata[j:j+2] == '--': #comment + # Locate --.*-- as the body of the comment + return self.parse_comment(i) + elif rawdata[j] == '[': #marked section + # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section + # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA + # Note that this is extended by Microsoft Office "Save as Web" function + # to include [if...] and [endif]. + return self.parse_marked_section(i) + else: #all other declaration elements + decltype, j = self._scan_name(j, i) + if j < 0: + return j + if decltype == "doctype": + self._decl_otherchars = '' + while j < n: + c = rawdata[j] + if c == ">": + # end of declaration syntax + data = rawdata[i+2:j] + if decltype == "doctype": + self.handle_decl(data) + else: + # According to the HTML5 specs sections "8.2.4.44 Bogus + # comment state" and "8.2.4.45 Markup declaration open + # state", a comment token should be emitted. + # Calling unknown_decl provides more flexibility though. 
+ self.unknown_decl(data) + return j + 1 + if c in "\"'": + m = _declstringlit_match(rawdata, j) + if not m: + return -1 # incomplete + j = m.end() + elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": + name, j = self._scan_name(j, i) + elif c in self._decl_otherchars: + j = j + 1 + elif c == "[": + # this could be handled in a separate doctype parser + if decltype == "doctype": + j = self._parse_doctype_subset(j + 1, i) + elif decltype in set(["attlist", "linktype", "link", "element"]): + # must tolerate []'d groups in a content model in an element declaration + # also in data attribute specifications of attlist declaration + # also link type declaration subsets in linktype declarations + # also link attribute specification lists in link declarations + self.error("unsupported '[' char in %s declaration" % decltype) + else: + self.error("unexpected '[' char in declaration") + else: + self.error( + "unexpected %r char in declaration" % rawdata[j]) + if j < 0: + return j + return -1 # incomplete + + # Internal -- parse a marked section + # Override this to handle MS-word extension syntax content + def parse_marked_section(self, i, report=1): + rawdata= self.rawdata + assert rawdata[i:i+3] == ' ending + match= _markedsectionclose.search(rawdata, i+3) + elif sectName in set(["if", "else", "endif"]): + # look for MS Office ]> ending + match= _msmarkedsectionclose.search(rawdata, i+3) + else: + self.error('unknown status keyword %r in marked section' % rawdata[i+3:j]) + if not match: + return -1 + if report: + j = match.start(0) + self.unknown_decl(rawdata[i+3: j]) + return match.end(0) + + # Internal -- parse comment, return length or -1 if not terminated + def parse_comment(self, i, report=1): + rawdata = self.rawdata + if rawdata[i:i+4] != ' delimiter transport-padding + # --> CRLF body-part + for body_part in msgtexts: + # delimiter transport-padding CRLF + self.write(self._NL + '--' + boundary + self._NL) + # body-part + self._fp.write(body_part) + # close-delimiter transport-padding + self.write(self._NL + '--' + boundary + '--') + if msg.epilogue is not None: + self.write(self._NL) + if self._mangle_from_: + epilogue = fcre.sub('>From ', msg.epilogue) + else: + epilogue = msg.epilogue + self._write_lines(epilogue) + + def _handle_multipart_signed(self, msg): + # The contents of signed parts has to stay unmodified in order to keep + # the signature intact per RFC1847 2.1, so we disable header wrapping. + # RDM: This isn't enough to completely preserve the part, but it helps. + p = self.policy + self.policy = p.clone(max_line_length=0) + try: + self._handle_multipart(msg) + finally: + self.policy = p + + def _handle_message_delivery_status(self, msg): + # We can't just write the headers directly to self's file object + # because this will leave an extra newline between the last header + # block and the boundary. Sigh. + blocks = [] + for part in msg.get_payload(): + s = self._new_buffer() + g = self.clone(s) + g.flatten(part, unixfrom=False, linesep=self._NL) + text = s.getvalue() + lines = text.split(self._encoded_NL) + # Strip off the unnecessary trailing empty line + if lines and lines[-1] == self._encoded_EMPTY: + blocks.append(self._encoded_NL.join(lines[:-1])) + else: + blocks.append(text) + # Now join all the blocks with an empty line. This has the lovely + # effect of separating each block with an empty line, but not adding + # an extra one after the last one. 
+ self._fp.write(self._encoded_NL.join(blocks)) + + def _handle_message(self, msg): + s = self._new_buffer() + g = self.clone(s) + # The payload of a message/rfc822 part should be a multipart sequence + # of length 1. The zeroth element of the list should be the Message + # object for the subpart. Extract that object, stringify it, and + # write it out. + # Except, it turns out, when it's a string instead, which happens when + # and only when HeaderParser is used on a message of mime type + # message/rfc822. Such messages are generated by, for example, + # Groupwise when forwarding unadorned messages. (Issue 7970.) So + # in that case we just emit the string body. + payload = msg._payload + if isinstance(payload, list): + g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL) + payload = s.getvalue() + else: + payload = self._encode(payload) + self._fp.write(payload) + + # This used to be a module level function; we use a classmethod for this + # and _compile_re so we can continue to provide the module level function + # for backward compatibility by doing + # _make_boudary = Generator._make_boundary + # at the end of the module. It *is* internal, so we could drop that... + @classmethod + def _make_boundary(cls, text=None): + # Craft a random boundary. If text is given, ensure that the chosen + # boundary doesn't appear in the text. + token = random.randrange(sys.maxsize) + boundary = ('=' * 15) + (_fmt % token) + '==' + if text is None: + return boundary + b = boundary + counter = 0 + while True: + cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE) + if not cre.search(text): + break + b = boundary + '.' + str(counter) + counter += 1 + return b + + @classmethod + def _compile_re(cls, s, flags): + return re.compile(s, flags) + +class BytesGenerator(Generator): + """Generates a bytes version of a Message object tree. + + Functionally identical to the base Generator except that the output is + bytes and not string. When surrogates were used in the input to encode + bytes, these are decoded back to bytes for output. If the policy has + cte_type set to 7bit, then the message is transformed such that the + non-ASCII bytes are properly content transfer encoded, using the charset + unknown-8bit. + + The outfp object must accept bytes in its write method. + """ + + # Bytes versions of this constant for use in manipulating data from + # the BytesIO buffer. + _encoded_EMPTY = b'' + + def write(self, s): + self._fp.write(str(s).encode('ascii', 'surrogateescape')) + + def _new_buffer(self): + return BytesIO() + + def _encode(self, s): + return s.encode('ascii') + + def _write_headers(self, msg): + # This is almost the same as the string version, except for handling + # strings with 8bit bytes. + for h, v in msg.raw_items(): + self._fp.write(self.policy.fold_binary(h, v)) + # A blank line always separates headers from body + self.write(self._NL) + + def _handle_text(self, msg): + # If the string has surrogates the original source was bytes, so + # just write it back out. 
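+        # (write() re-encodes with encode('ascii', 'surrogateescape'), so any
+        #  surrogate code points turn back into the original raw bytes.)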
+ if msg._payload is None: + return + if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit': + if self._mangle_from_: + msg._payload = fcre.sub(">From ", msg._payload) + self._write_lines(msg._payload) + else: + super(BytesGenerator,self)._handle_text(msg) + + # Default body handler + _writeBody = _handle_text + + @classmethod + def _compile_re(cls, s, flags): + return re.compile(s.encode('ascii'), flags) + + +_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]' + +class DecodedGenerator(Generator): + """Generates a text representation of a message. + + Like the Generator base class, except that non-text parts are substituted + with a format string representing the part. + """ + def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None): + """Like Generator.__init__() except that an additional optional + argument is allowed. + + Walks through all subparts of a message. If the subpart is of main + type `text', then it prints the decoded payload of the subpart. + + Otherwise, fmt is a format string that is used instead of the message + payload. fmt is expanded with the following keywords (in + %(keyword)s format): + + type : Full MIME type of the non-text part + maintype : Main MIME type of the non-text part + subtype : Sub-MIME type of the non-text part + filename : Filename of the non-text part + description: Description associated with the non-text part + encoding : Content transfer encoding of the non-text part + + The default value for fmt is None, meaning + + [Non-text (%(type)s) part of message omitted, filename %(filename)s] + """ + Generator.__init__(self, outfp, mangle_from_, maxheaderlen) + if fmt is None: + self._fmt = _FMT + else: + self._fmt = fmt + + def _dispatch(self, msg): + for part in msg.walk(): + maintype = part.get_content_maintype() + if maintype == 'text': + print(part.get_payload(decode=False), file=self) + elif maintype == 'multipart': + # Just skip this + pass + else: + print(self._fmt % { + 'type' : part.get_content_type(), + 'maintype' : part.get_content_maintype(), + 'subtype' : part.get_content_subtype(), + 'filename' : part.get_filename('[no filename]'), + 'description': part.get('Content-Description', + '[no description]'), + 'encoding' : part.get('Content-Transfer-Encoding', + '[no encoding]'), + }, file=self) + + +# Helper used by Generator._make_boundary +_width = len(repr(sys.maxsize-1)) +_fmt = '%%0%dd' % _width + +# Backward compatibility +_make_boundary = Generator._make_boundary diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/header.py b/pype/modules/ftrack/python2_vendor/future/backports/email/header.py new file mode 100644 index 0000000000..63bf038c02 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/header.py @@ -0,0 +1,581 @@ +# Copyright (C) 2002-2007 Python Software Foundation +# Author: Ben Gertzfield, Barry Warsaw +# Contact: email-sig@python.org + +"""Header encoding and decoding functionality.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import +from future.builtins import bytes, range, str, super, zip + +__all__ = [ + 'Header', + 'decode_header', + 'make_header', + ] + +import re +import binascii + +from future.backports import email +from future.backports.email import base64mime +from future.backports.email.errors import HeaderParseError +import future.backports.email.charset as _charset + +# Helpers +from future.backports.email.quoprimime import _max_append, 
header_decode + +Charset = _charset.Charset + +NL = '\n' +SPACE = ' ' +BSPACE = b' ' +SPACE8 = ' ' * 8 +EMPTYSTRING = '' +MAXLINELEN = 78 +FWS = ' \t' + +USASCII = Charset('us-ascii') +UTF8 = Charset('utf-8') + +# Match encoded-word strings in the form =?charset?q?Hello_World?= +ecre = re.compile(r''' + =\? # literal =? + (?P[^?]*?) # non-greedy up to the next ? is the charset + \? # literal ? + (?P[qb]) # either a "q" or a "b", case insensitive + \? # literal ? + (?P.*?) # non-greedy up to the next ?= is the encoded string + \?= # literal ?= + ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE) + +# Field name regexp, including trailing colon, but not separating whitespace, +# according to RFC 2822. Character range is from tilde to exclamation mark. +# For use with .match() +fcre = re.compile(r'[\041-\176]+:$') + +# Find a header embedded in a putative header value. Used to check for +# header injection attack. +_embeded_header = re.compile(r'\n[^ \t]+:') + + +def decode_header(header): + """Decode a message header value without converting charset. + + Returns a list of (string, charset) pairs containing each of the decoded + parts of the header. Charset is None for non-encoded parts of the header, + otherwise a lower-case string containing the name of the character set + specified in the encoded string. + + header may be a string that may or may not contain RFC2047 encoded words, + or it may be a Header object. + + An email.errors.HeaderParseError may be raised when certain decoding error + occurs (e.g. a base64 decoding exception). + """ + # If it is a Header object, we can just return the encoded chunks. + if hasattr(header, '_chunks'): + return [(_charset._encode(string, str(charset)), str(charset)) + for string, charset in header._chunks] + # If no encoding, just return the header with no charset. + if not ecre.search(header): + return [(header, None)] + # First step is to parse all the encoded parts into triplets of the form + # (encoded_string, encoding, charset). For unencoded strings, the last + # two parts will be None. + words = [] + for line in header.splitlines(): + parts = ecre.split(line) + first = True + while parts: + unencoded = parts.pop(0) + if first: + unencoded = unencoded.lstrip() + first = False + if unencoded: + words.append((unencoded, None, None)) + if parts: + charset = parts.pop(0).lower() + encoding = parts.pop(0).lower() + encoded = parts.pop(0) + words.append((encoded, encoding, charset)) + # Now loop over words and remove words that consist of whitespace + # between two encoded strings. + import sys + droplist = [] + for n, w in enumerate(words): + if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace(): + droplist.append(n-1) + for d in reversed(droplist): + del words[d] + + # The next step is to decode each encoded word by applying the reverse + # base64 or quopri transformation. decoded_words is now a list of the + # form (decoded_word, charset). + decoded_words = [] + for encoded_string, encoding, charset in words: + if encoding is None: + # This is an unencoded word. 
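+            # (charset stays None here; that is how unencoded runs are
+            #  reported in the (string, charset) pairs this function
+            #  returns, per the docstring above.)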
+ decoded_words.append((encoded_string, charset)) + elif encoding == 'q': + word = header_decode(encoded_string) + decoded_words.append((word, charset)) + elif encoding == 'b': + paderr = len(encoded_string) % 4 # Postel's law: add missing padding + if paderr: + encoded_string += '==='[:4 - paderr] + try: + word = base64mime.decode(encoded_string) + except binascii.Error: + raise HeaderParseError('Base64 decoding error') + else: + decoded_words.append((word, charset)) + else: + raise AssertionError('Unexpected encoding: ' + encoding) + # Now convert all words to bytes and collapse consecutive runs of + # similarly encoded words. + collapsed = [] + last_word = last_charset = None + for word, charset in decoded_words: + if isinstance(word, str): + word = bytes(word, 'raw-unicode-escape') + if last_word is None: + last_word = word + last_charset = charset + elif charset != last_charset: + collapsed.append((last_word, last_charset)) + last_word = word + last_charset = charset + elif last_charset is None: + last_word += BSPACE + word + else: + last_word += word + collapsed.append((last_word, last_charset)) + return collapsed + + +def make_header(decoded_seq, maxlinelen=None, header_name=None, + continuation_ws=' '): + """Create a Header from a sequence of pairs as returned by decode_header() + + decode_header() takes a header value string and returns a sequence of + pairs of the format (decoded_string, charset) where charset is the string + name of the character set. + + This function takes one of those sequence of pairs and returns a Header + instance. Optional maxlinelen, header_name, and continuation_ws are as in + the Header constructor. + """ + h = Header(maxlinelen=maxlinelen, header_name=header_name, + continuation_ws=continuation_ws) + for s, charset in decoded_seq: + # None means us-ascii but we can simply pass it on to h.append() + if charset is not None and not isinstance(charset, Charset): + charset = Charset(charset) + h.append(s, charset) + return h + + +class Header(object): + def __init__(self, s=None, charset=None, + maxlinelen=None, header_name=None, + continuation_ws=' ', errors='strict'): + """Create a MIME-compliant header that can contain many character sets. + + Optional s is the initial header value. If None, the initial header + value is not set. You can later append to the header with .append() + method calls. s may be a byte string or a Unicode string, but see the + .append() documentation for semantics. + + Optional charset serves two purposes: it has the same meaning as the + charset argument to the .append() method. It also sets the default + character set for all subsequent .append() calls that omit the charset + argument. If charset is not provided in the constructor, the us-ascii + charset is used both as s's initial charset and as the default for + subsequent .append() calls. + + The maximum line length can be specified explicitly via maxlinelen. For + splitting the first line to a shorter value (to account for the field + header which isn't included in s, e.g. `Subject') pass in the name of + the field in header_name. The default maxlinelen is 78 as recommended + by RFC 2822. + + continuation_ws must be RFC 2822 compliant folding whitespace (usually + either a space or a hard tab) which will be prepended to continuation + lines. + + errors is passed through to the .append() call. 
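+
+        A minimal usage sketch (illustrative only)::
+
+            h = Header('Hello World!', header_name='Subject')
+            folded = h.encode(maxlinelen=78)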
+ """ + if charset is None: + charset = USASCII + elif not isinstance(charset, Charset): + charset = Charset(charset) + self._charset = charset + self._continuation_ws = continuation_ws + self._chunks = [] + if s is not None: + self.append(s, charset, errors) + if maxlinelen is None: + maxlinelen = MAXLINELEN + self._maxlinelen = maxlinelen + if header_name is None: + self._headerlen = 0 + else: + # Take the separating colon and space into account. + self._headerlen = len(header_name) + 2 + + def __str__(self): + """Return the string value of the header.""" + self._normalize() + uchunks = [] + lastcs = None + lastspace = None + for string, charset in self._chunks: + # We must preserve spaces between encoded and non-encoded word + # boundaries, which means for us we need to add a space when we go + # from a charset to None/us-ascii, or from None/us-ascii to a + # charset. Only do this for the second and subsequent chunks. + # Don't add a space if the None/us-ascii string already has + # a space (trailing or leading depending on transition) + nextcs = charset + if nextcs == _charset.UNKNOWN8BIT: + original_bytes = string.encode('ascii', 'surrogateescape') + string = original_bytes.decode('ascii', 'replace') + if uchunks: + hasspace = string and self._nonctext(string[0]) + if lastcs not in (None, 'us-ascii'): + if nextcs in (None, 'us-ascii') and not hasspace: + uchunks.append(SPACE) + nextcs = None + elif nextcs not in (None, 'us-ascii') and not lastspace: + uchunks.append(SPACE) + lastspace = string and self._nonctext(string[-1]) + lastcs = nextcs + uchunks.append(string) + return EMPTYSTRING.join(uchunks) + + # Rich comparison operators for equality only. BAW: does it make sense to + # have or explicitly disable <, <=, >, >= operators? + def __eq__(self, other): + # other may be a Header or a string. Both are fine so coerce + # ourselves to a unicode (of the unencoded header value), swap the + # args and do another comparison. + return other == str(self) + + def __ne__(self, other): + return not self == other + + def append(self, s, charset=None, errors='strict'): + """Append a string to the MIME header. + + Optional charset, if given, should be a Charset instance or the name + of a character set (which will be converted to a Charset instance). A + value of None (the default) means that the charset given in the + constructor is used. + + s may be a byte string or a Unicode string. If it is a byte string + (i.e. isinstance(s, str) is false), then charset is the encoding of + that byte string, and a UnicodeError will be raised if the string + cannot be decoded with that charset. If s is a Unicode string, then + charset is a hint specifying the character set of the characters in + the string. In either case, when producing an RFC 2822 compliant + header using RFC 2047 rules, the string will be encoded using the + output codec of the charset. If the string cannot be encoded to the + output codec, a UnicodeError will be raised. + + Optional `errors' is passed as the errors argument to the decode + call if s is a byte string. + """ + if charset is None: + charset = self._charset + elif not isinstance(charset, Charset): + charset = Charset(charset) + if not isinstance(s, str): + input_charset = charset.input_codec or 'us-ascii' + if input_charset == _charset.UNKNOWN8BIT: + s = s.decode('us-ascii', 'surrogateescape') + else: + s = s.decode(input_charset, errors) + # Ensure that the bytes we're storing can be decoded to the output + # character set, otherwise an early error is raised. 
+ output_charset = charset.output_codec or 'us-ascii' + if output_charset != _charset.UNKNOWN8BIT: + try: + s.encode(output_charset, errors) + except UnicodeEncodeError: + if output_charset!='us-ascii': + raise + charset = UTF8 + self._chunks.append((s, charset)) + + def _nonctext(self, s): + """True if string s is not a ctext character of RFC822. + """ + return s.isspace() or s in ('(', ')', '\\') + + def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'): + r"""Encode a message header into an RFC-compliant format. + + There are many issues involved in converting a given string for use in + an email header. Only certain character sets are readable in most + email clients, and as header strings can only contain a subset of + 7-bit ASCII, care must be taken to properly convert and encode (with + Base64 or quoted-printable) header strings. In addition, there is a + 75-character length limit on any given encoded header field, so + line-wrapping must be performed, even with double-byte character sets. + + Optional maxlinelen specifies the maximum length of each generated + line, exclusive of the linesep string. Individual lines may be longer + than maxlinelen if a folding point cannot be found. The first line + will be shorter by the length of the header name plus ": " if a header + name was specified at Header construction time. The default value for + maxlinelen is determined at header construction time. + + Optional splitchars is a string containing characters which should be + given extra weight by the splitting algorithm during normal header + wrapping. This is in very rough support of RFC 2822's `higher level + syntactic breaks': split points preceded by a splitchar are preferred + during line splitting, with the characters preferred in the order in + which they appear in the string. Space and tab may be included in the + string to indicate whether preference should be given to one over the + other as a split point when other split chars do not appear in the line + being split. Splitchars does not affect RFC 2047 encoded lines. + + Optional linesep is a string to be used to separate the lines of + the value. The default value is the most useful for typical + Python applications, but it can be set to \r\n to produce RFC-compliant + line separators when needed. + """ + self._normalize() + if maxlinelen is None: + maxlinelen = self._maxlinelen + # A maxlinelen of 0 means don't wrap. For all practical purposes, + # choosing a huge number here accomplishes that and makes the + # _ValueFormatter algorithm much simpler. 
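+        # (So, illustratively, Header('x' * 300).encode(maxlinelen=0)
+        #  comes back as a single unfolded line.)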
+ if maxlinelen == 0: + maxlinelen = 1000000 + formatter = _ValueFormatter(self._headerlen, maxlinelen, + self._continuation_ws, splitchars) + lastcs = None + hasspace = lastspace = None + for string, charset in self._chunks: + if hasspace is not None: + hasspace = string and self._nonctext(string[0]) + import sys + if lastcs not in (None, 'us-ascii'): + if not hasspace or charset not in (None, 'us-ascii'): + formatter.add_transition() + elif charset not in (None, 'us-ascii') and not lastspace: + formatter.add_transition() + lastspace = string and self._nonctext(string[-1]) + lastcs = charset + hasspace = False + lines = string.splitlines() + if lines: + formatter.feed('', lines[0], charset) + else: + formatter.feed('', '', charset) + for line in lines[1:]: + formatter.newline() + if charset.header_encoding is not None: + formatter.feed(self._continuation_ws, ' ' + line.lstrip(), + charset) + else: + sline = line.lstrip() + fws = line[:len(line)-len(sline)] + formatter.feed(fws, sline, charset) + if len(lines) > 1: + formatter.newline() + if self._chunks: + formatter.add_transition() + value = formatter._str(linesep) + if _embeded_header.search(value): + raise HeaderParseError("header value appears to contain " + "an embedded header: {!r}".format(value)) + return value + + def _normalize(self): + # Step 1: Normalize the chunks so that all runs of identical charsets + # get collapsed into a single unicode string. + chunks = [] + last_charset = None + last_chunk = [] + for string, charset in self._chunks: + if charset == last_charset: + last_chunk.append(string) + else: + if last_charset is not None: + chunks.append((SPACE.join(last_chunk), last_charset)) + last_chunk = [string] + last_charset = charset + if last_chunk: + chunks.append((SPACE.join(last_chunk), last_charset)) + self._chunks = chunks + + +class _ValueFormatter(object): + def __init__(self, headerlen, maxlen, continuation_ws, splitchars): + self._maxlen = maxlen + self._continuation_ws = continuation_ws + self._continuation_ws_len = len(continuation_ws) + self._splitchars = splitchars + self._lines = [] + self._current_line = _Accumulator(headerlen) + + def _str(self, linesep): + self.newline() + return linesep.join(self._lines) + + def __str__(self): + return self._str(NL) + + def newline(self): + end_of_line = self._current_line.pop() + if end_of_line != (' ', ''): + self._current_line.push(*end_of_line) + if len(self._current_line) > 0: + if self._current_line.is_onlyws(): + self._lines[-1] += str(self._current_line) + else: + self._lines.append(str(self._current_line)) + self._current_line.reset() + + def add_transition(self): + self._current_line.push(' ', '') + + def feed(self, fws, string, charset): + # If the charset has no header encoding (i.e. it is an ASCII encoding) + # then we must split the header at the "highest level syntactic break" + # possible. Note that we don't have a lot of smarts about field + # syntax; we just try to break on semi-colons, then commas, then + # whitespace. Eventually, this should be pluggable. + if charset.header_encoding is None: + self._ascii_split(fws, string, self._splitchars) + return + # Otherwise, we're doing either a Base64 or a quoted-printable + # encoding which means we don't need to split the line on syntactic + # breaks. We can basically just find enough characters to fit on the + # current line, minus the RFC 2047 chrome. 
What makes this trickier + # though is that we have to split at octet boundaries, not character + # boundaries but it's only safe to split at character boundaries so at + # best we can only get close. + encoded_lines = charset.header_encode_lines(string, self._maxlengths()) + # The first element extends the current line, but if it's None then + # nothing more fit on the current line so start a new line. + try: + first_line = encoded_lines.pop(0) + except IndexError: + # There are no encoded lines, so we're done. + return + if first_line is not None: + self._append_chunk(fws, first_line) + try: + last_line = encoded_lines.pop() + except IndexError: + # There was only one line. + return + self.newline() + self._current_line.push(self._continuation_ws, last_line) + # Everything else are full lines in themselves. + for line in encoded_lines: + self._lines.append(self._continuation_ws + line) + + def _maxlengths(self): + # The first line's length. + yield self._maxlen - len(self._current_line) + while True: + yield self._maxlen - self._continuation_ws_len + + def _ascii_split(self, fws, string, splitchars): + # The RFC 2822 header folding algorithm is simple in principle but + # complex in practice. Lines may be folded any place where "folding + # white space" appears by inserting a linesep character in front of the + # FWS. The complication is that not all spaces or tabs qualify as FWS, + # and we are also supposed to prefer to break at "higher level + # syntactic breaks". We can't do either of these without intimate + # knowledge of the structure of structured headers, which we don't have + # here. So the best we can do here is prefer to break at the specified + # splitchars, and hope that we don't choose any spaces or tabs that + # aren't legal FWS. (This is at least better than the old algorithm, + # where we would sometimes *introduce* FWS after a splitchar, or the + # algorithm before that, where we would turn all white space runs into + # single spaces or tabs.) + parts = re.split("(["+FWS+"]+)", fws+string) + if parts[0]: + parts[:0] = [''] + else: + parts.pop(0) + for fws, part in zip(*[iter(parts)]*2): + self._append_chunk(fws, part) + + def _append_chunk(self, fws, string): + self._current_line.push(fws, string) + if len(self._current_line) > self._maxlen: + # Find the best split point, working backward from the end. + # There might be none, on a long first line. + for ch in self._splitchars: + for i in range(self._current_line.part_count()-1, 0, -1): + if ch.isspace(): + fws = self._current_line[i][0] + if fws and fws[0]==ch: + break + prevpart = self._current_line[i-1][1] + if prevpart and prevpart[-1]==ch: + break + else: + continue + break + else: + fws, part = self._current_line.pop() + if self._current_line._initial_size > 0: + # There will be a header, so leave it on a line by itself. + self.newline() + if not fws: + # We don't use continuation_ws here because the whitespace + # after a header should always be a space. 
+ fws = ' ' + self._current_line.push(fws, part) + return + remainder = self._current_line.pop_from(i) + self._lines.append(str(self._current_line)) + self._current_line.reset(remainder) + + +class _Accumulator(list): + + def __init__(self, initial_size=0): + self._initial_size = initial_size + super().__init__() + + def push(self, fws, string): + self.append((fws, string)) + + def pop_from(self, i=0): + popped = self[i:] + self[i:] = [] + return popped + + def pop(self): + if self.part_count()==0: + return ('', '') + return super().pop() + + def __len__(self): + return sum((len(fws)+len(part) for fws, part in self), + self._initial_size) + + def __str__(self): + return EMPTYSTRING.join((EMPTYSTRING.join((fws, part)) + for fws, part in self)) + + def reset(self, startval=None): + if startval is None: + startval = [] + self[:] = startval + self._initial_size = 0 + + def is_onlyws(self): + return self._initial_size==0 and (not self or str(self).isspace()) + + def part_count(self): + return super().__len__() diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/headerregistry.py b/pype/modules/ftrack/python2_vendor/future/backports/email/headerregistry.py new file mode 100644 index 0000000000..9aaad65a14 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/headerregistry.py @@ -0,0 +1,592 @@ +"""Representing and manipulating email headers via custom objects. + +This module provides an implementation of the HeaderRegistry API. +The implementation is designed to flexibly follow RFC5322 rules. + +Eventually HeaderRegistry will be a public API, but it isn't yet, +and will probably change some before that happens. + +""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +from future.builtins import super +from future.builtins import str +from future.utils import text_to_native_str +from future.backports.email import utils +from future.backports.email import errors +from future.backports.email import _header_value_parser as parser + +class Address(object): + + def __init__(self, display_name='', username='', domain='', addr_spec=None): + """Create an object represeting a full email address. + + An address can have a 'display_name', a 'username', and a 'domain'. In + addition to specifying the username and domain separately, they may be + specified together by using the addr_spec keyword *instead of* the + username and domain keywords. If an addr_spec string is specified it + must be properly quoted according to RFC 5322 rules; an error will be + raised if it is not. + + An Address object has display_name, username, domain, and addr_spec + attributes, all of which are read-only. The addr_spec and the string + value of the object are both quoted according to RFC5322 rules, but + without any Content Transfer Encoding. + + """ + # This clause with its potential 'raise' may only happen when an + # application program creates an Address object using an addr_spec + # keyword. The email library code itself must always supply username + # and domain. 
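+        # (Illustrative: Address(display_name='Jane',
+        #  addr_spec='jane@example.com') ends up with username='jane'
+        #  and domain='example.com'.)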
+ if addr_spec is not None: + if username or domain: + raise TypeError("addrspec specified when username and/or " + "domain also specified") + a_s, rest = parser.get_addr_spec(addr_spec) + if rest: + raise ValueError("Invalid addr_spec; only '{}' " + "could be parsed from '{}'".format( + a_s, addr_spec)) + if a_s.all_defects: + raise a_s.all_defects[0] + username = a_s.local_part + domain = a_s.domain + self._display_name = display_name + self._username = username + self._domain = domain + + @property + def display_name(self): + return self._display_name + + @property + def username(self): + return self._username + + @property + def domain(self): + return self._domain + + @property + def addr_spec(self): + """The addr_spec (username@domain) portion of the address, quoted + according to RFC 5322 rules, but with no Content Transfer Encoding. + """ + nameset = set(self.username) + if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS): + lp = parser.quote_string(self.username) + else: + lp = self.username + if self.domain: + return lp + '@' + self.domain + if not lp: + return '<>' + return lp + + def __repr__(self): + return "Address(display_name={!r}, username={!r}, domain={!r})".format( + self.display_name, self.username, self.domain) + + def __str__(self): + nameset = set(self.display_name) + if len(nameset) > len(nameset-parser.SPECIALS): + disp = parser.quote_string(self.display_name) + else: + disp = self.display_name + if disp: + addr_spec = '' if self.addr_spec=='<>' else self.addr_spec + return "{} <{}>".format(disp, addr_spec) + return self.addr_spec + + def __eq__(self, other): + if type(other) != type(self): + return False + return (self.display_name == other.display_name and + self.username == other.username and + self.domain == other.domain) + + +class Group(object): + + def __init__(self, display_name=None, addresses=None): + """Create an object representing an address group. + + An address group consists of a display_name followed by colon and an + list of addresses (see Address) terminated by a semi-colon. The Group + is created by specifying a display_name and a possibly empty list of + Address objects. A Group can also be used to represent a single + address that is not in a group, which is convenient when manipulating + lists that are a combination of Groups and individual Addresses. In + this case the display_name should be set to None. In particular, the + string representation of a Group whose display_name is None is the same + as the Address object, if there is one and only one Address object in + the addresses list. 
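+
+        For example (illustrative), str(Group('undisclosed-recipients', []))
+        renders as 'undisclosed-recipients:;'.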
+ + """ + self._display_name = display_name + self._addresses = tuple(addresses) if addresses else tuple() + + @property + def display_name(self): + return self._display_name + + @property + def addresses(self): + return self._addresses + + def __repr__(self): + return "Group(display_name={!r}, addresses={!r}".format( + self.display_name, self.addresses) + + def __str__(self): + if self.display_name is None and len(self.addresses)==1: + return str(self.addresses[0]) + disp = self.display_name + if disp is not None: + nameset = set(disp) + if len(nameset) > len(nameset-parser.SPECIALS): + disp = parser.quote_string(disp) + adrstr = ", ".join(str(x) for x in self.addresses) + adrstr = ' ' + adrstr if adrstr else adrstr + return "{}:{};".format(disp, adrstr) + + def __eq__(self, other): + if type(other) != type(self): + return False + return (self.display_name == other.display_name and + self.addresses == other.addresses) + + +# Header Classes # + +class BaseHeader(str): + + """Base class for message headers. + + Implements generic behavior and provides tools for subclasses. + + A subclass must define a classmethod named 'parse' that takes an unfolded + value string and a dictionary as its arguments. The dictionary will + contain one key, 'defects', initialized to an empty list. After the call + the dictionary must contain two additional keys: parse_tree, set to the + parse tree obtained from parsing the header, and 'decoded', set to the + string value of the idealized representation of the data from the value. + (That is, encoded words are decoded, and values that have canonical + representations are so represented.) + + The defects key is intended to collect parsing defects, which the message + parser will subsequently dispose of as appropriate. The parser should not, + insofar as practical, raise any errors. Defects should be added to the + list instead. The standard header parsers register defects for RFC + compliance issues, for obsolete RFC syntax, and for unrecoverable parsing + errors. + + The parse method may add additional keys to the dictionary. In this case + the subclass must define an 'init' method, which will be passed the + dictionary as its keyword arguments. The method should use (usually by + setting them as the value of similarly named attributes) and remove all the + extra keys added by its parse method, and then use super to call its parent + class with the remaining arguments and keywords. + + The subclass should also make sure that a 'max_count' attribute is defined + that is either None or 1. XXX: need to better define this API. 
+ + """ + + def __new__(cls, name, value): + kwds = {'defects': []} + cls.parse(value, kwds) + if utils._has_surrogates(kwds['decoded']): + kwds['decoded'] = utils._sanitize(kwds['decoded']) + self = str.__new__(cls, kwds['decoded']) + # del kwds['decoded'] + self.init(name, **kwds) + return self + + def init(self, name, **_3to2kwargs): + defects = _3to2kwargs['defects']; del _3to2kwargs['defects'] + parse_tree = _3to2kwargs['parse_tree']; del _3to2kwargs['parse_tree'] + self._name = name + self._parse_tree = parse_tree + self._defects = defects + + @property + def name(self): + return self._name + + @property + def defects(self): + return tuple(self._defects) + + def __reduce__(self): + return ( + _reconstruct_header, + ( + self.__class__.__name__, + self.__class__.__bases__, + str(self), + ), + self.__dict__) + + @classmethod + def _reconstruct(cls, value): + return str.__new__(cls, value) + + def fold(self, **_3to2kwargs): + policy = _3to2kwargs['policy']; del _3to2kwargs['policy'] + """Fold header according to policy. + + The parsed representation of the header is folded according to + RFC5322 rules, as modified by the policy. If the parse tree + contains surrogateescaped bytes, the bytes are CTE encoded using + the charset 'unknown-8bit". + + Any non-ASCII characters in the parse tree are CTE encoded using + charset utf-8. XXX: make this a policy setting. + + The returned value is an ASCII-only string possibly containing linesep + characters, and ending with a linesep character. The string includes + the header name and the ': ' separator. + + """ + # At some point we need to only put fws here if it was in the source. + header = parser.Header([ + parser.HeaderLabel([ + parser.ValueTerminal(self.name, 'header-name'), + parser.ValueTerminal(':', 'header-sep')]), + parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), + self._parse_tree]) + return header.fold(policy=policy) + + +def _reconstruct_header(cls_name, bases, value): + return type(text_to_native_str(cls_name), bases, {})._reconstruct(value) + + +class UnstructuredHeader(object): + + max_count = None + value_parser = staticmethod(parser.get_unstructured) + + @classmethod + def parse(cls, value, kwds): + kwds['parse_tree'] = cls.value_parser(value) + kwds['decoded'] = str(kwds['parse_tree']) + + +class UniqueUnstructuredHeader(UnstructuredHeader): + + max_count = 1 + + +class DateHeader(object): + + """Header whose value consists of a single timestamp. + + Provides an additional attribute, datetime, which is either an aware + datetime using a timezone, or a naive datetime if the timezone + in the input string is -0000. Also accepts a datetime as input. + The 'value' attribute is the normalized form of the timestamp, + which means it is the output of format_datetime on the datetime. + """ + + max_count = None + + # This is used only for folding, not for creating 'decoded'. 
+ value_parser = staticmethod(parser.get_unstructured) + + @classmethod + def parse(cls, value, kwds): + if not value: + kwds['defects'].append(errors.HeaderMissingRequiredValue()) + kwds['datetime'] = None + kwds['decoded'] = '' + kwds['parse_tree'] = parser.TokenList() + return + if isinstance(value, str): + value = utils.parsedate_to_datetime(value) + kwds['datetime'] = value + kwds['decoded'] = utils.format_datetime(kwds['datetime']) + kwds['parse_tree'] = cls.value_parser(kwds['decoded']) + + def init(self, *args, **kw): + self._datetime = kw.pop('datetime') + super().init(*args, **kw) + + @property + def datetime(self): + return self._datetime + + +class UniqueDateHeader(DateHeader): + + max_count = 1 + + +class AddressHeader(object): + + max_count = None + + @staticmethod + def value_parser(value): + address_list, value = parser.get_address_list(value) + assert not value, 'this should not happen' + return address_list + + @classmethod + def parse(cls, value, kwds): + if isinstance(value, str): + # We are translating here from the RFC language (address/mailbox) + # to our API language (group/address). + kwds['parse_tree'] = address_list = cls.value_parser(value) + groups = [] + for addr in address_list.addresses: + groups.append(Group(addr.display_name, + [Address(mb.display_name or '', + mb.local_part or '', + mb.domain or '') + for mb in addr.all_mailboxes])) + defects = list(address_list.all_defects) + else: + # Assume it is Address/Group stuff + if not hasattr(value, '__iter__'): + value = [value] + groups = [Group(None, [item]) if not hasattr(item, 'addresses') + else item + for item in value] + defects = [] + kwds['groups'] = groups + kwds['defects'] = defects + kwds['decoded'] = ', '.join([str(item) for item in groups]) + if 'parse_tree' not in kwds: + kwds['parse_tree'] = cls.value_parser(kwds['decoded']) + + def init(self, *args, **kw): + self._groups = tuple(kw.pop('groups')) + self._addresses = None + super().init(*args, **kw) + + @property + def groups(self): + return self._groups + + @property + def addresses(self): + if self._addresses is None: + self._addresses = tuple([address for group in self._groups + for address in group.addresses]) + return self._addresses + + +class UniqueAddressHeader(AddressHeader): + + max_count = 1 + + +class SingleAddressHeader(AddressHeader): + + @property + def address(self): + if len(self.addresses)!=1: + raise ValueError(("value of single address header {} is not " + "a single address").format(self.name)) + return self.addresses[0] + + +class UniqueSingleAddressHeader(SingleAddressHeader): + + max_count = 1 + + +class MIMEVersionHeader(object): + + max_count = 1 + + value_parser = staticmethod(parser.parse_mime_version) + + @classmethod + def parse(cls, value, kwds): + kwds['parse_tree'] = parse_tree = cls.value_parser(value) + kwds['decoded'] = str(parse_tree) + kwds['defects'].extend(parse_tree.all_defects) + kwds['major'] = None if parse_tree.minor is None else parse_tree.major + kwds['minor'] = parse_tree.minor + if parse_tree.minor is not None: + kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor']) + else: + kwds['version'] = None + + def init(self, *args, **kw): + self._version = kw.pop('version') + self._major = kw.pop('major') + self._minor = kw.pop('minor') + super().init(*args, **kw) + + @property + def major(self): + return self._major + + @property + def minor(self): + return self._minor + + @property + def version(self): + return self._version + + +class ParameterizedMIMEHeader(object): + + # Mixin that handles 
the params dict. Must be subclassed and + # a property value_parser for the specific header provided. + + max_count = 1 + + @classmethod + def parse(cls, value, kwds): + kwds['parse_tree'] = parse_tree = cls.value_parser(value) + kwds['decoded'] = str(parse_tree) + kwds['defects'].extend(parse_tree.all_defects) + if parse_tree.params is None: + kwds['params'] = {} + else: + # The MIME RFCs specify that parameter ordering is arbitrary. + kwds['params'] = dict((utils._sanitize(name).lower(), + utils._sanitize(value)) + for name, value in parse_tree.params) + + def init(self, *args, **kw): + self._params = kw.pop('params') + super().init(*args, **kw) + + @property + def params(self): + return self._params.copy() + + +class ContentTypeHeader(ParameterizedMIMEHeader): + + value_parser = staticmethod(parser.parse_content_type_header) + + def init(self, *args, **kw): + super().init(*args, **kw) + self._maintype = utils._sanitize(self._parse_tree.maintype) + self._subtype = utils._sanitize(self._parse_tree.subtype) + + @property + def maintype(self): + return self._maintype + + @property + def subtype(self): + return self._subtype + + @property + def content_type(self): + return self.maintype + '/' + self.subtype + + +class ContentDispositionHeader(ParameterizedMIMEHeader): + + value_parser = staticmethod(parser.parse_content_disposition_header) + + def init(self, *args, **kw): + super().init(*args, **kw) + cd = self._parse_tree.content_disposition + self._content_disposition = cd if cd is None else utils._sanitize(cd) + + @property + def content_disposition(self): + return self._content_disposition + + +class ContentTransferEncodingHeader(object): + + max_count = 1 + + value_parser = staticmethod(parser.parse_content_transfer_encoding_header) + + @classmethod + def parse(cls, value, kwds): + kwds['parse_tree'] = parse_tree = cls.value_parser(value) + kwds['decoded'] = str(parse_tree) + kwds['defects'].extend(parse_tree.all_defects) + + def init(self, *args, **kw): + super().init(*args, **kw) + self._cte = utils._sanitize(self._parse_tree.cte) + + @property + def cte(self): + return self._cte + + +# The header factory # + +_default_header_map = { + 'subject': UniqueUnstructuredHeader, + 'date': UniqueDateHeader, + 'resent-date': DateHeader, + 'orig-date': UniqueDateHeader, + 'sender': UniqueSingleAddressHeader, + 'resent-sender': SingleAddressHeader, + 'to': UniqueAddressHeader, + 'resent-to': AddressHeader, + 'cc': UniqueAddressHeader, + 'resent-cc': AddressHeader, + 'bcc': UniqueAddressHeader, + 'resent-bcc': AddressHeader, + 'from': UniqueAddressHeader, + 'resent-from': AddressHeader, + 'reply-to': UniqueAddressHeader, + 'mime-version': MIMEVersionHeader, + 'content-type': ContentTypeHeader, + 'content-disposition': ContentDispositionHeader, + 'content-transfer-encoding': ContentTransferEncodingHeader, + } + +class HeaderRegistry(object): + + """A header_factory and header registry.""" + + def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader, + use_default_map=True): + """Create a header_factory that works with the Policy API. + + base_class is the class that will be the last class in the created + header class's __bases__ list. default_class is the class that will be + used if "name" (see __call__) does not appear in the registry. + use_default_map controls whether or not the default mapping of names to + specialized classes is copied in to the registry when the factory is + created. The default is True. 
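+
+        A minimal sketch of typical usage (illustrative only):
+
+            registry = HeaderRegistry()
+            h = registry('Subject', 'Hello')
+            # h is a str subclass: str(h) == 'Hello', h.name == 'Subject'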
+ + """ + self.registry = {} + self.base_class = base_class + self.default_class = default_class + if use_default_map: + self.registry.update(_default_header_map) + + def map_to_type(self, name, cls): + """Register cls as the specialized class for handling "name" headers. + + """ + self.registry[name.lower()] = cls + + def __getitem__(self, name): + cls = self.registry.get(name.lower(), self.default_class) + return type(text_to_native_str('_'+cls.__name__), (cls, self.base_class), {}) + + def __call__(self, name, value): + """Create a header instance for header 'name' from 'value'. + + Creates a header instance by creating a specialized class for parsing + and representing the specified header by combining the factory + base_class with a specialized class from the registry or the + default_class, and passing the name and value to the constructed + class's constructor. + + """ + return self[name](name, value) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/iterators.py b/pype/modules/ftrack/python2_vendor/future/backports/email/iterators.py new file mode 100644 index 0000000000..82d320f814 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/iterators.py @@ -0,0 +1,74 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Various types of useful iterators and generators.""" +from __future__ import print_function +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = [ + 'body_line_iterator', + 'typed_subpart_iterator', + 'walk', + # Do not include _structure() since it's part of the debugging API. + ] + +import sys +from io import StringIO + + +# This function will become a method of the Message class +def walk(self): + """Walk over the message tree, yielding each subpart. + + The walk is performed in depth-first order. This method is a + generator. + """ + yield self + if self.is_multipart(): + for subpart in self.get_payload(): + for subsubpart in subpart.walk(): + yield subsubpart + + +# These two functions are imported into the Iterators.py interface module. +def body_line_iterator(msg, decode=False): + """Iterate over the parts, returning string payloads line-by-line. + + Optional decode (default False) is passed through to .get_payload(). + """ + for subpart in msg.walk(): + payload = subpart.get_payload(decode=decode) + if isinstance(payload, str): + for line in StringIO(payload): + yield line + + +def typed_subpart_iterator(msg, maintype='text', subtype=None): + """Iterate over the subparts with a given MIME type. + + Use `maintype' as the main MIME type to match against; this defaults to + "text". Optional `subtype' is the MIME subtype to match against; if + omitted, only the main type is matched. 
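+
+    Example (illustrative):
+
+        for part in typed_subpart_iterator(msg, 'text', 'plain'):
+            print(part.get_payload())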
+ """ + for subpart in msg.walk(): + if subpart.get_content_maintype() == maintype: + if subtype is None or subpart.get_content_subtype() == subtype: + yield subpart + + +def _structure(msg, fp=None, level=0, include_default=False): + """A handy debugging aid""" + if fp is None: + fp = sys.stdout + tab = ' ' * (level * 4) + print(tab + msg.get_content_type(), end='', file=fp) + if include_default: + print(' [%s]' % msg.get_default_type(), file=fp) + else: + print(file=fp) + if msg.is_multipart(): + for subpart in msg.get_payload(): + _structure(subpart, fp, level+1, include_default) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/message.py b/pype/modules/ftrack/python2_vendor/future/backports/email/message.py new file mode 100644 index 0000000000..d8d9615d7d --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/message.py @@ -0,0 +1,882 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2001-2007 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Basic message object for the email package object model.""" +from __future__ import absolute_import, division, unicode_literals +from future.builtins import list, range, str, zip + +__all__ = ['Message'] + +import re +import uu +import base64 +import binascii +from io import BytesIO, StringIO + +# Intrapackage imports +from future.utils import as_native_str +from future.backports.email import utils +from future.backports.email import errors +from future.backports.email._policybase import compat32 +from future.backports.email import charset as _charset +from future.backports.email._encoded_words import decode_b +Charset = _charset.Charset + +SEMISPACE = '; ' + +# Regular expression that matches `special' characters in parameters, the +# existence of which force quoting of the parameter value. +tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]') + + +def _splitparam(param): + # Split header parameters. BAW: this may be too simple. It isn't + # strictly RFC 2045 (section 5.1) compliant, but it catches most headers + # found in the wild. We may eventually need a full fledged parser. + # RDM: we might have a Header here; for now just stringify it. + a, sep, b = str(param).partition(';') + if not sep: + return a.strip(), None + return a.strip(), b.strip() + +def _formatparam(param, value=None, quote=True): + """Convenience function to format and return a key=value pair. + + This will quote the value if needed or if quote is true. If value is a + three tuple (charset, language, value), it will be encoded according + to RFC2231 rules. If it contains non-ascii characters it will likewise + be encoded according to RFC2231 rules, using the utf-8 charset and + a null language. + """ + if value is not None and len(value) > 0: + # A tuple is used for RFC 2231 encoded parameter values where items + # are (charset, language, value). charset is a string, not a Charset + # instance. RFC 2231 encoded values are never quoted, per RFC. + if isinstance(value, tuple): + # Encode as per RFC 2231 + param += '*' + value = utils.encode_rfc2231(value[2], value[0], value[1]) + return '%s=%s' % (param, value) + else: + try: + value.encode('ascii') + except UnicodeEncodeError: + param += '*' + value = utils.encode_rfc2231(value, 'utf-8', '') + return '%s=%s' % (param, value) + # BAW: Please check this. I think that if quote is set it should + # force quoting even if not necessary. 
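+        # Illustration: with the default quote=True this branch always
+        # quotes, e.g. _formatparam('filename', 'bud.gif') yields
+        # 'filename="bud.gif"'; with quote=False the value is only quoted
+        # when it contains one of the tspecials characters above.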
+ if quote or tspecials.search(value): + return '%s="%s"' % (param, utils.quote(value)) + else: + return '%s=%s' % (param, value) + else: + return param + +def _parseparam(s): + # RDM This might be a Header, so for now stringify it. + s = ';' + str(s) + plist = [] + while s[:1] == ';': + s = s[1:] + end = s.find(';') + while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: + end = s.find(';', end + 1) + if end < 0: + end = len(s) + f = s[:end] + if '=' in f: + i = f.index('=') + f = f[:i].strip().lower() + '=' + f[i+1:].strip() + plist.append(f.strip()) + s = s[end:] + return plist + + +def _unquotevalue(value): + # This is different than utils.collapse_rfc2231_value() because it doesn't + # try to convert the value to a unicode. Message.get_param() and + # Message.get_params() are both currently defined to return the tuple in + # the face of RFC 2231 parameters. + if isinstance(value, tuple): + return value[0], value[1], utils.unquote(value[2]) + else: + return utils.unquote(value) + + +class Message(object): + """Basic message object. + + A message object is defined as something that has a bunch of RFC 2822 + headers and a payload. It may optionally have an envelope header + (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a + multipart or a message/rfc822), then the payload is a list of Message + objects, otherwise it is a string. + + Message objects implement part of the `mapping' interface, which assumes + there is exactly one occurrence of the header per message. Some headers + do in fact appear multiple times (e.g. Received) and for those headers, + you must use the explicit API to set or get all the headers. Not all of + the mapping methods are implemented. + """ + def __init__(self, policy=compat32): + self.policy = policy + self._headers = list() + self._unixfrom = None + self._payload = None + self._charset = None + # Defaults for multipart messages + self.preamble = self.epilogue = None + self.defects = [] + # Default content type + self._default_type = 'text/plain' + + @as_native_str(encoding='utf-8') + def __str__(self): + """Return the entire formatted message as a string. + This includes the headers, body, and envelope header. + """ + return self.as_string() + + def as_string(self, unixfrom=False, maxheaderlen=0): + """Return the entire formatted message as a (unicode) string. + Optional `unixfrom' when True, means include the Unix From_ envelope + header. + + This is a convenience method and may not generate the message exactly + as you intend. For more flexibility, use the flatten() method of a + Generator instance. + """ + from future.backports.email.generator import Generator + fp = StringIO() + g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen) + g.flatten(self, unixfrom=unixfrom) + return fp.getvalue() + + def is_multipart(self): + """Return True if the message consists of multiple parts.""" + return isinstance(self._payload, list) + + # + # Unix From_ line + # + def set_unixfrom(self, unixfrom): + self._unixfrom = unixfrom + + def get_unixfrom(self): + return self._unixfrom + + # + # Payload manipulation. + # + def attach(self, payload): + """Add the given payload to the current payload. + + The current payload will always be a list of objects after this method + is called. If you want to set the payload to a scalar object, use + set_payload() instead. 
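+
+        Example (illustrative):
+
+            container = Message()
+            container.attach(Message())
+            # container.is_multipart() is now True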
+ """ + if self._payload is None: + self._payload = [payload] + else: + self._payload.append(payload) + + def get_payload(self, i=None, decode=False): + """Return a reference to the payload. + + The payload will either be a list object or a string. If you mutate + the list object, you modify the message's payload in place. Optional + i returns that index into the payload. + + Optional decode is a flag indicating whether the payload should be + decoded or not, according to the Content-Transfer-Encoding header + (default is False). + + When True and the message is not a multipart, the payload will be + decoded if this header's value is `quoted-printable' or `base64'. If + some other encoding is used, or the header is missing, or if the + payload has bogus data (i.e. bogus base64 or uuencoded data), the + payload is returned as-is. + + If the message is a multipart and the decode flag is True, then None + is returned. + """ + # Here is the logic table for this code, based on the email5.0.0 code: + # i decode is_multipart result + # ------ ------ ------------ ------------------------------ + # None True True None + # i True True None + # None False True _payload (a list) + # i False True _payload element i (a Message) + # i False False error (not a list) + # i True False error (not a list) + # None False False _payload + # None True False _payload decoded (bytes) + # Note that Barry planned to factor out the 'decode' case, but that + # isn't so easy now that we handle the 8 bit data, which needs to be + # converted in both the decode and non-decode path. + if self.is_multipart(): + if decode: + return None + if i is None: + return self._payload + else: + return self._payload[i] + # For backward compatibility, Use isinstance and this error message + # instead of the more logical is_multipart test. + if i is not None and not isinstance(self._payload, list): + raise TypeError('Expected list, got %s' % type(self._payload)) + payload = self._payload + # cte might be a Header, so for now stringify it. + cte = str(self.get('content-transfer-encoding', '')).lower() + # payload may be bytes here. + if isinstance(payload, str): + payload = str(payload) # for Python-Future, so surrogateescape works + if utils._has_surrogates(payload): + bpayload = payload.encode('ascii', 'surrogateescape') + if not decode: + try: + payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace') + except LookupError: + payload = bpayload.decode('ascii', 'replace') + elif decode: + try: + bpayload = payload.encode('ascii') + except UnicodeError: + # This won't happen for RFC compliant messages (messages + # containing only ASCII codepoints in the unicode input). + # If it does happen, turn the string into bytes in a way + # guaranteed not to fail. + bpayload = payload.encode('raw-unicode-escape') + if not decode: + return payload + if cte == 'quoted-printable': + return utils._qdecode(bpayload) + elif cte == 'base64': + # XXX: this is a bit of a hack; decode_b should probably be factored + # out somewhere, but I haven't figured out where yet. 
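+            # Unlike base64.b64decode, decode_b tolerates missing padding
+            # and stray non-base64 bytes, reporting them as defect objects
+            # instead of raising an exception.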
+ value, defects = decode_b(b''.join(bpayload.splitlines())) + for defect in defects: + self.policy.handle_defect(self, defect) + return value + elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'): + in_file = BytesIO(bpayload) + out_file = BytesIO() + try: + uu.decode(in_file, out_file, quiet=True) + return out_file.getvalue() + except uu.Error: + # Some decoding problem + return bpayload + if isinstance(payload, str): + return bpayload + return payload + + def set_payload(self, payload, charset=None): + """Set the payload to the given value. + + Optional charset sets the message's default character set. See + set_charset() for details. + """ + self._payload = payload + if charset is not None: + self.set_charset(charset) + + def set_charset(self, charset): + """Set the charset of the payload to a given character set. + + charset can be a Charset instance, a string naming a character set, or + None. If it is a string it will be converted to a Charset instance. + If charset is None, the charset parameter will be removed from the + Content-Type field. Anything else will generate a TypeError. + + The message will be assumed to be of type text/* encoded with + charset.input_charset. It will be converted to charset.output_charset + and encoded properly, if needed, when generating the plain text + representation of the message. MIME headers (MIME-Version, + Content-Type, Content-Transfer-Encoding) will be added as needed. + """ + if charset is None: + self.del_param('charset') + self._charset = None + return + if not isinstance(charset, Charset): + charset = Charset(charset) + self._charset = charset + if 'MIME-Version' not in self: + self.add_header('MIME-Version', '1.0') + if 'Content-Type' not in self: + self.add_header('Content-Type', 'text/plain', + charset=charset.get_output_charset()) + else: + self.set_param('charset', charset.get_output_charset()) + if charset != charset.get_output_charset(): + self._payload = charset.body_encode(self._payload) + if 'Content-Transfer-Encoding' not in self: + cte = charset.get_body_encoding() + try: + cte(self) + except TypeError: + self._payload = charset.body_encode(self._payload) + self.add_header('Content-Transfer-Encoding', cte) + + def get_charset(self): + """Return the Charset instance associated with the message's payload. + """ + return self._charset + + # + # MAPPING INTERFACE (partial) + # + def __len__(self): + """Return the total number of headers, including duplicates.""" + return len(self._headers) + + def __getitem__(self, name): + """Get a header value. + + Return None if the header is missing instead of raising an exception. + + Note that if the header appeared multiple times, exactly which + occurrence gets returned is undefined. Use get_all() to get all + the values matching a header field name. + """ + return self.get(name) + + def __setitem__(self, name, val): + """Set the value of a header. + + Note: this does not overwrite an existing header with the same field + name. Use __delitem__() first to delete any existing headers. + """ + max_count = self.policy.header_max_count(name) + if max_count: + lname = name.lower() + found = 0 + for k, v in self._headers: + if k.lower() == lname: + found += 1 + if found >= max_count: + raise ValueError("There may be at most {} {} headers " + "in a message".format(max_count, name)) + self._headers.append(self.policy.header_store_parse(name, val)) + + def __delitem__(self, name): + """Delete all occurrences of a header, if present. + + Does not raise an exception if the header is missing. 
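+
+        For example (illustrative), del msg['subject'] removes every
+        Subject: header in the message, whatever its case.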
+ """ + name = name.lower() + newheaders = list() + for k, v in self._headers: + if k.lower() != name: + newheaders.append((k, v)) + self._headers = newheaders + + def __contains__(self, name): + return name.lower() in [k.lower() for k, v in self._headers] + + def __iter__(self): + for field, value in self._headers: + yield field + + def keys(self): + """Return a list of all the message's header field names. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [k for k, v in self._headers] + + def values(self): + """Return a list of all the message's header values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [self.policy.header_fetch_parse(k, v) + for k, v in self._headers] + + def items(self): + """Get all the message's header fields and values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + """ + return [(k, self.policy.header_fetch_parse(k, v)) + for k, v in self._headers] + + def get(self, name, failobj=None): + """Get a header value. + + Like __getitem__() but return failobj instead of None when the field + is missing. + """ + name = name.lower() + for k, v in self._headers: + if k.lower() == name: + return self.policy.header_fetch_parse(k, v) + return failobj + + # + # "Internal" methods (public API, but only intended for use by a parser + # or generator, not normal application code. + # + + def set_raw(self, name, value): + """Store name and value in the model without modification. + + This is an "internal" API, intended only for use by a parser. + """ + self._headers.append((name, value)) + + def raw_items(self): + """Return the (name, value) header pairs without modification. + + This is an "internal" API, intended only for use by a generator. + """ + return iter(self._headers.copy()) + + # + # Additional useful stuff + # + + def get_all(self, name, failobj=None): + """Return a list of all the values for the named field. + + These will be sorted in the order they appeared in the original + message, and may contain duplicates. Any fields deleted and + re-inserted are always appended to the header list. + + If no such fields exist, failobj is returned (defaults to None). + """ + values = [] + name = name.lower() + for k, v in self._headers: + if k.lower() == name: + values.append(self.policy.header_fetch_parse(k, v)) + if not values: + return failobj + return values + + def add_header(self, _name, _value, **_params): + """Extended header setting. + + name is the header field to add. keyword arguments can be used to set + additional parameters for the header field, with underscores converted + to dashes. Normally the parameter will be added as key="value" unless + value is None, in which case only the key will be added. If a + parameter value contains non-ASCII characters it can be specified as a + three-tuple of (charset, language, value), in which case it will be + encoded according to RFC2231 rules. Otherwise it will be encoded using + the utf-8 charset and a language of ''. 
+
+        Examples:
+
+        msg.add_header('content-disposition', 'attachment', filename='bud.gif')
+        msg.add_header('content-disposition', 'attachment',
+                       filename=('utf-8', '', 'Fußballer.ppt'))
+        msg.add_header('content-disposition', 'attachment',
+                       filename='Fußballer.ppt')
+        """
+        parts = []
+        for k, v in _params.items():
+            if v is None:
+                parts.append(k.replace('_', '-'))
+            else:
+                parts.append(_formatparam(k.replace('_', '-'), v))
+        if _value is not None:
+            parts.insert(0, _value)
+        self[_name] = SEMISPACE.join(parts)
+
+    def replace_header(self, _name, _value):
+        """Replace a header.
+
+        Replace the first matching header found in the message, retaining
+        header order and case. If no matching header was found, a KeyError is
+        raised.
+        """
+        _name = _name.lower()
+        for i, (k, v) in zip(range(len(self._headers)), self._headers):
+            if k.lower() == _name:
+                self._headers[i] = self.policy.header_store_parse(k, _value)
+                break
+        else:
+            raise KeyError(_name)
+
+    #
+    # Use these three methods instead of the three above.
+    #
+
+    def get_content_type(self):
+        """Return the message's content type.
+
+        The returned string is coerced to lower case of the form
+        `maintype/subtype'. If there was no Content-Type header in the
+        message, the default type as given by get_default_type() will be
+        returned. Since according to RFC 2045, messages always have a default
+        type, this will always return a value.
+
+        RFC 2045 defines a message's default type to be text/plain unless it
+        appears inside a multipart/digest container, in which case it would be
+        message/rfc822.
+        """
+        missing = object()
+        value = self.get('content-type', missing)
+        if value is missing:
+            # This should have no parameters
+            return self.get_default_type()
+        ctype = _splitparam(value)[0].lower()
+        # RFC 2045, section 5.2 says if it's invalid, use text/plain
+        if ctype.count('/') != 1:
+            return 'text/plain'
+        return ctype
+
+    def get_content_maintype(self):
+        """Return the message's main content type.
+
+        This is the `maintype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[0]
+
+    def get_content_subtype(self):
+        """Return the message's sub-content type.
+
+        This is the `subtype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[1]
+
+    def get_default_type(self):
+        """Return the `default' content type.
+
+        Most messages have a default content type of text/plain, except for
+        messages that are subparts of multipart/digest containers. Such
+        subparts have a default content type of message/rfc822.
+        """
+        return self._default_type
+
+    def set_default_type(self, ctype):
+        """Set the `default' content type.
+
+        ctype should be either "text/plain" or "message/rfc822", although this
+        is not enforced. The default content type is not stored in the
+        Content-Type header.
+        """
+        self._default_type = ctype
+
+    def _get_params_preserve(self, failobj, header):
+        # Like get_params() but preserves the quoting of values. BAW:
+        # should this be part of the public interface?
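+        # Sketch of the return value (illustrative): for a header of
+        #     Content-Type: text/plain; charset="utf-8"
+        # this returns [('text/plain', ''), ('charset', '"utf-8"')], i.e.
+        # the original quoting of each value is kept intact.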
+ missing = object() + value = self.get(header, missing) + if value is missing: + return failobj + params = [] + for p in _parseparam(value): + try: + name, val = p.split('=', 1) + name = name.strip() + val = val.strip() + except ValueError: + # Must have been a bare attribute + name = p.strip() + val = '' + params.append((name, val)) + params = utils.decode_params(params) + return params + + def get_params(self, failobj=None, header='content-type', unquote=True): + """Return the message's Content-Type parameters, as a list. + + The elements of the returned list are 2-tuples of key/value pairs, as + split on the `=' sign. The left hand side of the `=' is the key, + while the right hand side is the value. If there is no `=' sign in + the parameter the value is the empty string. The value is as + described in the get_param() method. + + Optional failobj is the object to return if there is no Content-Type + header. Optional header is the header to search instead of + Content-Type. If unquote is True, the value is unquoted. + """ + missing = object() + params = self._get_params_preserve(missing, header) + if params is missing: + return failobj + if unquote: + return [(k, _unquotevalue(v)) for k, v in params] + else: + return params + + def get_param(self, param, failobj=None, header='content-type', + unquote=True): + """Return the parameter value if found in the Content-Type header. + + Optional failobj is the object to return if there is no Content-Type + header, or the Content-Type header has no such parameter. Optional + header is the header to search instead of Content-Type. + + Parameter keys are always compared case insensitively. The return + value can either be a string, or a 3-tuple if the parameter was RFC + 2231 encoded. When it's a 3-tuple, the elements of the value are of + the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and + LANGUAGE can be None, in which case you should consider VALUE to be + encoded in the us-ascii charset. You can usually ignore LANGUAGE. + The parameter value (either the returned string, or the VALUE item in + the 3-tuple) is always unquoted, unless unquote is set to False. + + If your application doesn't care whether the parameter was RFC 2231 + encoded, it can turn the return value into a string as follows: + + param = msg.get_param('foo') + param = email.utils.collapse_rfc2231_value(rawparam) + + """ + if header not in self: + return failobj + for k, v in self._get_params_preserve(failobj, header): + if k.lower() == param.lower(): + if unquote: + return _unquotevalue(v) + else: + return v + return failobj + + def set_param(self, param, value, header='Content-Type', requote=True, + charset=None, language=''): + """Set a parameter in the Content-Type header. + + If the parameter already exists in the header, its value will be + replaced with the new value. + + If header is Content-Type and has not yet been defined for this + message, it will be set to "text/plain" and the new parameter and + value will be appended as per RFC 2045. + + An alternate header can specified in the header argument, and all + parameters will be quoted as necessary unless requote is False. + + If charset is specified, the parameter will be encoded according to RFC + 2231. Optional language specifies the RFC 2231 language, defaulting + to the empty string. Both charset and language should be strings. 
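+
+        Example (illustrative):
+
+            msg.set_param('charset', 'utf-8')
+            # Content-Type becomes e.g. text/plain; charset="utf-8"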
+ """ + if not isinstance(value, tuple) and charset: + value = (charset, language, value) + + if header not in self and header.lower() == 'content-type': + ctype = 'text/plain' + else: + ctype = self.get(header) + if not self.get_param(param, header=header): + if not ctype: + ctype = _formatparam(param, value, requote) + else: + ctype = SEMISPACE.join( + [ctype, _formatparam(param, value, requote)]) + else: + ctype = '' + for old_param, old_value in self.get_params(header=header, + unquote=requote): + append_param = '' + if old_param.lower() == param.lower(): + append_param = _formatparam(param, value, requote) + else: + append_param = _formatparam(old_param, old_value, requote) + if not ctype: + ctype = append_param + else: + ctype = SEMISPACE.join([ctype, append_param]) + if ctype != self.get(header): + del self[header] + self[header] = ctype + + def del_param(self, param, header='content-type', requote=True): + """Remove the given parameter completely from the Content-Type header. + + The header will be re-written in place without the parameter or its + value. All values will be quoted as necessary unless requote is + False. Optional header specifies an alternative to the Content-Type + header. + """ + if header not in self: + return + new_ctype = '' + for p, v in self.get_params(header=header, unquote=requote): + if p.lower() != param.lower(): + if not new_ctype: + new_ctype = _formatparam(p, v, requote) + else: + new_ctype = SEMISPACE.join([new_ctype, + _formatparam(p, v, requote)]) + if new_ctype != self.get(header): + del self[header] + self[header] = new_ctype + + def set_type(self, type, header='Content-Type', requote=True): + """Set the main type and subtype for the Content-Type header. + + type must be a string in the form "maintype/subtype", otherwise a + ValueError is raised. + + This method replaces the Content-Type header, keeping all the + parameters in place. If requote is False, this leaves the existing + header's quoting as is. Otherwise, the parameters will be quoted (the + default). + + An alternative header can be specified in the header argument. When + the Content-Type header is set, we'll always also add a MIME-Version + header. + """ + # BAW: should we be strict? + if not type.count('/') == 1: + raise ValueError + # Set the Content-Type, you get a MIME-Version + if header.lower() == 'content-type': + del self['mime-version'] + self['MIME-Version'] = '1.0' + if header not in self: + self[header] = type + return + params = self.get_params(header=header, unquote=requote) + del self[header] + self[header] = type + # Skip the first param; it's the old type. + for p, v in params[1:]: + self.set_param(p, v, header, requote) + + def get_filename(self, failobj=None): + """Return the filename associated with the payload if present. + + The filename is extracted from the Content-Disposition header's + `filename' parameter, and it is unquoted. If that header is missing + the `filename' parameter, this method falls back to looking for the + `name' parameter. + """ + missing = object() + filename = self.get_param('filename', missing, 'content-disposition') + if filename is missing: + filename = self.get_param('name', missing, 'content-type') + if filename is missing: + return failobj + return utils.collapse_rfc2231_value(filename).strip() + + def get_boundary(self, failobj=None): + """Return the boundary associated with the payload if present. + + The boundary is extracted from the Content-Type header's `boundary' + parameter, and it is unquoted. 
+ """ + missing = object() + boundary = self.get_param('boundary', missing) + if boundary is missing: + return failobj + # RFC 2046 says that boundaries may begin but not end in w/s + return utils.collapse_rfc2231_value(boundary).rstrip() + + def set_boundary(self, boundary): + """Set the boundary parameter in Content-Type to 'boundary'. + + This is subtly different than deleting the Content-Type header and + adding a new one with a new boundary parameter via add_header(). The + main difference is that using the set_boundary() method preserves the + order of the Content-Type header in the original message. + + HeaderParseError is raised if the message has no Content-Type header. + """ + missing = object() + params = self._get_params_preserve(missing, 'content-type') + if params is missing: + # There was no Content-Type header, and we don't know what type + # to set it to, so raise an exception. + raise errors.HeaderParseError('No Content-Type header found') + newparams = list() + foundp = False + for pk, pv in params: + if pk.lower() == 'boundary': + newparams.append(('boundary', '"%s"' % boundary)) + foundp = True + else: + newparams.append((pk, pv)) + if not foundp: + # The original Content-Type header had no boundary attribute. + # Tack one on the end. BAW: should we raise an exception + # instead??? + newparams.append(('boundary', '"%s"' % boundary)) + # Replace the existing Content-Type header with the new value + newheaders = list() + for h, v in self._headers: + if h.lower() == 'content-type': + parts = list() + for k, v in newparams: + if v == '': + parts.append(k) + else: + parts.append('%s=%s' % (k, v)) + val = SEMISPACE.join(parts) + newheaders.append(self.policy.header_store_parse(h, val)) + + else: + newheaders.append((h, v)) + self._headers = newheaders + + def get_content_charset(self, failobj=None): + """Return the charset parameter of the Content-Type header. + + The returned string is always coerced to lower case. If there is no + Content-Type header, or if that header has no charset parameter, + failobj is returned. + """ + missing = object() + charset = self.get_param('charset', missing) + if charset is missing: + return failobj + if isinstance(charset, tuple): + # RFC 2231 encoded, so decode it, and it better end up as ascii. + pcharset = charset[0] or 'us-ascii' + try: + # LookupError will be raised if the charset isn't known to + # Python. UnicodeError will be raised if the encoded text + # contains a character not in the charset. + as_bytes = charset[2].encode('raw-unicode-escape') + charset = str(as_bytes, pcharset) + except (LookupError, UnicodeError): + charset = charset[2] + # charset characters must be in us-ascii range + try: + charset.encode('us-ascii') + except UnicodeError: + return failobj + # RFC 2046, $4.1.2 says charsets are not case sensitive + return charset.lower() + + def get_charsets(self, failobj=None): + """Return a list containing the charset(s) used in this message. + + The returned list of items describes the Content-Type headers' + charset parameter for this message and all the subparts in its + payload. + + Each item will either be a string (the value of the charset parameter + in the Content-Type header of that part) or the value of the + 'failobj' parameter (defaults to None), if the part does not have a + main MIME type of "text", or the charset is not defined. + + The list will contain one string for each part of the message, plus + one for the container message (i.e. 
self), so that a non-multipart + message will still return a list of length 1. + """ + return [part.get_content_charset(failobj) for part in self.walk()] + + # I.e. def walk(self): ... + from future.backports.email.iterators import walk diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/application.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/application.py new file mode 100644 index 0000000000..5cbfb174af --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/application.py @@ -0,0 +1,39 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Keith Dart +# Contact: email-sig@python.org + +"""Class representing application/* type MIME documents.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +from future.backports.email import encoders +from future.backports.email.mime.nonmultipart import MIMENonMultipart + +__all__ = ["MIMEApplication"] + + +class MIMEApplication(MIMENonMultipart): + """Class for generating application/* MIME documents.""" + + def __init__(self, _data, _subtype='octet-stream', + _encoder=encoders.encode_base64, **_params): + """Create an application/* type MIME document. + + _data is a string containing the raw application data. + + _subtype is the MIME content type subtype, defaulting to + 'octet-stream'. + + _encoder is a function which will perform the actual encoding for + transport of the application data, defaulting to base64 encoding. + + Any additional keyword arguments are passed to the base class + constructor, which turns them into parameters on the Content-Type + header. + """ + if _subtype is None: + raise TypeError('Invalid application MIME subtype') + MIMENonMultipart.__init__(self, 'application', _subtype, **_params) + self.set_payload(_data) + _encoder(self) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/audio.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/audio.py new file mode 100644 index 0000000000..4989c11420 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/audio.py @@ -0,0 +1,74 @@ +# Copyright (C) 2001-2007 Python Software Foundation +# Author: Anthony Baxter +# Contact: email-sig@python.org + +"""Class representing audio/* type MIME documents.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = ['MIMEAudio'] + +import sndhdr + +from io import BytesIO +from future.backports.email import encoders +from future.backports.email.mime.nonmultipart import MIMENonMultipart + + +_sndhdr_MIMEmap = {'au' : 'basic', + 'wav' :'x-wav', + 'aiff':'x-aiff', + 'aifc':'x-aiff', + } + +# There are others in sndhdr that don't have MIME types. :( +# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma?? +def _whatsnd(data): + """Try to identify a sound file type. + + sndhdr.what() has a pretty cruddy interface, unfortunately. This is why + we re-do it here. It would be easier to reverse engineer the Unix 'file' + command and use the standard 'magic' file, as shipped with a modern Unix. 
+ """ + hdr = data[:512] + fakefile = BytesIO(hdr) + for testfn in sndhdr.tests: + res = testfn(hdr, fakefile) + if res is not None: + return _sndhdr_MIMEmap.get(res[0]) + return None + + +class MIMEAudio(MIMENonMultipart): + """Class for generating audio/* MIME documents.""" + + def __init__(self, _audiodata, _subtype=None, + _encoder=encoders.encode_base64, **_params): + """Create an audio/* type MIME document. + + _audiodata is a string containing the raw audio data. If this data + can be decoded by the standard Python `sndhdr' module, then the + subtype will be automatically included in the Content-Type header. + Otherwise, you can specify the specific audio subtype via the + _subtype parameter. If _subtype is not given, and no subtype can be + guessed, a TypeError is raised. + + _encoder is a function which will perform the actual encoding for + transport of the image data. It takes one argument, which is this + Image instance. It should use get_payload() and set_payload() to + change the payload to the encoded form. It should also add any + Content-Transfer-Encoding or other headers to the message as + necessary. The default encoding is Base64. + + Any additional keyword arguments are passed to the base class + constructor, which turns them into parameters on the Content-Type + header. + """ + if _subtype is None: + _subtype = _whatsnd(_audiodata) + if _subtype is None: + raise TypeError('Could not find audio MIME subtype') + MIMENonMultipart.__init__(self, 'audio', _subtype, **_params) + self.set_payload(_audiodata) + _encoder(self) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/base.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/base.py new file mode 100644 index 0000000000..e77f3ca4ae --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/base.py @@ -0,0 +1,25 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Base class for MIME specializations.""" +from __future__ import absolute_import, division, unicode_literals +from future.backports.email import message + +__all__ = ['MIMEBase'] + + +class MIMEBase(message.Message): + """Base class for MIME specializations.""" + + def __init__(self, _maintype, _subtype, **_params): + """This constructor adds a Content-Type: and a MIME-Version: header. + + The Content-Type: header is taken from the _maintype and _subtype + arguments. Additional parameters for this header are taken from the + keyword arguments. 
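+
+        Example (illustrative):
+
+            part = MIMEBase('application', 'octet-stream', name='data.bin')
+            # yields: Content-Type: application/octet-stream; name="data.bin"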
+ """ + message.Message.__init__(self) + ctype = '%s/%s' % (_maintype, _subtype) + self.add_header('Content-Type', ctype, **_params) + self['MIME-Version'] = '1.0' diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/image.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/image.py new file mode 100644 index 0000000000..a03602464a --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/image.py @@ -0,0 +1,48 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Class representing image/* type MIME documents.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = ['MIMEImage'] + +import imghdr + +from future.backports.email import encoders +from future.backports.email.mime.nonmultipart import MIMENonMultipart + + +class MIMEImage(MIMENonMultipart): + """Class for generating image/* type MIME documents.""" + + def __init__(self, _imagedata, _subtype=None, + _encoder=encoders.encode_base64, **_params): + """Create an image/* type MIME document. + + _imagedata is a string containing the raw image data. If this data + can be decoded by the standard Python `imghdr' module, then the + subtype will be automatically included in the Content-Type header. + Otherwise, you can specify the specific image subtype via the _subtype + parameter. + + _encoder is a function which will perform the actual encoding for + transport of the image data. It takes one argument, which is this + Image instance. It should use get_payload() and set_payload() to + change the payload to the encoded form. It should also add any + Content-Transfer-Encoding or other headers to the message as + necessary. The default encoding is Base64. + + Any additional keyword arguments are passed to the base class + constructor, which turns them into parameters on the Content-Type + header. + """ + if _subtype is None: + _subtype = imghdr.what(None, _imagedata) + if _subtype is None: + raise TypeError('Could not guess image MIME subtype') + MIMENonMultipart.__init__(self, 'image', _subtype, **_params) + self.set_payload(_imagedata) + _encoder(self) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/message.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/message.py new file mode 100644 index 0000000000..7f92075150 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/message.py @@ -0,0 +1,36 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Class representing message/* MIME documents.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = ['MIMEMessage'] + +from future.backports.email import message +from future.backports.email.mime.nonmultipart import MIMENonMultipart + + +class MIMEMessage(MIMENonMultipart): + """Class representing message/* MIME documents.""" + + def __init__(self, _msg, _subtype='rfc822'): + """Create a message/* type MIME document. + + _msg is a message object and must be an instance of Message, or a + derived class of Message, otherwise a TypeError is raised. + + Optional _subtype defines the subtype of the contained message. The + default is "rfc822" (this is defined by the MIME standard, even though + the term "rfc822" is technically outdated by RFC 2822). 
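+
+        Example (illustrative):
+
+            wrapper = MIMEMessage(inner)   # inner: any Message instance
+            # wrapper now has Content-Type: message/rfc822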
+ """ + MIMENonMultipart.__init__(self, 'message', _subtype) + if not isinstance(_msg, message.Message): + raise TypeError('Argument is not an instance of Message') + # It's convenient to use this base class method. We need to do it + # this way or we'll get an exception + message.Message.attach(self, _msg) + # And be sure our default type is set correctly + self.set_default_type('message/rfc822') diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/multipart.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/multipart.py new file mode 100644 index 0000000000..6d7ed3dcb9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/multipart.py @@ -0,0 +1,49 @@ +# Copyright (C) 2002-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Base class for MIME multipart/* type messages.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = ['MIMEMultipart'] + +from future.backports.email.mime.base import MIMEBase + + +class MIMEMultipart(MIMEBase): + """Base class for MIME multipart/* type messages.""" + + def __init__(self, _subtype='mixed', boundary=None, _subparts=None, + **_params): + """Creates a multipart/* type message. + + By default, creates a multipart/mixed message, with proper + Content-Type and MIME-Version headers. + + _subtype is the subtype of the multipart content type, defaulting to + `mixed'. + + boundary is the multipart boundary string. By default it is + calculated as needed. + + _subparts is a sequence of initial subparts for the payload. It + must be an iterable object, such as a list. You can always + attach new subparts to the message by using the attach() method. + + Additional parameters for the Content-Type header are taken from the + keyword arguments (or passed into the _params argument). + """ + MIMEBase.__init__(self, 'multipart', _subtype, **_params) + + # Initialise _payload to an empty list as the Message superclass's + # implementation of is_multipart assumes that _payload is a list for + # multipart messages. 
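+        # Illustrative construction:
+        #     MIMEMultipart('alternative', _subparts=[part1, part2])
+        # attaches both parts through attach() in the loop below.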
+ self._payload = [] + + if _subparts: + for p in _subparts: + self.attach(p) + if boundary: + self.set_boundary(boundary) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/nonmultipart.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/nonmultipart.py new file mode 100644 index 0000000000..08c37c36d1 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/nonmultipart.py @@ -0,0 +1,24 @@ +# Copyright (C) 2002-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Base class for MIME type messages that are not multipart.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = ['MIMENonMultipart'] + +from future.backports.email import errors +from future.backports.email.mime.base import MIMEBase + + +class MIMENonMultipart(MIMEBase): + """Base class for MIME multipart/* type messages.""" + + def attach(self, payload): + # The public API prohibits attaching multiple subparts to MIMEBase + # derived subtypes since none of them are, by definition, of content + # type multipart/* + raise errors.MultipartConversionError( + 'Cannot attach additional subparts to non-multipart/*') diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/mime/text.py b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/text.py new file mode 100644 index 0000000000..6269f4a68a --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/mime/text.py @@ -0,0 +1,44 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Class representing text/* type MIME documents.""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import + +__all__ = ['MIMEText'] + +from future.backports.email.encoders import encode_7or8bit +from future.backports.email.mime.nonmultipart import MIMENonMultipart + + +class MIMEText(MIMENonMultipart): + """Class for generating text/* type MIME documents.""" + + def __init__(self, _text, _subtype='plain', _charset=None): + """Create a text/* type MIME document. + + _text is the string for this message object. + + _subtype is the MIME sub content type, defaulting to "plain". + + _charset is the character set parameter added to the Content-Type + header. This defaults to "us-ascii". Note that as a side-effect, the + Content-Transfer-Encoding header will also be set. + """ + + # If no _charset was specified, check to see if there are non-ascii + # characters present. If not, use 'us-ascii', otherwise use utf-8. + # XXX: This can be removed once #7304 is fixed. 
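+        # E.g. MIMEText('hello') ends up as us-ascii, while a body such as
+        # MIMEText('Fußballer') falls back to utf-8 in the check below.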
+        if _charset is None:
+            try:
+                _text.encode('us-ascii')
+                _charset = 'us-ascii'
+            except UnicodeEncodeError:
+                _charset = 'utf-8'
+
+        MIMENonMultipart.__init__(self, 'text', _subtype,
+                                  **{'charset': _charset})
+
+        self.set_payload(_text, _charset)
diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/parser.py b/pype/modules/ftrack/python2_vendor/future/backports/email/parser.py
new file mode 100644
index 0000000000..df1c6e2868
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/backports/email/parser.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
+# Contact: email-sig@python.org
+
+"""A parser of RFC 2822 and MIME email messages."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser']
+
+import warnings
+from io import StringIO, TextIOWrapper
+
+from future.backports.email.feedparser import FeedParser, BytesFeedParser
+from future.backports.email.message import Message
+from future.backports.email._policybase import compat32
+
+
+class Parser(object):
+    def __init__(self, _class=Message, **_3to2kwargs):
+        """Parser of RFC 2822 and MIME email messages.
+
+        Creates an in-memory object tree representing the email message, which
+        can then be manipulated and turned over to a Generator to return the
+        textual representation of the message.
+
+        The string must be formatted as a block of RFC 2822 headers and header
+        continuation lines, optionally preceded by a `Unix-from' header. The
+        header block is terminated either by the end of the string or by a
+        blank line.
+
+        _class is the class to instantiate for new message objects when they
+        must be created. This class must have a constructor that can take
+        zero arguments. Default is Message.Message.
+
+        The policy keyword specifies a policy object that controls a number of
+        aspects of the parser's operation. The default policy maintains
+        backward compatibility.
+
+        """
+        if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
+        else: policy = compat32
+        self._class = _class
+        self.policy = policy
+
+    def parse(self, fp, headersonly=False):
+        """Create a message structure from the data in a file.
+
+        Reads all the data from the file and returns the root of the message
+        structure. Optional headersonly is a flag specifying whether to stop
+        parsing after reading the headers or not. The default is False,
+        meaning it parses the entire contents of the file.
+        """
+        feedparser = FeedParser(self._class, policy=self.policy)
+        if headersonly:
+            feedparser._set_headersonly()
+        while True:
+            data = fp.read(8192)
+            if not data:
+                break
+            feedparser.feed(data)
+        return feedparser.close()
+
+    def parsestr(self, text, headersonly=False):
+        """Create a message structure from a string.
+
+        Returns the root of the message structure. Optional headersonly is a
+        flag specifying whether to stop parsing after reading the headers or
+        not. The default is False, meaning it parses the entire contents of
+        the file.
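+
+        Example (illustrative):
+
+            msg = Parser().parsestr(text)   # text: a full RFC 2822 message
+            subject = msg['subject']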
+ """ + return self.parse(StringIO(text), headersonly=headersonly) + + + +class HeaderParser(Parser): + def parse(self, fp, headersonly=True): + return Parser.parse(self, fp, True) + + def parsestr(self, text, headersonly=True): + return Parser.parsestr(self, text, True) + + +class BytesParser(object): + + def __init__(self, *args, **kw): + """Parser of binary RFC 2822 and MIME email messages. + + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The input must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceeded by a `Unix-from' header. The + header block is terminated either by the end of the input or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. + """ + self.parser = Parser(*args, **kw) + + def parse(self, fp, headersonly=False): + """Create a message structure from the data in a binary file. + + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + """ + fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape') + with fp: + return self.parser.parse(fp, headersonly) + + + def parsebytes(self, text, headersonly=False): + """Create a message structure from a byte string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + """ + text = text.decode('ASCII', errors='surrogateescape') + return self.parser.parsestr(text, headersonly) + + +class BytesHeaderParser(BytesParser): + def parse(self, fp, headersonly=True): + return BytesParser.parse(self, fp, headersonly=True) + + def parsebytes(self, text, headersonly=True): + return BytesParser.parsebytes(self, text, headersonly=True) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/policy.py b/pype/modules/ftrack/python2_vendor/future/backports/email/policy.py new file mode 100644 index 0000000000..2f609a23ae --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/policy.py @@ -0,0 +1,193 @@ +"""This will be the home for the policy that hooks in the new +code that adds all the email6 features. +""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import +from future.builtins import super + +from future.standard_library.email._policybase import (Policy, Compat32, + compat32, _extend_docstrings) +from future.standard_library.email.utils import _has_surrogates +from future.standard_library.email.headerregistry import HeaderRegistry as HeaderRegistry + +__all__ = [ + 'Compat32', + 'compat32', + 'Policy', + 'EmailPolicy', + 'default', + 'strict', + 'SMTP', + 'HTTP', + ] + +@_extend_docstrings +class EmailPolicy(Policy): + + """+ + PROVISIONAL + + The API extensions enabled by this policy are currently provisional. + Refer to the documentation for details. + + This policy adds new header parsing and folding algorithms. Instead of + simple strings, headers are custom objects with custom attributes + depending on the type of the field. 
The folding algorithm fully + implements RFCs 2047 and 5322. + + In addition to the settable attributes listed above that apply to + all Policies, this policy adds the following additional attributes: + + refold_source -- if the value for a header in the Message object + came from the parsing of some source, this attribute + indicates whether or not a generator should refold + that value when transforming the message back into + stream form. The possible values are: + + none -- all source values use original folding + long -- source values that have any line that is + longer than max_line_length will be + refolded + all -- all values are refolded. + + The default is 'long'. + + header_factory -- a callable that takes two arguments, 'name' and + 'value', where 'name' is a header field name and + 'value' is an unfolded header field value, and + returns a string-like object that represents that + header. A default header_factory is provided that + understands some of the RFC5322 header field types. + (Currently address fields and date fields have + special treatment, while all other fields are + treated as unstructured. This list will be + completed before the extension is marked stable.) + """ + + refold_source = 'long' + header_factory = HeaderRegistry() + + def __init__(self, **kw): + # Ensure that each new instance gets a unique header factory + # (as opposed to clones, which share the factory). + if 'header_factory' not in kw: + object.__setattr__(self, 'header_factory', HeaderRegistry()) + super().__init__(**kw) + + def header_max_count(self, name): + """+ + The implementation for this class returns the max_count attribute from + the specialized header class that would be used to construct a header + of type 'name'. + """ + return self.header_factory[name].max_count + + # The logic of the next three methods is chosen such that it is possible to + # switch a Message object between a Compat32 policy and a policy derived + # from this class and have the results stay consistent. This allows a + # Message object constructed with this policy to be passed to a library + # that only handles Compat32 objects, or to receive such an object and + # convert it to use the newer style by just changing its policy. It is + # also chosen because it postpones the relatively expensive full rfc5322 + # parse until as late as possible when parsing from source, since in many + # applications only a few headers will actually be inspected. + + def header_source_parse(self, sourcelines): + """+ + The name is parsed as everything up to the ':' and returned unmodified. + The value is determined by stripping leading whitespace off the + remainder of the first line, joining all subsequent lines together, and + stripping any trailing carriage return or linefeed characters. (This + is the same as Compat32). + + """ + name, value = sourcelines[0].split(':', 1) + value = value.lstrip(' \t') + ''.join(sourcelines[1:]) + return (name, value.rstrip('\r\n')) + + def header_store_parse(self, name, value): + """+ + The name is returned unchanged. If the input value has a 'name' + attribute and it matches the name ignoring case, the value is returned + unchanged. Otherwise the name and value are passed to header_factory + method, and the resulting custom header object is returned as the + value. In this case a ValueError is raised if the input value contains + CR or LF characters. 
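A sketch of the source-parsing hook documented above, assuming the vendored EmailPolicy behaves like its CPython counterpart (import path inferred from the patched file): header_source_parse joins continuation lines and strips only the trailing line ending.

    from future.backports.email.policy import EmailPolicy

    policy = EmailPolicy()
    name, value = policy.header_source_parse(['Subject: hello\n', ' world\n'])
    print((name, value))   # -> ('Subject', 'hello\n world')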
+ + """ + if hasattr(value, 'name') and value.name.lower() == name.lower(): + return (name, value) + if isinstance(value, str) and len(value.splitlines())>1: + raise ValueError("Header values may not contain linefeed " + "or carriage return characters") + return (name, self.header_factory(name, value)) + + def header_fetch_parse(self, name, value): + """+ + If the value has a 'name' attribute, it is returned to unmodified. + Otherwise the name and the value with any linesep characters removed + are passed to the header_factory method, and the resulting custom + header object is returned. Any surrogateescaped bytes get turned + into the unicode unknown-character glyph. + + """ + if hasattr(value, 'name'): + return value + return self.header_factory(name, ''.join(value.splitlines())) + + def fold(self, name, value): + """+ + Header folding is controlled by the refold_source policy setting. A + value is considered to be a 'source value' if and only if it does not + have a 'name' attribute (having a 'name' attribute means it is a header + object of some sort). If a source value needs to be refolded according + to the policy, it is converted into a custom header object by passing + the name and the value with any linesep characters removed to the + header_factory method. Folding of a custom header object is done by + calling its fold method with the current policy. + + Source values are split into lines using splitlines. If the value is + not to be refolded, the lines are rejoined using the linesep from the + policy and returned. The exception is lines containing non-ascii + binary data. In that case the value is refolded regardless of the + refold_source setting, which causes the binary data to be CTE encoded + using the unknown-8bit charset. + + """ + return self._fold(name, value, refold_binary=True) + + def fold_binary(self, name, value): + """+ + The same as fold if cte_type is 7bit, except that the returned value is + bytes. + + If cte_type is 8bit, non-ASCII binary data is converted back into + bytes. Headers with binary data are not refolded, regardless of the + refold_header setting, since there is no way to know whether the binary + data consists of single byte characters or multibyte characters. 
+ + """ + folded = self._fold(name, value, refold_binary=self.cte_type=='7bit') + return folded.encode('ascii', 'surrogateescape') + + def _fold(self, name, value, refold_binary=False): + if hasattr(value, 'name'): + return value.fold(policy=self) + maxlen = self.max_line_length if self.max_line_length else float('inf') + lines = value.splitlines() + refold = (self.refold_source == 'all' or + self.refold_source == 'long' and + (lines and len(lines[0])+len(name)+2 > maxlen or + any(len(x) > maxlen for x in lines[1:]))) + if refold or refold_binary and _has_surrogates(value): + return self.header_factory(name, ''.join(lines)).fold(policy=self) + return name + ': ' + self.linesep.join(lines) + self.linesep + + +default = EmailPolicy() +# Make the default policy use the class default header_factory +del default.header_factory +strict = default.clone(raise_on_defect=True) +SMTP = default.clone(linesep='\r\n') +HTTP = default.clone(linesep='\r\n', max_line_length=None) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/quoprimime.py b/pype/modules/ftrack/python2_vendor/future/backports/email/quoprimime.py new file mode 100644 index 0000000000..b69d158bc4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/quoprimime.py @@ -0,0 +1,326 @@ +# Copyright (C) 2001-2006 Python Software Foundation +# Author: Ben Gertzfield +# Contact: email-sig@python.org + +"""Quoted-printable content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to +safely encode text that is in a character set similar to the 7-bit US ASCII +character set, but that includes some 8-bit characters that are normally not +allowed in email bodies or headers. + +Quoted-printable is very space-inefficient for encoding binary files; use the +email.base64mime module for that instead. + +This module provides an interface to encode and decode both headers and bodies +with quoted-printable encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:/From:/Cc: etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character +conversion necessary for proper internationalized headers; it only +does dumb encoding and decoding. To deal with the various line +wrapping issues, use the email.header module. +""" +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import +from future.builtins import bytes, chr, dict, int, range, super + +__all__ = [ + 'body_decode', + 'body_encode', + 'body_length', + 'decode', + 'decodestring', + 'header_decode', + 'header_encode', + 'header_length', + 'quote', + 'unquote', + ] + +import re +import io + +from string import ascii_letters, digits, hexdigits + +CRLF = '\r\n' +NL = '\n' +EMPTYSTRING = '' + +# Build a mapping of octets to the expansion of that octet. Since we're only +# going to have 256 of these things, this isn't terribly inefficient +# space-wise. Remember that headers and bodies have different sets of safe +# characters. Initialize both maps with the full expansion, and then override +# the safe bytes with the more compact form. +_QUOPRI_HEADER_MAP = dict((c, '=%02X' % c) for c in range(256)) +_QUOPRI_BODY_MAP = _QUOPRI_HEADER_MAP.copy() + +# Safe header bytes which need no encoding. 
+for c in bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')): + _QUOPRI_HEADER_MAP[c] = chr(c) +# Headers have one other special encoding; spaces become underscores. +_QUOPRI_HEADER_MAP[ord(' ')] = '_' + +# Safe body bytes which need no encoding. +for c in bytes(b' !"#$%&\'()*+,-./0123456789:;<>' + b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`' + b'abcdefghijklmnopqrstuvwxyz{|}~\t'): + _QUOPRI_BODY_MAP[c] = chr(c) + + + +# Helpers +def header_check(octet): + """Return True if the octet should be escaped with header quopri.""" + return chr(octet) != _QUOPRI_HEADER_MAP[octet] + + +def body_check(octet): + """Return True if the octet should be escaped with body quopri.""" + return chr(octet) != _QUOPRI_BODY_MAP[octet] + + +def header_length(bytearray): + """Return a header quoted-printable encoding length. + + Note that this does not include any RFC 2047 chrome added by + `header_encode()`. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for headers. + """ + return sum(len(_QUOPRI_HEADER_MAP[octet]) for octet in bytearray) + + +def body_length(bytearray): + """Return a body quoted-printable encoding length. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for bodies. + """ + return sum(len(_QUOPRI_BODY_MAP[octet]) for octet in bytearray) + + +def _max_append(L, s, maxlen, extra=''): + if not isinstance(s, str): + s = chr(s) + if not L: + L.append(s.lstrip()) + elif len(L[-1]) + len(s) <= maxlen: + L[-1] += extra + s + else: + L.append(s.lstrip()) + + +def unquote(s): + """Turn a string in the form =AB to the ASCII character with value 0xab""" + return chr(int(s[1:3], 16)) + + +def quote(c): + return '=%02X' % ord(c) + + + +def header_encode(header_bytes, charset='iso-8859-1'): + """Encode a single header line with quoted-printable (like) encoding. + + Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but + used specifically for email header fields to allow charsets with mostly 7 + bit characters (and some 8 bit) to remain more or less readable in non-RFC + 2045 aware mail clients. + + charset names the character set to use in the RFC 2046 header. It + defaults to iso-8859-1. + """ + # Return empty headers as an empty string. + if not header_bytes: + return '' + # Iterate over every byte, encoding if necessary. + encoded = [] + for octet in header_bytes: + encoded.append(_QUOPRI_HEADER_MAP[octet]) + # Now add the RFC chrome to each encoded chunk and glue the chunks + # together. 
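A sketch of header_encode, whose final line follows below: every octet outside the safe set expands to =XX via _QUOPRI_HEADER_MAP (with spaces becoming '_'), and the result is wrapped in the RFC 2047 'Q' encoded-word form. Assumes Python 3 semantics, where iterating bytes yields ints, and the import path inferred from the patched file:

    from future.backports.email.quoprimime import header_encode

    print(header_encode(b'caf\xe9'))   # -> '=?iso-8859-1?q?caf=E9?='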
+ return '=?%s?q?%s?=' % (charset, EMPTYSTRING.join(encoded)) + + +class _body_accumulator(io.StringIO): + + def __init__(self, maxlinelen, eol, *args, **kw): + super().__init__(*args, **kw) + self.eol = eol + self.maxlinelen = self.room = maxlinelen + + def write_str(self, s): + """Add string s to the accumulated body.""" + self.write(s) + self.room -= len(s) + + def newline(self): + """Write eol, then start new line.""" + self.write_str(self.eol) + self.room = self.maxlinelen + + def write_soft_break(self): + """Write a soft break, then start a new line.""" + self.write_str('=') + self.newline() + + def write_wrapped(self, s, extra_room=0): + """Add a soft line break if needed, then write s.""" + if self.room < len(s) + extra_room: + self.write_soft_break() + self.write_str(s) + + def write_char(self, c, is_last_char): + if not is_last_char: + # Another character follows on this line, so we must leave + # extra room, either for it or a soft break, and whitespace + # need not be quoted. + self.write_wrapped(c, extra_room=1) + elif c not in ' \t': + # For this and remaining cases, no more characters follow, + # so there is no need to reserve extra room (since a hard + # break will immediately follow). + self.write_wrapped(c) + elif self.room >= 3: + # It's a whitespace character at end-of-line, and we have room + # for the three-character quoted encoding. + self.write(quote(c)) + elif self.room == 2: + # There's room for the whitespace character and a soft break. + self.write(c) + self.write_soft_break() + else: + # There's room only for a soft break. The quoted whitespace + # will be the only content on the subsequent line. + self.write_soft_break() + self.write(quote(c)) + + +def body_encode(body, maxlinelen=76, eol=NL): + """Encode with quoted-printable, wrapping at maxlinelen characters. + + Each line of encoded text will end with eol, which defaults to "\\n". Set + this to "\\r\\n" if you will be using the result of this function directly + in an email. + + Each line will be wrapped at, at most, maxlinelen characters before the + eol string (maxlinelen defaults to 76 characters, the maximum value + permitted by RFC 2045). Long lines will have the 'soft line break' + quoted-printable character "=" appended to them, so the decoded text will + be identical to the original text. + + The minimum maxlinelen is 4 to have room for a quoted character ("=XX") + followed by a soft line break. Smaller values will generate a + ValueError. + + """ + + if maxlinelen < 4: + raise ValueError("maxlinelen must be at least 4") + if not body: + return body + + # The last line may or may not end in eol, but all other lines do. + last_has_eol = (body[-1] in '\r\n') + + # This accumulator will make it easier to build the encoded body. + encoded_body = _body_accumulator(maxlinelen, eol) + + lines = body.splitlines() + last_line_no = len(lines) - 1 + for line_no, line in enumerate(lines): + last_char_index = len(line) - 1 + for i, c in enumerate(line): + if body_check(ord(c)): + c = quote(c) + encoded_body.write_char(c, i==last_char_index) + # Add an eol if input line had eol. All input lines have eol except + # possibly the last one. + if line_no < last_line_no or last_has_eol: + encoded_body.newline() + + return encoded_body.getvalue() + + + +# BAW: I'm not sure if the intent was for the signature of this function to be +# the same as base64MIME.decode() or not... +def decode(encoded, eol=NL): + """Decode a quoted-printable string. + + Lines are separated with eol, which defaults to \\n. 
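Sketches for body_encode above and for decode/header_decode, which follow just below (same import assumptions). One quirk inherited with this vintage of the backport: header_decode passes re.ASCII positionally, so re.sub receives it as its count argument rather than as flags; since the pattern needs no ASCII flag and real headers contain far fewer than 256 escapes, the observable behaviour is unchanged.

    from future.backports.email.quoprimime import body_encode, decode, header_decode

    print(body_encode('h\xf4tel\n'))        # -> 'h=F4tel\n'
    print(decode('h=F4tel=\nsuite\n'))      # -> 'hôtelsuite\n' (soft break removed)
    print(header_decode('Andr=E9_Previn'))  # -> 'André Previn' ('_' becomes a space)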
+ """ + if not encoded: + return encoded + # BAW: see comment in encode() above. Again, we're building up the + # decoded string with string concatenation, which could be done much more + # efficiently. + decoded = '' + + for line in encoded.splitlines(): + line = line.rstrip() + if not line: + decoded += eol + continue + + i = 0 + n = len(line) + while i < n: + c = line[i] + if c != '=': + decoded += c + i += 1 + # Otherwise, c == "=". Are we at the end of the line? If so, add + # a soft line break. + elif i+1 == n: + i += 1 + continue + # Decode if in form =AB + elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits: + decoded += unquote(line[i:i+3]) + i += 3 + # Otherwise, not in form =AB, pass literally + else: + decoded += c + i += 1 + + if i == n: + decoded += eol + # Special case if original string did not end with eol + if encoded[-1] not in '\r\n' and decoded.endswith(eol): + decoded = decoded[:-1] + return decoded + + +# For convenience and backwards compatibility w/ standard base64 module +body_decode = decode +decodestring = decode + + + +def _unquote_match(match): + """Turn a match in the form =AB to the ASCII character with value 0xab""" + s = match.group(0) + return unquote(s) + + +# Header decoding is done a bit differently +def header_decode(s): + """Decode a string encoded with RFC 2045 MIME header `Q' encoding. + + This function does not parse a full MIME header value encoded with + quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use + the high level email.header class for that functionality. + """ + s = s.replace('_', ' ') + return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, re.ASCII) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/email/utils.py b/pype/modules/ftrack/python2_vendor/future/backports/email/utils.py new file mode 100644 index 0000000000..4abebf7cb6 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/email/utils.py @@ -0,0 +1,400 @@ +# Copyright (C) 2001-2010 Python Software Foundation +# Author: Barry Warsaw +# Contact: email-sig@python.org + +"""Miscellaneous utilities.""" + +from __future__ import unicode_literals +from __future__ import division +from __future__ import absolute_import +from future import utils +from future.builtins import bytes, int, str + +__all__ = [ + 'collapse_rfc2231_value', + 'decode_params', + 'decode_rfc2231', + 'encode_rfc2231', + 'formataddr', + 'formatdate', + 'format_datetime', + 'getaddresses', + 'make_msgid', + 'mktime_tz', + 'parseaddr', + 'parsedate', + 'parsedate_tz', + 'parsedate_to_datetime', + 'unquote', + ] + +import os +import re +if utils.PY2: + re.ASCII = 0 +import time +import base64 +import random +import socket +from future.backports import datetime +from future.backports.urllib.parse import quote as url_quote, unquote as url_unquote +import warnings +from io import StringIO + +from future.backports.email._parseaddr import quote +from future.backports.email._parseaddr import AddressList as _AddressList +from future.backports.email._parseaddr import mktime_tz + +from future.backports.email._parseaddr import parsedate, parsedate_tz, _parsedate_tz + +from quopri import decodestring as _qdecode + +# Intrapackage imports +from future.backports.email.encoders import _bencode, _qencode +from future.backports.email.charset import Charset + +COMMASPACE = ', ' +EMPTYSTRING = '' +UEMPTYSTRING = '' +CRLF = '\r\n' +TICK = "'" + +specialsre = re.compile(r'[][\\()<>@,:;".]') +escapesre = re.compile(r'[\\"]') + +# How to figure out if we are processing strings 
that come from a byte +# source with undecodable characters. +_has_surrogates = re.compile( + '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search + +# How to deal with a string containing bytes before handing it to the +# application through the 'normal' interface. +def _sanitize(string): + # Turn any escaped bytes into unicode 'unknown' char. + original_bytes = string.encode('ascii', 'surrogateescape') + return original_bytes.decode('ascii', 'replace') + + +# Helpers + +def formataddr(pair, charset='utf-8'): + """The inverse of parseaddr(), this takes a 2-tuple of the form + (realname, email_address) and returns the string value suitable + for an RFC 2822 From, To or Cc header. + + If the first element of pair is false, then the second element is + returned unmodified. + + Optional charset if given is the character set that is used to encode + realname in case realname is not ASCII safe. Can be an instance of str or + a Charset-like object which has a header_encode method. Default is + 'utf-8'. + """ + name, address = pair + # The address MUST (per RFC) be ascii, so raise an UnicodeError if it isn't. + address.encode('ascii') + if name: + try: + name.encode('ascii') + except UnicodeEncodeError: + if isinstance(charset, str): + charset = Charset(charset) + encoded_name = charset.header_encode(name) + return "%s <%s>" % (encoded_name, address) + else: + quotes = '' + if specialsre.search(name): + quotes = '"' + name = escapesre.sub(r'\\\g<0>', name) + return '%s%s%s <%s>' % (quotes, name, quotes, address) + return address + + + +def getaddresses(fieldvalues): + """Return a list of (REALNAME, EMAIL) for each fieldvalue.""" + all = COMMASPACE.join(fieldvalues) + a = _AddressList(all) + return a.addresslist + + + +ecre = re.compile(r''' + =\? # literal =? + (?P[^?]*?) # non-greedy up to the next ? is the charset + \? # literal ? + (?P[qb]) # either a "q" or a "b", case insensitive + \? # literal ? + (?P.*?) # non-greedy up to the next ?= is the atom + \?= # literal ?= + ''', re.VERBOSE | re.IGNORECASE) + + +def _format_timetuple_and_zone(timetuple, zone): + return '%s, %02d %s %04d %02d:%02d:%02d %s' % ( + ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]], + timetuple[2], + ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1], + timetuple[0], timetuple[3], timetuple[4], timetuple[5], + zone) + +def formatdate(timeval=None, localtime=False, usegmt=False): + """Returns a date string as specified by RFC 2822, e.g.: + + Fri, 09 Nov 2001 01:08:47 -0000 + + Optional timeval if given is a floating point time value as accepted by + gmtime() and localtime(), otherwise the current time is used. + + Optional localtime is a flag that when True, interprets timeval, and + returns a date relative to the local timezone instead of UTC, properly + taking daylight savings time into account. + + Optional argument usegmt means that the timezone is written out as + an ascii string, not numeric one (so "GMT" instead of "+0000"). This + is needed for HTTP, and is only used when localtime==False. + """ + # Note: we cannot use strftime() because that honors the locale and RFC + # 2822 requires that day and month names be the English abbreviations. + if timeval is None: + timeval = time.time() + if localtime: + now = time.localtime(timeval) + # Calculate timezone offset, based on whether the local zone has + # daylight savings time, and whether DST is in effect. 
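A sketch of the address helpers above, assuming the vendored _AddressList parses like its stdlib counterpart: formataddr quotes display names containing specials, and getaddresses splits a recipient list into (realname, address) pairs.

    from future.backports.email.utils import formataddr, getaddresses

    print(formataddr(('Previn, Andre', 'previn@example.com')))
    # -> '"Previn, Andre" <previn@example.com>'
    print(getaddresses(['A <a@x.org>, B <b@y.org>']))
    # -> [('A', 'a@x.org'), ('B', 'b@y.org')]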
+ if time.daylight and now[-1]: + offset = time.altzone + else: + offset = time.timezone + hours, minutes = divmod(abs(offset), 3600) + # Remember offset is in seconds west of UTC, but the timezone is in + # minutes east of UTC, so the signs differ. + if offset > 0: + sign = '-' + else: + sign = '+' + zone = '%s%02d%02d' % (sign, hours, minutes // 60) + else: + now = time.gmtime(timeval) + # Timezone offset is always -0000 + if usegmt: + zone = 'GMT' + else: + zone = '-0000' + return _format_timetuple_and_zone(now, zone) + +def format_datetime(dt, usegmt=False): + """Turn a datetime into a date string as specified in RFC 2822. + + If usegmt is True, dt must be an aware datetime with an offset of zero. In + this case 'GMT' will be rendered instead of the normal +0000 required by + RFC2822. This is to support HTTP headers involving date stamps. + """ + now = dt.timetuple() + if usegmt: + if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc: + raise ValueError("usegmt option requires a UTC datetime") + zone = 'GMT' + elif dt.tzinfo is None: + zone = '-0000' + else: + zone = dt.strftime("%z") + return _format_timetuple_and_zone(now, zone) + + +def make_msgid(idstring=None, domain=None): + """Returns a string suitable for RFC 2822 compliant Message-ID, e.g: + + <20020201195627.33539.96671@nightshade.la.mastaler.com> + + Optional idstring if given is a string used to strengthen the + uniqueness of the message id. Optional domain if given provides the + portion of the message id after the '@'. It defaults to the locally + defined hostname. + """ + timeval = time.time() + utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval)) + pid = os.getpid() + randint = random.randrange(100000) + if idstring is None: + idstring = '' + else: + idstring = '.' + idstring + if domain is None: + domain = socket.getfqdn() + msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain) + return msgid + + +def parsedate_to_datetime(data): + _3to2list = list(_parsedate_tz(data)) + dtuple, tz, = [_3to2list[:-1]] + _3to2list[-1:] + if tz is None: + return datetime.datetime(*dtuple[:6]) + return datetime.datetime(*dtuple[:6], + tzinfo=datetime.timezone(datetime.timedelta(seconds=tz))) + + +def parseaddr(addr): + addrs = _AddressList(addr).addresslist + if not addrs: + return '', '' + return addrs[0] + + +# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3. +def unquote(str): + """Remove quotes from a string.""" + if len(str) > 1: + if str.startswith('"') and str.endswith('"'): + return str[1:-1].replace('\\\\', '\\').replace('\\"', '"') + if str.startswith('<') and str.endswith('>'): + return str[1:-1] + return str + + + +# RFC2231-related functions - parameter encoding and decoding +def decode_rfc2231(s): + """Decode string according to RFC 2231""" + parts = s.split(TICK, 2) + if len(parts) <= 2: + return None, None, s + return parts + + +def encode_rfc2231(s, charset=None, language=None): + """Encode string according to RFC 2231. + + If neither charset nor language is given, then s is returned as-is. If + charset is given but not language, the string is encoded using the empty + string for language. + """ + s = url_quote(s, safe='', encoding=charset or 'ascii') + if charset is None and language is None: + return s + if language is None: + language = '' + return "%s'%s'%s" % (charset, language, s) + + +rfc2231_continuation = re.compile(r'^(?P\w+)\*((?P[0-9]+)\*?)?$', + re.ASCII) + +def decode_params(params): + """Decode parameters list according to RFC 2231. 
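A round-trip sketch for the RFC 2231 helpers above, together with collapse_rfc2231_value from further below: encode_rfc2231 percent-encodes the value and prefixes the charset and language, decode_rfc2231 splits the triplet apart again (a 3-item list when both ticks are present), and collapse_rfc2231_value undoes the latin-1 trick decode_params uses by reinterpreting the segment's characters as raw octets and decoding them with the declared charset.

    from future.backports.email.utils import (collapse_rfc2231_value,
                                              decode_rfc2231, encode_rfc2231)

    s = encode_rfc2231('fu\xdfball.txt', charset='utf-8')
    print(s)                  # -> utf-8''fu%C3%9Fball.txt
    print(decode_rfc2231(s))  # -> ['utf-8', '', 'fu%C3%9Fball.txt']
    print(collapse_rfc2231_value(('utf-8', '', 'fu\xc3\x9fball.txt')))
    # -> fußball.txt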
+ + params is a sequence of 2-tuples containing (param name, string value). + """ + # Copy params so we don't mess with the original + params = params[:] + new_params = [] + # Map parameter's name to a list of continuations. The values are a + # 3-tuple of the continuation number, the string value, and a flag + # specifying whether a particular segment is %-encoded. + rfc2231_params = {} + name, value = params.pop(0) + new_params.append((name, value)) + while params: + name, value = params.pop(0) + if name.endswith('*'): + encoded = True + else: + encoded = False + value = unquote(value) + mo = rfc2231_continuation.match(name) + if mo: + name, num = mo.group('name', 'num') + if num is not None: + num = int(num) + rfc2231_params.setdefault(name, []).append((num, value, encoded)) + else: + new_params.append((name, '"%s"' % quote(value))) + if rfc2231_params: + for name, continuations in rfc2231_params.items(): + value = [] + extended = False + # Sort by number + continuations.sort() + # And now append all values in numerical order, converting + # %-encodings for the encoded segments. If any of the + # continuation names ends in a *, then the entire string, after + # decoding segments and concatenating, must have the charset and + # language specifiers at the beginning of the string. + for num, s, encoded in continuations: + if encoded: + # Decode as "latin-1", so the characters in s directly + # represent the percent-encoded octet values. + # collapse_rfc2231_value treats this as an octet sequence. + s = url_unquote(s, encoding="latin-1") + extended = True + value.append(s) + value = quote(EMPTYSTRING.join(value)) + if extended: + charset, language, value = decode_rfc2231(value) + new_params.append((name, (charset, language, '"%s"' % value))) + else: + new_params.append((name, '"%s"' % value)) + return new_params + +def collapse_rfc2231_value(value, errors='replace', + fallback_charset='us-ascii'): + if not isinstance(value, tuple) or len(value) != 3: + return unquote(value) + # While value comes to us as a unicode string, we need it to be a bytes + # object. We do not want bytes() normal utf-8 decoder, we want a straight + # interpretation of the string as character bytes. + charset, language, text = value + rawbytes = bytes(text, 'raw-unicode-escape') + try: + return str(rawbytes, charset, errors) + except LookupError: + # charset is not a known codec. + return unquote(text) + + +# +# datetime doesn't provide a localtime function yet, so provide one. Code +# adapted from the patch in issue 9527. This may not be perfect, but it is +# better than not having it. +# + +def localtime(dt=None, isdst=-1): + """Return local time as an aware datetime object. + + If called without arguments, return current time. Otherwise *dt* + argument should be a datetime instance, and it is converted to the + local time zone according to the system time zone database. If *dt* is + naive (that is, dt.tzinfo is None), it is assumed to be in local time. + In this case, a positive or zero value for *isdst* causes localtime to + presume initially that summer time (for example, Daylight Saving Time) + is or is not (respectively) in effect for the specified time. A + negative value for *isdst* causes the localtime() function to attempt + to divine whether summer time is in effect for the specified time. + + """ + if dt is None: + return datetime.datetime.now(datetime.timezone.utc).astimezone() + if dt.tzinfo is not None: + return dt.astimezone() + # We have a naive datetime. 
Convert to a (localtime) timetuple and pass to
+    # system mktime together with the isdst hint. System mktime will return
+    # seconds since epoch.
+    tm = dt.timetuple()[:-1] + (isdst,)
+    seconds = time.mktime(tm)
+    localtm = time.localtime(seconds)
+    try:
+        delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
+        tz = datetime.timezone(delta, localtm.tm_zone)
+    except AttributeError:
+        # Compute UTC offset and compare with the value implied by tm_isdst.
+        # If the values match, use the zone name implied by tm_isdst.
+        delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
+        dst = time.daylight and localtm.tm_isdst > 0
+        gmtoff = -(time.altzone if dst else time.timezone)
+        if delta == datetime.timedelta(seconds=gmtoff):
+            tz = datetime.timezone(delta, time.tzname[dst])
+        else:
+            tz = datetime.timezone(delta)
+    return dt.replace(tzinfo=tz)
diff --git a/pype/modules/ftrack/python2_vendor/future/backports/html/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/html/__init__.py
new file mode 100644
index 0000000000..58e133fd4b
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/backports/html/__init__.py
@@ -0,0 +1,27 @@
+"""
+General functions for HTML manipulation, backported from Py3.
+
+Note that this uses Python 2.7 code with the corresponding Python 3
+module names and locations.
+"""
+
+from __future__ import unicode_literals
+
+
+_escape_map = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;'}
+_escape_map_full = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;',
+                    ord('"'): '&quot;', ord('\''): '&#x27;'}
+
+# NB: this is a candidate for a bytes/string polymorphic interface
+
+def escape(s, quote=True):
+    """
+    Replace special characters "&", "<" and ">" with HTML-safe sequences.
+    If the optional flag quote is true (the default), the quotation mark
+    characters, both double quote (") and single quote (') characters are also
+    translated.
+    """
+    assert not isinstance(s, bytes), 'Pass a unicode string'
+    if quote:
+        return s.translate(_escape_map_full)
+    return s.translate(_escape_map)
diff --git a/pype/modules/ftrack/python2_vendor/future/backports/html/entities.py b/pype/modules/ftrack/python2_vendor/future/backports/html/entities.py
new file mode 100644
index 0000000000..5c73f6923a
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/backports/html/entities.py
@@ -0,0 +1,2514 @@
+"""HTML character entity references.
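A sketch of the escape helper added above (import path inferred from the vendored layout); with quote=True, the default, both quote characters are translated as well:

    from future.backports.html import escape

    print(escape('<a href="x">'))               # -> &lt;a href=&quot;x&quot;&gt;
    print(escape('<a href="x">', quote=False))  # -> &lt;a href="x"&gt;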
+ +Backported for python-future from Python 3.3 +""" + +from __future__ import (absolute_import, division, + print_function, unicode_literals) +from future.builtins import * + + +# maps the HTML entity name to the Unicode codepoint +name2codepoint = { + 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1 + 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1 + 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1 + 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1 + 'Alpha': 0x0391, # greek capital letter alpha, U+0391 + 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1 + 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1 + 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1 + 'Beta': 0x0392, # greek capital letter beta, U+0392 + 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1 + 'Chi': 0x03a7, # greek capital letter chi, U+03A7 + 'Dagger': 0x2021, # double dagger, U+2021 ISOpub + 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3 + 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1 + 'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1 + 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1 + 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1 + 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395 + 'Eta': 0x0397, # greek capital letter eta, U+0397 + 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1 + 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3 + 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1 + 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1 + 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1 + 'Iota': 0x0399, # greek capital letter iota, U+0399 + 'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1 + 'Kappa': 0x039a, # greek capital letter kappa, U+039A + 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3 + 'Mu': 0x039c, # greek capital letter mu, U+039C + 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1 + 'Nu': 0x039d, # greek capital letter nu, U+039D + 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2 + 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1 + 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1 + 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1 + 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3 + 'Omicron': 0x039f, # greek capital letter omicron, U+039F + 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1 + 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1 + 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1 + 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3 + 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3 + 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech + 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3 + 'Rho': 0x03a1, # greek capital letter rho, U+03A1 + 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2 + 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3 + 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1 + 'Tau': 0x03a4, # greek 
capital letter tau, U+03A4 + 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3 + 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1 + 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1 + 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1 + 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3 + 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1 + 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3 + 'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1 + 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2 + 'Zeta': 0x0396, # greek capital letter zeta, U+0396 + 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1 + 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1 + 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia + 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1 + 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1 + 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW + 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3 + 'amp': 0x0026, # ampersand, U+0026 ISOnum + 'and': 0x2227, # logical and = wedge, U+2227 ISOtech + 'ang': 0x2220, # angle, U+2220 ISOamso + 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1 + 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr + 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1 + 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1 + 'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW + 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3 + 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum + 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub + 'cap': 0x2229, # intersection = cap, U+2229 ISOtech + 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1 + 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia + 'cent': 0x00a2, # cent sign, U+00A2 ISOnum + 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3 + 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub + 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub + 'cong': 0x2245, # approximately equal to, U+2245 ISOtech + 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum + 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW + 'cup': 0x222a, # union = cup, U+222A ISOtech + 'curren': 0x00a4, # currency sign, U+00A4 ISOnum + 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa + 'dagger': 0x2020, # dagger, U+2020 ISOpub + 'darr': 0x2193, # downwards arrow, U+2193 ISOnum + 'deg': 0x00b0, # degree sign, U+00B0 ISOnum + 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3 + 'diams': 0x2666, # black diamond suit, U+2666 ISOpub + 'divide': 0x00f7, # division sign, U+00F7 ISOnum + 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1 + 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1 + 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1 + 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso + 'emsp': 0x2003, # em space, U+2003 ISOpub + 'ensp': 0x2002, # en space, U+2002 ISOpub + 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3 + 'equiv': 0x2261, # identical to, U+2261 ISOtech + 'eta': 0x03b7, # greek small 
letter eta, U+03B7 ISOgrk3 + 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1 + 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1 + 'euro': 0x20ac, # euro sign, U+20AC NEW + 'exist': 0x2203, # there exists, U+2203 ISOtech + 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech + 'forall': 0x2200, # for all, U+2200 ISOtech + 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum + 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum + 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum + 'frasl': 0x2044, # fraction slash, U+2044 NEW + 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3 + 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech + 'gt': 0x003e, # greater-than sign, U+003E ISOnum + 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa + 'harr': 0x2194, # left right arrow, U+2194 ISOamsa + 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub + 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub + 'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1 + 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1 + 'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum + 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1 + 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso + 'infin': 0x221e, # infinity, U+221E ISOtech + 'int': 0x222b, # integral, U+222B ISOtech + 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3 + 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum + 'isin': 0x2208, # element of, U+2208 ISOtech + 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1 + 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3 + 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech + 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3 + 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech + 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum + 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum + 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc + 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum + 'le': 0x2264, # less-than or equal to, U+2264 ISOtech + 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc + 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech + 'loz': 0x25ca, # lozenge, U+25CA ISOpub + 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070 + 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed + 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum + 'lt': 0x003c, # less-than sign, U+003C ISOnum + 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia + 'mdash': 0x2014, # em dash, U+2014 ISOpub + 'micro': 0x00b5, # micro sign, U+00B5 ISOnum + 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum + 'minus': 0x2212, # minus sign, U+2212 ISOtech + 'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3 + 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech + 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum + 'ndash': 0x2013, # en dash, U+2013 ISOpub + 'ne': 0x2260, # not equal to, U+2260 ISOtech + 'ni': 0x220b, # contains as member, U+220B ISOtech + 'not': 0x00ac, # not sign, U+00AC ISOnum + 'notin': 0x2209, # not an element of, 
U+2209 ISOtech + 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn + 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1 + 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3 + 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1 + 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1 + 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2 + 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1 + 'oline': 0x203e, # overline = spacing overscore, U+203E NEW + 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3 + 'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW + 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb + 'or': 0x2228, # logical or = vee, U+2228 ISOtech + 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum + 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum + 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1 + 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1 + 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb + 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1 + 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum + 'part': 0x2202, # partial differential, U+2202 ISOtech + 'permil': 0x2030, # per mille sign, U+2030 ISOtech + 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech + 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3 + 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3 + 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3 + 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum + 'pound': 0x00a3, # pound sign, U+00A3 ISOnum + 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech + 'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb + 'prop': 0x221d, # proportional to, U+221D ISOtech + 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3 + 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum + 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech + 'radic': 0x221a, # square root = radical sign, U+221A ISOtech + 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech + 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum + 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum + 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc + 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum + 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso + 'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum + 'rfloor': 0x230b, # right floor, U+230B ISOamsc + 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3 + 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070 + 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed + 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum + 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW + 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2 + 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb + 'sect': 0x00a7, # section sign, U+00A7 ISOnum + 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum + 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3 + 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3 + 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech + 'spades': 0x2660, # black spade suit, U+2660 ISOpub + 'sub': 
0x2282, # subset of, U+2282 ISOtech + 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech + 'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb + 'sup': 0x2283, # superset of, U+2283 ISOtech + 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum + 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum + 'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum + 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech + 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1 + 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3 + 'there4': 0x2234, # therefore, U+2234 ISOtech + 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3 + 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW + 'thinsp': 0x2009, # thin space, U+2009 ISOpub + 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1 + 'tilde': 0x02dc, # small tilde, U+02DC ISOdia + 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum + 'trade': 0x2122, # trade mark sign, U+2122 ISOnum + 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa + 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1 + 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum + 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1 + 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1 + 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia + 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW + 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3 + 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1 + 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso + 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3 + 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1 + 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum + 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1 + 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3 + 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070 + 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070 +} + + +# maps the HTML5 named character references to the equivalent Unicode character(s) +html5 = { + 'Aacute': '\xc1', + 'aacute': '\xe1', + 'Aacute;': '\xc1', + 'aacute;': '\xe1', + 'Abreve;': '\u0102', + 'abreve;': '\u0103', + 'ac;': '\u223e', + 'acd;': '\u223f', + 'acE;': '\u223e\u0333', + 'Acirc': '\xc2', + 'acirc': '\xe2', + 'Acirc;': '\xc2', + 'acirc;': '\xe2', + 'acute': '\xb4', + 'acute;': '\xb4', + 'Acy;': '\u0410', + 'acy;': '\u0430', + 'AElig': '\xc6', + 'aelig': '\xe6', + 'AElig;': '\xc6', + 'aelig;': '\xe6', + 'af;': '\u2061', + 'Afr;': '\U0001d504', + 'afr;': '\U0001d51e', + 'Agrave': '\xc0', + 'agrave': '\xe0', + 'Agrave;': '\xc0', + 'agrave;': '\xe0', + 'alefsym;': '\u2135', + 'aleph;': '\u2135', + 'Alpha;': '\u0391', + 'alpha;': '\u03b1', + 'Amacr;': '\u0100', + 'amacr;': '\u0101', + 'amalg;': '\u2a3f', + 'AMP': '&', + 'amp': '&', + 'AMP;': '&', + 'amp;': '&', + 'And;': '\u2a53', + 'and;': '\u2227', + 'andand;': '\u2a55', + 'andd;': '\u2a5c', + 'andslope;': '\u2a58', + 'andv;': '\u2a5a', + 'ang;': '\u2220', + 'ange;': '\u29a4', + 'angle;': '\u2220', + 'angmsd;': '\u2221', + 'angmsdaa;': '\u29a8', + 'angmsdab;': '\u29a9', + 'angmsdac;': '\u29aa', + 'angmsdad;': '\u29ab', + 'angmsdae;': '\u29ac', + 'angmsdaf;': '\u29ad', + 'angmsdag;': '\u29ae', + 'angmsdah;': '\u29af', + 'angrt;': '\u221f', + 'angrtvb;': '\u22be', + 'angrtvbd;': 
'\u299d', + 'angsph;': '\u2222', + 'angst;': '\xc5', + 'angzarr;': '\u237c', + 'Aogon;': '\u0104', + 'aogon;': '\u0105', + 'Aopf;': '\U0001d538', + 'aopf;': '\U0001d552', + 'ap;': '\u2248', + 'apacir;': '\u2a6f', + 'apE;': '\u2a70', + 'ape;': '\u224a', + 'apid;': '\u224b', + 'apos;': "'", + 'ApplyFunction;': '\u2061', + 'approx;': '\u2248', + 'approxeq;': '\u224a', + 'Aring': '\xc5', + 'aring': '\xe5', + 'Aring;': '\xc5', + 'aring;': '\xe5', + 'Ascr;': '\U0001d49c', + 'ascr;': '\U0001d4b6', + 'Assign;': '\u2254', + 'ast;': '*', + 'asymp;': '\u2248', + 'asympeq;': '\u224d', + 'Atilde': '\xc3', + 'atilde': '\xe3', + 'Atilde;': '\xc3', + 'atilde;': '\xe3', + 'Auml': '\xc4', + 'auml': '\xe4', + 'Auml;': '\xc4', + 'auml;': '\xe4', + 'awconint;': '\u2233', + 'awint;': '\u2a11', + 'backcong;': '\u224c', + 'backepsilon;': '\u03f6', + 'backprime;': '\u2035', + 'backsim;': '\u223d', + 'backsimeq;': '\u22cd', + 'Backslash;': '\u2216', + 'Barv;': '\u2ae7', + 'barvee;': '\u22bd', + 'Barwed;': '\u2306', + 'barwed;': '\u2305', + 'barwedge;': '\u2305', + 'bbrk;': '\u23b5', + 'bbrktbrk;': '\u23b6', + 'bcong;': '\u224c', + 'Bcy;': '\u0411', + 'bcy;': '\u0431', + 'bdquo;': '\u201e', + 'becaus;': '\u2235', + 'Because;': '\u2235', + 'because;': '\u2235', + 'bemptyv;': '\u29b0', + 'bepsi;': '\u03f6', + 'bernou;': '\u212c', + 'Bernoullis;': '\u212c', + 'Beta;': '\u0392', + 'beta;': '\u03b2', + 'beth;': '\u2136', + 'between;': '\u226c', + 'Bfr;': '\U0001d505', + 'bfr;': '\U0001d51f', + 'bigcap;': '\u22c2', + 'bigcirc;': '\u25ef', + 'bigcup;': '\u22c3', + 'bigodot;': '\u2a00', + 'bigoplus;': '\u2a01', + 'bigotimes;': '\u2a02', + 'bigsqcup;': '\u2a06', + 'bigstar;': '\u2605', + 'bigtriangledown;': '\u25bd', + 'bigtriangleup;': '\u25b3', + 'biguplus;': '\u2a04', + 'bigvee;': '\u22c1', + 'bigwedge;': '\u22c0', + 'bkarow;': '\u290d', + 'blacklozenge;': '\u29eb', + 'blacksquare;': '\u25aa', + 'blacktriangle;': '\u25b4', + 'blacktriangledown;': '\u25be', + 'blacktriangleleft;': '\u25c2', + 'blacktriangleright;': '\u25b8', + 'blank;': '\u2423', + 'blk12;': '\u2592', + 'blk14;': '\u2591', + 'blk34;': '\u2593', + 'block;': '\u2588', + 'bne;': '=\u20e5', + 'bnequiv;': '\u2261\u20e5', + 'bNot;': '\u2aed', + 'bnot;': '\u2310', + 'Bopf;': '\U0001d539', + 'bopf;': '\U0001d553', + 'bot;': '\u22a5', + 'bottom;': '\u22a5', + 'bowtie;': '\u22c8', + 'boxbox;': '\u29c9', + 'boxDL;': '\u2557', + 'boxDl;': '\u2556', + 'boxdL;': '\u2555', + 'boxdl;': '\u2510', + 'boxDR;': '\u2554', + 'boxDr;': '\u2553', + 'boxdR;': '\u2552', + 'boxdr;': '\u250c', + 'boxH;': '\u2550', + 'boxh;': '\u2500', + 'boxHD;': '\u2566', + 'boxHd;': '\u2564', + 'boxhD;': '\u2565', + 'boxhd;': '\u252c', + 'boxHU;': '\u2569', + 'boxHu;': '\u2567', + 'boxhU;': '\u2568', + 'boxhu;': '\u2534', + 'boxminus;': '\u229f', + 'boxplus;': '\u229e', + 'boxtimes;': '\u22a0', + 'boxUL;': '\u255d', + 'boxUl;': '\u255c', + 'boxuL;': '\u255b', + 'boxul;': '\u2518', + 'boxUR;': '\u255a', + 'boxUr;': '\u2559', + 'boxuR;': '\u2558', + 'boxur;': '\u2514', + 'boxV;': '\u2551', + 'boxv;': '\u2502', + 'boxVH;': '\u256c', + 'boxVh;': '\u256b', + 'boxvH;': '\u256a', + 'boxvh;': '\u253c', + 'boxVL;': '\u2563', + 'boxVl;': '\u2562', + 'boxvL;': '\u2561', + 'boxvl;': '\u2524', + 'boxVR;': '\u2560', + 'boxVr;': '\u255f', + 'boxvR;': '\u255e', + 'boxvr;': '\u251c', + 'bprime;': '\u2035', + 'Breve;': '\u02d8', + 'breve;': '\u02d8', + 'brvbar': '\xa6', + 'brvbar;': '\xa6', + 'Bscr;': '\u212c', + 'bscr;': '\U0001d4b7', + 'bsemi;': '\u204f', + 'bsim;': '\u223d', + 'bsime;': '\u22cd', + 'bsol;': '\\', 
+ 'bsolb;': '\u29c5', + 'bsolhsub;': '\u27c8', + 'bull;': '\u2022', + 'bullet;': '\u2022', + 'bump;': '\u224e', + 'bumpE;': '\u2aae', + 'bumpe;': '\u224f', + 'Bumpeq;': '\u224e', + 'bumpeq;': '\u224f', + 'Cacute;': '\u0106', + 'cacute;': '\u0107', + 'Cap;': '\u22d2', + 'cap;': '\u2229', + 'capand;': '\u2a44', + 'capbrcup;': '\u2a49', + 'capcap;': '\u2a4b', + 'capcup;': '\u2a47', + 'capdot;': '\u2a40', + 'CapitalDifferentialD;': '\u2145', + 'caps;': '\u2229\ufe00', + 'caret;': '\u2041', + 'caron;': '\u02c7', + 'Cayleys;': '\u212d', + 'ccaps;': '\u2a4d', + 'Ccaron;': '\u010c', + 'ccaron;': '\u010d', + 'Ccedil': '\xc7', + 'ccedil': '\xe7', + 'Ccedil;': '\xc7', + 'ccedil;': '\xe7', + 'Ccirc;': '\u0108', + 'ccirc;': '\u0109', + 'Cconint;': '\u2230', + 'ccups;': '\u2a4c', + 'ccupssm;': '\u2a50', + 'Cdot;': '\u010a', + 'cdot;': '\u010b', + 'cedil': '\xb8', + 'cedil;': '\xb8', + 'Cedilla;': '\xb8', + 'cemptyv;': '\u29b2', + 'cent': '\xa2', + 'cent;': '\xa2', + 'CenterDot;': '\xb7', + 'centerdot;': '\xb7', + 'Cfr;': '\u212d', + 'cfr;': '\U0001d520', + 'CHcy;': '\u0427', + 'chcy;': '\u0447', + 'check;': '\u2713', + 'checkmark;': '\u2713', + 'Chi;': '\u03a7', + 'chi;': '\u03c7', + 'cir;': '\u25cb', + 'circ;': '\u02c6', + 'circeq;': '\u2257', + 'circlearrowleft;': '\u21ba', + 'circlearrowright;': '\u21bb', + 'circledast;': '\u229b', + 'circledcirc;': '\u229a', + 'circleddash;': '\u229d', + 'CircleDot;': '\u2299', + 'circledR;': '\xae', + 'circledS;': '\u24c8', + 'CircleMinus;': '\u2296', + 'CirclePlus;': '\u2295', + 'CircleTimes;': '\u2297', + 'cirE;': '\u29c3', + 'cire;': '\u2257', + 'cirfnint;': '\u2a10', + 'cirmid;': '\u2aef', + 'cirscir;': '\u29c2', + 'ClockwiseContourIntegral;': '\u2232', + 'CloseCurlyDoubleQuote;': '\u201d', + 'CloseCurlyQuote;': '\u2019', + 'clubs;': '\u2663', + 'clubsuit;': '\u2663', + 'Colon;': '\u2237', + 'colon;': ':', + 'Colone;': '\u2a74', + 'colone;': '\u2254', + 'coloneq;': '\u2254', + 'comma;': ',', + 'commat;': '@', + 'comp;': '\u2201', + 'compfn;': '\u2218', + 'complement;': '\u2201', + 'complexes;': '\u2102', + 'cong;': '\u2245', + 'congdot;': '\u2a6d', + 'Congruent;': '\u2261', + 'Conint;': '\u222f', + 'conint;': '\u222e', + 'ContourIntegral;': '\u222e', + 'Copf;': '\u2102', + 'copf;': '\U0001d554', + 'coprod;': '\u2210', + 'Coproduct;': '\u2210', + 'COPY': '\xa9', + 'copy': '\xa9', + 'COPY;': '\xa9', + 'copy;': '\xa9', + 'copysr;': '\u2117', + 'CounterClockwiseContourIntegral;': '\u2233', + 'crarr;': '\u21b5', + 'Cross;': '\u2a2f', + 'cross;': '\u2717', + 'Cscr;': '\U0001d49e', + 'cscr;': '\U0001d4b8', + 'csub;': '\u2acf', + 'csube;': '\u2ad1', + 'csup;': '\u2ad0', + 'csupe;': '\u2ad2', + 'ctdot;': '\u22ef', + 'cudarrl;': '\u2938', + 'cudarrr;': '\u2935', + 'cuepr;': '\u22de', + 'cuesc;': '\u22df', + 'cularr;': '\u21b6', + 'cularrp;': '\u293d', + 'Cup;': '\u22d3', + 'cup;': '\u222a', + 'cupbrcap;': '\u2a48', + 'CupCap;': '\u224d', + 'cupcap;': '\u2a46', + 'cupcup;': '\u2a4a', + 'cupdot;': '\u228d', + 'cupor;': '\u2a45', + 'cups;': '\u222a\ufe00', + 'curarr;': '\u21b7', + 'curarrm;': '\u293c', + 'curlyeqprec;': '\u22de', + 'curlyeqsucc;': '\u22df', + 'curlyvee;': '\u22ce', + 'curlywedge;': '\u22cf', + 'curren': '\xa4', + 'curren;': '\xa4', + 'curvearrowleft;': '\u21b6', + 'curvearrowright;': '\u21b7', + 'cuvee;': '\u22ce', + 'cuwed;': '\u22cf', + 'cwconint;': '\u2232', + 'cwint;': '\u2231', + 'cylcty;': '\u232d', + 'Dagger;': '\u2021', + 'dagger;': '\u2020', + 'daleth;': '\u2138', + 'Darr;': '\u21a1', + 'dArr;': '\u21d3', + 'darr;': '\u2193', + 'dash;': '\u2010', + 
'Dashv;': '\u2ae4', + 'dashv;': '\u22a3', + 'dbkarow;': '\u290f', + 'dblac;': '\u02dd', + 'Dcaron;': '\u010e', + 'dcaron;': '\u010f', + 'Dcy;': '\u0414', + 'dcy;': '\u0434', + 'DD;': '\u2145', + 'dd;': '\u2146', + 'ddagger;': '\u2021', + 'ddarr;': '\u21ca', + 'DDotrahd;': '\u2911', + 'ddotseq;': '\u2a77', + 'deg': '\xb0', + 'deg;': '\xb0', + 'Del;': '\u2207', + 'Delta;': '\u0394', + 'delta;': '\u03b4', + 'demptyv;': '\u29b1', + 'dfisht;': '\u297f', + 'Dfr;': '\U0001d507', + 'dfr;': '\U0001d521', + 'dHar;': '\u2965', + 'dharl;': '\u21c3', + 'dharr;': '\u21c2', + 'DiacriticalAcute;': '\xb4', + 'DiacriticalDot;': '\u02d9', + 'DiacriticalDoubleAcute;': '\u02dd', + 'DiacriticalGrave;': '`', + 'DiacriticalTilde;': '\u02dc', + 'diam;': '\u22c4', + 'Diamond;': '\u22c4', + 'diamond;': '\u22c4', + 'diamondsuit;': '\u2666', + 'diams;': '\u2666', + 'die;': '\xa8', + 'DifferentialD;': '\u2146', + 'digamma;': '\u03dd', + 'disin;': '\u22f2', + 'div;': '\xf7', + 'divide': '\xf7', + 'divide;': '\xf7', + 'divideontimes;': '\u22c7', + 'divonx;': '\u22c7', + 'DJcy;': '\u0402', + 'djcy;': '\u0452', + 'dlcorn;': '\u231e', + 'dlcrop;': '\u230d', + 'dollar;': '$', + 'Dopf;': '\U0001d53b', + 'dopf;': '\U0001d555', + 'Dot;': '\xa8', + 'dot;': '\u02d9', + 'DotDot;': '\u20dc', + 'doteq;': '\u2250', + 'doteqdot;': '\u2251', + 'DotEqual;': '\u2250', + 'dotminus;': '\u2238', + 'dotplus;': '\u2214', + 'dotsquare;': '\u22a1', + 'doublebarwedge;': '\u2306', + 'DoubleContourIntegral;': '\u222f', + 'DoubleDot;': '\xa8', + 'DoubleDownArrow;': '\u21d3', + 'DoubleLeftArrow;': '\u21d0', + 'DoubleLeftRightArrow;': '\u21d4', + 'DoubleLeftTee;': '\u2ae4', + 'DoubleLongLeftArrow;': '\u27f8', + 'DoubleLongLeftRightArrow;': '\u27fa', + 'DoubleLongRightArrow;': '\u27f9', + 'DoubleRightArrow;': '\u21d2', + 'DoubleRightTee;': '\u22a8', + 'DoubleUpArrow;': '\u21d1', + 'DoubleUpDownArrow;': '\u21d5', + 'DoubleVerticalBar;': '\u2225', + 'DownArrow;': '\u2193', + 'Downarrow;': '\u21d3', + 'downarrow;': '\u2193', + 'DownArrowBar;': '\u2913', + 'DownArrowUpArrow;': '\u21f5', + 'DownBreve;': '\u0311', + 'downdownarrows;': '\u21ca', + 'downharpoonleft;': '\u21c3', + 'downharpoonright;': '\u21c2', + 'DownLeftRightVector;': '\u2950', + 'DownLeftTeeVector;': '\u295e', + 'DownLeftVector;': '\u21bd', + 'DownLeftVectorBar;': '\u2956', + 'DownRightTeeVector;': '\u295f', + 'DownRightVector;': '\u21c1', + 'DownRightVectorBar;': '\u2957', + 'DownTee;': '\u22a4', + 'DownTeeArrow;': '\u21a7', + 'drbkarow;': '\u2910', + 'drcorn;': '\u231f', + 'drcrop;': '\u230c', + 'Dscr;': '\U0001d49f', + 'dscr;': '\U0001d4b9', + 'DScy;': '\u0405', + 'dscy;': '\u0455', + 'dsol;': '\u29f6', + 'Dstrok;': '\u0110', + 'dstrok;': '\u0111', + 'dtdot;': '\u22f1', + 'dtri;': '\u25bf', + 'dtrif;': '\u25be', + 'duarr;': '\u21f5', + 'duhar;': '\u296f', + 'dwangle;': '\u29a6', + 'DZcy;': '\u040f', + 'dzcy;': '\u045f', + 'dzigrarr;': '\u27ff', + 'Eacute': '\xc9', + 'eacute': '\xe9', + 'Eacute;': '\xc9', + 'eacute;': '\xe9', + 'easter;': '\u2a6e', + 'Ecaron;': '\u011a', + 'ecaron;': '\u011b', + 'ecir;': '\u2256', + 'Ecirc': '\xca', + 'ecirc': '\xea', + 'Ecirc;': '\xca', + 'ecirc;': '\xea', + 'ecolon;': '\u2255', + 'Ecy;': '\u042d', + 'ecy;': '\u044d', + 'eDDot;': '\u2a77', + 'Edot;': '\u0116', + 'eDot;': '\u2251', + 'edot;': '\u0117', + 'ee;': '\u2147', + 'efDot;': '\u2252', + 'Efr;': '\U0001d508', + 'efr;': '\U0001d522', + 'eg;': '\u2a9a', + 'Egrave': '\xc8', + 'egrave': '\xe8', + 'Egrave;': '\xc8', + 'egrave;': '\xe8', + 'egs;': '\u2a96', + 'egsdot;': '\u2a98', + 'el;': '\u2a99', + 
'Element;': '\u2208', + 'elinters;': '\u23e7', + 'ell;': '\u2113', + 'els;': '\u2a95', + 'elsdot;': '\u2a97', + 'Emacr;': '\u0112', + 'emacr;': '\u0113', + 'empty;': '\u2205', + 'emptyset;': '\u2205', + 'EmptySmallSquare;': '\u25fb', + 'emptyv;': '\u2205', + 'EmptyVerySmallSquare;': '\u25ab', + 'emsp13;': '\u2004', + 'emsp14;': '\u2005', + 'emsp;': '\u2003', + 'ENG;': '\u014a', + 'eng;': '\u014b', + 'ensp;': '\u2002', + 'Eogon;': '\u0118', + 'eogon;': '\u0119', + 'Eopf;': '\U0001d53c', + 'eopf;': '\U0001d556', + 'epar;': '\u22d5', + 'eparsl;': '\u29e3', + 'eplus;': '\u2a71', + 'epsi;': '\u03b5', + 'Epsilon;': '\u0395', + 'epsilon;': '\u03b5', + 'epsiv;': '\u03f5', + 'eqcirc;': '\u2256', + 'eqcolon;': '\u2255', + 'eqsim;': '\u2242', + 'eqslantgtr;': '\u2a96', + 'eqslantless;': '\u2a95', + 'Equal;': '\u2a75', + 'equals;': '=', + 'EqualTilde;': '\u2242', + 'equest;': '\u225f', + 'Equilibrium;': '\u21cc', + 'equiv;': '\u2261', + 'equivDD;': '\u2a78', + 'eqvparsl;': '\u29e5', + 'erarr;': '\u2971', + 'erDot;': '\u2253', + 'Escr;': '\u2130', + 'escr;': '\u212f', + 'esdot;': '\u2250', + 'Esim;': '\u2a73', + 'esim;': '\u2242', + 'Eta;': '\u0397', + 'eta;': '\u03b7', + 'ETH': '\xd0', + 'eth': '\xf0', + 'ETH;': '\xd0', + 'eth;': '\xf0', + 'Euml': '\xcb', + 'euml': '\xeb', + 'Euml;': '\xcb', + 'euml;': '\xeb', + 'euro;': '\u20ac', + 'excl;': '!', + 'exist;': '\u2203', + 'Exists;': '\u2203', + 'expectation;': '\u2130', + 'ExponentialE;': '\u2147', + 'exponentiale;': '\u2147', + 'fallingdotseq;': '\u2252', + 'Fcy;': '\u0424', + 'fcy;': '\u0444', + 'female;': '\u2640', + 'ffilig;': '\ufb03', + 'fflig;': '\ufb00', + 'ffllig;': '\ufb04', + 'Ffr;': '\U0001d509', + 'ffr;': '\U0001d523', + 'filig;': '\ufb01', + 'FilledSmallSquare;': '\u25fc', + 'FilledVerySmallSquare;': '\u25aa', + 'fjlig;': 'fj', + 'flat;': '\u266d', + 'fllig;': '\ufb02', + 'fltns;': '\u25b1', + 'fnof;': '\u0192', + 'Fopf;': '\U0001d53d', + 'fopf;': '\U0001d557', + 'ForAll;': '\u2200', + 'forall;': '\u2200', + 'fork;': '\u22d4', + 'forkv;': '\u2ad9', + 'Fouriertrf;': '\u2131', + 'fpartint;': '\u2a0d', + 'frac12': '\xbd', + 'frac12;': '\xbd', + 'frac13;': '\u2153', + 'frac14': '\xbc', + 'frac14;': '\xbc', + 'frac15;': '\u2155', + 'frac16;': '\u2159', + 'frac18;': '\u215b', + 'frac23;': '\u2154', + 'frac25;': '\u2156', + 'frac34': '\xbe', + 'frac34;': '\xbe', + 'frac35;': '\u2157', + 'frac38;': '\u215c', + 'frac45;': '\u2158', + 'frac56;': '\u215a', + 'frac58;': '\u215d', + 'frac78;': '\u215e', + 'frasl;': '\u2044', + 'frown;': '\u2322', + 'Fscr;': '\u2131', + 'fscr;': '\U0001d4bb', + 'gacute;': '\u01f5', + 'Gamma;': '\u0393', + 'gamma;': '\u03b3', + 'Gammad;': '\u03dc', + 'gammad;': '\u03dd', + 'gap;': '\u2a86', + 'Gbreve;': '\u011e', + 'gbreve;': '\u011f', + 'Gcedil;': '\u0122', + 'Gcirc;': '\u011c', + 'gcirc;': '\u011d', + 'Gcy;': '\u0413', + 'gcy;': '\u0433', + 'Gdot;': '\u0120', + 'gdot;': '\u0121', + 'gE;': '\u2267', + 'ge;': '\u2265', + 'gEl;': '\u2a8c', + 'gel;': '\u22db', + 'geq;': '\u2265', + 'geqq;': '\u2267', + 'geqslant;': '\u2a7e', + 'ges;': '\u2a7e', + 'gescc;': '\u2aa9', + 'gesdot;': '\u2a80', + 'gesdoto;': '\u2a82', + 'gesdotol;': '\u2a84', + 'gesl;': '\u22db\ufe00', + 'gesles;': '\u2a94', + 'Gfr;': '\U0001d50a', + 'gfr;': '\U0001d524', + 'Gg;': '\u22d9', + 'gg;': '\u226b', + 'ggg;': '\u22d9', + 'gimel;': '\u2137', + 'GJcy;': '\u0403', + 'gjcy;': '\u0453', + 'gl;': '\u2277', + 'gla;': '\u2aa5', + 'glE;': '\u2a92', + 'glj;': '\u2aa4', + 'gnap;': '\u2a8a', + 'gnapprox;': '\u2a8a', + 'gnE;': '\u2269', + 'gne;': '\u2a88', + 
'gneq;': '\u2a88', + 'gneqq;': '\u2269', + 'gnsim;': '\u22e7', + 'Gopf;': '\U0001d53e', + 'gopf;': '\U0001d558', + 'grave;': '`', + 'GreaterEqual;': '\u2265', + 'GreaterEqualLess;': '\u22db', + 'GreaterFullEqual;': '\u2267', + 'GreaterGreater;': '\u2aa2', + 'GreaterLess;': '\u2277', + 'GreaterSlantEqual;': '\u2a7e', + 'GreaterTilde;': '\u2273', + 'Gscr;': '\U0001d4a2', + 'gscr;': '\u210a', + 'gsim;': '\u2273', + 'gsime;': '\u2a8e', + 'gsiml;': '\u2a90', + 'GT': '>', + 'gt': '>', + 'GT;': '>', + 'Gt;': '\u226b', + 'gt;': '>', + 'gtcc;': '\u2aa7', + 'gtcir;': '\u2a7a', + 'gtdot;': '\u22d7', + 'gtlPar;': '\u2995', + 'gtquest;': '\u2a7c', + 'gtrapprox;': '\u2a86', + 'gtrarr;': '\u2978', + 'gtrdot;': '\u22d7', + 'gtreqless;': '\u22db', + 'gtreqqless;': '\u2a8c', + 'gtrless;': '\u2277', + 'gtrsim;': '\u2273', + 'gvertneqq;': '\u2269\ufe00', + 'gvnE;': '\u2269\ufe00', + 'Hacek;': '\u02c7', + 'hairsp;': '\u200a', + 'half;': '\xbd', + 'hamilt;': '\u210b', + 'HARDcy;': '\u042a', + 'hardcy;': '\u044a', + 'hArr;': '\u21d4', + 'harr;': '\u2194', + 'harrcir;': '\u2948', + 'harrw;': '\u21ad', + 'Hat;': '^', + 'hbar;': '\u210f', + 'Hcirc;': '\u0124', + 'hcirc;': '\u0125', + 'hearts;': '\u2665', + 'heartsuit;': '\u2665', + 'hellip;': '\u2026', + 'hercon;': '\u22b9', + 'Hfr;': '\u210c', + 'hfr;': '\U0001d525', + 'HilbertSpace;': '\u210b', + 'hksearow;': '\u2925', + 'hkswarow;': '\u2926', + 'hoarr;': '\u21ff', + 'homtht;': '\u223b', + 'hookleftarrow;': '\u21a9', + 'hookrightarrow;': '\u21aa', + 'Hopf;': '\u210d', + 'hopf;': '\U0001d559', + 'horbar;': '\u2015', + 'HorizontalLine;': '\u2500', + 'Hscr;': '\u210b', + 'hscr;': '\U0001d4bd', + 'hslash;': '\u210f', + 'Hstrok;': '\u0126', + 'hstrok;': '\u0127', + 'HumpDownHump;': '\u224e', + 'HumpEqual;': '\u224f', + 'hybull;': '\u2043', + 'hyphen;': '\u2010', + 'Iacute': '\xcd', + 'iacute': '\xed', + 'Iacute;': '\xcd', + 'iacute;': '\xed', + 'ic;': '\u2063', + 'Icirc': '\xce', + 'icirc': '\xee', + 'Icirc;': '\xce', + 'icirc;': '\xee', + 'Icy;': '\u0418', + 'icy;': '\u0438', + 'Idot;': '\u0130', + 'IEcy;': '\u0415', + 'iecy;': '\u0435', + 'iexcl': '\xa1', + 'iexcl;': '\xa1', + 'iff;': '\u21d4', + 'Ifr;': '\u2111', + 'ifr;': '\U0001d526', + 'Igrave': '\xcc', + 'igrave': '\xec', + 'Igrave;': '\xcc', + 'igrave;': '\xec', + 'ii;': '\u2148', + 'iiiint;': '\u2a0c', + 'iiint;': '\u222d', + 'iinfin;': '\u29dc', + 'iiota;': '\u2129', + 'IJlig;': '\u0132', + 'ijlig;': '\u0133', + 'Im;': '\u2111', + 'Imacr;': '\u012a', + 'imacr;': '\u012b', + 'image;': '\u2111', + 'ImaginaryI;': '\u2148', + 'imagline;': '\u2110', + 'imagpart;': '\u2111', + 'imath;': '\u0131', + 'imof;': '\u22b7', + 'imped;': '\u01b5', + 'Implies;': '\u21d2', + 'in;': '\u2208', + 'incare;': '\u2105', + 'infin;': '\u221e', + 'infintie;': '\u29dd', + 'inodot;': '\u0131', + 'Int;': '\u222c', + 'int;': '\u222b', + 'intcal;': '\u22ba', + 'integers;': '\u2124', + 'Integral;': '\u222b', + 'intercal;': '\u22ba', + 'Intersection;': '\u22c2', + 'intlarhk;': '\u2a17', + 'intprod;': '\u2a3c', + 'InvisibleComma;': '\u2063', + 'InvisibleTimes;': '\u2062', + 'IOcy;': '\u0401', + 'iocy;': '\u0451', + 'Iogon;': '\u012e', + 'iogon;': '\u012f', + 'Iopf;': '\U0001d540', + 'iopf;': '\U0001d55a', + 'Iota;': '\u0399', + 'iota;': '\u03b9', + 'iprod;': '\u2a3c', + 'iquest': '\xbf', + 'iquest;': '\xbf', + 'Iscr;': '\u2110', + 'iscr;': '\U0001d4be', + 'isin;': '\u2208', + 'isindot;': '\u22f5', + 'isinE;': '\u22f9', + 'isins;': '\u22f4', + 'isinsv;': '\u22f3', + 'isinv;': '\u2208', + 'it;': '\u2062', + 'Itilde;': '\u0128', + 'itilde;': 
'\u0129', + 'Iukcy;': '\u0406', + 'iukcy;': '\u0456', + 'Iuml': '\xcf', + 'iuml': '\xef', + 'Iuml;': '\xcf', + 'iuml;': '\xef', + 'Jcirc;': '\u0134', + 'jcirc;': '\u0135', + 'Jcy;': '\u0419', + 'jcy;': '\u0439', + 'Jfr;': '\U0001d50d', + 'jfr;': '\U0001d527', + 'jmath;': '\u0237', + 'Jopf;': '\U0001d541', + 'jopf;': '\U0001d55b', + 'Jscr;': '\U0001d4a5', + 'jscr;': '\U0001d4bf', + 'Jsercy;': '\u0408', + 'jsercy;': '\u0458', + 'Jukcy;': '\u0404', + 'jukcy;': '\u0454', + 'Kappa;': '\u039a', + 'kappa;': '\u03ba', + 'kappav;': '\u03f0', + 'Kcedil;': '\u0136', + 'kcedil;': '\u0137', + 'Kcy;': '\u041a', + 'kcy;': '\u043a', + 'Kfr;': '\U0001d50e', + 'kfr;': '\U0001d528', + 'kgreen;': '\u0138', + 'KHcy;': '\u0425', + 'khcy;': '\u0445', + 'KJcy;': '\u040c', + 'kjcy;': '\u045c', + 'Kopf;': '\U0001d542', + 'kopf;': '\U0001d55c', + 'Kscr;': '\U0001d4a6', + 'kscr;': '\U0001d4c0', + 'lAarr;': '\u21da', + 'Lacute;': '\u0139', + 'lacute;': '\u013a', + 'laemptyv;': '\u29b4', + 'lagran;': '\u2112', + 'Lambda;': '\u039b', + 'lambda;': '\u03bb', + 'Lang;': '\u27ea', + 'lang;': '\u27e8', + 'langd;': '\u2991', + 'langle;': '\u27e8', + 'lap;': '\u2a85', + 'Laplacetrf;': '\u2112', + 'laquo': '\xab', + 'laquo;': '\xab', + 'Larr;': '\u219e', + 'lArr;': '\u21d0', + 'larr;': '\u2190', + 'larrb;': '\u21e4', + 'larrbfs;': '\u291f', + 'larrfs;': '\u291d', + 'larrhk;': '\u21a9', + 'larrlp;': '\u21ab', + 'larrpl;': '\u2939', + 'larrsim;': '\u2973', + 'larrtl;': '\u21a2', + 'lat;': '\u2aab', + 'lAtail;': '\u291b', + 'latail;': '\u2919', + 'late;': '\u2aad', + 'lates;': '\u2aad\ufe00', + 'lBarr;': '\u290e', + 'lbarr;': '\u290c', + 'lbbrk;': '\u2772', + 'lbrace;': '{', + 'lbrack;': '[', + 'lbrke;': '\u298b', + 'lbrksld;': '\u298f', + 'lbrkslu;': '\u298d', + 'Lcaron;': '\u013d', + 'lcaron;': '\u013e', + 'Lcedil;': '\u013b', + 'lcedil;': '\u013c', + 'lceil;': '\u2308', + 'lcub;': '{', + 'Lcy;': '\u041b', + 'lcy;': '\u043b', + 'ldca;': '\u2936', + 'ldquo;': '\u201c', + 'ldquor;': '\u201e', + 'ldrdhar;': '\u2967', + 'ldrushar;': '\u294b', + 'ldsh;': '\u21b2', + 'lE;': '\u2266', + 'le;': '\u2264', + 'LeftAngleBracket;': '\u27e8', + 'LeftArrow;': '\u2190', + 'Leftarrow;': '\u21d0', + 'leftarrow;': '\u2190', + 'LeftArrowBar;': '\u21e4', + 'LeftArrowRightArrow;': '\u21c6', + 'leftarrowtail;': '\u21a2', + 'LeftCeiling;': '\u2308', + 'LeftDoubleBracket;': '\u27e6', + 'LeftDownTeeVector;': '\u2961', + 'LeftDownVector;': '\u21c3', + 'LeftDownVectorBar;': '\u2959', + 'LeftFloor;': '\u230a', + 'leftharpoondown;': '\u21bd', + 'leftharpoonup;': '\u21bc', + 'leftleftarrows;': '\u21c7', + 'LeftRightArrow;': '\u2194', + 'Leftrightarrow;': '\u21d4', + 'leftrightarrow;': '\u2194', + 'leftrightarrows;': '\u21c6', + 'leftrightharpoons;': '\u21cb', + 'leftrightsquigarrow;': '\u21ad', + 'LeftRightVector;': '\u294e', + 'LeftTee;': '\u22a3', + 'LeftTeeArrow;': '\u21a4', + 'LeftTeeVector;': '\u295a', + 'leftthreetimes;': '\u22cb', + 'LeftTriangle;': '\u22b2', + 'LeftTriangleBar;': '\u29cf', + 'LeftTriangleEqual;': '\u22b4', + 'LeftUpDownVector;': '\u2951', + 'LeftUpTeeVector;': '\u2960', + 'LeftUpVector;': '\u21bf', + 'LeftUpVectorBar;': '\u2958', + 'LeftVector;': '\u21bc', + 'LeftVectorBar;': '\u2952', + 'lEg;': '\u2a8b', + 'leg;': '\u22da', + 'leq;': '\u2264', + 'leqq;': '\u2266', + 'leqslant;': '\u2a7d', + 'les;': '\u2a7d', + 'lescc;': '\u2aa8', + 'lesdot;': '\u2a7f', + 'lesdoto;': '\u2a81', + 'lesdotor;': '\u2a83', + 'lesg;': '\u22da\ufe00', + 'lesges;': '\u2a93', + 'lessapprox;': '\u2a85', + 'lessdot;': '\u22d6', + 'lesseqgtr;': '\u22da', + 
'lesseqqgtr;': '\u2a8b', + 'LessEqualGreater;': '\u22da', + 'LessFullEqual;': '\u2266', + 'LessGreater;': '\u2276', + 'lessgtr;': '\u2276', + 'LessLess;': '\u2aa1', + 'lesssim;': '\u2272', + 'LessSlantEqual;': '\u2a7d', + 'LessTilde;': '\u2272', + 'lfisht;': '\u297c', + 'lfloor;': '\u230a', + 'Lfr;': '\U0001d50f', + 'lfr;': '\U0001d529', + 'lg;': '\u2276', + 'lgE;': '\u2a91', + 'lHar;': '\u2962', + 'lhard;': '\u21bd', + 'lharu;': '\u21bc', + 'lharul;': '\u296a', + 'lhblk;': '\u2584', + 'LJcy;': '\u0409', + 'ljcy;': '\u0459', + 'Ll;': '\u22d8', + 'll;': '\u226a', + 'llarr;': '\u21c7', + 'llcorner;': '\u231e', + 'Lleftarrow;': '\u21da', + 'llhard;': '\u296b', + 'lltri;': '\u25fa', + 'Lmidot;': '\u013f', + 'lmidot;': '\u0140', + 'lmoust;': '\u23b0', + 'lmoustache;': '\u23b0', + 'lnap;': '\u2a89', + 'lnapprox;': '\u2a89', + 'lnE;': '\u2268', + 'lne;': '\u2a87', + 'lneq;': '\u2a87', + 'lneqq;': '\u2268', + 'lnsim;': '\u22e6', + 'loang;': '\u27ec', + 'loarr;': '\u21fd', + 'lobrk;': '\u27e6', + 'LongLeftArrow;': '\u27f5', + 'Longleftarrow;': '\u27f8', + 'longleftarrow;': '\u27f5', + 'LongLeftRightArrow;': '\u27f7', + 'Longleftrightarrow;': '\u27fa', + 'longleftrightarrow;': '\u27f7', + 'longmapsto;': '\u27fc', + 'LongRightArrow;': '\u27f6', + 'Longrightarrow;': '\u27f9', + 'longrightarrow;': '\u27f6', + 'looparrowleft;': '\u21ab', + 'looparrowright;': '\u21ac', + 'lopar;': '\u2985', + 'Lopf;': '\U0001d543', + 'lopf;': '\U0001d55d', + 'loplus;': '\u2a2d', + 'lotimes;': '\u2a34', + 'lowast;': '\u2217', + 'lowbar;': '_', + 'LowerLeftArrow;': '\u2199', + 'LowerRightArrow;': '\u2198', + 'loz;': '\u25ca', + 'lozenge;': '\u25ca', + 'lozf;': '\u29eb', + 'lpar;': '(', + 'lparlt;': '\u2993', + 'lrarr;': '\u21c6', + 'lrcorner;': '\u231f', + 'lrhar;': '\u21cb', + 'lrhard;': '\u296d', + 'lrm;': '\u200e', + 'lrtri;': '\u22bf', + 'lsaquo;': '\u2039', + 'Lscr;': '\u2112', + 'lscr;': '\U0001d4c1', + 'Lsh;': '\u21b0', + 'lsh;': '\u21b0', + 'lsim;': '\u2272', + 'lsime;': '\u2a8d', + 'lsimg;': '\u2a8f', + 'lsqb;': '[', + 'lsquo;': '\u2018', + 'lsquor;': '\u201a', + 'Lstrok;': '\u0141', + 'lstrok;': '\u0142', + 'LT': '<', + 'lt': '<', + 'LT;': '<', + 'Lt;': '\u226a', + 'lt;': '<', + 'ltcc;': '\u2aa6', + 'ltcir;': '\u2a79', + 'ltdot;': '\u22d6', + 'lthree;': '\u22cb', + 'ltimes;': '\u22c9', + 'ltlarr;': '\u2976', + 'ltquest;': '\u2a7b', + 'ltri;': '\u25c3', + 'ltrie;': '\u22b4', + 'ltrif;': '\u25c2', + 'ltrPar;': '\u2996', + 'lurdshar;': '\u294a', + 'luruhar;': '\u2966', + 'lvertneqq;': '\u2268\ufe00', + 'lvnE;': '\u2268\ufe00', + 'macr': '\xaf', + 'macr;': '\xaf', + 'male;': '\u2642', + 'malt;': '\u2720', + 'maltese;': '\u2720', + 'Map;': '\u2905', + 'map;': '\u21a6', + 'mapsto;': '\u21a6', + 'mapstodown;': '\u21a7', + 'mapstoleft;': '\u21a4', + 'mapstoup;': '\u21a5', + 'marker;': '\u25ae', + 'mcomma;': '\u2a29', + 'Mcy;': '\u041c', + 'mcy;': '\u043c', + 'mdash;': '\u2014', + 'mDDot;': '\u223a', + 'measuredangle;': '\u2221', + 'MediumSpace;': '\u205f', + 'Mellintrf;': '\u2133', + 'Mfr;': '\U0001d510', + 'mfr;': '\U0001d52a', + 'mho;': '\u2127', + 'micro': '\xb5', + 'micro;': '\xb5', + 'mid;': '\u2223', + 'midast;': '*', + 'midcir;': '\u2af0', + 'middot': '\xb7', + 'middot;': '\xb7', + 'minus;': '\u2212', + 'minusb;': '\u229f', + 'minusd;': '\u2238', + 'minusdu;': '\u2a2a', + 'MinusPlus;': '\u2213', + 'mlcp;': '\u2adb', + 'mldr;': '\u2026', + 'mnplus;': '\u2213', + 'models;': '\u22a7', + 'Mopf;': '\U0001d544', + 'mopf;': '\U0001d55e', + 'mp;': '\u2213', + 'Mscr;': '\u2133', + 'mscr;': '\U0001d4c2', + 'mstpos;': 
'\u223e', + 'Mu;': '\u039c', + 'mu;': '\u03bc', + 'multimap;': '\u22b8', + 'mumap;': '\u22b8', + 'nabla;': '\u2207', + 'Nacute;': '\u0143', + 'nacute;': '\u0144', + 'nang;': '\u2220\u20d2', + 'nap;': '\u2249', + 'napE;': '\u2a70\u0338', + 'napid;': '\u224b\u0338', + 'napos;': '\u0149', + 'napprox;': '\u2249', + 'natur;': '\u266e', + 'natural;': '\u266e', + 'naturals;': '\u2115', + 'nbsp': '\xa0', + 'nbsp;': '\xa0', + 'nbump;': '\u224e\u0338', + 'nbumpe;': '\u224f\u0338', + 'ncap;': '\u2a43', + 'Ncaron;': '\u0147', + 'ncaron;': '\u0148', + 'Ncedil;': '\u0145', + 'ncedil;': '\u0146', + 'ncong;': '\u2247', + 'ncongdot;': '\u2a6d\u0338', + 'ncup;': '\u2a42', + 'Ncy;': '\u041d', + 'ncy;': '\u043d', + 'ndash;': '\u2013', + 'ne;': '\u2260', + 'nearhk;': '\u2924', + 'neArr;': '\u21d7', + 'nearr;': '\u2197', + 'nearrow;': '\u2197', + 'nedot;': '\u2250\u0338', + 'NegativeMediumSpace;': '\u200b', + 'NegativeThickSpace;': '\u200b', + 'NegativeThinSpace;': '\u200b', + 'NegativeVeryThinSpace;': '\u200b', + 'nequiv;': '\u2262', + 'nesear;': '\u2928', + 'nesim;': '\u2242\u0338', + 'NestedGreaterGreater;': '\u226b', + 'NestedLessLess;': '\u226a', + 'NewLine;': '\n', + 'nexist;': '\u2204', + 'nexists;': '\u2204', + 'Nfr;': '\U0001d511', + 'nfr;': '\U0001d52b', + 'ngE;': '\u2267\u0338', + 'nge;': '\u2271', + 'ngeq;': '\u2271', + 'ngeqq;': '\u2267\u0338', + 'ngeqslant;': '\u2a7e\u0338', + 'nges;': '\u2a7e\u0338', + 'nGg;': '\u22d9\u0338', + 'ngsim;': '\u2275', + 'nGt;': '\u226b\u20d2', + 'ngt;': '\u226f', + 'ngtr;': '\u226f', + 'nGtv;': '\u226b\u0338', + 'nhArr;': '\u21ce', + 'nharr;': '\u21ae', + 'nhpar;': '\u2af2', + 'ni;': '\u220b', + 'nis;': '\u22fc', + 'nisd;': '\u22fa', + 'niv;': '\u220b', + 'NJcy;': '\u040a', + 'njcy;': '\u045a', + 'nlArr;': '\u21cd', + 'nlarr;': '\u219a', + 'nldr;': '\u2025', + 'nlE;': '\u2266\u0338', + 'nle;': '\u2270', + 'nLeftarrow;': '\u21cd', + 'nleftarrow;': '\u219a', + 'nLeftrightarrow;': '\u21ce', + 'nleftrightarrow;': '\u21ae', + 'nleq;': '\u2270', + 'nleqq;': '\u2266\u0338', + 'nleqslant;': '\u2a7d\u0338', + 'nles;': '\u2a7d\u0338', + 'nless;': '\u226e', + 'nLl;': '\u22d8\u0338', + 'nlsim;': '\u2274', + 'nLt;': '\u226a\u20d2', + 'nlt;': '\u226e', + 'nltri;': '\u22ea', + 'nltrie;': '\u22ec', + 'nLtv;': '\u226a\u0338', + 'nmid;': '\u2224', + 'NoBreak;': '\u2060', + 'NonBreakingSpace;': '\xa0', + 'Nopf;': '\u2115', + 'nopf;': '\U0001d55f', + 'not': '\xac', + 'Not;': '\u2aec', + 'not;': '\xac', + 'NotCongruent;': '\u2262', + 'NotCupCap;': '\u226d', + 'NotDoubleVerticalBar;': '\u2226', + 'NotElement;': '\u2209', + 'NotEqual;': '\u2260', + 'NotEqualTilde;': '\u2242\u0338', + 'NotExists;': '\u2204', + 'NotGreater;': '\u226f', + 'NotGreaterEqual;': '\u2271', + 'NotGreaterFullEqual;': '\u2267\u0338', + 'NotGreaterGreater;': '\u226b\u0338', + 'NotGreaterLess;': '\u2279', + 'NotGreaterSlantEqual;': '\u2a7e\u0338', + 'NotGreaterTilde;': '\u2275', + 'NotHumpDownHump;': '\u224e\u0338', + 'NotHumpEqual;': '\u224f\u0338', + 'notin;': '\u2209', + 'notindot;': '\u22f5\u0338', + 'notinE;': '\u22f9\u0338', + 'notinva;': '\u2209', + 'notinvb;': '\u22f7', + 'notinvc;': '\u22f6', + 'NotLeftTriangle;': '\u22ea', + 'NotLeftTriangleBar;': '\u29cf\u0338', + 'NotLeftTriangleEqual;': '\u22ec', + 'NotLess;': '\u226e', + 'NotLessEqual;': '\u2270', + 'NotLessGreater;': '\u2278', + 'NotLessLess;': '\u226a\u0338', + 'NotLessSlantEqual;': '\u2a7d\u0338', + 'NotLessTilde;': '\u2274', + 'NotNestedGreaterGreater;': '\u2aa2\u0338', + 'NotNestedLessLess;': '\u2aa1\u0338', + 'notni;': '\u220c', + 'notniva;': 
'\u220c', + 'notnivb;': '\u22fe', + 'notnivc;': '\u22fd', + 'NotPrecedes;': '\u2280', + 'NotPrecedesEqual;': '\u2aaf\u0338', + 'NotPrecedesSlantEqual;': '\u22e0', + 'NotReverseElement;': '\u220c', + 'NotRightTriangle;': '\u22eb', + 'NotRightTriangleBar;': '\u29d0\u0338', + 'NotRightTriangleEqual;': '\u22ed', + 'NotSquareSubset;': '\u228f\u0338', + 'NotSquareSubsetEqual;': '\u22e2', + 'NotSquareSuperset;': '\u2290\u0338', + 'NotSquareSupersetEqual;': '\u22e3', + 'NotSubset;': '\u2282\u20d2', + 'NotSubsetEqual;': '\u2288', + 'NotSucceeds;': '\u2281', + 'NotSucceedsEqual;': '\u2ab0\u0338', + 'NotSucceedsSlantEqual;': '\u22e1', + 'NotSucceedsTilde;': '\u227f\u0338', + 'NotSuperset;': '\u2283\u20d2', + 'NotSupersetEqual;': '\u2289', + 'NotTilde;': '\u2241', + 'NotTildeEqual;': '\u2244', + 'NotTildeFullEqual;': '\u2247', + 'NotTildeTilde;': '\u2249', + 'NotVerticalBar;': '\u2224', + 'npar;': '\u2226', + 'nparallel;': '\u2226', + 'nparsl;': '\u2afd\u20e5', + 'npart;': '\u2202\u0338', + 'npolint;': '\u2a14', + 'npr;': '\u2280', + 'nprcue;': '\u22e0', + 'npre;': '\u2aaf\u0338', + 'nprec;': '\u2280', + 'npreceq;': '\u2aaf\u0338', + 'nrArr;': '\u21cf', + 'nrarr;': '\u219b', + 'nrarrc;': '\u2933\u0338', + 'nrarrw;': '\u219d\u0338', + 'nRightarrow;': '\u21cf', + 'nrightarrow;': '\u219b', + 'nrtri;': '\u22eb', + 'nrtrie;': '\u22ed', + 'nsc;': '\u2281', + 'nsccue;': '\u22e1', + 'nsce;': '\u2ab0\u0338', + 'Nscr;': '\U0001d4a9', + 'nscr;': '\U0001d4c3', + 'nshortmid;': '\u2224', + 'nshortparallel;': '\u2226', + 'nsim;': '\u2241', + 'nsime;': '\u2244', + 'nsimeq;': '\u2244', + 'nsmid;': '\u2224', + 'nspar;': '\u2226', + 'nsqsube;': '\u22e2', + 'nsqsupe;': '\u22e3', + 'nsub;': '\u2284', + 'nsubE;': '\u2ac5\u0338', + 'nsube;': '\u2288', + 'nsubset;': '\u2282\u20d2', + 'nsubseteq;': '\u2288', + 'nsubseteqq;': '\u2ac5\u0338', + 'nsucc;': '\u2281', + 'nsucceq;': '\u2ab0\u0338', + 'nsup;': '\u2285', + 'nsupE;': '\u2ac6\u0338', + 'nsupe;': '\u2289', + 'nsupset;': '\u2283\u20d2', + 'nsupseteq;': '\u2289', + 'nsupseteqq;': '\u2ac6\u0338', + 'ntgl;': '\u2279', + 'Ntilde': '\xd1', + 'ntilde': '\xf1', + 'Ntilde;': '\xd1', + 'ntilde;': '\xf1', + 'ntlg;': '\u2278', + 'ntriangleleft;': '\u22ea', + 'ntrianglelefteq;': '\u22ec', + 'ntriangleright;': '\u22eb', + 'ntrianglerighteq;': '\u22ed', + 'Nu;': '\u039d', + 'nu;': '\u03bd', + 'num;': '#', + 'numero;': '\u2116', + 'numsp;': '\u2007', + 'nvap;': '\u224d\u20d2', + 'nVDash;': '\u22af', + 'nVdash;': '\u22ae', + 'nvDash;': '\u22ad', + 'nvdash;': '\u22ac', + 'nvge;': '\u2265\u20d2', + 'nvgt;': '>\u20d2', + 'nvHarr;': '\u2904', + 'nvinfin;': '\u29de', + 'nvlArr;': '\u2902', + 'nvle;': '\u2264\u20d2', + 'nvlt;': '<\u20d2', + 'nvltrie;': '\u22b4\u20d2', + 'nvrArr;': '\u2903', + 'nvrtrie;': '\u22b5\u20d2', + 'nvsim;': '\u223c\u20d2', + 'nwarhk;': '\u2923', + 'nwArr;': '\u21d6', + 'nwarr;': '\u2196', + 'nwarrow;': '\u2196', + 'nwnear;': '\u2927', + 'Oacute': '\xd3', + 'oacute': '\xf3', + 'Oacute;': '\xd3', + 'oacute;': '\xf3', + 'oast;': '\u229b', + 'ocir;': '\u229a', + 'Ocirc': '\xd4', + 'ocirc': '\xf4', + 'Ocirc;': '\xd4', + 'ocirc;': '\xf4', + 'Ocy;': '\u041e', + 'ocy;': '\u043e', + 'odash;': '\u229d', + 'Odblac;': '\u0150', + 'odblac;': '\u0151', + 'odiv;': '\u2a38', + 'odot;': '\u2299', + 'odsold;': '\u29bc', + 'OElig;': '\u0152', + 'oelig;': '\u0153', + 'ofcir;': '\u29bf', + 'Ofr;': '\U0001d512', + 'ofr;': '\U0001d52c', + 'ogon;': '\u02db', + 'Ograve': '\xd2', + 'ograve': '\xf2', + 'Ograve;': '\xd2', + 'ograve;': '\xf2', + 'ogt;': '\u29c1', + 'ohbar;': '\u29b5', + 'ohm;': 
'\u03a9', + 'oint;': '\u222e', + 'olarr;': '\u21ba', + 'olcir;': '\u29be', + 'olcross;': '\u29bb', + 'oline;': '\u203e', + 'olt;': '\u29c0', + 'Omacr;': '\u014c', + 'omacr;': '\u014d', + 'Omega;': '\u03a9', + 'omega;': '\u03c9', + 'Omicron;': '\u039f', + 'omicron;': '\u03bf', + 'omid;': '\u29b6', + 'ominus;': '\u2296', + 'Oopf;': '\U0001d546', + 'oopf;': '\U0001d560', + 'opar;': '\u29b7', + 'OpenCurlyDoubleQuote;': '\u201c', + 'OpenCurlyQuote;': '\u2018', + 'operp;': '\u29b9', + 'oplus;': '\u2295', + 'Or;': '\u2a54', + 'or;': '\u2228', + 'orarr;': '\u21bb', + 'ord;': '\u2a5d', + 'order;': '\u2134', + 'orderof;': '\u2134', + 'ordf': '\xaa', + 'ordf;': '\xaa', + 'ordm': '\xba', + 'ordm;': '\xba', + 'origof;': '\u22b6', + 'oror;': '\u2a56', + 'orslope;': '\u2a57', + 'orv;': '\u2a5b', + 'oS;': '\u24c8', + 'Oscr;': '\U0001d4aa', + 'oscr;': '\u2134', + 'Oslash': '\xd8', + 'oslash': '\xf8', + 'Oslash;': '\xd8', + 'oslash;': '\xf8', + 'osol;': '\u2298', + 'Otilde': '\xd5', + 'otilde': '\xf5', + 'Otilde;': '\xd5', + 'otilde;': '\xf5', + 'Otimes;': '\u2a37', + 'otimes;': '\u2297', + 'otimesas;': '\u2a36', + 'Ouml': '\xd6', + 'ouml': '\xf6', + 'Ouml;': '\xd6', + 'ouml;': '\xf6', + 'ovbar;': '\u233d', + 'OverBar;': '\u203e', + 'OverBrace;': '\u23de', + 'OverBracket;': '\u23b4', + 'OverParenthesis;': '\u23dc', + 'par;': '\u2225', + 'para': '\xb6', + 'para;': '\xb6', + 'parallel;': '\u2225', + 'parsim;': '\u2af3', + 'parsl;': '\u2afd', + 'part;': '\u2202', + 'PartialD;': '\u2202', + 'Pcy;': '\u041f', + 'pcy;': '\u043f', + 'percnt;': '%', + 'period;': '.', + 'permil;': '\u2030', + 'perp;': '\u22a5', + 'pertenk;': '\u2031', + 'Pfr;': '\U0001d513', + 'pfr;': '\U0001d52d', + 'Phi;': '\u03a6', + 'phi;': '\u03c6', + 'phiv;': '\u03d5', + 'phmmat;': '\u2133', + 'phone;': '\u260e', + 'Pi;': '\u03a0', + 'pi;': '\u03c0', + 'pitchfork;': '\u22d4', + 'piv;': '\u03d6', + 'planck;': '\u210f', + 'planckh;': '\u210e', + 'plankv;': '\u210f', + 'plus;': '+', + 'plusacir;': '\u2a23', + 'plusb;': '\u229e', + 'pluscir;': '\u2a22', + 'plusdo;': '\u2214', + 'plusdu;': '\u2a25', + 'pluse;': '\u2a72', + 'PlusMinus;': '\xb1', + 'plusmn': '\xb1', + 'plusmn;': '\xb1', + 'plussim;': '\u2a26', + 'plustwo;': '\u2a27', + 'pm;': '\xb1', + 'Poincareplane;': '\u210c', + 'pointint;': '\u2a15', + 'Popf;': '\u2119', + 'popf;': '\U0001d561', + 'pound': '\xa3', + 'pound;': '\xa3', + 'Pr;': '\u2abb', + 'pr;': '\u227a', + 'prap;': '\u2ab7', + 'prcue;': '\u227c', + 'prE;': '\u2ab3', + 'pre;': '\u2aaf', + 'prec;': '\u227a', + 'precapprox;': '\u2ab7', + 'preccurlyeq;': '\u227c', + 'Precedes;': '\u227a', + 'PrecedesEqual;': '\u2aaf', + 'PrecedesSlantEqual;': '\u227c', + 'PrecedesTilde;': '\u227e', + 'preceq;': '\u2aaf', + 'precnapprox;': '\u2ab9', + 'precneqq;': '\u2ab5', + 'precnsim;': '\u22e8', + 'precsim;': '\u227e', + 'Prime;': '\u2033', + 'prime;': '\u2032', + 'primes;': '\u2119', + 'prnap;': '\u2ab9', + 'prnE;': '\u2ab5', + 'prnsim;': '\u22e8', + 'prod;': '\u220f', + 'Product;': '\u220f', + 'profalar;': '\u232e', + 'profline;': '\u2312', + 'profsurf;': '\u2313', + 'prop;': '\u221d', + 'Proportion;': '\u2237', + 'Proportional;': '\u221d', + 'propto;': '\u221d', + 'prsim;': '\u227e', + 'prurel;': '\u22b0', + 'Pscr;': '\U0001d4ab', + 'pscr;': '\U0001d4c5', + 'Psi;': '\u03a8', + 'psi;': '\u03c8', + 'puncsp;': '\u2008', + 'Qfr;': '\U0001d514', + 'qfr;': '\U0001d52e', + 'qint;': '\u2a0c', + 'Qopf;': '\u211a', + 'qopf;': '\U0001d562', + 'qprime;': '\u2057', + 'Qscr;': '\U0001d4ac', + 'qscr;': '\U0001d4c6', + 'quaternions;': '\u210d', + 'quatint;': 
'\u2a16', + 'quest;': '?', + 'questeq;': '\u225f', + 'QUOT': '"', + 'quot': '"', + 'QUOT;': '"', + 'quot;': '"', + 'rAarr;': '\u21db', + 'race;': '\u223d\u0331', + 'Racute;': '\u0154', + 'racute;': '\u0155', + 'radic;': '\u221a', + 'raemptyv;': '\u29b3', + 'Rang;': '\u27eb', + 'rang;': '\u27e9', + 'rangd;': '\u2992', + 'range;': '\u29a5', + 'rangle;': '\u27e9', + 'raquo': '\xbb', + 'raquo;': '\xbb', + 'Rarr;': '\u21a0', + 'rArr;': '\u21d2', + 'rarr;': '\u2192', + 'rarrap;': '\u2975', + 'rarrb;': '\u21e5', + 'rarrbfs;': '\u2920', + 'rarrc;': '\u2933', + 'rarrfs;': '\u291e', + 'rarrhk;': '\u21aa', + 'rarrlp;': '\u21ac', + 'rarrpl;': '\u2945', + 'rarrsim;': '\u2974', + 'Rarrtl;': '\u2916', + 'rarrtl;': '\u21a3', + 'rarrw;': '\u219d', + 'rAtail;': '\u291c', + 'ratail;': '\u291a', + 'ratio;': '\u2236', + 'rationals;': '\u211a', + 'RBarr;': '\u2910', + 'rBarr;': '\u290f', + 'rbarr;': '\u290d', + 'rbbrk;': '\u2773', + 'rbrace;': '}', + 'rbrack;': ']', + 'rbrke;': '\u298c', + 'rbrksld;': '\u298e', + 'rbrkslu;': '\u2990', + 'Rcaron;': '\u0158', + 'rcaron;': '\u0159', + 'Rcedil;': '\u0156', + 'rcedil;': '\u0157', + 'rceil;': '\u2309', + 'rcub;': '}', + 'Rcy;': '\u0420', + 'rcy;': '\u0440', + 'rdca;': '\u2937', + 'rdldhar;': '\u2969', + 'rdquo;': '\u201d', + 'rdquor;': '\u201d', + 'rdsh;': '\u21b3', + 'Re;': '\u211c', + 'real;': '\u211c', + 'realine;': '\u211b', + 'realpart;': '\u211c', + 'reals;': '\u211d', + 'rect;': '\u25ad', + 'REG': '\xae', + 'reg': '\xae', + 'REG;': '\xae', + 'reg;': '\xae', + 'ReverseElement;': '\u220b', + 'ReverseEquilibrium;': '\u21cb', + 'ReverseUpEquilibrium;': '\u296f', + 'rfisht;': '\u297d', + 'rfloor;': '\u230b', + 'Rfr;': '\u211c', + 'rfr;': '\U0001d52f', + 'rHar;': '\u2964', + 'rhard;': '\u21c1', + 'rharu;': '\u21c0', + 'rharul;': '\u296c', + 'Rho;': '\u03a1', + 'rho;': '\u03c1', + 'rhov;': '\u03f1', + 'RightAngleBracket;': '\u27e9', + 'RightArrow;': '\u2192', + 'Rightarrow;': '\u21d2', + 'rightarrow;': '\u2192', + 'RightArrowBar;': '\u21e5', + 'RightArrowLeftArrow;': '\u21c4', + 'rightarrowtail;': '\u21a3', + 'RightCeiling;': '\u2309', + 'RightDoubleBracket;': '\u27e7', + 'RightDownTeeVector;': '\u295d', + 'RightDownVector;': '\u21c2', + 'RightDownVectorBar;': '\u2955', + 'RightFloor;': '\u230b', + 'rightharpoondown;': '\u21c1', + 'rightharpoonup;': '\u21c0', + 'rightleftarrows;': '\u21c4', + 'rightleftharpoons;': '\u21cc', + 'rightrightarrows;': '\u21c9', + 'rightsquigarrow;': '\u219d', + 'RightTee;': '\u22a2', + 'RightTeeArrow;': '\u21a6', + 'RightTeeVector;': '\u295b', + 'rightthreetimes;': '\u22cc', + 'RightTriangle;': '\u22b3', + 'RightTriangleBar;': '\u29d0', + 'RightTriangleEqual;': '\u22b5', + 'RightUpDownVector;': '\u294f', + 'RightUpTeeVector;': '\u295c', + 'RightUpVector;': '\u21be', + 'RightUpVectorBar;': '\u2954', + 'RightVector;': '\u21c0', + 'RightVectorBar;': '\u2953', + 'ring;': '\u02da', + 'risingdotseq;': '\u2253', + 'rlarr;': '\u21c4', + 'rlhar;': '\u21cc', + 'rlm;': '\u200f', + 'rmoust;': '\u23b1', + 'rmoustache;': '\u23b1', + 'rnmid;': '\u2aee', + 'roang;': '\u27ed', + 'roarr;': '\u21fe', + 'robrk;': '\u27e7', + 'ropar;': '\u2986', + 'Ropf;': '\u211d', + 'ropf;': '\U0001d563', + 'roplus;': '\u2a2e', + 'rotimes;': '\u2a35', + 'RoundImplies;': '\u2970', + 'rpar;': ')', + 'rpargt;': '\u2994', + 'rppolint;': '\u2a12', + 'rrarr;': '\u21c9', + 'Rrightarrow;': '\u21db', + 'rsaquo;': '\u203a', + 'Rscr;': '\u211b', + 'rscr;': '\U0001d4c7', + 'Rsh;': '\u21b1', + 'rsh;': '\u21b1', + 'rsqb;': ']', + 'rsquo;': '\u2019', + 'rsquor;': '\u2019', + 'rthree;': 
'\u22cc', + 'rtimes;': '\u22ca', + 'rtri;': '\u25b9', + 'rtrie;': '\u22b5', + 'rtrif;': '\u25b8', + 'rtriltri;': '\u29ce', + 'RuleDelayed;': '\u29f4', + 'ruluhar;': '\u2968', + 'rx;': '\u211e', + 'Sacute;': '\u015a', + 'sacute;': '\u015b', + 'sbquo;': '\u201a', + 'Sc;': '\u2abc', + 'sc;': '\u227b', + 'scap;': '\u2ab8', + 'Scaron;': '\u0160', + 'scaron;': '\u0161', + 'sccue;': '\u227d', + 'scE;': '\u2ab4', + 'sce;': '\u2ab0', + 'Scedil;': '\u015e', + 'scedil;': '\u015f', + 'Scirc;': '\u015c', + 'scirc;': '\u015d', + 'scnap;': '\u2aba', + 'scnE;': '\u2ab6', + 'scnsim;': '\u22e9', + 'scpolint;': '\u2a13', + 'scsim;': '\u227f', + 'Scy;': '\u0421', + 'scy;': '\u0441', + 'sdot;': '\u22c5', + 'sdotb;': '\u22a1', + 'sdote;': '\u2a66', + 'searhk;': '\u2925', + 'seArr;': '\u21d8', + 'searr;': '\u2198', + 'searrow;': '\u2198', + 'sect': '\xa7', + 'sect;': '\xa7', + 'semi;': ';', + 'seswar;': '\u2929', + 'setminus;': '\u2216', + 'setmn;': '\u2216', + 'sext;': '\u2736', + 'Sfr;': '\U0001d516', + 'sfr;': '\U0001d530', + 'sfrown;': '\u2322', + 'sharp;': '\u266f', + 'SHCHcy;': '\u0429', + 'shchcy;': '\u0449', + 'SHcy;': '\u0428', + 'shcy;': '\u0448', + 'ShortDownArrow;': '\u2193', + 'ShortLeftArrow;': '\u2190', + 'shortmid;': '\u2223', + 'shortparallel;': '\u2225', + 'ShortRightArrow;': '\u2192', + 'ShortUpArrow;': '\u2191', + 'shy': '\xad', + 'shy;': '\xad', + 'Sigma;': '\u03a3', + 'sigma;': '\u03c3', + 'sigmaf;': '\u03c2', + 'sigmav;': '\u03c2', + 'sim;': '\u223c', + 'simdot;': '\u2a6a', + 'sime;': '\u2243', + 'simeq;': '\u2243', + 'simg;': '\u2a9e', + 'simgE;': '\u2aa0', + 'siml;': '\u2a9d', + 'simlE;': '\u2a9f', + 'simne;': '\u2246', + 'simplus;': '\u2a24', + 'simrarr;': '\u2972', + 'slarr;': '\u2190', + 'SmallCircle;': '\u2218', + 'smallsetminus;': '\u2216', + 'smashp;': '\u2a33', + 'smeparsl;': '\u29e4', + 'smid;': '\u2223', + 'smile;': '\u2323', + 'smt;': '\u2aaa', + 'smte;': '\u2aac', + 'smtes;': '\u2aac\ufe00', + 'SOFTcy;': '\u042c', + 'softcy;': '\u044c', + 'sol;': '/', + 'solb;': '\u29c4', + 'solbar;': '\u233f', + 'Sopf;': '\U0001d54a', + 'sopf;': '\U0001d564', + 'spades;': '\u2660', + 'spadesuit;': '\u2660', + 'spar;': '\u2225', + 'sqcap;': '\u2293', + 'sqcaps;': '\u2293\ufe00', + 'sqcup;': '\u2294', + 'sqcups;': '\u2294\ufe00', + 'Sqrt;': '\u221a', + 'sqsub;': '\u228f', + 'sqsube;': '\u2291', + 'sqsubset;': '\u228f', + 'sqsubseteq;': '\u2291', + 'sqsup;': '\u2290', + 'sqsupe;': '\u2292', + 'sqsupset;': '\u2290', + 'sqsupseteq;': '\u2292', + 'squ;': '\u25a1', + 'Square;': '\u25a1', + 'square;': '\u25a1', + 'SquareIntersection;': '\u2293', + 'SquareSubset;': '\u228f', + 'SquareSubsetEqual;': '\u2291', + 'SquareSuperset;': '\u2290', + 'SquareSupersetEqual;': '\u2292', + 'SquareUnion;': '\u2294', + 'squarf;': '\u25aa', + 'squf;': '\u25aa', + 'srarr;': '\u2192', + 'Sscr;': '\U0001d4ae', + 'sscr;': '\U0001d4c8', + 'ssetmn;': '\u2216', + 'ssmile;': '\u2323', + 'sstarf;': '\u22c6', + 'Star;': '\u22c6', + 'star;': '\u2606', + 'starf;': '\u2605', + 'straightepsilon;': '\u03f5', + 'straightphi;': '\u03d5', + 'strns;': '\xaf', + 'Sub;': '\u22d0', + 'sub;': '\u2282', + 'subdot;': '\u2abd', + 'subE;': '\u2ac5', + 'sube;': '\u2286', + 'subedot;': '\u2ac3', + 'submult;': '\u2ac1', + 'subnE;': '\u2acb', + 'subne;': '\u228a', + 'subplus;': '\u2abf', + 'subrarr;': '\u2979', + 'Subset;': '\u22d0', + 'subset;': '\u2282', + 'subseteq;': '\u2286', + 'subseteqq;': '\u2ac5', + 'SubsetEqual;': '\u2286', + 'subsetneq;': '\u228a', + 'subsetneqq;': '\u2acb', + 'subsim;': '\u2ac7', + 'subsub;': '\u2ad5', + 'subsup;': 
'\u2ad3', + 'succ;': '\u227b', + 'succapprox;': '\u2ab8', + 'succcurlyeq;': '\u227d', + 'Succeeds;': '\u227b', + 'SucceedsEqual;': '\u2ab0', + 'SucceedsSlantEqual;': '\u227d', + 'SucceedsTilde;': '\u227f', + 'succeq;': '\u2ab0', + 'succnapprox;': '\u2aba', + 'succneqq;': '\u2ab6', + 'succnsim;': '\u22e9', + 'succsim;': '\u227f', + 'SuchThat;': '\u220b', + 'Sum;': '\u2211', + 'sum;': '\u2211', + 'sung;': '\u266a', + 'sup1': '\xb9', + 'sup1;': '\xb9', + 'sup2': '\xb2', + 'sup2;': '\xb2', + 'sup3': '\xb3', + 'sup3;': '\xb3', + 'Sup;': '\u22d1', + 'sup;': '\u2283', + 'supdot;': '\u2abe', + 'supdsub;': '\u2ad8', + 'supE;': '\u2ac6', + 'supe;': '\u2287', + 'supedot;': '\u2ac4', + 'Superset;': '\u2283', + 'SupersetEqual;': '\u2287', + 'suphsol;': '\u27c9', + 'suphsub;': '\u2ad7', + 'suplarr;': '\u297b', + 'supmult;': '\u2ac2', + 'supnE;': '\u2acc', + 'supne;': '\u228b', + 'supplus;': '\u2ac0', + 'Supset;': '\u22d1', + 'supset;': '\u2283', + 'supseteq;': '\u2287', + 'supseteqq;': '\u2ac6', + 'supsetneq;': '\u228b', + 'supsetneqq;': '\u2acc', + 'supsim;': '\u2ac8', + 'supsub;': '\u2ad4', + 'supsup;': '\u2ad6', + 'swarhk;': '\u2926', + 'swArr;': '\u21d9', + 'swarr;': '\u2199', + 'swarrow;': '\u2199', + 'swnwar;': '\u292a', + 'szlig': '\xdf', + 'szlig;': '\xdf', + 'Tab;': '\t', + 'target;': '\u2316', + 'Tau;': '\u03a4', + 'tau;': '\u03c4', + 'tbrk;': '\u23b4', + 'Tcaron;': '\u0164', + 'tcaron;': '\u0165', + 'Tcedil;': '\u0162', + 'tcedil;': '\u0163', + 'Tcy;': '\u0422', + 'tcy;': '\u0442', + 'tdot;': '\u20db', + 'telrec;': '\u2315', + 'Tfr;': '\U0001d517', + 'tfr;': '\U0001d531', + 'there4;': '\u2234', + 'Therefore;': '\u2234', + 'therefore;': '\u2234', + 'Theta;': '\u0398', + 'theta;': '\u03b8', + 'thetasym;': '\u03d1', + 'thetav;': '\u03d1', + 'thickapprox;': '\u2248', + 'thicksim;': '\u223c', + 'ThickSpace;': '\u205f\u200a', + 'thinsp;': '\u2009', + 'ThinSpace;': '\u2009', + 'thkap;': '\u2248', + 'thksim;': '\u223c', + 'THORN': '\xde', + 'thorn': '\xfe', + 'THORN;': '\xde', + 'thorn;': '\xfe', + 'Tilde;': '\u223c', + 'tilde;': '\u02dc', + 'TildeEqual;': '\u2243', + 'TildeFullEqual;': '\u2245', + 'TildeTilde;': '\u2248', + 'times': '\xd7', + 'times;': '\xd7', + 'timesb;': '\u22a0', + 'timesbar;': '\u2a31', + 'timesd;': '\u2a30', + 'tint;': '\u222d', + 'toea;': '\u2928', + 'top;': '\u22a4', + 'topbot;': '\u2336', + 'topcir;': '\u2af1', + 'Topf;': '\U0001d54b', + 'topf;': '\U0001d565', + 'topfork;': '\u2ada', + 'tosa;': '\u2929', + 'tprime;': '\u2034', + 'TRADE;': '\u2122', + 'trade;': '\u2122', + 'triangle;': '\u25b5', + 'triangledown;': '\u25bf', + 'triangleleft;': '\u25c3', + 'trianglelefteq;': '\u22b4', + 'triangleq;': '\u225c', + 'triangleright;': '\u25b9', + 'trianglerighteq;': '\u22b5', + 'tridot;': '\u25ec', + 'trie;': '\u225c', + 'triminus;': '\u2a3a', + 'TripleDot;': '\u20db', + 'triplus;': '\u2a39', + 'trisb;': '\u29cd', + 'tritime;': '\u2a3b', + 'trpezium;': '\u23e2', + 'Tscr;': '\U0001d4af', + 'tscr;': '\U0001d4c9', + 'TScy;': '\u0426', + 'tscy;': '\u0446', + 'TSHcy;': '\u040b', + 'tshcy;': '\u045b', + 'Tstrok;': '\u0166', + 'tstrok;': '\u0167', + 'twixt;': '\u226c', + 'twoheadleftarrow;': '\u219e', + 'twoheadrightarrow;': '\u21a0', + 'Uacute': '\xda', + 'uacute': '\xfa', + 'Uacute;': '\xda', + 'uacute;': '\xfa', + 'Uarr;': '\u219f', + 'uArr;': '\u21d1', + 'uarr;': '\u2191', + 'Uarrocir;': '\u2949', + 'Ubrcy;': '\u040e', + 'ubrcy;': '\u045e', + 'Ubreve;': '\u016c', + 'ubreve;': '\u016d', + 'Ucirc': '\xdb', + 'ucirc': '\xfb', + 'Ucirc;': '\xdb', + 'ucirc;': '\xfb', + 'Ucy;': '\u0423', + 
'ucy;': '\u0443', + 'udarr;': '\u21c5', + 'Udblac;': '\u0170', + 'udblac;': '\u0171', + 'udhar;': '\u296e', + 'ufisht;': '\u297e', + 'Ufr;': '\U0001d518', + 'ufr;': '\U0001d532', + 'Ugrave': '\xd9', + 'ugrave': '\xf9', + 'Ugrave;': '\xd9', + 'ugrave;': '\xf9', + 'uHar;': '\u2963', + 'uharl;': '\u21bf', + 'uharr;': '\u21be', + 'uhblk;': '\u2580', + 'ulcorn;': '\u231c', + 'ulcorner;': '\u231c', + 'ulcrop;': '\u230f', + 'ultri;': '\u25f8', + 'Umacr;': '\u016a', + 'umacr;': '\u016b', + 'uml': '\xa8', + 'uml;': '\xa8', + 'UnderBar;': '_', + 'UnderBrace;': '\u23df', + 'UnderBracket;': '\u23b5', + 'UnderParenthesis;': '\u23dd', + 'Union;': '\u22c3', + 'UnionPlus;': '\u228e', + 'Uogon;': '\u0172', + 'uogon;': '\u0173', + 'Uopf;': '\U0001d54c', + 'uopf;': '\U0001d566', + 'UpArrow;': '\u2191', + 'Uparrow;': '\u21d1', + 'uparrow;': '\u2191', + 'UpArrowBar;': '\u2912', + 'UpArrowDownArrow;': '\u21c5', + 'UpDownArrow;': '\u2195', + 'Updownarrow;': '\u21d5', + 'updownarrow;': '\u2195', + 'UpEquilibrium;': '\u296e', + 'upharpoonleft;': '\u21bf', + 'upharpoonright;': '\u21be', + 'uplus;': '\u228e', + 'UpperLeftArrow;': '\u2196', + 'UpperRightArrow;': '\u2197', + 'Upsi;': '\u03d2', + 'upsi;': '\u03c5', + 'upsih;': '\u03d2', + 'Upsilon;': '\u03a5', + 'upsilon;': '\u03c5', + 'UpTee;': '\u22a5', + 'UpTeeArrow;': '\u21a5', + 'upuparrows;': '\u21c8', + 'urcorn;': '\u231d', + 'urcorner;': '\u231d', + 'urcrop;': '\u230e', + 'Uring;': '\u016e', + 'uring;': '\u016f', + 'urtri;': '\u25f9', + 'Uscr;': '\U0001d4b0', + 'uscr;': '\U0001d4ca', + 'utdot;': '\u22f0', + 'Utilde;': '\u0168', + 'utilde;': '\u0169', + 'utri;': '\u25b5', + 'utrif;': '\u25b4', + 'uuarr;': '\u21c8', + 'Uuml': '\xdc', + 'uuml': '\xfc', + 'Uuml;': '\xdc', + 'uuml;': '\xfc', + 'uwangle;': '\u29a7', + 'vangrt;': '\u299c', + 'varepsilon;': '\u03f5', + 'varkappa;': '\u03f0', + 'varnothing;': '\u2205', + 'varphi;': '\u03d5', + 'varpi;': '\u03d6', + 'varpropto;': '\u221d', + 'vArr;': '\u21d5', + 'varr;': '\u2195', + 'varrho;': '\u03f1', + 'varsigma;': '\u03c2', + 'varsubsetneq;': '\u228a\ufe00', + 'varsubsetneqq;': '\u2acb\ufe00', + 'varsupsetneq;': '\u228b\ufe00', + 'varsupsetneqq;': '\u2acc\ufe00', + 'vartheta;': '\u03d1', + 'vartriangleleft;': '\u22b2', + 'vartriangleright;': '\u22b3', + 'Vbar;': '\u2aeb', + 'vBar;': '\u2ae8', + 'vBarv;': '\u2ae9', + 'Vcy;': '\u0412', + 'vcy;': '\u0432', + 'VDash;': '\u22ab', + 'Vdash;': '\u22a9', + 'vDash;': '\u22a8', + 'vdash;': '\u22a2', + 'Vdashl;': '\u2ae6', + 'Vee;': '\u22c1', + 'vee;': '\u2228', + 'veebar;': '\u22bb', + 'veeeq;': '\u225a', + 'vellip;': '\u22ee', + 'Verbar;': '\u2016', + 'verbar;': '|', + 'Vert;': '\u2016', + 'vert;': '|', + 'VerticalBar;': '\u2223', + 'VerticalLine;': '|', + 'VerticalSeparator;': '\u2758', + 'VerticalTilde;': '\u2240', + 'VeryThinSpace;': '\u200a', + 'Vfr;': '\U0001d519', + 'vfr;': '\U0001d533', + 'vltri;': '\u22b2', + 'vnsub;': '\u2282\u20d2', + 'vnsup;': '\u2283\u20d2', + 'Vopf;': '\U0001d54d', + 'vopf;': '\U0001d567', + 'vprop;': '\u221d', + 'vrtri;': '\u22b3', + 'Vscr;': '\U0001d4b1', + 'vscr;': '\U0001d4cb', + 'vsubnE;': '\u2acb\ufe00', + 'vsubne;': '\u228a\ufe00', + 'vsupnE;': '\u2acc\ufe00', + 'vsupne;': '\u228b\ufe00', + 'Vvdash;': '\u22aa', + 'vzigzag;': '\u299a', + 'Wcirc;': '\u0174', + 'wcirc;': '\u0175', + 'wedbar;': '\u2a5f', + 'Wedge;': '\u22c0', + 'wedge;': '\u2227', + 'wedgeq;': '\u2259', + 'weierp;': '\u2118', + 'Wfr;': '\U0001d51a', + 'wfr;': '\U0001d534', + 'Wopf;': '\U0001d54e', + 'wopf;': '\U0001d568', + 'wp;': '\u2118', + 'wr;': '\u2240', + 'wreath;': 
'\u2240', + 'Wscr;': '\U0001d4b2', + 'wscr;': '\U0001d4cc', + 'xcap;': '\u22c2', + 'xcirc;': '\u25ef', + 'xcup;': '\u22c3', + 'xdtri;': '\u25bd', + 'Xfr;': '\U0001d51b', + 'xfr;': '\U0001d535', + 'xhArr;': '\u27fa', + 'xharr;': '\u27f7', + 'Xi;': '\u039e', + 'xi;': '\u03be', + 'xlArr;': '\u27f8', + 'xlarr;': '\u27f5', + 'xmap;': '\u27fc', + 'xnis;': '\u22fb', + 'xodot;': '\u2a00', + 'Xopf;': '\U0001d54f', + 'xopf;': '\U0001d569', + 'xoplus;': '\u2a01', + 'xotime;': '\u2a02', + 'xrArr;': '\u27f9', + 'xrarr;': '\u27f6', + 'Xscr;': '\U0001d4b3', + 'xscr;': '\U0001d4cd', + 'xsqcup;': '\u2a06', + 'xuplus;': '\u2a04', + 'xutri;': '\u25b3', + 'xvee;': '\u22c1', + 'xwedge;': '\u22c0', + 'Yacute': '\xdd', + 'yacute': '\xfd', + 'Yacute;': '\xdd', + 'yacute;': '\xfd', + 'YAcy;': '\u042f', + 'yacy;': '\u044f', + 'Ycirc;': '\u0176', + 'ycirc;': '\u0177', + 'Ycy;': '\u042b', + 'ycy;': '\u044b', + 'yen': '\xa5', + 'yen;': '\xa5', + 'Yfr;': '\U0001d51c', + 'yfr;': '\U0001d536', + 'YIcy;': '\u0407', + 'yicy;': '\u0457', + 'Yopf;': '\U0001d550', + 'yopf;': '\U0001d56a', + 'Yscr;': '\U0001d4b4', + 'yscr;': '\U0001d4ce', + 'YUcy;': '\u042e', + 'yucy;': '\u044e', + 'yuml': '\xff', + 'Yuml;': '\u0178', + 'yuml;': '\xff', + 'Zacute;': '\u0179', + 'zacute;': '\u017a', + 'Zcaron;': '\u017d', + 'zcaron;': '\u017e', + 'Zcy;': '\u0417', + 'zcy;': '\u0437', + 'Zdot;': '\u017b', + 'zdot;': '\u017c', + 'zeetrf;': '\u2128', + 'ZeroWidthSpace;': '\u200b', + 'Zeta;': '\u0396', + 'zeta;': '\u03b6', + 'Zfr;': '\u2128', + 'zfr;': '\U0001d537', + 'ZHcy;': '\u0416', + 'zhcy;': '\u0436', + 'zigrarr;': '\u21dd', + 'Zopf;': '\u2124', + 'zopf;': '\U0001d56b', + 'Zscr;': '\U0001d4b5', + 'zscr;': '\U0001d4cf', + 'zwj;': '\u200d', + 'zwnj;': '\u200c', +} + +# maps the Unicode codepoint to the HTML entity name +codepoint2name = {} + +# maps the HTML entity name to the character +# (or a character reference if the character is outside the Latin-1 range) +entitydefs = {} + +for (name, codepoint) in name2codepoint.items(): + codepoint2name[codepoint] = name + entitydefs[name] = chr(codepoint) + +del name, codepoint diff --git a/pype/modules/ftrack/python2_vendor/future/backports/html/parser.py b/pype/modules/ftrack/python2_vendor/future/backports/html/parser.py new file mode 100644 index 0000000000..fb652636d4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/html/parser.py @@ -0,0 +1,536 @@ +"""A parser for HTML and XHTML. + +Backported for python-future from Python 3.3. +""" + +# This file is based on sgmllib.py, but the API is slightly different. + +# XXX There should be a way to distinguish between PCDATA (parsed +# character data -- the normal case), RCDATA (replaceable character +# data -- only char and entity references and end tags are special) +# and CDATA (character data -- only end tags are special). 
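+# For reference, the tables defined in html/entities.py above fit together
+# as follows (a minimal sketch; 'gt' is just a sample entity name, and the
+# import path assumes the vendored package layout used in this patch):
+#
+#     from future.backports.html.entities import (
+#         html5, name2codepoint, codepoint2name, entitydefs)
+#
+#     html5['gt;']          # '>'  -- 'name;' -> replacement character(s)
+#     name2codepoint['gt']  # 62   -- name -> Unicode codepoint
+#     codepoint2name[62]    # 'gt' -- inverse of name2codepoint
+#     entitydefs['gt']      # '>'  -- name -> character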
+
+from __future__ import (absolute_import, division,
+                        print_function, unicode_literals)
+from future.builtins import *
+from future.backports import _markupbase
+import re
+import warnings
+
+# Regular expressions used for parsing
+
+interesting_normal = re.compile('[&<]')
+incomplete = re.compile('&[a-zA-Z#]')
+
+entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
+charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
+
+starttagopen = re.compile('<[a-zA-Z]')
+piclose = re.compile('>')
+commentclose = re.compile(r'--\s*>')
+tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
+# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
+# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
+tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
+# Note:
+# 1) the strict attrfind isn't really strict, but we can't make it
+#    correctly strict without breaking backward compatibility;
+# 2) if you change attrfind remember to update locatestarttagend too;
+# 3) if you change attrfind and/or locatestarttagend the parser will
+#    explode, so don't do it.
+attrfind = re.compile(
+    r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
+    r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
+attrfind_tolerant = re.compile(
+    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
+    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
+locatestarttagend = re.compile(r"""
+  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
+  (?:\s+                             # whitespace before attribute name
+    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
+      (?:\s*=\s*                     # value indicator
+        (?:'[^']*'                   # LITA-enclosed value
+         |\"[^\"]*\"                 # LIT-enclosed value
+         |[^'\">\s]+                 # bare value
+        )
+      )?
+    )
+  )*
+  \s*                                # trailing whitespace
+""", re.VERBOSE)
+locatestarttagend_tolerant = re.compile(r"""
+  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
+  (?:[\s/]*                          # optional whitespace before attribute name
+    (?:(?<=['"\s/])[^\s/>][^\s/=>]*  # attribute name
+      (?:\s*=+\s*                    # value indicator
+        (?:'[^']*'                   # LITA-enclosed value
+         |"[^"]*"                    # LIT-enclosed value
+         |(?!['"])[^>\s]*            # bare value
+        )
+        (?:\s*,)*                    # possibly followed by a comma
+      )?(?:\s|/(?!>))*
+    )*
+  )?
+  \s*                                # trailing whitespace
+""", re.VERBOSE)
+endendtag = re.compile('>')
+# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
+# </ and the tag name, so maybe this should be fixed
+endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
+
+
+class HTMLParseError(Exception):
+    """Exception raised for all parse errors."""
+
+    def __init__(self, msg, position=(None, None)):
+        assert msg
+        self.msg = msg
+        self.lineno = position[0]
+        self.offset = position[1]
+
+    def __str__(self):
+        result = self.msg
+        if self.lineno is not None:
+            result = result + ", at line %d" % self.lineno
+        if self.offset is not None:
+            result = result + ", column %d" % (self.offset + 1)
+        return result
+
+
+class HTMLParser(_markupbase.ParserBase):
+    """Find tags and other markup and call handler functions.
+
+    Usage:
+        p = HTMLParser()
+        p.feed(data)
+        ...
+        p.close()
+
+    Start tags are handled by calling self.handle_starttag() or
+    self.handle_startendtag(); end tags by self.handle_endtag().  The
+    data between tags is passed from the parser to the derived class
+    by calling self.handle_data() with the data as argument (the data
+    may be split up in arbitrary chunks).  Entity references are
+    passed by calling self.handle_entityref() with the entity
+    reference as the argument.  Numeric character references are
+    passed to self.handle_charref() with the string containing the
+    reference as the argument.
+ """ + + CDATA_CONTENT_ELEMENTS = ("script", "style") + + def __init__(self, strict=False): + """Initialize and reset this instance. + + If strict is set to False (the default) the parser will parse invalid + markup, otherwise it will raise an error. Note that the strict mode + is deprecated. + """ + if strict: + warnings.warn("The strict mode is deprecated.", + DeprecationWarning, stacklevel=2) + self.strict = strict + self.reset() + + def reset(self): + """Reset this instance. Loses all unprocessed data.""" + self.rawdata = '' + self.lasttag = '???' + self.interesting = interesting_normal + self.cdata_elem = None + _markupbase.ParserBase.reset(self) + + def feed(self, data): + r"""Feed data to the parser. + + Call this as often as you want, with as little or as much text + as you want (may include '\n'). + """ + self.rawdata = self.rawdata + data + self.goahead(0) + + def close(self): + """Handle any buffered data.""" + self.goahead(1) + + def error(self, message): + raise HTMLParseError(message, self.getpos()) + + __starttag_text = None + + def get_starttag_text(self): + """Return full source of start tag: '<...>'.""" + return self.__starttag_text + + def set_cdata_mode(self, elem): + self.cdata_elem = elem.lower() + self.interesting = re.compile(r'' % self.cdata_elem, re.I) + + def clear_cdata_mode(self): + self.interesting = interesting_normal + self.cdata_elem = None + + # Internal -- handle data as far as reasonable. May leave state + # and data to be processed by a subsequent call. If 'end' is + # true, force handling all data as if followed by EOF marker. + def goahead(self, end): + rawdata = self.rawdata + i = 0 + n = len(rawdata) + while i < n: + match = self.interesting.search(rawdata, i) # < or & + if match: + j = match.start() + else: + if self.cdata_elem: + break + j = n + if i < j: self.handle_data(rawdata[i:j]) + i = self.updatepos(i, j) + if i == n: break + startswith = rawdata.startswith + if startswith('<', i): + if starttagopen.match(rawdata, i): # < + letter + k = self.parse_starttag(i) + elif startswith("', i + 1) + if k < 0: + k = rawdata.find('<', i + 1) + if k < 0: + k = i + 1 + else: + k += 1 + self.handle_data(rawdata[i:k]) + i = self.updatepos(i, k) + elif startswith("&#", i): + match = charref.match(rawdata, i) + if match: + name = match.group()[2:-1] + self.handle_charref(name) + k = match.end() + if not startswith(';', k-1): + k = k - 1 + i = self.updatepos(i, k) + continue + else: + if ";" in rawdata[i:]: #bail by consuming &# + self.handle_data(rawdata[0:2]) + i = self.updatepos(i, 2) + break + elif startswith('&', i): + match = entityref.match(rawdata, i) + if match: + name = match.group(1) + self.handle_entityref(name) + k = match.end() + if not startswith(';', k-1): + k = k - 1 + i = self.updatepos(i, k) + continue + match = incomplete.match(rawdata, i) + if match: + # match.group() will contain at least 2 chars + if end and match.group() == rawdata[i:]: + if self.strict: + self.error("EOF in middle of entity or char ref") + else: + if k <= i: + k = n + i = self.updatepos(i, i + 1) + # incomplete + break + elif (i + 1) < n: + # not the end of the buffer, and can't be confused + # with some other construct + self.handle_data("&") + i = self.updatepos(i, i + 1) + else: + break + else: + assert 0, "interesting.search() lied" + # end while + if end and i < n and not self.cdata_elem: + self.handle_data(rawdata[i:n]) + i = self.updatepos(i, n) + self.rawdata = rawdata[i:] + + # Internal -- parse html declarations, return length or -1 if not terminated + # 
+    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
+    # See also parse_declaration in _markupbase
+    def parse_html_declaration(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == '<!', ('unexpected call to '
+                                        'parse_html_declaration()')
+        if rawdata[i:i+4] == '<!--':
+            # this case is actually already handled in goahead()
+            return self.parse_comment(i)
+        elif rawdata[i:i+3] == '<![':
+            return self.parse_marked_section(i)
+        elif rawdata[i:i+9].lower() == '<!doctype':
+            # find the closing >
+            gtpos = rawdata.find('>', i+9)
+            if gtpos == -1:
+                return -1
+            self.handle_decl(rawdata[i+2:gtpos])
+            return gtpos+1
+        else:
+            return self.parse_bogus_comment(i)
+
+    # Internal -- parse bogus comment, return length or -1 if not terminated
+    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
+    def parse_bogus_comment(self, i, report=1):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
+                                                'parse_comment()')
+        pos = rawdata.find('>', i+2)
+        if pos == -1:
+            return -1
+        if report:
+            self.handle_comment(rawdata[i+2:pos])
+        return pos + 1
+
+    # Internal -- parse processing instr, return end or -1 if not terminated
+    def parse_pi(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
+        match = piclose.search(rawdata, i+2) # >
+        if not match:
+            return -1
+        j = match.start()
+        self.handle_pi(rawdata[i+2: j])
+        j = match.end()
+        return j
+
+    # Internal -- handle starttag, return end or -1 if not terminated
+    def parse_starttag(self, i):
+        self.__starttag_text = None
+        endpos = self.check_for_whole_start_tag(i)
+        if endpos < 0:
+            return endpos
+        rawdata = self.rawdata
+        self.__starttag_text = rawdata[i:endpos]
+
+        # Now parse the data between i+1 and j into a tag and attrs
+        attrs = []
+        match = tagfind.match(rawdata, i+1)
+        assert match, 'unexpected call to parse_starttag()'
+        k = match.end()
+        self.lasttag = tag = match.group(1).lower()
+        while k < endpos:
+            if self.strict:
+                m = attrfind.match(rawdata, k)
+            else:
+                m = attrfind_tolerant.match(rawdata, k)
+            if not m:
+                break
+            attrname, rest, attrvalue = m.group(1, 2, 3)
+            if not rest:
+                attrvalue = None
+            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
+                 attrvalue[:1] == '"' == attrvalue[-1:]:
+                attrvalue = attrvalue[1:-1]
+            if attrvalue:
+                attrvalue = self.unescape(attrvalue)
+            attrs.append((attrname.lower(), attrvalue))
+            k = m.end()
+
+        end = rawdata[k:endpos].strip()
+        if end not in (">", "/>"):
+            lineno, offset = self.getpos()
+            if "\n" in self.__starttag_text:
+                lineno = lineno + self.__starttag_text.count("\n")
+                offset = len(self.__starttag_text) \
+                         - self.__starttag_text.rfind("\n")
+            else:
+                offset = offset + len(self.__starttag_text)
+            if self.strict:
+                self.error("junk characters in start tag: %r"
+                           % (rawdata[k:endpos][:20],))
+            self.handle_data(rawdata[i:endpos])
+            return endpos
+        if end.endswith('/>'):
+            # XHTML-style empty tag: <span attr="value" />
+            self.handle_startendtag(tag, attrs)
+        else:
+            self.handle_starttag(tag, attrs)
+            if tag in self.CDATA_CONTENT_ELEMENTS:
+                self.set_cdata_mode(tag)
+        return endpos
+
+    # Internal -- check to see if we have a complete starttag; return end
+    # or -1 if incomplete.
+    def check_for_whole_start_tag(self, i):
+        rawdata = self.rawdata
+        if self.strict:
+            m = locatestarttagend.match(rawdata, i)
+        else:
+            m = locatestarttagend_tolerant.match(rawdata, i)
+        if m:
+            j = m.end()
+            next = rawdata[j:j+1]
+            if next == ">":
+                return j + 1
+            if next == "/":
+                if rawdata.startswith("/>", j):
+                    return j + 2
+                if rawdata.startswith("/", j):
+                    # buffer boundary
+                    return -1
+                # else bogus input
+                if self.strict:
+                    self.updatepos(i, j + 1)
+                    self.error("malformed empty start tag")
+                if j > i:
+                    return j
+                else:
+                    return i + 1
+            if next == "":
+                # end of input
+                return -1
+            if next in ("abcdefghijklmnopqrstuvwxyz=/"
+                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
+                # end of input in or before attribute value, or we have the
+                # '/' from a '/>' ending
+                return -1
+            if self.strict:
+                self.updatepos(i, j)
+                self.error("malformed start tag")
+            if j > i:
+                return j
+            else:
+                return i + 1
+        raise AssertionError("we should not get here!")
+
+    # Internal -- parse endtag, return end or -1 if incomplete
+    def parse_endtag(self, i):
+        rawdata = self.rawdata
+        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
+        match = endendtag.search(rawdata, i+1) # >
+        if not match:
+            return -1
+        gtpos = match.end()
+        match = endtagfind.match(rawdata, i) # </ + tag + >
+        if not match:
+            if self.cdata_elem is not None:
+                self.handle_data(rawdata[i:gtpos])
+                return gtpos
+            if self.strict:
+                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
+            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
+            namematch = tagfind_tolerant.match(rawdata, i+2)
+            if not namematch:
+                # w3.org/TR/html5/tokenization.html#end-tag-open-state
+                if rawdata[i:i+3] == '</>':
+                    return i+3
+                else:
+                    return self.parse_bogus_comment(i)
+            tagname = namematch.group().lower()
+            # consume and ignore other stuff between the name and the >
+            # Note: this is not 100% correct, since we might have things like
+            # </tag attr=">">, but looking for > after the name should cover
+            # most of the cases and is much simpler
+            gtpos = rawdata.find('>', namematch.end())
+            self.handle_endtag(tagname)
+            return gtpos+1
+
+        elem = match.group(1).lower() # script or style
+        if self.cdata_elem is not None:
+            if elem != self.cdata_elem:
+                self.handle_data(rawdata[i:gtpos])
+                return gtpos
+
+        self.handle_endtag(elem.lower())
+        self.clear_cdata_mode()
+        return gtpos
+
+    # Overridable -- finish processing of start+end tag: <tag.../>
+    def handle_startendtag(self, tag, attrs):
+        self.handle_starttag(tag, attrs)
+        self.handle_endtag(tag)
+
+    # Overridable -- handle start tag
+    def handle_starttag(self, tag, attrs):
+        pass
+
+    # Overridable -- handle end tag
+    def handle_endtag(self, tag):
+        pass
+
+    # Overridable -- handle character reference
+    def handle_charref(self, name):
+        pass
+
+    # Overridable -- handle entity reference
+    def handle_entityref(self, name):
+        pass
+
+    # Overridable -- handle data
+    def handle_data(self, data):
+        pass
+
+    # Overridable -- handle comment
+    def handle_comment(self, data):
+        pass
+
+    # Overridable -- handle declaration
+    def handle_decl(self, decl):
+        pass
+
+    # Overridable -- handle processing instruction
+    def handle_pi(self, data):
+        pass
+
+    def unknown_decl(self, data):
+        if self.strict:
+            self.error("unknown declaration: %r" % (data,))
+
+    # Internal -- helper to remove special character quoting
+    def unescape(self, s):
+        if '&' not in s:
+            return s
+        def replaceEntities(s):
+            s = s.groups()[0]
+            try:
+                if s[0] == "#":
+                    s = s[1:]
+                    if s[0] in ['x','X']:
+                        c = int(s[1:].rstrip(';'), 16)
+                    else:
+                        c = int(s.rstrip(';'))
+                    return chr(c)
+            except ValueError:
+                return '&#' + s
+            else:
+                from future.backports.html.entities import html5
+                if s in html5:
+                    return html5[s]
+                elif s.endswith(';'):
+                    return '&' + s
+                for x in range(2, len(s)):
+                    if s[:x] in html5:
+                        return html5[s[:x]] + s[x:]
+                else:
+                    return '&' + s
+
+        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
+                      replaceEntities, s)
diff --git a/pype/modules/ftrack/python2_vendor/future/backports/http/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/http/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/pype/modules/ftrack/python2_vendor/future/backports/http/client.py b/pype/modules/ftrack/python2_vendor/future/backports/http/client.py
new file mode 100644
index 0000000000..e663d125c4
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/backports/http/client.py
@@ -0,0 +1,1346 @@
+"""HTTP/1.1 client library
+
+A backport of the Python 3.3 http/client.py module for python-future.
+
+<intro stuff goes here>
+<other stuff, too>
+
+HTTPConnection goes through a number of "states", which define when a client
+may legally make another request or fetch the response for a particular
+request. This diagram details these state transitions:
+
+    (null)
+      |
+      | HTTPConnection()
+      v
+    Idle
+      |
+      | putrequest()
+      v
+    Request-started
+      |
+      | ( putheader() )*  endheaders()
+      v
+    Request-sent
+      |
+      | response = getresponse()
+      v
+    Unread-response   [Response-headers-read]
+      |\____________________
+      |                     |
+      | response.read()     | putrequest()
+      v                     v
+    Idle                  Req-started-unread-response
+                     ______/|
+                   /        |
+   response.read() |        | ( putheader() )*  endheaders()
+                   v        v
+       Request-started    Req-sent-unread-response
+                            |
+                            | response.read()
+                            v
+                          Request-sent
+
+This diagram presents the following rules:
+  -- a second request may not be started until {response-headers-read}
+  -- a response [object] cannot be retrieved until {request-sent}
+  -- there is no differentiation between an unread response body and a
+     partially read response body
+
+Note: this enforcement is applied by the HTTPConnection class. The
+      HTTPResponse class does not enforce this state machine, which
+      implies sophisticated clients may accelerate the request/response
+      pipeline. Caution should be taken, though: accelerating the states
+      beyond the above pattern may imply knowledge of the server's
+      connection-close behavior for certain requests. For example, it
+      is impossible to tell whether the server will close the connection
+      UNTIL the response headers have been read; this means that further
+      requests cannot be placed into the pipeline until it is known that
+      the server will NOT be closing the connection.
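+
+A typical single-request exchange walks the states above in order (a
+minimal sketch; error handling is omitted and the host name is only an
+example):
+
+    conn = HTTPConnection('example.com', 80)   # Idle
+    conn.putrequest('GET', '/')                # Request-started
+    conn.putheader('Accept', 'text/html')      # ( putheader() )*
+    conn.endheaders()                          # Request-sent
+    response = conn.getresponse()              # Unread-response
+    body = response.read()                     # back to Idle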
+
+Logical State                  __state            __response
+-------------                  -------            ----------
+Idle                           _CS_IDLE           None
+Request-started                _CS_REQ_STARTED    None
+Request-sent                   _CS_REQ_SENT       None
+Unread-response                _CS_IDLE           <response_class>
+Req-started-unread-response    _CS_REQ_STARTED    <response_class>
+Req-sent-unread-response       _CS_REQ_SENT       <response_class>
+"""
+
+from __future__ import (absolute_import, division,
+                        print_function, unicode_literals)
+from future.builtins import bytes, int, str, super
+from future.utils import PY2
+
+from future.backports.email import parser as email_parser
+from future.backports.email import message as email_message
+from future.backports.misc import create_connection as socket_create_connection
+import io
+import os
+import socket
+from future.backports.urllib.parse import urlsplit
+import warnings
+from array import array
+
+if PY2:
+    from collections import Iterable
+else:
+    from collections.abc import Iterable
+
+__all__ = ["HTTPResponse", "HTTPConnection",
+           "HTTPException", "NotConnected", "UnknownProtocol",
+           "UnknownTransferEncoding", "UnimplementedFileMode",
+           "IncompleteRead", "InvalidURL", "ImproperConnectionState",
+           "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
+           "BadStatusLine", "error", "responses"]
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+_UNKNOWN = 'UNKNOWN'
+
+# connection states
+_CS_IDLE = 'Idle'
+_CS_REQ_STARTED = 'Request-started'
+_CS_REQ_SENT = 'Request-sent'
+
+# status codes
+# informational
+CONTINUE = 100
+SWITCHING_PROTOCOLS = 101
+PROCESSING = 102
+
+# successful
+OK = 200
+CREATED = 201
+ACCEPTED = 202
+NON_AUTHORITATIVE_INFORMATION = 203
+NO_CONTENT = 204
+RESET_CONTENT = 205
+PARTIAL_CONTENT = 206
+MULTI_STATUS = 207
+IM_USED = 226
+
+# redirection
+MULTIPLE_CHOICES = 300
+MOVED_PERMANENTLY = 301
+FOUND = 302
+SEE_OTHER = 303
+NOT_MODIFIED = 304
+USE_PROXY = 305
+TEMPORARY_REDIRECT = 307
+
+# client error
+BAD_REQUEST = 400
+UNAUTHORIZED = 401
+PAYMENT_REQUIRED = 402
+FORBIDDEN = 403
+NOT_FOUND = 404
+METHOD_NOT_ALLOWED = 405
+NOT_ACCEPTABLE = 406
+PROXY_AUTHENTICATION_REQUIRED = 407
+REQUEST_TIMEOUT = 408
+CONFLICT = 409
+GONE = 410
+LENGTH_REQUIRED = 411
+PRECONDITION_FAILED = 412
+REQUEST_ENTITY_TOO_LARGE = 413
+REQUEST_URI_TOO_LONG = 414
+UNSUPPORTED_MEDIA_TYPE = 415
+REQUESTED_RANGE_NOT_SATISFIABLE = 416
+EXPECTATION_FAILED = 417
+UNPROCESSABLE_ENTITY = 422
+LOCKED = 423
+FAILED_DEPENDENCY = 424
+UPGRADE_REQUIRED = 426
+PRECONDITION_REQUIRED = 428
+TOO_MANY_REQUESTS = 429
+REQUEST_HEADER_FIELDS_TOO_LARGE = 431
+
+# server error
+INTERNAL_SERVER_ERROR = 500
+NOT_IMPLEMENTED = 501
+BAD_GATEWAY = 502
+SERVICE_UNAVAILABLE = 503
+GATEWAY_TIMEOUT = 504
+HTTP_VERSION_NOT_SUPPORTED = 505
+INSUFFICIENT_STORAGE = 507
+NOT_EXTENDED = 510
+NETWORK_AUTHENTICATION_REQUIRED = 511
+
+# Mapping status codes to official W3C names
+responses = {
+    100: 'Continue',
+    101: 'Switching Protocols',
+
+    200: 'OK',
+    201: 'Created',
+    202: 'Accepted',
+    203: 'Non-Authoritative Information',
+    204: 'No Content',
+    205: 'Reset Content',
+    206: 'Partial Content',
+
+    300: 'Multiple Choices',
+    301: 'Moved Permanently',
+    302: 'Found',
+    303: 'See Other',
+    304: 'Not Modified',
+    305: 'Use Proxy',
+    306: '(Unused)',
+    307: 'Temporary Redirect',
+
+    400: 'Bad Request',
+    401: 'Unauthorized',
+    402: 'Payment Required',
+    403: 'Forbidden',
+    404: 'Not Found',
+    405: 'Method Not Allowed',
+    406: 'Not Acceptable',
+    407: 'Proxy Authentication Required',
+    408: 'Request Timeout',
+    409: 'Conflict',
+    410: 'Gone',
+    411: 'Length Required',
+    412: 'Precondition Failed',
+    413: 'Request Entity Too Large',
+    414: 'Request-URI Too
Long', + 415: 'Unsupported Media Type', + 416: 'Requested Range Not Satisfiable', + 417: 'Expectation Failed', + 428: 'Precondition Required', + 429: 'Too Many Requests', + 431: 'Request Header Fields Too Large', + + 500: 'Internal Server Error', + 501: 'Not Implemented', + 502: 'Bad Gateway', + 503: 'Service Unavailable', + 504: 'Gateway Timeout', + 505: 'HTTP Version Not Supported', + 511: 'Network Authentication Required', +} + +# maximal amount of data to read at one time in _safe_read +MAXAMOUNT = 1048576 + +# maximal line length when calling readline(). +_MAXLINE = 65536 +_MAXHEADERS = 100 + + +class HTTPMessage(email_message.Message): + # XXX The only usage of this method is in + # http.server.CGIHTTPRequestHandler. Maybe move the code there so + # that it doesn't need to be part of the public API. The API has + # never been defined so this could cause backwards compatibility + # issues. + + def getallmatchingheaders(self, name): + """Find all header lines matching a given header name. + + Look through the list of headers and find all lines matching a given + header name (and their continuation lines). A list of the lines is + returned, without interpretation. If the header does not occur, an + empty list is returned. If the header occurs multiple times, all + occurrences are returned. Case is not important in the header name. + + """ + name = name.lower() + ':' + n = len(name) + lst = [] + hit = 0 + for line in self.keys(): + if line[:n].lower() == name: + hit = 1 + elif not line[:1].isspace(): + hit = 0 + if hit: + lst.append(line) + return lst + +def parse_headers(fp, _class=HTTPMessage): + """Parses only RFC2822 headers from a file pointer. + + email Parser wants to see strings rather than bytes. + But a TextIOWrapper around self.rfile would buffer too many bytes + from the stream, bytes which we later need to read as bytes. + So we read the correct bytes here, as bytes, for email Parser + to parse. + + """ + headers = [] + while True: + line = fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") + headers.append(line) + if len(headers) > _MAXHEADERS: + raise HTTPException("got more than %d headers" % _MAXHEADERS) + if line in (b'\r\n', b'\n', b''): + break + hstring = bytes(b'').join(headers).decode('iso-8859-1') + return email_parser.Parser(_class=_class).parsestr(hstring) + + +_strict_sentinel = object() + +class HTTPResponse(io.RawIOBase): + + # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. + + # The bytes from the socket object are iso-8859-1 strings. + # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded + # text following RFC 2047. The basic status line parsing only + # accepts iso-8859-1. + + def __init__(self, sock, debuglevel=0, strict=_strict_sentinel, method=None, url=None): + # If the response includes a content-length header, we need to + # make sure that the client doesn't read more than the + # specified number of bytes. If it does, it will block until + # the server times out and closes the connection. This will + # happen if a self.fp.read() is done (without a size) whether + # self.fp is buffered or not. So, no self.fp.read() by + # clients unless they know what they are doing. + self.fp = sock.makefile("rb") + self.debuglevel = debuglevel + if strict is not _strict_sentinel: + warnings.warn("the 'strict' argument isn't supported anymore; " + "http.client now always assumes HTTP/1.x compliant servers.", + DeprecationWarning, 2) + self._method = method + + # The HTTPResponse object is returned via urllib. 
The clients + # of http and urllib expect different attributes for the + # headers. headers is used here and supports urllib. msg is + # provided as a backwards compatibility layer for http + # clients. + + self.headers = self.msg = None + + # from the Status-Line of the response + self.version = _UNKNOWN # HTTP-Version + self.status = _UNKNOWN # Status-Code + self.reason = _UNKNOWN # Reason-Phrase + + self.chunked = _UNKNOWN # is "chunked" being used? + self.chunk_left = _UNKNOWN # bytes left to read in current chunk + self.length = _UNKNOWN # number of bytes left in response + self.will_close = _UNKNOWN # conn will close at end of response + + def _read_status(self): + line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1") + if len(line) > _MAXLINE: + raise LineTooLong("status line") + if self.debuglevel > 0: + print("reply:", repr(line)) + if not line: + # Presumably, the server closed the connection before + # sending a valid response. + raise BadStatusLine(line) + try: + version, status, reason = line.split(None, 2) + except ValueError: + try: + version, status = line.split(None, 1) + reason = "" + except ValueError: + # empty version will cause next test to fail. + version = "" + if not version.startswith("HTTP/"): + self._close_conn() + raise BadStatusLine(line) + + # The status code is a three-digit number + try: + status = int(status) + if status < 100 or status > 999: + raise BadStatusLine(line) + except ValueError: + raise BadStatusLine(line) + return version, status, reason + + def begin(self): + if self.headers is not None: + # we've already started reading the response + return + + # read until we get a non-100 response + while True: + version, status, reason = self._read_status() + if status != CONTINUE: + break + # skip the header from the 100 response + while True: + skip = self.fp.readline(_MAXLINE + 1) + if len(skip) > _MAXLINE: + raise LineTooLong("header line") + skip = skip.strip() + if not skip: + break + if self.debuglevel > 0: + print("header:", skip) + + self.code = self.status = status + self.reason = reason.strip() + if version in ("HTTP/1.0", "HTTP/0.9"): + # Some servers might still return "0.9", treat it as 1.0 anyway + self.version = 10 + elif version.startswith("HTTP/1."): + self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 + else: + raise UnknownProtocol(version) + + self.headers = self.msg = parse_headers(self.fp) + + if self.debuglevel > 0: + for hdr in self.headers: + print("header:", hdr, end=" ") + + # are we using the chunked-style of transfer encoding? + tr_enc = self.headers.get("transfer-encoding") + if tr_enc and tr_enc.lower() == "chunked": + self.chunked = True + self.chunk_left = None + else: + self.chunked = False + + # will the connection close at the end of the response? + self.will_close = self._check_close() + + # do we have a Content-Length? + # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" + self.length = None + length = self.headers.get("content-length") + + # are we using the chunked-style of transfer encoding? + tr_enc = self.headers.get("transfer-encoding") + if length and not self.chunked: + try: + self.length = int(length) + except ValueError: + self.length = None + else: + if self.length < 0: # ignore nonsensical negative lengths + self.length = None + else: + self.length = None + + # does the body have a fixed length? 
(of zero) + if (status == NO_CONTENT or status == NOT_MODIFIED or + 100 <= status < 200 or # 1xx codes + self._method == "HEAD"): + self.length = 0 + + # if the connection remains open, and we aren't using chunked, and + # a content-length was not provided, then assume that the connection + # WILL close. + if (not self.will_close and + not self.chunked and + self.length is None): + self.will_close = True + + def _check_close(self): + conn = self.headers.get("connection") + if self.version == 11: + # An HTTP/1.1 proxy is assumed to stay open unless + # explicitly closed. + conn = self.headers.get("connection") + if conn and "close" in conn.lower(): + return True + return False + + # Some HTTP/1.0 implementations have support for persistent + # connections, using rules different than HTTP/1.1. + + # For older HTTP, Keep-Alive indicates persistent connection. + if self.headers.get("keep-alive"): + return False + + # At least Akamai returns a "Connection: Keep-Alive" header, + # which was supposed to be sent by the client. + if conn and "keep-alive" in conn.lower(): + return False + + # Proxy-Connection is a netscape hack. + pconn = self.headers.get("proxy-connection") + if pconn and "keep-alive" in pconn.lower(): + return False + + # otherwise, assume it will close + return True + + def _close_conn(self): + fp = self.fp + self.fp = None + fp.close() + + def close(self): + super().close() # set "closed" flag + if self.fp: + self._close_conn() + + # These implementations are for the benefit of io.BufferedReader. + + # XXX This class should probably be revised to act more like + # the "raw stream" that BufferedReader expects. + + def flush(self): + super().flush() + if self.fp: + self.fp.flush() + + def readable(self): + return True + + # End of "raw stream" methods + + def isclosed(self): + """True if the connection is closed.""" + # NOTE: it is possible that we will not ever call self.close(). This + # case occurs when will_close is TRUE, length is None, and we + # read up to the last byte, but NOT past it. + # + # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be + # called, meaning self.isclosed() is meaningful. 
+ return self.fp is None + + def read(self, amt=None): + if self.fp is None: + return bytes(b"") + + if self._method == "HEAD": + self._close_conn() + return bytes(b"") + + if amt is not None: + # Amount is given, so call base class version + # (which is implemented in terms of self.readinto) + return bytes(super(HTTPResponse, self).read(amt)) + else: + # Amount is not given (unbounded read) so we must check self.length + # and self.chunked + + if self.chunked: + return self._readall_chunked() + + if self.length is None: + s = self.fp.read() + else: + try: + s = self._safe_read(self.length) + except IncompleteRead: + self._close_conn() + raise + self.length = 0 + self._close_conn() # we read everything + return bytes(s) + + def readinto(self, b): + if self.fp is None: + return 0 + + if self._method == "HEAD": + self._close_conn() + return 0 + + if self.chunked: + return self._readinto_chunked(b) + + if self.length is not None: + if len(b) > self.length: + # clip the read to the "end of response" + b = memoryview(b)[0:self.length] + + # we do not use _safe_read() here because this may be a .will_close + # connection, and the user is reading more bytes than will be provided + # (for example, reading in 1k chunks) + + if PY2: + data = self.fp.read(len(b)) + n = len(data) + b[:n] = data + else: + n = self.fp.readinto(b) + + if not n and b: + # Ideally, we would raise IncompleteRead if the content-length + # wasn't satisfied, but it might break compatibility. + self._close_conn() + elif self.length is not None: + self.length -= n + if not self.length: + self._close_conn() + return n + + def _read_next_chunk_size(self): + # Read the next chunk size from the file + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("chunk size") + i = line.find(b";") + if i >= 0: + line = line[:i] # strip chunk-extensions + try: + return int(line, 16) + except ValueError: + # close the connection as protocol synchronisation is + # probably lost + self._close_conn() + raise + + def _read_and_discard_trailer(self): + # read and discard trailer up to the CRLF terminator + ### note: we shouldn't have any trailers! 
+ while True: + line = self.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("trailer line") + if not line: + # a vanishingly small number of sites EOF without + # sending the trailer + break + if line in (b'\r\n', b'\n', b''): + break + + def _readall_chunked(self): + assert self.chunked != _UNKNOWN + chunk_left = self.chunk_left + value = [] + while True: + if chunk_left is None: + try: + chunk_left = self._read_next_chunk_size() + if chunk_left == 0: + break + except ValueError: + raise IncompleteRead(bytes(b'').join(value)) + value.append(self._safe_read(chunk_left)) + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + self._read_and_discard_trailer() + + # we read everything; close the "file" + self._close_conn() + + return bytes(b'').join(value) + + def _readinto_chunked(self, b): + assert self.chunked != _UNKNOWN + chunk_left = self.chunk_left + + total_bytes = 0 + mvb = memoryview(b) + while True: + if chunk_left is None: + try: + chunk_left = self._read_next_chunk_size() + if chunk_left == 0: + break + except ValueError: + raise IncompleteRead(bytes(b[0:total_bytes])) + + if len(mvb) < chunk_left: + n = self._safe_readinto(mvb) + self.chunk_left = chunk_left - n + return total_bytes + n + elif len(mvb) == chunk_left: + n = self._safe_readinto(mvb) + self._safe_read(2) # toss the CRLF at the end of the chunk + self.chunk_left = None + return total_bytes + n + else: + temp_mvb = mvb[0:chunk_left] + n = self._safe_readinto(temp_mvb) + mvb = mvb[n:] + total_bytes += n + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + self._read_and_discard_trailer() + + # we read everything; close the "file" + self._close_conn() + + return total_bytes + + def _safe_read(self, amt): + """Read the number of bytes requested, compensating for partial reads. + + Normally, we have a blocking socket, but a read() can be interrupted + by a signal (resulting in a partial read). + + Note that we cannot distinguish between EOF and an interrupt when zero + bytes have been read. IncompleteRead() will be raised in this + situation. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. 
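+
+        Illustrative example: _safe_read(10) keeps calling self.fp.read()
+        (at most MAXAMOUNT bytes per call) until 10 bytes have accumulated;
+        hitting EOF first raises IncompleteRead(partial_bytes, amount_still_expected).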
+ """ + s = [] + while amt > 0: + chunk = self.fp.read(min(amt, MAXAMOUNT)) + if not chunk: + raise IncompleteRead(bytes(b'').join(s), amt) + s.append(chunk) + amt -= len(chunk) + return bytes(b"").join(s) + + def _safe_readinto(self, b): + """Same as _safe_read, but for reading into a buffer.""" + total_bytes = 0 + mvb = memoryview(b) + while total_bytes < len(b): + if MAXAMOUNT < len(mvb): + temp_mvb = mvb[0:MAXAMOUNT] + if PY2: + data = self.fp.read(len(temp_mvb)) + n = len(data) + temp_mvb[:n] = data + else: + n = self.fp.readinto(temp_mvb) + else: + if PY2: + data = self.fp.read(len(mvb)) + n = len(data) + mvb[:n] = data + else: + n = self.fp.readinto(mvb) + if not n: + raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b)) + mvb = mvb[n:] + total_bytes += n + return total_bytes + + def fileno(self): + return self.fp.fileno() + + def getheader(self, name, default=None): + if self.headers is None: + raise ResponseNotReady() + headers = self.headers.get_all(name) or default + if isinstance(headers, str) or not hasattr(headers, '__iter__'): + return headers + else: + return ', '.join(headers) + + def getheaders(self): + """Return list of (header, value) tuples.""" + if self.headers is None: + raise ResponseNotReady() + return list(self.headers.items()) + + # We override IOBase.__iter__ so that it doesn't check for closed-ness + + def __iter__(self): + return self + + # For compatibility with old-style urllib responses. + + def info(self): + return self.headers + + def geturl(self): + return self.url + + def getcode(self): + return self.status + +class HTTPConnection(object): + + _http_vsn = 11 + _http_vsn_str = 'HTTP/1.1' + + response_class = HTTPResponse + default_port = HTTP_PORT + auto_open = 1 + debuglevel = 0 + + def __init__(self, host, port=None, strict=_strict_sentinel, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): + if strict is not _strict_sentinel: + warnings.warn("the 'strict' argument isn't supported anymore; " + "http.client now always assumes HTTP/1.x compliant servers.", + DeprecationWarning, 2) + self.timeout = timeout + self.source_address = source_address + self.sock = None + self._buffer = [] + self.__response = None + self.__state = _CS_IDLE + self._method = None + self._tunnel_host = None + self._tunnel_port = None + self._tunnel_headers = {} + + self._set_hostport(host, port) + + def set_tunnel(self, host, port=None, headers=None): + """ Sets up the host and the port for the HTTP CONNECT Tunnelling. + + The headers argument should be a mapping of extra HTTP headers + to send with the CONNECT request. + """ + self._tunnel_host = host + self._tunnel_port = port + if headers: + self._tunnel_headers = headers + else: + self._tunnel_headers.clear() + + def _set_hostport(self, host, port): + if port is None: + i = host.rfind(':') + j = host.rfind(']') # ipv6 addresses have [...] 
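+            # Illustrative: "www.example.com:8080" gives i > j, so a port is
+            # parsed below; for a bare IPv6 literal such as "[::1]" the last
+            # ':' sits inside the brackets (j > i), so the default port is used.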
+ if i > j: + try: + port = int(host[i+1:]) + except ValueError: + if host[i+1:] == "": # http://foo.com:/ == http://foo.com/ + port = self.default_port + else: + raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) + host = host[:i] + else: + port = self.default_port + if host and host[0] == '[' and host[-1] == ']': + host = host[1:-1] + self.host = host + self.port = port + + def set_debuglevel(self, level): + self.debuglevel = level + + def _tunnel(self): + self._set_hostport(self._tunnel_host, self._tunnel_port) + connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port) + connect_bytes = connect_str.encode("ascii") + self.send(connect_bytes) + for header, value in self._tunnel_headers.items(): + header_str = "%s: %s\r\n" % (header, value) + header_bytes = header_str.encode("latin-1") + self.send(header_bytes) + self.send(bytes(b'\r\n')) + + response = self.response_class(self.sock, method=self._method) + (version, code, message) = response._read_status() + + if code != 200: + self.close() + raise socket.error("Tunnel connection failed: %d %s" % (code, + message.strip())) + while True: + line = response.fp.readline(_MAXLINE + 1) + if len(line) > _MAXLINE: + raise LineTooLong("header line") + if not line: + # for sites which EOF without sending a trailer + break + if line in (b'\r\n', b'\n', b''): + break + + def connect(self): + """Connect to the host and port specified in __init__.""" + self.sock = socket_create_connection((self.host,self.port), + self.timeout, self.source_address) + if self._tunnel_host: + self._tunnel() + + def close(self): + """Close the connection to the HTTP server.""" + if self.sock: + self.sock.close() # close it manually... there may be other refs + self.sock = None + if self.__response: + self.__response.close() + self.__response = None + self.__state = _CS_IDLE + + def send(self, data): + """Send `data' to the server. + ``data`` can be a string object, a bytes object, an array object, a + file-like object that supports a .read() method, or an iterable object. + """ + + if self.sock is None: + if self.auto_open: + self.connect() + else: + raise NotConnected() + + if self.debuglevel > 0: + print("send:", repr(data)) + blocksize = 8192 + # Python 2.7 array objects have a read method which is incompatible + # with the 2-arg calling syntax below. + if hasattr(data, "read") and not isinstance(data, array): + if self.debuglevel > 0: + print("sendIng a read()able") + encode = False + try: + mode = data.mode + except AttributeError: + # io.BytesIO and other file-like objects don't have a `mode` + # attribute. + pass + else: + if "b" not in mode: + encode = True + if self.debuglevel > 0: + print("encoding file using iso-8859-1") + while 1: + datablock = data.read(blocksize) + if not datablock: + break + if encode: + datablock = datablock.encode("iso-8859-1") + self.sock.sendall(datablock) + return + try: + self.sock.sendall(data) + except TypeError: + if isinstance(data, Iterable): + for d in data: + self.sock.sendall(d) + else: + raise TypeError("data should be a bytes-like object " + "or an iterable, got %r" % type(data)) + + def _output(self, s): + """Add a line of output to the current request buffer. + + Assumes that the line does *not* end with \\r\\n. + """ + self._buffer.append(s) + + def _send_output(self, message_body=None): + """Send the currently buffered request and clear the buffer. + + Appends an extra \\r\\n to the buffer. + A message_body may be specified, to be appended to the request. 
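+
+        Illustrative example: endheaders(b"name=value") passes the bytes in
+        here as message_body; being a bytes object, it is appended to the
+        buffered headers so the whole request goes out in a single send() call.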
+ """ + self._buffer.extend((bytes(b""), bytes(b""))) + msg = bytes(b"\r\n").join(self._buffer) + del self._buffer[:] + # If msg and message_body are sent in a single send() call, + # it will avoid performance problems caused by the interaction + # between delayed ack and the Nagle algorithm. + if isinstance(message_body, bytes): + msg += message_body + message_body = None + self.send(msg) + if message_body is not None: + # message_body was not a string (i.e. it is a file), and + # we must run the risk of Nagle. + self.send(message_body) + + def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): + """Send a request to the server. + + `method' specifies an HTTP request method, e.g. 'GET'. + `url' specifies the object being requested, e.g. '/index.html'. + `skip_host' if True does not add automatically a 'Host:' header + `skip_accept_encoding' if True does not add automatically an + 'Accept-Encoding:' header + """ + + # if a prior response has been completed, then forget about it. + if self.__response and self.__response.isclosed(): + self.__response = None + + + # in certain cases, we cannot issue another request on this connection. + # this occurs when: + # 1) we are in the process of sending a request. (_CS_REQ_STARTED) + # 2) a response to a previous request has signalled that it is going + # to close the connection upon completion. + # 3) the headers for the previous response have not been read, thus + # we cannot determine whether point (2) is true. (_CS_REQ_SENT) + # + # if there is no prior response, then we can request at will. + # + # if point (2) is true, then we will have passed the socket to the + # response (effectively meaning, "there is no prior response"), and + # will open a new one when a new request is made. + # + # Note: if a prior response exists, then we *can* start a new request. + # We are not allowed to begin fetching the response to this new + # request, however, until that prior response is complete. + # + if self.__state == _CS_IDLE: + self.__state = _CS_REQ_STARTED + else: + raise CannotSendRequest(self.__state) + + # Save the method we use, we need it later in the response phase + self._method = method + if not url: + url = '/' + request = '%s %s %s' % (method, url, self._http_vsn_str) + + # Non-ASCII characters should have been eliminated earlier + self._output(request.encode('ascii')) + + if self._http_vsn == 11: + # Issue some standard headers for better HTTP/1.1 compliance + + if not skip_host: + # this header is issued *only* for HTTP/1.1 + # connections. more specifically, this means it is + # only issued when the client uses the new + # HTTPConnection() class. backwards-compat clients + # will be using HTTP/1.0 and those clients may be + # issuing this header themselves. we should NOT issue + # it twice; some web servers (such as Apache) barf + # when they see two Host: headers + + # If we need a non-standard port,include it in the + # header. If the request is going through a proxy, + # but the host of the actual URL, not the host of the + # proxy. 
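+
+            # Illustrative (hypothetical values): a proxied request for
+            # "http://www.example.com:8080/spam" sends
+            # "Host: www.example.com:8080", taken from the request URL
+            # rather than from the proxy's address.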
+ + netloc = '' + if url.startswith('http'): + nil, netloc, nil, nil, nil = urlsplit(url) + + if netloc: + try: + netloc_enc = netloc.encode("ascii") + except UnicodeEncodeError: + netloc_enc = netloc.encode("idna") + self.putheader('Host', netloc_enc) + else: + try: + host_enc = self.host.encode("ascii") + except UnicodeEncodeError: + host_enc = self.host.encode("idna") + + # As per RFC 273, IPv6 address should be wrapped with [] + # when used as Host header + + if self.host.find(':') >= 0: + host_enc = bytes(b'[' + host_enc + b']') + + if self.port == self.default_port: + self.putheader('Host', host_enc) + else: + host_enc = host_enc.decode("ascii") + self.putheader('Host', "%s:%s" % (host_enc, self.port)) + + # note: we are assuming that clients will not attempt to set these + # headers since *this* library must deal with the + # consequences. this also means that when the supporting + # libraries are updated to recognize other forms, then this + # code should be changed (removed or updated). + + # we only want a Content-Encoding of "identity" since we don't + # support encodings such as x-gzip or x-deflate. + if not skip_accept_encoding: + self.putheader('Accept-Encoding', 'identity') + + # we can accept "chunked" Transfer-Encodings, but no others + # NOTE: no TE header implies *only* "chunked" + #self.putheader('TE', 'chunked') + + # if TE is supplied in the header, then it must appear in a + # Connection header. + #self.putheader('Connection', 'TE') + + else: + # For HTTP/1.0, the server will assume "not chunked" + pass + + def putheader(self, header, *values): + """Send a request header line to the server. + + For example: h.putheader('Accept', 'text/html') + """ + if self.__state != _CS_REQ_STARTED: + raise CannotSendHeader() + + if hasattr(header, 'encode'): + header = header.encode('ascii') + values = list(values) + for i, one_value in enumerate(values): + if hasattr(one_value, 'encode'): + values[i] = one_value.encode('latin-1') + elif isinstance(one_value, int): + values[i] = str(one_value).encode('ascii') + value = bytes(b'\r\n\t').join(values) + header = header + bytes(b': ') + value + self._output(header) + + def endheaders(self, message_body=None): + """Indicate that the last header line has been sent to the server. + + This method sends the request to the server. The optional message_body + argument can be used to pass a message body associated with the + request. The message body will be sent in the same packet as the + message headers if it is a string, otherwise it is sent as a separate + packet. + """ + if self.__state == _CS_REQ_STARTED: + self.__state = _CS_REQ_SENT + else: + raise CannotSendHeader() + self._send_output(message_body) + + def request(self, method, url, body=None, headers={}): + """Send a complete request to the server.""" + self._send_request(method, url, body, headers) + + def _set_content_length(self, body): + # Set the content-length based on the body. + thelen = None + try: + thelen = str(len(body)) + except TypeError as te: + # If this is a file-like object, try to + # fstat its file descriptor + try: + thelen = str(os.fstat(body.fileno()).st_size) + except (AttributeError, OSError): + # Don't send a length if this failed + if self.debuglevel > 0: print("Cannot stat!!") + + if thelen is not None: + self.putheader('Content-Length', thelen) + + def _send_request(self, method, url, body, headers): + # Honor explicitly requested Host: and Accept-Encoding: headers. 
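+        # Illustrative: request("GET", "/", headers={"Host": "www.example.com"})
+        # sets skip_host=1, so putrequest() will not emit a second Host header.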
+ header_names = dict.fromkeys([k.lower() for k in headers]) + skips = {} + if 'host' in header_names: + skips['skip_host'] = 1 + if 'accept-encoding' in header_names: + skips['skip_accept_encoding'] = 1 + + self.putrequest(method, url, **skips) + + if body is not None and ('content-length' not in header_names): + self._set_content_length(body) + for hdr, value in headers.items(): + self.putheader(hdr, value) + if isinstance(body, str): + # RFC 2616 Section 3.7.1 says that text default has a + # default charset of iso-8859-1. + body = body.encode('iso-8859-1') + self.endheaders(body) + + def getresponse(self): + """Get the response from the server. + + If the HTTPConnection is in the correct state, returns an + instance of HTTPResponse or of whatever object is returned by + class the response_class variable. + + If a request has not been sent or if a previous response has + not be handled, ResponseNotReady is raised. If the HTTP + response indicates that the connection should be closed, then + it will be closed before the response is returned. When the + connection is closed, the underlying socket is closed. + """ + + # if a prior response has been completed, then forget about it. + if self.__response and self.__response.isclosed(): + self.__response = None + + # if a prior response exists, then it must be completed (otherwise, we + # cannot read this response's header to determine the connection-close + # behavior) + # + # note: if a prior response existed, but was connection-close, then the + # socket and response were made independent of this HTTPConnection + # object since a new request requires that we open a whole new + # connection + # + # this means the prior response had one of two states: + # 1) will_close: this connection was reset and the prior socket and + # response operate independently + # 2) persistent: the response was retained and we await its + # isclosed() status to become true. + # + if self.__state != _CS_REQ_SENT or self.__response: + raise ResponseNotReady(self.__state) + + if self.debuglevel > 0: + response = self.response_class(self.sock, self.debuglevel, + method=self._method) + else: + response = self.response_class(self.sock, method=self._method) + + response.begin() + assert response.will_close != _UNKNOWN + self.__state = _CS_IDLE + + if response.will_close: + # this effectively passes the connection to the response + self.close() + else: + # remember this, so we can tell when it is complete + self.__response = response + + return response + +try: + import ssl + from ssl import SSLContext +except ImportError: + pass +else: + class HTTPSConnection(HTTPConnection): + "This class allows communication via SSL." + + default_port = HTTPS_PORT + + # XXX Should key_file and cert_file be deprecated in favour of context? 
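+        # Illustrative sketch (the CA file name is a placeholder): to verify
+        # the peer, pass in a configured SSLContext, e.g.
+        #     ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+        #     ctx.verify_mode = ssl.CERT_REQUIRED
+        #     ctx.load_verify_locations("ca.pem")
+        #     conn = HTTPSConnection("www.example.com", context=ctx)
+        # check_hostname then defaults to True because the context verifies
+        # certificates.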
+ + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=_strict_sentinel, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, **_3to2kwargs): + if 'check_hostname' in _3to2kwargs: check_hostname = _3to2kwargs['check_hostname']; del _3to2kwargs['check_hostname'] + else: check_hostname = None + if 'context' in _3to2kwargs: context = _3to2kwargs['context']; del _3to2kwargs['context'] + else: context = None + super(HTTPSConnection, self).__init__(host, port, strict, timeout, + source_address) + self.key_file = key_file + self.cert_file = cert_file + if context is None: + # Some reasonable defaults + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + will_verify = context.verify_mode != ssl.CERT_NONE + if check_hostname is None: + check_hostname = will_verify + elif check_hostname and not will_verify: + raise ValueError("check_hostname needs a SSL context with " + "either CERT_OPTIONAL or CERT_REQUIRED") + if key_file or cert_file: + context.load_cert_chain(cert_file, key_file) + self._context = context + self._check_hostname = check_hostname + + def connect(self): + "Connect to a host on a given (SSL) port." + + sock = socket_create_connection((self.host, self.port), + self.timeout, self.source_address) + + if self._tunnel_host: + self.sock = sock + self._tunnel() + + server_hostname = self.host if ssl.HAS_SNI else None + self.sock = self._context.wrap_socket(sock, + server_hostname=server_hostname) + try: + if self._check_hostname: + ssl.match_hostname(self.sock.getpeercert(), self.host) + except Exception: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + __all__.append("HTTPSConnection") + + + # ###################################### + # # We use the old HTTPSConnection class from Py2.7, because ssl.SSLContext + # # doesn't exist in the Py2.7 stdlib + # class HTTPSConnection(HTTPConnection): + # "This class allows communication via SSL." + + # default_port = HTTPS_PORT + + # def __init__(self, host, port=None, key_file=None, cert_file=None, + # strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + # source_address=None): + # HTTPConnection.__init__(self, host, port, strict, timeout, + # source_address) + # self.key_file = key_file + # self.cert_file = cert_file + + # def connect(self): + # "Connect to a host on a given (SSL) port." + + # sock = socket_create_connection((self.host, self.port), + # self.timeout, self.source_address) + # if self._tunnel_host: + # self.sock = sock + # self._tunnel() + # self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file) + + # __all__.append("HTTPSConnection") + # ###################################### + + +class HTTPException(Exception): + # Subclasses that define an __init__ must call Exception.__init__ + # or define self.args. Otherwise, str() will fail. 
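+    # For example, UnknownProtocol below stores self.args = (version,), which
+    # is what lets str(exc) render the offending version string.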
+ pass + +class NotConnected(HTTPException): + pass + +class InvalidURL(HTTPException): + pass + +class UnknownProtocol(HTTPException): + def __init__(self, version): + self.args = version, + self.version = version + +class UnknownTransferEncoding(HTTPException): + pass + +class UnimplementedFileMode(HTTPException): + pass + +class IncompleteRead(HTTPException): + def __init__(self, partial, expected=None): + self.args = partial, + self.partial = partial + self.expected = expected + def __repr__(self): + if self.expected is not None: + e = ', %i more expected' % self.expected + else: + e = '' + return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e) + def __str__(self): + return repr(self) + +class ImproperConnectionState(HTTPException): + pass + +class CannotSendRequest(ImproperConnectionState): + pass + +class CannotSendHeader(ImproperConnectionState): + pass + +class ResponseNotReady(ImproperConnectionState): + pass + +class BadStatusLine(HTTPException): + def __init__(self, line): + if not line: + line = repr(line) + self.args = line, + self.line = line + +class LineTooLong(HTTPException): + def __init__(self, line_type): + HTTPException.__init__(self, "got more than %d bytes when reading %s" + % (_MAXLINE, line_type)) + +# for backwards compatibility +error = HTTPException diff --git a/pype/modules/ftrack/python2_vendor/future/backports/http/cookiejar.py b/pype/modules/ftrack/python2_vendor/future/backports/http/cookiejar.py new file mode 100644 index 0000000000..af3ef4151a --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/http/cookiejar.py @@ -0,0 +1,2110 @@ +r"""HTTP cookie handling for web clients. + +This is a backport of the Py3.3 ``http.cookiejar`` module for +python-future. + +This module has (now fairly distant) origins in Gisle Aas' Perl module +HTTP::Cookies, from the libwww-perl library. + +Docstrings, comments and debug strings in this code refer to the +attributes of the HTTP cookie system as cookie-attributes, to distinguish +them clearly from Python attributes. 
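+
+A typical round trip looks like this (an illustrative sketch; request and
+response stand for urllib-style request/response objects):
+
+    jar = CookieJar()
+    # after a response arrives for a request:
+    jar.extract_cookies(response, request)   # store any received cookies
+    # before sending a follow-up request:
+    jar.add_cookie_header(request)           # attach the matching cookies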
+ +Class diagram (note that BSDDBCookieJar and the MSIE* classes are not +distributed with the Python standard library, but are available from +http://wwwsearch.sf.net/): + + CookieJar____ + / \ \ + FileCookieJar \ \ + / | \ \ \ + MozillaCookieJar | LWPCookieJar \ \ + | | \ + | ---MSIEBase | \ + | / | | \ + | / MSIEDBCookieJar BSDDBCookieJar + |/ + MSIECookieJar + +""" + +from __future__ import unicode_literals +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +from future.builtins import filter, int, map, open, str +from future.utils import as_native_str, PY2 + +__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy', + 'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar'] + +import copy +import datetime +import re +if PY2: + re.ASCII = 0 +import time +from future.backports.urllib.parse import urlparse, urlsplit, quote +from future.backports.http.client import HTTP_PORT +try: + import threading as _threading +except ImportError: + import dummy_threading as _threading +from calendar import timegm + +debug = False # set to True to enable debugging via the logging module +logger = None + +def _debug(*args): + if not debug: + return + global logger + if not logger: + import logging + logger = logging.getLogger("http.cookiejar") + return logger.debug(*args) + + +DEFAULT_HTTP_PORT = str(HTTP_PORT) +MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar " + "instance initialised with one)") + +def _warn_unhandled_exception(): + # There are a few catch-all except: statements in this module, for + # catching input that's bad in unexpected ways. Warn if any + # exceptions are caught there. + import io, warnings, traceback + f = io.StringIO() + traceback.print_exc(None, f) + msg = f.getvalue() + warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2) + + +# Date/time conversion +# ----------------------------------------------------------------------------- + +EPOCH_YEAR = 1970 +def _timegm(tt): + year, month, mday, hour, min, sec = tt[:6] + if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and + (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)): + return timegm(tt) + else: + return None + +DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] +MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] +MONTHS_LOWER = [] +for month in MONTHS: MONTHS_LOWER.append(month.lower()) + +def time2isoz(t=None): + """Return a string representing time in seconds since epoch, t. + + If the function is called without an argument, it will use the current + time. + + The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ", + representing Universal Time (UTC, aka GMT). An example of this format is: + + 1994-11-24 08:49:37Z + + """ + if t is None: + dt = datetime.datetime.utcnow() + else: + dt = datetime.datetime.utcfromtimestamp(t) + return "%04d-%02d-%02d %02d:%02d:%02dZ" % ( + dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + +def time2netscape(t=None): + """Return a string representing time in seconds since epoch, t. + + If the function is called without an argument, it will use the current + time. 
+ + The format of the returned string is like this: + + Wed, DD-Mon-YYYY HH:MM:SS GMT + + """ + if t is None: + dt = datetime.datetime.utcnow() + else: + dt = datetime.datetime.utcfromtimestamp(t) + return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % ( + DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1], + dt.year, dt.hour, dt.minute, dt.second) + + +UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None} + +TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII) +def offset_from_tz_string(tz): + offset = None + if tz in UTC_ZONES: + offset = 0 + else: + m = TIMEZONE_RE.search(tz) + if m: + offset = 3600 * int(m.group(2)) + if m.group(3): + offset = offset + 60 * int(m.group(3)) + if m.group(1) == '-': + offset = -offset + return offset + +def _str2time(day, mon, yr, hr, min, sec, tz): + # translate month name to number + # month numbers start with 1 (January) + try: + mon = MONTHS_LOWER.index(mon.lower())+1 + except ValueError: + # maybe it's already a number + try: + imon = int(mon) + except ValueError: + return None + if 1 <= imon <= 12: + mon = imon + else: + return None + + # make sure clock elements are defined + if hr is None: hr = 0 + if min is None: min = 0 + if sec is None: sec = 0 + + yr = int(yr) + day = int(day) + hr = int(hr) + min = int(min) + sec = int(sec) + + if yr < 1000: + # find "obvious" year + cur_yr = time.localtime(time.time())[0] + m = cur_yr % 100 + tmp = yr + yr = yr + cur_yr - m + m = m - tmp + if abs(m) > 50: + if m > 0: yr = yr + 100 + else: yr = yr - 100 + + # convert UTC time tuple to seconds since epoch (not timezone-adjusted) + t = _timegm((yr, mon, day, hr, min, sec, tz)) + + if t is not None: + # adjust time using timezone string, to get absolute time since epoch + if tz is None: + tz = "UTC" + tz = tz.upper() + offset = offset_from_tz_string(tz) + if offset is None: + return None + t = t - offset + + return t + +STRICT_DATE_RE = re.compile( + r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) " + "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) +WEEKDAY_RE = re.compile( + r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII) +LOOSE_HTTP_DATE_RE = re.compile( + r"""^ + (\d\d?) # day + (?:\s+|[-\/]) + (\w+) # month + (?:\s+|[-\/]) + (\d+) # year + (?: + (?:\s+|:) # separator before clock + (\d\d?):(\d\d) # hour:min + (?::(\d\d))? # optional seconds + )? # optional clock + \s* + ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone + \s* + (?:\(\w+\))? # ASCII representation of timezone in parens. + \s*$""", re.X | re.ASCII) +def http2time(text): + """Returns time in seconds since epoch of time represented by a string. + + Return value is an integer. + + None is returned if the format of str is unrecognized, the time is outside + the representable range, or the timezone string is not recognized. If the + string contains no timezone, UTC is assumed. + + The timezone in the string may be numerical (like "-0800" or "+0100") or a + string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the + timezone strings equivalent to UTC (zero offset) are known to the function. + + The function loosely parses the following formats: + + Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format + Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format + Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format + 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) + 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) + 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) + + The parser ignores leading and trailing whitespace. 
The time may be
+    absent.
+
+    If the year is given with only 2 digits, the function will select the
+    century that makes the year closest to the current date.
+
+    """
+    # fast exit for strictly conforming string
+    m = STRICT_DATE_RE.search(text)
+    if m:
+        g = m.groups()
+        mon = MONTHS_LOWER.index(g[1].lower()) + 1
+        tt = (int(g[2]), mon, int(g[0]),
+              int(g[3]), int(g[4]), float(g[5]))
+        return _timegm(tt)
+
+    # No, we need some messy parsing...
+
+    # clean up
+    text = text.lstrip()
+    text = WEEKDAY_RE.sub("", text, 1)  # Useless weekday
+
+    # tz is time zone specifier string
+    day, mon, yr, hr, min, sec, tz = [None]*7
+
+    # loose regexp parse
+    m = LOOSE_HTTP_DATE_RE.search(text)
+    if m is not None:
+        day, mon, yr, hr, min, sec, tz = m.groups()
+    else:
+        return None  # bad format
+
+    return _str2time(day, mon, yr, hr, min, sec, tz)
+
+ISO_DATE_RE = re.compile(
+    """^
+    (\d{4})               # year
+    [-\/]?
+    (\d\d?)               # numerical month
+    [-\/]?
+    (\d\d?)               # day
+    (?:
+        (?:\s+|[-:Tt])    # separator before clock
+        (\d\d?):?(\d\d)   # hour:min
+        (?::?(\d\d(?:\.\d*)?))?  # optional seconds (and fractional)
+    )?                    # optional clock
+    \s*
+    ([-+]?\d\d?:?(:?\d\d)?
+     |Z|z)?               # timezone  (Z is "zero meridian", i.e. GMT)
+    \s*$""", re.X | re.ASCII)
+def iso2time(text):
+    """
+    As for http2time, but parses the ISO 8601 formats:
+
+    1994-02-03 14:15:29 -0100    -- ISO 8601 format
+    1994-02-03 14:15:29          -- zone is optional
+    1994-02-03                   -- only date
+    1994-02-03T14:15:29          -- Use T as separator
+    19940203T141529Z             -- ISO 8601 compact format
+    19940203                     -- only date
+
+    """
+    # clean up
+    text = text.lstrip()
+
+    # tz is time zone specifier string
+    day, mon, yr, hr, min, sec, tz = [None]*7
+
+    # loose regexp parse
+    m = ISO_DATE_RE.search(text)
+    if m is not None:
+        # XXX there's an extra bit of the timezone I'm ignoring here: is
+        #   this the right thing to do?
+        yr, mon, day, hr, min, sec, tz, _ = m.groups()
+    else:
+        return None  # bad format
+
+    return _str2time(day, mon, yr, hr, min, sec, tz)
+
+
+# Header parsing
+# -----------------------------------------------------------------------------
+
+def unmatched(match):
+    """Return unmatched part of re.Match object."""
+    start, end = match.span(0)
+    return match.string[:start]+match.string[end:]
+
+HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
+HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
+HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
+HEADER_ESCAPE_RE = re.compile(r"\\(.)")
+def split_header_words(header_values):
+    r"""Parse header values into a list of lists containing key,value pairs.
+
+    The function knows how to deal with ",", ";" and "=" as well as quoted
+    values after "=".  A list of space separated tokens are parsed as if they
+    were separated by ";".
+
+    If the header_values passed as argument contains multiple values, then they
+    are treated as if they were a single value separated by comma ",".
+
+    This means that this function is useful for parsing header fields that
+    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
+    the requirement for tokens).
+
+      headers           = #header
+      header            = (token | parameter) *( [";"] (token | parameter))
+
+      token             = 1*<any CHAR except CTLs or separators>
+      separators        = "(" | ")" | "<" | ">" | "@"
+                        | "," | ";" | ":" | "\" | <">
+                        | "/" | "[" | "]" | "?"
| "=" + | "{" | "}" | SP | HT + + quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) + qdtext = > + quoted-pair = "\" CHAR + + parameter = attribute "=" value + attribute = token + value = token | quoted-string + + Each header is represented by a list of key/value pairs. The value for a + simple token (not part of a parameter) is None. Syntactically incorrect + headers will not necessarily be parsed as you would want. + + This is easier to describe with some examples: + + >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz']) + [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]] + >>> split_header_words(['text/html; charset="iso-8859-1"']) + [[('text/html', None), ('charset', 'iso-8859-1')]] + >>> split_header_words([r'Basic realm="\"foo\bar\""']) + [[('Basic', None), ('realm', '"foobar"')]] + + """ + assert not isinstance(header_values, str) + result = [] + for text in header_values: + orig_text = text + pairs = [] + while text: + m = HEADER_TOKEN_RE.search(text) + if m: + text = unmatched(m) + name = m.group(1) + m = HEADER_QUOTED_VALUE_RE.search(text) + if m: # quoted value + text = unmatched(m) + value = m.group(1) + value = HEADER_ESCAPE_RE.sub(r"\1", value) + else: + m = HEADER_VALUE_RE.search(text) + if m: # unquoted value + text = unmatched(m) + value = m.group(1) + value = value.rstrip() + else: + # no value, a lone token + value = None + pairs.append((name, value)) + elif text.lstrip().startswith(","): + # concatenated headers, as per RFC 2616 section 4.2 + text = text.lstrip()[1:] + if pairs: result.append(pairs) + pairs = [] + else: + # skip junk + non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) + assert nr_junk_chars > 0, ( + "split_header_words bug: '%s', '%s', %s" % + (orig_text, text, pairs)) + text = non_junk + if pairs: result.append(pairs) + return result + +HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])") +def join_header_words(lists): + """Do the inverse (almost) of the conversion done by split_header_words. + + Takes a list of lists of (key, value) pairs and produces a single header + value. Attribute values are quoted if needed. + + >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]]) + 'text/plain; charset="iso-8859/1"' + >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]]) + 'text/plain, charset="iso-8859/1"' + + """ + headers = [] + for pairs in lists: + attr = [] + for k, v in pairs: + if v is not None: + if not re.search(r"^\w+$", v): + v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \ + v = '"%s"' % v + k = "%s=%s" % (k, v) + attr.append(k) + if attr: headers.append("; ".join(attr)) + return ", ".join(headers) + +def strip_quotes(text): + if text.startswith('"'): + text = text[1:] + if text.endswith('"'): + text = text[:-1] + return text + +def parse_ns_headers(ns_headers): + """Ad-hoc parser for Netscape protocol cookie-attributes. + + The old Netscape cookie format for Set-Cookie can for instance contain + an unquoted "," in the expires field, so we have to use this ad-hoc + parser instead of split_header_words. + + XXX This may not make the best possible effort to parse all the crap + that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient + parser is probably better, so could do worse than following that if + this ever gives any trouble. + + Currently, this is also used for parsing RFC 2109 cookies. 
+ + """ + known_attrs = ("expires", "domain", "path", "secure", + # RFC 2109 attrs (may turn up in Netscape cookies, too) + "version", "port", "max-age") + + result = [] + for ns_header in ns_headers: + pairs = [] + version_set = False + for ii, param in enumerate(re.split(r";\s*", ns_header)): + param = param.rstrip() + if param == "": continue + if "=" not in param: + k, v = param, None + else: + k, v = re.split(r"\s*=\s*", param, 1) + k = k.lstrip() + if ii != 0: + lc = k.lower() + if lc in known_attrs: + k = lc + if k == "version": + # This is an RFC 2109 cookie. + v = strip_quotes(v) + version_set = True + if k == "expires": + # convert expires date to seconds since epoch + v = http2time(strip_quotes(v)) # None if invalid + pairs.append((k, v)) + + if pairs: + if not version_set: + pairs.append(("version", "0")) + result.append(pairs) + + return result + + +IPV4_RE = re.compile(r"\.\d+$", re.ASCII) +def is_HDN(text): + """Return True if text is a host domain name.""" + # XXX + # This may well be wrong. Which RFC is HDN defined in, if any (for + # the purposes of RFC 2965)? + # For the current implementation, what about IPv6? Remember to look + # at other uses of IPV4_RE also, if change this. + if IPV4_RE.search(text): + return False + if text == "": + return False + if text[0] == "." or text[-1] == ".": + return False + return True + +def domain_match(A, B): + """Return True if domain A domain-matches domain B, according to RFC 2965. + + A and B may be host domain names or IP addresses. + + RFC 2965, section 1: + + Host names can be specified either as an IP address or a HDN string. + Sometimes we compare one host name with another. (Such comparisons SHALL + be case-insensitive.) Host A's name domain-matches host B's if + + * their host name strings string-compare equal; or + + * A is a HDN string and has the form NB, where N is a non-empty + name string, B has the form .B', and B' is a HDN string. (So, + x.y.com domain-matches .Y.com but not Y.com.) + + Note that domain-match is not a commutative operation: a.b.c.com + domain-matches .c.com, but not the reverse. + + """ + # Note that, if A or B are IP addresses, the only relevant part of the + # definition of the domain-match algorithm is the direct string-compare. + A = A.lower() + B = B.lower() + if A == B: + return True + if not is_HDN(A): + return False + i = A.rfind(B) + if i == -1 or i == 0: + # A does not have form NB, or N is the empty string + return False + if not B.startswith("."): + return False + if not is_HDN(B[1:]): + return False + return True + +def liberal_is_HDN(text): + """Return True if text is a sort-of-like a host domain name. + + For accepting/blocking domains. + + """ + if IPV4_RE.search(text): + return False + return True + +def user_domain_match(A, B): + """For blocking/accepting domains. + + A and B may be host domain names or IP addresses. + + """ + A = A.lower() + B = B.lower() + if not (liberal_is_HDN(A) and liberal_is_HDN(B)): + if A == B: + # equal IP addresses + return True + return False + initial_dot = B.startswith(".") + if initial_dot and A.endswith(B): + return True + if not initial_dot and A == B: + return True + return False + +cut_port_re = re.compile(r":\d+$", re.ASCII) +def request_host(request): + """Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. 
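+
+    For example, with a minimal request stub (FakeRequest here is a
+    hypothetical stand-in for a urllib-style request object):
+
+    >>> class FakeRequest(object):
+    ...     def get_full_url(self): return "http://www.Example.com:8080/spam"
+    ...     def get_header(self, name, default=None): return default
+    >>> request_host(FakeRequest())
+    'www.example.com'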
+ + """ + url = request.get_full_url() + host = urlparse(url)[1] + if host == "": + host = request.get_header("Host", "") + + # remove port, if present + host = cut_port_re.sub("", host, 1) + return host.lower() + +def eff_request_host(request): + """Return a tuple (request-host, effective request-host name). + + As defined by RFC 2965, except both are lowercased. + + """ + erhn = req_host = request_host(request) + if req_host.find(".") == -1 and not IPV4_RE.search(req_host): + erhn = req_host + ".local" + return req_host, erhn + +def request_path(request): + """Path component of request-URI, as defined by RFC 2965.""" + url = request.get_full_url() + parts = urlsplit(url) + path = escape_path(parts.path) + if not path.startswith("/"): + # fix bad RFC 2396 absoluteURI + path = "/" + path + return path + +def request_port(request): + host = request.host + i = host.find(':') + if i >= 0: + port = host[i+1:] + try: + int(port) + except ValueError: + _debug("nonnumeric port: '%s'", port) + return None + else: + port = DEFAULT_HTTP_PORT + return port + +# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't +# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738). +HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()" +ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])") +def uppercase_escaped_char(match): + return "%%%s" % match.group(1).upper() +def escape_path(path): + """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" + # There's no knowing what character encoding was used to create URLs + # containing %-escapes, but since we have to pick one to escape invalid + # path characters, we pick UTF-8, as recommended in the HTML 4.0 + # specification: + # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 + # And here, kind of: draft-fielding-uri-rfc2396bis-03 + # (And in draft IRI specification: draft-duerst-iri-05) + # (And here, for new URI schemes: RFC 2718) + path = quote(path, HTTP_PATH_SAFE) + path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) + return path + +def reach(h): + """Return reach of host h, as defined by RFC 2965, section 1. + + The reach R of a host name H is defined as follows: + + * If + + - H is the host domain name of a host; and, + + - H has the form A.B; and + + - A has no embedded (that is, interior) dots; and + + - B has at least one embedded dot, or B is the string "local". + then the reach of H is .B. + + * Otherwise, the reach of H is H. + + >>> reach("www.acme.com") + '.acme.com' + >>> reach("acme.com") + 'acme.com' + >>> reach("acme.local") + '.local' + + """ + i = h.find(".") + if i >= 0: + #a = h[:i] # this line is only here to show what a is + b = h[i+1:] + i = b.find(".") + if is_HDN(h) and (i >= 0 or b == "local"): + return "."+b + return h + +def is_third_party(request): + """ + + RFC 2965, section 3.3.6: + + An unverifiable transaction is to a third-party host if its request- + host U does not domain-match the reach R of the request-host O in the + origin transaction. + + """ + req_host = request_host(request) + if not domain_match(req_host, reach(request.get_origin_req_host())): + return True + else: + return False + + +class Cookie(object): + """HTTP Cookie. + + This class represents both Netscape and RFC 2965 cookies. + + This is deliberately a very simple class. It just holds attributes. It's + possible to construct Cookie instances that don't comply with the cookie + standards. 
CookieJar.make_cookies is the factory function for Cookie
+    objects -- it deals with cookie parsing, supplying defaults, and
+    normalising to the representation used in this class. CookiePolicy is
+    responsible for checking them to see whether they should be accepted from
+    and returned to the server.
+
+    Note that the port may be present in the headers, but unspecified ("Port"
+    rather than "Port=80", for example); if this is the case, port is None.
+
+    """
+
+    def __init__(self, version, name, value,
+                 port, port_specified,
+                 domain, domain_specified, domain_initial_dot,
+                 path, path_specified,
+                 secure,
+                 expires,
+                 discard,
+                 comment,
+                 comment_url,
+                 rest,
+                 rfc2109=False,
+                 ):
+
+        if version is not None: version = int(version)
+        if expires is not None: expires = int(expires)
+        if port is None and port_specified is True:
+            raise ValueError("if port is None, port_specified must be false")
+
+        self.version = version
+        self.name = name
+        self.value = value
+        self.port = port
+        self.port_specified = port_specified
+        # normalise case, as per RFC 2965 section 3.3.3
+        self.domain = domain.lower()
+        self.domain_specified = domain_specified
+        # Sigh. We need to know whether the domain given in the
+        # cookie-attribute had an initial dot, in order to follow RFC 2965
+        # (as clarified in draft errata). Needed for the returned $Domain
+        # value.
+        self.domain_initial_dot = domain_initial_dot
+        self.path = path
+        self.path_specified = path_specified
+        self.secure = secure
+        self.expires = expires
+        self.discard = discard
+        self.comment = comment
+        self.comment_url = comment_url
+        self.rfc2109 = rfc2109
+
+        self._rest = copy.copy(rest)
+
+    def has_nonstandard_attr(self, name):
+        return name in self._rest
+    def get_nonstandard_attr(self, name, default=None):
+        return self._rest.get(name, default)
+    def set_nonstandard_attr(self, name, value):
+        self._rest[name] = value
+
+    def is_expired(self, now=None):
+        if now is None: now = time.time()
+        if (self.expires is not None) and (self.expires <= now):
+            return True
+        return False
+
+    def __str__(self):
+        if self.port is None: p = ""
+        else: p = ":"+self.port
+        limit = self.domain + p + self.path
+        if self.value is not None:
+            namevalue = "%s=%s" % (self.name, self.value)
+        else:
+            namevalue = self.name
+        return "<Cookie %s for %s>" % (namevalue, limit)
+
+    @as_native_str()
+    def __repr__(self):
+        args = []
+        for name in ("version", "name", "value",
+                     "port", "port_specified",
+                     "domain", "domain_specified", "domain_initial_dot",
+                     "path", "path_specified",
+                     "secure", "expires", "discard", "comment", "comment_url",
+                     ):
+            attr = getattr(self, name)
+            ### Python-Future:
+            # Avoid u'...' prefixes for unicode strings:
+            if isinstance(attr, str):
+                attr = str(attr)
+            ###
+            args.append(str("%s=%s") % (name, repr(attr)))
+        args.append("rest=%s" % repr(self._rest))
+        args.append("rfc2109=%s" % repr(self.rfc2109))
+        return "Cookie(%s)" % ", ".join(args)
+
+
+class CookiePolicy(object):
+    """Defines which cookies get accepted from and returned to server.
+
+    May also modify cookies, though this is probably a bad idea.
+
+    The subclass DefaultCookiePolicy defines the standard rules for Netscape
+    and RFC 2965 cookies -- override that if you want a customised policy.
+
+    """
+    def set_ok(self, cookie, request):
+        """Return true if (and only if) cookie should be accepted from server.
+
+        Currently, pre-expired cookies never get this far -- the CookieJar
+        class deletes such cookies itself.
+ + """ + raise NotImplementedError() + + def return_ok(self, cookie, request): + """Return true if (and only if) cookie should be returned to server.""" + raise NotImplementedError() + + def domain_return_ok(self, domain, request): + """Return false if cookies should not be returned, given cookie domain. + """ + return True + + def path_return_ok(self, path, request): + """Return false if cookies should not be returned, given cookie path. + """ + return True + + +class DefaultCookiePolicy(CookiePolicy): + """Implements the standard rules for accepting and returning cookies.""" + + DomainStrictNoDots = 1 + DomainStrictNonDomain = 2 + DomainRFC2965Match = 4 + + DomainLiberal = 0 + DomainStrict = DomainStrictNoDots|DomainStrictNonDomain + + def __init__(self, + blocked_domains=None, allowed_domains=None, + netscape=True, rfc2965=False, + rfc2109_as_netscape=None, + hide_cookie2=False, + strict_domain=False, + strict_rfc2965_unverifiable=True, + strict_ns_unverifiable=False, + strict_ns_domain=DomainLiberal, + strict_ns_set_initial_dollar=False, + strict_ns_set_path=False, + ): + """Constructor arguments should be passed as keyword arguments only.""" + self.netscape = netscape + self.rfc2965 = rfc2965 + self.rfc2109_as_netscape = rfc2109_as_netscape + self.hide_cookie2 = hide_cookie2 + self.strict_domain = strict_domain + self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable + self.strict_ns_unverifiable = strict_ns_unverifiable + self.strict_ns_domain = strict_ns_domain + self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar + self.strict_ns_set_path = strict_ns_set_path + + if blocked_domains is not None: + self._blocked_domains = tuple(blocked_domains) + else: + self._blocked_domains = () + + if allowed_domains is not None: + allowed_domains = tuple(allowed_domains) + self._allowed_domains = allowed_domains + + def blocked_domains(self): + """Return the sequence of blocked domains (as a tuple).""" + return self._blocked_domains + def set_blocked_domains(self, blocked_domains): + """Set the sequence of blocked domains.""" + self._blocked_domains = tuple(blocked_domains) + + def is_blocked(self, domain): + for blocked_domain in self._blocked_domains: + if user_domain_match(domain, blocked_domain): + return True + return False + + def allowed_domains(self): + """Return None, or the sequence of allowed domains (as a tuple).""" + return self._allowed_domains + def set_allowed_domains(self, allowed_domains): + """Set the sequence of allowed domains, or None.""" + if allowed_domains is not None: + allowed_domains = tuple(allowed_domains) + self._allowed_domains = allowed_domains + + def is_not_allowed(self, domain): + if self._allowed_domains is None: + return False + for allowed_domain in self._allowed_domains: + if user_domain_match(domain, allowed_domain): + return False + return True + + def set_ok(self, cookie, request): + """ + If you override .set_ok(), be sure to call this method. If it returns + false, so should your subclass (assuming your subclass wants to be more + strict about which cookies to accept). 
+ + """ + _debug(" - checking cookie %s=%s", cookie.name, cookie.value) + + assert cookie.name is not None + + for n in "version", "verifiability", "name", "path", "domain", "port": + fn_name = "set_ok_"+n + fn = getattr(self, fn_name) + if not fn(cookie, request): + return False + + return True + + def set_ok_version(self, cookie, request): + if cookie.version is None: + # Version is always set to 0 by parse_ns_headers if it's a Netscape + # cookie, so this must be an invalid RFC 2965 cookie. + _debug(" Set-Cookie2 without version attribute (%s=%s)", + cookie.name, cookie.value) + return False + if cookie.version > 0 and not self.rfc2965: + _debug(" RFC 2965 cookies are switched off") + return False + elif cookie.version == 0 and not self.netscape: + _debug(" Netscape cookies are switched off") + return False + return True + + def set_ok_verifiability(self, cookie, request): + if request.unverifiable and is_third_party(request): + if cookie.version > 0 and self.strict_rfc2965_unverifiable: + _debug(" third-party RFC 2965 cookie during " + "unverifiable transaction") + return False + elif cookie.version == 0 and self.strict_ns_unverifiable: + _debug(" third-party Netscape cookie during " + "unverifiable transaction") + return False + return True + + def set_ok_name(self, cookie, request): + # Try and stop servers setting V0 cookies designed to hack other + # servers that know both V0 and V1 protocols. + if (cookie.version == 0 and self.strict_ns_set_initial_dollar and + cookie.name.startswith("$")): + _debug(" illegal name (starts with '$'): '%s'", cookie.name) + return False + return True + + def set_ok_path(self, cookie, request): + if cookie.path_specified: + req_path = request_path(request) + if ((cookie.version > 0 or + (cookie.version == 0 and self.strict_ns_set_path)) and + not req_path.startswith(cookie.path)): + _debug(" path attribute %s is not a prefix of request " + "path %s", cookie.path, req_path) + return False + return True + + def set_ok_domain(self, cookie, request): + if self.is_blocked(cookie.domain): + _debug(" domain %s is in user block-list", cookie.domain) + return False + if self.is_not_allowed(cookie.domain): + _debug(" domain %s is not in user allow-list", cookie.domain) + return False + if cookie.domain_specified: + req_host, erhn = eff_request_host(request) + domain = cookie.domain + if self.strict_domain and (domain.count(".") >= 2): + # XXX This should probably be compared with the Konqueror + # (kcookiejar.cpp) and Mozilla implementations, but it's a + # losing battle. 
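+                # For example, a cookie sent with Domain=.co.uk gives
+                # j == 0, tld == "uk" and sld == "co" below, so it is
+                # rejected: accepting it would cover every host under
+                # .co.uk.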
+ i = domain.rfind(".") + j = domain.rfind(".", 0, i) + if j == 0: # domain like .foo.bar + tld = domain[i+1:] + sld = domain[j+1:i] + if sld.lower() in ("co", "ac", "com", "edu", "org", "net", + "gov", "mil", "int", "aero", "biz", "cat", "coop", + "info", "jobs", "mobi", "museum", "name", "pro", + "travel", "eu") and len(tld) == 2: + # domain like .co.uk + _debug(" country-code second level domain %s", domain) + return False + if domain.startswith("."): + undotted_domain = domain[1:] + else: + undotted_domain = domain + embedded_dots = (undotted_domain.find(".") >= 0) + if not embedded_dots and domain != ".local": + _debug(" non-local domain %s contains no embedded dot", + domain) + return False + if cookie.version == 0: + if (not erhn.endswith(domain) and + (not erhn.startswith(".") and + not ("."+erhn).endswith(domain))): + _debug(" effective request-host %s (even with added " + "initial dot) does not end with %s", + erhn, domain) + return False + if (cookie.version > 0 or + (self.strict_ns_domain & self.DomainRFC2965Match)): + if not domain_match(erhn, domain): + _debug(" effective request-host %s does not domain-match " + "%s", erhn, domain) + return False + if (cookie.version > 0 or + (self.strict_ns_domain & self.DomainStrictNoDots)): + host_prefix = req_host[:-len(domain)] + if (host_prefix.find(".") >= 0 and + not IPV4_RE.search(req_host)): + _debug(" host prefix %s for domain %s contains a dot", + host_prefix, domain) + return False + return True + + def set_ok_port(self, cookie, request): + if cookie.port_specified: + req_port = request_port(request) + if req_port is None: + req_port = "80" + else: + req_port = str(req_port) + for p in cookie.port.split(","): + try: + int(p) + except ValueError: + _debug(" bad port %s (not numeric)", p) + return False + if p == req_port: + break + else: + _debug(" request port (%s) not found in %s", + req_port, cookie.port) + return False + return True + + def return_ok(self, cookie, request): + """ + If you override .return_ok(), be sure to call this method. If it + returns false, so should your subclass (assuming your subclass wants to + be more strict about which cookies to return). + + """ + # Path has already been checked by .path_return_ok(), and domain + # blocking done by .domain_return_ok(). 
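+        # The remaining per-attribute checks are dispatched by name below,
+        # e.g. "secure" -> return_ok_secure().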
+ _debug(" - checking cookie %s=%s", cookie.name, cookie.value) + + for n in "version", "verifiability", "secure", "expires", "port", "domain": + fn_name = "return_ok_"+n + fn = getattr(self, fn_name) + if not fn(cookie, request): + return False + return True + + def return_ok_version(self, cookie, request): + if cookie.version > 0 and not self.rfc2965: + _debug(" RFC 2965 cookies are switched off") + return False + elif cookie.version == 0 and not self.netscape: + _debug(" Netscape cookies are switched off") + return False + return True + + def return_ok_verifiability(self, cookie, request): + if request.unverifiable and is_third_party(request): + if cookie.version > 0 and self.strict_rfc2965_unverifiable: + _debug(" third-party RFC 2965 cookie during unverifiable " + "transaction") + return False + elif cookie.version == 0 and self.strict_ns_unverifiable: + _debug(" third-party Netscape cookie during unverifiable " + "transaction") + return False + return True + + def return_ok_secure(self, cookie, request): + if cookie.secure and request.type != "https": + _debug(" secure cookie with non-secure request") + return False + return True + + def return_ok_expires(self, cookie, request): + if cookie.is_expired(self._now): + _debug(" cookie expired") + return False + return True + + def return_ok_port(self, cookie, request): + if cookie.port: + req_port = request_port(request) + if req_port is None: + req_port = "80" + for p in cookie.port.split(","): + if p == req_port: + break + else: + _debug(" request port %s does not match cookie port %s", + req_port, cookie.port) + return False + return True + + def return_ok_domain(self, cookie, request): + req_host, erhn = eff_request_host(request) + domain = cookie.domain + + # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't + if (cookie.version == 0 and + (self.strict_ns_domain & self.DomainStrictNonDomain) and + not cookie.domain_specified and domain != erhn): + _debug(" cookie with unspecified domain does not string-compare " + "equal to request domain") + return False + + if cookie.version > 0 and not domain_match(erhn, domain): + _debug(" effective request-host name %s does not domain-match " + "RFC 2965 cookie domain %s", erhn, domain) + return False + if cookie.version == 0 and not ("."+erhn).endswith(domain): + _debug(" request-host %s does not match Netscape cookie domain " + "%s", req_host, domain) + return False + return True + + def domain_return_ok(self, domain, request): + # Liberal check of. This is here as an optimization to avoid + # having to load lots of MSIE cookie files unless necessary. 
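+        # For example, with request host "www.example.com", a cookie domain
+        # of ".example.com" passes this cheap endswith() test; the full
+        # match is checked later by return_ok_domain().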
+ req_host, erhn = eff_request_host(request) + if not req_host.startswith("."): + req_host = "."+req_host + if not erhn.startswith("."): + erhn = "."+erhn + if not (req_host.endswith(domain) or erhn.endswith(domain)): + #_debug(" request domain %s does not match cookie domain %s", + # req_host, domain) + return False + + if self.is_blocked(domain): + _debug(" domain %s is in user block-list", domain) + return False + if self.is_not_allowed(domain): + _debug(" domain %s is not in user allow-list", domain) + return False + + return True + + def path_return_ok(self, path, request): + _debug("- checking cookie path=%s", path) + req_path = request_path(request) + if not req_path.startswith(path): + _debug(" %s does not path-match %s", req_path, path) + return False + return True + + +def vals_sorted_by_key(adict): + keys = sorted(adict.keys()) + return map(adict.get, keys) + +def deepvalues(mapping): + """Iterates over nested mapping, depth-first, in sorted order by key.""" + values = vals_sorted_by_key(mapping) + for obj in values: + mapping = False + try: + obj.items + except AttributeError: + pass + else: + mapping = True + for subobj in deepvalues(obj): + yield subobj + if not mapping: + yield obj + + +# Used as second parameter to dict.get() method, to distinguish absent +# dict key from one with a None value. +class Absent(object): pass + +class CookieJar(object): + """Collection of HTTP cookies. + + You may not need to know about this class: try + urllib.request.build_opener(HTTPCookieProcessor).open(url). + """ + + non_word_re = re.compile(r"\W") + quote_re = re.compile(r"([\"\\])") + strict_domain_re = re.compile(r"\.?[^.]*") + domain_re = re.compile(r"[^.]*") + dots_re = re.compile(r"^\.+") + + magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII) + + def __init__(self, policy=None): + if policy is None: + policy = DefaultCookiePolicy() + self._policy = policy + + self._cookies_lock = _threading.RLock() + self._cookies = {} + + def set_policy(self, policy): + self._policy = policy + + def _cookies_for_domain(self, domain, request): + cookies = [] + if not self._policy.domain_return_ok(domain, request): + return [] + _debug("Checking %s for cookies to return", domain) + cookies_by_path = self._cookies[domain] + for path in cookies_by_path.keys(): + if not self._policy.path_return_ok(path, request): + continue + cookies_by_name = cookies_by_path[path] + for cookie in cookies_by_name.values(): + if not self._policy.return_ok(cookie, request): + _debug(" not returning cookie") + continue + _debug(" it's a match") + cookies.append(cookie) + return cookies + + def _cookies_for_request(self, request): + """Return a list of cookies to be returned to server.""" + cookies = [] + for domain in self._cookies.keys(): + cookies.extend(self._cookies_for_domain(domain, request)) + return cookies + + def _cookie_attrs(self, cookies): + """Return a list of cookie-attributes to be returned to server. + + like ['foo="bar"; $Path="/"', ...] + + The $Version attribute is also added when appropriate (currently only + once per request). + + """ + # add cookies in order of most specific (ie. longest) path first + cookies.sort(key=lambda a: len(a.path), reverse=True) + + version_set = False + + attrs = [] + for cookie in cookies: + # set version of Cookie header + # XXX + # What should it be if multiple matching Set-Cookie headers have + # different versions themselves? + # Answer: there is no answer; was supposed to be settled by + # RFC 2965 errata, but that may never appear... 
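+            # For a single version-1 cookie foo=bar with Path=/ and a
+            # non-dotted domain, the list built here comes out as
+            # ['$Version=1', 'foo=bar', '$Path="/"'].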
+ version = cookie.version + if not version_set: + version_set = True + if version > 0: + attrs.append("$Version=%s" % version) + + # quote cookie value if necessary + # (not for Netscape protocol, which already has any quotes + # intact, due to the poorly-specified Netscape Cookie: syntax) + if ((cookie.value is not None) and + self.non_word_re.search(cookie.value) and version > 0): + value = self.quote_re.sub(r"\\\1", cookie.value) + else: + value = cookie.value + + # add cookie-attributes to be returned in Cookie header + if cookie.value is None: + attrs.append(cookie.name) + else: + attrs.append("%s=%s" % (cookie.name, value)) + if version > 0: + if cookie.path_specified: + attrs.append('$Path="%s"' % cookie.path) + if cookie.domain.startswith("."): + domain = cookie.domain + if (not cookie.domain_initial_dot and + domain.startswith(".")): + domain = domain[1:] + attrs.append('$Domain="%s"' % domain) + if cookie.port is not None: + p = "$Port" + if cookie.port_specified: + p = p + ('="%s"' % cookie.port) + attrs.append(p) + + return attrs + + def add_cookie_header(self, request): + """Add correct Cookie: header to request (urllib.request.Request object). + + The Cookie2 header is also added unless policy.hide_cookie2 is true. + + """ + _debug("add_cookie_header") + self._cookies_lock.acquire() + try: + + self._policy._now = self._now = int(time.time()) + + cookies = self._cookies_for_request(request) + + attrs = self._cookie_attrs(cookies) + if attrs: + if not request.has_header("Cookie"): + request.add_unredirected_header( + "Cookie", "; ".join(attrs)) + + # if necessary, advertise that we know RFC 2965 + if (self._policy.rfc2965 and not self._policy.hide_cookie2 and + not request.has_header("Cookie2")): + for cookie in cookies: + if cookie.version != 1: + request.add_unredirected_header("Cookie2", '$Version="1"') + break + + finally: + self._cookies_lock.release() + + self.clear_expired_cookies() + + def _normalized_cookie_tuples(self, attrs_set): + """Return list of tuples containing normalised cookie information. + + attrs_set is the list of lists of key,value pairs extracted from + the Set-Cookie or Set-Cookie2 headers. + + Tuples are name, value, standard, rest, where name and value are the + cookie name and value, standard is a dictionary containing the standard + cookie-attributes (discard, secure, version, expires or max-age, + domain, path and port) and rest is a dictionary containing the rest of + the cookie-attributes. + + """ + cookie_tuples = [] + + boolean_attrs = "discard", "secure" + value_attrs = ("version", + "expires", "max-age", + "domain", "path", "port", + "comment", "commenturl") + + for cookie_attrs in attrs_set: + name, value = cookie_attrs[0] + + # Build dictionary of standard cookie-attributes (standard) and + # dictionary of other cookie-attributes (rest). + + # Note: expiry time is normalised to seconds since epoch. V0 + # cookies should have the Expires cookie-attribute, and V1 cookies + # should have Max-Age, but since V1 includes RFC 2109 cookies (and + # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we + # accept either (but prefer Max-Age). 
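+            # For example, the attribute list [("sid", "1"),
+            # ("max-age", "3600"), ("path", "/")] is normalised to
+            # ("sid", "1", {"expires": self._now + 3600, "path": "/"}, {}).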
+ max_age_set = False + + bad_cookie = False + + standard = {} + rest = {} + for k, v in cookie_attrs[1:]: + lc = k.lower() + # don't lose case distinction for unknown fields + if lc in value_attrs or lc in boolean_attrs: + k = lc + if k in boolean_attrs and v is None: + # boolean cookie-attribute is present, but has no value + # (like "discard", rather than "port=80") + v = True + if k in standard: + # only first value is significant + continue + if k == "domain": + if v is None: + _debug(" missing value for domain attribute") + bad_cookie = True + break + # RFC 2965 section 3.3.3 + v = v.lower() + if k == "expires": + if max_age_set: + # Prefer max-age to expires (like Mozilla) + continue + if v is None: + _debug(" missing or invalid value for expires " + "attribute: treating as session cookie") + continue + if k == "max-age": + max_age_set = True + try: + v = int(v) + except ValueError: + _debug(" missing or invalid (non-numeric) value for " + "max-age attribute") + bad_cookie = True + break + # convert RFC 2965 Max-Age to seconds since epoch + # XXX Strictly you're supposed to follow RFC 2616 + # age-calculation rules. Remember that zero Max-Age is a + # is a request to discard (old and new) cookie, though. + k = "expires" + v = self._now + v + if (k in value_attrs) or (k in boolean_attrs): + if (v is None and + k not in ("port", "comment", "commenturl")): + _debug(" missing value for %s attribute" % k) + bad_cookie = True + break + standard[k] = v + else: + rest[k] = v + + if bad_cookie: + continue + + cookie_tuples.append((name, value, standard, rest)) + + return cookie_tuples + + def _cookie_from_cookie_tuple(self, tup, request): + # standard is dict of standard cookie-attributes, rest is dict of the + # rest of them + name, value, standard, rest = tup + + domain = standard.get("domain", Absent) + path = standard.get("path", Absent) + port = standard.get("port", Absent) + expires = standard.get("expires", Absent) + + # set the easy defaults + version = standard.get("version", None) + if version is not None: + try: + version = int(version) + except ValueError: + return None # invalid version, ignore cookie + secure = standard.get("secure", False) + # (discard is also set if expires is Absent) + discard = standard.get("discard", False) + comment = standard.get("comment", None) + comment_url = standard.get("commenturl", None) + + # set default path + if path is not Absent and path != "": + path_specified = True + path = escape_path(path) + else: + path_specified = False + path = request_path(request) + i = path.rfind("/") + if i != -1: + if version == 0: + # Netscape spec parts company from reality here + path = path[:i] + else: + path = path[:i+1] + if len(path) == 0: path = "/" + + # set default domain + domain_specified = domain is not Absent + # but first we have to remember whether it starts with a dot + domain_initial_dot = False + if domain_specified: + domain_initial_dot = bool(domain.startswith(".")) + if domain is Absent: + req_host, erhn = eff_request_host(request) + domain = erhn + elif not domain.startswith("."): + domain = "."+domain + + # set default port + port_specified = False + if port is not Absent: + if port is None: + # Port attr present, but has no value: default to request port. + # Cookie should then only be sent back on that port. + port = request_port(request) + else: + port_specified = True + port = re.sub(r"\s+", "", port) + else: + # No port attr present. Cookie can be sent back on any port. 
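+            # (Contrast the branch above: a "Port" attribute present with
+            # no value pins the cookie to the port of the current request.)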
+ port = None + + # set default expires and discard + if expires is Absent: + expires = None + discard = True + elif expires <= self._now: + # Expiry date in past is request to delete cookie. This can't be + # in DefaultCookiePolicy, because can't delete cookies there. + try: + self.clear(domain, path, name) + except KeyError: + pass + _debug("Expiring cookie, domain='%s', path='%s', name='%s'", + domain, path, name) + return None + + return Cookie(version, + name, value, + port, port_specified, + domain, domain_specified, domain_initial_dot, + path, path_specified, + secure, + expires, + discard, + comment, + comment_url, + rest) + + def _cookies_from_attrs_set(self, attrs_set, request): + cookie_tuples = self._normalized_cookie_tuples(attrs_set) + + cookies = [] + for tup in cookie_tuples: + cookie = self._cookie_from_cookie_tuple(tup, request) + if cookie: cookies.append(cookie) + return cookies + + def _process_rfc2109_cookies(self, cookies): + rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None) + if rfc2109_as_ns is None: + rfc2109_as_ns = not self._policy.rfc2965 + for cookie in cookies: + if cookie.version == 1: + cookie.rfc2109 = True + if rfc2109_as_ns: + # treat 2109 cookies as Netscape cookies rather than + # as RFC2965 cookies + cookie.version = 0 + + def make_cookies(self, response, request): + """Return sequence of Cookie objects extracted from response object.""" + # get cookie-attributes for RFC 2965 and Netscape protocols + headers = response.info() + rfc2965_hdrs = headers.get_all("Set-Cookie2", []) + ns_hdrs = headers.get_all("Set-Cookie", []) + + rfc2965 = self._policy.rfc2965 + netscape = self._policy.netscape + + if ((not rfc2965_hdrs and not ns_hdrs) or + (not ns_hdrs and not rfc2965) or + (not rfc2965_hdrs and not netscape) or + (not netscape and not rfc2965)): + return [] # no relevant cookie headers: quick exit + + try: + cookies = self._cookies_from_attrs_set( + split_header_words(rfc2965_hdrs), request) + except Exception: + _warn_unhandled_exception() + cookies = [] + + if ns_hdrs and netscape: + try: + # RFC 2109 and Netscape cookies + ns_cookies = self._cookies_from_attrs_set( + parse_ns_headers(ns_hdrs), request) + except Exception: + _warn_unhandled_exception() + ns_cookies = [] + self._process_rfc2109_cookies(ns_cookies) + + # Look for Netscape cookies (from Set-Cookie headers) that match + # corresponding RFC 2965 cookies (from Set-Cookie2 headers). + # For each match, keep the RFC 2965 cookie and ignore the Netscape + # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are + # bundled in with the Netscape cookies for this purpose, which is + # reasonable behaviour. 
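+        # For example, if "Set-Cookie2: sid=1; Version=1" and
+        # "Set-Cookie: sid=1" both arrive for the same domain and path,
+        # only the Set-Cookie2 cookie survives the filter below.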
+ if rfc2965: + lookup = {} + for cookie in cookies: + lookup[(cookie.domain, cookie.path, cookie.name)] = None + + def no_matching_rfc2965(ns_cookie, lookup=lookup): + key = ns_cookie.domain, ns_cookie.path, ns_cookie.name + return key not in lookup + ns_cookies = filter(no_matching_rfc2965, ns_cookies) + + if ns_cookies: + cookies.extend(ns_cookies) + + return cookies + + def set_cookie_if_ok(self, cookie, request): + """Set a cookie if policy says it's OK to do so.""" + self._cookies_lock.acquire() + try: + self._policy._now = self._now = int(time.time()) + + if self._policy.set_ok(cookie, request): + self.set_cookie(cookie) + + + finally: + self._cookies_lock.release() + + def set_cookie(self, cookie): + """Set a cookie, without checking whether or not it should be set.""" + c = self._cookies + self._cookies_lock.acquire() + try: + if cookie.domain not in c: c[cookie.domain] = {} + c2 = c[cookie.domain] + if cookie.path not in c2: c2[cookie.path] = {} + c3 = c2[cookie.path] + c3[cookie.name] = cookie + finally: + self._cookies_lock.release() + + def extract_cookies(self, response, request): + """Extract cookies from response, where allowable given the request.""" + _debug("extract_cookies: %s", response.info()) + self._cookies_lock.acquire() + try: + self._policy._now = self._now = int(time.time()) + + for cookie in self.make_cookies(response, request): + if self._policy.set_ok(cookie, request): + _debug(" setting cookie: %s", cookie) + self.set_cookie(cookie) + finally: + self._cookies_lock.release() + + def clear(self, domain=None, path=None, name=None): + """Clear some cookies. + + Invoking this method without arguments will clear all cookies. If + given a single argument, only cookies belonging to that domain will be + removed. If given two arguments, cookies belonging to the specified + path within that domain are removed. If given three arguments, then + the cookie with the specified name, path and domain is removed. + + Raises KeyError if no matching cookie exists. + + """ + if name is not None: + if (domain is None) or (path is None): + raise ValueError( + "domain and path must be given to remove a cookie by name") + del self._cookies[domain][path][name] + elif path is not None: + if domain is None: + raise ValueError( + "domain must be given to remove cookies by path") + del self._cookies[domain][path] + elif domain is not None: + del self._cookies[domain] + else: + self._cookies = {} + + def clear_session_cookies(self): + """Discard all session cookies. + + Note that the .save() method won't save session cookies anyway, unless + you ask otherwise by passing a true ignore_discard argument. + + """ + self._cookies_lock.acquire() + try: + for cookie in self: + if cookie.discard: + self.clear(cookie.domain, cookie.path, cookie.name) + finally: + self._cookies_lock.release() + + def clear_expired_cookies(self): + """Discard all expired cookies. + + You probably don't need to call this method: expired cookies are never + sent back to the server (provided you're using DefaultCookiePolicy), + this method is called by CookieJar itself every so often, and the + .save() method won't save expired cookies anyway (unless you ask + otherwise by passing a true ignore_expires argument). 
+ + """ + self._cookies_lock.acquire() + try: + now = time.time() + for cookie in self: + if cookie.is_expired(now): + self.clear(cookie.domain, cookie.path, cookie.name) + finally: + self._cookies_lock.release() + + def __iter__(self): + return deepvalues(self._cookies) + + def __len__(self): + """Return number of contained cookies.""" + i = 0 + for cookie in self: i = i + 1 + return i + + @as_native_str() + def __repr__(self): + r = [] + for cookie in self: r.append(repr(cookie)) + return "<%s[%s]>" % (self.__class__, ", ".join(r)) + + def __str__(self): + r = [] + for cookie in self: r.append(str(cookie)) + return "<%s[%s]>" % (self.__class__, ", ".join(r)) + + +# derives from IOError for backwards-compatibility with Python 2.4.0 +class LoadError(IOError): pass + +class FileCookieJar(CookieJar): + """CookieJar that can be loaded from and saved to a file.""" + + def __init__(self, filename=None, delayload=False, policy=None): + """ + Cookies are NOT loaded from the named file until either the .load() or + .revert() method is called. + + """ + CookieJar.__init__(self, policy) + if filename is not None: + try: + filename+"" + except: + raise ValueError("filename must be string-like") + self.filename = filename + self.delayload = bool(delayload) + + def save(self, filename=None, ignore_discard=False, ignore_expires=False): + """Save cookies to a file.""" + raise NotImplementedError() + + def load(self, filename=None, ignore_discard=False, ignore_expires=False): + """Load cookies from a file.""" + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + f = open(filename) + try: + self._really_load(f, filename, ignore_discard, ignore_expires) + finally: + f.close() + + def revert(self, filename=None, + ignore_discard=False, ignore_expires=False): + """Clear all cookies and reload cookies from a saved file. + + Raises LoadError (or IOError) if reversion is not successful; the + object's state will not be altered if this happens. + + """ + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + self._cookies_lock.acquire() + try: + + old_state = copy.deepcopy(self._cookies) + self._cookies = {} + try: + self.load(filename, ignore_discard, ignore_expires) + except (LoadError, IOError): + self._cookies = old_state + raise + + finally: + self._cookies_lock.release() + + +def lwp_cookie_str(cookie): + """Return string representation of Cookie in an the LWP cookie file format. + + Actually, the format is extended a bit -- see module docstring. + + """ + h = [(cookie.name, cookie.value), + ("path", cookie.path), + ("domain", cookie.domain)] + if cookie.port is not None: h.append(("port", cookie.port)) + if cookie.path_specified: h.append(("path_spec", None)) + if cookie.port_specified: h.append(("port_spec", None)) + if cookie.domain_initial_dot: h.append(("domain_dot", None)) + if cookie.secure: h.append(("secure", None)) + if cookie.expires: h.append(("expires", + time2isoz(float(cookie.expires)))) + if cookie.discard: h.append(("discard", None)) + if cookie.comment: h.append(("comment", cookie.comment)) + if cookie.comment_url: h.append(("commenturl", cookie.comment_url)) + + keys = sorted(cookie._rest.keys()) + for k in keys: + h.append((k, str(cookie._rest[k]))) + + h.append(("version", str(cookie.version))) + + return join_header_words([h]) + +class LWPCookieJar(FileCookieJar): + """ + The LWPCookieJar saves a sequence of "Set-Cookie3" lines. 
+ "Set-Cookie3" is the format used by the libwww-perl libary, not known + to be compatible with any browser, but which is easy to read and + doesn't lose information about RFC 2965 cookies. + + Additional methods + + as_lwp_str(ignore_discard=True, ignore_expired=True) + + """ + + def as_lwp_str(self, ignore_discard=True, ignore_expires=True): + """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers. + + ignore_discard and ignore_expires: see docstring for FileCookieJar.save + + """ + now = time.time() + r = [] + for cookie in self: + if not ignore_discard and cookie.discard: + continue + if not ignore_expires and cookie.is_expired(now): + continue + r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie)) + return "\n".join(r+[""]) + + def save(self, filename=None, ignore_discard=False, ignore_expires=False): + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + f = open(filename, "w") + try: + # There really isn't an LWP Cookies 2.0 format, but this indicates + # that there is extra information in here (domain_dot and + # port_spec) while still being compatible with libwww-perl, I hope. + f.write("#LWP-Cookies-2.0\n") + f.write(self.as_lwp_str(ignore_discard, ignore_expires)) + finally: + f.close() + + def _really_load(self, f, filename, ignore_discard, ignore_expires): + magic = f.readline() + if not self.magic_re.search(magic): + msg = ("%r does not look like a Set-Cookie3 (LWP) format " + "file" % filename) + raise LoadError(msg) + + now = time.time() + + header = "Set-Cookie3:" + boolean_attrs = ("port_spec", "path_spec", "domain_dot", + "secure", "discard") + value_attrs = ("version", + "port", "path", "domain", + "expires", + "comment", "commenturl") + + try: + while 1: + line = f.readline() + if line == "": break + if not line.startswith(header): + continue + line = line[len(header):].strip() + + for data in split_header_words([line]): + name, value = data[0] + standard = {} + rest = {} + for k in boolean_attrs: + standard[k] = False + for k, v in data[1:]: + if k is not None: + lc = k.lower() + else: + lc = None + # don't lose case distinction for unknown fields + if (lc in value_attrs) or (lc in boolean_attrs): + k = lc + if k in boolean_attrs: + if v is None: v = True + standard[k] = v + elif k in value_attrs: + standard[k] = v + else: + rest[k] = v + + h = standard.get + expires = h("expires") + discard = h("discard") + if expires is not None: + expires = iso2time(expires) + if expires is None: + discard = True + domain = h("domain") + domain_specified = domain.startswith(".") + c = Cookie(h("version"), name, value, + h("port"), h("port_spec"), + domain, domain_specified, h("domain_dot"), + h("path"), h("path_spec"), + h("secure"), + expires, + discard, + h("comment"), + h("commenturl"), + rest) + if not ignore_discard and c.discard: + continue + if not ignore_expires and c.is_expired(now): + continue + self.set_cookie(c) + + except IOError: + raise + except Exception: + _warn_unhandled_exception() + raise LoadError("invalid Set-Cookie3 format file %r: %r" % + (filename, line)) + + +class MozillaCookieJar(FileCookieJar): + """ + + WARNING: you may want to backup your browser's cookies file if you use + this class to save cookies. I *think* it works, but there have been + bugs in the past! + + This class differs from CookieJar only in the format it uses to save and + load cookies to and from a file. This class uses the Mozilla/Netscape + `cookies.txt' format. 
lynx uses this file format, too. + + Don't expect cookies saved while the browser is running to be noticed by + the browser (in fact, Mozilla on unix will overwrite your saved cookies if + you change them on disk while it's running; on Windows, you probably can't + save at all while the browser is running). + + Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to + Netscape cookies on saving. + + In particular, the cookie version and port number information is lost, + together with information about whether or not Path, Port and Discard were + specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the + domain as set in the HTTP header started with a dot (yes, I'm aware some + domains in Netscape files start with a dot and some don't -- trust me, you + really don't want to know any more about this). + + Note that though Mozilla and Netscape use the same format, they use + slightly different headers. The class saves cookies using the Netscape + header by default (Mozilla can cope with that). + + """ + magic_re = re.compile("#( Netscape)? HTTP Cookie File") + header = """\ +# Netscape HTTP Cookie File +# http://www.netscape.com/newsref/std/cookie_spec.html +# This is a generated file! Do not edit. + +""" + + def _really_load(self, f, filename, ignore_discard, ignore_expires): + now = time.time() + + magic = f.readline() + if not self.magic_re.search(magic): + f.close() + raise LoadError( + "%r does not look like a Netscape format cookies file" % + filename) + + try: + while 1: + line = f.readline() + if line == "": break + + # last field may be absent, so keep any trailing tab + if line.endswith("\n"): line = line[:-1] + + # skip comments and blank lines XXX what is $ for? + if (line.strip().startswith(("#", "$")) or + line.strip() == ""): + continue + + domain, domain_specified, path, secure, expires, name, value = \ + line.split("\t") + secure = (secure == "TRUE") + domain_specified = (domain_specified == "TRUE") + if name == "": + # cookies.txt regards 'Set-Cookie: foo' as a cookie + # with no name, whereas http.cookiejar regards it as a + # cookie with no value. 
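+                        # So a line whose name field is empty but whose
+                        # value field is "foo" is stored here as the
+                        # valueless cookie "foo".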
+ name = value + value = None + + initial_dot = domain.startswith(".") + assert domain_specified == initial_dot + + discard = False + if expires == "": + expires = None + discard = True + + # assume path_specified is false + c = Cookie(0, name, value, + None, False, + domain, domain_specified, initial_dot, + path, False, + secure, + expires, + discard, + None, + None, + {}) + if not ignore_discard and c.discard: + continue + if not ignore_expires and c.is_expired(now): + continue + self.set_cookie(c) + + except IOError: + raise + except Exception: + _warn_unhandled_exception() + raise LoadError("invalid Netscape format cookies file %r: %r" % + (filename, line)) + + def save(self, filename=None, ignore_discard=False, ignore_expires=False): + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + f = open(filename, "w") + try: + f.write(self.header) + now = time.time() + for cookie in self: + if not ignore_discard and cookie.discard: + continue + if not ignore_expires and cookie.is_expired(now): + continue + if cookie.secure: secure = "TRUE" + else: secure = "FALSE" + if cookie.domain.startswith("."): initial_dot = "TRUE" + else: initial_dot = "FALSE" + if cookie.expires is not None: + expires = str(cookie.expires) + else: + expires = "" + if cookie.value is None: + # cookies.txt regards 'Set-Cookie: foo' as a cookie + # with no name, whereas http.cookiejar regards it as a + # cookie with no value. + name = "" + value = cookie.name + else: + name = cookie.name + value = cookie.value + f.write( + "\t".join([cookie.domain, initial_dot, cookie.path, + secure, expires, name, value])+ + "\n") + finally: + f.close() diff --git a/pype/modules/ftrack/python2_vendor/future/backports/http/cookies.py b/pype/modules/ftrack/python2_vendor/future/backports/http/cookies.py new file mode 100644 index 0000000000..8bb61e22c4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/http/cookies.py @@ -0,0 +1,598 @@ +#### +# Copyright 2000 by Timothy O'Malley +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software +# and its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Timothy O'Malley not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR +# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. +# +#### +# +# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp +# by Timothy O'Malley +# +# Cookie.py is a Python module for the handling of HTTP +# cookies as a Python dictionary. See RFC 2109 for more +# information on cookies. +# +# The original idea to treat Cookies as a dictionary came from +# Dave Mitchell (davem@magnet.com) in 1995, when he released the +# first version of nscookie.py. 
+# +#### + +r""" +http.cookies module ported to python-future from Py3.3 + +Here's a sample session to show how to use this module. +At the moment, this is the only documentation. + +The Basics +---------- + +Importing is easy... + + >>> from http import cookies + +Most of the time you start by creating a cookie. + + >>> C = cookies.SimpleCookie() + +Once you've created your Cookie, you can add values just as if it were +a dictionary. + + >>> C = cookies.SimpleCookie() + >>> C["fig"] = "newton" + >>> C["sugar"] = "wafer" + >>> C.output() + 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer' + +Notice that the printable representation of a Cookie is the +appropriate format for a Set-Cookie: header. This is the +default behavior. You can change the header and printed +attributes by using the .output() function + + >>> C = cookies.SimpleCookie() + >>> C["rocky"] = "road" + >>> C["rocky"]["path"] = "/cookie" + >>> print(C.output(header="Cookie:")) + Cookie: rocky=road; Path=/cookie + >>> print(C.output(attrs=[], header="Cookie:")) + Cookie: rocky=road + +The load() method of a Cookie extracts cookies from a string. In a +CGI script, you would use this method to extract the cookies from the +HTTP_COOKIE environment variable. + + >>> C = cookies.SimpleCookie() + >>> C.load("chips=ahoy; vienna=finger") + >>> C.output() + 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger' + +The load() method is darn-tootin smart about identifying cookies +within a string. Escaped quotation marks, nested semicolons, and other +such trickeries do not confuse it. + + >>> C = cookies.SimpleCookie() + >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";') + >>> print(C) + Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;" + +Each element of the Cookie also supports all of the RFC 2109 +Cookie attributes. Here's an example which sets the Path +attribute. + + >>> C = cookies.SimpleCookie() + >>> C["oreo"] = "doublestuff" + >>> C["oreo"]["path"] = "/" + >>> print(C) + Set-Cookie: oreo=doublestuff; Path=/ + +Each dictionary element has a 'value' attribute, which gives you +back the value associated with the key. + + >>> C = cookies.SimpleCookie() + >>> C["twix"] = "none for you" + >>> C["twix"].value + 'none for you' + +The SimpleCookie expects that all values should be standard strings. +Just to be sure, SimpleCookie invokes the str() builtin to convert +the value to a string, when the values are set dictionary-style. + + >>> C = cookies.SimpleCookie() + >>> C["number"] = 7 + >>> C["string"] = "seven" + >>> C["number"].value + '7' + >>> C["string"].value + 'seven' + >>> C.output() + 'Set-Cookie: number=7\r\nSet-Cookie: string=seven' + +Finis. +""" +from __future__ import unicode_literals +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import +from future.builtins import chr, dict, int, str +from future.utils import PY2, as_native_str + +# +# Import our required modules +# +import re +if PY2: + re.ASCII = 0 # for py2 compatibility +import string + +__all__ = ["CookieError", "BaseCookie", "SimpleCookie"] + +_nulljoin = ''.join +_semispacejoin = '; '.join +_spacejoin = ' '.join + +# +# Define an exception visible to External modules +# +class CookieError(Exception): + pass + + +# These quoting routines conform to the RFC2109 specification, which in +# turn references the character definitions from RFC2068. They provide +# a two-way quoting algorithm. 
Any non-text character is translated +# into a 4 character sequence: a forward-slash followed by the +# three-digit octal equivalent of the character. Any '\' or '"' is +# quoted with a preceeding '\' slash. +# +# These are taken from RFC2068 and RFC2109. +# _LegalChars is the list of chars which don't require "'s +# _Translator hash-table for fast quoting +# +_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:" +_Translator = { + '\000' : '\\000', '\001' : '\\001', '\002' : '\\002', + '\003' : '\\003', '\004' : '\\004', '\005' : '\\005', + '\006' : '\\006', '\007' : '\\007', '\010' : '\\010', + '\011' : '\\011', '\012' : '\\012', '\013' : '\\013', + '\014' : '\\014', '\015' : '\\015', '\016' : '\\016', + '\017' : '\\017', '\020' : '\\020', '\021' : '\\021', + '\022' : '\\022', '\023' : '\\023', '\024' : '\\024', + '\025' : '\\025', '\026' : '\\026', '\027' : '\\027', + '\030' : '\\030', '\031' : '\\031', '\032' : '\\032', + '\033' : '\\033', '\034' : '\\034', '\035' : '\\035', + '\036' : '\\036', '\037' : '\\037', + + # Because of the way browsers really handle cookies (as opposed + # to what the RFC says) we also encode , and ; + + ',' : '\\054', ';' : '\\073', + + '"' : '\\"', '\\' : '\\\\', + + '\177' : '\\177', '\200' : '\\200', '\201' : '\\201', + '\202' : '\\202', '\203' : '\\203', '\204' : '\\204', + '\205' : '\\205', '\206' : '\\206', '\207' : '\\207', + '\210' : '\\210', '\211' : '\\211', '\212' : '\\212', + '\213' : '\\213', '\214' : '\\214', '\215' : '\\215', + '\216' : '\\216', '\217' : '\\217', '\220' : '\\220', + '\221' : '\\221', '\222' : '\\222', '\223' : '\\223', + '\224' : '\\224', '\225' : '\\225', '\226' : '\\226', + '\227' : '\\227', '\230' : '\\230', '\231' : '\\231', + '\232' : '\\232', '\233' : '\\233', '\234' : '\\234', + '\235' : '\\235', '\236' : '\\236', '\237' : '\\237', + '\240' : '\\240', '\241' : '\\241', '\242' : '\\242', + '\243' : '\\243', '\244' : '\\244', '\245' : '\\245', + '\246' : '\\246', '\247' : '\\247', '\250' : '\\250', + '\251' : '\\251', '\252' : '\\252', '\253' : '\\253', + '\254' : '\\254', '\255' : '\\255', '\256' : '\\256', + '\257' : '\\257', '\260' : '\\260', '\261' : '\\261', + '\262' : '\\262', '\263' : '\\263', '\264' : '\\264', + '\265' : '\\265', '\266' : '\\266', '\267' : '\\267', + '\270' : '\\270', '\271' : '\\271', '\272' : '\\272', + '\273' : '\\273', '\274' : '\\274', '\275' : '\\275', + '\276' : '\\276', '\277' : '\\277', '\300' : '\\300', + '\301' : '\\301', '\302' : '\\302', '\303' : '\\303', + '\304' : '\\304', '\305' : '\\305', '\306' : '\\306', + '\307' : '\\307', '\310' : '\\310', '\311' : '\\311', + '\312' : '\\312', '\313' : '\\313', '\314' : '\\314', + '\315' : '\\315', '\316' : '\\316', '\317' : '\\317', + '\320' : '\\320', '\321' : '\\321', '\322' : '\\322', + '\323' : '\\323', '\324' : '\\324', '\325' : '\\325', + '\326' : '\\326', '\327' : '\\327', '\330' : '\\330', + '\331' : '\\331', '\332' : '\\332', '\333' : '\\333', + '\334' : '\\334', '\335' : '\\335', '\336' : '\\336', + '\337' : '\\337', '\340' : '\\340', '\341' : '\\341', + '\342' : '\\342', '\343' : '\\343', '\344' : '\\344', + '\345' : '\\345', '\346' : '\\346', '\347' : '\\347', + '\350' : '\\350', '\351' : '\\351', '\352' : '\\352', + '\353' : '\\353', '\354' : '\\354', '\355' : '\\355', + '\356' : '\\356', '\357' : '\\357', '\360' : '\\360', + '\361' : '\\361', '\362' : '\\362', '\363' : '\\363', + '\364' : '\\364', '\365' : '\\365', '\366' : '\\366', + '\367' : '\\367', '\370' : '\\370', '\371' : '\\371', + '\372' : '\\372', 
'\373' : '\\373', '\374' : '\\374', + '\375' : '\\375', '\376' : '\\376', '\377' : '\\377' + } + +def _quote(str, LegalChars=_LegalChars): + r"""Quote a string for use in a cookie header. + + If the string does not need to be double-quoted, then just return the + string. Otherwise, surround the string in doublequotes and quote + (with a \) special characters. + """ + if all(c in LegalChars for c in str): + return str + else: + return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"' + + +_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]") +_QuotePatt = re.compile(r"[\\].") + +def _unquote(mystr): + # If there aren't any doublequotes, + # then there can't be any special characters. See RFC 2109. + if len(mystr) < 2: + return mystr + if mystr[0] != '"' or mystr[-1] != '"': + return mystr + + # We have to assume that we must decode this string. + # Down to work. + + # Remove the "s + mystr = mystr[1:-1] + + # Check for special sequences. Examples: + # \012 --> \n + # \" --> " + # + i = 0 + n = len(mystr) + res = [] + while 0 <= i < n: + o_match = _OctalPatt.search(mystr, i) + q_match = _QuotePatt.search(mystr, i) + if not o_match and not q_match: # Neither matched + res.append(mystr[i:]) + break + # else: + j = k = -1 + if o_match: + j = o_match.start(0) + if q_match: + k = q_match.start(0) + if q_match and (not o_match or k < j): # QuotePatt matched + res.append(mystr[i:k]) + res.append(mystr[k+1]) + i = k + 2 + else: # OctalPatt matched + res.append(mystr[i:j]) + res.append(chr(int(mystr[j+1:j+4], 8))) + i = j + 4 + return _nulljoin(res) + +# The _getdate() routine is used to set the expiration time in the cookie's HTTP +# header. By default, _getdate() returns the current time in the appropriate +# "expires" format for a Set-Cookie header. The one optional argument is an +# offset from now, in seconds. For example, an offset of -3600 means "one hour +# ago". The offset may be a floating point number. +# + +_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + +_monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + +def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname): + from time import gmtime, time + now = time() + year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future) + return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \ + (weekdayname[wd], day, monthname[month], year, hh, mm, ss) + + +class Morsel(dict): + """A class to hold ONE (key, value) pair. + + In a cookie, each such pair may have several attributes, so this class is + used to keep the attributes associated with the appropriate key,value pair. + This class also includes a coded_value attribute, which is used to hold + the network representation of the value. This is most useful when Python + objects are pickled for network transit. + """ + # RFC 2109 lists these attributes as reserved: + # path comment domain + # max-age secure version + # + # For historical reasons, these attributes are also reserved: + # expires + # + # This is an extension from Microsoft: + # httponly + # + # This dictionary provides a mapping from the lowercase + # variant on the left to the appropriate traditional + # formatting on the right. 
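+    # (e.g. a stored morsel["max-age"] of 3600 is rendered by
+    # OutputString() below as "Max-Age=3600").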
+    _reserved = {
+        "expires" : "expires",
+        "path" : "Path",
+        "comment" : "Comment",
+        "domain" : "Domain",
+        "max-age" : "Max-Age",
+        "secure" : "secure",
+        "httponly" : "httponly",
+        "version" : "Version",
+    }
+
+    _flags = set(['secure', 'httponly'])
+
+    def __init__(self):
+        # Set defaults
+        self.key = self.value = self.coded_value = None
+
+        # Set default attributes
+        for key in self._reserved:
+            dict.__setitem__(self, key, "")
+
+    def __setitem__(self, K, V):
+        K = K.lower()
+        if not K in self._reserved:
+            raise CookieError("Invalid Attribute %s" % K)
+        dict.__setitem__(self, K, V)
+
+    def isReservedKey(self, K):
+        return K.lower() in self._reserved
+
+    def set(self, key, val, coded_val, LegalChars=_LegalChars):
+        # First we verify that the key isn't a reserved word
+        # Second we make sure it only contains legal characters
+        if key.lower() in self._reserved:
+            raise CookieError("Attempt to set a reserved key: %s" % key)
+        if any(c not in LegalChars for c in key):
+            raise CookieError("Illegal key value: %s" % key)
+
+        # It's a good key, so save it.
+        self.key = key
+        self.value = val
+        self.coded_value = coded_val
+
+    def output(self, attrs=None, header="Set-Cookie:"):
+        return "%s %s" % (header, self.OutputString(attrs))
+
+    __str__ = output
+
+    @as_native_str()
+    def __repr__(self):
+        if PY2 and isinstance(self.value, unicode):
+            val = str(self.value) # make it a newstr to remove the u prefix
+        else:
+            val = self.value
+        return '<%s: %s=%s>' % (self.__class__.__name__,
+                                str(self.key), repr(val))
+
+    def js_output(self, attrs=None):
+        # Print javascript
+        return """
+        <script type="text/javascript">
+        <!-- begin hidden
+        document.cookie = \"%s\";
+        // end hidden -->
+        </script>
+        """ % (self.OutputString(attrs).replace('"', r'\"'))
+
+    def OutputString(self, attrs=None):
+        # Build up our result
+        #
+        result = []
+        append = result.append
+
+        # First, the key=value pair
+        append("%s=%s" % (self.key, self.coded_value))
+
+        # Now add any defined attributes
+        if attrs is None:
+            attrs = self._reserved
+        items = sorted(self.items())
+        for key, value in items:
+            if value == "":
+                continue
+            if key not in attrs:
+                continue
+            if key == "expires" and isinstance(value, int):
+                append("%s=%s" % (self._reserved[key], _getdate(value)))
+            elif key == "max-age" and isinstance(value, int):
+                append("%s=%d" % (self._reserved[key], value))
+            elif key == "secure":
+                append(str(self._reserved[key]))
+            elif key == "httponly":
+                append(str(self._reserved[key]))
+            else:
+                append("%s=%s" % (self._reserved[key], value))
+
+        # Return the result
+        return _semispacejoin(result)
+
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications. I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs. As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
+_CookiePattern = re.compile(r"""
+    (?x)                           # This is a verbose pattern
+    (?P<key>                       # Start of group 'key'
+    """ + _LegalCharsPatt + r"""+?   # Any word of at least one letter
+    )                              # End of group 'key'
+    (                              # Optional group: there may not be a value.
+    \s*=\s*                        # Equal Sign
+    (?P<val>                       # Start of group 'val'
+    "(?:[^\\"]|\\.)*"              # Any doublequoted string
+    |                              # or
+    \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
+    |                              # or
+    """ + _LegalCharsPatt + r"""*  # Any word or empty string
+    )                              # End of group 'val'
+    )?                             # End of optional value group
+    \s*                            # Any number of spaces.
+    (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
+    """, re.ASCII)                 # May be removed if safe.
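+
+# For illustration, the kind of header value this pattern picks apart
+# (SimpleCookie is defined at the end of this module):
+#
+#     >>> C = SimpleCookie()
+#     >>> C.load('sid=abc123; expires=Wed, 09-Jun-2021 10:18:14 GMT; Path=/')
+#     >>> C["sid"].value
+#     'abc123'
+#     >>> C["sid"]["expires"]
+#     'Wed, 09-Jun-2021 10:18:14 GMT'
+#
+# The dedicated "expires" alternative exists because such date values
+# contain spaces, which would otherwise end a match early.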
+ + +# At long last, here is the cookie class. Using this class is almost just like +# using a dictionary. See this module's docstring for example usage. +# +class BaseCookie(dict): + """A container class for a set of Morsels.""" + + def value_decode(self, val): + """real_value, coded_value = value_decode(STRING) + Called prior to setting a cookie's value from the network + representation. The VALUE is the value read from HTTP + header. + Override this function to modify the behavior of cookies. + """ + return val, val + + def value_encode(self, val): + """real_value, coded_value = value_encode(VALUE) + Called prior to setting a cookie's value from the dictionary + representation. The VALUE is the value being assigned. + Override this function to modify the behavior of cookies. + """ + strval = str(val) + return strval, strval + + def __init__(self, input=None): + if input: + self.load(input) + + def __set(self, key, real_value, coded_value): + """Private method for setting a cookie's value""" + M = self.get(key, Morsel()) + M.set(key, real_value, coded_value) + dict.__setitem__(self, key, M) + + def __setitem__(self, key, value): + """Dictionary style assignment.""" + rval, cval = self.value_encode(value) + self.__set(key, rval, cval) + + def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"): + """Return a string suitable for HTTP.""" + result = [] + items = sorted(self.items()) + for key, value in items: + result.append(value.output(attrs, header)) + return sep.join(result) + + __str__ = output + + @as_native_str() + def __repr__(self): + l = [] + items = sorted(self.items()) + for key, value in items: + if PY2 and isinstance(value.value, unicode): + val = str(value.value) # make it a newstr to remove the u prefix + else: + val = value.value + l.append('%s=%s' % (str(key), repr(val))) + return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l)) + + def js_output(self, attrs=None): + """Return a string suitable for JavaScript.""" + result = [] + items = sorted(self.items()) + for key, value in items: + result.append(value.js_output(attrs)) + return _nulljoin(result) + + def load(self, rawdata): + """Load cookies from a string (presumably HTTP_COOKIE) or + from a dictionary. Loading cookies from a dictionary 'd' + is equivalent to calling: + map(Cookie.__setitem__, d.keys(), d.values()) + """ + if isinstance(rawdata, str): + self.__parse_string(rawdata) + else: + # self.update() wouldn't call our custom __setitem__ + for key, value in rawdata.items(): + self[key] = value + return + + def __parse_string(self, mystr, patt=_CookiePattern): + i = 0 # Our starting point + n = len(mystr) # Length of string + M = None # current morsel + + while 0 <= i < n: + # Start looking for a cookie + match = patt.search(mystr, i) + if not match: + # No more cookies + break + + key, value = match.group("key"), match.group("val") + + i = match.end(0) + + # Parse the key, value in case it's metainfo + if key[0] == "$": + # We ignore attributes which pertain to the cookie + # mechanism as a whole. See RFC 2109. + # (Does anyone care?) + if M: + M[key[1:]] = value + elif key.lower() in Morsel._reserved: + if M: + if value is None: + if key.lower() in Morsel._flags: + M[key] = True + else: + M[key] = _unquote(value) + elif value is not None: + rval, cval = self.value_decode(value) + self.__set(key, rval, cval) + M = self[key] + + +class SimpleCookie(BaseCookie): + """ + SimpleCookie supports strings as cookie values. 
When setting + the value using the dictionary assignment notation, SimpleCookie + calls the builtin str() to convert the value to a string. Values + received from HTTP are kept as strings. + """ + def value_decode(self, val): + return _unquote(val), val + + def value_encode(self, val): + strval = str(val) + return strval, _quote(strval) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/http/server.py b/pype/modules/ftrack/python2_vendor/future/backports/http/server.py new file mode 100644 index 0000000000..b1c11e0c73 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/http/server.py @@ -0,0 +1,1226 @@ +"""HTTP server classes. + +From Python 3.3 + +Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see +SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, +and CGIHTTPRequestHandler for CGI scripts. + +It does, however, optionally implement HTTP/1.1 persistent connections, +as of version 0.3. + +Notes on CGIHTTPRequestHandler +------------------------------ + +This class implements GET and POST requests to cgi-bin scripts. + +If the os.fork() function is not present (e.g. on Windows), +subprocess.Popen() is used as a fallback, with slightly altered semantics. + +In all cases, the implementation is intentionally naive -- all +requests are executed synchronously. + +SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL +-- it may execute arbitrary Python code or external programs. + +Note that status code 200 is sent prior to execution of a CGI script, so +scripts cannot send other status codes such as 302 (redirect). + +XXX To do: + +- log requests even later (to capture byte count) +- log user-agent header and other interesting goodies +- send error log to separate file +""" + +from __future__ import (absolute_import, division, + print_function, unicode_literals) +from future import utils +from future.builtins import * + + +# See also: +# +# HTTP Working Group T. Berners-Lee +# INTERNET-DRAFT R. T. Fielding +# H. Frystyk Nielsen +# Expires September 8, 1995 March 8, 1995 +# +# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt +# +# and +# +# Network Working Group R. Fielding +# Request for Comments: 2616 et al +# Obsoletes: 2068 June 1999 +# Category: Standards Track +# +# URL: http://www.faqs.org/rfcs/rfc2616.html + +# Log files +# --------- +# +# Here's a quote from the NCSA httpd docs about log file format. +# +# | The logfile format is as follows. Each line consists of: +# | +# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb +# | +# | host: Either the DNS name or the IP number of the remote client +# | rfc931: Any information returned by identd for this person, +# | - otherwise. +# | authuser: If user sent a userid for authentication, the user name, +# | - otherwise. +# | DD: Day +# | Mon: Month (calendar name) +# | YYYY: Year +# | hh: hour (24-hour format, the machine's timezone) +# | mm: minutes +# | ss: seconds +# | request: The first line of the HTTP request as sent by the client. +# | ddd: the status code returned by the server, - if not available. +# | bbbb: the total number of bytes sent, +# | *not including the HTTP/1.0 header*, - if not available +# | +# | You can determine the name of the file accessed through request. +# +# (Actually, the latter is only true if you know the server configuration +# at the time the request was made!) 
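+#
+# As a purely illustrative example (host, date and request are made up), a
+# log line in the format quoted above would read:
+#
+# | 127.0.0.1 - - [27/Nov/2020:14:03:09] "GET /index.html HTTP/1.0" 200 2326
+#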
+
+__version__ = "0.6"
+
+__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
+
+from future.backports import html
+from future.backports.http import client as http_client
+from future.backports.urllib import parse as urllib_parse
+from future.backports import socketserver
+
+import io
+import mimetypes
+import os
+import posixpath
+import select
+import shutil
+import socket # For gethostbyaddr()
+import sys
+import time
+import copy
+import argparse
+
+
+# Default error message template
+DEFAULT_ERROR_MESSAGE = """\
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+        "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+    <head>
+        <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
+        <title>Error response</title>
+    </head>
+    <body>
+        <h1>Error response</h1>
+        <p>Error code: %(code)d</p>
+        <p>Message: %(message)s.</p>
+        <p>Error code explanation: %(code)s - %(explain)s.</p>
+    </body>
+</html>
+"""
+
+DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
+
+def _quote_html(html):
+    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+
+class HTTPServer(socketserver.TCPServer):
+
+    allow_reuse_address = 1    # Seems to make sense in testing environment
+
+    def server_bind(self):
+        """Override server_bind to store the server name."""
+        socketserver.TCPServer.server_bind(self)
+        host, port = self.socket.getsockname()[:2]
+        self.server_name = socket.getfqdn(host)
+        self.server_port = port
+
+
+class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
+
+    """HTTP request handler base class.
+
+    The following explanation of HTTP serves to guide you through the
+    code as well as to expose any misunderstandings I may have about
+    HTTP (so you don't need to read the code to figure out I'm wrong
+    :-).
+
+    HTTP (HyperText Transfer Protocol) is an extensible protocol on
+    top of a reliable stream transport (e.g. TCP/IP).  The protocol
+    recognizes three parts to a request:
+
+    1. One line identifying the request type and path
+    2. An optional set of RFC-822-style headers
+    3. An optional data part
+
+    The headers and data are separated by a blank line.
+
+    The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+    <path> is encoded using the URL encoding scheme (using %xx to signify
+    the ASCII character with hex code xx).
+
+    The specification specifies that lines are separated by CRLF but
+    for compatibility with the widest range of clients recommends
+    servers also handle LF.  Similarly, whitespace in the request line
+    is treated sensibly (allowing multiple spaces between components
+    and allowing trailing whitespace).
+
+    Similarly, for output, lines ought to be separated by CRLF pairs
+    but most clients grok LF characters just fine.
+
+    If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+    0.9 request; this form has no optional headers and data part and
+    the reply consists of just the data.
+
+    The reply form of the HTTP 1.x protocol again has three parts:
+
+    1. One line giving the response code
+    2. An optional set of RFC-822-style headers
+    3. The data
+
+    Again, the headers and data are separated by a blank line.
+
+    The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+    human-readable string explaining what the response code means.
+
+    This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>).  Specifically,
+    a request SPAM will be handled by a method do_SPAM().  If no
+    such method exists the server sends an error response to the
+    client.  If it exists, it is called with no arguments:
+
+    do_SPAM()
+
+    Note that the request name is case sensitive (i.e. SPAM and spam
+    are different requests).
+
+    The various request details are stored in instance variables:
+
+    - client_address is the client IP address in the form (host,
+    port);
+
+    - command, path and version are the broken-down request line;
+
+    - headers is an instance of email.message.Message (or a derived
+    class) containing the header information;
+
+    - rfile is a file object open for reading positioned at the
+    start of the optional input data part;
+
+    - wfile is a file object open for writing.
+
+    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+    The first thing to be written must be the response line.  Then
+    follow 0 or more header lines, then a blank line, and then the
+    actual data (if any).  The meaning of the header lines depends on
+    the command executed by the server; in most cases, when data is
+    returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+    e.g. "text/html" or "text/plain".
+
+    """
+
+    # The Python system version, truncated to its first component.
+    sys_version = "Python/" + sys.version.split()[0]
+
+    # The server software version.  You may want to override this.
+    # The format is multiple whitespace-separated strings,
+    # where each string is of the form name[/version].
+    server_version = "BaseHTTP/" + __version__
+
+    error_message_format = DEFAULT_ERROR_MESSAGE
+    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+    # The default request version.  This only affects responses up until
+    # the point where the request line is parsed, so it mainly decides what
+    # the client gets back when sending a malformed request line.
+    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+    default_request_version = "HTTP/0.9"
+
+    def parse_request(self):
+        """Parse a request (internal).
+
+        The request should be stored in self.raw_requestline; the results
+        are in self.command, self.path, self.request_version and
+        self.headers.
+
+        Return True for success, False for failure; on failure, an
+        error is sent back.
+
+        """
+        self.command = None  # set in case of error on the first line
+        self.request_version = version = self.default_request_version
+        self.close_connection = 1
+        requestline = str(self.raw_requestline, 'iso-8859-1')
+        requestline = requestline.rstrip('\r\n')
+        self.requestline = requestline
+        words = requestline.split()
+        if len(words) == 3:
+            command, path, version = words
+            if version[:5] != 'HTTP/':
+                self.send_error(400, "Bad request version (%r)" % version)
+                return False
+            try:
+                base_version_number = version.split('/', 1)[1]
+                version_number = base_version_number.split(".")
+                # RFC 2145 section 3.1 says there can be only one "." and
+                #   - major and minor numbers MUST be treated as
+                #      separate integers;
+                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
+                #      turn is lower than HTTP/12.3;
+                #   - Leading zeros MUST be ignored by recipients.
+                if len(version_number) != 2:
+                    raise ValueError
+                version_number = int(version_number[0]), int(version_number[1])
+            except (ValueError, IndexError):
+                self.send_error(400, "Bad request version (%r)" % version)
+                return False
+            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
+                self.close_connection = 0
+            if version_number >= (2, 0):
+                self.send_error(505,
+                                "Invalid HTTP Version (%s)" % base_version_number)
+                return False
+        elif len(words) == 2:
+            command, path = words
+            self.close_connection = 1
+            if command != 'GET':
+                self.send_error(400,
+                                "Bad HTTP/0.9 request type (%r)" % command)
+                return False
+        elif not words:
+            return False
+        else:
+            self.send_error(400, "Bad request syntax (%r)" % requestline)
+            return False
+        self.command, self.path, self.request_version = command, path, version
+
+        # Examine the headers and look for a Connection directive.
+ try: + self.headers = http_client.parse_headers(self.rfile, + _class=self.MessageClass) + except http_client.LineTooLong: + self.send_error(400, "Line too long") + return False + + conntype = self.headers.get('Connection', "") + if conntype.lower() == 'close': + self.close_connection = 1 + elif (conntype.lower() == 'keep-alive' and + self.protocol_version >= "HTTP/1.1"): + self.close_connection = 0 + # Examine the headers and look for an Expect directive + expect = self.headers.get('Expect', "") + if (expect.lower() == "100-continue" and + self.protocol_version >= "HTTP/1.1" and + self.request_version >= "HTTP/1.1"): + if not self.handle_expect_100(): + return False + return True + + def handle_expect_100(self): + """Decide what to do with an "Expect: 100-continue" header. + + If the client is expecting a 100 Continue response, we must + respond with either a 100 Continue or a final response before + waiting for the request body. The default is to always respond + with a 100 Continue. You can behave differently (for example, + reject unauthorized requests) by overriding this method. + + This method should either return True (possibly after sending + a 100 Continue response) or send an error response and return + False. + + """ + self.send_response_only(100) + self.flush_headers() + return True + + def handle_one_request(self): + """Handle a single HTTP request. + + You normally don't need to override this method; see the class + __doc__ string for information on how to handle specific HTTP + commands such as GET and POST. + + """ + try: + self.raw_requestline = self.rfile.readline(65537) + if len(self.raw_requestline) > 65536: + self.requestline = '' + self.request_version = '' + self.command = '' + self.send_error(414) + return + if not self.raw_requestline: + self.close_connection = 1 + return + if not self.parse_request(): + # An error code has been sent, just exit + return + mname = 'do_' + self.command + if not hasattr(self, mname): + self.send_error(501, "Unsupported method (%r)" % self.command) + return + method = getattr(self, mname) + method() + self.wfile.flush() #actually send the response if not already done. + except socket.timeout as e: + #a read or a write timed out. Discard this connection + self.log_error("Request timed out: %r", e) + self.close_connection = 1 + return + + def handle(self): + """Handle multiple requests if necessary.""" + self.close_connection = 1 + + self.handle_one_request() + while not self.close_connection: + self.handle_one_request() + + def send_error(self, code, message=None): + """Send and log an error reply. + + Arguments are the error code, and a detailed message. + The detailed message defaults to the short entry matching the + response code. + + This sends an error response (so it must be called before any + output has been generated), logs the error, and finally sends + a piece of HTML explaining the error to the user. + + """ + + try: + shortmsg, longmsg = self.responses[code] + except KeyError: + shortmsg, longmsg = '???', '???' 
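+        # Unknown status codes fall through with '???' placeholders; an
+        # explicit `message` argument from the caller still takes
+        # precedence below.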
+ if message is None: + message = shortmsg + explain = longmsg + self.log_error("code %d, message %s", code, message) + # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) + content = (self.error_message_format % + {'code': code, 'message': _quote_html(message), 'explain': explain}) + self.send_response(code, message) + self.send_header("Content-Type", self.error_content_type) + self.send_header('Connection', 'close') + self.end_headers() + if self.command != 'HEAD' and code >= 200 and code not in (204, 304): + self.wfile.write(content.encode('UTF-8', 'replace')) + + def send_response(self, code, message=None): + """Add the response header to the headers buffer and log the + response code. + + Also send two standard headers with the server software + version and the current date. + + """ + self.log_request(code) + self.send_response_only(code, message) + self.send_header('Server', self.version_string()) + self.send_header('Date', self.date_time_string()) + + def send_response_only(self, code, message=None): + """Send the response header only.""" + if message is None: + if code in self.responses: + message = self.responses[code][0] + else: + message = '' + if self.request_version != 'HTTP/0.9': + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append(("%s %d %s\r\n" % + (self.protocol_version, code, message)).encode( + 'latin-1', 'strict')) + + def send_header(self, keyword, value): + """Send a MIME header to the headers buffer.""" + if self.request_version != 'HTTP/0.9': + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append( + ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict')) + + if keyword.lower() == 'connection': + if value.lower() == 'close': + self.close_connection = 1 + elif value.lower() == 'keep-alive': + self.close_connection = 0 + + def end_headers(self): + """Send the blank line ending the MIME headers.""" + if self.request_version != 'HTTP/0.9': + self._headers_buffer.append(b"\r\n") + self.flush_headers() + + def flush_headers(self): + if hasattr(self, '_headers_buffer'): + self.wfile.write(b"".join(self._headers_buffer)) + self._headers_buffer = [] + + def log_request(self, code='-', size='-'): + """Log an accepted request. + + This is called by send_response(). + + """ + + self.log_message('"%s" %s %s', + self.requestline, str(code), str(size)) + + def log_error(self, format, *args): + """Log an error. + + This is called when a request cannot be fulfilled. By + default it passes the message on to log_message(). + + Arguments are the same as for log_message(). + + XXX This should go to the separate error log. + + """ + + self.log_message(format, *args) + + def log_message(self, format, *args): + """Log an arbitrary message. + + This is used by all other logging functions. Override + it if you have specific logging wishes. + + The first argument, FORMAT, is a format string for the + message to be logged. If the format string contains + any % escapes requiring parameters, they should be + specified as subsequent arguments (it's just like + printf!). + + The client ip and current date/time are prefixed to + every message. 
+
+        """
+
+        sys.stderr.write("%s - - [%s] %s\n" %
+                         (self.address_string(),
+                          self.log_date_time_string(),
+                          format%args))
+
+    def version_string(self):
+        """Return the server software version string."""
+        return self.server_version + ' ' + self.sys_version
+
+    def date_time_string(self, timestamp=None):
+        """Return the current date and time formatted for a message header."""
+        if timestamp is None:
+            timestamp = time.time()
+        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
+        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+                self.weekdayname[wd],
+                day, self.monthname[month], year,
+                hh, mm, ss)
+        return s
+
+    def log_date_time_string(self):
+        """Return the current time formatted for logging."""
+        now = time.time()
+        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+                day, self.monthname[month], year, hh, mm, ss)
+        return s
+
+    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+    monthname = [None,
+                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+    def address_string(self):
+        """Return the client address."""
+
+        return self.client_address[0]
+
+    # Essentially static class variables
+
+    # The version of the HTTP protocol we support.
+    # Set this to HTTP/1.1 to enable automatic keepalive
+    protocol_version = "HTTP/1.0"
+
+    # MessageClass used to parse headers
+    MessageClass = http_client.HTTPMessage
+
+    # Table mapping response codes to messages; entries have the
+    # form {code: (shortmessage, longmessage)}.
+    # See RFC 2616 and 6585.
+    responses = {
+        100: ('Continue', 'Request received, please continue'),
+        101: ('Switching Protocols',
+              'Switching to new protocol; obey Upgrade header'),
+
+        200: ('OK', 'Request fulfilled, document follows'),
+        201: ('Created', 'Document created, URL follows'),
+        202: ('Accepted',
+              'Request accepted, processing continues off-line'),
+        203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
+        204: ('No Content', 'Request fulfilled, nothing follows'),
+        205: ('Reset Content', 'Clear input form for further input.'),
+        206: ('Partial Content', 'Partial content follows.'),
+
+        300: ('Multiple Choices',
+              'Object has several resources -- see URI list'),
+        301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
+        302: ('Found', 'Object moved temporarily -- see URI list'),
+        303: ('See Other', 'Object moved -- see Method and URL list'),
+        304: ('Not Modified',
+              'Document has not changed since given time'),
+        305: ('Use Proxy',
+              'You must use proxy specified in Location to access this '
+              'resource.'),
+        307: ('Temporary Redirect',
+              'Object moved temporarily -- see URI list'),
+
+        400: ('Bad Request',
+              'Bad request syntax or unsupported method'),
+        401: ('Unauthorized',
+              'No permission -- see authorization schemes'),
+        402: ('Payment Required',
+              'No payment -- see charging schemes'),
+        403: ('Forbidden',
+              'Request forbidden -- authorization will not help'),
+        404: ('Not Found', 'Nothing matches the given URI'),
+        405: ('Method Not Allowed',
+              'Specified method is invalid for this resource.'),
+        406: ('Not Acceptable', 'URI not available in preferred format.'),
+        407: ('Proxy Authentication Required', 'You must authenticate with '
+              'this proxy before proceeding.'),
+        408: ('Request Timeout', 'Request timed out; try again later.'),
+        409: ('Conflict', 'Request conflict.'),
+        410: ('Gone',
+              'URI no longer exists and has been permanently removed.'),
+        411: ('Length Required', 'Client must specify Content-Length.'),
+        412: ('Precondition Failed', 'Precondition in headers is false.'),
+        413: ('Request Entity Too Large', 'Entity is too large.'),
+        414: ('Request-URI Too Long', 'URI is too long.'),
+        415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
+        416: ('Requested Range Not Satisfiable',
+              'Cannot satisfy request range.'),
+        417: ('Expectation Failed',
+              'Expect condition could not be satisfied.'),
+        428: ('Precondition Required',
+              'The origin server requires the request to be conditional.'),
+        429: ('Too Many Requests', 'The user has sent too many requests '
+              'in a given amount of time ("rate limiting").'),
+        431: ('Request Header Fields Too Large', 'The server is unwilling to '
+              'process the request because its header fields are too large.'),
+
+        500: ('Internal Server Error', 'Server got itself in trouble'),
+        501: ('Not Implemented',
+              'Server does not support this operation'),
+        502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
+        503: ('Service Unavailable',
+              'The server cannot process the request due to a high load'),
+        504: ('Gateway Timeout',
+              'The gateway server did not receive a timely response'),
+        505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
+        511: ('Network Authentication Required',
+              'The client needs to authenticate to gain network access.'),
+        }
+
+
+class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
+
+    """Simple HTTP request handler with GET and HEAD commands.
+
+    This serves files from the current directory and any of its
+    subdirectories.  The MIME type for files is determined by
+    calling the .guess_type() method.
+
+    The GET and HEAD requests are identical except that the HEAD
+    request omits the actual contents of the file.
+
+    """
+
+    server_version = "SimpleHTTP/" + __version__
+
+    def do_GET(self):
+        """Serve a GET request."""
+        f = self.send_head()
+        if f:
+            self.copyfile(f, self.wfile)
+            f.close()
+
+    def do_HEAD(self):
+        """Serve a HEAD request."""
+        f = self.send_head()
+        if f:
+            f.close()
+
+    def send_head(self):
+        """Common code for GET and HEAD commands.
+
+        This sends the response code and MIME headers.
+
+        Return value is either a file object (which has to be copied
+        to the outputfile by the caller unless the command was HEAD,
+        and must be closed by the caller under all circumstances), or
+        None, in which case the caller has nothing further to do.
+
+        """
+        path = self.translate_path(self.path)
+        f = None
+        if os.path.isdir(path):
+            if not self.path.endswith('/'):
+                # redirect browser - doing basically what apache does
+                self.send_response(301)
+                self.send_header("Location", self.path + "/")
+                self.end_headers()
+                return None
+            for index in "index.html", "index.htm":
+                index = os.path.join(path, index)
+                if os.path.exists(index):
+                    path = index
+                    break
+            else:
+                return self.list_directory(path)
+        ctype = self.guess_type(path)
+        try:
+            f = open(path, 'rb')
+        except IOError:
+            self.send_error(404, "File not found")
+            return None
+        self.send_response(200)
+        self.send_header("Content-type", ctype)
+        fs = os.fstat(f.fileno())
+        self.send_header("Content-Length", str(fs[6]))
+        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+        self.end_headers()
+        return f
+
+    def list_directory(self, path):
+        """Helper to produce a directory listing (absent index.html).
+
+        Return value is either a file object, or None (indicating an
+        error).  In either case, the headers are sent, making the
+        interface the same as for send_head().
+
+        """
+        try:
+            list = os.listdir(path)
+        except os.error:
+            self.send_error(404, "No permission to list directory")
+            return None
+        list.sort(key=lambda a: a.lower())
+        r = []
+        displaypath = html.escape(urllib_parse.unquote(self.path))
+        enc = sys.getfilesystemencoding()
+        title = 'Directory listing for %s' % displaypath
+        r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
+        r.append('<html>\n<head>')
+        r.append('<meta http-equiv="Content-Type" content="text/html; charset=%s">' % enc)
+        r.append('<title>%s</title>\n</head>' % title)
+        r.append('<body>\n<h1>%s</h1>' % title)
+        r.append('<hr>\n<ul>')
+        for name in list:
+            fullname = os.path.join(path, name)
+            displayname = linkname = name
+            # Append / for directories or @ for symbolic links
+            if os.path.isdir(fullname):
+                displayname = name + "/"
+                linkname = name + "/"
+            if os.path.islink(fullname):
+                displayname = name + "@"
+                # Note: a link to a directory displays with @ and links with /
+            r.append('<li><a href="%s">%s</a>'
+                    % (urllib_parse.quote(linkname), html.escape(displayname)))
+            # # Use this instead:
+            # r.append('<li><a href="%s">%s</a>'
+            #         % (urllib.quote(linkname), cgi.escape(displayname)))
+        r.append('</ul>\n<hr>\n</body>\n</html>\n')
+        encoded = '\n'.join(r).encode(enc)
+        f = io.BytesIO()
+        f.write(encoded)
+        f.seek(0)
+        self.send_response(200)
+        self.send_header("Content-type", "text/html; charset=%s" % enc)
+        self.send_header("Content-Length", str(len(encoded)))
+        self.end_headers()
+        return f
+
+    def translate_path(self, path):
+        """Translate a /-separated PATH to the local filename syntax.
+
+        Components that mean special things to the local file system
+        (e.g. drive or directory names) are ignored.  (XXX They should
+        probably be diagnosed.)
+
+        """
+        # abandon query parameters
+        path = path.split('?',1)[0]
+        path = path.split('#',1)[0]
+        path = posixpath.normpath(urllib_parse.unquote(path))
+        words = path.split('/')
+        words = filter(None, words)
+        path = os.getcwd()
+        for word in words:
+            drive, word = os.path.splitdrive(word)
+            head, word = os.path.split(word)
+            if word in (os.curdir, os.pardir): continue
+            path = os.path.join(path, word)
+        return path
+
+    def copyfile(self, source, outputfile):
+        """Copy all data between two file objects.
+
+        The SOURCE argument is a file object open for reading
+        (or anything with a read() method) and the DESTINATION
+        argument is a file object open for writing (or
+        anything with a write() method).
+
+        The only reason for overriding this would be to change
+        the block size or perhaps to replace newlines by CRLF
+        -- note however that the default server uses this
+        to copy binary data as well.
+
+        """
+        shutil.copyfileobj(source, outputfile)
+
+    def guess_type(self, path):
+        """Guess the type of a file.
+
+        Argument is a PATH (a filename).
+
+        Return value is a string of the form type/subtype,
+        usable for a MIME Content-type header.
+
+        The default implementation looks the file's extension
+        up in the table self.extensions_map, using application/octet-stream
+        as a default; however it would be permissible (if
+        slow) to look inside the data to make a better guess.
+
+        """
+
+        base, ext = posixpath.splitext(path)
+        if ext in self.extensions_map:
+            return self.extensions_map[ext]
+        ext = ext.lower()
+        if ext in self.extensions_map:
+            return self.extensions_map[ext]
+        else:
+            return self.extensions_map['']
+
+    if not mimetypes.inited:
+        mimetypes.init() # try to read system mime.types
+    extensions_map = mimetypes.types_map.copy()
+    extensions_map.update({
+        '': 'application/octet-stream', # Default
+        '.py': 'text/plain',
+        '.c': 'text/plain',
+        '.h': 'text/plain',
+        })
+
+
+# Utilities for CGIHTTPRequestHandler
+
+def _url_collapse_path(path):
+    """
+    Given a URL path, remove extra '/'s and '.' path elements and collapse
+    any '..' references and returns a collapsed path.
+
+    Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
+    The utility of this function is limited to is_cgi method and helps
+    prevent some security attacks.
+
+    Returns: A tuple of (head, tail) where tail is everything after the final /
+    and head is everything before it.  Head will always start with a '/' and,
+    if it contains anything else, never have a trailing '/'.
+
+    Raises: IndexError if too many '..' occur within the path.
+
+    """
+    # Similar to os.path.split(os.path.normpath(path)) but specific to URL
+    # path semantics rather than local operating system semantics.
+    path_parts = path.split('/')
+    head_parts = []
+    for part in path_parts[:-1]:
+        if part == '..':
+            head_parts.pop() # IndexError if more '..' than prior parts
+        elif part and part != '.':
+            head_parts.append( part )
+    if path_parts:
+        tail_part = path_parts.pop()
+        if tail_part:
+            if tail_part == '..':
+                head_parts.pop()
+                tail_part = ''
+            elif tail_part == '.':
+                tail_part = ''
+    else:
+        tail_part = ''
+
+    splitpath = ('/' + '/'.join(head_parts), tail_part)
+    collapsed_path = "/".join(splitpath)
+
+    return collapsed_path
+
+
+
+nobody = None
+
+def nobody_uid():
+    """Internal routine to get nobody's uid"""
+    global nobody
+    if nobody:
+        return nobody
+    try:
+        import pwd
+    except ImportError:
+        return -1
+    try:
+        nobody = pwd.getpwnam('nobody')[2]
+    except KeyError:
+        nobody = 1 + max(x[2] for x in pwd.getpwall())
+    return nobody
+
+
+def executable(path):
+    """Test for executable file."""
+    return os.access(path, os.X_OK)
+
+
+class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
+
+    """Complete HTTP server with GET, HEAD and POST commands.
+
+    GET and HEAD also support running CGI scripts.
+
+    The POST command is *only* implemented for CGI scripts.
+
+    """
+
+    # Determine platform specifics
+    have_fork = hasattr(os, 'fork')
+
+    # Make rfile unbuffered -- we need to read one line and then pass
+    # the rest to a subprocess, so we can't use buffered input.
+    rbufsize = 0
+
+    def do_POST(self):
+        """Serve a POST request.
+
+        This is only implemented for CGI scripts.
+
+        """
+
+        if self.is_cgi():
+            self.run_cgi()
+        else:
+            self.send_error(501, "Can only POST to CGI scripts")
+
+    def send_head(self):
+        """Version of send_head that supports CGI scripts"""
+        if self.is_cgi():
+            return self.run_cgi()
+        else:
+            return SimpleHTTPRequestHandler.send_head(self)
+
+    def is_cgi(self):
+        """Test whether self.path corresponds to a CGI script.
+
+        Returns True and updates the cgi_info attribute to the tuple
+        (dir, rest) if self.path requires running a CGI script.
+        Returns False otherwise.
+
+        If any exception is raised, the caller should assume that
+        self.path was rejected as invalid and act accordingly.
+
+        The default implementation tests whether the normalized url
+        path begins with one of the strings in self.cgi_directories
+        (and the next character is a '/' or the end of the string).
+
+        """
+        collapsed_path = _url_collapse_path(self.path)
+        dir_sep = collapsed_path.find('/', 1)
+        head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
+        if head in self.cgi_directories:
+            self.cgi_info = head, tail
+            return True
+        return False
+
+
+    cgi_directories = ['/cgi-bin', '/htbin']
+
+    def is_executable(self, path):
+        """Test whether argument path is an executable file."""
+        return executable(path)
+
+    def is_python(self, path):
+        """Test whether argument path is a Python script."""
+        head, tail = os.path.splitext(path)
+        return tail.lower() in (".py", ".pyw")
+
+    def run_cgi(self):
+        """Execute a CGI script."""
+        path = self.path
+        dir, rest = self.cgi_info
+
+        i = path.find('/', len(dir) + 1)
+        while i >= 0:
+            nextdir = path[:i]
+            nextrest = path[i+1:]
+
+            scriptdir = self.translate_path(nextdir)
+            if os.path.isdir(scriptdir):
+                dir, rest = nextdir, nextrest
+                i = path.find('/', len(dir) + 1)
+            else:
+                break
+
+        # find an explicit query string, if present.
+        i = rest.rfind('?')
+        if i >= 0:
+            rest, query = rest[:i], rest[i+1:]
+        else:
+            query = ''
+
+        # dissect the part after the directory name into a script name &
+        # a possible additional path, to be stored in PATH_INFO.
+        i = rest.find('/')
+        if i >= 0:
+            script, rest = rest[:i], rest[i:]
+        else:
+            script, rest = rest, ''
+
+        scriptname = dir + '/' + script
+        scriptfile = self.translate_path(scriptname)
+        if not os.path.exists(scriptfile):
+            self.send_error(404, "No such CGI script (%r)" % scriptname)
+            return
+        if not os.path.isfile(scriptfile):
+            self.send_error(403, "CGI script is not a plain file (%r)" %
+                            scriptname)
+            return
+        ispy = self.is_python(scriptname)
+        if self.have_fork or not ispy:
+            if not self.is_executable(scriptfile):
+                self.send_error(403, "CGI script is not executable (%r)" %
+                                scriptname)
+                return
+
+        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+        # XXX Much of the following could be prepared ahead of time!
+        env = copy.deepcopy(os.environ)
+        env['SERVER_SOFTWARE'] = self.version_string()
+        env['SERVER_NAME'] = self.server.server_name
+        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+        env['SERVER_PROTOCOL'] = self.protocol_version
+        env['SERVER_PORT'] = str(self.server.server_port)
+        env['REQUEST_METHOD'] = self.command
+        uqrest = urllib_parse.unquote(rest)
+        env['PATH_INFO'] = uqrest
+        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+        env['SCRIPT_NAME'] = scriptname
+        if query:
+            env['QUERY_STRING'] = query
+        env['REMOTE_ADDR'] = self.client_address[0]
+        authorization = self.headers.get("authorization")
+        if authorization:
+            authorization = authorization.split()
+            if len(authorization) == 2:
+                import base64, binascii
+                env['AUTH_TYPE'] = authorization[0]
+                if authorization[0].lower() == "basic":
+                    try:
+                        authorization = authorization[1].encode('ascii')
+                        if utils.PY3:
+                            # In Py3.3, was:
+                            authorization = base64.decodebytes(authorization).\
+                                            decode('ascii')
+                        else:
+                            # Backport to Py2.7:
+                            authorization = base64.decodestring(authorization).\
+                                            decode('ascii')
+                    except (binascii.Error, UnicodeError):
+                        pass
+                    else:
+                        authorization = authorization.split(':')
+                        if len(authorization) == 2:
+                            env['REMOTE_USER'] = authorization[0]
+        # XXX REMOTE_IDENT
+        if self.headers.get('content-type') is None:
+            env['CONTENT_TYPE'] = self.headers.get_content_type()
+        else:
+            env['CONTENT_TYPE'] = self.headers['content-type']
+        length = self.headers.get('content-length')
+        if length:
+            env['CONTENT_LENGTH'] = length
+        referer = self.headers.get('referer')
+        if referer:
+            env['HTTP_REFERER'] = referer
+        accept = []
+        for line in self.headers.getallmatchingheaders('accept'):
+            if line[:1] in "\t\n\r ":
+                accept.append(line.strip())
+            else:
+                accept = accept + line[7:].split(',')
+        env['HTTP_ACCEPT'] = ','.join(accept)
+        ua = self.headers.get('user-agent')
+        if ua:
+            env['HTTP_USER_AGENT'] = ua
+        co = filter(None, self.headers.get_all('cookie', []))
+        cookie_str = ', '.join(co)
+        if cookie_str:
+            env['HTTP_COOKIE'] = cookie_str
+        # XXX Other HTTP_* headers
+        # Since we're setting the env in the parent, provide empty
+        # values to override previously set values
+        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
+            env.setdefault(k, "")
+
+        self.send_response(200, "Script output follows")
+        self.flush_headers()
+
+        decoded_query = query.replace('+', ' ')
+
+        if self.have_fork:
+            # Unix -- fork as we should
+            args = [script]
+            if '=' not in decoded_query:
+                args.append(decoded_query)
+            nobody = nobody_uid()
+            self.wfile.flush() # Always flush before forking
+            pid = os.fork()
+            if pid != 0:
+                # Parent
+                pid, sts = os.waitpid(pid, 0)
+                # throw away additional data [see bug #427345]
+                while select.select([self.rfile], [], [], 0)[0]:
+                    if not self.rfile.read(1):
+                        break
+                if sts:
+                    self.log_error("CGI script exit status %#x", sts)
+                return
+            # Child
+            try:
+                try:
+                    os.setuid(nobody)
+                except os.error:
+                    pass
+                os.dup2(self.rfile.fileno(), 0)
+                os.dup2(self.wfile.fileno(), 1)
+                os.execve(scriptfile, args, env)
+            except:
+                self.server.handle_error(self.request, self.client_address)
+                os._exit(127)
+
+        else:
+            # Non-Unix -- use subprocess
+            import subprocess
+            cmdline = [scriptfile]
+            if self.is_python(scriptfile):
+                interp = sys.executable
+                if interp.lower().endswith("w.exe"):
+                    # On Windows, use python.exe, not pythonw.exe
+                    interp = interp[:-5] + interp[-4:]
+                cmdline = [interp, '-u'] + cmdline
+            if '=' not in query:
+                cmdline.append(query)
+            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
+            try:
+                nbytes = int(length)
+            except (TypeError, ValueError):
+                nbytes = 0
+            p = subprocess.Popen(cmdline,
+                                 stdin=subprocess.PIPE,
+                                 stdout=subprocess.PIPE,
+                                 stderr=subprocess.PIPE,
+                                 env = env
+                                 )
+            if self.command.lower() == "post" and nbytes > 0:
+                data = self.rfile.read(nbytes)
+            else:
+                data = None
+            # throw away additional data [see bug #427345]
+            while select.select([self.rfile._sock], [], [], 0)[0]:
+                if not self.rfile._sock.recv(1):
+                    break
+            stdout, stderr = p.communicate(data)
+            self.wfile.write(stdout)
+            if stderr:
+                self.log_error('%s', stderr)
+            p.stderr.close()
+            p.stdout.close()
+            status = p.returncode
+            if status:
+                self.log_error("CGI script exit status %#x", status)
+            else:
+                self.log_message("CGI script exited OK")
+
+
+def test(HandlerClass = BaseHTTPRequestHandler,
+         ServerClass = HTTPServer, protocol="HTTP/1.0", port=8000):
+    """Test the HTTP request handler class.
+
+    This runs an HTTP server on port 8000 (or the first command line
+    argument).
+
+    """
+    server_address = ('', port)
+
+    HandlerClass.protocol_version = protocol
+    httpd = ServerClass(server_address, HandlerClass)
+
+    sa = httpd.socket.getsockname()
+    print("Serving HTTP on", sa[0], "port", sa[1], "...")
+    try:
+        httpd.serve_forever()
+    except KeyboardInterrupt:
+        print("\nKeyboard interrupt received, exiting.")
+        httpd.server_close()
+        sys.exit(0)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cgi', action='store_true',
+                        help='Run as CGI Server')
+    parser.add_argument('port', action='store',
+                        default=8000, type=int,
+                        nargs='?',
+                        help='Specify alternate port [default: 8000]')
+    args = parser.parse_args()
+    if args.cgi:
+        test(HandlerClass=CGIHTTPRequestHandler, port=args.port)
+    else:
+        test(HandlerClass=SimpleHTTPRequestHandler, port=args.port)
diff --git a/pype/modules/ftrack/python2_vendor/future/backports/misc.py b/pype/modules/ftrack/python2_vendor/future/backports/misc.py
new file mode 100644
index 0000000000..098a0667e8
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/backports/misc.py
@@ -0,0 +1,944 @@
+"""
+Miscellaneous function (re)definitions from the Py3.4+ standard library
+for Python 2.6/2.7.
+ +- math.ceil (for Python 2.7) +- collections.OrderedDict (for Python 2.6) +- collections.Counter (for Python 2.6) +- collections.ChainMap (for all versions prior to Python 3.3) +- itertools.count (for Python 2.6, with step parameter) +- subprocess.check_output (for Python 2.6) +- reprlib.recursive_repr (for Python 2.6+) +- functools.cmp_to_key (for Python 2.6) +""" + +from __future__ import absolute_import + +import subprocess +from math import ceil as oldceil + +from operator import itemgetter as _itemgetter, eq as _eq +import sys +import heapq as _heapq +from _weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from socket import getaddrinfo, SOCK_STREAM, error, socket + +from future.utils import iteritems, itervalues, PY2, PY26, PY3 + +if PY2: + from collections import Mapping, MutableMapping +else: + from collections.abc import Mapping, MutableMapping + + +def ceil(x): + """ + Return the ceiling of x as an int. + This is the smallest integral value >= x. + """ + return int(oldceil(x)) + + +######################################################################## +### reprlib.recursive_repr decorator from Py3.4 +######################################################################## + +from itertools import islice + +if PY3: + try: + from _thread import get_ident + except ImportError: + from _dummy_thread import get_ident +else: + try: + from thread import get_ident + except ImportError: + from dummy_thread import get_ident + + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + + +################################################################################ +### OrderedDict +################################################################################ + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. + + def __init__(*args, **kwds): + '''Initialize an ordered dictionary. 
The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if not args: + raise TypeError("descriptor '__init__' of 'OrderedDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last==False). + + Raises KeyError if the element does not exist. + When last=True, acts like a fast version of self[key]=self.pop(key). 
+ + ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + last.next = root.prev = link + else: + first = root.next + link.prev = root + link.next = first + root.next = first.prev = link + + def __sizeof__(self): + sizeof = sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = MutableMapping.update + keys = MutableMapping.keys + values = MutableMapping.values + items = MutableMapping.items + __ne__ = MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + @recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + return self.__class__, (), inst_dict or None, None, iter(self.items()) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(map(_eq, self, other)) + return dict.__eq__(self, other) + + +# {{{ http://code.activestate.com/recipes/576611/ (r11) + +try: + from operator import itemgetter + from heapq import nlargest +except ImportError: + pass + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. 
+ + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(*args, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + if not args: + raise TypeError("descriptor '__init__' of 'Counter' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + super(Counter, self).__init__() + self.update(*args, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abcdeabcdabcaba').most_common(3) + [('a', 5), ('b', 4), ('c', 3)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. 
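+        # Mechanics of the one-liner below: _starmap(_repeat, items) turns
+        # each (elem, count) pair into repeat(elem, count), and
+        # chain.from_iterable then flattens those repeat iterators into a
+        # single stream of elements.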
+ return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because setting v=1 + # means that no element can have a count greater than one. + raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(*args, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if not args: + raise TypeError("descriptor 'update' of 'Counter' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + iterable = args[0] if args else None + if iterable is not None: + if isinstance(iterable, Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super(Counter, self).update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(*args, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if not args: + raise TypeError("descriptor 'subtract' of 'Counter' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + iterable = args[0] if args else None + if iterable is not None: + self_get = self.get + if isinstance(iterable, Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' 
+ if elem in self: + super(Counter, self).__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + + def __add__(self, other): + '''Add counts from two counters. + + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + def __pos__(self): + 'Adds an empty counter, effectively stripping negative and zero counts' + return self + Counter() + + def __neg__(self): + '''Subtracts from an empty counter. Strips positive and zero counts, + and flips the sign on negative counts. + + ''' + return Counter() - self + + def _keep_positive(self): + '''Internal method to strip elements with a negative or zero count''' + nonpositive = [elem for elem, count in self.items() if not count > 0] + for elem in nonpositive: + del self[elem] + return self + + def __iadd__(self, other): + '''Inplace add from another counter, keeping only positive counts. + + >>> c = Counter('abbb') + >>> c += Counter('bcc') + >>> c + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] += count + return self._keep_positive() + + def __isub__(self, other): + '''Inplace subtract counter, but keep only results with positive counts. 
+ + >>> c = Counter('abbbc') + >>> c -= Counter('bccd') + >>> c + Counter({'b': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] -= count + return self._keep_positive() + + def __ior__(self, other): + '''Inplace union is the maximum of value from either counter. + + >>> c = Counter('abbb') + >>> c |= Counter('bcc') + >>> c + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + for elem, other_count in other.items(): + count = self[elem] + if other_count > count: + self[elem] = other_count + return self._keep_positive() + + def __iand__(self, other): + '''Inplace intersection is the minimum of corresponding counts. + + >>> c = Counter('abbb') + >>> c &= Counter('bcc') + >>> c + Counter({'b': 1}) + + ''' + for elem, count in self.items(): + other_count = other[elem] + if other_count < count: + self[elem] = other_count + return self._keep_positive() + + +def check_output(*popenargs, **kwargs): + """ + For Python 2.6 compatibility: see + http://stackoverflow.com/questions/4814970/ + """ + + if 'stdout' in kwargs: + raise ValueError('stdout argument not allowed, it will be overridden.') + process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) + output, unused_err = process.communicate() + retcode = process.poll() + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise subprocess.CalledProcessError(retcode, cmd) + return output + + +def count(start=0, step=1): + """ + ``itertools.count`` in Py 2.6 doesn't accept a step + parameter. This is an enhanced version of ``itertools.count`` + for Py2.6 equivalent to ``itertools.count`` in Python 2.7+. + """ + while True: + yield start + start += step + + +######################################################################## +### ChainMap (helper for configparser and string.Template) +### From the Py3.4 source code. See also: +### https://github.com/kkxue/Py2ChainMap/blob/master/py2chainmap.py +######################################################################## + +class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. 
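+ For example, ChainMap({'x': 1}, {'x': 2})['x'] returns 1, because
+ lookups search the maps from first to last.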
+ + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + # Py2 compatibility: + __nonzero__ = __bool__ + + @recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self, m=None): # like Django's Context.push() + ''' + New ChainMap with a new map followed by all previous maps. If no + map is provided, an empty dict is used. + ''' + if m is None: + m = {} + return self.__class__(m, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {0!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {0!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + + +# Re-use the same sentinel as in the Python stdlib socket module: +from socket import _GLOBAL_DEFAULT_TIMEOUT +# Was: _GLOBAL_DEFAULT_TIMEOUT = object() + + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Backport of 3-argument create_connection() for Py2.6. + + Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. 
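+ For example, create_connection(('www.python.org', 80), timeout=5.0)
+ returns a connected TCP socket, re-raising the last socket error if
+ every address returned by getaddrinfo() fails.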
+ """ + + host, port = address + err = None + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + else: + raise error("getaddrinfo returns an empty list") + +# Backport from Py2.7 for Py2.6: +def cmp_to_key(mycmp): + """Convert a cmp= function into a key= function""" + class K(object): + __slots__ = ['obj'] + def __init__(self, obj, *args): + self.obj = obj + def __lt__(self, other): + return mycmp(self.obj, other.obj) < 0 + def __gt__(self, other): + return mycmp(self.obj, other.obj) > 0 + def __eq__(self, other): + return mycmp(self.obj, other.obj) == 0 + def __le__(self, other): + return mycmp(self.obj, other.obj) <= 0 + def __ge__(self, other): + return mycmp(self.obj, other.obj) >= 0 + def __ne__(self, other): + return mycmp(self.obj, other.obj) != 0 + def __hash__(self): + raise TypeError('hash not implemented') + return K + +# Back up our definitions above in case they're useful +_OrderedDict = OrderedDict +_Counter = Counter +_check_output = check_output +_count = count +_ceil = ceil +__count_elements = _count_elements +_recursive_repr = recursive_repr +_ChainMap = ChainMap +_create_connection = create_connection +_cmp_to_key = cmp_to_key + +# Overwrite the definitions above with the usual ones +# from the standard library: +if sys.version_info >= (2, 7): + from collections import OrderedDict, Counter + from itertools import count + from functools import cmp_to_key + try: + from subprocess import check_output + except ImportError: + # Not available. This happens with Google App Engine: see issue #231 + pass + from socket import create_connection + +if sys.version_info >= (3, 0): + from math import ceil + from collections import _count_elements + +if sys.version_info >= (3, 3): + from reprlib import recursive_repr + from collections import ChainMap diff --git a/pype/modules/ftrack/python2_vendor/future/backports/socket.py b/pype/modules/ftrack/python2_vendor/future/backports/socket.py new file mode 100644 index 0000000000..930e1dae63 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/socket.py @@ -0,0 +1,454 @@ +# Wrapper module for _socket, providing some additional facilities +# implemented in Python. + +"""\ +This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 
'tcp') to a number
+ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
+htons(), htonl() -- convert 16, 32 bit int from host to network byte order
+inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
+inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
+socket.getdefaulttimeout() -- get the default timeout value
+socket.setdefaulttimeout() -- set the default timeout value
+create_connection() -- connects to an address, with an optional timeout and
+ optional source address.
+
+ [*] not available on all platforms!
+
+Special objects:
+
+SocketType -- type object for socket objects
+error -- exception raised for I/O errors
+has_ipv6 -- boolean value indicating if IPv6 is supported
+
+Integer constants:
+
+AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
+SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
+
+Many other constants may be defined; these may be used in calls to
+the setsockopt() and getsockopt() methods.
+"""
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import super
+
+import _socket
+from _socket import *
+
+import os, sys, io
+
+try:
+ import errno
+except ImportError:
+ errno = None
+EBADF = getattr(errno, 'EBADF', 9)
+EAGAIN = getattr(errno, 'EAGAIN', 11)
+EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
+
+__all__ = ["getfqdn", "create_connection"]
+__all__.extend(os._get_exports_list(_socket))
+
+
+_realsocket = socket
+
+# WSA error codes
+if sys.platform.lower().startswith("win"):
+ errorTab = {}
+ errorTab[10004] = "The operation was interrupted."
+ errorTab[10009] = "A bad file handle was passed."
+ errorTab[10013] = "Permission denied."
+ errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
+ errorTab[10022] = "An invalid operation was attempted."
+ errorTab[10035] = "The socket operation would block"
+ errorTab[10036] = "A blocking operation is already in progress."
+ errorTab[10048] = "The network address is in use."
+ errorTab[10054] = "The connection has been reset."
+ errorTab[10058] = "The network has been shut down."
+ errorTab[10060] = "The operation timed out."
+ errorTab[10061] = "Connection refused."
+ errorTab[10063] = "The name is too long."
+ errorTab[10064] = "The host is down."
+ errorTab[10065] = "The host is unreachable."
+ __all__.append("errorTab")
+
+
+class socket(_socket.socket):
+
+ """A subclass of _socket.socket adding the makefile() method."""
+
+ __slots__ = ["__weakref__", "_io_refs", "_closed"]
+
+ def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
+ if fileno is None:
+ _socket.socket.__init__(self, family, type, proto)
+ else:
+ _socket.socket.__init__(self, family, type, proto, fileno)
+ self._io_refs = 0
+ self._closed = False
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ if not self._closed:
+ self.close()
+
+ def __repr__(self):
+ """Wrap __repr__() to reveal the real class name."""
+ s = _socket.socket.__repr__(self)
+ if s.startswith("<socket object"):
+ s = "<%s object%s" % (self.__class__.__name__, s[7:])
+ return s
+
+ def dup(self):
+ """dup() -> socket object
+
+ Return a new socket object connected to the same system resource.
+ """
+ fd = dup(self.fileno())
+ sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
+ sock.settimeout(self.gettimeout())
+ return sock
+
+ def accept(self):
+ """accept() -> (socket object, address info)
+
+ Wait for an incoming connection.
Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + """ + fd, addr = self._accept() + sock = socket(self.family, self.type, self.proto, fileno=fd) + # Issue #7995: if no default timeout is set and the listening + # socket had a (non-zero) timeout, force the new socket in blocking + # mode to override platform-specific socket flags inheritance. + if getdefaulttimeout() is None and self.gettimeout(): + sock.setblocking(True) + return sock, addr + + def makefile(self, mode="r", buffering=None, **_3to2kwargs): + """makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, + except the only mode characters supported are 'r', 'w' and 'b'. + The semantics are similar too. (XXX refactor to share code?) + """ + if 'newline' in _3to2kwargs: newline = _3to2kwargs['newline']; del _3to2kwargs['newline'] + else: newline = None + if 'errors' in _3to2kwargs: errors = _3to2kwargs['errors']; del _3to2kwargs['errors'] + else: errors = None + if 'encoding' in _3to2kwargs: encoding = _3to2kwargs['encoding']; del _3to2kwargs['encoding'] + else: encoding = None + for c in mode: + if c not in ("r", "w", "b"): + raise ValueError("invalid mode %r (only r, w, b allowed)") + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._io_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text + + def _decref_socketios(self): + if self._io_refs > 0: + self._io_refs -= 1 + if self._closed: + self.close() + + def _real_close(self, _ss=_socket.socket): + # This function should not reference any globals. See issue #808164. + _ss.close(self) + + def close(self): + # This function should not reference any globals. See issue #808164. + self._closed = True + if self._io_refs <= 0: + self._real_close() + + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + +def fromfd(fd, family, type, proto=0): + """ fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + """ + nfd = dup(fd) + return socket(family, type, proto, nfd) + +if hasattr(_socket.socket, "share"): + def fromshare(info): + """ fromshare(info) -> socket object + + Create a socket object from a the bytes object returned by + socket.share(pid). 
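+ Only defined when the underlying _socket.socket type provides
+ share(), i.e. on Windows (see the hasattr() guard above).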
+ """ + return socket(0, 0, 0, info) + +if hasattr(_socket, "socketpair"): + + def socketpair(family=None, type=SOCK_STREAM, proto=0): + """socketpair([family[, type[, proto]]]) -> (socket object, socket object) + + Create a pair of socket objects from the sockets returned by the platform + socketpair() function. + The arguments are the same as for socket() except the default family is + AF_UNIX if defined on the platform; otherwise, the default is AF_INET. + """ + if family is None: + try: + family = AF_UNIX + except NameError: + family = AF_INET + a, b = _socket.socketpair(family, type, proto) + a = socket(family, type, proto, a.detach()) + b = socket(family, type, proto, b.detach()) + return a, b + + +_blocking_errnos = set([EAGAIN, EWOULDBLOCK]) + +class SocketIO(io.RawIOBase): + + """Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + """ + + # One might wonder why not let FileIO do the job instead. There are two + # main reasons why FileIO is not adapted: + # - it wouldn't work under Windows (where you can't used read() and + # write() on a socket handle) + # - it wouldn't work with socket timeouts (FileIO would ignore the + # timeout and consider the socket non-blocking) + + # XXX More docs + + def __init__(self, sock, mode): + if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): + raise ValueError("invalid mode: %r" % mode) + io.RawIOBase.__init__(self) + self._sock = sock + if "b" not in mode: + mode += "b" + self._mode = mode + self._reading = "r" in mode + self._writing = "w" in mode + self._timeout_occurred = False + + def readinto(self, b): + """Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + """ + self._checkClosed() + self._checkReadable() + if self._timeout_occurred: + raise IOError("cannot read from timed out object") + while True: + try: + return self._sock.recv_into(b) + except timeout: + self._timeout_occurred = True + raise + # except InterruptedError: + # continue + except error as e: + if e.args[0] in _blocking_errnos: + return None + raise + + def write(self, b): + """Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.args[0] in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. 
+ """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + + +def getfqdn(name=''): + """Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available, hostname + from gethostname() is returned. + """ + name = name.strip() + if not name or name == '0.0.0.0': + name = gethostname() + try: + hostname, aliases, ipaddrs = gethostbyaddr(name) + except error: + pass + else: + aliases.insert(0, hostname) + for name in aliases: + if '.' in name: + break + else: + name = hostname + return name + + +# Re-use the same sentinel as in the Python stdlib socket module: +from socket import _GLOBAL_DEFAULT_TIMEOUT +# Was: _GLOBAL_DEFAULT_TIMEOUT = object() + + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + An host of '' or port 0 tells the OS to use the default. + """ + + host, port = address + err = None + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + else: + raise error("getaddrinfo returns an empty list") diff --git a/pype/modules/ftrack/python2_vendor/future/backports/socketserver.py b/pype/modules/ftrack/python2_vendor/future/backports/socketserver.py new file mode 100644 index 0000000000..d1e24a6dd0 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/socketserver.py @@ -0,0 +1,747 @@ +"""Generic socket server classes. + +This module tries to capture the various aspects of defining a server: + +For socket-based servers: + +- address family: + - AF_INET{,6}: IP (Internet Protocol) sockets (default) + - AF_UNIX: Unix domain sockets + - others, e.g. AF_DECNET are conceivable (see +- socket type: + - SOCK_STREAM (reliable stream, e.g. TCP) + - SOCK_DGRAM (datagrams, e.g. UDP) + +For request-based servers (including socket-based): + +- client address verification before further looking at the request + (This is actually a hook for any processing that needs to look + at the request before anything else, e.g. 
logging) +- how to handle multiple requests: + - synchronous (one request is handled at a time) + - forking (each request is handled by a new process) + - threading (each request is handled by a new thread) + +The classes in this module favor the server type that is simplest to +write: a synchronous TCP/IP server. This is bad class design, but +save some typing. (There's also the issue that a deep class hierarchy +slows down method lookups.) + +There are five classes in an inheritance diagram, four of which represent +synchronous servers of four types: + + +------------+ + | BaseServer | + +------------+ + | + v + +-----------+ +------------------+ + | TCPServer |------->| UnixStreamServer | + +-----------+ +------------------+ + | + v + +-----------+ +--------------------+ + | UDPServer |------->| UnixDatagramServer | + +-----------+ +--------------------+ + +Note that UnixDatagramServer derives from UDPServer, not from +UnixStreamServer -- the only difference between an IP and a Unix +stream server is the address family, which is simply repeated in both +unix server classes. + +Forking and threading versions of each type of server can be created +using the ForkingMixIn and ThreadingMixIn mix-in classes. For +instance, a threading UDP server class is created as follows: + + class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass + +The Mix-in class must come first, since it overrides a method defined +in UDPServer! Setting the various member variables also changes +the behavior of the underlying server mechanism. + +To implement a service, you must derive a class from +BaseRequestHandler and redefine its handle() method. You can then run +various versions of the service by combining one of the server classes +with your request handler class. + +The request handler class must be different for datagram or stream +services. This can be hidden by using the request handler +subclasses StreamRequestHandler or DatagramRequestHandler. + +Of course, you still have to use your head! + +For instance, it makes no sense to use a forking server if the service +contains state in memory that can be modified by requests (since the +modifications in the child process would never reach the initial state +kept in the parent process and passed to each child). In this case, +you can use a threading server, but you will probably have to use +locks to avoid two requests that come in nearly simultaneous to apply +conflicting changes to the server state. + +On the other hand, if you are building e.g. an HTTP server, where all +data is stored externally (e.g. in the file system), a synchronous +class will essentially render the service "deaf" while one request is +being handled -- which may be for a very long time if a client is slow +to read all the data it has requested. Here a threading or forking +server is appropriate. + +In some cases, it may be appropriate to process part of a request +synchronously, but to finish processing in a forked child depending on +the request data. This can be implemented by using a synchronous +server and doing an explicit fork in the request handler class +handle() method. + +Another approach to handling multiple simultaneous requests in an +environment that supports neither threads nor fork (or where these are +too expensive or inappropriate for the service) is to maintain an +explicit table of partially finished requests and to use select() to +decide which request to work on next (or whether to handle a new +incoming request). 
This is particularly important for stream services +where each client can potentially be connected for a long time (if +threads or subprocesses cannot be used). + +Future work: +- Standard classes for Sun RPC (which uses either UDP or TCP) +- Standard mix-in classes to implement various authentication + and encryption schemes +- Standard framework for select-based multiplexing + +XXX Open problems: +- What to do with out-of-band data? + +BaseServer: +- split generic "request" functionality out into BaseServer class. + Copyright (C) 2000 Luke Kenneth Casson Leighton + + example: read entries from a SQL database (requires overriding + get_request() to return a table entry from the database). + entry is processed by a RequestHandlerClass. + +""" + +# Author of the BaseServer patch: Luke Kenneth Casson Leighton + +# XXX Warning! +# There is a test suite for this module, but it cannot be run by the +# standard regression test. +# To run it manually, run Lib/test/test_socketserver.py. + +from __future__ import (absolute_import, print_function) + +__version__ = "0.4" + + +import socket +import select +import sys +import os +import errno +try: + import threading +except ImportError: + import dummy_threading as threading + +__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer", + "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler", + "StreamRequestHandler","DatagramRequestHandler", + "ThreadingMixIn", "ForkingMixIn"] +if hasattr(socket, "AF_UNIX"): + __all__.extend(["UnixStreamServer","UnixDatagramServer", + "ThreadingUnixStreamServer", + "ThreadingUnixDatagramServer"]) + +def _eintr_retry(func, *args): + """restart a system call interrupted by EINTR""" + while True: + try: + return func(*args) + except OSError as e: + if e.errno != errno.EINTR: + raise + +class BaseServer(object): + + """Base class for server classes. + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you do not use serve_forever() + - fileno() -> int # for select() + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - server_close() + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - service_actions() + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - allow_reuse_address + + Instance variables: + + - RequestHandlerClass + - socket + + """ + + timeout = None + + def __init__(self, server_address, RequestHandlerClass): + """Constructor. May be extended, do not override.""" + self.server_address = server_address + self.RequestHandlerClass = RequestHandlerClass + self.__is_shut_down = threading.Event() + self.__shutdown_request = False + + def server_activate(self): + """Called by constructor to activate the server. + + May be overridden. + + """ + pass + + def serve_forever(self, poll_interval=0.5): + """Handle one request at a time until shutdown. + + Polls for shutdown every poll_interval seconds. Ignores + self.timeout. If you need to do periodic tasks, do them in + another thread. 
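+ A common pattern is to run serve_forever() in a separate thread
+ and stop it by calling shutdown() from the main thread.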
+ """ + self.__is_shut_down.clear() + try: + while not self.__shutdown_request: + # XXX: Consider using another file descriptor or + # connecting to the socket to wake this up instead of + # polling. Polling reduces our responsiveness to a + # shutdown request and wastes cpu at all other times. + r, w, e = _eintr_retry(select.select, [self], [], [], + poll_interval) + if self in r: + self._handle_request_noblock() + + self.service_actions() + finally: + self.__shutdown_request = False + self.__is_shut_down.set() + + def shutdown(self): + """Stops the serve_forever loop. + + Blocks until the loop has finished. This must be called while + serve_forever() is running in another thread, or it will + deadlock. + """ + self.__shutdown_request = True + self.__is_shut_down.wait() + + def service_actions(self): + """Called by the serve_forever() loop. + + May be overridden by a subclass / Mixin to implement any code that + needs to be run during the loop. + """ + pass + + # The distinction between handling, getting, processing and + # finishing a request is fairly arbitrary. Remember: + # + # - handle_request() is the top-level call. It calls + # select, get_request(), verify_request() and process_request() + # - get_request() is different for stream or datagram sockets + # - process_request() is the place that may fork a new process + # or create a new thread to finish the request + # - finish_request() instantiates the request handler class; + # this constructor will handle the request all by itself + + def handle_request(self): + """Handle one request, possibly blocking. + + Respects self.timeout. + """ + # Support people who used socket.settimeout() to escape + # handle_request before self.timeout was available. + timeout = self.socket.gettimeout() + if timeout is None: + timeout = self.timeout + elif self.timeout is not None: + timeout = min(timeout, self.timeout) + fd_sets = _eintr_retry(select.select, [self], [], [], timeout) + if not fd_sets[0]: + self.handle_timeout() + return + self._handle_request_noblock() + + def _handle_request_noblock(self): + """Handle one request, without blocking. + + I assume that select.select has returned that the socket is + readable before this function was called, so there should be + no risk of blocking in get_request(). + """ + try: + request, client_address = self.get_request() + except socket.error: + return + if self.verify_request(request, client_address): + try: + self.process_request(request, client_address) + except: + self.handle_error(request, client_address) + self.shutdown_request(request) + + def handle_timeout(self): + """Called if no new request arrives within self.timeout. + + Overridden by ForkingMixIn. + """ + pass + + def verify_request(self, request, client_address): + """Verify the request. May be overridden. + + Return True if we should proceed with this request. + + """ + return True + + def process_request(self, request, client_address): + """Call finish_request. + + Overridden by ForkingMixIn and ThreadingMixIn. + + """ + self.finish_request(request, client_address) + self.shutdown_request(request) + + def server_close(self): + """Called to clean-up the server. + + May be overridden. 
+ + """ + pass + + def finish_request(self, request, client_address): + """Finish one request by instantiating RequestHandlerClass.""" + self.RequestHandlerClass(request, client_address, self) + + def shutdown_request(self, request): + """Called to shutdown and close an individual request.""" + self.close_request(request) + + def close_request(self, request): + """Called to clean up an individual request.""" + pass + + def handle_error(self, request, client_address): + """Handle an error gracefully. May be overridden. + + The default is to print a traceback and continue. + + """ + print('-'*40) + print('Exception happened during processing of request from', end=' ') + print(client_address) + import traceback + traceback.print_exc() # XXX But this goes to stderr! + print('-'*40) + + +class TCPServer(BaseServer): + + """Base class for various socket-based server classes. + + Defaults to synchronous IP stream (i.e., TCP). + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass, bind_and_activate=True) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you don't use serve_forever() + - fileno() -> int # for select() + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - request_queue_size (only for stream sockets) + - allow_reuse_address + + Instance variables: + + - server_address + - RequestHandlerClass + - socket + + """ + + address_family = socket.AF_INET + + socket_type = socket.SOCK_STREAM + + request_queue_size = 5 + + allow_reuse_address = False + + def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True): + """Constructor. May be extended, do not override.""" + BaseServer.__init__(self, server_address, RequestHandlerClass) + self.socket = socket.socket(self.address_family, + self.socket_type) + if bind_and_activate: + self.server_bind() + self.server_activate() + + def server_bind(self): + """Called by constructor to bind the socket. + + May be overridden. + + """ + if self.allow_reuse_address: + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.socket.bind(self.server_address) + self.server_address = self.socket.getsockname() + + def server_activate(self): + """Called by constructor to activate the server. + + May be overridden. + + """ + self.socket.listen(self.request_queue_size) + + def server_close(self): + """Called to clean-up the server. + + May be overridden. + + """ + self.socket.close() + + def fileno(self): + """Return socket file number. + + Interface required by select(). + + """ + return self.socket.fileno() + + def get_request(self): + """Get the request and client address from the socket. + + May be overridden. + + """ + return self.socket.accept() + + def shutdown_request(self, request): + """Called to shutdown and close an individual request.""" + try: + #explicitly shutdown. socket.close() merely releases + #the socket and waits for GC to perform the actual close. 
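+ #SHUT_WR half-closes the connection: it sends a FIN to the peer
+ #while still letting any buffered incoming data be read.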
+ request.shutdown(socket.SHUT_WR) + except socket.error: + pass #some platforms may raise ENOTCONN here + self.close_request(request) + + def close_request(self, request): + """Called to clean up an individual request.""" + request.close() + + +class UDPServer(TCPServer): + + """UDP server class.""" + + allow_reuse_address = False + + socket_type = socket.SOCK_DGRAM + + max_packet_size = 8192 + + def get_request(self): + data, client_addr = self.socket.recvfrom(self.max_packet_size) + return (data, self.socket), client_addr + + def server_activate(self): + # No need to call listen() for UDP. + pass + + def shutdown_request(self, request): + # No need to shutdown anything. + self.close_request(request) + + def close_request(self, request): + # No need to close anything. + pass + +class ForkingMixIn(object): + + """Mix-in class to handle each request in a new process.""" + + timeout = 300 + active_children = None + max_children = 40 + + def collect_children(self): + """Internal routine to wait for children that have exited.""" + if self.active_children is None: return + while len(self.active_children) >= self.max_children: + # XXX: This will wait for any child process, not just ones + # spawned by this library. This could confuse other + # libraries that expect to be able to wait for their own + # children. + try: + pid, status = os.waitpid(0, 0) + except os.error: + pid = None + if pid not in self.active_children: continue + self.active_children.remove(pid) + + # XXX: This loop runs more system calls than it ought + # to. There should be a way to put the active_children into a + # process group and then use os.waitpid(-pgid) to wait for any + # of that set, but I couldn't find a way to allocate pgids + # that couldn't collide. + for child in self.active_children: + try: + pid, status = os.waitpid(child, os.WNOHANG) + except os.error: + pid = None + if not pid: continue + try: + self.active_children.remove(pid) + except ValueError as e: + raise ValueError('%s. x=%d and list=%r' % (e.message, pid, + self.active_children)) + + def handle_timeout(self): + """Wait for zombies after self.timeout seconds of inactivity. + + May be extended, do not override. + """ + self.collect_children() + + def service_actions(self): + """Collect the zombie child processes regularly in the ForkingMixIn. + + service_actions is called in the BaseServer's serve_forver loop. + """ + self.collect_children() + + def process_request(self, request, client_address): + """Fork a new subprocess to process the request.""" + pid = os.fork() + if pid: + # Parent process + if self.active_children is None: + self.active_children = [] + self.active_children.append(pid) + self.close_request(request) + return + else: + # Child process. + # This must never return, hence os._exit()! + try: + self.finish_request(request, client_address) + self.shutdown_request(request) + os._exit(0) + except: + try: + self.handle_error(request, client_address) + self.shutdown_request(request) + finally: + os._exit(1) + + +class ThreadingMixIn(object): + """Mix-in class to handle each request in a new thread.""" + + # Decides how threads will act upon termination of the + # main process + daemon_threads = False + + def process_request_thread(self, request, client_address): + """Same as in BaseServer but as a thread. + + In addition, exception handling is done here. 
+
+ """
+ try:
+ self.finish_request(request, client_address)
+ self.shutdown_request(request)
+ except:
+ self.handle_error(request, client_address)
+ self.shutdown_request(request)
+
+ def process_request(self, request, client_address):
+ """Start a new thread to process the request."""
+ t = threading.Thread(target = self.process_request_thread,
+ args = (request, client_address))
+ t.daemon = self.daemon_threads
+ t.start()
+
+
+class ForkingUDPServer(ForkingMixIn, UDPServer): pass
+class ForkingTCPServer(ForkingMixIn, TCPServer): pass
+
+class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
+class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
+
+if hasattr(socket, 'AF_UNIX'):
+
+ class UnixStreamServer(TCPServer):
+ address_family = socket.AF_UNIX
+
+ class UnixDatagramServer(UDPServer):
+ address_family = socket.AF_UNIX
+
+ class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
+
+ class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
+
+class BaseRequestHandler(object):
+
+ """Base class for request handler classes.
+
+ This class is instantiated for each request to be handled. The
+ constructor sets the instance variables request, client_address
+ and server, and then calls the handle() method. To implement a
+ specific service, all you need to do is to derive a class which
+ defines a handle() method.
+
+ The handle() method can find the request as self.request, the
+ client address as self.client_address, and the server (in case it
+ needs access to per-server information) as self.server. Since a
+ separate instance is created for each request, the handle() method
+ can define arbitrary other instance variables.
+
+ """
+
+ def __init__(self, request, client_address, server):
+ self.request = request
+ self.client_address = client_address
+ self.server = server
+ self.setup()
+ try:
+ self.handle()
+ finally:
+ self.finish()
+
+ def setup(self):
+ pass
+
+ def handle(self):
+ pass
+
+ def finish(self):
+ pass
+
+
+# The following two classes make it possible to use the same service
+# class for stream or datagram servers.
+# Each class sets up these instance variables:
+# - rfile: a file object from which the request is read
+# - wfile: a file object to which the reply is written
+# When the handle() method returns, wfile is flushed properly
+
+
+class StreamRequestHandler(BaseRequestHandler):
+
+ """Define self.rfile and self.wfile for stream sockets."""
+
+ # Default buffer sizes for rfile, wfile.
+ # We default rfile to buffered because otherwise it could be
+ # really slow for large data (a getc() call per byte); we make
+ # wfile unbuffered because (a) often after a write() we want to
+ # read and we need to flush the line; (b) big writes to unbuffered
+ # files are typically optimized by stdio even when big reads
+ # aren't.
+ rbufsize = -1
+ wbufsize = 0
+
+ # A timeout to apply to the request socket, if not None.
+ timeout = None
+
+ # Disable nagle algorithm for this socket, if True.
+ # Use only when wbufsize != 0, to avoid small packets.
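+ # (Nagle coalesces small writes into larger packets; disabling it
+ # trades bandwidth efficiency for lower per-request latency.)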
+ disable_nagle_algorithm = False + + def setup(self): + self.connection = self.request + if self.timeout is not None: + self.connection.settimeout(self.timeout) + if self.disable_nagle_algorithm: + self.connection.setsockopt(socket.IPPROTO_TCP, + socket.TCP_NODELAY, True) + self.rfile = self.connection.makefile('rb', self.rbufsize) + self.wfile = self.connection.makefile('wb', self.wbufsize) + + def finish(self): + if not self.wfile.closed: + try: + self.wfile.flush() + except socket.error: + # An final socket error may have occurred here, such as + # the local error ECONNABORTED. + pass + self.wfile.close() + self.rfile.close() + + +class DatagramRequestHandler(BaseRequestHandler): + + # XXX Regrettably, I cannot get this working on Linux; + # s.recvfrom() doesn't return a meaningful client address. + + """Define self.rfile and self.wfile for datagram sockets.""" + + def setup(self): + from io import BytesIO + self.packet, self.socket = self.request + self.rfile = BytesIO(self.packet) + self.wfile = BytesIO() + + def finish(self): + self.socket.sendto(self.wfile.getvalue(), self.client_address) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/test/__init__.py new file mode 100644 index 0000000000..0bba5e69a6 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/__init__.py @@ -0,0 +1,9 @@ +""" +test package backported for python-future. + +Its primary purpose is to allow use of "import test.support" for running +the Python standard library unit tests using the new Python 3 stdlib +import location. + +Python 3 renamed test.test_support to test.support. +""" diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/badcert.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/badcert.pem new file mode 100644 index 0000000000..c4191460f9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/badcert.pem @@ -0,0 +1,36 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L +opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH +fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB +AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU +D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA +IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM +oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 +ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ +loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j +oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA +z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq +ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV +q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +Just bad cert data +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L +opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH +fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB +AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU +D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA +IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM +oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 +ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ 
+loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j +oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA +z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq +ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV +q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +Just bad cert data +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/badkey.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/badkey.pem new file mode 100644 index 0000000000..1c8a955719 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/badkey.pem @@ -0,0 +1,40 @@ +-----BEGIN RSA PRIVATE KEY----- +Bad Key, though the cert should be OK +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD +VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x +IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT +U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 +NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl +bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m +dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj +aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh +m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 +M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn +fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC +AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb +08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx +CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ +iHkC6gGdBJhogs4= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +Bad Key, though the cert should be OK +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD +VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x +IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT +U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 +NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl +bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m +dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj +aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh +m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 +M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn +fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC +AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb +08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx +CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ +iHkC6gGdBJhogs4= +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/dh512.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/dh512.pem new file mode 100644 index 0000000000..200d16cd89 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/dh512.pem @@ -0,0 +1,9 @@ +-----BEGIN DH PARAMETERS----- +MEYCQQD1Kv884bEpQBgRjXyEpwpy1obEAxnIByl6ypUM2Zafq9AKUJsCRtMIPWak +XUGfnHy9iUsiGSa6q6Jew1XpKgVfAgEC +-----END DH PARAMETERS----- + +These are the 512 bit DH parameters from "Assigned Number for SKIP Protocols" +(http://www.skip-vpn.org/spec/numbers.html). 
+See there for how they were generated. +Note that g is not a generator, but this is not a problem since p is a safe prime. diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/https_svn_python_org_root.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/https_svn_python_org_root.pem new file mode 100644 index 0000000000..e7dfc82947 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/https_svn_python_org_root.pem @@ -0,0 +1,41 @@ +-----BEGIN CERTIFICATE----- +MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 +IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB +IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA +Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO +BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi +MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ +ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ +8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 +zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y +fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 +w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc +G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k +epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q +laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ +QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU +fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 +YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w +ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY +gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe +MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 +IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy +dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw +czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 +dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl +aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC +AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg +b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB +ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc +nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg +18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c +gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl +Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY +sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T +SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF +CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum +GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk +zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW +omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/keycert.passwd.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/keycert.passwd.pem new file mode 100644 index 0000000000..e90574881d --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/keycert.passwd.pem @@ -0,0 +1,33 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,1A8D9D2A02EC698A + 
+kJYbfZ8L0sfe9Oty3gw0aloNnY5E8fegRfQLZlNoxTl6jNt0nIwI8kDJ36CZgR9c +u3FDJm/KqrfUoz8vW+qEnWhSG7QPX2wWGPHd4K94Yz/FgrRzZ0DoK7XxXq9gOtVA +AVGQhnz32p+6WhfGsCr9ArXEwRZrTk/FvzEPaU5fHcoSkrNVAGX8IpSVkSDwEDQr +Gv17+cfk99UV1OCza6yKHoFkTtrC+PZU71LomBabivS2Oc4B9hYuSR2hF01wTHP+ +YlWNagZOOVtNz4oKK9x9eNQpmfQXQvPPTfusexKIbKfZrMvJoxcm1gfcZ0H/wK6P +6wmXSG35qMOOztCZNtperjs1wzEBXznyK8QmLcAJBjkfarABJX9vBEzZV0OUKhy+ +noORFwHTllphbmydLhu6ehLUZMHPhzAS5UN7srtpSN81eerDMy0RMUAwA7/PofX1 +94Me85Q8jP0PC9ETdsJcPqLzAPETEYu0ELewKRcrdyWi+tlLFrpE5KT/s5ecbl9l +7B61U4Kfd1PIXc/siINhU3A3bYK+845YyUArUOnKf1kEox7p1RpD7yFqVT04lRTo +cibNKATBusXSuBrp2G6GNuhWEOSafWCKJQAzgCYIp6ZTV2khhMUGppc/2H3CF6cO +zX0KtlPVZC7hLkB6HT8SxYUwF1zqWY7+/XPPdc37MeEZ87Q3UuZwqORLY+Z0hpgt +L5JXBCoklZhCAaN2GqwFLXtGiRSRFGY7xXIhbDTlE65Wv1WGGgDLMKGE1gOz3yAo +2jjG1+yAHJUdE69XTFHSqSkvaloA1W03LdMXZ9VuQJ/ySXCie6ABAQ== +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/keycert.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/keycert.pem new file mode 100644 index 0000000000..64318aa2e0 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/keycert.pem @@ -0,0 +1,31 @@ +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 
+6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/keycert2.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/keycert2.pem new file mode 100644 index 0000000000..e8a9e082b3 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/keycert2.pem @@ -0,0 +1,31 @@ +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAJnsJZVrppL+W5I9 +zGQrrawWwE5QJpBK9nWw17mXrZ03R1cD9BamLGivVISbPlRlAVnZBEyh1ATpsB7d +CUQ+WHEvALquvx4+Yw5l+fXeiYRjrLRBYZuVy8yNtXzU3iWcGObcYRkUdiXdOyP7 +sLF2YZHRvQZpzgDBKkrraeQ81w21AgMBAAECgYBEm7n07FMHWlE+0kT0sXNsLYfy +YE+QKZnJw9WkaDN+zFEEPELkhZVt5BjsMraJr6v2fIEqF0gGGJPkbenffVq2B5dC +lWUOxvJHufMK4sM3Cp6s/gOp3LP+QkzVnvJSfAyZU6l+4PGX5pLdUsXYjPxgzjzL +S36tF7/2Uv1WePyLUQJBAMsPhYzUXOPRgmbhcJiqi9A9c3GO8kvSDYTCKt3VMnqz +HBn6MQ4VQasCD1F+7jWTI0FU/3vdw8non/Fj8hhYqZcCQQDCDRdvmZqDiZnpMqDq +L6ZSrLTVtMvZXZbgwForaAD9uHj51TME7+eYT7EG2YCgJTXJ4YvRJEnPNyskwdKt +vTSTAkEAtaaN/vyemEJ82BIGStwONNw0ILsSr5cZ9tBHzqiA/tipY+e36HRFiXhP +QcU9zXlxyWkDH8iz9DSAmE2jbfoqwwJANlMJ65E543cjIlitGcKLMnvtCCLcKpb7 +xSG0XJB6Lo11OKPJ66jp0gcFTSCY1Lx2CXVd+gfJrfwI1Pp562+bhwJBAJ9IfDPU +R8OpO9v1SGd8x33Owm7uXOpB9d63/T70AD1QOXjKUC4eXYbt0WWfWuny/RNPRuyh +w7DXSfUF+kPKolU= +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICXTCCAcagAwIBAgIJAIO3upAG445fMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xFTATBgNVBAMTDGZha2Vob3N0bmFtZTAeFw0x +MDEwMDkxNTAxMDBaFw0yMDEwMDYxNTAxMDBaMGIxCzAJBgNVBAYTAlhZMRcwFQYD +VQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZv +dW5kYXRpb24xFTATBgNVBAMTDGZha2Vob3N0bmFtZTCBnzANBgkqhkiG9w0BAQEF +AAOBjQAwgYkCgYEAmewllWumkv5bkj3MZCutrBbATlAmkEr2dbDXuZetnTdHVwP0 +FqYsaK9UhJs+VGUBWdkETKHUBOmwHt0JRD5YcS8Auq6/Hj5jDmX59d6JhGOstEFh +m5XLzI21fNTeJZwY5txhGRR2Jd07I/uwsXZhkdG9BmnOAMEqSutp5DzXDbUCAwEA +AaMbMBkwFwYDVR0RBBAwDoIMZmFrZWhvc3RuYW1lMA0GCSqGSIb3DQEBBQUAA4GB +AH+iMClLLGSaKWgwXsmdVo4FhTZZHo8Uprrtg3N9FxEeE50btpDVQysgRt5ias3K +m+bME9zbKwvbVWD5zZdjus4pDgzwF/iHyccL8JyYhxOvS/9zmvAtFXj/APIIbZFp +IT75d9f88ScIGEtknZQejnrdhB64tYki/EqluiuKBqKD +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/nokia.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/nokia.pem new file mode 100644 index 0000000000..0d044df436 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/nokia.pem @@ -0,0 +1,31 @@ +# Certificate for projects.developer.nokia.com:443 (see issue 13034) +-----BEGIN CERTIFICATE----- +MIIFLDCCBBSgAwIBAgIQLubqdkCgdc7lAF9NfHlUmjANBgkqhkiG9w0BAQUFADCB +vDELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTswOQYDVQQLEzJUZXJtcyBvZiB1c2Ug +YXQgaHR0cHM6Ly93d3cudmVyaXNpZ24uY29tL3JwYSAoYykxMDE2MDQGA1UEAxMt +VmVyaVNpZ24gQ2xhc3MgMyBJbnRlcm5hdGlvbmFsIFNlcnZlciBDQSAtIEczMB4X +DTExMDkyMTAwMDAwMFoXDTEyMDkyMDIzNTk1OVowcTELMAkGA1UEBhMCRkkxDjAM +BgNVBAgTBUVzcG9vMQ4wDAYDVQQHFAVFc3BvbzEOMAwGA1UEChQFTm9raWExCzAJ +BgNVBAsUAkJJMSUwIwYDVQQDFBxwcm9qZWN0cy5kZXZlbG9wZXIubm9raWEuY29t +MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCr92w1bpHYSYxUEx8N/8Iddda2 
+lYi+aXNtQfV/l2Fw9Ykv3Ipw4nLeGTj18FFlAZgMdPRlgrzF/NNXGw/9l3/qKdow +CypkQf8lLaxb9Ze1E/KKmkRJa48QTOqvo6GqKuTI6HCeGlG1RxDb8YSKcQWLiytn +yj3Wp4MgRQO266xmMQIDAQABo4IB9jCCAfIwQQYDVR0RBDowOIIccHJvamVjdHMu +ZGV2ZWxvcGVyLm5va2lhLmNvbYIYcHJvamVjdHMuZm9ydW0ubm9raWEuY29tMAkG +A1UdEwQCMAAwCwYDVR0PBAQDAgWgMEEGA1UdHwQ6MDgwNqA0oDKGMGh0dHA6Ly9T +VlJJbnRsLUczLWNybC52ZXJpc2lnbi5jb20vU1ZSSW50bEczLmNybDBEBgNVHSAE +PTA7MDkGC2CGSAGG+EUBBxcDMCowKAYIKwYBBQUHAgEWHGh0dHBzOi8vd3d3LnZl +cmlzaWduLmNvbS9ycGEwKAYDVR0lBCEwHwYJYIZIAYb4QgQBBggrBgEFBQcDAQYI +KwYBBQUHAwIwcgYIKwYBBQUHAQEEZjBkMCQGCCsGAQUFBzABhhhodHRwOi8vb2Nz +cC52ZXJpc2lnbi5jb20wPAYIKwYBBQUHMAKGMGh0dHA6Ly9TVlJJbnRsLUczLWFp +YS52ZXJpc2lnbi5jb20vU1ZSSW50bEczLmNlcjBuBggrBgEFBQcBDARiMGChXqBc +MFowWDBWFglpbWFnZS9naWYwITAfMAcGBSsOAwIaBBRLa7kolgYMu9BSOJsprEsH +iyEFGDAmFiRodHRwOi8vbG9nby52ZXJpc2lnbi5jb20vdnNsb2dvMS5naWYwDQYJ +KoZIhvcNAQEFBQADggEBACQuPyIJqXwUyFRWw9x5yDXgMW4zYFopQYOw/ItRY522 +O5BsySTh56BWS6mQB07XVfxmYUGAvRQDA5QHpmY8jIlNwSmN3s8RKo+fAtiNRlcL +x/mWSfuMs3D/S6ev3D6+dpEMZtjrhOdctsarMKp8n/hPbwhAbg5hVjpkW5n8vz2y +0KxvvkA1AxpLwpVv7OlK17ttzIHw8bp9HTlHBU5s8bKz4a565V/a5HI0CSEv/+0y +ko4/ghTnZc1CkmUngKKeFMSah/mT/xAh8XnE2l1AazFa8UKuYki1e+ArHaGZc4ix +UYOtiRphwfuYQhRZ7qX9q2MMkCMI65XNK/SaFrAbbG0= +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/nullbytecert.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/nullbytecert.pem new file mode 100644 index 0000000000..447186c950 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/nullbytecert.pem @@ -0,0 +1,90 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 0 (0x0) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev@python.org + Validity + Not Before: Aug 7 13:11:52 2013 GMT + Not After : Aug 7 13:12:52 2013 GMT + Subject: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev@python.org + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b5:ea:ed:c9:fb:46:7d:6f:3b:76:80:dd:3a:f3: + 03:94:0b:a7:a6:db:ec:1d:df:ff:23:74:08:9d:97: + 16:3f:a3:a4:7b:3e:1b:0e:96:59:25:03:a7:26:e2: + 88:a9:cf:79:cd:f7:04:56:b0:ab:79:32:6e:59:c1: + 32:30:54:eb:58:a8:cb:91:f0:42:a5:64:27:cb:d4: + 56:31:88:52:ad:cf:bd:7f:f0:06:64:1f:cc:27:b8: + a3:8b:8c:f3:d8:29:1f:25:0b:f5:46:06:1b:ca:02: + 45:ad:7b:76:0a:9c:bf:bb:b9:ae:0d:16:ab:60:75: + ae:06:3e:9c:7c:31:dc:92:2f:29:1a:e0:4b:0c:91: + 90:6c:e9:37:c5:90:d7:2a:d7:97:15:a3:80:8f:5d: + 7b:49:8f:54:30:d4:97:2c:1c:5b:37:b5:ab:69:30: + 68:43:d3:33:78:4b:02:60:f5:3c:44:80:a1:8f:e7: + f0:0f:d1:5e:87:9e:46:cf:62:fc:f9:bf:0c:65:12: + f1:93:c8:35:79:3f:c8:ec:ec:47:f5:ef:be:44:d5: + ae:82:1e:2d:9a:9f:98:5a:67:65:e1:74:70:7c:cb: + d3:c2:ce:0e:45:49:27:dc:e3:2d:d4:fb:48:0e:2f: + 9e:77:b8:14:46:c0:c4:36:ca:02:ae:6a:91:8c:da: + 2f:85 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Subject Key Identifier: + 88:5A:55:C0:52:FF:61:CD:52:A3:35:0F:EA:5A:9C:24:38:22:F7:5C + X509v3 Key Usage: + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Subject Alternative Name: + ************************************************************* + WARNING: The values for DNS, email and URI are WRONG. OpenSSL + doesn't print the text after a NULL byte. 
+ ************************************************************* + DNS:altnull.python.org, email:null@python.org, URI:http://null.python.org, IP Address:192.0.2.1, IP Address:2001:DB8:0:0:0:0:0:1 + Signature Algorithm: sha1WithRSAEncryption + ac:4f:45:ef:7d:49:a8:21:70:8e:88:59:3e:d4:36:42:70:f5: + a3:bd:8b:d7:a8:d0:58:f6:31:4a:b1:a4:a6:dd:6f:d9:e8:44: + 3c:b6:0a:71:d6:7f:b1:08:61:9d:60:ce:75:cf:77:0c:d2:37: + 86:02:8d:5e:5d:f9:0f:71:b4:16:a8:c1:3d:23:1c:f1:11:b3: + 56:6e:ca:d0:8d:34:94:e6:87:2a:99:f2:ae:ae:cc:c2:e8:86: + de:08:a8:7f:c5:05:fa:6f:81:a7:82:e6:d0:53:9d:34:f4:ac: + 3e:40:fe:89:57:7a:29:a4:91:7e:0b:c6:51:31:e5:10:2f:a4: + 60:76:cd:95:51:1a:be:8b:a1:b0:fd:ad:52:bd:d7:1b:87:60: + d2:31:c7:17:c4:18:4f:2d:08:25:a3:a7:4f:b7:92:ca:e2:f5: + 25:f1:54:75:81:9d:b3:3d:61:a2:f7:da:ed:e1:c6:6f:2c:60: + 1f:d8:6f:c5:92:05:ab:c9:09:62:49:a9:14:ad:55:11:cc:d6: + 4a:19:94:99:97:37:1d:81:5f:8b:cf:a3:a8:96:44:51:08:3d: + 0b:05:65:12:eb:b6:70:80:88:48:72:4f:c6:c2:da:cf:cd:8e: + 5b:ba:97:2f:60:b4:96:56:49:5e:3a:43:76:63:04:be:2a:f6: + c1:ca:a9:94 +-----BEGIN CERTIFICATE----- +MIIE2DCCA8CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBxTELMAkGA1UEBhMCVVMx +DzANBgNVBAgMBk9yZWdvbjESMBAGA1UEBwwJQmVhdmVydG9uMSMwIQYDVQQKDBpQ +eXRob24gU29mdHdhcmUgRm91bmRhdGlvbjEgMB4GA1UECwwXUHl0aG9uIENvcmUg +RGV2ZWxvcG1lbnQxJDAiBgNVBAMMG251bGwucHl0aG9uLm9yZwBleGFtcGxlLm9y +ZzEkMCIGCSqGSIb3DQEJARYVcHl0aG9uLWRldkBweXRob24ub3JnMB4XDTEzMDgw +NzEzMTE1MloXDTEzMDgwNzEzMTI1MlowgcUxCzAJBgNVBAYTAlVTMQ8wDQYDVQQI +DAZPcmVnb24xEjAQBgNVBAcMCUJlYXZlcnRvbjEjMCEGA1UECgwaUHl0aG9uIFNv +ZnR3YXJlIEZvdW5kYXRpb24xIDAeBgNVBAsMF1B5dGhvbiBDb3JlIERldmVsb3Bt +ZW50MSQwIgYDVQQDDBtudWxsLnB5dGhvbi5vcmcAZXhhbXBsZS5vcmcxJDAiBgkq +hkiG9w0BCQEWFXB5dGhvbi1kZXZAcHl0aG9uLm9yZzCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBALXq7cn7Rn1vO3aA3TrzA5QLp6bb7B3f/yN0CJ2XFj+j +pHs+Gw6WWSUDpybiiKnPec33BFawq3kyblnBMjBU61ioy5HwQqVkJ8vUVjGIUq3P +vX/wBmQfzCe4o4uM89gpHyUL9UYGG8oCRa17dgqcv7u5rg0Wq2B1rgY+nHwx3JIv +KRrgSwyRkGzpN8WQ1yrXlxWjgI9de0mPVDDUlywcWze1q2kwaEPTM3hLAmD1PESA +oY/n8A/RXoeeRs9i/Pm/DGUS8ZPINXk/yOzsR/XvvkTVroIeLZqfmFpnZeF0cHzL +08LODkVJJ9zjLdT7SA4vnne4FEbAxDbKAq5qkYzaL4UCAwEAAaOB0DCBzTAMBgNV +HRMBAf8EAjAAMB0GA1UdDgQWBBSIWlXAUv9hzVKjNQ/qWpwkOCL3XDALBgNVHQ8E +BAMCBeAwgZAGA1UdEQSBiDCBhYIeYWx0bnVsbC5weXRob24ub3JnAGV4YW1wbGUu +Y29tgSBudWxsQHB5dGhvbi5vcmcAdXNlckBleGFtcGxlLm9yZ4YpaHR0cDovL251 +bGwucHl0aG9uLm9yZwBodHRwOi8vZXhhbXBsZS5vcmeHBMAAAgGHECABDbgAAAAA +AAAAAAAAAAEwDQYJKoZIhvcNAQEFBQADggEBAKxPRe99SaghcI6IWT7UNkJw9aO9 +i9eo0Fj2MUqxpKbdb9noRDy2CnHWf7EIYZ1gznXPdwzSN4YCjV5d+Q9xtBaowT0j +HPERs1ZuytCNNJTmhyqZ8q6uzMLoht4IqH/FBfpvgaeC5tBTnTT0rD5A/olXeimk +kX4LxlEx5RAvpGB2zZVRGr6LobD9rVK91xuHYNIxxxfEGE8tCCWjp0+3ksri9SXx +VHWBnbM9YaL32u3hxm8sYB/Yb8WSBavJCWJJqRStVRHM1koZlJmXNx2BX4vPo6iW +RFEIPQsFZRLrtnCAiEhyT8bC2s/Njlu6ly9gtJZWSV46Q3ZjBL4q9sHKqZQ= +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/nullcert.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/nullcert.pem new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/pystone.py b/pype/modules/ftrack/python2_vendor/future/backports/test/pystone.py new file mode 100644 index 0000000000..7652027b48 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/pystone.py @@ -0,0 +1,272 @@ +#!/usr/bin/env python3 + +""" +"PYSTONE" Benchmark Program + +Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes) + +Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013. 
+ + Translated from ADA to C by Rick Richardson. + Every method to preserve ADA-likeness has been used, + at the expense of C-ness. + + Translated from C to Python by Guido van Rossum. + +Version History: + + Version 1.1 corrects two bugs in version 1.0: + + First, it leaked memory: in Proc1(), NextRecord ends + up having a pointer to itself. I have corrected this + by zapping NextRecord.PtrComp at the end of Proc1(). + + Second, Proc3() used the operator != to compare a + record to None. This is rather inefficient and not + true to the intention of the original benchmark (where + a pointer comparison to None is intended; the != + operator attempts to find a method __cmp__ to do value + comparison of the record). Version 1.1 runs 5-10 + percent faster than version 1.0, so benchmark figures + of different versions can't be compared directly. + +""" + +from __future__ import print_function + +from time import clock + +LOOPS = 50000 + +__version__ = "1.1" + +[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6) + +class Record(object): + + def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0, + IntComp = 0, StringComp = 0): + self.PtrComp = PtrComp + self.Discr = Discr + self.EnumComp = EnumComp + self.IntComp = IntComp + self.StringComp = StringComp + + def copy(self): + return Record(self.PtrComp, self.Discr, self.EnumComp, + self.IntComp, self.StringComp) + +TRUE = 1 +FALSE = 0 + +def main(loops=LOOPS): + benchtime, stones = pystones(loops) + print("Pystone(%s) time for %d passes = %g" % \ + (__version__, loops, benchtime)) + print("This machine benchmarks at %g pystones/second" % stones) + + +def pystones(loops=LOOPS): + return Proc0(loops) + +IntGlob = 0 +BoolGlob = FALSE +Char1Glob = '\0' +Char2Glob = '\0' +Array1Glob = [0]*51 +Array2Glob = [x[:] for x in [Array1Glob]*51] +PtrGlb = None +PtrGlbNext = None + +def Proc0(loops=LOOPS): + global IntGlob + global BoolGlob + global Char1Glob + global Char2Glob + global Array1Glob + global Array2Glob + global PtrGlb + global PtrGlbNext + + starttime = clock() + for i in range(loops): + pass + nulltime = clock() - starttime + + PtrGlbNext = Record() + PtrGlb = Record() + PtrGlb.PtrComp = PtrGlbNext + PtrGlb.Discr = Ident1 + PtrGlb.EnumComp = Ident3 + PtrGlb.IntComp = 40 + PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING" + String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING" + Array2Glob[8][7] = 10 + + starttime = clock() + + for i in range(loops): + Proc5() + Proc4() + IntLoc1 = 2 + IntLoc2 = 3 + String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING" + EnumLoc = Ident2 + BoolGlob = not Func2(String1Loc, String2Loc) + while IntLoc1 < IntLoc2: + IntLoc3 = 5 * IntLoc1 - IntLoc2 + IntLoc3 = Proc7(IntLoc1, IntLoc2) + IntLoc1 = IntLoc1 + 1 + Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3) + PtrGlb = Proc1(PtrGlb) + CharIndex = 'A' + while CharIndex <= Char2Glob: + if EnumLoc == Func1(CharIndex, 'C'): + EnumLoc = Proc6(Ident1) + CharIndex = chr(ord(CharIndex)+1) + IntLoc3 = IntLoc2 * IntLoc1 + IntLoc2 = IntLoc3 / IntLoc1 + IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1 + IntLoc1 = Proc2(IntLoc1) + + benchtime = clock() - starttime - nulltime + if benchtime == 0.0: + loopsPerBenchtime = 0.0 + else: + loopsPerBenchtime = (loops / benchtime) + return benchtime, loopsPerBenchtime + +def Proc1(PtrParIn): + PtrParIn.PtrComp = NextRecord = PtrGlb.copy() + PtrParIn.IntComp = 5 + NextRecord.IntComp = PtrParIn.IntComp + NextRecord.PtrComp = PtrParIn.PtrComp + NextRecord.PtrComp = Proc3(NextRecord.PtrComp) + if NextRecord.Discr == Ident1: + NextRecord.IntComp = 6 + 
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp) + NextRecord.PtrComp = PtrGlb.PtrComp + NextRecord.IntComp = Proc7(NextRecord.IntComp, 10) + else: + PtrParIn = NextRecord.copy() + NextRecord.PtrComp = None + return PtrParIn + +def Proc2(IntParIO): + IntLoc = IntParIO + 10 + while 1: + if Char1Glob == 'A': + IntLoc = IntLoc - 1 + IntParIO = IntLoc - IntGlob + EnumLoc = Ident1 + if EnumLoc == Ident1: + break + return IntParIO + +def Proc3(PtrParOut): + global IntGlob + + if PtrGlb is not None: + PtrParOut = PtrGlb.PtrComp + else: + IntGlob = 100 + PtrGlb.IntComp = Proc7(10, IntGlob) + return PtrParOut + +def Proc4(): + global Char2Glob + + BoolLoc = Char1Glob == 'A' + BoolLoc = BoolLoc or BoolGlob + Char2Glob = 'B' + +def Proc5(): + global Char1Glob + global BoolGlob + + Char1Glob = 'A' + BoolGlob = FALSE + +def Proc6(EnumParIn): + EnumParOut = EnumParIn + if not Func3(EnumParIn): + EnumParOut = Ident4 + if EnumParIn == Ident1: + EnumParOut = Ident1 + elif EnumParIn == Ident2: + if IntGlob > 100: + EnumParOut = Ident1 + else: + EnumParOut = Ident4 + elif EnumParIn == Ident3: + EnumParOut = Ident2 + elif EnumParIn == Ident4: + pass + elif EnumParIn == Ident5: + EnumParOut = Ident3 + return EnumParOut + +def Proc7(IntParI1, IntParI2): + IntLoc = IntParI1 + 2 + IntParOut = IntParI2 + IntLoc + return IntParOut + +def Proc8(Array1Par, Array2Par, IntParI1, IntParI2): + global IntGlob + + IntLoc = IntParI1 + 5 + Array1Par[IntLoc] = IntParI2 + Array1Par[IntLoc+1] = Array1Par[IntLoc] + Array1Par[IntLoc+30] = IntLoc + for IntIndex in range(IntLoc, IntLoc+2): + Array2Par[IntLoc][IntIndex] = IntLoc + Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1 + Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc] + IntGlob = 5 + +def Func1(CharPar1, CharPar2): + CharLoc1 = CharPar1 + CharLoc2 = CharLoc1 + if CharLoc2 != CharPar2: + return Ident1 + else: + return Ident2 + +def Func2(StrParI1, StrParI2): + IntLoc = 1 + while IntLoc <= 1: + if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1: + CharLoc = 'A' + IntLoc = IntLoc + 1 + if CharLoc >= 'W' and CharLoc <= 'Z': + IntLoc = 7 + if CharLoc == 'X': + return TRUE + else: + if StrParI1 > StrParI2: + IntLoc = IntLoc + 7 + return TRUE + else: + return FALSE + +def Func3(EnumParIn): + EnumLoc = EnumParIn + if EnumLoc == Ident3: return TRUE + return FALSE + +if __name__ == '__main__': + import sys + def error(msg): + print(msg, end=' ', file=sys.stderr) + print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr) + sys.exit(100) + nargs = len(sys.argv) - 1 + if nargs > 1: + error("%d arguments are too many;" % nargs) + elif nargs == 1: + try: loops = int(sys.argv[1]) + except ValueError: + error("Invalid argument %r;" % sys.argv[1]) + else: + loops = LOOPS + main(loops) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/sha256.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/sha256.pem new file mode 100644 index 0000000000..d3db4b85c0 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/sha256.pem @@ -0,0 +1,128 @@ +# Certificate chain for https://sha256.tbs-internet.com + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC +-----BEGIN CERTIFICATE----- +MIIGXDCCBUSgAwIBAgIRAKpVmHgg9nfCodAVwcP4siwwDQYJKoZIhvcNAQELBQAw 
+gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u +ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg +Q0EgU0dDMB4XDTEyMDEwNDAwMDAwMFoXDTE0MDIxNzIzNTk1OVowgcsxCzAJBgNV +BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV +BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM +VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS +c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKQIX/zdJcyxty0m +PM1XQSoSSifueS3AVcgqMsaIKS/u+rYzsv4hQ/qA6vLn5m5/ewUcZDj7zdi6rBVf +PaVNXJ6YinLX0tkaW8TEjeVuZG5yksGZlhCt1CJ1Ho9XLiLaP4uJ7MCoNUntpJ+E +LfrOdgsIj91kPmwjDJeztVcQCvKzhjVJA/KxdInc0JvOATn7rpaSmQI5bvIjufgo +qVsTPwVFzuUYULXBk7KxRT7MiEqnd5HvviNh0285QC478zl3v0I0Fb5El4yD3p49 +IthcRnxzMKc0UhU5ogi0SbONyBfm/mzONVfSxpM+MlyvZmJqrbuuLoEDzJD+t8PU +xSuzgbcCAwEAAaOCAj4wggI6MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf +2YIfMB0GA1UdDgQWBBT/qTGYdaj+f61c2IRFL/B1eEsM8DAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG +CisGAQQBgjcKAwMGCWCGSAGG+EIEATBLBgNVHSAERDBCMEAGCisGAQQB5TcCBAEw +MjAwBggrBgEFBQcCARYkaHR0cHM6Ly93d3cudGJzLWludGVybmV0LmNvbS9DQS9D +UFM0MG0GA1UdHwRmMGQwMqAwoC6GLGh0dHA6Ly9jcmwudGJzLWludGVybmV0LmNv +bS9UQlNYNTA5Q0FTR0MuY3JsMC6gLKAqhihodHRwOi8vY3JsLnRicy14NTA5LmNv +bS9UQlNYNTA5Q0FTR0MuY3JsMIGmBggrBgEFBQcBAQSBmTCBljA4BggrBgEFBQcw +AoYsaHR0cDovL2NydC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQVNHQy5jcnQw +NAYIKwYBBQUHMAKGKGh0dHA6Ly9jcnQudGJzLXg1MDkuY29tL1RCU1g1MDlDQVNH +Qy5jcnQwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLnRicy14NTA5LmNvbTA/BgNV +HREEODA2ghdzaGEyNTYudGJzLWludGVybmV0LmNvbYIbd3d3LnNoYTI1Ni50YnMt +aW50ZXJuZXQuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQA0pOuL8QvAa5yksTbGShzX +ABApagunUGoEydv4YJT1MXy9tTp7DrWaozZSlsqBxrYAXP1d9r2fuKbEniYHxaQ0 +UYaf1VSIlDo1yuC8wE7wxbHDIpQ/E5KAyxiaJ8obtDhFstWAPAH+UoGXq0kj2teN +21sFQ5dXgA95nldvVFsFhrRUNB6xXAcaj0VZFhttI0ZfQZmQwEI/P+N9Jr40OGun +aa+Dn0TMeUH4U20YntfLbu2nDcJcYfyurm+8/0Tr4HznLnedXu9pCPYj0TaddrgT +XO0oFiyy7qGaY6+qKh71yD64Y3ycCJ/HR9Wm39mjZYc9ezYwT4noP6r7Lk8YO7/q +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow +gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u +ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg +Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 +rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 +9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ +ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk +owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G +Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk +9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf +2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ 
+MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 +AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk +ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k +by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw +cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV +VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B +ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN +AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 +euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY +1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 +RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz +8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV +v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +-----END CERTIFICATE----- + 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT +AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 +ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 +4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 +2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh +alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv +u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW +xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p +XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd +tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX +BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov +L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN +AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO +rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd +FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM ++bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI +3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb ++M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +-----END CERTIFICATE----- + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD +VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu +dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 
+E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ +D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK +4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq +lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW +bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB +o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT +MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js +LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr +BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB +AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj +j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH +KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv +2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 +mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_cert.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_cert.pem new file mode 100644 index 0000000000..47a7d7e37e --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_cert.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX +-----END CERTIFICATE----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.passwd.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.passwd.pem new file mode 100644 index 0000000000..2524672e70 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.passwd.pem @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,1A8D9D2A02EC698A + +kJYbfZ8L0sfe9Oty3gw0aloNnY5E8fegRfQLZlNoxTl6jNt0nIwI8kDJ36CZgR9c +u3FDJm/KqrfUoz8vW+qEnWhSG7QPX2wWGPHd4K94Yz/FgrRzZ0DoK7XxXq9gOtVA +AVGQhnz32p+6WhfGsCr9ArXEwRZrTk/FvzEPaU5fHcoSkrNVAGX8IpSVkSDwEDQr +Gv17+cfk99UV1OCza6yKHoFkTtrC+PZU71LomBabivS2Oc4B9hYuSR2hF01wTHP+ +YlWNagZOOVtNz4oKK9x9eNQpmfQXQvPPTfusexKIbKfZrMvJoxcm1gfcZ0H/wK6P +6wmXSG35qMOOztCZNtperjs1wzEBXznyK8QmLcAJBjkfarABJX9vBEzZV0OUKhy+ +noORFwHTllphbmydLhu6ehLUZMHPhzAS5UN7srtpSN81eerDMy0RMUAwA7/PofX1 +94Me85Q8jP0PC9ETdsJcPqLzAPETEYu0ELewKRcrdyWi+tlLFrpE5KT/s5ecbl9l +7B61U4Kfd1PIXc/siINhU3A3bYK+845YyUArUOnKf1kEox7p1RpD7yFqVT04lRTo +cibNKATBusXSuBrp2G6GNuhWEOSafWCKJQAzgCYIp6ZTV2khhMUGppc/2H3CF6cO +zX0KtlPVZC7hLkB6HT8SxYUwF1zqWY7+/XPPdc37MeEZ87Q3UuZwqORLY+Z0hpgt +L5JXBCoklZhCAaN2GqwFLXtGiRSRFGY7xXIhbDTlE65Wv1WGGgDLMKGE1gOz3yAo +2jjG1+yAHJUdE69XTFHSqSkvaloA1W03LdMXZ9VuQJ/ySXCie6ABAQ== +-----END RSA PRIVATE KEY----- diff --git 
a/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.pem b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.pem new file mode 100644 index 0000000000..3fd3bbd54a --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_servers.py b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_servers.py new file mode 100644 index 0000000000..87a3fb8557 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/ssl_servers.py @@ -0,0 +1,207 @@ +from __future__ import absolute_import, division, print_function, unicode_literals +from future.builtins import filter, str +from future import utils +import os +import sys +import ssl +import pprint +import socket +from future.backports.urllib import parse as urllib_parse +from future.backports.http.server import (HTTPServer as _HTTPServer, + SimpleHTTPRequestHandler, BaseHTTPRequestHandler) +from future.backports.test import support +threading = support.import_module("threading") + +here = os.path.dirname(__file__) + +HOST = support.HOST +CERTFILE = os.path.join(here, 'keycert.pem') + +# This one's based on HTTPServer, which is based on SocketServer + +class HTTPSServer(_HTTPServer): + + def __init__(self, server_address, handler_class, context): + _HTTPServer.__init__(self, server_address, handler_class) + self.context = context + + def __str__(self): + return ('<%s %s:%s>' % + (self.__class__.__name__, + self.server_name, + self.server_port)) + + def get_request(self): + # override this to wrap socket with SSL + try: + sock, addr = self.socket.accept() + sslconn = self.context.wrap_socket(sock, server_side=True) + except socket.error as e: + # socket errors are silenced by the caller, print them here + if support.verbose: + sys.stderr.write("Got an error:\n%s\n" % e) + raise + return sslconn, addr + +class RootedHTTPRequestHandler(SimpleHTTPRequestHandler): + # need to override translate_path to get a known root, + # instead of using os.curdir, since the test could be + # run from anywhere + + server_version = "TestHTTPS/1.0" + root = here + # Avoid hanging when a request gets interrupted by the client + timeout = 5 + + def translate_path(self, path): + """Translate a /-separated PATH to the local filename syntax. + + Components that mean special things to the local file system + (e.g. drive or directory names) are ignored. (XXX They should + probably be diagnosed.) 
+ + """ + # abandon query parameters + path = urllib.parse.urlparse(path)[2] + path = os.path.normpath(urllib.parse.unquote(path)) + words = path.split('/') + words = filter(None, words) + path = self.root + for word in words: + drive, word = os.path.splitdrive(word) + head, word = os.path.split(word) + path = os.path.join(path, word) + return path + + def log_message(self, format, *args): + # we override this to suppress logging unless "verbose" + if support.verbose: + sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" % + (self.server.server_address, + self.server.server_port, + self.request.cipher(), + self.log_date_time_string(), + format%args)) + + +class StatsRequestHandler(BaseHTTPRequestHandler): + """Example HTTP request handler which returns SSL statistics on GET + requests. + """ + + server_version = "StatsHTTPS/1.0" + + def do_GET(self, send_body=True): + """Serve a GET request.""" + sock = self.rfile.raw._sock + context = sock.context + stats = { + 'session_cache': context.session_stats(), + 'cipher': sock.cipher(), + 'compression': sock.compression(), + } + body = pprint.pformat(stats) + body = body.encode('utf-8') + self.send_response(200) + self.send_header("Content-type", "text/plain; charset=utf-8") + self.send_header("Content-Length", str(len(body))) + self.end_headers() + if send_body: + self.wfile.write(body) + + def do_HEAD(self): + """Serve a HEAD request.""" + self.do_GET(send_body=False) + + def log_request(self, format, *args): + if support.verbose: + BaseHTTPRequestHandler.log_request(self, format, *args) + + +class HTTPSServerThread(threading.Thread): + + def __init__(self, context, host=HOST, handler_class=None): + self.flag = None + self.server = HTTPSServer((host, 0), + handler_class or RootedHTTPRequestHandler, + context) + self.port = self.server.server_port + threading.Thread.__init__(self) + self.daemon = True + + def __str__(self): + return "<%s %s>" % (self.__class__.__name__, self.server) + + def start(self, flag=None): + self.flag = flag + threading.Thread.start(self) + + def run(self): + if self.flag: + self.flag.set() + try: + self.server.serve_forever(0.05) + finally: + self.server.server_close() + + def stop(self): + self.server.shutdown() + + +def make_https_server(case, certfile=CERTFILE, host=HOST, handler_class=None): + # we assume the certfile contains both private key and certificate + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.load_cert_chain(certfile) + server = HTTPSServerThread(context, host, handler_class) + flag = threading.Event() + server.start(flag) + flag.wait() + def cleanup(): + if support.verbose: + sys.stdout.write('stopping HTTPS server\n') + server.stop() + if support.verbose: + sys.stdout.write('joining HTTPS thread\n') + server.join() + case.addCleanup(cleanup) + return server + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser( + description='Run a test HTTPS server. 
' + 'By default, the current directory is served.') + parser.add_argument('-p', '--port', type=int, default=4433, + help='port to listen on (default: %(default)s)') + parser.add_argument('-q', '--quiet', dest='verbose', default=True, + action='store_false', help='be less verbose') + parser.add_argument('-s', '--stats', dest='use_stats_handler', default=False, + action='store_true', help='always return stats page') + parser.add_argument('--curve-name', dest='curve_name', type=str, + action='store', + help='curve name for EC-based Diffie-Hellman') + parser.add_argument('--dh', dest='dh_file', type=str, action='store', + help='PEM file containing DH parameters') + args = parser.parse_args() + + support.verbose = args.verbose + if args.use_stats_handler: + handler_class = StatsRequestHandler + else: + handler_class = RootedHTTPRequestHandler + if utils.PY2: + handler_class.root = os.getcwdu() + else: + handler_class.root = os.getcwd() + context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) + context.load_cert_chain(CERTFILE) + if args.curve_name: + context.set_ecdh_curve(args.curve_name) + if args.dh_file: + context.load_dh_params(args.dh_file) + + server = HTTPSServer(("", args.port), handler_class, context) + if args.verbose: + print("Listening on https://localhost:{0.port}".format(args)) + server.serve_forever(0.1) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/test/support.py b/pype/modules/ftrack/python2_vendor/future/backports/test/support.py new file mode 100644 index 0000000000..1999e208fe --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/test/support.py @@ -0,0 +1,2048 @@ +# -*- coding: utf-8 -*- +"""Supporting definitions for the Python regression tests. + +Backported for python-future from Python 3.3 test/support.py. +""" + +from __future__ import (absolute_import, division, + print_function, unicode_literals) +from future import utils +from future.builtins import str, range, open, int, map, list + +import contextlib +import errno +import functools +import gc +import socket +import sys +import os +import platform +import shutil +import warnings +import unittest +# For Python 2.6 compatibility: +if not hasattr(unittest, 'skip'): + import unittest2 as unittest + +import importlib +# import collections.abc # not present on Py2.7 +import re +import subprocess +import imp +import time +try: + import sysconfig +except ImportError: + # sysconfig is not available on Python 2.6. 
Try using distutils.sysconfig instead: + from distutils import sysconfig +import fnmatch +import logging.handlers +import struct +import tempfile + +try: + if utils.PY3: + import _thread, threading + else: + import thread as _thread, threading +except ImportError: + _thread = None + threading = None +try: + import multiprocessing.process +except ImportError: + multiprocessing = None + +try: + import zlib +except ImportError: + zlib = None + +try: + import gzip +except ImportError: + gzip = None + +try: + import bz2 +except ImportError: + bz2 = None + +try: + import lzma +except ImportError: + lzma = None + +__all__ = [ + "Error", "TestFailed", "ResourceDenied", "import_module", "verbose", + "use_resources", "max_memuse", "record_original_stdout", + "get_original_stdout", "unload", "unlink", "rmtree", "forget", + "is_resource_enabled", "requires", "requires_freebsd_version", + "requires_linux_version", "requires_mac_ver", "find_unused_port", + "bind_port", "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD", + "temp_cwd", "findfile", "create_empty_file", "sortdict", + "check_syntax_error", "open_urlresource", "check_warnings", "CleanImport", + "EnvironmentVarGuard", "TransientResource", "captured_stdout", + "captured_stdin", "captured_stderr", "time_out", "socket_peer_reset", + "ioerror_peer_reset", "run_with_locale", 'temp_umask', + "transient_internet", "set_memlimit", "bigmemtest", "bigaddrspacetest", + "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup", + "threading_cleanup", "reap_children", "cpython_only", "check_impl_detail", + "get_attribute", "swap_item", "swap_attr", "requires_IEEE_754", + "TestHandler", "Matcher", "can_symlink", "skip_unless_symlink", + "skip_unless_xattr", "import_fresh_module", "requires_zlib", + "PIPE_MAX_SIZE", "failfast", "anticipate_failure", "run_with_tz", + "requires_gzip", "requires_bz2", "requires_lzma", "suppress_crash_popup", + ] + +class Error(Exception): + """Base class for regression test exceptions.""" + +class TestFailed(Error): + """Test failed.""" + +class ResourceDenied(unittest.SkipTest): + """Test skipped because it requested a disallowed resource. + + This is raised when a test calls requires() for a resource that + has not be enabled. It is used to distinguish between expected + and unexpected skips. + """ + +@contextlib.contextmanager +def _ignore_deprecated_imports(ignore=True): + """Context manager to suppress package and module deprecation + warnings when importing them. + + If ignore is False, this context manager has no effect.""" + if ignore: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", ".+ (module|package)", + DeprecationWarning) + yield + else: + yield + + +def import_module(name, deprecated=False): + """Import and return the module to be tested, raising SkipTest if + it is not available. + + If deprecated is True, any module or package deprecation messages + will be suppressed.""" + with _ignore_deprecated_imports(deprecated): + try: + return importlib.import_module(name) + except ImportError as msg: + raise unittest.SkipTest(str(msg)) + + +def _save_and_remove_module(name, orig_modules): + """Helper function to save and remove a module from sys.modules + + Raise ImportError if the module can't be imported. 
+ """ + # try to import the module and raise an error if it can't be imported + if name not in sys.modules: + __import__(name) + del sys.modules[name] + for modname in list(sys.modules): + if modname == name or modname.startswith(name + '.'): + orig_modules[modname] = sys.modules[modname] + del sys.modules[modname] + +def _save_and_block_module(name, orig_modules): + """Helper function to save and block a module in sys.modules + + Return True if the module was in sys.modules, False otherwise. + """ + saved = True + try: + orig_modules[name] = sys.modules[name] + except KeyError: + saved = False + sys.modules[name] = None + return saved + + +def anticipate_failure(condition): + """Decorator to mark a test that is known to be broken in some cases + + Any use of this decorator should have a comment identifying the + associated tracker issue. + """ + if condition: + return unittest.expectedFailure + return lambda f: f + + +def import_fresh_module(name, fresh=(), blocked=(), deprecated=False): + """Import and return a module, deliberately bypassing sys.modules. + This function imports and returns a fresh copy of the named Python module + by removing the named module from sys.modules before doing the import. + Note that unlike reload, the original module is not affected by + this operation. + + *fresh* is an iterable of additional module names that are also removed + from the sys.modules cache before doing the import. + + *blocked* is an iterable of module names that are replaced with None + in the module cache during the import to ensure that attempts to import + them raise ImportError. + + The named module and any modules named in the *fresh* and *blocked* + parameters are saved before starting the import and then reinserted into + sys.modules when the fresh import is complete. + + Module and package deprecation messages are suppressed during this import + if *deprecated* is True. + + This function will raise ImportError if the named module cannot be + imported. + + If deprecated is True, any module or package deprecation messages + will be suppressed. + """ + # NOTE: test_heapq, test_json and test_warnings include extra sanity checks + # to make sure that this utility function is working as expected + with _ignore_deprecated_imports(deprecated): + # Keep track of modules saved for later restoration as well + # as those which just need a blocking entry removed + orig_modules = {} + names_to_remove = [] + _save_and_remove_module(name, orig_modules) + try: + for fresh_name in fresh: + _save_and_remove_module(fresh_name, orig_modules) + for blocked_name in blocked: + if not _save_and_block_module(blocked_name, orig_modules): + names_to_remove.append(blocked_name) + fresh_module = importlib.import_module(name) + except ImportError: + fresh_module = None + finally: + for orig_name, module in orig_modules.items(): + sys.modules[orig_name] = module + for name_to_remove in names_to_remove: + del sys.modules[name_to_remove] + return fresh_module + + +def get_attribute(obj, name): + """Get an attribute, raising SkipTest if AttributeError is raised.""" + try: + attribute = getattr(obj, name) + except AttributeError: + raise unittest.SkipTest("object %r has no attribute %r" % (obj, name)) + else: + return attribute + +verbose = 1 # Flag set to 0 by regrtest.py +use_resources = None # Flag set to [] by regrtest.py +max_memuse = 0 # Disable bigmem tests (they will still be run with + # small sizes, to make sure they work.) 
+real_max_memuse = 0 +failfast = False +match_tests = None + +# _original_stdout is meant to hold stdout at the time regrtest began. +# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever. +# The point is to have some flavor of stdout the user can actually see. +_original_stdout = None +def record_original_stdout(stdout): + global _original_stdout + _original_stdout = stdout + +def get_original_stdout(): + return _original_stdout or sys.stdout + +def unload(name): + try: + del sys.modules[name] + except KeyError: + pass + +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Perform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7@4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existence of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. + L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + +def unlink(filename): + try: + _unlink(filename) + except OSError as error: + # The filename need not exist. + if error.errno not in (errno.ENOENT, errno.ENOTDIR): + raise + +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + +def rmtree(path): + try: + _rmtree(path) + except OSError as error: + if error.errno != errno.ENOENT: + raise + +def make_legacy_pyc(source): + """Move a PEP 3147 pyc/pyo file to its legacy pyc/pyo location. + + The choice of .pyc or .pyo extension is done based on the __debug__ flag + value. + + :param source: The file system path to the source file. The source file + does not need to exist, however the PEP 3147 pyc file must exist. + :return: The file system path to the legacy pyc file. + """ + pyc_file = imp.cache_from_source(source) + up_one = os.path.dirname(os.path.abspath(source)) + legacy_pyc = os.path.join(up_one, source + ('c' if __debug__ else 'o')) + os.rename(pyc_file, legacy_pyc) + return legacy_pyc + +def forget(modname): + """'Forget' a module was ever imported. + + This removes the module from sys.modules and deletes any PEP 3147 or + legacy .pyc and .pyo files. 
+ """ + unload(modname) + for dirname in sys.path: + source = os.path.join(dirname, modname + '.py') + # It doesn't matter if they exist or not, unlink all possible + # combinations of PEP 3147 and legacy pyc and pyo files. + unlink(source + 'c') + unlink(source + 'o') + unlink(imp.cache_from_source(source, debug_override=True)) + unlink(imp.cache_from_source(source, debug_override=False)) + +# On some platforms, should not run gui test even if it is allowed +# in `use_resources'. +if sys.platform.startswith('win'): + import ctypes + import ctypes.wintypes + def _is_gui_available(): + UOI_FLAGS = 1 + WSF_VISIBLE = 0x0001 + class USEROBJECTFLAGS(ctypes.Structure): + _fields_ = [("fInherit", ctypes.wintypes.BOOL), + ("fReserved", ctypes.wintypes.BOOL), + ("dwFlags", ctypes.wintypes.DWORD)] + dll = ctypes.windll.user32 + h = dll.GetProcessWindowStation() + if not h: + raise ctypes.WinError() + uof = USEROBJECTFLAGS() + needed = ctypes.wintypes.DWORD() + res = dll.GetUserObjectInformationW(h, + UOI_FLAGS, + ctypes.byref(uof), + ctypes.sizeof(uof), + ctypes.byref(needed)) + if not res: + raise ctypes.WinError() + return bool(uof.dwFlags & WSF_VISIBLE) +else: + def _is_gui_available(): + return True + +def is_resource_enabled(resource): + """Test whether a resource is enabled. Known resources are set by + regrtest.py.""" + return use_resources is not None and resource in use_resources + +def requires(resource, msg=None): + """Raise ResourceDenied if the specified resource is not available. + + If the caller's module is __main__ then automatically return True. The + possibility of False being returned occurs when regrtest.py is + executing. + """ + if resource == 'gui' and not _is_gui_available(): + raise unittest.SkipTest("Cannot use the 'gui' resource") + # see if the caller's module is __main__ - if so, treat as if + # the resource was set + if sys._getframe(1).f_globals.get("__name__") == "__main__": + return + if not is_resource_enabled(resource): + if msg is None: + msg = "Use of the %r resource not enabled" % resource + raise ResourceDenied(msg) + +def _requires_unix_version(sysname, min_version): + """Decorator raising SkipTest if the OS is `sysname` and the version is less + than `min_version`. + + For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if + the FreeBSD version is less than 7.2. + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kw): + if platform.system() == sysname: + version_txt = platform.release().split('-', 1)[0] + try: + version = tuple(map(int, version_txt.split('.'))) + except ValueError: + pass + else: + if version < min_version: + min_version_txt = '.'.join(map(str, min_version)) + raise unittest.SkipTest( + "%s version %s or higher required, not %s" + % (sysname, min_version_txt, version_txt)) + return func(*args, **kw) + wrapper.min_version = min_version + return wrapper + return decorator + +def requires_freebsd_version(*min_version): + """Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is + less than `min_version`. + + For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD + version is less than 7.2. + """ + return _requires_unix_version('FreeBSD', min_version) + +def requires_linux_version(*min_version): + """Decorator raising SkipTest if the OS is Linux and the Linux version is + less than `min_version`. + + For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux + version is less than 2.6.32. 
+ """ + return _requires_unix_version('Linux', min_version) + +def requires_mac_ver(*min_version): + """Decorator raising SkipTest if the OS is Mac OS X and the OS X + version if less than min_version. + + For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version + is lesser than 10.5. + """ + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kw): + if sys.platform == 'darwin': + version_txt = platform.mac_ver()[0] + try: + version = tuple(map(int, version_txt.split('.'))) + except ValueError: + pass + else: + if version < min_version: + min_version_txt = '.'.join(map(str, min_version)) + raise unittest.SkipTest( + "Mac OS X %s or higher required, not %s" + % (min_version_txt, version_txt)) + return func(*args, **kw) + wrapper.min_version = min_version + return wrapper + return decorator + +# Don't use "localhost", since resolving it uses the DNS under recent +# Windows versions (see issue #18792). +HOST = "127.0.0.1" +HOSTv6 = "::1" + + +def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM): + """Returns an unused port that should be suitable for binding. This is + achieved by creating a temporary socket with the same family and type as + the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to + the specified host address (defaults to 0.0.0.0) with the port set to 0, + eliciting an unused ephemeral port from the OS. The temporary socket is + then closed and deleted, and the ephemeral port is returned. + + Either this method or bind_port() should be used for any tests where a + server socket needs to be bound to a particular port for the duration of + the test. Which one to use depends on whether the calling code is creating + a python socket, or if an unused port needs to be provided in a constructor + or passed to an external program (i.e. the -accept argument to openssl's + s_server mode). Always prefer bind_port() over find_unused_port() where + possible. Hard coded ports should *NEVER* be used. As soon as a server + socket is bound to a hard coded port, the ability to run multiple instances + of the test simultaneously on the same host is compromised, which makes the + test a ticking time bomb in a buildbot environment. On Unix buildbots, this + may simply manifest as a failed test, which can be recovered from without + intervention in most cases, but on Windows, the entire python process can + completely and utterly wedge, requiring someone to log in to the buildbot + and manually kill the affected process. + + (This is easy to reproduce on Windows, unfortunately, and can be traced to + the SO_REUSEADDR socket option having different semantics on Windows versus + Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind, + listen and then accept connections on identical host/ports. An EADDRINUSE + socket.error will be raised at some point (depending on the platform and + the order bind and listen were called on each socket). + + However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE + will ever be raised when attempting to bind two identical host/ports. When + accept() is called on each socket, the second caller's process will steal + the port from the first caller, leaving them both in an awkwardly wedged + state where they'll no longer respond to any signals or graceful kills, and + must be forcibly killed via OpenProcess()/TerminateProcess(). 
+ + The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option + instead of SO_REUSEADDR, which effectively affords the same semantics as + SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open + Source world compared to Windows ones, this is a common mistake. A quick + look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when + openssl.exe is called with the 's_server' option, for example. See + http://bugs.python.org/issue2550 for more info. The following site also + has a very thorough description about the implications of both REUSEADDR + and EXCLUSIVEADDRUSE on Windows: + http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx) + + XXX: although this approach is a vast improvement on previous attempts to + elicit unused ports, it rests heavily on the assumption that the ephemeral + port returned to us by the OS won't immediately be dished back out to some + other process when we close and delete our temporary socket but before our + calling code has a chance to bind the returned port. We can deal with this + issue if/when we come across it. + """ + + tempsock = socket.socket(family, socktype) + port = bind_port(tempsock) + tempsock.close() + del tempsock + return port + +def bind_port(sock, host=HOST): + """Bind the socket to a free port and return the port number. Relies on + ephemeral ports in order to ensure we are using an unbound port. This is + important as many tests may be running simultaneously, especially in a + buildbot environment. This method raises an exception if the sock.family + is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR + or SO_REUSEPORT set on it. Tests should *never* set these socket options + for TCP/IP sockets. The only case for setting these options is testing + multicasting via multiple UDP sockets. + + Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e. + on Windows), it will be set on the socket. This will prevent anyone else + from bind()'ing to our host/port for the duration of the test. + """ + + if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM: + if hasattr(socket, 'SO_REUSEADDR'): + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1: + raise TestFailed("tests should never set the SO_REUSEADDR " \ + "socket option on TCP/IP sockets!") + if hasattr(socket, 'SO_REUSEPORT'): + try: + if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1: + raise TestFailed("tests should never set the SO_REUSEPORT " \ + "socket option on TCP/IP sockets!") + except socket.error: + # Python's socket module was compiled using modern headers + # thus defining SO_REUSEPORT but this process is running + # under an older kernel that does not support SO_REUSEPORT. + pass + if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1) + + sock.bind((host, 0)) + port = sock.getsockname()[1] + return port + +def _is_ipv6_enabled(): + """Check whether IPv6 is enabled on this host.""" + if socket.has_ipv6: + sock = None + try: + sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + sock.bind(('::1', 0)) + return True + except (socket.error, socket.gaierror): + pass + finally: + if sock: + sock.close() + return False + +IPV6_ENABLED = _is_ipv6_enabled() + + +# A constant likely larger than the underlying OS pipe buffer size, to +# make writes blocking. 
+# Windows limit seems to be around 512 B, and many Unix kernels have a +# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure. +# (see issue #17835 for a discussion of this number). +PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1 + +# A constant likely larger than the underlying OS socket buffer size, to make +# writes blocking. +# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl +# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643 +# for a discussion of this number). +SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1 + +# # decorator for skipping tests on non-IEEE 754 platforms +# requires_IEEE_754 = unittest.skipUnless( +# float.__getformat__("double").startswith("IEEE"), +# "test requires IEEE 754 doubles") + +requires_zlib = unittest.skipUnless(zlib, 'requires zlib') + +requires_bz2 = unittest.skipUnless(bz2, 'requires bz2') + +requires_lzma = unittest.skipUnless(lzma, 'requires lzma') + +is_jython = sys.platform.startswith('java') + +# Filename used for testing +if os.name == 'java': + # Jython disallows @ in module names + TESTFN = '$test' +else: + TESTFN = '@test' + +# Disambiguate TESTFN for parallel testing, while letting it remain a valid +# module name. +TESTFN = "{0}_{1}_tmp".format(TESTFN, os.getpid()) + +# # FS_NONASCII: non-ASCII character encodable by os.fsencode(), +# # or None if there is no such character. +# FS_NONASCII = None +# for character in ( +# # First try printable and common characters to have a readable filename. +# # For each character, the encoding list are just example of encodings able +# # to encode the character (the list is not exhaustive). +# +# # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1 +# '\u00E6', +# # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3 +# '\u0130', +# # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257 +# '\u0141', +# # U+03C6 (Greek Small Letter Phi): cp1253 +# '\u03C6', +# # U+041A (Cyrillic Capital Letter Ka): cp1251 +# '\u041A', +# # U+05D0 (Hebrew Letter Alef): Encodable to cp424 +# '\u05D0', +# # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic +# '\u060C', +# # U+062A (Arabic Letter Teh): cp720 +# '\u062A', +# # U+0E01 (Thai Character Ko Kai): cp874 +# '\u0E01', +# +# # Then try more "special" characters. "special" because they may be +# # interpreted or displayed differently depending on the exact locale +# # encoding and the font. +# +# # U+00A0 (No-Break Space) +# '\u00A0', +# # U+20AC (Euro Sign) +# '\u20AC', +# ): +# try: +# os.fsdecode(os.fsencode(character)) +# except UnicodeError: +# pass +# else: +# FS_NONASCII = character +# break +# +# # TESTFN_UNICODE is a non-ascii filename +# TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f" +# if sys.platform == 'darwin': +# # In Mac OS X's VFS API file names are, by definition, canonically +# # decomposed Unicode, encoded using UTF-8. See QA1173: +# # http://developer.apple.com/mac/library/qa/qa2001/qa1173.html +# import unicodedata +# TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE) +# TESTFN_ENCODING = sys.getfilesystemencoding() +# +# # TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be +# # encoded by the filesystem encoding (in strict mode). It can be None if we +# # cannot generate such filename. 
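
The requires_zlib / requires_bz2 / requires_lzma names above are ordinary
unittest.skipUnless decorators; a hedged sketch of applying one (the class and
test names are illustrative, and the vendored import path is assumed):

    import unittest
    from future.backports.test import support

    class CompressionRoundTrip(unittest.TestCase):
        @support.requires_zlib  # skipped automatically when zlib is absent
        def test_roundtrip(self):
            import zlib
            data = b"x" * 1024
            self.assertEqual(zlib.decompress(zlib.compress(data)), data)

    if __name__ == "__main__":
        unittest.main()
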
+# TESTFN_UNENCODABLE = None +# if os.name in ('nt', 'ce'): +# # skip win32s (0) or Windows 9x/ME (1) +# if sys.getwindowsversion().platform >= 2: +# # Different kinds of characters from various languages to minimize the +# # probability that the whole name is encodable to MBCS (issue #9819) +# TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80" +# try: +# TESTFN_UNENCODABLE.encode(TESTFN_ENCODING) +# except UnicodeEncodeError: +# pass +# else: +# print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). ' +# 'Unicode filename tests may not be effective' +# % (TESTFN_UNENCODABLE, TESTFN_ENCODING)) +# TESTFN_UNENCODABLE = None +# # Mac OS X denies unencodable filenames (invalid utf-8) +# elif sys.platform != 'darwin': +# try: +# # ascii and utf-8 cannot encode the byte 0xff +# b'\xff'.decode(TESTFN_ENCODING) +# except UnicodeDecodeError: +# # 0xff will be encoded using the surrogate character u+DCFF +# TESTFN_UNENCODABLE = TESTFN \ +# + b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape') +# else: +# # File system encoding (eg. ISO-8859-* encodings) can encode +# # the byte 0xff. Skip some unicode filename tests. +# pass +# +# # TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be +# # decoded from the filesystem encoding (in strict mode). It can be None if we +# # cannot generate such filename (ex: the latin1 encoding can decode any byte +# # sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks +# # to the surrogateescape error handler (PEP 383), but not from the filesystem +# # encoding in strict mode. +# TESTFN_UNDECODABLE = None +# for name in ( +# # b'\xff' is not decodable by os.fsdecode() with code page 932. Windows +# # accepts it to create a file or a directory, or don't accept to enter to +# # such directory (when the bytes name is used). So test b'\xe7' first: it is +# # not decodable from cp932. +# b'\xe7w\xf0', +# # undecodable from ASCII, UTF-8 +# b'\xff', +# # undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856 +# # and cp857 +# b'\xae\xd5' +# # undecodable from UTF-8 (UNIX and Mac OS X) +# b'\xed\xb2\x80', b'\xed\xb4\x80', +# # undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252, +# # cp1253, cp1254, cp1255, cp1257, cp1258 +# b'\x81\x98', +# ): +# try: +# name.decode(TESTFN_ENCODING) +# except UnicodeDecodeError: +# TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name +# break +# +# if FS_NONASCII: +# TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII +# else: +# TESTFN_NONASCII = None + +# Save the initial cwd +SAVEDCWD = os.getcwd() + +@contextlib.contextmanager +def temp_cwd(name='tempcwd', quiet=False, path=None): + """ + Context manager that temporarily changes the CWD. + + An existing path may be provided as *path*, in which case this + function makes no changes to the file system. + + Otherwise, the new CWD is created in the current directory and it's + named *name*. If *quiet* is False (default) and it's not possible to + create or change the CWD, an error is raised. If it's True, only a + warning is raised and the original CWD is used. 
+ """ + saved_dir = os.getcwd() + is_temporary = False + if path is None: + path = name + try: + os.mkdir(name) + is_temporary = True + except OSError: + if not quiet: + raise + warnings.warn('tests may fail, unable to create temp CWD ' + name, + RuntimeWarning, stacklevel=3) + try: + os.chdir(path) + except OSError: + if not quiet: + raise + warnings.warn('tests may fail, unable to change the CWD to ' + path, + RuntimeWarning, stacklevel=3) + try: + yield os.getcwd() + finally: + os.chdir(saved_dir) + if is_temporary: + rmtree(name) + + +if hasattr(os, "umask"): + @contextlib.contextmanager + def temp_umask(umask): + """Context manager that temporarily sets the process umask.""" + oldmask = os.umask(umask) + try: + yield + finally: + os.umask(oldmask) + + +def findfile(file, here=__file__, subdir=None): + """Try to find a file on sys.path and the working directory. If it is not + found the argument passed to the function is returned (this does not + necessarily signal failure; could still be the legitimate path).""" + if os.path.isabs(file): + return file + if subdir is not None: + file = os.path.join(subdir, file) + path = sys.path + path = [os.path.dirname(here)] + path + for dn in path: + fn = os.path.join(dn, file) + if os.path.exists(fn): return fn + return file + +def create_empty_file(filename): + """Create an empty file. If the file already exists, truncate it.""" + fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) + os.close(fd) + +def sortdict(dict): + "Like repr(dict), but in sorted order." + items = sorted(dict.items()) + reprpairs = ["%r: %r" % pair for pair in items] + withcommas = ", ".join(reprpairs) + return "{%s}" % withcommas + +def make_bad_fd(): + """ + Create an invalid file descriptor by opening and closing a file and return + its fd. + """ + file = open(TESTFN, "wb") + try: + return file.fileno() + finally: + file.close() + unlink(TESTFN) + +def check_syntax_error(testcase, statement): + testcase.assertRaises(SyntaxError, compile, statement, + '', 'exec') + +def open_urlresource(url, *args, **kw): + from future.backports.urllib import (request as urllib_request, + parse as urllib_parse) + + check = kw.pop('check', None) + + filename = urllib_parse.urlparse(url)[2].split('/')[-1] # '/': it's URL! + + fn = os.path.join(os.path.dirname(__file__), "data", filename) + + def check_valid_file(fn): + f = open(fn, *args, **kw) + if check is None: + return f + elif check(f): + f.seek(0) + return f + f.close() + + if os.path.exists(fn): + f = check_valid_file(fn) + if f is not None: + return f + unlink(fn) + + # Verify the requirement before downloading the file + requires('urlfetch') + + print('\tfetching %s ...' % url, file=get_original_stdout()) + f = urllib_request.urlopen(url, timeout=15) + try: + with open(fn, "wb") as out: + s = f.read() + while s: + out.write(s) + s = f.read() + finally: + f.close() + + f = check_valid_file(fn) + if f is not None: + return f + raise TestFailed('invalid resource %r' % fn) + + +class WarningsRecorder(object): + """Convenience wrapper for the warnings list returned on + entry to the warnings.catch_warnings() context manager. 
+ """ + def __init__(self, warnings_list): + self._warnings = warnings_list + self._last = 0 + + def __getattr__(self, attr): + if len(self._warnings) > self._last: + return getattr(self._warnings[-1], attr) + elif attr in warnings.WarningMessage._WARNING_DETAILS: + return None + raise AttributeError("%r has no attribute %r" % (self, attr)) + + @property + def warnings(self): + return self._warnings[self._last:] + + def reset(self): + self._last = len(self._warnings) + + +def _filterwarnings(filters, quiet=False): + """Catch the warnings, then check if all the expected + warnings have been raised and re-raise unexpected warnings. + If 'quiet' is True, only re-raise the unexpected warnings. + """ + # Clear the warning registry of the calling module + # in order to re-raise the warnings. + frame = sys._getframe(2) + registry = frame.f_globals.get('__warningregistry__') + if registry: + if utils.PY3: + registry.clear() + else: + # Py2-compatible: + for i in range(len(registry)): + registry.pop() + with warnings.catch_warnings(record=True) as w: + # Set filter "always" to record all warnings. Because + # test_warnings swap the module, we need to look up in + # the sys.modules dictionary. + sys.modules['warnings'].simplefilter("always") + yield WarningsRecorder(w) + # Filter the recorded warnings + reraise = list(w) + missing = [] + for msg, cat in filters: + seen = False + for w in reraise[:]: + warning = w.message + # Filter out the matching messages + if (re.match(msg, str(warning), re.I) and + issubclass(warning.__class__, cat)): + seen = True + reraise.remove(w) + if not seen and not quiet: + # This filter caught nothing + missing.append((msg, cat.__name__)) + if reraise: + raise AssertionError("unhandled warning %s" % reraise[0]) + if missing: + raise AssertionError("filter (%r, %s) did not catch any warning" % + missing[0]) + + +@contextlib.contextmanager +def check_warnings(*filters, **kwargs): + """Context manager to silence warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default True without argument, + default False if some filters are defined) + + Without argument, it defaults to: + check_warnings(("", Warning), quiet=True) + """ + quiet = kwargs.get('quiet') + if not filters: + filters = (("", Warning),) + # Preserve backward compatibility + if quiet is None: + quiet = True + return _filterwarnings(filters, quiet) + + +class CleanImport(object): + """Context manager to force import to return a new module reference. + + This is useful for testing module-level behaviours, such as + the emission of a DeprecationWarning on import. + + Use like this: + + with CleanImport("foo"): + importlib.import_module("foo") # new reference + """ + + def __init__(self, *module_names): + self.original_modules = sys.modules.copy() + for module_name in module_names: + if module_name in sys.modules: + module = sys.modules[module_name] + # It is possible that module_name is just an alias for + # another module (e.g. stub for modules renamed in 3.x). + # In that case, we also need delete the real module to clear + # the import cache. 
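
check_warnings() above takes ("message regexp", WarningCategory) pairs and
yields a WarningsRecorder; a small sketch of the intended call pattern,
assuming the vendored import path (the warning text is illustrative):

    import warnings
    from future.backports.test import support

    with support.check_warnings(("deprecated", DeprecationWarning)) as w:
        warnings.warn("deprecated call", DeprecationWarning)
    # WarningsRecorder proxies attributes of the most recent warning:
    assert str(w.message) == "deprecated call"

With an explicit filter, quiet defaults to False, so the block would fail if
no matching DeprecationWarning had been raised.
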
+ if module.__name__ != module_name: + del sys.modules[module.__name__] + del sys.modules[module_name] + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + sys.modules.update(self.original_modules) + +### Added for python-future: +if utils.PY3: + import collections.abc + mybase = collections.abc.MutableMapping +else: + import UserDict + mybase = UserDict.DictMixin +### + +class EnvironmentVarGuard(mybase): + + """Class to help protect the environment variable properly. Can be used as + a context manager.""" + + def __init__(self): + self._environ = os.environ + self._changed = {} + + def __getitem__(self, envvar): + return self._environ[envvar] + + def __setitem__(self, envvar, value): + # Remember the initial value on the first access + if envvar not in self._changed: + self._changed[envvar] = self._environ.get(envvar) + self._environ[envvar] = value + + def __delitem__(self, envvar): + # Remember the initial value on the first access + if envvar not in self._changed: + self._changed[envvar] = self._environ.get(envvar) + if envvar in self._environ: + del self._environ[envvar] + + def keys(self): + return self._environ.keys() + + def __iter__(self): + return iter(self._environ) + + def __len__(self): + return len(self._environ) + + def set(self, envvar, value): + self[envvar] = value + + def unset(self, envvar): + del self[envvar] + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + for (k, v) in self._changed.items(): + if v is None: + if k in self._environ: + del self._environ[k] + else: + self._environ[k] = v + os.environ = self._environ + + +class DirsOnSysPath(object): + """Context manager to temporarily add directories to sys.path. + + This makes a copy of sys.path, appends any directories given + as positional arguments, then reverts sys.path to the copied + settings when the context ends. + + Note that *all* sys.path modifications in the body of the + context manager, including replacement of the object, + will be reverted at the end of the block. + """ + + def __init__(self, *paths): + self.original_value = sys.path[:] + self.original_object = sys.path + sys.path.extend(paths) + + def __enter__(self): + return self + + def __exit__(self, *ignore_exc): + sys.path = self.original_object + sys.path[:] = self.original_value + + +class TransientResource(object): + + """Raise ResourceDenied if an exception is raised while the context manager + is in effect that matches the specified exception and attributes.""" + + def __init__(self, exc, **kwargs): + self.exc = exc + self.attrs = kwargs + + def __enter__(self): + return self + + def __exit__(self, type_=None, value=None, traceback=None): + """If type_ is a subclass of self.exc and value has attributes matching + self.attrs, raise ResourceDenied. Otherwise let the exception + propagate (if any).""" + if type_ is not None and issubclass(self.exc, type_): + for attr, attr_value in self.attrs.items(): + if not hasattr(value, attr): + break + if getattr(value, attr) != attr_value: + break + else: + raise ResourceDenied("an optional resource is not available") + +# Context managers that raise ResourceDenied when various issues +# with the Internet connection manifest themselves as exceptions. 
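
EnvironmentVarGuard above records every mutation on first access and rolls it
back on exit; a minimal sketch (the variable name is hypothetical):

    import os
    from future.backports.test import support

    with support.EnvironmentVarGuard() as env:
        env.set("PYPE_DEMO_VAR", "1")  # change is recorded for rollback
        assert os.environ["PYPE_DEMO_VAR"] == "1"
    # On exit the original environment is restored:
    assert "PYPE_DEMO_VAR" not in os.environ
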
+# XXX deprecate these and use transient_internet() instead +time_out = TransientResource(IOError, errno=errno.ETIMEDOUT) +socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET) +ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET) + + +@contextlib.contextmanager +def transient_internet(resource_name, timeout=30.0, errnos=()): + """Return a context manager that raises ResourceDenied when various issues + with the Internet connection manifest themselves as exceptions.""" + default_errnos = [ + ('ECONNREFUSED', 111), + ('ECONNRESET', 104), + ('EHOSTUNREACH', 113), + ('ENETUNREACH', 101), + ('ETIMEDOUT', 110), + ] + default_gai_errnos = [ + ('EAI_AGAIN', -3), + ('EAI_FAIL', -4), + ('EAI_NONAME', -2), + ('EAI_NODATA', -5), + # Encountered when trying to resolve IPv6-only hostnames + ('WSANO_DATA', 11004), + ] + + denied = ResourceDenied("Resource %r is not available" % resource_name) + captured_errnos = errnos + gai_errnos = [] + if not captured_errnos: + captured_errnos = [getattr(errno, name, num) + for (name, num) in default_errnos] + gai_errnos = [getattr(socket, name, num) + for (name, num) in default_gai_errnos] + + def filter_error(err): + n = getattr(err, 'errno', None) + if (isinstance(err, socket.timeout) or + (isinstance(err, socket.gaierror) and n in gai_errnos) or + n in captured_errnos): + if not verbose: + sys.stderr.write(denied.args[0] + "\n") + # Was: raise denied from err + # For Python-Future: + exc = denied + exc.__cause__ = err + raise exc + + old_timeout = socket.getdefaulttimeout() + try: + if timeout is not None: + socket.setdefaulttimeout(timeout) + yield + except IOError as err: + # urllib can wrap original socket errors multiple times (!), we must + # unwrap to get at the original error. + while True: + a = err.args + if len(a) >= 1 and isinstance(a[0], IOError): + err = a[0] + # The error can also be wrapped as args[1]: + # except socket.error as msg: + # raise IOError('socket error', msg).with_traceback(sys.exc_info()[2]) + elif len(a) >= 2 and isinstance(a[1], IOError): + err = a[1] + else: + break + filter_error(err) + raise + # XXX should we catch generic exceptions and look for their + # __cause__ or __context__? + finally: + socket.setdefaulttimeout(old_timeout) + + +@contextlib.contextmanager +def captured_output(stream_name): + """Return a context manager used by captured_stdout/stdin/stderr + that temporarily replaces the sys stream *stream_name* with a StringIO.""" + import io + orig_stdout = getattr(sys, stream_name) + setattr(sys, stream_name, io.StringIO()) + try: + yield getattr(sys, stream_name) + finally: + setattr(sys, stream_name, orig_stdout) + +def captured_stdout(): + """Capture the output of sys.stdout: + + with captured_stdout() as s: + print("hello") + self.assertEqual(s.getvalue(), "hello") + """ + return captured_output("stdout") + +def captured_stderr(): + return captured_output("stderr") + +def captured_stdin(): + return captured_output("stdin") + + +def gc_collect(): + """Force as many objects as possible to be collected. + + In non-CPython implementations of Python, this is needed because timely + deallocation is not guaranteed by the garbage collector. (Even in CPython + this can be the case in case of reference cycles.) This means that __del__ + methods may be called later than expected and weakrefs may remain alive for + longer than expected. This function tries its best to force all garbage + objects to disappear. 
+ """ + gc.collect() + if is_jython: + time.sleep(0.1) + gc.collect() + gc.collect() + +@contextlib.contextmanager +def disable_gc(): + have_gc = gc.isenabled() + gc.disable() + try: + yield + finally: + if have_gc: + gc.enable() + + +def python_is_optimized(): + """Find if Python was built with optimizations.""" + # We don't have sysconfig on Py2.6: + import sysconfig + cflags = sysconfig.get_config_var('PY_CFLAGS') or '' + final_opt = "" + for opt in cflags.split(): + if opt.startswith('-O'): + final_opt = opt + return final_opt != '' and final_opt != '-O0' + + +_header = 'nP' +_align = '0n' +if hasattr(sys, "gettotalrefcount"): + _header = '2P' + _header + _align = '0P' +_vheader = _header + 'n' + +def calcobjsize(fmt): + return struct.calcsize(_header + fmt + _align) + +def calcvobjsize(fmt): + return struct.calcsize(_vheader + fmt + _align) + + +_TPFLAGS_HAVE_GC = 1<<14 +_TPFLAGS_HEAPTYPE = 1<<9 + +def check_sizeof(test, o, size): + result = sys.getsizeof(o) + # add GC header size + if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\ + ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))): + size += _testcapi.SIZEOF_PYGC_HEAD + msg = 'wrong size for %s: got %d, expected %d' \ + % (type(o), result, size) + test.assertEqual(result, size, msg) + +#======================================================================= +# Decorator for running a function in a different locale, correctly resetting +# it afterwards. + +def run_with_locale(catstr, *locales): + def decorator(func): + def inner(*args, **kwds): + try: + import locale + category = getattr(locale, catstr) + orig_locale = locale.setlocale(category) + except AttributeError: + # if the test author gives us an invalid category string + raise + except: + # cannot retrieve original locale, so do nothing + locale = orig_locale = None + else: + for loc in locales: + try: + locale.setlocale(category, loc) + break + except: + pass + + # now run the function, resetting the locale on exceptions + try: + return func(*args, **kwds) + finally: + if locale and orig_locale: + locale.setlocale(category, orig_locale) + inner.__name__ = func.__name__ + inner.__doc__ = func.__doc__ + return inner + return decorator + +#======================================================================= +# Decorator for running a function in a specific timezone, correctly +# resetting it afterwards. + +def run_with_tz(tz): + def decorator(func): + def inner(*args, **kwds): + try: + tzset = time.tzset + except AttributeError: + raise unittest.SkipTest("tzset required") + if 'TZ' in os.environ: + orig_tz = os.environ['TZ'] + else: + orig_tz = None + os.environ['TZ'] = tz + tzset() + + # now run the function, resetting the tz on exceptions + try: + return func(*args, **kwds) + finally: + if orig_tz is None: + del os.environ['TZ'] + else: + os.environ['TZ'] = orig_tz + time.tzset() + + inner.__name__ = func.__name__ + inner.__doc__ = func.__doc__ + return inner + return decorator + +#======================================================================= +# Big-memory-test support. Separate from 'resources' because memory use +# should be configurable. + +# Some handy shorthands. Note that these are used for byte-limits as well +# as size-limits, in the various bigmem tests +_1M = 1024*1024 +_1G = 1024 * _1M +_2G = 2 * _1G +_4G = 4 * _1G + +MAX_Py_ssize_t = sys.maxsize + +def set_memlimit(limit): + global max_memuse + global real_max_memuse + sizes = { + 'k': 1024, + 'm': _1M, + 'g': _1G, + 't': 1024*_1G, + } + m = re.match(r'(\d+(\.\d+)?) 
(K|M|G|T)b?$', limit, + re.IGNORECASE | re.VERBOSE) + if m is None: + raise ValueError('Invalid memory limit %r' % (limit,)) + memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()]) + real_max_memuse = memlimit + if memlimit > MAX_Py_ssize_t: + memlimit = MAX_Py_ssize_t + if memlimit < _2G - 1: + raise ValueError('Memory limit %r too low to be useful' % (limit,)) + max_memuse = memlimit + +class _MemoryWatchdog(object): + """An object which periodically watches the process' memory consumption + and prints it out. + """ + + def __init__(self): + self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid()) + self.started = False + + def start(self): + try: + f = open(self.procfile, 'r') + except OSError as e: + warnings.warn('/proc not available for stats: {0}'.format(e), + RuntimeWarning) + sys.stderr.flush() + return + + watchdog_script = findfile("memory_watchdog.py") + self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script], + stdin=f, stderr=subprocess.DEVNULL) + f.close() + self.started = True + + def stop(self): + if self.started: + self.mem_watchdog.terminate() + self.mem_watchdog.wait() + + +def bigmemtest(size, memuse, dry_run=True): + """Decorator for bigmem tests. + + 'minsize' is the minimum useful size for the test (in arbitrary, + test-interpreted units.) 'memuse' is the number of 'bytes per size' for + the test, or a good estimate of it. + + if 'dry_run' is False, it means the test doesn't support dummy runs + when -M is not specified. + """ + def decorator(f): + def wrapper(self): + size = wrapper.size + memuse = wrapper.memuse + if not real_max_memuse: + maxsize = 5147 + else: + maxsize = size + + if ((real_max_memuse or not dry_run) + and real_max_memuse < maxsize * memuse): + raise unittest.SkipTest( + "not enough memory: %.1fG minimum needed" + % (size * memuse / (1024 ** 3))) + + if real_max_memuse and verbose: + print() + print(" ... expected peak memory use: {peak:.1f}G" + .format(peak=size * memuse / (1024 ** 3))) + watchdog = _MemoryWatchdog() + watchdog.start() + else: + watchdog = None + + try: + return f(self, maxsize) + finally: + if watchdog: + watchdog.stop() + + wrapper.size = size + wrapper.memuse = memuse + return wrapper + return decorator + +def bigaddrspacetest(f): + """Decorator for tests that fill the address space.""" + def wrapper(self): + if max_memuse < MAX_Py_ssize_t: + if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31: + raise unittest.SkipTest( + "not enough memory: try a 32-bit build instead") + else: + raise unittest.SkipTest( + "not enough memory: %.1fG minimum needed" + % (MAX_Py_ssize_t / (1024 ** 3))) + else: + return f(self) + return wrapper + +#======================================================================= +# unittest integration. + +class BasicTestRunner(object): + def run(self, test): + result = unittest.TestResult() + test(result) + return result + +def _id(obj): + return obj + +def requires_resource(resource): + if resource == 'gui' and not _is_gui_available(): + return unittest.skip("resource 'gui' is not available") + if is_resource_enabled(resource): + return _id + else: + return unittest.skip("resource {0!r} is not enabled".format(resource)) + +def cpython_only(test): + """ + Decorator for tests only applicable on CPython. 
+ """ + return impl_detail(cpython=True)(test) + +def impl_detail(msg=None, **guards): + if check_impl_detail(**guards): + return _id + if msg is None: + guardnames, default = _parse_guards(guards) + if default: + msg = "implementation detail not available on {0}" + else: + msg = "implementation detail specific to {0}" + guardnames = sorted(guardnames.keys()) + msg = msg.format(' or '.join(guardnames)) + return unittest.skip(msg) + +def _parse_guards(guards): + # Returns a tuple ({platform_name: run_me}, default_value) + if not guards: + return ({'cpython': True}, False) + is_true = list(guards.values())[0] + assert list(guards.values()) == [is_true] * len(guards) # all True or all False + return (guards, not is_true) + +# Use the following check to guard CPython's implementation-specific tests -- +# or to run them only on the implementation(s) guarded by the arguments. +def check_impl_detail(**guards): + """This function returns True or False depending on the host platform. + Examples: + if check_impl_detail(): # only on CPython (default) + if check_impl_detail(jython=True): # only on Jython + if check_impl_detail(cpython=False): # everywhere except on CPython + """ + guards, default = _parse_guards(guards) + return guards.get(platform.python_implementation().lower(), default) + + +def no_tracing(func): + """Decorator to temporarily turn off tracing for the duration of a test.""" + if not hasattr(sys, 'gettrace'): + return func + else: + @functools.wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + + +def refcount_test(test): + """Decorator for tests which involve reference counting. + + To start, the decorator does not run the test if is not run by CPython. + After that, any trace function is unset during the test to prevent + unexpected refcounts caused by the trace function. 
+ + """ + return no_tracing(cpython_only(test)) + + +def _filter_suite(suite, pred): + """Recursively filter test cases in a suite based on a predicate.""" + newtests = [] + for test in suite._tests: + if isinstance(test, unittest.TestSuite): + _filter_suite(test, pred) + newtests.append(test) + else: + if pred(test): + newtests.append(test) + suite._tests = newtests + +def _run_suite(suite): + """Run tests from a unittest.TestSuite-derived class.""" + if verbose: + runner = unittest.TextTestRunner(sys.stdout, verbosity=2, + failfast=failfast) + else: + runner = BasicTestRunner() + + result = runner.run(suite) + if not result.wasSuccessful(): + if len(result.errors) == 1 and not result.failures: + err = result.errors[0][1] + elif len(result.failures) == 1 and not result.errors: + err = result.failures[0][1] + else: + err = "multiple errors occurred" + if not verbose: err += "; run in verbose mode for details" + raise TestFailed(err) + + +def run_unittest(*classes): + """Run tests from unittest.TestCase-derived classes.""" + valid_types = (unittest.TestSuite, unittest.TestCase) + suite = unittest.TestSuite() + for cls in classes: + if isinstance(cls, str): + if cls in sys.modules: + suite.addTest(unittest.findTestCases(sys.modules[cls])) + else: + raise ValueError("str arguments must be keys in sys.modules") + elif isinstance(cls, valid_types): + suite.addTest(cls) + else: + suite.addTest(unittest.makeSuite(cls)) + def case_pred(test): + if match_tests is None: + return True + for name in test.id().split("."): + if fnmatch.fnmatchcase(name, match_tests): + return True + return False + _filter_suite(suite, case_pred) + _run_suite(suite) + +# We don't have sysconfig on Py2.6: +# #======================================================================= +# # Check for the presence of docstrings. +# +# HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or +# sys.platform == 'win32' or +# sysconfig.get_config_var('WITH_DOC_STRINGS')) +# +# requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS, +# "test requires docstrings") +# +# +# #======================================================================= +# doctest driver. + +def run_doctest(module, verbosity=None, optionflags=0): + """Run doctest on the given module. Return (#failures, #tests). + + If optional argument verbosity is not specified (or is None), pass + support's belief about verbosity on to doctest. Else doctest's + usual behavior is used (it searches sys.argv for -v). + """ + + import doctest + + if verbosity is None: + verbosity = verbose + else: + verbosity = None + + f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags) + if f: + raise TestFailed("%d of %d doctests failed" % (f, t)) + if verbose: + print('doctest (%s) ... %d tests with zero failures' % + (module.__name__, t)) + return f, t + + +#======================================================================= +# Support for saving and restoring the imported modules. + +def modules_setup(): + return sys.modules.copy(), + +def modules_cleanup(oldmodules): + # Encoders/decoders are registered permanently within the internal + # codec cache. If we destroy the corresponding modules their + # globals will be set to None which will trip up the cached functions. + encodings = [(k, v) for k, v in sys.modules.items() + if k.startswith('encodings.')] + # Was: + # sys.modules.clear() + # Py2-compatible: + for i in range(len(sys.modules)): + sys.modules.pop() + + sys.modules.update(encodings) + # XXX: This kind of problem can affect more than just encodings. 
In particular + # extension modules (such as _ssl) don't cope with reloading properly. + # Really, test modules should be cleaning out the test specific modules they + # know they added (ala test_runpy) rather than relying on this function (as + # test_importhooks and test_pkg do currently). + # Implicitly imported *real* modules should be left alone (see issue 10556). + sys.modules.update(oldmodules) + +#======================================================================= +# Backported versions of threading_setup() and threading_cleanup() which don't refer +# to threading._dangling (not available on Py2.7). + +# Threading support to prevent reporting refleaks when running regrtest.py -R + +# NOTE: we use thread._count() rather than threading.enumerate() (or the +# moral equivalent thereof) because a threading.Thread object is still alive +# until its __bootstrap() method has returned, even after it has been +# unregistered from the threading module. +# thread._count(), on the other hand, only gets decremented *after* the +# __bootstrap() method has returned, which gives us reliable reference counts +# at the end of a test run. + +def threading_setup(): + if _thread: + return _thread._count(), + else: + return 1, + +def threading_cleanup(nb_threads): + if not _thread: + return + + _MAX_COUNT = 10 + for count in range(_MAX_COUNT): + n = _thread._count() + if n == nb_threads: + break + time.sleep(0.1) + # XXX print a warning in case of failure? + +def reap_threads(func): + """Use this function when threads are being used. This will + ensure that the threads are cleaned up even when the test fails. + If threading is unavailable this function does nothing. + """ + if not _thread: + return func + + @functools.wraps(func) + def decorator(*args): + key = threading_setup() + try: + return func(*args) + finally: + threading_cleanup(*key) + return decorator + +def reap_children(): + """Use this function at the end of test_main() whenever sub-processes + are started. This will help ensure that no extra children (zombies) + stick around to hog resources and create problems when looking + for refleaks. + """ + + # Reap all our dead child processes so we don't leave zombies around. + # These hog resources and might be causing some of the buildbots to die. + if hasattr(os, 'waitpid'): + any_process = -1 + while True: + try: + # This will raise an exception on Windows. That's ok. + pid, status = os.waitpid(any_process, os.WNOHANG) + if pid == 0: + break + except: + break + +@contextlib.contextmanager +def swap_attr(obj, attr, new_val): + """Temporary swap out an attribute with a new object. + + Usage: + with swap_attr(obj, "attr", 5): + ... + + This will set obj.attr to 5 for the duration of the with: block, + restoring the old value at the end of the block. If `attr` doesn't + exist on `obj`, it will be created and then deleted at the end of the + block. + """ + if hasattr(obj, attr): + real_val = getattr(obj, attr) + setattr(obj, attr, new_val) + try: + yield + finally: + setattr(obj, attr, real_val) + else: + setattr(obj, attr, new_val) + try: + yield + finally: + delattr(obj, attr) + +@contextlib.contextmanager +def swap_item(obj, item, new_val): + """Temporary swap out an item with a new object. + + Usage: + with swap_item(obj, "item", 5): + ... + + This will set obj["item"] to 5 for the duration of the with: block, + restoring the old value at the end of the block. If `item` doesn't + exist on `obj`, it will be created and then deleted at the end of the + block. 
+ """ + if item in obj: + real_val = obj[item] + obj[item] = new_val + try: + yield + finally: + obj[item] = real_val + else: + obj[item] = new_val + try: + yield + finally: + del obj[item] + +def strip_python_stderr(stderr): + """Strip the stderr of a Python process from potential debug output + emitted by the interpreter. + + This will typically be run on the result of the communicate() method + of a subprocess.Popen object. + """ + stderr = re.sub(br"\[\d+ refs\]\r?\n?", b"", stderr).strip() + return stderr + +def args_from_interpreter_flags(): + """Return a list of command-line arguments reproducing the current + settings in sys.flags and sys.warnoptions.""" + return subprocess._args_from_interpreter_flags() + +#============================================================ +# Support for assertions about logging. +#============================================================ + +class TestHandler(logging.handlers.BufferingHandler): + def __init__(self, matcher): + # BufferingHandler takes a "capacity" argument + # so as to know when to flush. As we're overriding + # shouldFlush anyway, we can set a capacity of zero. + # You can call flush() manually to clear out the + # buffer. + logging.handlers.BufferingHandler.__init__(self, 0) + self.matcher = matcher + + def shouldFlush(self): + return False + + def emit(self, record): + self.format(record) + self.buffer.append(record.__dict__) + + def matches(self, **kwargs): + """ + Look for a saved dict whose keys/values match the supplied arguments. + """ + result = False + for d in self.buffer: + if self.matcher.matches(d, **kwargs): + result = True + break + return result + +class Matcher(object): + + _partial_matches = ('msg', 'message') + + def matches(self, d, **kwargs): + """ + Try to match a single dict with the supplied arguments. + + Keys whose values are strings and which are in self._partial_matches + will be checked for partial (i.e. substring) matches. You can extend + this scheme to (for example) do regular expression matching, etc. + """ + result = True + for k in kwargs: + v = kwargs[k] + dv = d.get(k) + if not self.match_value(k, dv, v): + result = False + break + return result + + def match_value(self, k, dv, v): + """ + Try to match a single stored value (dv) with a supplied value (v). + """ + if type(v) != type(dv): + result = False + elif type(dv) is not str or k not in self._partial_matches: + result = (v == dv) + else: + result = dv.find(v) >= 0 + return result + + +_can_symlink = None +def can_symlink(): + global _can_symlink + if _can_symlink is not None: + return _can_symlink + symlink_path = TESTFN + "can_symlink" + try: + os.symlink(TESTFN, symlink_path) + can = True + except (OSError, NotImplementedError, AttributeError): + can = False + else: + os.remove(symlink_path) + _can_symlink = can + return can + +def skip_unless_symlink(test): + """Skip decorator for tests that require functional symlink""" + ok = can_symlink() + msg = "Requires functional symlink implementation" + return test if ok else unittest.skip(msg)(test) + +_can_xattr = None +def can_xattr(): + global _can_xattr + if _can_xattr is not None: + return _can_xattr + if not hasattr(os, "setxattr"): + can = False + else: + tmp_fp, tmp_name = tempfile.mkstemp() + try: + with open(TESTFN, "wb") as fp: + try: + # TESTFN & tempfile may use different file systems with + # different capabilities + os.setxattr(tmp_fp, b"user.test", b"") + os.setxattr(fp.fileno(), b"user.test", b"") + # Kernels < 2.6.39 don't respect setxattr flags. 
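
TestHandler and Matcher above buffer LogRecord dicts so tests can assert on
logging output; a sketch, assuming the vendored import path (logger name and
message are illustrative):

    import logging
    from future.backports.test.support import TestHandler, Matcher

    logger = logging.getLogger("demo")
    handler = TestHandler(Matcher())
    logger.addHandler(handler)
    logger.warning("disk %s is nearly full", "/tmp")
    # 'msg' is in Matcher._partial_matches, so substring matching applies:
    assert handler.matches(levelname="WARNING", msg="disk")
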
+ kernel_version = platform.release() + m = re.match("2.6.(\d{1,2})", kernel_version) + can = m is None or int(m.group(1)) >= 39 + except OSError: + can = False + finally: + unlink(TESTFN) + unlink(tmp_name) + _can_xattr = can + return can + +def skip_unless_xattr(test): + """Skip decorator for tests that require functional extended attributes""" + ok = can_xattr() + msg = "no non-broken extended attribute support" + return test if ok else unittest.skip(msg)(test) + + +if sys.platform.startswith('win'): + @contextlib.contextmanager + def suppress_crash_popup(): + """Disable Windows Error Reporting dialogs using SetErrorMode.""" + # see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621%28v=vs.85%29.aspx + # GetErrorMode is not available on Windows XP and Windows Server 2003, + # but SetErrorMode returns the previous value, so we can use that + import ctypes + k32 = ctypes.windll.kernel32 + SEM_NOGPFAULTERRORBOX = 0x02 + old_error_mode = k32.SetErrorMode(SEM_NOGPFAULTERRORBOX) + k32.SetErrorMode(old_error_mode | SEM_NOGPFAULTERRORBOX) + try: + yield + finally: + k32.SetErrorMode(old_error_mode) +else: + # this is a no-op for other platforms + @contextlib.contextmanager + def suppress_crash_popup(): + yield + + +def patch(test_instance, object_to_patch, attr_name, new_value): + """Override 'object_to_patch'.'attr_name' with 'new_value'. + + Also, add a cleanup procedure to 'test_instance' to restore + 'object_to_patch' value for 'attr_name'. + The 'attr_name' should be a valid attribute for 'object_to_patch'. + + """ + # check that 'attr_name' is a real attribute for 'object_to_patch' + # will raise AttributeError if it does not exist + getattr(object_to_patch, attr_name) + + # keep a copy of the old value + attr_is_local = False + try: + old_value = object_to_patch.__dict__[attr_name] + except (AttributeError, KeyError): + old_value = getattr(object_to_patch, attr_name, None) + else: + attr_is_local = True + + # restore the value when the test is done + def cleanup(): + if attr_is_local: + setattr(object_to_patch, attr_name, old_value) + else: + delattr(object_to_patch, attr_name) + + test_instance.addCleanup(cleanup) + + # actually override the attribute + setattr(object_to_patch, attr_name, new_value) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/total_ordering.py b/pype/modules/ftrack/python2_vendor/future/backports/total_ordering.py new file mode 100644 index 0000000000..760f06d6c3 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/total_ordering.py @@ -0,0 +1,38 @@ +""" +For Python < 2.7.2. total_ordering in versions prior to 2.7.2 is buggy. +See http://bugs.python.org/issue10042 for details. For these versions use +code borrowed from Python 2.7.3. + +From django.utils. 
+""" + +import sys +if sys.version_info >= (2, 7, 2): + from functools import total_ordering +else: + def total_ordering(cls): + """Class decorator that fills in missing ordering methods""" + convert = { + '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), + ('__le__', lambda self, other: self < other or self == other), + ('__ge__', lambda self, other: not self < other)], + '__le__': [('__ge__', lambda self, other: not self <= other or self == other), + ('__lt__', lambda self, other: self <= other and not self == other), + ('__gt__', lambda self, other: not self <= other)], + '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), + ('__ge__', lambda self, other: self > other or self == other), + ('__le__', lambda self, other: not self > other)], + '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), + ('__gt__', lambda self, other: self >= other and not self == other), + ('__lt__', lambda self, other: not self >= other)] + } + roots = set(dir(cls)) & set(convert) + if not roots: + raise ValueError('must define at least one ordering operation: < > <= >=') + root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ + for opname, opfunc in convert[root]: + if opname not in roots: + opfunc.__name__ = opname + opfunc.__doc__ = getattr(int, opname).__doc__ + setattr(cls, opname, opfunc) + return cls diff --git a/pype/modules/ftrack/python2_vendor/future/backports/urllib/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/urllib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/modules/ftrack/python2_vendor/future/backports/urllib/error.py b/pype/modules/ftrack/python2_vendor/future/backports/urllib/error.py new file mode 100644 index 0000000000..a473e4453d --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/urllib/error.py @@ -0,0 +1,75 @@ +"""Exception classes raised by urllib. + +The base exception class is URLError, which inherits from IOError. It +doesn't define any behavior of its own, but is the base class for all +exceptions defined in this package. + +HTTPError is an exception class that is also a valid HTTP response +instance. It behaves this way because HTTP protocol errors are valid +responses, with a status code, headers, and a body. In some contexts, +an application may want to handle an exception like a regular +response. +""" +from __future__ import absolute_import, division, unicode_literals +from future import standard_library + +from future.backports.urllib import response as urllib_response + + +__all__ = ['URLError', 'HTTPError', 'ContentTooShortError'] + + +# do these error classes make sense? +# make sure all of the IOError stuff is overridden. we just want to be +# subtypes. + +class URLError(IOError): + # URLError is a sub-type of IOError, but it doesn't share any of + # the implementation. need to override __init__ and __str__. + # It sets self.args for compatibility with other EnvironmentError + # subclasses, but args doesn't have the typical format with errno in + # slot 0 and strerror in slot 1. This may be better than nothing. 
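
The backported total_ordering above synthesizes the missing rich comparisons
from whichever ordering operation a class defines, preferring __lt__ (that is
what the lexicographic max(roots) picks); a quick sketch with a hypothetical
Version class:

    from future.backports.total_ordering import total_ordering

    @total_ordering
    class Version(object):
        def __init__(self, *parts):
            self.parts = parts
        def __eq__(self, other):
            return self.parts == other.parts
        def __lt__(self, other):
            return self.parts < other.parts

    # __le__ is synthesized as (self < other or self == other):
    assert Version(1, 2) <= Version(1, 3)

On Python >= 2.7.2 the module simply re-exports functools.total_ordering, so
the same sketch behaves identically there.
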
+ def __init__(self, reason, filename=None): + self.args = reason, + self.reason = reason + if filename is not None: + self.filename = filename + + def __str__(self): + return '' % self.reason + +class HTTPError(URLError, urllib_response.addinfourl): + """Raised when HTTP error occurs, but also acts like non-error return""" + __super_init = urllib_response.addinfourl.__init__ + + def __init__(self, url, code, msg, hdrs, fp): + self.code = code + self.msg = msg + self.hdrs = hdrs + self.fp = fp + self.filename = url + # The addinfourl classes depend on fp being a valid file + # object. In some cases, the HTTPError may not have a valid + # file object. If this happens, the simplest workaround is to + # not initialize the base classes. + if fp is not None: + self.__super_init(fp, hdrs, url, code) + + def __str__(self): + return 'HTTP Error %s: %s' % (self.code, self.msg) + + # since URLError specifies a .reason attribute, HTTPError should also + # provide this attribute. See issue13211 for discussion. + @property + def reason(self): + return self.msg + + def info(self): + return self.hdrs + + +# exception raised when downloaded size does not match content-length +class ContentTooShortError(URLError): + def __init__(self, message, content): + URLError.__init__(self, message) + self.content = content diff --git a/pype/modules/ftrack/python2_vendor/future/backports/urllib/parse.py b/pype/modules/ftrack/python2_vendor/future/backports/urllib/parse.py new file mode 100644 index 0000000000..04e52d4925 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/urllib/parse.py @@ -0,0 +1,991 @@ +""" +Ported using Python-Future from the Python 3.3 standard library. + +Parse (absolute and relative) URLs. + +urlparse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. + +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urlparse module should conform with it. The urlparse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. 
+""" +from __future__ import absolute_import, division, unicode_literals +from future.builtins import bytes, chr, dict, int, range, str +from future.utils import raise_with_traceback + +import re +import sys +import collections + +__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag", + "urlsplit", "urlunsplit", "urlencode", "parse_qs", + "parse_qsl", "quote", "quote_plus", "quote_from_bytes", + "unquote", "unquote_plus", "unquote_to_bytes"] + +# A classification of schemes ('' means apply by default) +uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap', + 'wais', 'file', 'https', 'shttp', 'mms', + 'prospero', 'rtsp', 'rtspu', '', 'sftp', + 'svn', 'svn+ssh'] +uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet', + 'imap', 'wais', 'file', 'mms', 'https', 'shttp', + 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '', + 'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh'] +uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap', + 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips', + 'mms', '', 'sftp', 'tel'] + +# These are not actually used anymore, but should stay for backwards +# compatibility. (They are undocumented, but have a public-looking name.) +non_hierarchical = ['gopher', 'hdl', 'mailto', 'news', + 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips'] +uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms', + 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', ''] +uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news', + 'nntp', 'wais', 'https', 'shttp', 'snews', + 'file', 'prospero', ''] + +# Characters valid in scheme names +scheme_chars = ('abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '0123456789' + '+-.') + +# XXX: Consider replacing with functools.lru_cache +MAX_CACHE_SIZE = 20 +_parse_cache = {} + +def clear_cache(): + """Clear the parse cache and the quoters cache.""" + _parse_cache.clear() + _safe_quoters.clear() + + +# Helpers for bytes handling +# For 3.2, we deliberately require applications that +# handle improperly quoted URLs to do their own +# decoding and encoding. 
If valid use cases are +# presented, we may relax this by using latin-1 +# decoding internally for 3.3 +_implicit_encoding = 'ascii' +_implicit_errors = 'strict' + +def _noop(obj): + return obj + +def _encode_result(obj, encoding=_implicit_encoding, + errors=_implicit_errors): + return obj.encode(encoding, errors) + +def _decode_args(args, encoding=_implicit_encoding, + errors=_implicit_errors): + return tuple(x.decode(encoding, errors) if x else '' for x in args) + +def _coerce_args(*args): + # Invokes decode if necessary to create str args + # and returns the coerced inputs along with + # an appropriate result coercion function + # - noop for str inputs + # - encoding function otherwise + str_input = isinstance(args[0], str) + for arg in args[1:]: + # We special-case the empty string to support the + # "scheme=''" default argument to some functions + if arg and isinstance(arg, str) != str_input: + raise TypeError("Cannot mix str and non-str arguments") + if str_input: + return args + (_noop,) + return _decode_args(args) + (_encode_result,) + +# Result objects are more helpful than simple tuples +class _ResultMixinStr(object): + """Standard approach to encoding parsed results from str to bytes""" + __slots__ = () + + def encode(self, encoding='ascii', errors='strict'): + return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self)) + + +class _ResultMixinBytes(object): + """Standard approach to decoding parsed results from bytes to str""" + __slots__ = () + + def decode(self, encoding='ascii', errors='strict'): + return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self)) + + +class _NetlocResultMixinBase(object): + """Shared methods for the parsed result objects containing a netloc element""" + __slots__ = () + + @property + def username(self): + return self._userinfo[0] + + @property + def password(self): + return self._userinfo[1] + + @property + def hostname(self): + hostname = self._hostinfo[0] + if not hostname: + hostname = None + elif hostname is not None: + hostname = hostname.lower() + return hostname + + @property + def port(self): + port = self._hostinfo[1] + if port is not None: + port = int(port, 10) + # Return None on an illegal port + if not ( 0 <= port <= 65535): + return None + return port + + +class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr): + __slots__ = () + + @property + def _userinfo(self): + netloc = self.netloc + userinfo, have_info, hostinfo = netloc.rpartition('@') + if have_info: + username, have_password, password = userinfo.partition(':') + if not have_password: + password = None + else: + username = password = None + return username, password + + @property + def _hostinfo(self): + netloc = self.netloc + _, _, hostinfo = netloc.rpartition('@') + _, have_open_br, bracketed = hostinfo.partition('[') + if have_open_br: + hostname, _, port = bracketed.partition(']') + _, have_port, port = port.partition(':') + else: + hostname, have_port, port = hostinfo.partition(':') + if not have_port: + port = None + return hostname, port + + +class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes): + __slots__ = () + + @property + def _userinfo(self): + netloc = self.netloc + userinfo, have_info, hostinfo = netloc.rpartition(b'@') + if have_info: + username, have_password, password = userinfo.partition(b':') + if not have_password: + password = None + else: + username = password = None + return username, password + + @property + def _hostinfo(self): + netloc = self.netloc + _, _, hostinfo = 
netloc.rpartition(b'@') + _, have_open_br, bracketed = hostinfo.partition(b'[') + if have_open_br: + hostname, _, port = bracketed.partition(b']') + _, have_port, port = port.partition(b':') + else: + hostname, have_port, port = hostinfo.partition(b':') + if not have_port: + port = None + return hostname, port + + +from collections import namedtuple + +_DefragResultBase = namedtuple('DefragResult', 'url fragment') +_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment') +_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment') + +# For backwards compatibility, alias _NetlocResultMixinStr +# ResultBase is no longer part of the documented API, but it is +# retained since deprecating it isn't worth the hassle +ResultBase = _NetlocResultMixinStr + +# Structured result objects for string data +class DefragResult(_DefragResultBase, _ResultMixinStr): + __slots__ = () + def geturl(self): + if self.fragment: + return self.url + '#' + self.fragment + else: + return self.url + +class SplitResult(_SplitResultBase, _NetlocResultMixinStr): + __slots__ = () + def geturl(self): + return urlunsplit(self) + +class ParseResult(_ParseResultBase, _NetlocResultMixinStr): + __slots__ = () + def geturl(self): + return urlunparse(self) + +# Structured result objects for bytes data +class DefragResultBytes(_DefragResultBase, _ResultMixinBytes): + __slots__ = () + def geturl(self): + if self.fragment: + return self.url + b'#' + self.fragment + else: + return self.url + +class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes): + __slots__ = () + def geturl(self): + return urlunsplit(self) + +class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes): + __slots__ = () + def geturl(self): + return urlunparse(self) + +# Set up the encode/decode result pairs +def _fix_result_transcoding(): + _result_pairs = ( + (DefragResult, DefragResultBytes), + (SplitResult, SplitResultBytes), + (ParseResult, ParseResultBytes), + ) + for _decoded, _encoded in _result_pairs: + _decoded._encoded_counterpart = _encoded + _encoded._decoded_counterpart = _decoded + +_fix_result_transcoding() +del _fix_result_transcoding + +def urlparse(url, scheme='', allow_fragments=True): + """Parse a URL into 6 components: + :///;?# + Return a 6-tuple: (scheme, netloc, path, params, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. 
netloc is a single string) and we don't expand % escapes.""" + url, scheme, _coerce_result = _coerce_args(url, scheme) + splitresult = urlsplit(url, scheme, allow_fragments) + scheme, netloc, url, query, fragment = splitresult + if scheme in uses_params and ';' in url: + url, params = _splitparams(url) + else: + params = '' + result = ParseResult(scheme, netloc, url, params, query, fragment) + return _coerce_result(result) + +def _splitparams(url): + if '/' in url: + i = url.find(';', url.rfind('/')) + if i < 0: + return url, '' + else: + i = url.find(';') + return url[:i], url[i+1:] + +def _splitnetloc(url, start=0): + delim = len(url) # position of end of domain part of url, default is end + for c in '/?#': # look for delimiters; the order is NOT important + wdelim = url.find(c, start) # find first of this delim + if wdelim >= 0: # if found + delim = min(delim, wdelim) # use earliest delim position + return url[start:delim], url[delim:] # return (domain, rest) + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL into 5 components: + :///?# + Return a 5-tuple: (scheme, netloc, path, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.""" + url, scheme, _coerce_result = _coerce_args(url, scheme) + allow_fragments = bool(allow_fragments) + key = url, scheme, allow_fragments, type(url), type(scheme) + cached = _parse_cache.get(key, None) + if cached: + return _coerce_result(cached) + if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth + clear_cache() + netloc = query = fragment = '' + i = url.find(':') + if i > 0: + if url[:i] == 'http': # optimize the common case + scheme = url[:i].lower() + url = url[i+1:] + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + v = SplitResult(scheme, netloc, url, query, fragment) + _parse_cache[key] = v + return _coerce_result(v) + for c in url[:i]: + if c not in scheme_chars: + break + else: + # make sure "url" is not actually a port number (in which case + # "scheme" is really part of the path) + rest = url[i+1:] + if not rest or any(c not in '0123456789' for c in rest): + # not a port number + scheme, url = url[:i].lower(), rest + + if url[:2] == '//': + netloc, url = _splitnetloc(url, 2) + if (('[' in netloc and ']' not in netloc) or + (']' in netloc and '[' not in netloc)): + raise ValueError("Invalid IPv6 URL") + if allow_fragments and '#' in url: + url, fragment = url.split('#', 1) + if '?' in url: + url, query = url.split('?', 1) + v = SplitResult(scheme, netloc, url, query, fragment) + _parse_cache[key] = v + return _coerce_result(v) + +def urlunparse(components): + """Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).""" + scheme, netloc, url, params, query, fragment, _coerce_result = ( + _coerce_args(*components)) + if params: + url = "%s;%s" % (url, params) + return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment))) + +def urlunsplit(components): + """Combine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. 
The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? with an + empty query; the RFC states that these are equivalent).""" + scheme, netloc, url, query, fragment, _coerce_result = ( + _coerce_args(*components)) + if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'): + if url and url[:1] != '/': url = '/' + url + url = '//' + (netloc or '') + url + if scheme: + url = scheme + ':' + url + if query: + url = url + '?' + query + if fragment: + url = url + '#' + fragment + return _coerce_result(url) + +def urljoin(base, url, allow_fragments=True): + """Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.""" + if not base: + return url + if not url: + return base + base, url, _coerce_result = _coerce_args(base, url) + bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ + urlparse(base, '', allow_fragments) + scheme, netloc, path, params, query, fragment = \ + urlparse(url, bscheme, allow_fragments) + if scheme != bscheme or scheme not in uses_relative: + return _coerce_result(url) + if scheme in uses_netloc: + if netloc: + return _coerce_result(urlunparse((scheme, netloc, path, + params, query, fragment))) + netloc = bnetloc + if path[:1] == '/': + return _coerce_result(urlunparse((scheme, netloc, path, + params, query, fragment))) + if not path and not params: + path = bpath + params = bparams + if not query: + query = bquery + return _coerce_result(urlunparse((scheme, netloc, path, + params, query, fragment))) + segments = bpath.split('/')[:-1] + path.split('/') + # XXX The stuff below is bogus in various ways... + if segments[-1] == '.': + segments[-1] = '' + while '.' in segments: + segments.remove('.') + while 1: + i = 1 + n = len(segments) - 1 + while i < n: + if (segments[i] == '..' + and segments[i-1] not in ('', '..')): + del segments[i-1:i+1] + break + i = i+1 + else: + break + if segments == ['', '..']: + segments[-1] = '' + elif len(segments) >= 2 and segments[-1] == '..': + segments[-2:] = [''] + return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments), + params, query, fragment))) + +def urldefrag(url): + """Removes any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + """ + url, _coerce_result = _coerce_args(url) + if '#' in url: + s, n, p, a, q, frag = urlparse(url) + defrag = urlunparse((s, n, p, a, q, '')) + else: + frag = '' + defrag = url + return _coerce_result(DefragResult(defrag, frag)) + +_hexdig = '0123456789ABCDEFabcdef' +_hextobyte = dict(((a + b).encode(), bytes([int(a + b, 16)])) + for a in _hexdig for b in _hexdig) + +def unquote_to_bytes(string): + """unquote_to_bytes('abc%20def') -> b'abc def'.""" + # Note: strings are encoded as UTF-8. This is only an issue if it contains + # unescaped non-ASCII characters, which URIs should not. + if not string: + # Is it a string-like object? 
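+        # The bare attribute access below is a cheap duck-typing probe:
+        # it raises AttributeError for inputs that are not string-like.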
+ string.split + return bytes(b'') + if isinstance(string, str): + string = string.encode('utf-8') + ### For Python-Future: + # It is already a byte-string object, but force it to be newbytes here on + # Py2: + string = bytes(string) + ### + bits = string.split(b'%') + if len(bits) == 1: + return string + res = [bits[0]] + append = res.append + for item in bits[1:]: + try: + append(_hextobyte[item[:2]]) + append(item[2:]) + except KeyError: + append(b'%') + append(item) + return bytes(b'').join(res) + +_asciire = re.compile('([\x00-\x7f]+)') + +def unquote(string, encoding='utf-8', errors='replace'): + """Replace %xx escapes by their single-character equivalent. The optional + encoding and errors parameters specify how to decode percent-encoded + sequences into Unicode characters, as accepted by the bytes.decode() + method. + By default, percent-encoded sequences are decoded with UTF-8, and invalid + sequences are replaced by a placeholder character. + + unquote('abc%20def') -> 'abc def'. + """ + if '%' not in string: + string.split + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'replace' + bits = _asciire.split(string) + res = [bits[0]] + append = res.append + for i in range(1, len(bits), 2): + append(unquote_to_bytes(bits[i]).decode(encoding, errors)) + append(bits[i + 1]) + return ''.join(res) + +def parse_qs(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace'): + """Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + """ + parsed_result = {} + pairs = parse_qsl(qs, keep_blank_values, strict_parsing, + encoding=encoding, errors=errors) + for name, value in pairs: + if name in parsed_result: + parsed_result[name].append(value) + else: + parsed_result[name] = [value] + return parsed_result + +def parse_qsl(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace'): + """Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. A + true value indicates that blanks should be retained as blank + strings. The default false value indicates that blank values + are to be ignored and treated as if they were not included. + + strict_parsing: flag indicating what to do with parsing errors. If + false (the default), errors are silently ignored. If true, + errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + Returns a list, as G-d intended. 
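+
+    A minimal illustrative sketch under the defaults above (the query
+    strings are hypothetical):
+
+        >>> parse_qsl('key=val&key2=val2&blank=')
+        [('key', 'val'), ('key2', 'val2')]
+        >>> parse_qsl('key=val&blank=', keep_blank_values=True)
+        [('key', 'val'), ('blank', '')]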
+    """
+    qs, _coerce_result = _coerce_args(qs)
+    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+    r = []
+    for name_value in pairs:
+        if not name_value and not strict_parsing:
+            continue
+        nv = name_value.split('=', 1)
+        if len(nv) != 2:
+            if strict_parsing:
+                raise ValueError("bad query field: %r" % (name_value,))
+            # Handle case of a control-name with no equal sign
+            if keep_blank_values:
+                nv.append('')
+            else:
+                continue
+        if len(nv[1]) or keep_blank_values:
+            name = nv[0].replace('+', ' ')
+            name = unquote(name, encoding=encoding, errors=errors)
+            name = _coerce_result(name)
+            value = nv[1].replace('+', ' ')
+            value = unquote(value, encoding=encoding, errors=errors)
+            value = _coerce_result(value)
+            r.append((name, value))
+    return r
+
+def unquote_plus(string, encoding='utf-8', errors='replace'):
+    """Like unquote(), but also replace plus signs by spaces, as required for
+    unquoting HTML form values.
+
+    unquote_plus('%7e/abc+def') -> '~/abc def'
+    """
+    string = string.replace('+', ' ')
+    return unquote(string, encoding, errors)
+
+_ALWAYS_SAFE = frozenset(bytes(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+                               b'abcdefghijklmnopqrstuvwxyz'
+                               b'0123456789'
+                               b'_.-'))
+_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
+_safe_quoters = {}
+
+class Quoter(collections.defaultdict):
+    """A mapping from bytes (in range(0,256)) to strings.
+
+    String values are percent-encoded byte values, unless the key < 128, and
+    in the "safe" set (either the specified safe set, or default set).
+    """
+    # Keeps a cache internally, using defaultdict, for efficiency (lookups
+    # of cached keys don't call Python code at all).
+    def __init__(self, safe):
+        """safe: bytes object."""
+        self.safe = _ALWAYS_SAFE.union(bytes(safe))
+
+    def __repr__(self):
+        # Without this, will just display as a defaultdict
+        return "<Quoter %r>" % dict(self)
+
+    def __missing__(self, b):
+        # Handle a cache miss. Store quoted string in cache and return.
+        res = chr(b) if b in self.safe else '%{0:02X}'.format(b)
+        self[b] = res
+        return res
+
+def quote(string, safe='/', encoding=None, errors=None):
+    """quote('abc def') -> 'abc%20def'
+
+    Each part of a URL, e.g. the path info, the query, etc., has a
+    different set of reserved characters that must be quoted.
+
+    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
+    the following reserved characters.
+
+    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
+                  "$" | ","
+
+    Each of these characters is reserved in some component of a URL,
+    but not necessarily in all of them.
+
+    By default, the quote function is intended for quoting the path
+    section of a URL.  Thus, it will not encode '/'.  This character
+    is reserved, but in typical usage the quote function is being
+    called on a path where the existing slash characters are used as
+    reserved characters.
+
+    string and safe may be either str or bytes objects. encoding must
+    not be specified if string is a str.
+
+    The optional encoding and errors parameters specify how to deal with
+    non-ASCII characters, as accepted by the str.encode method.
+    By default, encoding='utf-8' (characters are encoded with UTF-8), and
+    errors='strict' (unsupported characters raise a UnicodeEncodeError). 
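+
+    Illustrative sketch of the defaults described above (the inputs are
+    hypothetical):
+
+        >>> quote('a b/c')
+        'a%20b/c'
+        >>> quote('a b/c', safe='')
+        'a%20b%2Fc'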
+ """ + if isinstance(string, str): + if not string: + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'strict' + string = string.encode(encoding, errors) + else: + if encoding is not None: + raise TypeError("quote() doesn't support 'encoding' for bytes") + if errors is not None: + raise TypeError("quote() doesn't support 'errors' for bytes") + return quote_from_bytes(string, safe) + +def quote_plus(string, safe='', encoding=None, errors=None): + """Like quote(), but also replace ' ' with '+', as required for quoting + HTML form values. Plus signs in the original string are escaped unless + they are included in safe. It also does not have safe default to '/'. + """ + # Check if ' ' in string, where string may either be a str or bytes. If + # there are no spaces, the regular quote will produce the right answer. + if ((isinstance(string, str) and ' ' not in string) or + (isinstance(string, bytes) and b' ' not in string)): + return quote(string, safe, encoding, errors) + if isinstance(safe, str): + space = str(' ') + else: + space = bytes(b' ') + string = quote(string, safe + space, encoding, errors) + return string.replace(' ', '+') + +def quote_from_bytes(bs, safe='/'): + """Like quote(), but accepts a bytes object rather than a str, and does + not perform string-to-bytes encoding. It always returns an ASCII string. + quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f' + """ + if not isinstance(bs, (bytes, bytearray)): + raise TypeError("quote_from_bytes() expected bytes") + if not bs: + return str('') + ### For Python-Future: + bs = bytes(bs) + ### + if isinstance(safe, str): + # Normalize 'safe' by converting to bytes and removing non-ASCII chars + safe = str(safe).encode('ascii', 'ignore') + else: + ### For Python-Future: + safe = bytes(safe) + ### + safe = bytes([c for c in safe if c < 128]) + if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe): + return bs.decode() + try: + quoter = _safe_quoters[safe] + except KeyError: + _safe_quoters[safe] = quoter = Quoter(safe).__getitem__ + return str('').join([quoter(char) for char in bs]) + +def urlencode(query, doseq=False, safe='', encoding=None, errors=None): + """Encode a sequence of two-element tuples or dictionary into a URL query string. + + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + + The query arg may be either a string or a bytes type. When query arg is a + string, the safe, encoding and error parameters are sent the quote_plus for + encoding. + """ + + if hasattr(query, "items"): + query = query.items() + else: + # It's a bother at times that strings and string-like objects are + # sequences. + try: + # non-sequence items should not work with len() + # non-empty strings will fail this + if len(query) and not isinstance(query[0], tuple): + raise TypeError + # Zero-length sequences of all types will get here and succeed, + # but that's a minor nit. 
Since the original implementation
+            # allowed empty dicts that type of behavior probably should be
+            # preserved for consistency
+        except TypeError:
+            ty, va, tb = sys.exc_info()
+            raise_with_traceback(TypeError("not a valid non-string sequence "
+                                           "or mapping object"), tb)
+
+    l = []
+    if not doseq:
+        for k, v in query:
+            if isinstance(k, bytes):
+                k = quote_plus(k, safe)
+            else:
+                k = quote_plus(str(k), safe, encoding, errors)
+
+            if isinstance(v, bytes):
+                v = quote_plus(v, safe)
+            else:
+                v = quote_plus(str(v), safe, encoding, errors)
+            l.append(k + '=' + v)
+    else:
+        for k, v in query:
+            if isinstance(k, bytes):
+                k = quote_plus(k, safe)
+            else:
+                k = quote_plus(str(k), safe, encoding, errors)
+
+            if isinstance(v, bytes):
+                v = quote_plus(v, safe)
+                l.append(k + '=' + v)
+            elif isinstance(v, str):
+                v = quote_plus(v, safe, encoding, errors)
+                l.append(k + '=' + v)
+            else:
+                try:
+                    # Is this a sufficient test for sequence-ness?
+                    x = len(v)
+                except TypeError:
+                    # not a sequence
+                    v = quote_plus(str(v), safe, encoding, errors)
+                    l.append(k + '=' + v)
+                else:
+                    # loop over the sequence
+                    for elt in v:
+                        if isinstance(elt, bytes):
+                            elt = quote_plus(elt, safe)
+                        else:
+                            elt = quote_plus(str(elt), safe, encoding, errors)
+                        l.append(k + '=' + elt)
+    return str('&').join(l)
+
+# Utilities to parse URLs (most of these return None for missing parts):
+# unwrap('<URL:type://host/path>') --> 'type://host/path'
+# splittype('type:opaquestring') --> 'type', 'opaquestring'
+# splithost('//host[:port]/path') --> 'host[:port]', '/path'
+# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
+# splitpasswd('user:passwd') -> 'user', 'passwd'
+# splitport('host:port') --> 'host', 'port'
+# splitquery('/path?query') --> '/path', 'query'
+# splittag('/path#tag') --> '/path', 'tag'
+# splitattr('/path;attr1=value1;attr2=value2;...') ->
+#     '/path', ['attr1=value1', 'attr2=value2', ...]
+# splitvalue('attr=value') --> 'attr', 'value'
+# urllib.parse.unquote('abc%20def') -> 'abc def'
+# quote('abc def') -> 'abc%20def'
+
+def to_bytes(url):
+    """to_bytes(u"URL") --> 'URL'."""
+    # Most URL schemes require ASCII. If that changes, the conversion
+    # can be relaxed.
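+    # Illustrative sketch (hypothetical input): to_bytes(u'http://host/')
+    # returns 'http://host/', while a URL containing non-ASCII characters
+    # raises UnicodeError.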
+    # XXX get rid of to_bytes()
+    if isinstance(url, str):
+        try:
+            url = url.encode("ASCII").decode()
+        except UnicodeError:
+            raise UnicodeError("URL " + repr(url) +
+                               " contains non-ASCII characters")
+    return url
+
+def unwrap(url):
+    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
+    url = str(url).strip()
+    if url[:1] == '<' and url[-1:] == '>':
+        url = url[1:-1].strip()
+    if url[:4] == 'URL:': url = url[4:].strip()
+    return url
+
+_typeprog = None
+def splittype(url):
+    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
+    global _typeprog
+    if _typeprog is None:
+        import re
+        _typeprog = re.compile('^([^/:]+):')
+
+    match = _typeprog.match(url)
+    if match:
+        scheme = match.group(1)
+        return scheme.lower(), url[len(scheme) + 1:]
+    return None, url
+
+_hostprog = None
+def splithost(url):
+    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
+    global _hostprog
+    if _hostprog is None:
+        import re
+        _hostprog = re.compile('^//([^/?]*)(.*)$')
+
+    match = _hostprog.match(url)
+    if match:
+        host_port = match.group(1)
+        path = match.group(2)
+        if path and not path.startswith('/'):
+            path = '/' + path
+        return host_port, path
+    return None, url
+
+_userprog = None
+def splituser(host):
+    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+    global _userprog
+    if _userprog is None:
+        import re
+        _userprog = re.compile('^(.*)@(.*)$')
+
+    match = _userprog.match(host)
+    if match: return match.group(1, 2)
+    return None, host
+
+_passwdprog = None
+def splitpasswd(user):
+    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
+    global _passwdprog
+    if _passwdprog is None:
+        import re
+        _passwdprog = re.compile('^([^:]*):(.*)$',re.S)
+
+    match = _passwdprog.match(user)
+    if match: return match.group(1, 2)
+    return user, None
+
+# splittag('/path#tag') --> '/path', 'tag'
+_portprog = None
+def splitport(host):
+    """splitport('host:port') --> 'host', 'port'."""
+    global _portprog
+    if _portprog is None:
+        import re
+        _portprog = re.compile('^(.*):([0-9]+)$')
+
+    match = _portprog.match(host)
+    if match: return match.group(1, 2)
+    return host, None
+
+_nportprog = None
+def splitnport(host, defport=-1):
+    """Split host and port, returning numeric port.
+    Return given default port if no ':' found; defaults to -1.
+    Return numerical port if a valid number is found after ':'. 
+ Return None if ':' but not a valid number.""" + global _nportprog + if _nportprog is None: + import re + _nportprog = re.compile('^(.*):(.*)$') + + match = _nportprog.match(host) + if match: + host, port = match.group(1, 2) + try: + if not port: raise ValueError("no digits") + nport = int(port) + except ValueError: + nport = None + return host, nport + return host, defport + +_queryprog = None +def splitquery(url): + """splitquery('/path?query') --> '/path', 'query'.""" + global _queryprog + if _queryprog is None: + import re + _queryprog = re.compile('^(.*)\?([^?]*)$') + + match = _queryprog.match(url) + if match: return match.group(1, 2) + return url, None + +_tagprog = None +def splittag(url): + """splittag('/path#tag') --> '/path', 'tag'.""" + global _tagprog + if _tagprog is None: + import re + _tagprog = re.compile('^(.*)#([^#]*)$') + + match = _tagprog.match(url) + if match: return match.group(1, 2) + return url, None + +def splitattr(url): + """splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].""" + words = url.split(';') + return words[0], words[1:] + +_valueprog = None +def splitvalue(attr): + """splitvalue('attr=value') --> 'attr', 'value'.""" + global _valueprog + if _valueprog is None: + import re + _valueprog = re.compile('^([^=]*)=(.*)$') + + match = _valueprog.match(attr) + if match: return match.group(1, 2) + return attr, None diff --git a/pype/modules/ftrack/python2_vendor/future/backports/urllib/request.py b/pype/modules/ftrack/python2_vendor/future/backports/urllib/request.py new file mode 100644 index 0000000000..baee5401aa --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/urllib/request.py @@ -0,0 +1,2647 @@ +""" +Ported using Python-Future from the Python 3.3 standard library. + +An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler +deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +IOError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. 
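+
+A minimal sketch of the error handling described above (the URL is
+hypothetical):
+
+    import urllib.request, urllib.error
+    try:
+        f = urllib.request.urlopen('http://example.com/missing')
+    except urllib.error.HTTPError as e:
+        print(e.code)      # the HTTPError doubles as a response object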
+ +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. + +BaseHandler -- + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib.request + +# set up authentication info +authinfo = urllib.request.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib.request.build_opener(proxy_support, authinfo, + urllib.request.CacheFTPHandler) + +# install it +urllib.request.install_opener(opener) + +f = urllib.request.urlopen('http://www.python.org/') +""" + +# XXX issues: +# If an authentication error handler that tries to perform +# authentication for some reason but fails, how should the error be +# signalled? The client needs to know the HTTP error code. But if +# the handler knows that the problem was, e.g., that it didn't know +# that hash algo that requested in the challenge, it would be good to +# pass that information along to the client, too. +# ftp errors aren't handled cleanly +# check digest against correct (i.e. non-apache) implementation + +# Possible extensions: +# complex proxies XXX not sure what exactly was meant by this +# abstract factory for opener + +from __future__ import absolute_import, division, print_function, unicode_literals +from future.builtins import bytes, dict, filter, input, int, map, open, str +from future.utils import PY2, PY3, raise_with_traceback + +import base64 +import bisect +import hashlib +import array + +from future.backports import email +from future.backports.http import client as http_client +from .error import URLError, HTTPError, ContentTooShortError +from .parse import ( + urlparse, urlsplit, urljoin, unwrap, quote, unquote, + splittype, splithost, splitport, splituser, splitpasswd, + splitattr, splitquery, splitvalue, splittag, to_bytes, urlunparse) +from .response import addinfourl, addclosehook + +import io +import os +import posixpath +import re +import socket +import sys +import time +import tempfile +import contextlib +import warnings + +from future.utils import PY2 + +if PY2: + from collections import Iterable +else: + from collections.abc import Iterable + +# check for SSL +try: + import ssl + # Not available in the SSL module in Py2: + from ssl import SSLContext +except ImportError: + _have_ssl = False +else: + _have_ssl = True + +__all__ = [ + # Classes + 'Request', 'OpenerDirector', 'BaseHandler', 'HTTPDefaultErrorHandler', + 'HTTPRedirectHandler', 'HTTPCookieProcessor', 'ProxyHandler', + 'HTTPPasswordMgr', 'HTTPPasswordMgrWithDefaultRealm', + 'AbstractBasicAuthHandler', 'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler', + 'AbstractDigestAuthHandler', 'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler', + 'HTTPHandler', 'FileHandler', 'FTPHandler', 'CacheFTPHandler', + 'UnknownHandler', 'HTTPErrorProcessor', + # Functions + 'urlopen', 'install_opener', 'build_opener', + 'pathname2url', 'url2pathname', 'getproxies', + # Legacy interface + 'urlretrieve', 'urlcleanup', 'URLopener', 'FancyURLopener', +] + +# used in User-Agent header sent +__version__ = 
sys.version[:3] + +_opener = None +def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **_3to2kwargs): + if 'cadefault' in _3to2kwargs: cadefault = _3to2kwargs['cadefault']; del _3to2kwargs['cadefault'] + else: cadefault = False + if 'capath' in _3to2kwargs: capath = _3to2kwargs['capath']; del _3to2kwargs['capath'] + else: capath = None + if 'cafile' in _3to2kwargs: cafile = _3to2kwargs['cafile']; del _3to2kwargs['cafile'] + else: cafile = None + global _opener + if cafile or capath or cadefault: + if not _have_ssl: + raise ValueError('SSL support not available') + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + context.verify_mode = ssl.CERT_REQUIRED + if cafile or capath: + context.load_verify_locations(cafile, capath) + else: + context.set_default_verify_paths() + https_handler = HTTPSHandler(context=context, check_hostname=True) + opener = build_opener(https_handler) + elif _opener is None: + _opener = opener = build_opener() + else: + opener = _opener + return opener.open(url, data, timeout) + +def install_opener(opener): + global _opener + _opener = opener + +_url_tempfiles = [] +def urlretrieve(url, filename=None, reporthook=None, data=None): + """ + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + """ + url_type, path = splittype(url) + + with contextlib.closing(urlopen(url, data)) as fp: + headers = fp.info() + + # Just return the local path and the "headers" for file:// + # URLs. No sense in performing a copy unless requested. + if url_type == "file" and not filename: + return os.path.normpath(path), headers + + # Handle temporary file setup. + if filename: + tfp = open(filename, 'wb') + else: + tfp = tempfile.NamedTemporaryFile(delete=False) + filename = tfp.name + _url_tempfiles.append(filename) + + with tfp: + result = filename, headers + bs = 1024*8 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + + if reporthook: + reporthook(blocknum, bs, size) + + while True: + block = fp.read(bs) + if not block: + break + read += len(block) + tfp.write(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, bs, size) + + if size >= 0 and read < size: + raise ContentTooShortError( + "retrieval incomplete: got only %i out of %i bytes" + % (read, size), result) + + return result + +def urlcleanup(): + for temp_file in _url_tempfiles: + try: + os.unlink(temp_file) + except EnvironmentError: + pass + + del _url_tempfiles[:] + global _opener + if _opener: + _opener = None + +if PY3: + _cut_port_re = re.compile(r":\d+$", re.ASCII) +else: + _cut_port_re = re.compile(r":\d+$") + +def request_host(request): + + """Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. 
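+
+    Illustrative sketch (hypothetical request): a Request for
+    'http://WWW.Example.com:8000/path' yields 'www.example.com'.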
+ + """ + url = request.full_url + host = urlparse(url)[1] + if host == "": + host = request.get_header("Host", "") + + # remove port, if present + host = _cut_port_re.sub("", host, 1) + return host.lower() + +class Request(object): + + def __init__(self, url, data=None, headers={}, + origin_req_host=None, unverifiable=False, + method=None): + # unwrap('') --> 'type://host/path' + self.full_url = unwrap(url) + self.full_url, self.fragment = splittag(self.full_url) + self.data = data + self.headers = {} + self._tunnel_host = None + for key, value in headers.items(): + self.add_header(key, value) + self.unredirected_hdrs = {} + if origin_req_host is None: + origin_req_host = request_host(self) + self.origin_req_host = origin_req_host + self.unverifiable = unverifiable + self.method = method + self._parse() + + def _parse(self): + self.type, rest = splittype(self.full_url) + if self.type is None: + raise ValueError("unknown url type: %r" % self.full_url) + self.host, self.selector = splithost(rest) + if self.host: + self.host = unquote(self.host) + + def get_method(self): + """Return a string indicating the HTTP request method.""" + if self.method is not None: + return self.method + elif self.data is not None: + return "POST" + else: + return "GET" + + def get_full_url(self): + if self.fragment: + return '%s#%s' % (self.full_url, self.fragment) + else: + return self.full_url + + # Begin deprecated methods + + def add_data(self, data): + msg = "Request.add_data method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + self.data = data + + def has_data(self): + msg = "Request.has_data method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.data is not None + + def get_data(self): + msg = "Request.get_data method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.data + + def get_type(self): + msg = "Request.get_type method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.type + + def get_host(self): + msg = "Request.get_host method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.host + + def get_selector(self): + msg = "Request.get_selector method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.selector + + def is_unverifiable(self): + msg = "Request.is_unverifiable method is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.unverifiable + + def get_origin_req_host(self): + msg = "Request.get_origin_req_host method is deprecated." 
+ warnings.warn(msg, DeprecationWarning, stacklevel=1) + return self.origin_req_host + + # End deprecated methods + + def set_proxy(self, host, type): + if self.type == 'https' and not self._tunnel_host: + self._tunnel_host = self.host + else: + self.type= type + self.selector = self.full_url + self.host = host + + def has_proxy(self): + return self.selector == self.full_url + + def add_header(self, key, val): + # useful for something like authentication + self.headers[key.capitalize()] = val + + def add_unredirected_header(self, key, val): + # will not be added to a redirected request + self.unredirected_hdrs[key.capitalize()] = val + + def has_header(self, header_name): + return (header_name in self.headers or + header_name in self.unredirected_hdrs) + + def get_header(self, header_name, default=None): + return self.headers.get( + header_name, + self.unredirected_hdrs.get(header_name, default)) + + def header_items(self): + hdrs = self.unredirected_hdrs.copy() + hdrs.update(self.headers) + return list(hdrs.items()) + +class OpenerDirector(object): + def __init__(self): + client_version = "Python-urllib/%s" % __version__ + self.addheaders = [('User-agent', client_version)] + # self.handlers is retained only for backward compatibility + self.handlers = [] + # manage the individual handlers + self.handle_open = {} + self.handle_error = {} + self.process_response = {} + self.process_request = {} + + def add_handler(self, handler): + if not hasattr(handler, "add_parent"): + raise TypeError("expected BaseHandler instance, got %r" % + type(handler)) + + added = False + for meth in dir(handler): + if meth in ["redirect_request", "do_open", "proxy_open"]: + # oops, coincidental match + continue + + i = meth.find("_") + protocol = meth[:i] + condition = meth[i+1:] + + if condition.startswith("error"): + j = condition.find("_") + i + 1 + kind = meth[j+1:] + try: + kind = int(kind) + except ValueError: + pass + lookup = self.handle_error.get(protocol, {}) + self.handle_error[protocol] = lookup + elif condition == "open": + kind = protocol + lookup = self.handle_open + elif condition == "response": + kind = protocol + lookup = self.process_response + elif condition == "request": + kind = protocol + lookup = self.process_request + else: + continue + + handlers = lookup.setdefault(kind, []) + if handlers: + bisect.insort(handlers, handler) + else: + handlers.append(handler) + added = True + + if added: + bisect.insort(self.handlers, handler) + handler.add_parent(self) + + def close(self): + # Only exists for backwards compatibility. + pass + + def _call_chain(self, chain, kind, meth_name, *args): + # Handlers raise an exception if no one else should try to handle + # the request, or return None if they can't but another handler + # could. Otherwise, they return the response. + handlers = chain.get(kind, ()) + for handler in handlers: + func = getattr(handler, meth_name) + result = func(*args) + if result is not None: + return result + + def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + """ + Accept a URL or a Request object + + Python-Future: if the URL is passed as a byte-string, decode it first. 
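+
+        A minimal usage sketch (the URL is hypothetical):
+
+            opener = build_opener()
+            response = opener.open('http://example.com/')
+            body = response.read()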
+ """ + if isinstance(fullurl, bytes): + fullurl = fullurl.decode() + if isinstance(fullurl, str): + req = Request(fullurl, data) + else: + req = fullurl + if data is not None: + req.data = data + + req.timeout = timeout + protocol = req.type + + # pre-process request + meth_name = protocol+"_request" + for processor in self.process_request.get(protocol, []): + meth = getattr(processor, meth_name) + req = meth(req) + + response = self._open(req, data) + + # post-process response + meth_name = protocol+"_response" + for processor in self.process_response.get(protocol, []): + meth = getattr(processor, meth_name) + response = meth(req, response) + + return response + + def _open(self, req, data=None): + result = self._call_chain(self.handle_open, 'default', + 'default_open', req) + if result: + return result + + protocol = req.type + result = self._call_chain(self.handle_open, protocol, protocol + + '_open', req) + if result: + return result + + return self._call_chain(self.handle_open, 'unknown', + 'unknown_open', req) + + def error(self, proto, *args): + if proto in ('http', 'https'): + # XXX http[s] protocols are special-cased + dict = self.handle_error['http'] # https is not different than http + proto = args[2] # YUCK! + meth_name = 'http_error_%s' % proto + http_err = 1 + orig_args = args + else: + dict = self.handle_error + meth_name = proto + '_error' + http_err = 0 + args = (dict, proto, meth_name) + args + result = self._call_chain(*args) + if result: + return result + + if http_err: + args = (dict, 'default', 'http_error_default') + orig_args + return self._call_chain(*args) + +# XXX probably also want an abstract factory that knows when it makes +# sense to skip a superclass in favor of a subclass and when it might +# make sense to include both + +def build_opener(*handlers): + """Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + """ + def isclass(obj): + return isinstance(obj, type) or hasattr(obj, "__bases__") + + opener = OpenerDirector() + default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, + HTTPDefaultErrorHandler, HTTPRedirectHandler, + FTPHandler, FileHandler, HTTPErrorProcessor] + if hasattr(http_client, "HTTPSConnection"): + default_classes.append(HTTPSHandler) + skip = set() + for klass in default_classes: + for check in handlers: + if isclass(check): + if issubclass(check, klass): + skip.add(klass) + elif isinstance(check, klass): + skip.add(klass) + for klass in skip: + default_classes.remove(klass) + + for klass in default_classes: + opener.add_handler(klass()) + + for h in handlers: + if isclass(h): + h = h() + opener.add_handler(h) + return opener + +class BaseHandler(object): + handler_order = 500 + + def add_parent(self, parent): + self.parent = parent + + def close(self): + # Only exists for backwards compatibility + pass + + def __lt__(self, other): + if not hasattr(other, "handler_order"): + # Try to preserve the old behavior of having custom classes + # inserted after default ones (works only for custom user + # classes which are not aware of handler_order). 
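+            # (Illustrative: a hypothetical legacy handler class that
+            # defines no handler_order sorts after this handler.)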
+ return True + return self.handler_order < other.handler_order + + +class HTTPErrorProcessor(BaseHandler): + """Process HTTP error responses.""" + handler_order = 1000 # after all other processing + + def http_response(self, request, response): + code, msg, hdrs = response.code, response.msg, response.info() + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if not (200 <= code < 300): + response = self.parent.error( + 'http', request, response, code, msg, hdrs) + + return response + + https_response = http_response + +class HTTPDefaultErrorHandler(BaseHandler): + def http_error_default(self, req, fp, code, msg, hdrs): + raise HTTPError(req.full_url, code, msg, hdrs, fp) + +class HTTPRedirectHandler(BaseHandler): + # maximum number of redirections to any single URL + # this is needed because of the state that cookies introduce + max_repeats = 4 + # maximum total number of redirections (regardless of URL) before + # assuming we're in a loop + max_redirections = 10 + + def redirect_request(self, req, fp, code, msg, headers, newurl): + """Return a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + """ + m = req.get_method() + if (not (code in (301, 302, 303, 307) and m in ("GET", "HEAD") + or code in (301, 302, 303) and m == "POST")): + raise HTTPError(req.full_url, code, msg, headers, fp) + + # Strictly (according to RFC 2616), 301 or 302 in response to + # a POST MUST NOT cause a redirection without confirmation + # from the user (of urllib.request, in this case). In practice, + # essentially all clients do redirect in this case, so we do + # the same. + # be conciliant with URIs containing a space + newurl = newurl.replace(' ', '%20') + CONTENT_HEADERS = ("content-length", "content-type") + newheaders = dict((k, v) for k, v in req.headers.items() + if k.lower() not in CONTENT_HEADERS) + return Request(newurl, + headers=newheaders, + origin_req_host=req.origin_req_host, + unverifiable=True) + + # Implementation note: To avoid the server sending us into an + # infinite loop, the request object needs to track what URLs we + # have already seen. Do this by adding a handler-specific + # attribute to the Request object. + def http_error_302(self, req, fp, code, msg, headers): + # Some servers (incorrectly) return multiple Location headers + # (so probably same goes for URI). Use first header. + if "location" in headers: + newurl = headers["location"] + elif "uri" in headers: + newurl = headers["uri"] + else: + return + + # fix a possible malformed URL + urlparts = urlparse(newurl) + + # For security reasons we don't allow redirection to anything other + # than http, https or ftp. 
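+        # Illustrative (hypothetical URLs): a redirect to
+        # 'javascript:alert(1)' or 'file:///etc/passwd' fails the check
+        # below, while 'https://example.com/' passes.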
+ + if urlparts.scheme not in ('http', 'https', 'ftp', ''): + raise HTTPError( + newurl, code, + "%s - Redirection to url '%s' is not allowed" % (msg, newurl), + headers, fp) + + if not urlparts.path: + urlparts = list(urlparts) + urlparts[2] = "/" + newurl = urlunparse(urlparts) + + newurl = urljoin(req.full_url, newurl) + + # XXX Probably want to forget about the state of the current + # request, although that might interact poorly with other + # handlers that also use handler-specific request attributes + new = self.redirect_request(req, fp, code, msg, headers, newurl) + if new is None: + return + + # loop detection + # .redirect_dict has a key url if url was previously visited. + if hasattr(req, 'redirect_dict'): + visited = new.redirect_dict = req.redirect_dict + if (visited.get(newurl, 0) >= self.max_repeats or + len(visited) >= self.max_redirections): + raise HTTPError(req.full_url, code, + self.inf_msg + msg, headers, fp) + else: + visited = new.redirect_dict = req.redirect_dict = {} + visited[newurl] = visited.get(newurl, 0) + 1 + + # Don't close the fp until we are sure that we won't use it + # with HTTPError. + fp.read() + fp.close() + + return self.parent.open(new, timeout=req.timeout) + + http_error_301 = http_error_303 = http_error_307 = http_error_302 + + inf_msg = "The HTTP server returned a redirect error that would " \ + "lead to an infinite loop.\n" \ + "The last 30x error message was:\n" + + +def _parse_proxy(proxy): + """Return (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme: + + >>> _parse_proxy('file:/ftp.example.com/') + Traceback (most recent call last): + ValueError: proxy URL with no authority: 'file:/ftp.example.com/' + + The first three items of the returned tuple may be None. 
+ + Examples of authority parsing: + + >>> _parse_proxy('proxy.example.com') + (None, None, None, 'proxy.example.com') + >>> _parse_proxy('proxy.example.com:3128') + (None, None, None, 'proxy.example.com:3128') + + The authority component may optionally include userinfo (assumed to be + username:password): + + >>> _parse_proxy('joe:password@proxy.example.com') + (None, 'joe', 'password', 'proxy.example.com') + >>> _parse_proxy('joe:password@proxy.example.com:3128') + (None, 'joe', 'password', 'proxy.example.com:3128') + + Same examples, but with URLs instead: + + >>> _parse_proxy('http://proxy.example.com/') + ('http', None, None, 'proxy.example.com') + >>> _parse_proxy('http://proxy.example.com:3128/') + ('http', None, None, 'proxy.example.com:3128') + >>> _parse_proxy('http://joe:password@proxy.example.com/') + ('http', 'joe', 'password', 'proxy.example.com') + >>> _parse_proxy('http://joe:password@proxy.example.com:3128') + ('http', 'joe', 'password', 'proxy.example.com:3128') + + Everything after the authority is ignored: + + >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128') + ('ftp', 'joe', 'password', 'proxy.example.com') + + Test for no trailing '/' case: + + >>> _parse_proxy('http://joe:password@proxy.example.com') + ('http', 'joe', 'password', 'proxy.example.com') + + """ + scheme, r_scheme = splittype(proxy) + if not r_scheme.startswith("/"): + # authority + scheme = None + authority = proxy + else: + # URL + if not r_scheme.startswith("//"): + raise ValueError("proxy URL with no authority: %r" % proxy) + # We have an authority, so for RFC 3986-compliant URLs (by ss 3. + # and 3.3.), path is empty or starts with '/' + end = r_scheme.find("/", 2) + if end == -1: + end = None + authority = r_scheme[2:end] + userinfo, hostport = splituser(authority) + if userinfo is not None: + user, password = splitpasswd(userinfo) + else: + user = password = None + return scheme, user, password, hostport + +class ProxyHandler(BaseHandler): + # Proxies must be in front + handler_order = 100 + + def __init__(self, proxies=None): + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'keys'), "proxies must be a mapping" + self.proxies = proxies + for type, url in proxies.items(): + setattr(self, '%s_open' % type, + lambda r, proxy=url, type=type, meth=self.proxy_open: + meth(r, proxy, type)) + + def proxy_open(self, req, proxy, type): + orig_type = req.type + proxy_type, user, password, hostport = _parse_proxy(proxy) + if proxy_type is None: + proxy_type = orig_type + + if req.host and proxy_bypass(req.host): + return None + + if user and password: + user_pass = '%s:%s' % (unquote(user), + unquote(password)) + creds = base64.b64encode(user_pass.encode()).decode("ascii") + req.add_header('Proxy-authorization', 'Basic ' + creds) + hostport = unquote(hostport) + req.set_proxy(hostport, proxy_type) + if orig_type == proxy_type or orig_type == 'https': + # let other handlers take care of it + return None + else: + # need to start over, because the other handlers don't + # grok the proxy's URL type + # e.g. 
if we have a constructor arg proxies like so: + # {'http': 'ftp://proxy.example.com'}, we may end up turning + # a request for http://acme.example.com/a into one for + # ftp://proxy.example.com/a + return self.parent.open(req, timeout=req.timeout) + +class HTTPPasswordMgr(object): + + def __init__(self): + self.passwd = {} + + def add_password(self, realm, uri, user, passwd): + # uri could be a single URI or a sequence + if isinstance(uri, str): + uri = [uri] + if realm not in self.passwd: + self.passwd[realm] = {} + for default_port in True, False: + reduced_uri = tuple( + [self.reduce_uri(u, default_port) for u in uri]) + self.passwd[realm][reduced_uri] = (user, passwd) + + def find_user_password(self, realm, authuri): + domains = self.passwd.get(realm, {}) + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uris, authinfo in domains.items(): + for uri in uris: + if self.is_suburi(uri, reduced_authuri): + return authinfo + return None, None + + def reduce_uri(self, uri, default_port=True): + """Accept authority or URI and extract only the authority and path.""" + # note HTTP URLs do not have a userinfo component + parts = urlsplit(uri) + if parts[1]: + # URI + scheme = parts[0] + authority = parts[1] + path = parts[2] or '/' + else: + # host or host:port + scheme = None + authority = uri + path = '/' + host, port = splitport(authority) + if default_port and port is None and scheme is not None: + dport = {"http": 80, + "https": 443, + }.get(scheme) + if dport is not None: + authority = "%s:%d" % (host, dport) + return authority, path + + def is_suburi(self, base, test): + """Check if test is below base in a URI tree + + Both args must be URIs in reduced form. + """ + if base == test: + return True + if base[0] != test[0]: + return False + common = posixpath.commonprefix((base[1], test[1])) + if len(common) == len(base[1]): + return True + return False + + +class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): + + def find_user_password(self, realm, authuri): + user, password = HTTPPasswordMgr.find_user_password(self, realm, + authuri) + if user is not None: + return user, password + return HTTPPasswordMgr.find_user_password(self, None, authuri) + + +class AbstractBasicAuthHandler(object): + + # XXX this allows for multiple auth-schemes, but will stupidly pick + # the last one with a realm specified. + + # allow for double- and single-quoted realm values + # (single quotes are a violation of the RFC, but appear in the wild) + rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' + 'realm=(["\']?)([^"\']*)\\2', re.I) + + # XXX could pre-emptively send auth info already accepted (RFC 2617, + # end of section 2, and section 1.2 immediately after "credentials" + # production). + + def __init__(self, password_mgr=None): + if password_mgr is None: + password_mgr = HTTPPasswordMgr() + self.passwd = password_mgr + self.add_password = self.passwd.add_password + self.retried = 0 + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, authreq, host, req, headers): + # host may be an authority (without userinfo) or a URL with an + # authority + # XXX could be multiple headers + authreq = headers.get(authreq, None) + + if self.retried > 5: + # retry sending the username:password 5 times before failing. 
+ raise HTTPError(req.get_full_url(), 401, "basic auth failed", + headers, None) + else: + self.retried += 1 + + if authreq: + scheme = authreq.split()[0] + if scheme.lower() != 'basic': + raise ValueError("AbstractBasicAuthHandler does not" + " support the following scheme: '%s'" % + scheme) + else: + mo = AbstractBasicAuthHandler.rx.search(authreq) + if mo: + scheme, quote, realm = mo.groups() + if quote not in ['"',"'"]: + warnings.warn("Basic Auth Realm was unquoted", + UserWarning, 2) + if scheme.lower() == 'basic': + response = self.retry_http_basic_auth(host, req, realm) + if response and response.code != 401: + self.retried = 0 + return response + + def retry_http_basic_auth(self, host, req, realm): + user, pw = self.passwd.find_user_password(realm, host) + if pw is not None: + raw = "%s:%s" % (user, pw) + auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii") + if req.headers.get(self.auth_header, None) == auth: + return None + req.add_unredirected_header(self.auth_header, auth) + return self.parent.open(req, timeout=req.timeout) + else: + return None + + +class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Authorization' + + def http_error_401(self, req, fp, code, msg, headers): + url = req.full_url + response = self.http_error_auth_reqed('www-authenticate', + url, req, headers) + self.reset_retry_count() + return response + + +class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): + + auth_header = 'Proxy-authorization' + + def http_error_407(self, req, fp, code, msg, headers): + # http_error_auth_reqed requires that there is no userinfo component in + # authority. Assume there isn't one, since urllib.request does not (and + # should not, RFC 3986 s. 3.2.1) support requests for URLs containing + # userinfo. + authority = req.host + response = self.http_error_auth_reqed('proxy-authenticate', + authority, req, headers) + self.reset_retry_count() + return response + + +# Return n random bytes. +_randombytes = os.urandom + + +class AbstractDigestAuthHandler(object): + # Digest authentication is specified in RFC 2617. + + # XXX The client does not inspect the Authentication-Info header + # in a successful response. + + # XXX It should be possible to test this implementation against + # a mock server that just generates a static set of challenges. + + # XXX qop="auth-int" supports is shaky + + def __init__(self, passwd=None): + if passwd is None: + passwd = HTTPPasswordMgr() + self.passwd = passwd + self.add_password = self.passwd.add_password + self.retried = 0 + self.nonce_count = 0 + self.last_nonce = None + + def reset_retry_count(self): + self.retried = 0 + + def http_error_auth_reqed(self, auth_header, host, req, headers): + authreq = headers.get(auth_header, None) + if self.retried > 5: + # Don't fail endlessly - if we failed once, we'll probably + # fail a second time. Hm. Unless the Password Manager is + # prompting for the information. Crap. 
This isn't great + # but it's better than the current 'repeat until recursion + # depth exceeded' approach + raise HTTPError(req.full_url, 401, "digest auth failed", + headers, None) + else: + self.retried += 1 + if authreq: + scheme = authreq.split()[0] + if scheme.lower() == 'digest': + return self.retry_http_digest_auth(req, authreq) + elif scheme.lower() != 'basic': + raise ValueError("AbstractDigestAuthHandler does not support" + " the following scheme: '%s'" % scheme) + + def retry_http_digest_auth(self, req, auth): + token, challenge = auth.split(' ', 1) + chal = parse_keqv_list(filter(None, parse_http_list(challenge))) + auth = self.get_authorization(req, chal) + if auth: + auth_val = 'Digest %s' % auth + if req.headers.get(self.auth_header, None) == auth_val: + return None + req.add_unredirected_header(self.auth_header, auth_val) + resp = self.parent.open(req, timeout=req.timeout) + return resp + + def get_cnonce(self, nonce): + # The cnonce-value is an opaque + # quoted string value provided by the client and used by both client + # and server to avoid chosen plaintext attacks, to provide mutual + # authentication, and to provide some message integrity protection. + # This isn't a fabulous effort, but it's probably Good Enough. + s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime()) + b = s.encode("ascii") + _randombytes(8) + dig = hashlib.sha1(b).hexdigest() + return dig[:16] + + def get_authorization(self, req, chal): + try: + realm = chal['realm'] + nonce = chal['nonce'] + qop = chal.get('qop') + algorithm = chal.get('algorithm', 'MD5') + # mod_digest doesn't send an opaque, even though it isn't + # supposed to be optional + opaque = chal.get('opaque', None) + except KeyError: + return None + + H, KD = self.get_algorithm_impls(algorithm) + if H is None: + return None + + user, pw = self.passwd.find_user_password(realm, req.full_url) + if user is None: + return None + + # XXX not implemented yet + if req.data is not None: + entdig = self.get_entity_digest(req.data, chal) + else: + entdig = None + + A1 = "%s:%s:%s" % (user, realm, pw) + A2 = "%s:%s" % (req.get_method(), + # XXX selector: what about proxies and full urls + req.selector) + if qop == 'auth': + if nonce == self.last_nonce: + self.nonce_count += 1 + else: + self.nonce_count = 1 + self.last_nonce = nonce + ncvalue = '%08x' % self.nonce_count + cnonce = self.get_cnonce(nonce) + noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) + respdig = KD(H(A1), noncebit) + elif qop is None: + respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) + else: + # XXX handle auth-int. + raise URLError("qop '%s' is not supported." % qop) + + # XXX should the partial digests be encoded too? 
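+        # Recap of the computation above (RFC 2617, qop="auth"):
+        #     A1 = "user:realm:password", A2 = "method:uri"
+        #     respdig = KD(H(A1), "nonce:nc:cnonce:qop:H(A2)")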
+ + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (user, realm, nonce, req.selector, + respdig) + if opaque: + base += ', opaque="%s"' % opaque + if entdig: + base += ', digest="%s"' % entdig + base += ', algorithm="%s"' % algorithm + if qop: + base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) + return base + + def get_algorithm_impls(self, algorithm): + # lambdas assume digest modules are imported at the top level + if algorithm == 'MD5': + H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest() + elif algorithm == 'SHA': + H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest() + # XXX MD5-sess + KD = lambda s, d: H("%s:%s" % (s, d)) + return H, KD + + def get_entity_digest(self, data, chal): + # XXX not implemented yet + return None + + +class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + """An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + """ + + auth_header = 'Authorization' + handler_order = 490 # before Basic auth + + def http_error_401(self, req, fp, code, msg, headers): + host = urlparse(req.full_url)[1] + retry = self.http_error_auth_reqed('www-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + + +class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): + + auth_header = 'Proxy-Authorization' + handler_order = 490 # before Basic auth + + def http_error_407(self, req, fp, code, msg, headers): + host = req.host + retry = self.http_error_auth_reqed('proxy-authenticate', + host, req, headers) + self.reset_retry_count() + return retry + +class AbstractHTTPHandler(BaseHandler): + + def __init__(self, debuglevel=0): + self._debuglevel = debuglevel + + def set_http_debuglevel(self, level): + self._debuglevel = level + + def do_request_(self, request): + host = request.host + if not host: + raise URLError('no host given') + + if request.data is not None: # POST + data = request.data + if isinstance(data, str): + msg = "POST data should be bytes or an iterable of bytes. " \ + "It cannot be of type str." + raise TypeError(msg) + if not request.has_header('Content-type'): + request.add_unredirected_header( + 'Content-type', + 'application/x-www-form-urlencoded') + if not request.has_header('Content-length'): + size = None + try: + ### For Python-Future: + if PY2 and isinstance(data, array.array): + # memoryviews of arrays aren't supported + # in Py2.7. (e.g. memoryview(array.array('I', + # [1, 2, 3, 4])) raises a TypeError.) + # So we calculate the size manually instead: + size = len(data) * data.itemsize + ### + else: + mv = memoryview(data) + size = len(mv) * mv.itemsize + except TypeError: + if isinstance(data, Iterable): + raise ValueError("Content-Length should be specified " + "for iterable data of type %r %r" % (type(data), + data)) + else: + request.add_unredirected_header( + 'Content-length', '%d' % size) + + sel_host = host + if request.has_proxy(): + scheme, sel = splittype(request.selector) + sel_host, sel_path = splithost(sel) + if not request.has_header('Host'): + request.add_unredirected_header('Host', sel_host) + for name, value in self.parent.addheaders: + name = name.capitalize() + if not request.has_header(name): + request.add_unredirected_header(name, value) + + return request + + def do_open(self, http_class, req, **http_conn_args): + """Return an HTTPResponse object for the request, using http_class. 
+ + http_class must implement the HTTPConnection API from http.client. + """ + host = req.host + if not host: + raise URLError('no host given') + + # will parse host:port + h = http_class(host, timeout=req.timeout, **http_conn_args) + + headers = dict(req.unredirected_hdrs) + headers.update(dict((k, v) for k, v in req.headers.items() + if k not in headers)) + + # TODO(jhylton): Should this be redesigned to handle + # persistent connections? + + # We want to make an HTTP/1.1 request, but the addinfourl + # class isn't prepared to deal with a persistent connection. + # It will try to read all remaining data from the socket, + # which will block while the server waits for the next request. + # So make sure the connection gets closed after the (only) + # request. + headers["Connection"] = "close" + headers = dict((name.title(), val) for name, val in headers.items()) + + if req._tunnel_host: + tunnel_headers = {} + proxy_auth_hdr = "Proxy-Authorization" + if proxy_auth_hdr in headers: + tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] + # Proxy-Authorization should not be sent to origin + # server. + del headers[proxy_auth_hdr] + h.set_tunnel(req._tunnel_host, headers=tunnel_headers) + + try: + h.request(req.get_method(), req.selector, req.data, headers) + except socket.error as err: # timeout error + h.close() + raise URLError(err) + else: + r = h.getresponse() + # If the server does not send us a 'Connection: close' header, + # HTTPConnection assumes the socket should be left open. Manually + # mark the socket to be closed when this response object goes away. + if h.sock: + h.sock.close() + h.sock = None + + + r.url = req.get_full_url() + # This line replaces the .msg attribute of the HTTPResponse + # with .headers, because urllib clients expect the response to + # have the reason in .msg. It would be good to mark this + # attribute is deprecated and get then to use info() or + # .headers. 
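As an aside, the single-shot request that do_open() performs (note the forced Connection: close above) can be sketched directly against the backported http.client API the handler relies on; the host and timeout here are placeholders:

from future.backports.http import client as http_client

conn = http_client.HTTPConnection("example.com", timeout=10)
conn.request("GET", "/", headers={"Host": "example.com",
                                  "Connection": "close"})
r = conn.getresponse()
print(r.status, r.reason)   # e.g. 200 OK
r.read()                    # drain the body; the server then closes the socket
conn.close()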
+ r.msg = r.reason + return r + + +class HTTPHandler(AbstractHTTPHandler): + + def http_open(self, req): + return self.do_open(http_client.HTTPConnection, req) + + http_request = AbstractHTTPHandler.do_request_ + +if hasattr(http_client, 'HTTPSConnection'): + + class HTTPSHandler(AbstractHTTPHandler): + + def __init__(self, debuglevel=0, context=None, check_hostname=None): + AbstractHTTPHandler.__init__(self, debuglevel) + self._context = context + self._check_hostname = check_hostname + + def https_open(self, req): + return self.do_open(http_client.HTTPSConnection, req, + context=self._context, check_hostname=self._check_hostname) + + https_request = AbstractHTTPHandler.do_request_ + + __all__.append('HTTPSHandler') + +class HTTPCookieProcessor(BaseHandler): + def __init__(self, cookiejar=None): + import future.backports.http.cookiejar as http_cookiejar + if cookiejar is None: + cookiejar = http_cookiejar.CookieJar() + self.cookiejar = cookiejar + + def http_request(self, request): + self.cookiejar.add_cookie_header(request) + return request + + def http_response(self, request, response): + self.cookiejar.extract_cookies(response, request) + return response + + https_request = http_request + https_response = http_response + +class UnknownHandler(BaseHandler): + def unknown_open(self, req): + type = req.type + raise URLError('unknown url type: %s' % type) + +def parse_keqv_list(l): + """Parse list of key=value strings where keys are not duplicated.""" + parsed = {} + for elt in l: + k, v = elt.split('=', 1) + if v[0] == '"' and v[-1] == '"': + v = v[1:-1] + parsed[k] = v + return parsed + +def parse_http_list(s): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. 
+ """ + res = [] + part = '' + + escape = quote = False + for cur in s: + if escape: + part += cur + escape = False + continue + if quote: + if cur == '\\': + escape = True + continue + elif cur == '"': + quote = False + part += cur + continue + + if cur == ',': + res.append(part) + part = '' + continue + + if cur == '"': + quote = True + + part += cur + + # append last part + if part: + res.append(part) + + return [part.strip() for part in res] + +class FileHandler(BaseHandler): + # Use local file or FTP depending on form of URL + def file_open(self, req): + url = req.selector + if url[:2] == '//' and url[2:3] != '/' and (req.host and + req.host != 'localhost'): + if not req.host is self.get_names(): + raise URLError("file:// scheme is supported only on localhost") + else: + return self.open_local_file(req) + + # names for the localhost + names = None + def get_names(self): + if FileHandler.names is None: + try: + FileHandler.names = tuple( + socket.gethostbyname_ex('localhost')[2] + + socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + FileHandler.names = (socket.gethostbyname('localhost'),) + return FileHandler.names + + # not entirely sure what the rules are here + def open_local_file(self, req): + import future.backports.email.utils as email_utils + import mimetypes + host = req.host + filename = req.selector + localfile = url2pathname(filename) + try: + stats = os.stat(localfile) + size = stats.st_size + modified = email_utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_type(filename)[0] + headers = email.message_from_string( + 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified)) + if host: + host, port = splitport(host) + if not host or \ + (not port and _safe_gethostbyname(host) in self.get_names()): + if host: + origurl = 'file://' + host + filename + else: + origurl = 'file://' + filename + return addinfourl(open(localfile, 'rb'), headers, origurl) + except OSError as exp: + # users shouldn't expect OSErrors coming from urlopen() + raise URLError(exp) + raise URLError('file not on local host') + +def _safe_gethostbyname(host): + try: + return socket.gethostbyname(host) + except socket.gaierror: + return None + +class FTPHandler(BaseHandler): + def ftp_open(self, req): + import ftplib + import mimetypes + host = req.host + if not host: + raise URLError('ftp error: no host given') + host, port = splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = user or '' + passwd = passwd or '' + + try: + host = socket.gethostbyname(host) + except socket.error as msg: + raise URLError(msg) + path, attrs = splitattr(req.selector) + dirs = path.split('/') + dirs = list(map(unquote, dirs)) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + try: + fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + fp, retrlen = fw.retrfile(file, type) + headers = "" + mtype = mimetypes.guess_type(req.full_url)[0] + if mtype: + headers += "Content-type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-length: %d\n" % retrlen + headers = 
email.message_from_string(headers) + return addinfourl(fp, headers, req.full_url) + except ftplib.all_errors as exp: + exc = URLError('ftp error: %r' % exp) + raise_with_traceback(exc) + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + return ftpwrapper(user, passwd, host, port, dirs, timeout, + persistent=False) + +class CacheFTPHandler(FTPHandler): + # XXX would be nice to have pluggable cache strategies + # XXX this stuff is definitely not thread safe + def __init__(self): + self.cache = {} + self.timeout = {} + self.soonest = 0 + self.delay = 60 + self.max_conns = 16 + + def setTimeout(self, t): + self.delay = t + + def setMaxConns(self, m): + self.max_conns = m + + def connect_ftp(self, user, passwd, host, port, dirs, timeout): + key = user, host, port, '/'.join(dirs), timeout + if key in self.cache: + self.timeout[key] = time.time() + self.delay + else: + self.cache[key] = ftpwrapper(user, passwd, host, port, + dirs, timeout) + self.timeout[key] = time.time() + self.delay + self.check_cache() + return self.cache[key] + + def check_cache(self): + # first check for old ones + t = time.time() + if self.soonest <= t: + for k, v in list(self.timeout.items()): + if v < t: + self.cache[k].close() + del self.cache[k] + del self.timeout[k] + self.soonest = min(list(self.timeout.values())) + + # then check the size + if len(self.cache) == self.max_conns: + for k, v in list(self.timeout.items()): + if v == self.soonest: + del self.cache[k] + del self.timeout[k] + break + self.soonest = min(list(self.timeout.values())) + + def clear_cache(self): + for conn in self.cache.values(): + conn.close() + self.cache.clear() + self.timeout.clear() + + +# Code move from the old urllib module + +MAXFTPCACHE = 10 # Trim the ftp cache beyond this size + +# Helper for non-unix systems +if os.name == 'nt': + from nturl2path import url2pathname, pathname2url +else: + def url2pathname(pathname): + """OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.""" + return unquote(pathname) + + def pathname2url(pathname): + """OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.""" + return quote(pathname) + +# This really consists of two pieces: +# (1) a class which handles opening of all sorts of URLs +# (plus assorted utilities etc.) +# (2) a set of functions for parsing URLs +# XXX Should these be separated out into different modules? + + +ftpcache = {} +class URLopener(object): + """Class to open URLs. + This is a class rather than just a subroutine because we may need + more than one set of global protocol-specific options. + Note -- this is a base class for those who don't want the + automatic handling of errors type 302 (relocated) and 401 + (authorization needed).""" + + __tempfiles = None + + version = "Python-urllib/%s" % __version__ + + # Constructor + def __init__(self, proxies=None, **x509): + msg = "%(class)s style of invoking requests is deprecated. 
" \ + "Use newer urlopen functions/methods" % {'class': self.__class__.__name__} + warnings.warn(msg, DeprecationWarning, stacklevel=3) + if proxies is None: + proxies = getproxies() + assert hasattr(proxies, 'keys'), "proxies must be a mapping" + self.proxies = proxies + self.key_file = x509.get('key_file') + self.cert_file = x509.get('cert_file') + self.addheaders = [('User-Agent', self.version)] + self.__tempfiles = [] + self.__unlink = os.unlink # See cleanup() + self.tempcache = None + # Undocumented feature: if you assign {} to tempcache, + # it is used to cache files retrieved with + # self.retrieve(). This is not enabled by default + # since it does not work for changing documents (and I + # haven't got the logic to check expiration headers + # yet). + self.ftpcache = ftpcache + # Undocumented feature: you can use a different + # ftp cache by assigning to the .ftpcache member; + # in case you want logically independent URL openers + # XXX This is not threadsafe. Bah. + + def __del__(self): + self.close() + + def close(self): + self.cleanup() + + def cleanup(self): + # This code sometimes runs when the rest of this module + # has already been deleted, so it can't use any globals + # or import anything. + if self.__tempfiles: + for file in self.__tempfiles: + try: + self.__unlink(file) + except OSError: + pass + del self.__tempfiles[:] + if self.tempcache: + self.tempcache.clear() + + def addheader(self, *args): + """Add a header to be used by the HTTP interface only + e.g. u.addheader('Accept', 'sound/basic')""" + self.addheaders.append(args) + + # External interface + def open(self, fullurl, data=None): + """Use URLopener().open(file) instead of open(file, 'r').""" + fullurl = unwrap(to_bytes(fullurl)) + fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|") + if self.tempcache and fullurl in self.tempcache: + filename, headers = self.tempcache[fullurl] + fp = open(filename, 'rb') + return addinfourl(fp, headers, fullurl) + urltype, url = splittype(fullurl) + if not urltype: + urltype = 'file' + if urltype in self.proxies: + proxy = self.proxies[urltype] + urltype, proxyhost = splittype(proxy) + host, selector = splithost(proxyhost) + url = (host, fullurl) # Signal special case to open_*() + else: + proxy = None + name = 'open_' + urltype + self.type = urltype + name = name.replace('-', '_') + if not hasattr(self, name): + if proxy: + return self.open_unknown_proxy(proxy, fullurl, data) + else: + return self.open_unknown(fullurl, data) + try: + if data is None: + return getattr(self, name)(url) + else: + return getattr(self, name)(url, data) + except HTTPError: + raise + except socket.error as msg: + raise_with_traceback(IOError('socket error', msg)) + + def open_unknown(self, fullurl, data=None): + """Overridable interface to open unknown URL type.""" + type, url = splittype(fullurl) + raise IOError('url error', 'unknown url type', type) + + def open_unknown_proxy(self, proxy, fullurl, data=None): + """Overridable interface to open unknown URL type.""" + type, url = splittype(fullurl) + raise IOError('url error', 'invalid proxy for %s' % type, proxy) + + # External interface + def retrieve(self, url, filename=None, reporthook=None, data=None): + """retrieve(url) returns (filename, headers) for a local object + or (tempfilename, headers) for a remote object.""" + url = unwrap(to_bytes(url)) + if self.tempcache and url in self.tempcache: + return self.tempcache[url] + type, url1 = splittype(url) + if filename is None and (not type or type == 'file'): + try: + fp = 
self.open_local_file(url1) + hdrs = fp.info() + fp.close() + return url2pathname(splithost(url1)[1]), hdrs + except IOError as msg: + pass + fp = self.open(url, data) + try: + headers = fp.info() + if filename: + tfp = open(filename, 'wb') + else: + import tempfile + garbage, path = splittype(url) + garbage, path = splithost(path or "") + path, garbage = splitquery(path or "") + path, garbage = splitattr(path or "") + suffix = os.path.splitext(path)[1] + (fd, filename) = tempfile.mkstemp(suffix) + self.__tempfiles.append(filename) + tfp = os.fdopen(fd, 'wb') + try: + result = filename, headers + if self.tempcache is not None: + self.tempcache[url] = result + bs = 1024*8 + size = -1 + read = 0 + blocknum = 0 + if "content-length" in headers: + size = int(headers["Content-Length"]) + if reporthook: + reporthook(blocknum, bs, size) + while 1: + block = fp.read(bs) + if not block: + break + read += len(block) + tfp.write(block) + blocknum += 1 + if reporthook: + reporthook(blocknum, bs, size) + finally: + tfp.close() + finally: + fp.close() + + # raise exception if actual size does not match content-length header + if size >= 0 and read < size: + raise ContentTooShortError( + "retrieval incomplete: got only %i out of %i bytes" + % (read, size), result) + + return result + + # Each method named open_ knows how to open that type of URL + + def _open_generic_http(self, connection_factory, url, data): + """Make an HTTP connection using connection_class. + + This is an internal method that should be called from + open_http() or open_https(). + + Arguments: + - connection_factory should take a host name and return an + HTTPConnection instance. + - url is the url to retrieval or a host, relative-path pair. + - data is payload for a POST request or None. + """ + + user_passwd = None + proxy_passwd= None + if isinstance(url, str): + host, selector = splithost(url) + if host: + user_passwd, host = splituser(host) + host = unquote(host) + realhost = host + else: + host, selector = url + # check whether the proxy contains authorization information + proxy_passwd, host = splituser(host) + # now we proceed with the url we want to obtain + urltype, rest = splittype(selector) + url = rest + user_passwd = None + if urltype.lower() != 'http': + realhost = None + else: + realhost, rest = splithost(rest) + if realhost: + user_passwd, realhost = splituser(realhost) + if user_passwd: + selector = "%s://%s%s" % (urltype, realhost, rest) + if proxy_bypass(realhost): + host = realhost + + if not host: raise IOError('http error', 'no host given') + + if proxy_passwd: + proxy_passwd = unquote(proxy_passwd) + proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') + else: + proxy_auth = None + + if user_passwd: + user_passwd = unquote(user_passwd) + auth = base64.b64encode(user_passwd.encode()).decode('ascii') + else: + auth = None + http_conn = connection_factory(host) + headers = {} + if proxy_auth: + headers["Proxy-Authorization"] = "Basic %s" % proxy_auth + if auth: + headers["Authorization"] = "Basic %s" % auth + if realhost: + headers["Host"] = realhost + + # Add Connection:close as we don't support persistent connections yet. 
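The Authorization and Proxy-Authorization values assembled just above are plain Basic credentials; a tiny sketch with hypothetical credentials:

import base64

user_passwd = "alice:secret"                    # hypothetical user:password pair
auth = base64.b64encode(user_passwd.encode()).decode("ascii")
print("Authorization: Basic %s" % auth)         # -> Basic YWxpY2U6c2VjcmV0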
+ # This helps in closing the socket and avoiding ResourceWarning + + headers["Connection"] = "close" + + for header, value in self.addheaders: + headers[header] = value + + if data is not None: + headers["Content-Type"] = "application/x-www-form-urlencoded" + http_conn.request("POST", selector, data, headers) + else: + http_conn.request("GET", selector, headers=headers) + + try: + response = http_conn.getresponse() + except http_client.BadStatusLine: + # something went wrong with the HTTP status line + raise URLError("http protocol error: bad status line") + + # According to RFC 2616, "2xx" code indicates that the client's + # request was successfully received, understood, and accepted. + if 200 <= response.status < 300: + return addinfourl(response, response.msg, "http:" + url, + response.status) + else: + return self.http_error( + url, response.fp, + response.status, response.reason, response.msg, data) + + def open_http(self, url, data=None): + """Use HTTP protocol.""" + return self._open_generic_http(http_client.HTTPConnection, url, data) + + def http_error(self, url, fp, errcode, errmsg, headers, data=None): + """Handle http errors. + + Derived class can override this, or provide specific handlers + named http_error_DDD where DDD is the 3-digit error code.""" + # First check if there's a specific handler for this error + name = 'http_error_%d' % errcode + if hasattr(self, name): + method = getattr(self, name) + if data is None: + result = method(url, fp, errcode, errmsg, headers) + else: + result = method(url, fp, errcode, errmsg, headers, data) + if result: return result + return self.http_error_default(url, fp, errcode, errmsg, headers) + + def http_error_default(self, url, fp, errcode, errmsg, headers): + """Default error handler: close the connection and raise IOError.""" + fp.close() + raise HTTPError(url, errcode, errmsg, headers, None) + + if _have_ssl: + def _https_connection(self, host): + return http_client.HTTPSConnection(host, + key_file=self.key_file, + cert_file=self.cert_file) + + def open_https(self, url, data=None): + """Use HTTPS protocol.""" + return self._open_generic_http(self._https_connection, url, data) + + def open_file(self, url): + """Use local file or FTP depending on form of URL.""" + if not isinstance(url, str): + raise URLError('file error: proxy support for file protocol currently not implemented') + if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': + raise ValueError("file:// scheme is supported only on localhost") + else: + return self.open_local_file(url) + + def open_local_file(self, url): + """Use local file.""" + import future.backports.email.utils as email_utils + import mimetypes + host, file = splithost(url) + localname = url2pathname(file) + try: + stats = os.stat(localname) + except OSError as e: + raise URLError(e.strerror, e.filename) + size = stats.st_size + modified = email_utils.formatdate(stats.st_mtime, usegmt=True) + mtype = mimetypes.guess_type(url)[0] + headers = email.message_from_string( + 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % + (mtype or 'text/plain', size, modified)) + if not host: + urlfile = file + if file[:1] == '/': + urlfile = 'file://' + file + return addinfourl(open(localname, 'rb'), headers, urlfile) + host, port = splitport(host) + if (not port + and socket.gethostbyname(host) in ((localhost(),) + thishost())): + urlfile = file + if file[:1] == '/': + urlfile = 'file://' + file + elif file[:2] == './': + raise ValueError("local file url may start with / or file:. 
Unknown url of type: %s" % url) + return addinfourl(open(localname, 'rb'), headers, urlfile) + raise URLError('local file error: not on local host') + + def open_ftp(self, url): + """Use FTP protocol.""" + if not isinstance(url, str): + raise URLError('ftp error: proxy support for ftp protocol currently not implemented') + import mimetypes + host, path = splithost(url) + if not host: raise URLError('ftp error: no host given') + host, port = splitport(host) + user, host = splituser(host) + if user: user, passwd = splitpasswd(user) + else: passwd = None + host = unquote(host) + user = unquote(user or '') + passwd = unquote(passwd or '') + host = socket.gethostbyname(host) + if not port: + import ftplib + port = ftplib.FTP_PORT + else: + port = int(port) + path, attrs = splitattr(path) + path = unquote(path) + dirs = path.split('/') + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: dirs = dirs[1:] + if dirs and not dirs[0]: dirs[0] = '/' + key = user, host, port, '/'.join(dirs) + # XXX thread unsafe! + if len(self.ftpcache) > MAXFTPCACHE: + # Prune the cache, rather arbitrarily + for k in self.ftpcache.keys(): + if k != key: + v = self.ftpcache[k] + del self.ftpcache[k] + v.close() + try: + if key not in self.ftpcache: + self.ftpcache[key] = \ + ftpwrapper(user, passwd, host, port, dirs) + if not file: type = 'D' + else: type = 'I' + for attr in attrs: + attr, value = splitvalue(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + (fp, retrlen) = self.ftpcache[key].retrfile(file, type) + mtype = mimetypes.guess_type("ftp:" + url)[0] + headers = "" + if mtype: + headers += "Content-Type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-Length: %d\n" % retrlen + headers = email.message_from_string(headers) + return addinfourl(fp, headers, "ftp:" + url) + except ftperrors() as exp: + raise_with_traceback(URLError('ftp error %r' % exp)) + + def open_data(self, url, data=None): + """Use "data" URL.""" + if not isinstance(url, str): + raise URLError('data error: proxy support for data protocol currently not implemented') + # ignore POSTed data + # + # syntax of data URLs: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + try: + [type, data] = url.split(',', 1) + except ValueError: + raise IOError('data error', 'bad data URL') + if not type: + type = 'text/plain;charset=US-ASCII' + semi = type.rfind(';') + if semi >= 0 and '=' not in type[semi:]: + encoding = type[semi+1:] + type = type[:semi] + else: + encoding = '' + msg = [] + msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT', + time.gmtime(time.time()))) + msg.append('Content-type: %s' % type) + if encoding == 'base64': + # XXX is this encoding/decoding ok? 
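The data: URL grammar quoted in the comment above is easy to exercise in isolation; a minimal sketch (the sample URL is invented for illustration):

import base64

def parse_data_url(url):
    # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
    mediatype, payload = url[len("data:"):].split(",", 1)
    if mediatype.endswith(";base64"):
        return base64.b64decode(payload)
    return payload  # percent-decoding of non-base64 payloads omitted here

print(parse_data_url("data:text/plain;base64,SGVsbG8sIHdvcmxkIQ=="))
# -> b'Hello, world!'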
+ data = base64.decodebytes(data.encode('ascii')).decode('latin-1') + else: + data = unquote(data) + msg.append('Content-Length: %d' % len(data)) + msg.append('') + msg.append(data) + msg = '\n'.join(msg) + headers = email.message_from_string(msg) + f = io.StringIO(msg) + #f.fileno = None # needed for addinfourl + return addinfourl(f, headers, url) + + +class FancyURLopener(URLopener): + """Derived class with handlers for errors we can handle (perhaps).""" + + def __init__(self, *args, **kwargs): + URLopener.__init__(self, *args, **kwargs) + self.auth_cache = {} + self.tries = 0 + self.maxtries = 10 + + def http_error_default(self, url, fp, errcode, errmsg, headers): + """Default error handling -- don't raise an exception.""" + return addinfourl(fp, headers, "http:" + url, errcode) + + def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): + """Error 302 -- relocated (temporarily).""" + self.tries += 1 + if self.maxtries and self.tries >= self.maxtries: + if hasattr(self, "http_error_500"): + meth = self.http_error_500 + else: + meth = self.http_error_default + self.tries = 0 + return meth(url, fp, 500, + "Internal Server Error: Redirect Recursion", headers) + result = self.redirect_internal(url, fp, errcode, errmsg, headers, + data) + self.tries = 0 + return result + + def redirect_internal(self, url, fp, errcode, errmsg, headers, data): + if 'location' in headers: + newurl = headers['location'] + elif 'uri' in headers: + newurl = headers['uri'] + else: + return + fp.close() + + # In case the server sent a relative URL, join with original: + newurl = urljoin(self.type + ":" + url, newurl) + + urlparts = urlparse(newurl) + + # For security reasons, we don't allow redirection to anything other + # than http, https and ftp. + + # We are using newer HTTPError with older redirect_internal method + # This older method will get deprecated in 3.3 + + if urlparts.scheme not in ('http', 'https', 'ftp', ''): + raise HTTPError(newurl, errcode, + errmsg + + " Redirection to url '%s' is not allowed." % newurl, + headers, fp) + + return self.open(newurl) + + def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): + """Error 301 -- also relocated (permanently).""" + return self.http_error_302(url, fp, errcode, errmsg, headers, data) + + def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): + """Error 303 -- also relocated (essentially identical to 302).""" + return self.http_error_302(url, fp, errcode, errmsg, headers, data) + + def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): + """Error 307 -- relocated, but turn POST into error.""" + if data is None: + return self.http_error_302(url, fp, errcode, errmsg, headers, data) + else: + return self.http_error_default(url, fp, errcode, errmsg, headers) + + def http_error_401(self, url, fp, errcode, errmsg, headers, data=None, + retry=False): + """Error 401 -- authentication required. 
+ This function supports Basic authentication only.""" + if 'www-authenticate' not in headers: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + stuff = headers['www-authenticate'] + match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) + if not match: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + scheme, realm = match.groups() + if scheme.lower() != 'basic': + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + if not retry: + URLopener.http_error_default(self, url, fp, errcode, errmsg, + headers) + name = 'retry_' + self.type + '_basic_auth' + if data is None: + return getattr(self,name)(url, realm) + else: + return getattr(self,name)(url, realm, data) + + def http_error_407(self, url, fp, errcode, errmsg, headers, data=None, + retry=False): + """Error 407 -- proxy authentication required. + This function supports Basic authentication only.""" + if 'proxy-authenticate' not in headers: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + stuff = headers['proxy-authenticate'] + match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) + if not match: + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + scheme, realm = match.groups() + if scheme.lower() != 'basic': + URLopener.http_error_default(self, url, fp, + errcode, errmsg, headers) + if not retry: + URLopener.http_error_default(self, url, fp, errcode, errmsg, + headers) + name = 'retry_proxy_' + self.type + '_basic_auth' + if data is None: + return getattr(self,name)(url, realm) + else: + return getattr(self,name)(url, realm, data) + + def retry_proxy_http_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + newurl = 'http://' + host + selector + proxy = self.proxies['http'] + urltype, proxyhost = splittype(proxy) + proxyhost, proxyselector = splithost(proxyhost) + i = proxyhost.find('@') + 1 + proxyhost = proxyhost[i:] + user, passwd = self.get_user_passwd(proxyhost, realm, i) + if not (user or passwd): return None + proxyhost = "%s:%s@%s" % (quote(user, safe=''), + quote(passwd, safe=''), proxyhost) + self.proxies['http'] = 'http://' + proxyhost + proxyselector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def retry_proxy_https_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + newurl = 'https://' + host + selector + proxy = self.proxies['https'] + urltype, proxyhost = splittype(proxy) + proxyhost, proxyselector = splithost(proxyhost) + i = proxyhost.find('@') + 1 + proxyhost = proxyhost[i:] + user, passwd = self.get_user_passwd(proxyhost, realm, i) + if not (user or passwd): return None + proxyhost = "%s:%s@%s" % (quote(user, safe=''), + quote(passwd, safe=''), proxyhost) + self.proxies['https'] = 'https://' + proxyhost + proxyselector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def retry_http_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + i = host.find('@') + 1 + host = host[i:] + user, passwd = self.get_user_passwd(host, realm, i) + if not (user or passwd): return None + host = "%s:%s@%s" % (quote(user, safe=''), + quote(passwd, safe=''), host) + newurl = 'http://' + host + selector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def retry_https_basic_auth(self, url, realm, data=None): + host, selector = splithost(url) + i = host.find('@') + 1 + host = host[i:] + user, passwd = 
self.get_user_passwd(host, realm, i) + if not (user or passwd): return None + host = "%s:%s@%s" % (quote(user, safe=''), + quote(passwd, safe=''), host) + newurl = 'https://' + host + selector + if data is None: + return self.open(newurl) + else: + return self.open(newurl, data) + + def get_user_passwd(self, host, realm, clear_cache=0): + key = realm + '@' + host.lower() + if key in self.auth_cache: + if clear_cache: + del self.auth_cache[key] + else: + return self.auth_cache[key] + user, passwd = self.prompt_user_passwd(host, realm) + if user or passwd: self.auth_cache[key] = (user, passwd) + return user, passwd + + def prompt_user_passwd(self, host, realm): + """Override this in a GUI environment!""" + import getpass + try: + user = input("Enter username for %s at %s: " % (realm, host)) + passwd = getpass.getpass("Enter password for %s in %s at %s: " % + (user, realm, host)) + return user, passwd + except KeyboardInterrupt: + print() + return None, None + + +# Utility functions + +_localhost = None +def localhost(): + """Return the IP address of the magic hostname 'localhost'.""" + global _localhost + if _localhost is None: + _localhost = socket.gethostbyname('localhost') + return _localhost + +_thishost = None +def thishost(): + """Return the IP addresses of the current host.""" + global _thishost + if _thishost is None: + try: + _thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2]) + except socket.gaierror: + _thishost = tuple(socket.gethostbyname_ex('localhost')[2]) + return _thishost + +_ftperrors = None +def ftperrors(): + """Return the set of errors raised by the FTP class.""" + global _ftperrors + if _ftperrors is None: + import ftplib + _ftperrors = ftplib.all_errors + return _ftperrors + +_noheaders = None +def noheaders(): + """Return an empty email Message object.""" + global _noheaders + if _noheaders is None: + _noheaders = email.message_from_string("") + return _noheaders + + +# Utility classes + +class ftpwrapper(object): + """Class used by open_ftp() for cache of open FTP connections.""" + + def __init__(self, user, passwd, host, port, dirs, timeout=None, + persistent=True): + self.user = user + self.passwd = passwd + self.host = host + self.port = port + self.dirs = dirs + self.timeout = timeout + self.refcount = 0 + self.keepalive = persistent + self.init() + + def init(self): + import ftplib + self.busy = 0 + self.ftp = ftplib.FTP() + self.ftp.connect(self.host, self.port, self.timeout) + self.ftp.login(self.user, self.passwd) + _target = '/'.join(self.dirs) + self.ftp.cwd(_target) + + def retrfile(self, file, type): + import ftplib + self.endtransfer() + if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 + else: cmd = 'TYPE ' + type; isdir = 0 + try: + self.ftp.voidcmd(cmd) + except ftplib.all_errors: + self.init() + self.ftp.voidcmd(cmd) + conn = None + if file and not isdir: + # Try to retrieve as a file + try: + cmd = 'RETR ' + file + conn, retrlen = self.ftp.ntransfercmd(cmd) + except ftplib.error_perm as reason: + if str(reason)[:3] != '550': + raise_with_traceback(URLError('ftp error: %r' % reason)) + if not conn: + # Set transfer mode to ASCII! + self.ftp.voidcmd('TYPE A') + # Try a directory listing. Verify that directory exists. 
+ if file: + pwd = self.ftp.pwd() + try: + try: + self.ftp.cwd(file) + except ftplib.error_perm as reason: + ### Was: + # raise URLError('ftp error: %r' % reason) from reason + exc = URLError('ftp error: %r' % reason) + exc.__cause__ = reason + raise exc + finally: + self.ftp.cwd(pwd) + cmd = 'LIST ' + file + else: + cmd = 'LIST' + conn, retrlen = self.ftp.ntransfercmd(cmd) + self.busy = 1 + + ftpobj = addclosehook(conn.makefile('rb'), self.file_close) + self.refcount += 1 + conn.close() + # Pass back both a suitably decorated object and a retrieval length + return (ftpobj, retrlen) + + def endtransfer(self): + self.busy = 0 + + def close(self): + self.keepalive = False + if self.refcount <= 0: + self.real_close() + + def file_close(self): + self.endtransfer() + self.refcount -= 1 + if self.refcount <= 0 and not self.keepalive: + self.real_close() + + def real_close(self): + self.endtransfer() + try: + self.ftp.close() + except ftperrors(): + pass + +# Proxy handling +def getproxies_environment(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Scan the environment for variables named _proxy; + this seems to be the standard convention. If you need a + different way, you can pass a proxies dictionary to the + [Fancy]URLopener constructor. + + """ + proxies = {} + for name, value in os.environ.items(): + name = name.lower() + if value and name[-6:] == '_proxy': + proxies[name[:-6]] = value + return proxies + +def proxy_bypass_environment(host): + """Test if proxies should not be used for a particular host. + + Checks the environment for a variable named no_proxy, which should + be a list of DNS suffixes separated by commas, or '*' for all hosts. + """ + no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '') + # '*' is special case for always bypass + if no_proxy == '*': + return 1 + # strip port off host + hostonly, port = splitport(host) + # check if the host ends with any of the DNS suffixes + no_proxy_list = [proxy.strip() for proxy in no_proxy.split(',')] + for name in no_proxy_list: + if name and (hostonly.endswith(name) or host.endswith(name)): + return 1 + # otherwise, don't bypass + return 0 + + +# This code tests an OSX specific data structure but is testable on all +# platforms +def _proxy_bypass_macosx_sysconf(host, proxy_settings): + """ + Return True iff this host shouldn't be accessed using a proxy + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + + proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: + { 'exclude_simple': bool, + 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] + } + """ + from fnmatch import fnmatch + + hostonly, port = splitport(host) + + def ip2num(ipAddr): + parts = ipAddr.split('.') + parts = list(map(int, parts)) + if len(parts) != 4: + parts = (parts + [0, 0, 0, 0])[:4] + return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] + + # Check for simple host names: + if '.' 
not in host: + if proxy_settings['exclude_simple']: + return True + + hostIP = None + + for value in proxy_settings.get('exceptions', ()): + # Items in the list are strings like these: *.local, 169.254/16 + if not value: continue + + m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) + if m is not None: + if hostIP is None: + try: + hostIP = socket.gethostbyname(hostonly) + hostIP = ip2num(hostIP) + except socket.error: + continue + + base = ip2num(m.group(1)) + mask = m.group(2) + if mask is None: + mask = 8 * (m.group(1).count('.') + 1) + else: + mask = int(mask[1:]) + mask = 32 - mask + + if (hostIP >> mask) == (base >> mask): + return True + + elif fnmatch(host, value): + return True + + return False + + +if sys.platform == 'darwin': + from _scproxy import _get_proxy_settings, _get_proxies + + def proxy_bypass_macosx_sysconf(host): + proxy_settings = _get_proxy_settings() + return _proxy_bypass_macosx_sysconf(host, proxy_settings) + + def getproxies_macosx_sysconf(): + """Return a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + """ + return _get_proxies() + + + + def proxy_bypass(host): + if getproxies_environment(): + return proxy_bypass_environment(host) + else: + return proxy_bypass_macosx_sysconf(host) + + def getproxies(): + return getproxies_environment() or getproxies_macosx_sysconf() + + +elif os.name == 'nt': + def getproxies_registry(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. + + """ + proxies = {} + try: + import winreg + except ImportError: + # Std module, so should be around - but you never know! + return proxies + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + proxyEnable = winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0] + if proxyEnable: + # Returned as Unicode but problems if not converted to ASCII + proxyServer = str(winreg.QueryValueEx(internetSettings, + 'ProxyServer')[0]) + if '=' in proxyServer: + # Per-protocol settings + for p in proxyServer.split(';'): + protocol, address = p.split('=', 1) + # See if address has a type:// prefix + if not re.match('^([^/:]+)://', address): + address = '%s://%s' % (protocol, address) + proxies[protocol] = address + else: + # Use one setting for all protocols + if proxyServer[:5] == 'http:': + proxies['http'] = proxyServer + else: + proxies['http'] = 'http://%s' % proxyServer + proxies['https'] = 'https://%s' % proxyServer + proxies['ftp'] = 'ftp://%s' % proxyServer + internetSettings.Close() + except (WindowsError, ValueError, TypeError): + # Either registry key not found etc, or the value in an + # unexpected format. + # proxies already set up to be empty so nothing to do + pass + return proxies + + def getproxies(): + """Return a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. + + """ + return getproxies_environment() or getproxies_registry() + + def proxy_bypass_registry(host): + try: + import winreg + except ImportError: + # Std modules, so should be around - but you never know! 
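For context, the environment-variable convention that getproxies_environment() and proxy_bypass_environment() above implement looks roughly like this (all values hypothetical):

import os

os.environ["http_proxy"] = "http://proxy.example.com:3128"
os.environ["no_proxy"] = "localhost,.internal.example.com"

# the same scan getproxies_environment() performs
proxies = dict((name.lower()[:-6], value)
               for name, value in os.environ.items()
               if value and name.lower()[-6:] == "_proxy")
print(proxies["http"])   # -> http://proxy.example.com:3128

# the same suffix test proxy_bypass_environment() performs
host = "db.internal.example.com"
print(any(host.endswith(part.strip())
          for part in os.environ["no_proxy"].split(",")))   # -> True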
+ return 0 + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + proxyEnable = winreg.QueryValueEx(internetSettings, + 'ProxyEnable')[0] + proxyOverride = str(winreg.QueryValueEx(internetSettings, + 'ProxyOverride')[0]) + # ^^^^ Returned as Unicode but problems if not converted to ASCII + except WindowsError: + return 0 + if not proxyEnable or not proxyOverride: + return 0 + # try to make a host list from name and IP address. + rawHost, port = splitport(host) + host = [rawHost] + try: + addr = socket.gethostbyname(rawHost) + if addr != rawHost: + host.append(addr) + except socket.error: + pass + try: + fqdn = socket.getfqdn(rawHost) + if fqdn != rawHost: + host.append(fqdn) + except socket.error: + pass + # make a check value list from the registry entry: replace the + # '' string by the localhost entry and the corresponding + # canonical entry. + proxyOverride = proxyOverride.split(';') + # now check if we match one of the registry values. + for test in proxyOverride: + if test == '': + if '.' not in rawHost: + return 1 + test = test.replace(".", r"\.") # mask dots + test = test.replace("*", r".*") # change glob sequence + test = test.replace("?", r".") # change glob char + for val in host: + if re.match(test, val, re.I): + return 1 + return 0 + + def proxy_bypass(host): + """Return a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. + + """ + if getproxies_environment(): + return proxy_bypass_environment(host) + else: + return proxy_bypass_registry(host) + +else: + # By default use environment variables + getproxies = getproxies_environment + proxy_bypass = proxy_bypass_environment diff --git a/pype/modules/ftrack/python2_vendor/future/backports/urllib/response.py b/pype/modules/ftrack/python2_vendor/future/backports/urllib/response.py new file mode 100644 index 0000000000..adbf6e5ae3 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/urllib/response.py @@ -0,0 +1,103 @@ +"""Response classes used by urllib. + +The base class, addbase, defines a minimal file-like interface, +including read() and readline(). The typical response object is an +addinfourl instance, which defines an info() method that returns +headers and a geturl() method that returns the url. +""" +from __future__ import absolute_import, division, unicode_literals +from future.builtins import object + +class addbase(object): + """Base class for addinfo and addclosehook.""" + + # XXX Add a method to expose the timeout on the underlying socket? + + def __init__(self, fp): + # TODO(jhylton): Is there a better way to delegate using io? + self.fp = fp + self.read = self.fp.read + self.readline = self.fp.readline + # TODO(jhylton): Make sure an object with readlines() is also iterable + if hasattr(self.fp, "readlines"): + self.readlines = self.fp.readlines + if hasattr(self.fp, "fileno"): + self.fileno = self.fp.fileno + else: + self.fileno = lambda: None + + def __iter__(self): + # Assigning `__iter__` to the instance doesn't work as intended + # because the iter builtin does something like `cls.__iter__(obj)` + # and thus fails to find the _bound_ method `obj.__iter__`. + # Returning just `self.fp` works for built-in file objects but + # might not work for general file-like objects. 
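To make the note above concrete, a minimal sketch of the same delegation pattern: bound methods such as read() can be copied onto the instance once, but __iter__ has to stay on the class for the iter() builtin to find it:

import io

class Wrapped(object):
    def __init__(self, fp):
        self.fp = fp
        self.read = self.fp.read          # copied per instance, like addbase
        self.readline = self.fp.readline

    def __iter__(self):                   # looked up on the class by iter()
        return iter(self.fp)

w = Wrapped(io.BytesIO(b"first\nsecond\n"))
print(w.readline())   # -> b'first\n'
print(list(w))        # -> [b'second\n']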
+ return iter(self.fp) + + def __repr__(self): + return '<%s at %r whose fp = %r>' % (self.__class__.__name__, + id(self), self.fp) + + def close(self): + if self.fp: + self.fp.close() + self.fp = None + self.read = None + self.readline = None + self.readlines = None + self.fileno = None + self.__iter__ = None + self.__next__ = None + + def __enter__(self): + if self.fp is None: + raise ValueError("I/O operation on closed file") + return self + + def __exit__(self, type, value, traceback): + self.close() + +class addclosehook(addbase): + """Class to add a close hook to an open file.""" + + def __init__(self, fp, closehook, *hookargs): + addbase.__init__(self, fp) + self.closehook = closehook + self.hookargs = hookargs + + def close(self): + if self.closehook: + self.closehook(*self.hookargs) + self.closehook = None + self.hookargs = None + addbase.close(self) + +class addinfo(addbase): + """class to add an info() method to an open file.""" + + def __init__(self, fp, headers): + addbase.__init__(self, fp) + self.headers = headers + + def info(self): + return self.headers + +class addinfourl(addbase): + """class to add info() and geturl() methods to an open file.""" + + def __init__(self, fp, headers, url, code=None): + addbase.__init__(self, fp) + self.headers = headers + self.url = url + self.code = code + + def info(self): + return self.headers + + def getcode(self): + return self.code + + def geturl(self): + return self.url + +del absolute_import, division, unicode_literals, object diff --git a/pype/modules/ftrack/python2_vendor/future/backports/urllib/robotparser.py b/pype/modules/ftrack/python2_vendor/future/backports/urllib/robotparser.py new file mode 100644 index 0000000000..a0f36511b4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/urllib/robotparser.py @@ -0,0 +1,211 @@ +from __future__ import absolute_import, division, unicode_literals +from future.builtins import str +""" robotparser.py + + Copyright (C) 2000 Bastian Kleineidam + + You can choose between two licenses when using this package: + 1) GNU GPLv2 + 2) PSF license for Python 2.2 + + The robots.txt Exclusion Protocol is implemented as specified in + http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html +""" + +# Was: import urllib.parse, urllib.request +from future.backports import urllib +from future.backports.urllib import parse as _parse, request as _request +urllib.parse = _parse +urllib.request = _request + + +__all__ = ["RobotFileParser"] + +class RobotFileParser(object): + """ This class provides a set of methods to read, parse and answer + questions about a single robots.txt file. + + """ + + def __init__(self, url=''): + self.entries = [] + self.default_entry = None + self.disallow_all = False + self.allow_all = False + self.set_url(url) + self.last_checked = 0 + + def mtime(self): + """Returns the time the robots.txt file was last fetched. + + This is useful for long-running web spiders that need to + check for new robots.txt files periodically. + + """ + return self.last_checked + + def modified(self): + """Sets the time the robots.txt file was last fetched to the + current time. 
+ + """ + import time + self.last_checked = time.time() + + def set_url(self, url): + """Sets the URL referring to a robots.txt file.""" + self.url = url + self.host, self.path = urllib.parse.urlparse(url)[1:3] + + def read(self): + """Reads the robots.txt URL and feeds it to the parser.""" + try: + f = urllib.request.urlopen(self.url) + except urllib.error.HTTPError as err: + if err.code in (401, 403): + self.disallow_all = True + elif err.code >= 400: + self.allow_all = True + else: + raw = f.read() + self.parse(raw.decode("utf-8").splitlines()) + + def _add_entry(self, entry): + if "*" in entry.useragents: + # the default entry is considered last + if self.default_entry is None: + # the first default entry wins + self.default_entry = entry + else: + self.entries.append(entry) + + def parse(self, lines): + """Parse the input lines from a robots.txt file. + + We allow that a user-agent: line is not preceded by + one or more blank lines. + """ + # states: + # 0: start state + # 1: saw user-agent line + # 2: saw an allow or disallow line + state = 0 + entry = Entry() + + for line in lines: + if not line: + if state == 1: + entry = Entry() + state = 0 + elif state == 2: + self._add_entry(entry) + entry = Entry() + state = 0 + # remove optional comment and strip line + i = line.find('#') + if i >= 0: + line = line[:i] + line = line.strip() + if not line: + continue + line = line.split(':', 1) + if len(line) == 2: + line[0] = line[0].strip().lower() + line[1] = urllib.parse.unquote(line[1].strip()) + if line[0] == "user-agent": + if state == 2: + self._add_entry(entry) + entry = Entry() + entry.useragents.append(line[1]) + state = 1 + elif line[0] == "disallow": + if state != 0: + entry.rulelines.append(RuleLine(line[1], False)) + state = 2 + elif line[0] == "allow": + if state != 0: + entry.rulelines.append(RuleLine(line[1], True)) + state = 2 + if state == 2: + self._add_entry(entry) + + + def can_fetch(self, useragent, url): + """using the parsed robots.txt decide if useragent can fetch url""" + if self.disallow_all: + return False + if self.allow_all: + return True + # search for given user agent matches + # the first match counts + parsed_url = urllib.parse.urlparse(urllib.parse.unquote(url)) + url = urllib.parse.urlunparse(('','',parsed_url.path, + parsed_url.params,parsed_url.query, parsed_url.fragment)) + url = urllib.parse.quote(url) + if not url: + url = "/" + for entry in self.entries: + if entry.applies_to(useragent): + return entry.allowance(url) + # try the default entry last + if self.default_entry: + return self.default_entry.allowance(url) + # agent not found ==> access granted + return True + + def __str__(self): + return ''.join([str(entry) + "\n" for entry in self.entries]) + + +class RuleLine(object): + """A rule line is a single "Allow:" (allowance==True) or "Disallow:" + (allowance==False) followed by a path.""" + def __init__(self, path, allowance): + if path == '' and not allowance: + # an empty value means allow all + allowance = True + self.path = urllib.parse.quote(path) + self.allowance = allowance + + def applies_to(self, filename): + return self.path == "*" or filename.startswith(self.path) + + def __str__(self): + return (self.allowance and "Allow" or "Disallow") + ": " + self.path + + +class Entry(object): + """An entry has one or more user-agents and zero or more rulelines""" + def __init__(self): + self.useragents = [] + self.rulelines = [] + + def __str__(self): + ret = [] + for agent in self.useragents: + ret.extend(["User-agent: ", agent, "\n"]) + for line 
in self.rulelines: + ret.extend([str(line), "\n"]) + return ''.join(ret) + + def applies_to(self, useragent): + """check if this entry applies to the specified agent""" + # split the name token and make it lower case + useragent = useragent.split("/")[0].lower() + for agent in self.useragents: + if agent == '*': + # we have the catch-all agent + return True + agent = agent.lower() + if agent in useragent: + return True + return False + + def allowance(self, filename): + """Preconditions: + - our agent applies to this entry + - filename is URL decoded""" + for line in self.rulelines: + if line.applies_to(filename): + return line.allowance + return True diff --git a/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/__init__.py b/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/__init__.py new file mode 100644 index 0000000000..196d378857 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/__init__.py @@ -0,0 +1 @@ +# This directory is a Python package. diff --git a/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/client.py b/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/client.py new file mode 100644 index 0000000000..b78e5bad64 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/client.py @@ -0,0 +1,1496 @@ +# +# XML-RPC CLIENT LIBRARY +# $Id$ +# +# an XML-RPC client interface for Python. +# +# the marshalling and response parser code can also be used to +# implement XML-RPC servers. +# +# Notes: +# this version is designed to work with Python 2.1 or newer. +# +# History: +# 1999-01-14 fl Created +# 1999-01-15 fl Changed dateTime to use localtime +# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service +# 1999-01-19 fl Fixed array data element (from Skip Montanaro) +# 1999-01-21 fl Fixed dateTime constructor, etc. +# 1999-02-02 fl Added fault handling, handle empty sequences, etc. 
+# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro) +# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8) +# 2000-11-28 fl Changed boolean to check the truth value of its argument +# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches +# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1) +# 2001-03-28 fl Make sure response tuple is a singleton +# 2001-03-29 fl Don't require empty params element (from Nicholas Riley) +# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2) +# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod) +# 2001-09-03 fl Allow Transport subclass to override getparser +# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup) +# 2001-10-01 fl Remove containers from memo cache when done with them +# 2001-10-01 fl Use faster escape method (80% dumps speedup) +# 2001-10-02 fl More dumps microtuning +# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum) +# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow +# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems) +# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix) +# 2002-03-17 fl Avoid buffered read when possible (from James Rucker) +# 2002-04-07 fl Added pythondoc comments +# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers +# 2002-05-15 fl Added error constants (from Andrew Kuchling) +# 2002-06-27 fl Merged with Python CVS version +# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby) +# 2003-01-22 sm Add support for the bool type +# 2003-02-27 gvr Remove apply calls +# 2003-04-24 sm Use cStringIO if available +# 2003-04-25 ak Add support for nil +# 2003-06-15 gn Add support for time.struct_time +# 2003-07-12 gp Correct marshalling of Faults +# 2003-10-31 mvl Add multicall support +# 2004-08-20 mvl Bump minimum supported Python version to 2.1 +# +# Copyright (c) 1999-2002 by Secret Labs AB. +# Copyright (c) 1999-2002 by Fredrik Lundh. +# +# info@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The XML-RPC client interface is +# +# Copyright (c) 1999-2002 by Secret Labs AB +# Copyright (c) 1999-2002 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# --------------------------------------------------------------------
+
+"""
+Ported using Python-Future from the Python 3.3 standard library.
+
+An XML-RPC client interface for Python.
+
+The marshalling and response parser code can also be used to
+implement XML-RPC servers.
+
+Exported exceptions:
+
+  Error          Base class for client errors
+  ProtocolError  Indicates an HTTP protocol error
+  ResponseError  Indicates a broken response package
+  Fault          Indicates an XML-RPC fault package
+
+Exported classes:
+
+  ServerProxy    Represents a logical connection to an XML-RPC server
+
+  MultiCall      Executor of boxcared xmlrpc requests
+  DateTime       dateTime wrapper for an ISO 8601 string or time tuple or
+                 localtime integer value to generate a "dateTime.iso8601"
+                 XML-RPC value
+  Binary         binary data wrapper
+
+  Marshaller     Generate an XML-RPC params chunk from a Python data structure
+  Unmarshaller   Unmarshal an XML-RPC response from incoming XML event message
+  Transport      Handles an HTTP transaction to an XML-RPC server
+  SafeTransport  Handles an HTTPS transaction to an XML-RPC server
+
+Exported constants:
+
+  (none)
+
+Exported functions:
+
+  getparser      Create instance of the fastest available parser & attach
+                 to an unmarshalling object
+  dumps          Convert an argument tuple or a Fault instance to an XML-RPC
+                 request (or response, if the methodresponse option is used).
+  loads          Convert an XML-RPC packet to unmarshalled data plus a method
+                 name (None if not present).
+"""
+
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals)
+from future.builtins import bytes, dict, int, range, str
+
+import base64
+# Py2.7 compatibility hack
+base64.encodebytes = base64.encodestring
+base64.decodebytes = base64.decodestring
+import sys
+import time
+from datetime import datetime
+from future.backports.http import client as http_client
+from future.backports.urllib import parse as urllib_parse
+from future.utils import ensure_new_type
+from xml.parsers import expat
+import socket
+import errno
+from io import BytesIO
+try:
+    import gzip
+except ImportError:
+    gzip = None #python can be built without zlib/gzip support
+
+# --------------------------------------------------------------------
+# Internal stuff
+
+def escape(s):
+    s = s.replace("&", "&amp;")
+    s = s.replace("<", "&lt;")
+    return s.replace(">", "&gt;",)
+
+# used in User-Agent header sent
+__version__ = sys.version[:3]
+
+# xmlrpc integer limits
+MAXINT =  2**31-1
+MININT = -2**31
+
+# --------------------------------------------------------------------
+# Error constants (from Dan Libby's specification at
+# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)
+
+# Ranges of errors
+PARSE_ERROR       = -32700
+SERVER_ERROR      = -32600
+APPLICATION_ERROR = -32500
+SYSTEM_ERROR      = -32400
+TRANSPORT_ERROR   = -32300
+
+# Specific errors
+NOT_WELLFORMED_ERROR  = -32700
+UNSUPPORTED_ENCODING  = -32701
+INVALID_ENCODING_CHAR = -32702
+INVALID_XMLRPC        = -32600
+METHOD_NOT_FOUND      = -32601
+INVALID_METHOD_PARAMS = -32602
+INTERNAL_ERROR        = -32603
+
+# --------------------------------------------------------------------
+# Exceptions
+
+##
+# Base class for all kinds of client-side errors.
+
+class Error(Exception):
+    """Base class for client errors."""
+    def __str__(self):
+        return repr(self)
+
+##
+# Indicates an HTTP-level protocol error. This is raised by the HTTP
+# transport layer, if the server returns an error code other than 200
+# (OK).
+#
+# @param url The target URL.
+# @param errcode The HTTP error code.
+# @param errmsg The HTTP error message.
+
+# used in User-Agent header sent
+__version__ = sys.version[:3]
+
+# xmlrpc integer limits
+MAXINT = 2**31-1
+MININT = -2**31
+
+# --------------------------------------------------------------------
+# Error constants (from Dan Libby's specification at
+# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)
+
+# Ranges of errors
+PARSE_ERROR = -32700
+SERVER_ERROR = -32600
+APPLICATION_ERROR = -32500
+SYSTEM_ERROR = -32400
+TRANSPORT_ERROR = -32300
+
+# Specific errors
+NOT_WELLFORMED_ERROR = -32700
+UNSUPPORTED_ENCODING = -32701
+INVALID_ENCODING_CHAR = -32702
+INVALID_XMLRPC = -32600
+METHOD_NOT_FOUND = -32601
+INVALID_METHOD_PARAMS = -32602
+INTERNAL_ERROR = -32603
+
+# --------------------------------------------------------------------
+# Exceptions
+
+##
+# Base class for all kinds of client-side errors.
+
+class Error(Exception):
+    """Base class for client errors."""
+    def __str__(self):
+        return repr(self)
+
+##
+# Indicates an HTTP-level protocol error.  This is raised by the HTTP
+# transport layer, if the server returns an error code other than 200
+# (OK).
+#
+# @param url The target URL.
+# @param errcode The HTTP error code.
+# @param errmsg The HTTP error message.
+# @param headers The HTTP header dictionary.
+
+class ProtocolError(Error):
+    """Indicates an HTTP protocol error."""
+    def __init__(self, url, errcode, errmsg, headers):
+        Error.__init__(self)
+        self.url = url
+        self.errcode = errcode
+        self.errmsg = errmsg
+        self.headers = headers
+    def __repr__(self):
+        return (
+            "<ProtocolError for %s: %s %s>" %
+            (self.url, self.errcode, self.errmsg)
+            )
+
+##
+# Indicates a broken XML-RPC response package.  This exception is
+# raised by the unmarshalling layer, if the XML-RPC response is
+# malformed.
+
+class ResponseError(Error):
+    """Indicates a broken response package."""
+    pass
+
+##
+# Indicates an XML-RPC fault response package.  This exception is
+# raised by the unmarshalling layer, if the XML-RPC response contains
+# a fault string.  This exception can also be used as a class, to
+# generate a fault XML-RPC message.
+#
+# @param faultCode The XML-RPC fault code.
+# @param faultString The XML-RPC fault string.
+
+class Fault(Error):
+    """Indicates an XML-RPC fault package."""
+    def __init__(self, faultCode, faultString, **extra):
+        Error.__init__(self)
+        self.faultCode = faultCode
+        self.faultString = faultString
+    def __repr__(self):
+        return "<Fault %s: %s>" % (ensure_new_type(self.faultCode),
+                                   ensure_new_type(self.faultString))
+
+# --------------------------------------------------------------------
+# Special values
+
+##
+# Backwards compatibility
+
+boolean = Boolean = bool
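+
+# Editor's sketch: typical client-side error handling (illustrative
+# only; the URL is a placeholder):
+#
+#     proxy = ServerProxy("http://localhost:8000")
+#     try:
+#         proxy.some_method()
+#     except Fault as err:          # the server answered with a fault
+#         print(err.faultCode, err.faultString)
+#     except ProtocolError as err:  # the HTTP exchange itself failed
+#         print(err.errcode, err.errmsg)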
+
+##
+# Wrapper for XML-RPC DateTime values.  This converts a time value to
+# the format used by XML-RPC.
+# <p>
+# The value can be given as a datetime object, as a string in the
+# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
+# time.localtime()), or an integer value (as returned by time.time()).
+# The wrapper uses time.localtime() to convert an integer to a time
+# tuple.
+#
+# @param value The time, given as a datetime object, an ISO 8601 string,
+#              a time tuple, or an integer time value.
+
+
+### For Python-Future:
+def _iso8601_format(value):
+    return "%04d%02d%02dT%02d:%02d:%02d" % (
+        value.year, value.month, value.day,
+        value.hour, value.minute, value.second)
+###
+# Issue #13305: different format codes across platforms
+# _day0 = datetime(1, 1, 1)
+# if _day0.strftime('%Y') == '0001':      # Mac OS X
+#     def _iso8601_format(value):
+#         return value.strftime("%Y%m%dT%H:%M:%S")
+# elif _day0.strftime('%4Y') == '0001':   # Linux
+#     def _iso8601_format(value):
+#         return value.strftime("%4Y%m%dT%H:%M:%S")
+# else:
+#     def _iso8601_format(value):
+#         return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
+# del _day0
+
+
+def _strftime(value):
+    if isinstance(value, datetime):
+        return _iso8601_format(value)
+
+    if not isinstance(value, (tuple, time.struct_time)):
+        if value == 0:
+            value = time.time()
+        value = time.localtime(value)
+
+    return "%04d%02d%02dT%02d:%02d:%02d" % value[:6]
+
+class DateTime(object):
+    """DateTime wrapper for an ISO 8601 string or time tuple or
+    localtime integer value to generate 'dateTime.iso8601' XML-RPC
+    value.
+    """
+
+    def __init__(self, value=0):
+        if isinstance(value, str):
+            self.value = value
+        else:
+            self.value = _strftime(value)
+
+    def make_comparable(self, other):
+        if isinstance(other, DateTime):
+            s = self.value
+            o = other.value
+        elif isinstance(other, datetime):
+            s = self.value
+            o = _iso8601_format(other)
+        elif isinstance(other, str):
+            s = self.value
+            o = other
+        elif hasattr(other, "timetuple"):
+            s = self.timetuple()
+            o = other.timetuple()
+        else:
+            otype = (hasattr(other, "__class__")
+                     and other.__class__.__name__
+                     or type(other))
+            raise TypeError("Can't compare %s and %s" %
+                            (self.__class__.__name__, otype))
+        return s, o
+
+    def __lt__(self, other):
+        s, o = self.make_comparable(other)
+        return s < o
+
+    def __le__(self, other):
+        s, o = self.make_comparable(other)
+        return s <= o
+
+    def __gt__(self, other):
+        s, o = self.make_comparable(other)
+        return s > o
+
+    def __ge__(self, other):
+        s, o = self.make_comparable(other)
+        return s >= o
+
+    def __eq__(self, other):
+        s, o = self.make_comparable(other)
+        return s == o
+
+    def __ne__(self, other):
+        s, o = self.make_comparable(other)
+        return s != o
+
+    def timetuple(self):
+        return time.strptime(self.value, "%Y%m%dT%H:%M:%S")
+
+    ##
+    # Get date/time value.
+    #
+    # @return Date/time value, as an ISO 8601 string.
+
+    def __str__(self):
+        return self.value
+
+    def __repr__(self):
+        return "<DateTime %s at %x>" % (ensure_new_type(self.value), id(self))
+
+    def decode(self, data):
+        self.value = str(data).strip()
+
+    def encode(self, out):
+        out.write("<value><dateTime.iso8601>")
+        out.write(self.value)
+        out.write("</dateTime.iso8601></value>\n")
+
+def _datetime(data):
+    # decode xml element contents into a DateTime structure.
+    value = DateTime()
+    value.decode(data)
+    return value
+
+def _datetime_type(data):
+    return datetime.strptime(data, "%Y%m%dT%H:%M:%S")
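+
+# Editor's sketch of the DateTime wrapper (illustrative only): every
+# accepted input form normalises to the same ISO 8601 text, and
+# comparisons work against strings, datetimes and other wrappers.
+#
+#     d = DateTime(datetime(2020, 11, 27, 11, 26, 26))
+#     str(d)                              # '20201127T11:26:26'
+#     d == DateTime("20201127T11:26:26")  # True
+#     _datetime_type(str(d))              # back to a datetime object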
+
+##
+# Wrapper for binary data.  This can be used to transport any kind
+# of binary data over XML-RPC, using BASE64 encoding.
+#
+# @param data An 8-bit string containing arbitrary data.
+
+class Binary(object):
+    """Wrapper for binary data."""
+
+    def __init__(self, data=None):
+        if data is None:
+            data = b""
+        else:
+            if not isinstance(data, (bytes, bytearray)):
+                raise TypeError("expected bytes or bytearray, not %s" %
+                                data.__class__.__name__)
+            data = bytes(data)  # Make a copy of the bytes!
+        self.data = data
+
+    ##
+    # Get buffer contents.
+    #
+    # @return Buffer contents, as an 8-bit string.
+
+    def __str__(self):
+        return str(self.data, "latin-1")  # XXX encoding?!
+
+    def __eq__(self, other):
+        if isinstance(other, Binary):
+            other = other.data
+        return self.data == other
+
+    def __ne__(self, other):
+        if isinstance(other, Binary):
+            other = other.data
+        return self.data != other
+
+    def decode(self, data):
+        self.data = base64.decodebytes(data)
+
+    def encode(self, out):
+        out.write("<value><base64>\n")
+        encoded = base64.encodebytes(self.data)
+        out.write(encoded.decode('ascii'))
+        out.write("</base64></value>\n")
+
+def _binary(data):
+    # decode xml element contents into a Binary structure
+    value = Binary()
+    value.decode(data)
+    return value
+
+WRAPPERS = (DateTime, Binary)
+
+# --------------------------------------------------------------------
+# XML parsers
+
+class ExpatParser(object):
+    # fast expat parser for Python 2.0 and later.
+    def __init__(self, target):
+        self._parser = parser = expat.ParserCreate(None, None)
+        self._target = target
+        parser.StartElementHandler = target.start
+        parser.EndElementHandler = target.end
+        parser.CharacterDataHandler = target.data
+        encoding = None
+        target.xml(encoding, None)
+
+    def feed(self, data):
+        self._parser.Parse(data, 0)
+
+    def close(self):
+        self._parser.Parse("", 1)  # end of data
+        del self._target, self._parser  # get rid of circular references
+
+# --------------------------------------------------------------------
+# XML-RPC marshalling and unmarshalling code
+
+##
+# XML-RPC marshaller.
+#
+# @param encoding Default encoding for 8-bit strings.  The default
+#     value is None (interpreted as UTF-8).
+# @see dumps
+
+class Marshaller(object):
+    """Generate an XML-RPC params chunk from a Python data structure.
+
+    Create a Marshaller instance for each set of parameters, and use
+    the "dumps" method to convert your data (represented as a tuple)
+    to an XML-RPC params chunk.  To write a fault response, pass a
+    Fault instance instead.  You may prefer to use the "dumps" module
+    function for this purpose.
+    """
+
+    # by the way, if you don't understand what's going on in here,
+    # that's perfectly ok.
+
+    def __init__(self, encoding=None, allow_none=False):
+        self.memo = {}
+        self.data = None
+        self.encoding = encoding
+        self.allow_none = allow_none
+
+    dispatch = {}
+
+    def dumps(self, values):
+        out = []
+        write = out.append
+        dump = self.__dump
+        if isinstance(values, Fault):
+            # fault instance
+            write("<fault>\n")
+            dump({'faultCode': values.faultCode,
+                  'faultString': values.faultString},
+                 write)
+            write("</fault>\n")
+        else:
+            # parameter block
+            # FIXME: the xml-rpc specification allows us to leave out
+            # the entire <params> block if there are no parameters.
+            # however, changing this may break older code (including
+            # old versions of xmlrpclib.py), so this is better left as
+            # is for now.  See @XMLRPC3 for more information. /F
+            write("<params>\n")
+            for v in values:
+                write("<param>\n")
+                dump(v, write)
+                write("</param>\n")
+            write("</params>\n")
+        result = "".join(out)
+        return str(result)
+
+    def __dump(self, value, write):
+        try:
+            f = self.dispatch[type(ensure_new_type(value))]
+        except KeyError:
+            # check if this object can be marshalled as a structure
+            if not hasattr(value, '__dict__'):
+                raise TypeError("cannot marshal %s objects" % type(value))
+            # check if this class is a sub-class of a basic type,
+            # because we don't know how to marshal these types
+            # (e.g. a string sub-class)
+            for type_ in type(value).__mro__:
+                if type_ in self.dispatch.keys():
+                    raise TypeError("cannot marshal %s objects" % type(value))
+            # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
+            # for the p3yk merge, this should probably be fixed more neatly.
+            f = self.dispatch["_arbitrary_instance"]
+        f(self, value, write)
+
+    def dump_nil (self, value, write):
+        if not self.allow_none:
+            raise TypeError("cannot marshal None unless allow_none is enabled")
+        write("<value><nil/></value>")
+    dispatch[type(None)] = dump_nil
+
+    def dump_bool(self, value, write):
+        write("<value><boolean>")
+        write(value and "1" or "0")
+        write("</boolean></value>\n")
+    dispatch[bool] = dump_bool
+
+    def dump_long(self, value, write):
+        if value > MAXINT or value < MININT:
+            raise OverflowError("long int exceeds XML-RPC limits")
+        write("<value><int>")
+        write(str(int(value)))
+        write("</int></value>\n")
+    dispatch[int] = dump_long
+
+    # backward compatible
+    dump_int = dump_long
+
+    def dump_double(self, value, write):
+        write("<value><double>")
+        write(repr(ensure_new_type(value)))
+        write("</double></value>\n")
+    dispatch[float] = dump_double
+
+    def dump_unicode(self, value, write, escape=escape):
+        write("<value><string>")
+        write(escape(value))
+        write("</string></value>\n")
+    dispatch[str] = dump_unicode
+
+    def dump_bytes(self, value, write):
+        write("<value><base64>\n")
+        encoded = base64.encodebytes(value)
+        write(encoded.decode('ascii'))
+        write("</base64></value>\n")
+    dispatch[bytes] = dump_bytes
+    dispatch[bytearray] = dump_bytes
+
+    def dump_array(self, value, write):
+        i = id(value)
+        if i in self.memo:
+            raise TypeError("cannot marshal recursive sequences")
+        self.memo[i] = None
+        dump = self.__dump
+        write("<value><array><data>\n")
+        for v in value:
+            dump(v, write)
+        write("</data></array></value>\n")
+        del self.memo[i]
+    dispatch[tuple] = dump_array
+    dispatch[list] = dump_array
+
+    def dump_struct(self, value, write, escape=escape):
+        i = id(value)
+        if i in self.memo:
+            raise TypeError("cannot marshal recursive dictionaries")
+        self.memo[i] = None
+        dump = self.__dump
+        write("<value><struct>\n")
+        for k, v in value.items():
+            write("<member>\n")
+            if not isinstance(k, str):
+                raise TypeError("dictionary key must be string")
+            write("<name>%s</name>\n" % escape(k))
+            dump(v, write)
+            write("</member>\n")
+        write("</struct></value>\n")
+        del self.memo[i]
+    dispatch[dict] = dump_struct
+
+    def dump_datetime(self, value, write):
+        write("<value><dateTime.iso8601>")
+        write(_strftime(value))
+        write("</dateTime.iso8601></value>\n")
+    dispatch[datetime] = dump_datetime
+
+    def dump_instance(self, value, write):
+        # check for special wrappers
+        if value.__class__ in WRAPPERS:
+            self.write = write
+            value.encode(self)
+            del self.write
+        else:
+            # store instance attributes as a struct (really?)
+            self.dump_struct(value.__dict__, write)
+    dispatch[DateTime] = dump_instance
+    dispatch[Binary] = dump_instance
+    # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
+    # for the p3yk merge, this should probably be fixed more neatly.
+    dispatch["_arbitrary_instance"] = dump_instance
+
+##
+# XML-RPC unmarshaller.
+#
+# @see loads
+
+class Unmarshaller(object):
+    """Unmarshal an XML-RPC response, based on incoming XML event
+    messages (start, data, end).  Call close() to get the resulting
+    data structure.
+
+    Note that this reader is fairly tolerant, and gladly accepts bogus
+    XML-RPC data without complaining (but not bogus XML).
+    """
+
+    # and again, if you don't understand what's going on in here,
+    # that's perfectly ok.
+
+    def __init__(self, use_datetime=False, use_builtin_types=False):
+        self._type = None
+        self._stack = []
+        self._marks = []
+        self._data = []
+        self._methodname = None
+        self._encoding = "utf-8"
+        self.append = self._stack.append
+        self._use_datetime = use_builtin_types or use_datetime
+        self._use_bytes = use_builtin_types
+
+    def close(self):
+        # return response tuple and target method
+        if self._type is None or self._marks:
+            raise ResponseError()
+        if self._type == "fault":
+            raise Fault(**self._stack[0])
+        return tuple(self._stack)
+
+    def getmethodname(self):
+        return self._methodname
+
+    #
+    # event handlers
+
+    def xml(self, encoding, standalone):
+        self._encoding = encoding
+        # FIXME: assert standalone == 1 ???
+
+    def start(self, tag, attrs):
+        # prepare to handle this element
+        if tag == "array" or tag == "struct":
+            self._marks.append(len(self._stack))
+        self._data = []
+        self._value = (tag == "value")
+
+    def data(self, text):
+        self._data.append(text)
+
+    def end(self, tag):
+        # call the appropriate end tag handler
+        try:
+            f = self.dispatch[tag]
+        except KeyError:
+            pass # unknown tag ?
+        else:
+            return f(self, "".join(self._data))
+
+    #
+    # accelerator support
+
+    def end_dispatch(self, tag, data):
+        # dispatch data
+        try:
+            f = self.dispatch[tag]
+        except KeyError:
+            pass # unknown tag ?
+        else:
+            return f(self, data)
+
+    #
+    # element decoders
+
+    dispatch = {}
+
+    def end_nil (self, data):
+        self.append(None)
+        self._value = 0
+    dispatch["nil"] = end_nil
+
+    def end_boolean(self, data):
+        if data == "0":
+            self.append(False)
+        elif data == "1":
+            self.append(True)
+        else:
+            raise TypeError("bad boolean value")
+        self._value = 0
+    dispatch["boolean"] = end_boolean
+
+    def end_int(self, data):
+        self.append(int(data))
+        self._value = 0
+    dispatch["i4"] = end_int
+    dispatch["i8"] = end_int
+    dispatch["int"] = end_int
+
+    def end_double(self, data):
+        self.append(float(data))
+        self._value = 0
+    dispatch["double"] = end_double
+
+    def end_string(self, data):
+        if self._encoding:
+            data = data.decode(self._encoding)
+        self.append(data)
+        self._value = 0
+    dispatch["string"] = end_string
+    dispatch["name"] = end_string # struct keys are always strings
+
+    def end_array(self, data):
+        mark = self._marks.pop()
+        # map arrays to Python lists
+        self._stack[mark:] = [self._stack[mark:]]
+        self._value = 0
+    dispatch["array"] = end_array
+
+    def end_struct(self, data):
+        mark = self._marks.pop()
+        # map structs to Python dictionaries
+        dict = {}
+        items = self._stack[mark:]
+        for i in range(0, len(items), 2):
+            dict[items[i]] = items[i+1]
+        self._stack[mark:] = [dict]
+        self._value = 0
+    dispatch["struct"] = end_struct
+
+    def end_base64(self, data):
+        value = Binary()
+        value.decode(data.encode("ascii"))
+        if self._use_bytes:
+            value = value.data
+        self.append(value)
+        self._value = 0
+    dispatch["base64"] = end_base64
+
+    def end_dateTime(self, data):
+        value = DateTime()
+        value.decode(data)
+        if self._use_datetime:
+            value = _datetime_type(data)
+        self.append(value)
+    dispatch["dateTime.iso8601"] = end_dateTime
+
+    def end_value(self, data):
+        # if we stumble upon a value element with no internal
+        # elements, treat it as a string element
+        if self._value:
+            self.end_string(data)
+    dispatch["value"] = end_value
+
+    def end_params(self, data):
+        self._type = "params"
+    dispatch["params"] = end_params
+
+    def end_fault(self, data):
+        self._type = "fault"
+    dispatch["fault"] = end_fault
+
+    def end_methodName(self, data):
+        if self._encoding:
+            data = data.decode(self._encoding)
+        self._methodname = data
+        self._type = "methodName" # no params
+    dispatch["methodName"] = end_methodName
+
+## Multicall support
+#
+
+class _MultiCallMethod(object):
+    # some lesser magic to store calls made to a MultiCall object
+    # for batch execution
+    def __init__(self, call_list, name):
+        self.__call_list = call_list
+        self.__name = name
+    def __getattr__(self, name):
+        return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
+    def __call__(self, *args):
+        self.__call_list.append((self.__name, args))
+
+class MultiCallIterator(object):
+    """Iterates over the results of a multicall. Exceptions are
+    raised in response to xmlrpc faults."""
+
+    def __init__(self, results):
+        self.results = results
+
+    def __getitem__(self, i):
+        item = self.results[i]
+        if isinstance(type(item), dict):
+            raise Fault(item['faultCode'], item['faultString'])
+        elif type(item) == type([]):
+            return item[0]
+        else:
+            raise ValueError("unexpected type in multicall result")
+
+class MultiCall(object):
+    """server -> an object used to boxcar method calls
+
+    server should be a ServerProxy object.
+
+    Methods can be added to the MultiCall using normal
+    method call syntax e.g.:
+
+    multicall = MultiCall(server_proxy)
+    multicall.add(2,3)
+    multicall.get_address("Guido")
+
+    To execute the multicall, call the MultiCall object e.g.:
+
+    add_result, address = multicall()
+    """
+
+    def __init__(self, server):
+        self.__server = server
+        self.__call_list = []
+
+    def __repr__(self):
+        return "<MultiCall at %x>" % id(self)
+
+    __str__ = __repr__
+
+    def __getattr__(self, name):
+        return _MultiCallMethod(self.__call_list, name)
+
+    def __call__(self):
+        marshalled_list = []
+        for name, args in self.__call_list:
+            marshalled_list.append({'methodName' : name, 'params' : args})
+
+        return MultiCallIterator(self.__server.system.multicall(marshalled_list))
+
+# --------------------------------------------------------------------
+# convenience functions
+
+FastMarshaller = FastParser = FastUnmarshaller = None
+
+##
+# Create a parser object, and connect it to an unmarshalling instance.
+# This function picks the fastest available XML parser.
+#
+# return A (parser, unmarshaller) tuple.
+
+def getparser(use_datetime=False, use_builtin_types=False):
+    """getparser() -> parser, unmarshaller
+
+    Create an instance of the fastest available parser, and attach it
+    to an unmarshalling object.  Return both objects.
+    """
+    if FastParser and FastUnmarshaller:
+        if use_builtin_types:
+            mkdatetime = _datetime_type
+            mkbytes = base64.decodebytes
+        elif use_datetime:
+            mkdatetime = _datetime_type
+            mkbytes = _binary
+        else:
+            mkdatetime = _datetime
+            mkbytes = _binary
+        target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault)
+        parser = FastParser(target)
+    else:
+        target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
+        if FastParser:
+            parser = FastParser(target)
+        else:
+            parser = ExpatParser(target)
+    return parser, target
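+
+# Editor's sketch: for ordinary calls, dumps() and loads() below are
+# inverses, which makes it easy to exercise the marshalling layer
+# without a live server (illustrative only):
+#
+#     packet = dumps((1, "hello"), methodname="echo")
+#     params, method = loads(packet)
+#     # params == (1, 'hello'), method == 'echo'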
+
+##
+# Convert a Python tuple or a Fault instance to an XML-RPC packet.
+#
+# @def dumps(params, **options)
+# @param params A tuple or Fault instance.
+# @keyparam methodname If given, create a methodCall request for
+#     this method name.
+# @keyparam methodresponse If given, create a methodResponse packet.
+#     If used with a tuple, the tuple must be a singleton (that is,
+#     it must contain exactly one element).
+# @keyparam encoding The packet encoding.
+# @return A string containing marshalled data.
+
+def dumps(params, methodname=None, methodresponse=None, encoding=None,
+          allow_none=False):
+    """data [,options] -> marshalled data
+
+    Convert an argument tuple or a Fault instance to an XML-RPC
+    request (or response, if the methodresponse option is used).
+
+    In addition to the data object, the following options can be given
+    as keyword arguments:
+
+        methodname: the method name for a methodCall packet
+
+        methodresponse: true to create a methodResponse packet.
+        If this option is used with a tuple, the tuple must be
+        a singleton (i.e. it can contain only one element).
+
+        encoding: the packet encoding (default is UTF-8)
+
+    All byte strings in the data structure are assumed to use the
+    packet encoding.  Unicode strings are automatically converted,
+    where necessary.
+    """
+
+    assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance"
+    if isinstance(params, Fault):
+        methodresponse = 1
+    elif methodresponse and isinstance(params, tuple):
+        assert len(params) == 1, "response tuple must be a singleton"
+
+    if not encoding:
+        encoding = "utf-8"
+
+    if FastMarshaller:
+        m = FastMarshaller(encoding)
+    else:
+        m = Marshaller(encoding, allow_none)
+
+    data = m.dumps(params)
+
+    if encoding != "utf-8":
+        xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
+    else:
+        xmlheader = "<?xml version='1.0'?>\n"  # utf-8 is default
+
+    # standard XML-RPC wrappings
+    if methodname:
+        # a method call
+        if not isinstance(methodname, str):
+            methodname = methodname.encode(encoding)
+        data = (
+            xmlheader,
+            "<methodCall>\n"
+            "<methodName>", methodname, "</methodName>\n",
+            data,
+            "</methodCall>\n"
+            )
+    elif methodresponse:
+        # a method response, or a fault structure
+        data = (
+            xmlheader,
+            "<methodResponse>\n",
+            data,
+            "</methodResponse>\n"
+            )
+    else:
+        return data  # return as is
+    return str("").join(data)
+
+##
+# Convert an XML-RPC packet to a Python object.  If the XML-RPC packet
+# represents a fault condition, this function raises a Fault exception.
+#
+# @param data An XML-RPC packet, given as an 8-bit string.
+# @return A tuple containing the unpacked data, and the method name
+#     (None if not present).
+# @see Fault
+
+def loads(data, use_datetime=False, use_builtin_types=False):
+    """data -> unmarshalled data, method name
+
+    Convert an XML-RPC packet to unmarshalled data plus a method
+    name (None if not present).
+
+    If the XML-RPC packet represents a fault condition, this function
+    raises a Fault exception.
+ """ + p, u = getparser(use_datetime=use_datetime, use_builtin_types=use_builtin_types) + p.feed(data) + p.close() + return u.close(), u.getmethodname() + +## +# Encode a string using the gzip content encoding such as specified by the +# Content-Encoding: gzip +# in the HTTP header, as described in RFC 1952 +# +# @param data the unencoded data +# @return the encoded data + +def gzip_encode(data): + """data -> gzip encoded data + + Encode data using the gzip content encoding as described in RFC 1952 + """ + if not gzip: + raise NotImplementedError + f = BytesIO() + gzf = gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1) + gzf.write(data) + gzf.close() + encoded = f.getvalue() + f.close() + return encoded + +## +# Decode a string using the gzip content encoding such as specified by the +# Content-Encoding: gzip +# in the HTTP header, as described in RFC 1952 +# +# @param data The encoded data +# @return the unencoded data +# @raises ValueError if data is not correctly coded. + +def gzip_decode(data): + """gzip encoded data -> unencoded data + + Decode data using the gzip content encoding as described in RFC 1952 + """ + if not gzip: + raise NotImplementedError + f = BytesIO(data) + gzf = gzip.GzipFile(mode="rb", fileobj=f) + try: + decoded = gzf.read() + except IOError: + raise ValueError("invalid data") + f.close() + gzf.close() + return decoded + +## +# Return a decoded file-like object for the gzip encoding +# as described in RFC 1952. +# +# @param response A stream supporting a read() method +# @return a file-like object that the decoded data can be read() from + +class GzipDecodedResponse(gzip.GzipFile if gzip else object): + """a file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. + """ + def __init__(self, response): + #response doesn't support tell() and read(), required by + #GzipFile + if not gzip: + raise NotImplementedError + self.io = BytesIO(response.read()) + gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io) + + def close(self): + gzip.GzipFile.close(self) + self.io.close() + + +# -------------------------------------------------------------------- +# request dispatcher + +class _Method(object): + # some magic to bind an XML-RPC method to an RPC server. + # supports "nested" methods (e.g. examples.getStateName) + def __init__(self, send, name): + self.__send = send + self.__name = name + def __getattr__(self, name): + return _Method(self.__send, "%s.%s" % (self.__name, name)) + def __call__(self, *args): + return self.__send(self.__name, args) + +## +# Standard transport class for XML-RPC over HTTP. +#
+# <p>
+# You can create custom transports by subclassing this method, and +# overriding selected methods. + +class Transport(object): + """Handles an HTTP transaction to an XML-RPC server.""" + + # client identifier (may be overridden) + user_agent = "Python-xmlrpc/%s" % __version__ + + #if true, we'll request gzip encoding + accept_gzip_encoding = True + + # if positive, encode request using gzip if it exceeds this threshold + # note that many server will get confused, so only use it if you know + # that they can decode such a request + encode_threshold = None #None = don't encode + + def __init__(self, use_datetime=False, use_builtin_types=False): + self._use_datetime = use_datetime + self._use_builtin_types = use_builtin_types + self._connection = (None, None) + self._extra_headers = [] + + ## + # Send a complete request, and parse the response. + # Retry request if a cached connection has disconnected. + # + # @param host Target host. + # @param handler Target PRC handler. + # @param request_body XML-RPC request body. + # @param verbose Debugging flag. + # @return Parsed response. + + def request(self, host, handler, request_body, verbose=False): + #retry request once if cached connection has gone cold + for i in (0, 1): + try: + return self.single_request(host, handler, request_body, verbose) + except socket.error as e: + if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE): + raise + except http_client.BadStatusLine: #close after we sent request + if i: + raise + + def single_request(self, host, handler, request_body, verbose=False): + # issue XML-RPC request + try: + http_conn = self.send_request(host, handler, request_body, verbose) + resp = http_conn.getresponse() + if resp.status == 200: + self.verbose = verbose + return self.parse_response(resp) + + except Fault: + raise + except Exception: + #All unexpected errors leave connection in + # a strange state, so we clear it. + self.close() + raise + + #We got an error response. + #Discard any response data and raise exception + if resp.getheader("content-length", ""): + resp.read() + raise ProtocolError( + host + handler, + resp.status, resp.reason, + dict(resp.getheaders()) + ) + + + ## + # Create parser. + # + # @return A 2-tuple containing a parser and a unmarshaller. + + def getparser(self): + # get parser and unmarshaller + return getparser(use_datetime=self._use_datetime, + use_builtin_types=self._use_builtin_types) + + ## + # Get authorization info from host parameter + # Host may be a string, or a (host, x509-dict) tuple; if a string, + # it is checked for a "user:pw@host" format, and a "Basic + # Authentication" header is added if appropriate. + # + # @param host Host descriptor (URL or (URL, x509 info) tuple). + # @return A 3-tuple containing (actual host, extra headers, + # x509 info). The header and x509 fields may be None. + + def get_host_info(self, host): + + x509 = {} + if isinstance(host, tuple): + host, x509 = host + + auth, host = urllib_parse.splituser(host) + + if auth: + auth = urllib_parse.unquote_to_bytes(auth) + auth = base64.encodebytes(auth).decode("utf-8") + auth = "".join(auth.split()) # get rid of whitespace + extra_headers = [ + ("Authorization", "Basic " + auth) + ] + else: + extra_headers = [] + + return host, extra_headers, x509 + + ## + # Connect to server. + # + # @param host Target host. + # @return An HTTPConnection object + + def make_connection(self, host): + #return an existing connection if possible. This allows + #HTTP/1.1 keep-alive. 
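+        # (editor's note) self._connection caches a single
+        # (host, HTTPConnection) pair, so consecutive requests to the
+        # same host reuse one socket instead of reconnecting per call.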
+ if self._connection and host == self._connection[0]: + return self._connection[1] + # create a HTTP connection object from a host descriptor + chost, self._extra_headers, x509 = self.get_host_info(host) + self._connection = host, http_client.HTTPConnection(chost) + return self._connection[1] + + ## + # Clear any cached connection object. + # Used in the event of socket errors. + # + def close(self): + if self._connection[1]: + self._connection[1].close() + self._connection = (None, None) + + ## + # Send HTTP request. + # + # @param host Host descriptor (URL or (URL, x509 info) tuple). + # @param handler Targer RPC handler (a path relative to host) + # @param request_body The XML-RPC request body + # @param debug Enable debugging if debug is true. + # @return An HTTPConnection. + + def send_request(self, host, handler, request_body, debug): + connection = self.make_connection(host) + headers = self._extra_headers[:] + if debug: + connection.set_debuglevel(1) + if self.accept_gzip_encoding and gzip: + connection.putrequest("POST", handler, skip_accept_encoding=True) + headers.append(("Accept-Encoding", "gzip")) + else: + connection.putrequest("POST", handler) + headers.append(("Content-Type", "text/xml")) + headers.append(("User-Agent", self.user_agent)) + self.send_headers(connection, headers) + self.send_content(connection, request_body) + return connection + + ## + # Send request headers. + # This function provides a useful hook for subclassing + # + # @param connection httpConnection. + # @param headers list of key,value pairs for HTTP headers + + def send_headers(self, connection, headers): + for key, val in headers: + connection.putheader(key, val) + + ## + # Send request body. + # This function provides a useful hook for subclassing + # + # @param connection httpConnection. + # @param request_body XML-RPC request body. + + def send_content(self, connection, request_body): + #optionally encode the request + if (self.encode_threshold is not None and + self.encode_threshold < len(request_body) and + gzip): + connection.putheader("Content-Encoding", "gzip") + request_body = gzip_encode(request_body) + + connection.putheader("Content-Length", str(len(request_body))) + connection.endheaders(request_body) + + ## + # Parse response. + # + # @param file Stream. + # @return Response tuple and target method. + + def parse_response(self, response): + # read response data from httpresponse, and parse it + # Check for new http response object, otherwise it is a file object. + if hasattr(response, 'getheader'): + if response.getheader("Content-Encoding", "") == "gzip": + stream = GzipDecodedResponse(response) + else: + stream = response + else: + stream = response + + p, u = self.getparser() + + while 1: + data = stream.read(1024) + if not data: + break + if self.verbose: + print("body:", repr(data)) + p.feed(data) + + if stream is not response: + stream.close() + p.close() + + return u.close() + +## +# Standard transport class for XML-RPC over HTTPS. 
+ +class SafeTransport(Transport): + """Handles an HTTPS transaction to an XML-RPC server.""" + + # FIXME: mostly untested + + def make_connection(self, host): + if self._connection and host == self._connection[0]: + return self._connection[1] + + if not hasattr(http_client, "HTTPSConnection"): + raise NotImplementedError( + "your version of http.client doesn't support HTTPS") + # create a HTTPS connection object from a host descriptor + # host may be a string, or a (host, x509-dict) tuple + chost, self._extra_headers, x509 = self.get_host_info(host) + self._connection = host, http_client.HTTPSConnection(chost, + None, **(x509 or {})) + return self._connection[1] + +## +# Standard server proxy. This class establishes a virtual connection +# to an XML-RPC server. +#
+# <p>
+# This class is available as ServerProxy and Server.  New code should
+# use ServerProxy, to avoid confusion.
+#
+# @def ServerProxy(uri, **options)
+# @param uri The connection point on the server.
+# @keyparam transport A transport factory, compatible with the
+#    standard transport class.
+# @keyparam encoding The default encoding used for 8-bit strings
+#    (default is UTF-8).
+# @keyparam verbose Use a true value to enable debugging output.
+#    (printed to standard output).
+# @see Transport
+
+class ServerProxy(object):
+    """uri [,options] -> a logical connection to an XML-RPC server
+
+    uri is the connection point on the server, given as
+    scheme://host/target.
+
+    The standard implementation always supports the "http" scheme.  If
+    SSL socket support is available (Python 2.0), it also supports
+    "https".
+
+    If the target part and the slash preceding it are both omitted,
+    "/RPC2" is assumed.
+
+    The following options can be given as keyword arguments:
+
+        transport: a transport factory
+        encoding: the request encoding (default is UTF-8)
+
+    All 8-bit strings passed to the server proxy are assumed to use
+    the given encoding.
+    """
+
+    def __init__(self, uri, transport=None, encoding=None, verbose=False,
+                 allow_none=False, use_datetime=False, use_builtin_types=False):
+        # establish a "logical" server connection
+
+        # get the url
+        type, uri = urllib_parse.splittype(uri)
+        if type not in ("http", "https"):
+            raise IOError("unsupported XML-RPC protocol")
+        self.__host, self.__handler = urllib_parse.splithost(uri)
+        if not self.__handler:
+            self.__handler = "/RPC2"
+
+        if transport is None:
+            if type == "https":
+                handler = SafeTransport
+            else:
+                handler = Transport
+            transport = handler(use_datetime=use_datetime,
+                                use_builtin_types=use_builtin_types)
+        self.__transport = transport
+
+        self.__encoding = encoding or 'utf-8'
+        self.__verbose = verbose
+        self.__allow_none = allow_none
+
+    def __close(self):
+        self.__transport.close()
+
+    def __request(self, methodname, params):
+        # call a method on the remote server
+
+        request = dumps(params, methodname, encoding=self.__encoding,
+                        allow_none=self.__allow_none).encode(self.__encoding)
+
+        response = self.__transport.request(
+            self.__host,
+            self.__handler,
+            request,
+            verbose=self.__verbose
+            )
+
+        if len(response) == 1:
+            response = response[0]
+
+        return response
+
+    def __repr__(self):
+        return (
+            "<ServerProxy for %s%s>" %
+            (self.__host, self.__handler)
+            )
+
+    __str__ = __repr__
+
+    def __getattr__(self, name):
+        # magic method dispatcher
+        return _Method(self.__request, name)
+
+    # note: to call a remote object with a non-standard name, use
+    # result getattr(server, "strange-python-name")(args)
+
+    def __call__(self, attr):
+        """A workaround to get special attributes on the ServerProxy
+           without interfering with the magic __getattr__
+        """
+        if attr == "close":
+            return self.__close
+        elif attr == "transport":
+            return self.__transport
+        raise AttributeError("Attribute %r not found" % (attr,))
+
+# compatibility
+
+Server = ServerProxy
+
+# --------------------------------------------------------------------
+# test code
+
+if __name__ == "__main__":
+
+    # simple test program (from the XML-RPC specification)
+
+    # local server, available from Lib/xmlrpc/server.py
+    server = ServerProxy("http://localhost:8000")
+
+    try:
+        print(server.currentTime.getCurrentTime())
+    except Error as v:
+        print("ERROR", v)
+
+    multi = MultiCall(server)
+    multi.getData()
+    multi.pow(2,9)
+    multi.add(1,2)
+    try:
+        for response in multi():
+            print(response)
+    except 
Error as v: + print("ERROR", v) diff --git a/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/server.py b/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/server.py new file mode 100644 index 0000000000..28072bfecd --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/backports/xmlrpc/server.py @@ -0,0 +1,999 @@ +r""" +Ported using Python-Future from the Python 3.3 standard library. + +XML-RPC Servers. + +This module can be used to create simple XML-RPC servers +by creating a server and either installing functions, a +class instance, or by extending the SimpleXMLRPCServer +class. + +It can also be used to handle XML-RPC requests in a CGI +environment using CGIXMLRPCRequestHandler. + +The Doc* classes can be used to create XML-RPC servers that +serve pydoc-style documentation in response to HTTP +GET requests. This documentation is dynamically generated +based on the functions and methods registered with the +server. + +A list of possible usage patterns follows: + +1. Install functions: + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_function(pow) +server.register_function(lambda x,y: x+y, 'add') +server.serve_forever() + +2. Install an instance: + +class MyFuncs: + def __init__(self): + # make all of the sys functions available through sys.func_name + import sys + self.sys = sys + def _listMethods(self): + # implement this method so that system.listMethods + # knows to advertise the sys methods + return list_public_methods(self) + \ + ['sys.' + method for method in list_public_methods(self.sys)] + def pow(self, x, y): return pow(x, y) + def add(self, x, y) : return x + y + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_introspection_functions() +server.register_instance(MyFuncs()) +server.serve_forever() + +3. Install an instance with custom dispatch method: + +class Math: + def _listMethods(self): + # this method must be present for system.listMethods + # to work + return ['add', 'pow'] + def _methodHelp(self, method): + # this method must be present for system.methodHelp + # to work + if method == 'add': + return "add(2,3) => 5" + elif method == 'pow': + return "pow(x, y[, z]) => number" + else: + # By convention, return empty + # string if no help is available + return "" + def _dispatch(self, method, params): + if method == 'pow': + return pow(*params) + elif method == 'add': + return params[0] + params[1] + else: + raise ValueError('bad method') + +server = SimpleXMLRPCServer(("localhost", 8000)) +server.register_introspection_functions() +server.register_instance(Math()) +server.serve_forever() + +4. Subclass SimpleXMLRPCServer: + +class MathServer(SimpleXMLRPCServer): + def _dispatch(self, method, params): + try: + # We are forcing the 'export_' prefix on methods that are + # callable through XML-RPC to prevent potential security + # problems + func = getattr(self, 'export_' + method) + except AttributeError: + raise Exception('method "%s" is not supported' % method) + else: + return func(*params) + + def export_add(self, x, y): + return x + y + +server = MathServer(("localhost", 8000)) +server.serve_forever() + +5. CGI script: + +server = CGIXMLRPCRequestHandler() +server.register_function(pow) +server.handle_request() +""" + +from __future__ import absolute_import, division, print_function, unicode_literals +from future.builtins import int, str + +# Written by Brian Quinlan (brian@sweetapp.com). +# Based on code written by Fredrik Lundh. 
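+# Editor's note: the usage patterns above pair with the client module;
+# a minimal end-to-end check looks like this (illustrative only, the
+# port number is arbitrary):
+#
+#     import threading
+#     from future.backports.xmlrpc.client import ServerProxy
+#
+#     server = SimpleXMLRPCServer(("localhost", 8000))
+#     server.register_function(pow)
+#     t = threading.Thread(target=server.serve_forever)
+#     t.daemon = True
+#     t.start()
+#     assert ServerProxy("http://localhost:8000").pow(2, 9) == 512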
+ +from future.backports.xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode +from future.backports.http.server import BaseHTTPRequestHandler +import future.backports.http.server as http_server +from future.backports import socketserver +import sys +import os +import re +import pydoc +import inspect +import traceback +try: + import fcntl +except ImportError: + fcntl = None + +def resolve_dotted_attribute(obj, attr, allow_dotted_names=True): + """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d + + Resolves a dotted attribute name to an object. Raises + an AttributeError if any attribute in the chain starts with a '_'. + + If the optional allow_dotted_names argument is false, dots are not + supported and this function operates similar to getattr(obj, attr). + """ + + if allow_dotted_names: + attrs = attr.split('.') + else: + attrs = [attr] + + for i in attrs: + if i.startswith('_'): + raise AttributeError( + 'attempt to access private attribute "%s"' % i + ) + else: + obj = getattr(obj,i) + return obj + +def list_public_methods(obj): + """Returns a list of attribute strings, found in the specified + object, which represent callable attributes""" + + return [member for member in dir(obj) + if not member.startswith('_') and + callable(getattr(obj, member))] + +class SimpleXMLRPCDispatcher(object): + """Mix-in class that dispatches XML-RPC requests. + + This class is used to register XML-RPC method handlers + and then to dispatch them. This class doesn't need to be + instanced directly when used by SimpleXMLRPCServer but it + can be instanced when used by the MultiPathXMLRPCServer + """ + + def __init__(self, allow_none=False, encoding=None, + use_builtin_types=False): + self.funcs = {} + self.instance = None + self.allow_none = allow_none + self.encoding = encoding or 'utf-8' + self.use_builtin_types = use_builtin_types + + def register_instance(self, instance, allow_dotted_names=False): + """Registers an instance to respond to XML-RPC requests. + + Only one instance can be installed at a time. + + If the registered instance has a _dispatch method then that + method will be called with the name of the XML-RPC method and + its parameters as a tuple + e.g. instance._dispatch('add',(2,3)) + + If the registered instance does not have a _dispatch method + then the instance will be searched to find a matching method + and, if found, will be called. Methods beginning with an '_' + are considered private and will not be called by + SimpleXMLRPCServer. + + If a registered function matches a XML-RPC request, then it + will be called instead of the registered instance. + + If the optional allow_dotted_names argument is true and the + instance does not have a _dispatch method, method names + containing dots are supported and resolved, as long as none of + the name segments start with an '_'. + + *** SECURITY WARNING: *** + + Enabling the allow_dotted_names options allows intruders + to access your module's global variables and may allow + intruders to execute arbitrary code on your machine. Only + use this option on a secure, closed network. + + """ + + self.instance = instance + self.allow_dotted_names = allow_dotted_names + + def register_function(self, function, name=None): + """Registers a function to respond to XML-RPC requests. + + The optional name argument can be used to set a Unicode name + for the function. 
+ """ + + if name is None: + name = function.__name__ + self.funcs[name] = function + + def register_introspection_functions(self): + """Registers the XML-RPC introspection methods in the system + namespace. + + see http://xmlrpc.usefulinc.com/doc/reserved.html + """ + + self.funcs.update({'system.listMethods' : self.system_listMethods, + 'system.methodSignature' : self.system_methodSignature, + 'system.methodHelp' : self.system_methodHelp}) + + def register_multicall_functions(self): + """Registers the XML-RPC multicall method in the system + namespace. + + see http://www.xmlrpc.com/discuss/msgReader$1208""" + + self.funcs.update({'system.multicall' : self.system_multicall}) + + def _marshaled_dispatch(self, data, dispatch_method = None, path = None): + """Dispatches an XML-RPC method from marshalled (XML) data. + + XML-RPC methods are dispatched from the marshalled (XML) data + using the _dispatch method and the result is returned as + marshalled data. For backwards compatibility, a dispatch + function can be provided as an argument (see comment in + SimpleXMLRPCRequestHandler.do_POST) but overriding the + existing method through subclassing is the preferred means + of changing method dispatch behavior. + """ + + try: + params, method = loads(data, use_builtin_types=self.use_builtin_types) + + # generate response + if dispatch_method is not None: + response = dispatch_method(method, params) + else: + response = self._dispatch(method, params) + # wrap response in a singleton tuple + response = (response,) + response = dumps(response, methodresponse=1, + allow_none=self.allow_none, encoding=self.encoding) + except Fault as fault: + response = dumps(fault, allow_none=self.allow_none, + encoding=self.encoding) + except: + # report exception back to server + exc_type, exc_value, exc_tb = sys.exc_info() + response = dumps( + Fault(1, "%s:%s" % (exc_type, exc_value)), + encoding=self.encoding, allow_none=self.allow_none, + ) + + return response.encode(self.encoding) + + def system_listMethods(self): + """system.listMethods() => ['add', 'subtract', 'multiple'] + + Returns a list of the methods supported by the server.""" + + methods = set(self.funcs.keys()) + if self.instance is not None: + # Instance can implement _listMethod to return a list of + # methods + if hasattr(self.instance, '_listMethods'): + methods |= set(self.instance._listMethods()) + # if the instance has a _dispatch method then we + # don't have enough information to provide a list + # of methods + elif not hasattr(self.instance, '_dispatch'): + methods |= set(list_public_methods(self.instance)) + return sorted(methods) + + def system_methodSignature(self, method_name): + """system.methodSignature('add') => [double, int, int] + + Returns a list describing the signature of the method. In the + above example, the add method takes two integers as arguments + and returns a double result. 
+ + This server does NOT support system.methodSignature.""" + + # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html + + return 'signatures not supported' + + def system_methodHelp(self, method_name): + """system.methodHelp('add') => "Adds two integers together" + + Returns a string containing documentation for the specified method.""" + + method = None + if method_name in self.funcs: + method = self.funcs[method_name] + elif self.instance is not None: + # Instance can implement _methodHelp to return help for a method + if hasattr(self.instance, '_methodHelp'): + return self.instance._methodHelp(method_name) + # if the instance has a _dispatch method then we + # don't have enough information to provide help + elif not hasattr(self.instance, '_dispatch'): + try: + method = resolve_dotted_attribute( + self.instance, + method_name, + self.allow_dotted_names + ) + except AttributeError: + pass + + # Note that we aren't checking that the method actually + # be a callable object of some kind + if method is None: + return "" + else: + return pydoc.getdoc(method) + + def system_multicall(self, call_list): + """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \ +[[4], ...] + + Allows the caller to package multiple XML-RPC calls into a single + request. + + See http://www.xmlrpc.com/discuss/msgReader$1208 + """ + + results = [] + for call in call_list: + method_name = call['methodName'] + params = call['params'] + + try: + # XXX A marshalling error in any response will fail the entire + # multicall. If someone cares they should fix this. + results.append([self._dispatch(method_name, params)]) + except Fault as fault: + results.append( + {'faultCode' : fault.faultCode, + 'faultString' : fault.faultString} + ) + except: + exc_type, exc_value, exc_tb = sys.exc_info() + results.append( + {'faultCode' : 1, + 'faultString' : "%s:%s" % (exc_type, exc_value)} + ) + return results + + def _dispatch(self, method, params): + """Dispatches the XML-RPC method. + + XML-RPC calls are forwarded to a registered function that + matches the called XML-RPC method name. If no such function + exists then the call is forwarded to the registered instance, + if available. + + If the registered instance has a _dispatch method then that + method will be called with the name of the XML-RPC method and + its parameters as a tuple + e.g. instance._dispatch('add',(2,3)) + + If the registered instance does not have a _dispatch method + then the instance will be searched to find a matching method + and, if found, will be called. + + Methods beginning with an '_' are considered private and will + not be called. + """ + + func = None + try: + # check to see if a matching function has been registered + func = self.funcs[method] + except KeyError: + if self.instance is not None: + # check for a _dispatch method + if hasattr(self.instance, '_dispatch'): + return self.instance._dispatch(method, params) + else: + # call instance method directly + try: + func = resolve_dotted_attribute( + self.instance, + method, + self.allow_dotted_names + ) + except AttributeError: + pass + + if func is not None: + return func(*params) + else: + raise Exception('method "%s" is not supported' % method) + +class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler): + """Simple XML-RPC request handler class. + + Handles all HTTP POST requests and attempts to decode them as + XML-RPC requests. + """ + + # Class attribute listing the accessible path components; + # paths not on this list will result in a 404 error. 
+ rpc_paths = ('/', '/RPC2') + + #if not None, encode responses larger than this, if possible + encode_threshold = 1400 #a common MTU + + #Override form StreamRequestHandler: full buffering of output + #and no Nagle. + wbufsize = -1 + disable_nagle_algorithm = True + + # a re to match a gzip Accept-Encoding + aepattern = re.compile(r""" + \s* ([^\s;]+) \s* #content-coding + (;\s* q \s*=\s* ([0-9\.]+))? #q + """, re.VERBOSE | re.IGNORECASE) + + def accept_encodings(self): + r = {} + ae = self.headers.get("Accept-Encoding", "") + for e in ae.split(","): + match = self.aepattern.match(e) + if match: + v = match.group(3) + v = float(v) if v else 1.0 + r[match.group(1)] = v + return r + + def is_rpc_path_valid(self): + if self.rpc_paths: + return self.path in self.rpc_paths + else: + # If .rpc_paths is empty, just assume all paths are legal + return True + + def do_POST(self): + """Handles the HTTP POST request. + + Attempts to interpret all HTTP POST requests as XML-RPC calls, + which are forwarded to the server's _dispatch method for handling. + """ + + # Check that the path is legal + if not self.is_rpc_path_valid(): + self.report_404() + return + + try: + # Get arguments by reading body of request. + # We read this in chunks to avoid straining + # socket.read(); around the 10 or 15Mb mark, some platforms + # begin to have problems (bug #792570). + max_chunk_size = 10*1024*1024 + size_remaining = int(self.headers["content-length"]) + L = [] + while size_remaining: + chunk_size = min(size_remaining, max_chunk_size) + chunk = self.rfile.read(chunk_size) + if not chunk: + break + L.append(chunk) + size_remaining -= len(L[-1]) + data = b''.join(L) + + data = self.decode_request_content(data) + if data is None: + return #response has been sent + + # In previous versions of SimpleXMLRPCServer, _dispatch + # could be overridden in this class, instead of in + # SimpleXMLRPCDispatcher. To maintain backwards compatibility, + # check to see if a subclass implements _dispatch and dispatch + # using that method if present. 
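+            # (editor's note) self.path is forwarded along with the
+            # payload so MultiPathXMLRPCServer can route the request to
+            # the dispatcher registered for this URL path.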
+ response = self.server._marshaled_dispatch( + data, getattr(self, '_dispatch', None), self.path + ) + except Exception as e: # This should only happen if the module is buggy + # internal error, report as HTTP server error + self.send_response(500) + + # Send information about the exception if requested + if hasattr(self.server, '_send_traceback_header') and \ + self.server._send_traceback_header: + self.send_header("X-exception", str(e)) + trace = traceback.format_exc() + trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII') + self.send_header("X-traceback", trace) + + self.send_header("Content-length", "0") + self.end_headers() + else: + self.send_response(200) + self.send_header("Content-type", "text/xml") + if self.encode_threshold is not None: + if len(response) > self.encode_threshold: + q = self.accept_encodings().get("gzip", 0) + if q: + try: + response = gzip_encode(response) + self.send_header("Content-Encoding", "gzip") + except NotImplementedError: + pass + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + + def decode_request_content(self, data): + #support gzip encoding of request + encoding = self.headers.get("content-encoding", "identity").lower() + if encoding == "identity": + return data + if encoding == "gzip": + try: + return gzip_decode(data) + except NotImplementedError: + self.send_response(501, "encoding %r not supported" % encoding) + except ValueError: + self.send_response(400, "error decoding gzip content") + else: + self.send_response(501, "encoding %r not supported" % encoding) + self.send_header("Content-length", "0") + self.end_headers() + + def report_404 (self): + # Report a 404 error + self.send_response(404) + response = b'No such page' + self.send_header("Content-type", "text/plain") + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + + def log_request(self, code='-', size='-'): + """Selectively log an accepted request.""" + + if self.server.logRequests: + BaseHTTPRequestHandler.log_request(self, code, size) + +class SimpleXMLRPCServer(socketserver.TCPServer, + SimpleXMLRPCDispatcher): + """Simple XML-RPC server. + + Simple XML-RPC server that allows functions and a single instance + to be installed to handle requests. The default implementation + attempts to dispatch XML-RPC calls to the functions or instance + installed in the server. Override the _dispatch method inherited + from SimpleXMLRPCDispatcher to change this behavior. + """ + + allow_reuse_address = True + + # Warning: this is for debugging purposes only! Never set this to True in + # production code, as will be sending out sensitive information (exception + # and stack trace details) when exceptions are raised inside + # SimpleXMLRPCRequestHandler.do_POST + _send_traceback_header = False + + def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, + logRequests=True, allow_none=False, encoding=None, + bind_and_activate=True, use_builtin_types=False): + self.logRequests = logRequests + + SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) + socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate) + + # [Bug #1222790] If possible, set close-on-exec flag; if a + # method spawns a subprocess, the subprocess shouldn't have + # the listening socket open. 
+ if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'): + flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD) + flags |= fcntl.FD_CLOEXEC + fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags) + +class MultiPathXMLRPCServer(SimpleXMLRPCServer): + """Multipath XML-RPC Server + This specialization of SimpleXMLRPCServer allows the user to create + multiple Dispatcher instances and assign them to different + HTTP request paths. This makes it possible to run two or more + 'virtual XML-RPC servers' at the same port. + Make sure that the requestHandler accepts the paths in question. + """ + def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, + logRequests=True, allow_none=False, encoding=None, + bind_and_activate=True, use_builtin_types=False): + + SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none, + encoding, bind_and_activate, use_builtin_types) + self.dispatchers = {} + self.allow_none = allow_none + self.encoding = encoding or 'utf-8' + + def add_dispatcher(self, path, dispatcher): + self.dispatchers[path] = dispatcher + return dispatcher + + def get_dispatcher(self, path): + return self.dispatchers[path] + + def _marshaled_dispatch(self, data, dispatch_method = None, path = None): + try: + response = self.dispatchers[path]._marshaled_dispatch( + data, dispatch_method, path) + except: + # report low level exception back to server + # (each dispatcher should have handled their own + # exceptions) + exc_type, exc_value = sys.exc_info()[:2] + response = dumps( + Fault(1, "%s:%s" % (exc_type, exc_value)), + encoding=self.encoding, allow_none=self.allow_none) + response = response.encode(self.encoding) + return response + +class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher): + """Simple handler for XML-RPC data passed through CGI.""" + + def __init__(self, allow_none=False, encoding=None, use_builtin_types=False): + SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types) + + def handle_xmlrpc(self, request_text): + """Handle a single XML-RPC request""" + + response = self._marshaled_dispatch(request_text) + + print('Content-Type: text/xml') + print('Content-Length: %d' % len(response)) + print() + sys.stdout.flush() + sys.stdout.buffer.write(response) + sys.stdout.buffer.flush() + + def handle_get(self): + """Handle a single HTTP GET request. + + Default implementation indicates an error because + XML-RPC uses the POST method. + """ + + code = 400 + message, explain = BaseHTTPRequestHandler.responses[code] + + response = http_server.DEFAULT_ERROR_MESSAGE % \ + { + 'code' : code, + 'message' : message, + 'explain' : explain + } + response = response.encode('utf-8') + print('Status: %d %s' % (code, message)) + print('Content-Type: %s' % http_server.DEFAULT_ERROR_CONTENT_TYPE) + print('Content-Length: %d' % len(response)) + print() + sys.stdout.flush() + sys.stdout.buffer.write(response) + sys.stdout.buffer.flush() + + def handle_request(self, request_text=None): + """Handle a single XML-RPC request passed through a CGI post method. + + If no XML data is given then it is read from stdin. The resulting + XML-RPC response is printed to stdout along with the correct HTTP + headers. 
+        """
+
+        if request_text is None and \
+            os.environ.get('REQUEST_METHOD', None) == 'GET':
+            self.handle_get()
+        else:
+            # POST data is normally available through stdin
+            try:
+                length = int(os.environ.get('CONTENT_LENGTH', None))
+            except (ValueError, TypeError):
+                length = -1
+            if request_text is None:
+                request_text = sys.stdin.read(length)
+
+            self.handle_xmlrpc(request_text)
+
+
+# -----------------------------------------------------------------------------
+# Self documenting XML-RPC Server.
+
+class ServerHTMLDoc(pydoc.HTMLDoc):
+    """Class used to generate pydoc HTML document for a server"""
+
+    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
+        """Mark up some plain text, given a context of symbols to look for.
+        Each context dictionary maps object names to anchor names."""
+        escape = escape or self.escape
+        results = []
+        here = 0
+
+        # XXX Note that this regular expression does not allow for the
+        # hyperlinking of arbitrary strings being used as method
+        # names. Only methods with names consisting of word characters
+        # and '.'s are hyperlinked.
+        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
+                             r'RFC[- ]?(\d+)|'
+                             r'PEP[- ]?(\d+)|'
+                             r'(self\.)?((?:\w|\.)+))\b')
+        while 1:
+            match = pattern.search(text, here)
+            if not match: break
+            start, end = match.span()
+            results.append(escape(text[here:start]))
+
+            all, scheme, rfc, pep, selfdot, name = match.groups()
+            if scheme:
+                url = escape(all).replace('"', '&quot;')
+                results.append('<a href="%s">%s</a>' % (url, url))
+            elif rfc:
+                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif pep:
+                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
+                results.append('<a href="%s">%s</a>' % (url, escape(all)))
+            elif text[end:end+1] == '(':
+                results.append(self.namelink(name, methods, funcs, classes))
+            elif selfdot:
+                results.append('self.<strong>%s</strong>' % name)
+            else:
+                results.append(self.namelink(name, classes))
+            here = end
+        results.append(escape(text[here:]))
+        return ''.join(results)
+
+    def docroutine(self, object, name, mod=None,
+                   funcs={}, classes={}, methods={}, cl=None):
+        """Produce HTML documentation for a function or method object."""
+
+        anchor = (cl and cl.__name__ or '') + '-' + name
+        note = ''
+
+        title = '<a name="%s"><strong>%s</strong></a>' % (
+            self.escape(anchor), self.escape(name))
+
+        if inspect.ismethod(object):
+            args = inspect.getfullargspec(object)
+            # exclude the argument bound to the instance, it will be
+            # confusing to the non-Python user
+            argspec = inspect.formatargspec (
+                    args.args[1:],
+                    args.varargs,
+                    args.varkw,
+                    args.defaults,
+                    annotations=args.annotations,
+                    formatvalue=self.formatvalue
+                )
+        elif inspect.isfunction(object):
+            args = inspect.getfullargspec(object)
+            argspec = inspect.formatargspec(
+                args.args, args.varargs, args.varkw, args.defaults,
+                annotations=args.annotations,
+                formatvalue=self.formatvalue)
+        else:
+            argspec = '(...)'
+
+        if isinstance(object, tuple):
+            argspec = object[0] or argspec
+            docstring = object[1] or ""
+        else:
+            docstring = pydoc.getdoc(object)
+
+        decl = title + argspec + (note and self.grey(
+               '<font face="helvetica, arial">%s</font>' % note))
+
+        doc = self.markup(
+            docstring, self.preformat, funcs, classes, methods)
+        doc = doc and '<dd><tt>%s</tt></dd>' % doc
+        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
+
+    def docserver(self, server_name, package_documentation, methods):
+        """Produce HTML documentation for an XML-RPC server."""
+
+        fdict = {}
+        for key, value in methods.items():
+            fdict[key] = '#-' + key
+            fdict[value] = fdict[key]
+
+        server_name = self.escape(server_name)
+        head = '<big><big><strong>%s</strong></big></big>' % server_name
+        result = self.heading(head, '#ffffff', '#7799ee')
+
+        doc = self.markup(package_documentation, self.preformat, fdict)
+        doc = doc and '<tt>%s</tt>' % doc
+        result = result + '<p>%s</p>\n' % doc
+
+        contents = []
+        method_items = sorted(methods.items())
+        for key, value in method_items:
+            contents.append(self.docroutine(value, key, funcs=fdict))
+        result = result + self.bigsection(
+            'Methods', '#ffffff', '#eeaa77', ''.join(contents))
+
+        return result
+
+class XMLRPCDocGenerator(object):
+    """Generates documentation for an XML-RPC server.
+
+    This class is designed as mix-in and should not
+    be constructed directly.
+    """
+
+    def __init__(self):
+        # setup variables used for HTML documentation
+        self.server_name = 'XML-RPC Server Documentation'
+        self.server_documentation = \
+            "This server exports the following methods through the XML-RPC "\
+            "protocol."
+        self.server_title = 'XML-RPC Server Documentation'
+
+    def set_server_title(self, server_title):
+        """Set the HTML title of the generated server documentation"""
+
+        self.server_title = server_title
+
+    def set_server_name(self, server_name):
+        """Set the name of the generated HTML server documentation"""
+
+        self.server_name = server_name
+
+    def set_server_documentation(self, server_documentation):
+        """Set the documentation string for the entire server."""
+
+        self.server_documentation = server_documentation
+
+    def generate_html_documentation(self):
+        """generate_html_documentation() => html documentation for the server
+
+        Generates HTML documentation for the server using introspection for
+        installed functions and instances that do not implement the
+        _dispatch method. Alternatively, instances can choose to implement
+        the _get_method_argstring(method_name) method to provide the
+        argument string used in the documentation and the
+        _methodHelp(method_name) method to provide the help text used
+        in the documentation."""
+
+        methods = {}
+
+        for method_name in self.system_listMethods():
+            if method_name in self.funcs:
+                method = self.funcs[method_name]
+            elif self.instance is not None:
+                method_info = [None, None] # argspec, documentation
+                if hasattr(self.instance, '_get_method_argstring'):
+                    method_info[0] = self.instance._get_method_argstring(method_name)
+                if hasattr(self.instance, '_methodHelp'):
+                    method_info[1] = self.instance._methodHelp(method_name)
+
+                method_info = tuple(method_info)
+                if method_info != (None, None):
+                    method = method_info
+                elif not hasattr(self.instance, '_dispatch'):
+                    try:
+                        method = resolve_dotted_attribute(
+                                    self.instance,
+                                    method_name
+                                    )
+                    except AttributeError:
+                        method = method_info
+                else:
+                    method = method_info
+            else:
+                assert 0, "Could not find method in self.functions and no "\
+                          "instance installed"
+
+            methods[method_name] = method
+
+        documenter = ServerHTMLDoc()
+        documentation = documenter.docserver(
+                                self.server_name,
+                                self.server_documentation,
+                                methods
+                            )
+
+        return documenter.page(self.server_title, documentation)
+
+class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
+    """XML-RPC and documentation request handler class.
+
+    Handles all HTTP POST requests and attempts to decode them as
+    XML-RPC requests.
+
+    Handles all HTTP GET requests and interprets them as requests
+    for documentation.
+    """
+
+    def do_GET(self):
+        """Handles the HTTP GET request.
+
+        Interpret all HTTP GET requests as requests for server
+        documentation.
+ """ + # Check that the path is legal + if not self.is_rpc_path_valid(): + self.report_404() + return + + response = self.server.generate_html_documentation().encode('utf-8') + self.send_response(200) + self.send_header("Content-type", "text/html") + self.send_header("Content-length", str(len(response))) + self.end_headers() + self.wfile.write(response) + +class DocXMLRPCServer( SimpleXMLRPCServer, + XMLRPCDocGenerator): + """XML-RPC and HTML documentation server. + + Adds the ability to serve server documentation to the capabilities + of SimpleXMLRPCServer. + """ + + def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler, + logRequests=True, allow_none=False, encoding=None, + bind_and_activate=True, use_builtin_types=False): + SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, + allow_none, encoding, bind_and_activate, + use_builtin_types) + XMLRPCDocGenerator.__init__(self) + +class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler, + XMLRPCDocGenerator): + """Handler for XML-RPC data and documentation requests passed through + CGI""" + + def handle_get(self): + """Handles the HTTP GET request. + + Interpret all HTTP GET requests as requests for server + documentation. + """ + + response = self.generate_html_documentation().encode('utf-8') + + print('Content-Type: text/html') + print('Content-Length: %d' % len(response)) + print() + sys.stdout.flush() + sys.stdout.buffer.write(response) + sys.stdout.buffer.flush() + + def __init__(self): + CGIXMLRPCRequestHandler.__init__(self) + XMLRPCDocGenerator.__init__(self) + + +if __name__ == '__main__': + import datetime + + class ExampleService: + def getData(self): + return '42' + + class currentTime: + @staticmethod + def getCurrentTime(): + return datetime.datetime.now() + + server = SimpleXMLRPCServer(("localhost", 8000)) + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_instance(ExampleService(), allow_dotted_names=True) + server.register_multicall_functions() + print('Serving XML-RPC on localhost port 8000') + print('It is advisable to run this example server within a secure, closed network.') + try: + server.serve_forever() + except KeyboardInterrupt: + print("\nKeyboard interrupt received, exiting.") + server.server_close() + sys.exit(0) diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/__init__.py b/pype/modules/ftrack/python2_vendor/future/builtins/__init__.py new file mode 100644 index 0000000000..8bc1649d2f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/builtins/__init__.py @@ -0,0 +1,51 @@ +""" +A module that brings in equivalents of the new and modified Python 3 +builtins into Py2. Has no effect on Py3. + +See the docs `here `_ +(``docs/what-else.rst``) for more information. + +""" + +from future.builtins.iterators import (filter, map, zip) +# The isinstance import is no longer needed. We provide it only for +# backward-compatibility with future v0.8.2. It will be removed in future v1.0. 
+from future.builtins.misc import (ascii, chr, hex, input, isinstance, next, + oct, open, pow, round, super, max, min) +from future.utils import PY3 + +if PY3: + import builtins + bytes = builtins.bytes + dict = builtins.dict + int = builtins.int + list = builtins.list + object = builtins.object + range = builtins.range + str = builtins.str + __all__ = [] +else: + from future.types import (newbytes as bytes, + newdict as dict, + newint as int, + newlist as list, + newobject as object, + newrange as range, + newstr as str) +from future import utils + + +if not utils.PY3: + # We only import names that shadow the builtins on Py2. No other namespace + # pollution on Py2. + + # Only shadow builtins on Py2; no new names + __all__ = ['filter', 'map', 'zip', + 'ascii', 'chr', 'hex', 'input', 'next', 'oct', 'open', 'pow', + 'round', 'super', + 'bytes', 'dict', 'int', 'list', 'object', 'range', 'str', 'max', 'min' + ] + +else: + # No namespace pollution on Py3 + __all__ = [] diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/disabled.py b/pype/modules/ftrack/python2_vendor/future/builtins/disabled.py new file mode 100644 index 0000000000..f6d6ea9b80 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/builtins/disabled.py @@ -0,0 +1,66 @@ +""" +This disables builtin functions (and one exception class) which are +removed from Python 3.3. + +This module is designed to be used like this:: + + from future.builtins.disabled import * + +This disables the following obsolete Py2 builtin functions:: + + apply, cmp, coerce, execfile, file, input, long, + raw_input, reduce, reload, unicode, xrange + +We don't hack __builtin__, which is very fragile because it contaminates +imported modules too. Instead, we just create new functions with +the same names as the obsolete builtins from Python 2 which raise +NameError exceptions when called. + +Note that both ``input()`` and ``raw_input()`` are among the disabled +functions (in this module). Although ``input()`` exists as a builtin in +Python 3, the Python 2 ``input()`` builtin is unsafe to use because it +can lead to shell injection. Therefore we shadow it by default upon ``from +future.builtins.disabled import *``, in case someone forgets to import our +replacement ``input()`` somehow and expects Python 3 semantics. + +See the ``future.builtins.misc`` module for a working version of +``input`` with Python 3 semantics. + +(Note that callable() is not among the functions disabled; this was +reintroduced into Python 3.2.) + +This exception class is also disabled: + + StandardError + +""" + +from __future__ import division, absolute_import, print_function + +from future import utils + + +OBSOLETE_BUILTINS = ['apply', 'chr', 'cmp', 'coerce', 'execfile', 'file', + 'input', 'long', 'raw_input', 'reduce', 'reload', + 'unicode', 'xrange', 'StandardError'] + + +def disabled_function(name): + ''' + Returns a function that cannot be called + ''' + def disabled(*args, **kwargs): + ''' + A function disabled by the ``future`` module. This function is + no longer a builtin in Python 3. 
+        '''
+        raise NameError('obsolete Python 2 builtin {0} is disabled'.format(name))
+    return disabled
+
+
+if not utils.PY3:
+    for fname in OBSOLETE_BUILTINS:
+        locals()[fname] = disabled_function(fname)
+    __all__ = OBSOLETE_BUILTINS
+else:
+    __all__ = []
diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/iterators.py b/pype/modules/ftrack/python2_vendor/future/builtins/iterators.py
new file mode 100644
index 0000000000..dff651e0f4
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/builtins/iterators.py
@@ -0,0 +1,52 @@
+"""
+This module is designed to be used as follows::
+
+    from future.builtins.iterators import *
+
+And then, for example::
+
+    for i in range(10**15):
+        pass
+
+    for (a, b) in zip(range(10**15), range(-10**15, 0)):
+        pass
+
+Note that this is standard Python 3 code, plus some imports that do
+nothing on Python 3.
+
+The iterators this brings in are::
+
+- ``range``
+- ``filter``
+- ``map``
+- ``zip``
+
+On Python 2, ``range`` is a pure-Python backport of Python 3's ``range``
+iterator with slicing support. The other iterators (``filter``, ``map``,
+``zip``) are from the ``itertools`` module on Python 2. On Python 3 these
+are available in the module namespace but not exported for * imports via
+__all__ (zero namespace pollution).
+
+Note that these are also available in the standard library
+``future_builtins`` module on Python 2 -- but not Python 3, so using
+the standard library version is not portable, nor anywhere near complete.
+"""
+
+from __future__ import division, absolute_import, print_function
+
+import itertools
+from future import utils
+
+if not utils.PY3:
+    filter = itertools.ifilter
+    map = itertools.imap
+    from future.types import newrange as range
+    zip = itertools.izip
+    __all__ = ['filter', 'map', 'range', 'zip']
+else:
+    import builtins
+    filter = builtins.filter
+    map = builtins.map
+    range = builtins.range
+    zip = builtins.zip
+    __all__ = []
diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/misc.py b/pype/modules/ftrack/python2_vendor/future/builtins/misc.py
new file mode 100644
index 0000000000..f86ce5f342
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/builtins/misc.py
@@ -0,0 +1,135 @@
+"""
+A module that brings in equivalents of various modified Python 3 builtins
+into Py2. Has no effect on Py3.
+
+The builtin functions are:
+
+- ``ascii`` (from Py2's future_builtins module)
+- ``hex`` (from Py2's future_builtins module)
+- ``oct`` (from Py2's future_builtins module)
+- ``chr`` (equivalent to ``unichr`` on Py2)
+- ``input`` (equivalent to ``raw_input`` on Py2)
+- ``next`` (calls ``__next__`` if it exists, else ``next`` method)
+- ``open`` (equivalent to io.open on Py2)
+- ``super`` (backport of Py3's magic zero-argument super() function)
+- ``round`` (new "Banker's Rounding" behaviour from Py3)
+- ``max`` (new default option from Py3.4)
+- ``min`` (new default option from Py3.4)
+
+``isinstance`` is also currently exported for backwards compatibility
+with v0.8.2, although this has been deprecated since v0.9.
+
+
+input()
+-------
+Like the new ``input()`` function from Python 3 (without eval()), except
+that it returns bytes. Equivalent to Python 2's ``raw_input()``.
+
+Warning: By default, importing this module *removes* the old Python 2
+input() function entirely from ``__builtin__`` for safety. This is
+because forgetting to import the new ``input`` from ``future`` might
+otherwise lead to a security vulnerability (shell injection) on Python 2.
+
+To restore it, you can retrieve it yourself from
+``__builtin__._old_input``.
+
+Fortunately, ``input()`` seems to be seldom used in the wild in Python
+2...
+
+"""
+
+from future import utils
+
+
+if utils.PY2:
+    from io import open
+    from future_builtins import ascii, oct, hex
+    from __builtin__ import unichr as chr, pow as _builtin_pow
+    import __builtin__
+
+    # Only for backward compatibility with future v0.8.2:
+    isinstance = __builtin__.isinstance
+
+    # Warning: Python 2's input() is unsafe and MUST not be able to be used
+    # accidentally by someone who expects Python 3 semantics but forgets
+    # to import it on Python 2. Versions of ``future`` prior to 0.11
+    # deleted it from __builtin__. Now we keep it in __builtin__ but shadow
+    # the name like all others. Just be sure to import ``input``.
+
+    input = raw_input
+
+    from future.builtins.newnext import newnext as next
+    from future.builtins.newround import newround as round
+    from future.builtins.newsuper import newsuper as super
+    from future.builtins.new_min_max import newmax as max
+    from future.builtins.new_min_max import newmin as min
+    from future.types.newint import newint
+
+    _SENTINEL = object()
+
+    def pow(x, y, z=_SENTINEL):
+        """
+        pow(x, y[, z]) -> number
+
+        With two arguments, equivalent to x**y. With three arguments,
+        equivalent to (x**y) % z, but may be more efficient (e.g. for ints).
+        """
+        # Handle newints
+        if isinstance(x, newint):
+            x = long(x)
+        if isinstance(y, newint):
+            y = long(y)
+        if isinstance(z, newint):
+            z = long(z)
+
+        try:
+            if z == _SENTINEL:
+                return _builtin_pow(x, y)
+            else:
+                return _builtin_pow(x, y, z)
+        except ValueError:
+            if z == _SENTINEL:
+                return _builtin_pow(x+0j, y)
+            else:
+                return _builtin_pow(x+0j, y, z)
+
+
+    # ``future`` doesn't support Py3.0/3.1. If we ever did, we'd add this:
+    #     callable = __builtin__.callable
+
+    __all__ = ['ascii', 'chr', 'hex', 'input', 'isinstance', 'next', 'oct',
+               'open', 'pow', 'round', 'super', 'max', 'min']
+
+else:
+    import builtins
+    ascii = builtins.ascii
+    chr = builtins.chr
+    hex = builtins.hex
+    input = builtins.input
+    next = builtins.next
+    # Only for backward compatibility with future v0.8.2:
+    isinstance = builtins.isinstance
+    oct = builtins.oct
+    open = builtins.open
+    pow = builtins.pow
+    round = builtins.round
+    super = builtins.super
+    if utils.PY34_PLUS:
+        max = builtins.max
+        min = builtins.min
+        __all__ = []
+    else:
+        from future.builtins.new_min_max import newmax as max
+        from future.builtins.new_min_max import newmin as min
+        __all__ = ['min', 'max']
+
+    # The callable() function was removed from Py3.0 and 3.1 and
+    # reintroduced into Py3.2+. ``future`` doesn't support Py3.0/3.1.
If we ever + # did, we'd add this: + # try: + # callable = builtins.callable + # except AttributeError: + # # Definition from Pandas + # def callable(obj): + # return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + # __all__.append('callable') diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/new_min_max.py b/pype/modules/ftrack/python2_vendor/future/builtins/new_min_max.py new file mode 100644 index 0000000000..6f0c2a86fe --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/builtins/new_min_max.py @@ -0,0 +1,59 @@ +import itertools + +from future import utils +if utils.PY2: + from __builtin__ import max as _builtin_max, min as _builtin_min +else: + from builtins import max as _builtin_max, min as _builtin_min + +_SENTINEL = object() + + +def newmin(*args, **kwargs): + return new_min_max(_builtin_min, *args, **kwargs) + + +def newmax(*args, **kwargs): + return new_min_max(_builtin_max, *args, **kwargs) + + +def new_min_max(_builtin_func, *args, **kwargs): + """ + To support the argument "default" introduced in python 3.4 for min and max + :param _builtin_func: builtin min or builtin max + :param args: + :param kwargs: + :return: returns the min or max based on the arguments passed + """ + + for key, _ in kwargs.items(): + if key not in set(['key', 'default']): + raise TypeError('Illegal argument %s', key) + + if len(args) == 0: + raise TypeError + + if len(args) != 1 and kwargs.get('default', _SENTINEL) is not _SENTINEL: + raise TypeError + + if len(args) == 1: + iterator = iter(args[0]) + try: + first = next(iterator) + except StopIteration: + if kwargs.get('default', _SENTINEL) is not _SENTINEL: + return kwargs.get('default') + else: + raise ValueError('{}() arg is an empty sequence'.format(_builtin_func.__name__)) + else: + iterator = itertools.chain([first], iterator) + if kwargs.get('key') is not None: + return _builtin_func(iterator, key=kwargs.get('key')) + else: + return _builtin_func(iterator) + + if len(args) > 1: + if kwargs.get('key') is not None: + return _builtin_func(args, key=kwargs.get('key')) + else: + return _builtin_func(args) diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/newnext.py b/pype/modules/ftrack/python2_vendor/future/builtins/newnext.py new file mode 100644 index 0000000000..097638ac11 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/builtins/newnext.py @@ -0,0 +1,70 @@ +''' +This module provides a newnext() function in Python 2 that mimics the +behaviour of ``next()`` in Python 3, falling back to Python 2's behaviour for +compatibility if this fails. + +``newnext(iterator)`` calls the iterator's ``__next__()`` method if it exists. If this +doesn't exist, it falls back to calling a ``next()`` method. + +For example: + + >>> class Odds(object): + ... def __init__(self, start=1): + ... self.value = start - 2 + ... def __next__(self): # note the Py3 interface + ... self.value += 2 + ... return self.value + ... def __iter__(self): + ... return self + ... + >>> iterator = Odds() + >>> next(iterator) + 1 + >>> next(iterator) + 3 + +If you are defining your own custom iterator class as above, it is preferable +to explicitly decorate the class with the @implements_iterator decorator from +``future.utils`` as follows: + + >>> @implements_iterator + ... class Odds(object): + ... # etc + ... pass + +This next() function is primarily for consuming iterators defined in Python 3 +code elsewhere that we would like to run on Python 2 or 3. 
+''' + +_builtin_next = next + +_SENTINEL = object() + +def newnext(iterator, default=_SENTINEL): + """ + next(iterator[, default]) + + Return the next item from the iterator. If default is given and the iterator + is exhausted, it is returned instead of raising StopIteration. + """ + + # args = [] + # if default is not _SENTINEL: + # args.append(default) + try: + try: + return iterator.__next__() + except AttributeError: + try: + return iterator.next() + except AttributeError: + raise TypeError("'{0}' object is not an iterator".format( + iterator.__class__.__name__)) + except StopIteration as e: + if default is _SENTINEL: + raise e + else: + return default + + +__all__ = ['newnext'] diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/newround.py b/pype/modules/ftrack/python2_vendor/future/builtins/newround.py new file mode 100644 index 0000000000..394a2c63c4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/builtins/newround.py @@ -0,0 +1,102 @@ +""" +``python-future``: pure Python implementation of Python 3 round(). +""" + +from future.utils import PYPY, PY26, bind_method + +# Use the decimal module for simplicity of implementation (and +# hopefully correctness). +from decimal import Decimal, ROUND_HALF_EVEN + + +def newround(number, ndigits=None): + """ + See Python 3 documentation: uses Banker's Rounding. + + Delegates to the __round__ method if for some reason this exists. + + If not, rounds a number to a given precision in decimal digits (default + 0 digits). This returns an int when called with one argument, + otherwise the same type as the number. ndigits may be negative. + + See the test_round method in future/tests/test_builtins.py for + examples. + """ + return_int = False + if ndigits is None: + return_int = True + ndigits = 0 + if hasattr(number, '__round__'): + return number.__round__(ndigits) + + if ndigits < 0: + raise NotImplementedError('negative ndigits not supported yet') + exponent = Decimal('10') ** (-ndigits) + + if PYPY: + # Work around issue #24: round() breaks on PyPy with NumPy's types + if 'numpy' in repr(type(number)): + number = float(number) + + if isinstance(number, Decimal): + d = number + else: + if not PY26: + d = Decimal.from_float(number).quantize(exponent, + rounding=ROUND_HALF_EVEN) + else: + d = from_float_26(number).quantize(exponent, rounding=ROUND_HALF_EVEN) + + if return_int: + return int(d) + else: + return float(d) + + +### From Python 2.7's decimal.py. Only needed to support Py2.6: + +def from_float_26(f): + """Converts a float to a decimal number, exactly. + + Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). + Since 0.1 is not exactly representable in binary floating point, the + value is stored as the nearest representable value which is + 0x1.999999999999ap-4. The exact equivalent of the value in decimal + is 0.1000000000000000055511151231257827021181583404541015625. 
+ + >>> Decimal.from_float(0.1) + Decimal('0.1000000000000000055511151231257827021181583404541015625') + >>> Decimal.from_float(float('nan')) + Decimal('NaN') + >>> Decimal.from_float(float('inf')) + Decimal('Infinity') + >>> Decimal.from_float(-float('inf')) + Decimal('-Infinity') + >>> Decimal.from_float(-0.0) + Decimal('-0') + + """ + import math as _math + from decimal import _dec_from_triple # only available on Py2.6 and Py2.7 (not 3.3) + + if isinstance(f, (int, long)): # handle integer inputs + return Decimal(f) + if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float + return Decimal(repr(f)) + if _math.copysign(1.0, f) == 1.0: + sign = 0 + else: + sign = 1 + n, d = abs(f).as_integer_ratio() + # int.bit_length() method doesn't exist on Py2.6: + def bit_length(d): + if d != 0: + return len(bin(abs(d))) - 2 + else: + return 0 + k = bit_length(d) - 1 + result = _dec_from_triple(sign, str(n*5**k), -k) + return result + + +__all__ = ['newround'] diff --git a/pype/modules/ftrack/python2_vendor/future/builtins/newsuper.py b/pype/modules/ftrack/python2_vendor/future/builtins/newsuper.py new file mode 100644 index 0000000000..5d3402bd2f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/builtins/newsuper.py @@ -0,0 +1,114 @@ +''' +This module provides a newsuper() function in Python 2 that mimics the +behaviour of super() in Python 3. It is designed to be used as follows: + + from __future__ import division, absolute_import, print_function + from future.builtins import super + +And then, for example: + + class VerboseList(list): + def append(self, item): + print('Adding an item') + super().append(item) # new simpler super() function + +Importing this module on Python 3 has no effect. + +This is based on (i.e. almost identical to) Ryan Kelly's magicsuper +module here: + + https://github.com/rfk/magicsuper.git + +Excerpts from Ryan's docstring: + + "Of course, you can still explicitly pass in the arguments if you want + to do something strange. Sometimes you really do want that, e.g. to + skip over some classes in the method resolution order. + + "How does it work? By inspecting the calling frame to determine the + function object being executed and the object on which it's being + called, and then walking the object's __mro__ chain to find out where + that function was defined. Yuck, but it seems to work..." +''' + +from __future__ import absolute_import +import sys +from types import FunctionType + +from future.utils import PY3, PY26 + + +_builtin_super = super + +_SENTINEL = object() + +def newsuper(typ=_SENTINEL, type_or_obj=_SENTINEL, framedepth=1): + '''Like builtin super(), but capable of magic. + + This acts just like the builtin super() function, but if called + without any arguments it attempts to infer them at runtime. + ''' + # Infer the correct call if used without arguments. + if typ is _SENTINEL: + # We'll need to do some frame hacking. + f = sys._getframe(framedepth) + + try: + # Get the function's first positional argument. + type_or_obj = f.f_locals[f.f_code.co_varnames[0]] + except (IndexError, KeyError,): + raise RuntimeError('super() used in a function with no args') + + try: + # Get the MRO so we can crawl it. + mro = type_or_obj.__mro__ + except (AttributeError, RuntimeError): # see issue #160 + try: + mro = type_or_obj.__class__.__mro__ + except AttributeError: + raise RuntimeError('super() used with a non-newstyle class') + + # A ``for...else`` block? Yes! It's odd, but useful. 
+ # If unfamiliar with for...else, see: + # + # http://psung.blogspot.com/2007/12/for-else-in-python.html + for typ in mro: + # Find the class that owns the currently-executing method. + for meth in typ.__dict__.values(): + # Drill down through any wrappers to the underlying func. + # This handles e.g. classmethod() and staticmethod(). + try: + while not isinstance(meth,FunctionType): + if isinstance(meth, property): + # Calling __get__ on the property will invoke + # user code which might throw exceptions or have + # side effects + meth = meth.fget + else: + try: + meth = meth.__func__ + except AttributeError: + meth = meth.__get__(type_or_obj, typ) + except (AttributeError, TypeError): + continue + if meth.func_code is f.f_code: + break # Aha! Found you. + else: + continue # Not found! Move onto the next class in MRO. + break # Found! Break out of the search loop. + else: + raise RuntimeError('super() called outside a method') + + # Dispatch to builtin super(). + if type_or_obj is not _SENTINEL: + return _builtin_super(typ, type_or_obj) + return _builtin_super(typ) + + +def superm(*args, **kwds): + f = sys._getframe(1) + nm = f.f_code.co_name + return getattr(newsuper(framedepth=2),nm)(*args, **kwds) + + +__all__ = ['newsuper'] diff --git a/pype/modules/ftrack/python2_vendor/future/moves/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/__init__.py new file mode 100644 index 0000000000..0cd60d3d5c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/__init__.py @@ -0,0 +1,8 @@ +# future.moves package +from __future__ import absolute_import +import sys +__future_module__ = True +from future.standard_library import import_top_level_modules + +if sys.version_info[0] >= 3: + import_top_level_modules() diff --git a/pype/modules/ftrack/python2_vendor/future/moves/_dummy_thread.py b/pype/modules/ftrack/python2_vendor/future/moves/_dummy_thread.py new file mode 100644 index 0000000000..688d249bbe --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/_dummy_thread.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from _dummy_thread import * +else: + __future_module__ = True + from dummy_thread import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/_markupbase.py b/pype/modules/ftrack/python2_vendor/future/moves/_markupbase.py new file mode 100644 index 0000000000..f9fb4bbf28 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/_markupbase.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from _markupbase import * +else: + __future_module__ = True + from markupbase import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/_thread.py b/pype/modules/ftrack/python2_vendor/future/moves/_thread.py new file mode 100644 index 0000000000..c68018bb11 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/_thread.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from _thread import * +else: + __future_module__ = True + from thread import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/builtins.py b/pype/modules/ftrack/python2_vendor/future/moves/builtins.py new file mode 100644 index 0000000000..e4b6221d59 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/builtins.py @@ -0,0 +1,10 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from builtins import * +else: + __future_module__ = True + from 
__builtin__ import * + # Overwrite any old definitions with the equivalent future.builtins ones: + from future.builtins import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/collections.py b/pype/modules/ftrack/python2_vendor/future/moves/collections.py new file mode 100644 index 0000000000..664ee6a3d0 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/collections.py @@ -0,0 +1,18 @@ +from __future__ import absolute_import +import sys + +from future.utils import PY2, PY26 +__future_module__ = True + +from collections import * + +if PY2: + from UserDict import UserDict + from UserList import UserList + from UserString import UserString + +if PY26: + from future.backports.misc import OrderedDict, Counter + +if sys.version_info < (3, 3): + from future.backports.misc import ChainMap, _count_elements diff --git a/pype/modules/ftrack/python2_vendor/future/moves/configparser.py b/pype/modules/ftrack/python2_vendor/future/moves/configparser.py new file mode 100644 index 0000000000..33d9cf9533 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/configparser.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import + +from future.utils import PY2 + +if PY2: + from ConfigParser import * +else: + from configparser import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/copyreg.py b/pype/modules/ftrack/python2_vendor/future/moves/copyreg.py new file mode 100644 index 0000000000..9d08cdc5ed --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/copyreg.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + import copyreg, sys + # A "*" import uses Python 3's copyreg.__all__ which does not include + # all public names in the API surface for copyreg, this avoids that + # problem by just making our module _be_ a reference to the actual module. + sys.modules['future.moves.copyreg'] = copyreg +else: + __future_module__ = True + from copy_reg import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/dbm/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/dbm/__init__.py new file mode 100644 index 0000000000..626b406f7f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/dbm/__init__.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from dbm import * +else: + __future_module__ = True + from whichdb import * + from anydbm import * + +# Py3.3's dbm/__init__.py imports ndbm but doesn't expose it via __all__. 
+# In case some (badly written) code depends on dbm.ndbm after import dbm,
+# we simulate this:
+if PY3:
+    from dbm import ndbm
+else:
+    try:
+        from future.moves.dbm import ndbm
+    except ImportError:
+        ndbm = None
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/dbm/dumb.py b/pype/modules/ftrack/python2_vendor/future/moves/dbm/dumb.py
new file mode 100644
index 0000000000..528383f6d8
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/dbm/dumb.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+    from dbm.dumb import *
+else:
+    __future_module__ = True
+    from dumbdbm import *
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/dbm/gnu.py b/pype/modules/ftrack/python2_vendor/future/moves/dbm/gnu.py
new file mode 100644
index 0000000000..68ccf67b9a
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/dbm/gnu.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+    from dbm.gnu import *
+else:
+    __future_module__ = True
+    from gdbm import *
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/dbm/ndbm.py b/pype/modules/ftrack/python2_vendor/future/moves/dbm/ndbm.py
new file mode 100644
index 0000000000..8c6fff8ab7
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/dbm/ndbm.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from future.utils import PY3
+
+if PY3:
+    from dbm.ndbm import *
+else:
+    __future_module__ = True
+    from dbm import *
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/html/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/html/__init__.py
new file mode 100644
index 0000000000..22ed6e7d2c
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/html/__init__.py
@@ -0,0 +1,31 @@
+from __future__ import absolute_import
+from future.utils import PY3
+__future_module__ = True
+
+if PY3:
+    from html import *
+else:
+    # cgi.escape isn't good enough for the single Py3.3 html test to pass.
+    # Define it inline here instead. From the Py3.4 stdlib. Note that the
+    # html.escape() function from the Py3.3 stdlib is not suitable for use on
+    # Py2.x.
+    """
+    General functions for HTML manipulation.
+    """
+
+    def escape(s, quote=True):
+        """
+        Replace special characters "&", "<" and ">" to HTML-safe sequences.
+        If the optional flag quote is true (the default), the quotation mark
+        characters, both double quote (") and single quote (') characters are also
+        translated.
+        """
+        s = s.replace("&", "&amp;") # Must be done first!
+        s = s.replace("<", "&lt;")
+        s = s.replace(">", "&gt;")
+        if quote:
+            s = s.replace('"', "&quot;")
+            s = s.replace('\'', "&#x27;")
+        return s
+
+    __all__ = ['escape']
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/html/entities.py b/pype/modules/ftrack/python2_vendor/future/moves/html/entities.py
new file mode 100644
index 0000000000..56a8860911
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/html/entities.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+    from html.entities import *
+else:
+    __future_module__ = True
+    from htmlentitydefs import *
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/html/parser.py b/pype/modules/ftrack/python2_vendor/future/moves/html/parser.py
new file mode 100644
index 0000000000..a6115b59f0
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/html/parser.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+__future_module__ = True
+
+if PY3:
+    from html.parser import *
+else:
+    from HTMLParser import *
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/http/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/http/__init__.py
new file mode 100644
index 0000000000..917b3d71ac
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/http/__init__.py
@@ -0,0 +1,4 @@
+from future.utils import PY3
+
+if not PY3:
+    __future_module__ = True
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/http/client.py b/pype/modules/ftrack/python2_vendor/future/moves/http/client.py
new file mode 100644
index 0000000000..55f9c9c1ae
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/http/client.py
@@ -0,0 +1,8 @@
+from future.utils import PY3
+
+if PY3:
+    from http.client import *
+else:
+    from httplib import *
+    from httplib import HTTPMessage
+    __future_module__ = True
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/http/cookiejar.py b/pype/modules/ftrack/python2_vendor/future/moves/http/cookiejar.py
new file mode 100644
index 0000000000..ea00df7720
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/http/cookiejar.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+    from http.cookiejar import *
+else:
+    __future_module__ = True
+    from cookielib import *
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/http/cookies.py b/pype/modules/ftrack/python2_vendor/future/moves/http/cookies.py
new file mode 100644
index 0000000000..1b74fe2dd7
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/moves/http/cookies.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+from future.utils import PY3
+
+if PY3:
+    from http.cookies import *
+else:
+    __future_module__ = True
+    from Cookie import *
+    from Cookie import Morsel # left out of __all__ on Py2.7!
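The ``future.moves.http`` shims above are what let Python 3 style imports
resolve on Python 2 once ``future.standard_library.install_aliases()`` (from
this same vendored package) has been called. A minimal usage sketch under that
assumption -- the cookie name and value below are invented for illustration::

    from future import standard_library
    standard_library.install_aliases()

    # On Py2 this resolves through future.moves.http.cookies to the old
    # Cookie module; on Py3 it is simply the stdlib http.cookies.
    from http.cookies import SimpleCookie

    cookie = SimpleCookie()
    cookie['session'] = 'abc123'   # illustrative value only
    print(cookie.output())         # -> Set-Cookie: session=abc123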
diff --git a/pype/modules/ftrack/python2_vendor/future/moves/http/server.py b/pype/modules/ftrack/python2_vendor/future/moves/http/server.py new file mode 100644 index 0000000000..4e75cc1dec --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/http/server.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from http.server import * +else: + __future_module__ = True + from BaseHTTPServer import * + from CGIHTTPServer import * + from SimpleHTTPServer import * + try: + from CGIHTTPServer import _url_collapse_path # needed for a test + except ImportError: + try: + # Python 2.7.0 to 2.7.3 + from CGIHTTPServer import ( + _url_collapse_path_split as _url_collapse_path) + except ImportError: + # Doesn't exist on Python 2.6.x. Ignore it. + pass diff --git a/pype/modules/ftrack/python2_vendor/future/moves/itertools.py b/pype/modules/ftrack/python2_vendor/future/moves/itertools.py new file mode 100644 index 0000000000..e5eb20d5d5 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/itertools.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import + +from itertools import * +try: + zip_longest = izip_longest + filterfalse = ifilterfalse +except NameError: + pass diff --git a/pype/modules/ftrack/python2_vendor/future/moves/pickle.py b/pype/modules/ftrack/python2_vendor/future/moves/pickle.py new file mode 100644 index 0000000000..c53d693925 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/pickle.py @@ -0,0 +1,11 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from pickle import * +else: + __future_module__ = True + try: + from cPickle import * + except ImportError: + from pickle import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/queue.py b/pype/modules/ftrack/python2_vendor/future/moves/queue.py new file mode 100644 index 0000000000..1cb1437d74 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/queue.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from queue import * +else: + __future_module__ = True + from Queue import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/reprlib.py b/pype/modules/ftrack/python2_vendor/future/moves/reprlib.py new file mode 100644 index 0000000000..a313a13a49 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/reprlib.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from reprlib import * +else: + __future_module__ = True + from repr import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/socketserver.py b/pype/modules/ftrack/python2_vendor/future/moves/socketserver.py new file mode 100644 index 0000000000..062e0848de --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/socketserver.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from socketserver import * +else: + __future_module__ = True + from SocketServer import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/subprocess.py b/pype/modules/ftrack/python2_vendor/future/moves/subprocess.py new file mode 100644 index 0000000000..43ffd2ac23 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/subprocess.py @@ -0,0 +1,11 @@ +from __future__ import absolute_import +from future.utils import PY2, PY26 + +from subprocess import * + +if PY2: + __future_module__ = True + from commands import getoutput, 
getstatusoutput + +if PY26: + from future.backports.misc import check_output diff --git a/pype/modules/ftrack/python2_vendor/future/moves/sys.py b/pype/modules/ftrack/python2_vendor/future/moves/sys.py new file mode 100644 index 0000000000..1293bcb070 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/sys.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import + +from future.utils import PY2 + +from sys import * + +if PY2: + from __builtin__ import intern diff --git a/pype/modules/ftrack/python2_vendor/future/moves/test/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/test/__init__.py new file mode 100644 index 0000000000..5cf428b6ec --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/test/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if not PY3: + __future_module__ = True diff --git a/pype/modules/ftrack/python2_vendor/future/moves/test/support.py b/pype/modules/ftrack/python2_vendor/future/moves/test/support.py new file mode 100644 index 0000000000..e9aa0f48f9 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/test/support.py @@ -0,0 +1,10 @@ +from __future__ import absolute_import +from future.standard_library import suspend_hooks +from future.utils import PY3 + +if PY3: + from test.support import * +else: + __future_module__ = True + with suspend_hooks(): + from test.test_support import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/__init__.py new file mode 100644 index 0000000000..e40829663e --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/__init__.py @@ -0,0 +1,27 @@ +from __future__ import absolute_import +from future.utils import PY3 +__future_module__ = True + +if not PY3: + from Tkinter import * + from Tkinter import (_cnfmerge, _default_root, _flatten, + _support_default_root, _test, + _tkinter, _setit) + + try: # >= 2.7.4 + from Tkinter import (_join) + except ImportError: + pass + + try: # >= 2.7.4 + from Tkinter import (_stringify) + except ImportError: + pass + + try: # >= 2.7.9 + from Tkinter import (_splitdict) + except ImportError: + pass + +else: + from tkinter import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/colorchooser.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/colorchooser.py new file mode 100644 index 0000000000..6dde6e8d30 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/colorchooser.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.colorchooser import * +else: + try: + from tkColorChooser import * + except ImportError: + raise ImportError('The tkColorChooser module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/commondialog.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/commondialog.py new file mode 100644 index 0000000000..eb7ae8d607 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/commondialog.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.commondialog import * +else: + try: + from tkCommonDialog import * + except ImportError: + raise ImportError('The tkCommonDialog module is missing. 
Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/constants.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/constants.py new file mode 100644 index 0000000000..ffe098152f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/constants.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.constants import * +else: + try: + from Tkconstants import * + except ImportError: + raise ImportError('The Tkconstants module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/dialog.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/dialog.py new file mode 100644 index 0000000000..113370ca2c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/dialog.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.dialog import * +else: + try: + from Dialog import * + except ImportError: + raise ImportError('The Dialog module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/dnd.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/dnd.py new file mode 100644 index 0000000000..1ab437917d --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/dnd.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.dnd import * +else: + try: + from Tkdnd import * + except ImportError: + raise ImportError('The Tkdnd module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/filedialog.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/filedialog.py new file mode 100644 index 0000000000..973923e2c8 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/filedialog.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.filedialog import * +else: + try: + from FileDialog import * + except ImportError: + raise ImportError('The FileDialog module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/font.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/font.py new file mode 100644 index 0000000000..628f399a35 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/font.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.font import * +else: + try: + from tkFont import * + except ImportError: + raise ImportError('The tkFont module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/messagebox.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/messagebox.py new file mode 100644 index 0000000000..b43d8702f5 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/messagebox.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.messagebox import * +else: + try: + from tkMessageBox import * + except ImportError: + raise ImportError('The tkMessageBox module is missing. 
Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/scrolledtext.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/scrolledtext.py new file mode 100644 index 0000000000..1c69db6067 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/scrolledtext.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.scrolledtext import * +else: + try: + from ScrolledText import * + except ImportError: + raise ImportError('The ScrolledText module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/simpledialog.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/simpledialog.py new file mode 100644 index 0000000000..dba93fbf25 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/simpledialog.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.simpledialog import * +else: + try: + from SimpleDialog import * + except ImportError: + raise ImportError('The SimpleDialog module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/tix.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/tix.py new file mode 100644 index 0000000000..8d1718ad0b --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/tix.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.tix import * +else: + try: + from Tix import * + except ImportError: + raise ImportError('The Tix module is missing. Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/tkinter/ttk.py b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/ttk.py new file mode 100644 index 0000000000..081c1b4956 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/tkinter/ttk.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from future.utils import PY3 + +if PY3: + from tkinter.ttk import * +else: + try: + from ttk import * + except ImportError: + raise ImportError('The ttk module is missing. 
Does your Py2 ' + 'installation include tkinter?') diff --git a/pype/modules/ftrack/python2_vendor/future/moves/urllib/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/urllib/__init__.py new file mode 100644 index 0000000000..5cf428b6ec --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/urllib/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if not PY3: + __future_module__ = True diff --git a/pype/modules/ftrack/python2_vendor/future/moves/urllib/error.py b/pype/modules/ftrack/python2_vendor/future/moves/urllib/error.py new file mode 100644 index 0000000000..7d8ada73f8 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/urllib/error.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import +from future.standard_library import suspend_hooks + +from future.utils import PY3 + +if PY3: + from urllib.error import * +else: + __future_module__ = True + + # We use this method to get at the original Py2 urllib before any renaming magic + # ContentTooShortError = sys.py2_modules['urllib'].ContentTooShortError + + with suspend_hooks(): + from urllib import ContentTooShortError + from urllib2 import URLError, HTTPError diff --git a/pype/modules/ftrack/python2_vendor/future/moves/urllib/parse.py b/pype/modules/ftrack/python2_vendor/future/moves/urllib/parse.py new file mode 100644 index 0000000000..9074b8163f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/urllib/parse.py @@ -0,0 +1,28 @@ +from __future__ import absolute_import +from future.standard_library import suspend_hooks + +from future.utils import PY3 + +if PY3: + from urllib.parse import * +else: + __future_module__ = True + from urlparse import (ParseResult, SplitResult, parse_qs, parse_qsl, + urldefrag, urljoin, urlparse, urlsplit, + urlunparse, urlunsplit) + + # we use this method to get at the original py2 urllib before any renaming + # quote = sys.py2_modules['urllib'].quote + # quote_plus = sys.py2_modules['urllib'].quote_plus + # unquote = sys.py2_modules['urllib'].unquote + # unquote_plus = sys.py2_modules['urllib'].unquote_plus + # urlencode = sys.py2_modules['urllib'].urlencode + # splitquery = sys.py2_modules['urllib'].splitquery + + with suspend_hooks(): + from urllib import (quote, + quote_plus, + unquote, + unquote_plus, + urlencode, + splitquery) diff --git a/pype/modules/ftrack/python2_vendor/future/moves/urllib/request.py b/pype/modules/ftrack/python2_vendor/future/moves/urllib/request.py new file mode 100644 index 0000000000..972aa4ab5d --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/urllib/request.py @@ -0,0 +1,94 @@ +from __future__ import absolute_import + +from future.standard_library import suspend_hooks +from future.utils import PY3 + +if PY3: + from urllib.request import * + # This aren't in __all__: + from urllib.request import (getproxies, + pathname2url, + proxy_bypass, + quote, + request_host, + thishost, + unquote, + url2pathname, + urlcleanup, + urljoin, + urlopen, + urlparse, + urlretrieve, + urlsplit, + urlunparse) + + from urllib.parse import (splitattr, + splithost, + splitpasswd, + splitport, + splitquery, + splittag, + splittype, + splituser, + splitvalue, + to_bytes, + unwrap) +else: + __future_module__ = True + with suspend_hooks(): + from urllib import * + from urllib2 import * + from urlparse import * + + # Rename: + from urllib import toBytes # missing from __all__ on Py2.6 + to_bytes = toBytes + + # from urllib import (pathname2url, + # url2pathname, + # 
getproxies, + # urlretrieve, + # urlcleanup, + # URLopener, + # FancyURLopener, + # proxy_bypass) + + # from urllib2 import ( + # AbstractBasicAuthHandler, + # AbstractDigestAuthHandler, + # BaseHandler, + # CacheFTPHandler, + # FileHandler, + # FTPHandler, + # HTTPBasicAuthHandler, + # HTTPCookieProcessor, + # HTTPDefaultErrorHandler, + # HTTPDigestAuthHandler, + # HTTPErrorProcessor, + # HTTPHandler, + # HTTPPasswordMgr, + # HTTPPasswordMgrWithDefaultRealm, + # HTTPRedirectHandler, + # HTTPSHandler, + # URLError, + # build_opener, + # install_opener, + # OpenerDirector, + # ProxyBasicAuthHandler, + # ProxyDigestAuthHandler, + # ProxyHandler, + # Request, + # UnknownHandler, + # urlopen, + # ) + + # from urlparse import ( + # urldefrag + # urljoin, + # urlparse, + # urlunparse, + # urlsplit, + # urlunsplit, + # parse_qs, + # parse_q" + # ) diff --git a/pype/modules/ftrack/python2_vendor/future/moves/urllib/response.py b/pype/modules/ftrack/python2_vendor/future/moves/urllib/response.py new file mode 100644 index 0000000000..a287ae2833 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/urllib/response.py @@ -0,0 +1,12 @@ +from future import standard_library +from future.utils import PY3 + +if PY3: + from urllib.response import * +else: + __future_module__ = True + with standard_library.suspend_hooks(): + from urllib import (addbase, + addclosehook, + addinfo, + addinfourl) diff --git a/pype/modules/ftrack/python2_vendor/future/moves/urllib/robotparser.py b/pype/modules/ftrack/python2_vendor/future/moves/urllib/robotparser.py new file mode 100644 index 0000000000..0dc8f5715c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/urllib/robotparser.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from urllib.robotparser import * +else: + __future_module__ = True + from robotparser import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/winreg.py b/pype/modules/ftrack/python2_vendor/future/moves/winreg.py new file mode 100644 index 0000000000..c8b147568c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/winreg.py @@ -0,0 +1,8 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from winreg import * +else: + __future_module__ = True + from _winreg import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/__init__.py b/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/client.py b/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/client.py new file mode 100644 index 0000000000..4708cf8992 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/client.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from xmlrpc.client import * +else: + from xmlrpclib import * diff --git a/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/server.py b/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/server.py new file mode 100644 index 0000000000..1a8af3454b --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/moves/xmlrpc/server.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import +from future.utils import PY3 + +if PY3: + from xmlrpc.server import * +else: + from xmlrpclib import * diff --git a/pype/modules/ftrack/python2_vendor/future/standard_library/__init__.py 
b/pype/modules/ftrack/python2_vendor/future/standard_library/__init__.py
new file mode 100644
index 0000000000..cff02f9594
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/standard_library/__init__.py
@@ -0,0 +1,815 @@
+"""
+Python 3 reorganized the standard library (PEP 3108). This module exposes
+several standard library modules to Python 2 under their new Python 3
+names.
+
+It is designed to be used as follows::
+
+    from future import standard_library
+    standard_library.install_aliases()
+
+And then these normal Py3 imports work on both Py3 and Py2::
+
+    import builtins
+    import copyreg
+    import queue
+    import reprlib
+    import socketserver
+    import winreg    # on Windows only
+    import test.support
+    import html, html.parser, html.entities
+    import http, http.client, http.server
+    import http.cookies, http.cookiejar
+    import urllib.parse, urllib.request, urllib.response, urllib.error, urllib.robotparser
+    import xmlrpc.client, xmlrpc.server
+
+    import _thread
+    import _dummy_thread
+    import _markupbase
+
+    from itertools import filterfalse, zip_longest
+    from sys import intern
+    from collections import UserDict, UserList, UserString
+    from collections import OrderedDict, Counter, ChainMap    # even on Py2.6
+    from subprocess import getoutput, getstatusoutput
+    from subprocess import check_output    # even on Py2.6
+
+(The renamed modules and functions are still available under their old
+names on Python 2.)
+
+This is a cleaner alternative to this idiom (see
+http://docs.pythonsprints.com/python3_porting/py-porting.html)::
+
+    try:
+        import queue
+    except ImportError:
+        import Queue as queue
+
+
+Limitations
+-----------
+We don't currently support these modules, but would like to::
+
+    import dbm
+    import dbm.dumb
+    import dbm.gnu
+    import collections.abc    # on Py33
+    import pickle    # should (optionally) bring in cPickle on Python 2
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+import logging
+import imp
+import contextlib
+import types
+import copy
+import os
+
+# Make a dedicated logger; leave the root logger to be configured
+# by the application.
+flog = logging.getLogger('future_stdlib')
+_formatter = logging.Formatter(logging.BASIC_FORMAT)
+_handler = logging.StreamHandler()
+_handler.setFormatter(_formatter)
+flog.addHandler(_handler)
+flog.setLevel(logging.WARN)
+
+from future.utils import PY2, PY3
+
+# The modules that are defined under the same names on Py3 but with
+# different contents in a significant way (e.g.
submodules) are: +# pickle (fast one) +# dbm +# urllib +# test +# email + +REPLACED_MODULES = set(['test', 'urllib', 'pickle', 'dbm']) # add email and dbm when we support it + +# The following module names are not present in Python 2.x, so they cause no +# potential clashes between the old and new names: +# http +# html +# tkinter +# xmlrpc +# Keys: Py2 / real module names +# Values: Py3 / simulated module names +RENAMES = { + # 'cStringIO': 'io', # there's a new io module in Python 2.6 + # that provides StringIO and BytesIO + # 'StringIO': 'io', # ditto + # 'cPickle': 'pickle', + '__builtin__': 'builtins', + 'copy_reg': 'copyreg', + 'Queue': 'queue', + 'future.moves.socketserver': 'socketserver', + 'ConfigParser': 'configparser', + 'repr': 'reprlib', + # 'FileDialog': 'tkinter.filedialog', + # 'tkFileDialog': 'tkinter.filedialog', + # 'SimpleDialog': 'tkinter.simpledialog', + # 'tkSimpleDialog': 'tkinter.simpledialog', + # 'tkColorChooser': 'tkinter.colorchooser', + # 'tkCommonDialog': 'tkinter.commondialog', + # 'Dialog': 'tkinter.dialog', + # 'Tkdnd': 'tkinter.dnd', + # 'tkFont': 'tkinter.font', + # 'tkMessageBox': 'tkinter.messagebox', + # 'ScrolledText': 'tkinter.scrolledtext', + # 'Tkconstants': 'tkinter.constants', + # 'Tix': 'tkinter.tix', + # 'ttk': 'tkinter.ttk', + # 'Tkinter': 'tkinter', + '_winreg': 'winreg', + 'thread': '_thread', + 'dummy_thread': '_dummy_thread', + # 'anydbm': 'dbm', # causes infinite import loop + # 'whichdb': 'dbm', # causes infinite import loop + # anydbm and whichdb are handled by fix_imports2 + # 'dbhash': 'dbm.bsd', + # 'dumbdbm': 'dbm.dumb', + # 'dbm': 'dbm.ndbm', + # 'gdbm': 'dbm.gnu', + 'future.moves.xmlrpc': 'xmlrpc', + # 'future.backports.email': 'email', # for use by urllib + # 'DocXMLRPCServer': 'xmlrpc.server', + # 'SimpleXMLRPCServer': 'xmlrpc.server', + # 'httplib': 'http.client', + # 'htmlentitydefs' : 'html.entities', + # 'HTMLParser' : 'html.parser', + # 'Cookie': 'http.cookies', + # 'cookielib': 'http.cookiejar', + # 'BaseHTTPServer': 'http.server', + # 'SimpleHTTPServer': 'http.server', + # 'CGIHTTPServer': 'http.server', + # 'future.backports.test': 'test', # primarily for renaming test_support to support + # 'commands': 'subprocess', + # 'urlparse' : 'urllib.parse', + # 'robotparser' : 'urllib.robotparser', + # 'abc': 'collections.abc', # for Py33 + # 'future.utils.six.moves.html': 'html', + # 'future.utils.six.moves.http': 'http', + 'future.moves.html': 'html', + 'future.moves.http': 'http', + # 'future.backports.urllib': 'urllib', + # 'future.utils.six.moves.urllib': 'urllib', + 'future.moves._markupbase': '_markupbase', + } + + +# It is complicated and apparently brittle to mess around with the +# ``sys.modules`` cache in order to support "import urllib" meaning two +# different things (Py2.7 urllib and backported Py3.3-like urllib) in different +# contexts. So we require explicit imports for these modules. +assert len(set(RENAMES.values()) & set(REPLACED_MODULES)) == 0 + + +# Harmless renames that we can insert. 
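+
+# A minimal usage sketch (hypothetical caller code): after install_hooks()
+# below has been called on Python 2, the Py3-style imports resolve through
+# the RENAMES table above; on Python 3 they already work natively.
+#
+#     from future import standard_library
+#     standard_library.install_hooks()
+#
+#     import queue            # Py2's Queue
+#     import configparser     # Py2's ConfigParser
+#     import socketserver     # future.moves.socketserver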
+# These modules need names from elsewhere being added to them: +# subprocess: should provide getoutput and other fns from commands +# module but these fns are missing: getstatus, mk2arg, +# mkarg +# re: needs an ASCII constant that works compatibly with Py3 + +# etc: see lib2to3/fixes/fix_imports.py + +# (New module name, new object name, old module name, old object name) +MOVES = [('collections', 'UserList', 'UserList', 'UserList'), + ('collections', 'UserDict', 'UserDict', 'UserDict'), + ('collections', 'UserString','UserString', 'UserString'), + ('collections', 'ChainMap', 'future.backports.misc', 'ChainMap'), + ('itertools', 'filterfalse','itertools', 'ifilterfalse'), + ('itertools', 'zip_longest','itertools', 'izip_longest'), + ('sys', 'intern','__builtin__', 'intern'), + # The re module has no ASCII flag in Py2, but this is the default. + # Set re.ASCII to a zero constant. stat.ST_MODE just happens to be one + # (and it exists on Py2.6+). + ('re', 'ASCII','stat', 'ST_MODE'), + ('base64', 'encodebytes','base64', 'encodestring'), + ('base64', 'decodebytes','base64', 'decodestring'), + ('subprocess', 'getoutput', 'commands', 'getoutput'), + ('subprocess', 'getstatusoutput', 'commands', 'getstatusoutput'), + ('subprocess', 'check_output', 'future.backports.misc', 'check_output'), + ('math', 'ceil', 'future.backports.misc', 'ceil'), + ('collections', 'OrderedDict', 'future.backports.misc', 'OrderedDict'), + ('collections', 'Counter', 'future.backports.misc', 'Counter'), + ('collections', 'ChainMap', 'future.backports.misc', 'ChainMap'), + ('itertools', 'count', 'future.backports.misc', 'count'), + ('reprlib', 'recursive_repr', 'future.backports.misc', 'recursive_repr'), + ('functools', 'cmp_to_key', 'future.backports.misc', 'cmp_to_key'), + +# This is no use, since "import urllib.request" etc. still fails: +# ('urllib', 'error', 'future.moves.urllib', 'error'), +# ('urllib', 'parse', 'future.moves.urllib', 'parse'), +# ('urllib', 'request', 'future.moves.urllib', 'request'), +# ('urllib', 'response', 'future.moves.urllib', 'response'), +# ('urllib', 'robotparser', 'future.moves.urllib', 'robotparser'), + ] + + +# A minimal example of an import hook: +# class WarnOnImport(object): +# def __init__(self, *args): +# self.module_names = args +# +# def find_module(self, fullname, path=None): +# if fullname in self.module_names: +# self.path = path +# return self +# return None +# +# def load_module(self, name): +# if name in sys.modules: +# return sys.modules[name] +# module_info = imp.find_module(name, self.path) +# module = imp.load_module(name, *module_info) +# sys.modules[name] = module +# flog.warning("Imported deprecated module %s", name) +# return module + + +class RenameImport(object): + """ + A class for import hooks mapping Py3 module names etc. to the Py2 equivalents. + """ + # Different RenameImport classes are created when importing this module from + # different source files. This causes isinstance(hook, RenameImport) checks + # to produce inconsistent results. We add this RENAMER attribute here so + # remove_hooks() and install_hooks() can find instances of these classes + # easily: + RENAMER = True + + def __init__(self, old_to_new): + ''' + Pass in a dictionary-like object mapping from old names to new + names. E.g. 
{'ConfigParser': 'configparser', 'cPickle': 'pickle'} + ''' + self.old_to_new = old_to_new + both = set(old_to_new.keys()) & set(old_to_new.values()) + assert (len(both) == 0 and + len(set(old_to_new.values())) == len(old_to_new.values())), \ + 'Ambiguity in renaming (handler not implemented)' + self.new_to_old = dict((new, old) for (old, new) in old_to_new.items()) + + def find_module(self, fullname, path=None): + # Handles hierarchical importing: package.module.module2 + new_base_names = set([s.split('.')[0] for s in self.new_to_old]) + # Before v0.12: Was: if fullname in set(self.old_to_new) | new_base_names: + if fullname in new_base_names: + return self + return None + + def load_module(self, name): + path = None + if name in sys.modules: + return sys.modules[name] + elif name in self.new_to_old: + # New name. Look up the corresponding old (Py2) name: + oldname = self.new_to_old[name] + module = self._find_and_load_module(oldname) + # module.__future_module__ = True + else: + module = self._find_and_load_module(name) + # In any case, make it available under the requested (Py3) name + sys.modules[name] = module + return module + + def _find_and_load_module(self, name, path=None): + """ + Finds and loads it. But if there's a . in the name, handles it + properly. + """ + bits = name.split('.') + while len(bits) > 1: + # Treat the first bit as a package + packagename = bits.pop(0) + package = self._find_and_load_module(packagename, path) + try: + path = package.__path__ + except AttributeError: + # This could be e.g. moves. + flog.debug('Package {0} has no __path__.'.format(package)) + if name in sys.modules: + return sys.modules[name] + flog.debug('What to do here?') + + name = bits[0] + module_info = imp.find_module(name, path) + return imp.load_module(name, *module_info) + + +class hooks(object): + """ + Acts as a context manager. Saves the state of sys.modules and restores it + after the 'with' block. + + Use like this: + + >>> from future import standard_library + >>> with standard_library.hooks(): + ... import http.client + >>> import requests + + For this to work, http.client will be scrubbed from sys.modules after the + 'with' block. That way the modules imported in the 'with' block will + continue to be accessible in the current namespace but not from any + imported modules (like requests). + """ + def __enter__(self): + # flog.debug('Entering hooks context manager') + self.old_sys_modules = copy.copy(sys.modules) + self.hooks_were_installed = detect_hooks() + # self.scrubbed = scrub_py2_sys_modules() + install_hooks() + return self + + def __exit__(self, *args): + # flog.debug('Exiting hooks context manager') + # restore_sys_modules(self.scrubbed) + if not self.hooks_were_installed: + remove_hooks() + # scrub_future_sys_modules() + +# Sanity check for is_py2_stdlib_module(): We aren't replacing any +# builtin modules names: +if PY2: + assert len(set(RENAMES.values()) & set(sys.builtin_module_names)) == 0 + + +def is_py2_stdlib_module(m): + """ + Tries to infer whether the module m is from the Python 2 standard library. + This may not be reliable on all systems. + """ + if PY3: + return False + if not 'stdlib_path' in is_py2_stdlib_module.__dict__: + stdlib_files = [contextlib.__file__, os.__file__, copy.__file__] + stdlib_paths = [os.path.split(f)[0] for f in stdlib_files] + if not len(set(stdlib_paths)) == 1: + # This seems to happen on travis-ci.org. Very strange. We'll try to + # ignore it. 
+ flog.warn('Multiple locations found for the Python standard ' + 'library: %s' % stdlib_paths) + # Choose the first one arbitrarily + is_py2_stdlib_module.stdlib_path = stdlib_paths[0] + + if m.__name__ in sys.builtin_module_names: + return True + + if hasattr(m, '__file__'): + modpath = os.path.split(m.__file__) + if (modpath[0].startswith(is_py2_stdlib_module.stdlib_path) and + 'site-packages' not in modpath[0]): + return True + + return False + + +def scrub_py2_sys_modules(): + """ + Removes any Python 2 standard library modules from ``sys.modules`` that + would interfere with Py3-style imports using import hooks. Examples are + modules with the same names (like urllib or email). + + (Note that currently import hooks are disabled for modules like these + with ambiguous names anyway ...) + """ + if PY3: + return {} + scrubbed = {} + for modulename in REPLACED_MODULES & set(RENAMES.keys()): + if not modulename in sys.modules: + continue + + module = sys.modules[modulename] + + if is_py2_stdlib_module(module): + flog.debug('Deleting (Py2) {} from sys.modules'.format(modulename)) + scrubbed[modulename] = sys.modules[modulename] + del sys.modules[modulename] + return scrubbed + + +def scrub_future_sys_modules(): + """ + Deprecated. + """ + return {} + +class suspend_hooks(object): + """ + Acts as a context manager. Use like this: + + >>> from future import standard_library + >>> standard_library.install_hooks() + >>> import http.client + >>> # ... + >>> with standard_library.suspend_hooks(): + >>> import requests # incompatible with ``future``'s standard library hooks + + If the hooks were disabled before the context, they are not installed when + the context is left. + """ + def __enter__(self): + self.hooks_were_installed = detect_hooks() + remove_hooks() + # self.scrubbed = scrub_future_sys_modules() + return self + + def __exit__(self, *args): + if self.hooks_were_installed: + install_hooks() + # restore_sys_modules(self.scrubbed) + + +def restore_sys_modules(scrubbed): + """ + Add any previously scrubbed modules back to the sys.modules cache, + but only if it's safe to do so. + """ + clash = set(sys.modules) & set(scrubbed) + if len(clash) != 0: + # If several, choose one arbitrarily to raise an exception about + first = list(clash)[0] + raise ImportError('future module {} clashes with Py2 module' + .format(first)) + sys.modules.update(scrubbed) + + +def install_aliases(): + """ + Monkey-patches the standard library in Py2.6/7 to provide + aliases for better Py3 compatibility. 
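+
+    A sketch of the effect (these names come from the MOVES table above;
+    on Py3 they already exist, so this is a no-op there)::
+
+        from future import standard_library
+        standard_library.install_aliases()
+
+        from subprocess import getoutput       # from commands on Py2
+        from itertools import filterfalse      # ifilterfalse on Py2
+        from collections import OrderedDict    # backported on Py2.6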
+ """ + if PY3: + return + # if hasattr(install_aliases, 'run_already'): + # return + for (newmodname, newobjname, oldmodname, oldobjname) in MOVES: + __import__(newmodname) + # We look up the module in sys.modules because __import__ just returns the + # top-level package: + newmod = sys.modules[newmodname] + # newmod.__future_module__ = True + + __import__(oldmodname) + oldmod = sys.modules[oldmodname] + + obj = getattr(oldmod, oldobjname) + setattr(newmod, newobjname, obj) + + # Hack for urllib so it appears to have the same structure on Py2 as on Py3 + import urllib + from future.backports.urllib import request + from future.backports.urllib import response + from future.backports.urllib import parse + from future.backports.urllib import error + from future.backports.urllib import robotparser + urllib.request = request + urllib.response = response + urllib.parse = parse + urllib.error = error + urllib.robotparser = robotparser + sys.modules['urllib.request'] = request + sys.modules['urllib.response'] = response + sys.modules['urllib.parse'] = parse + sys.modules['urllib.error'] = error + sys.modules['urllib.robotparser'] = robotparser + + # Patch the test module so it appears to have the same structure on Py2 as on Py3 + try: + import test + except ImportError: + pass + try: + from future.moves.test import support + except ImportError: + pass + else: + test.support = support + sys.modules['test.support'] = support + + # Patch the dbm module so it appears to have the same structure on Py2 as on Py3 + try: + import dbm + except ImportError: + pass + else: + from future.moves.dbm import dumb + dbm.dumb = dumb + sys.modules['dbm.dumb'] = dumb + try: + from future.moves.dbm import gnu + except ImportError: + pass + else: + dbm.gnu = gnu + sys.modules['dbm.gnu'] = gnu + try: + from future.moves.dbm import ndbm + except ImportError: + pass + else: + dbm.ndbm = ndbm + sys.modules['dbm.ndbm'] = ndbm + + # install_aliases.run_already = True + + +def install_hooks(): + """ + This function installs the future.standard_library import hook into + sys.meta_path. + """ + if PY3: + return + + install_aliases() + + flog.debug('sys.meta_path was: {0}'.format(sys.meta_path)) + flog.debug('Installing hooks ...') + + # Add it unless it's there already + newhook = RenameImport(RENAMES) + if not detect_hooks(): + sys.meta_path.append(newhook) + flog.debug('sys.meta_path is now: {0}'.format(sys.meta_path)) + + +def enable_hooks(): + """ + Deprecated. Use install_hooks() instead. This will be removed by + ``future`` v1.0. + """ + install_hooks() + + +def remove_hooks(scrub_sys_modules=False): + """ + This function removes the import hook from sys.meta_path. + """ + if PY3: + return + flog.debug('Uninstalling hooks ...') + # Loop backwards, so deleting items keeps the ordering: + for i, hook in list(enumerate(sys.meta_path))[::-1]: + if hasattr(hook, 'RENAMER'): + del sys.meta_path[i] + + # Explicit is better than implicit. In the future the interface should + # probably change so that scrubbing the import hooks requires a separate + # function call. Left as is for now for backward compatibility with + # v0.11.x. + if scrub_sys_modules: + scrub_future_sys_modules() + + +def disable_hooks(): + """ + Deprecated. Use remove_hooks() instead. This will be removed by + ``future`` v1.0. + """ + remove_hooks() + + +def detect_hooks(): + """ + Returns True if the import hooks are installed, False if not. 
+ """ + flog.debug('Detecting hooks ...') + present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path]) + if present: + flog.debug('Detected.') + else: + flog.debug('Not detected.') + return present + + +# As of v0.12, this no longer happens implicitly: +# if not PY3: +# install_hooks() + + +if not hasattr(sys, 'py2_modules'): + sys.py2_modules = {} + +def cache_py2_modules(): + """ + Currently this function is unneeded, as we are not attempting to provide import hooks + for modules with ambiguous names: email, urllib, pickle. + """ + if len(sys.py2_modules) != 0: + return + assert not detect_hooks() + import urllib + sys.py2_modules['urllib'] = urllib + + import email + sys.py2_modules['email'] = email + + import pickle + sys.py2_modules['pickle'] = pickle + + # Not all Python installations have test module. (Anaconda doesn't, for example.) + # try: + # import test + # except ImportError: + # sys.py2_modules['test'] = None + # sys.py2_modules['test'] = test + + # import dbm + # sys.py2_modules['dbm'] = dbm + + +def import_(module_name, backport=False): + """ + Pass a (potentially dotted) module name of a Python 3 standard library + module. This function imports the module compatibly on Py2 and Py3 and + returns the top-level module. + + Example use: + >>> http = import_('http.client') + >>> http = import_('http.server') + >>> urllib = import_('urllib.request') + + Then: + >>> conn = http.client.HTTPConnection(...) + >>> response = urllib.request.urlopen('http://mywebsite.com') + >>> # etc. + + Use as follows: + >>> package_name = import_(module_name) + + On Py3, equivalent to this: + + >>> import module_name + + On Py2, equivalent to this if backport=False: + + >>> from future.moves import module_name + + or to this if backport=True: + + >>> from future.backports import module_name + + except that it also handles dotted module names such as ``http.client`` + The effect then is like this: + + >>> from future.backports import module + >>> from future.backports.module import submodule + >>> module.submodule = submodule + + Note that this would be a SyntaxError in Python: + + >>> from future.backports import http.client + + """ + # Python 2.6 doesn't have importlib in the stdlib, so it requires + # the backported ``importlib`` package from PyPI as a dependency to use + # this function: + import importlib + + if PY3: + return __import__(module_name) + else: + # client.blah = blah + # Then http.client = client + # etc. + if backport: + prefix = 'future.backports' + else: + prefix = 'future.moves' + parts = prefix.split('.') + module_name.split('.') + + modules = [] + for i, part in enumerate(parts): + sofar = '.'.join(parts[:i+1]) + modules.append(importlib.import_module(sofar)) + for i, part in reversed(list(enumerate(parts))): + if i == 0: + break + setattr(modules[i-1], part, modules[i]) + + # Return the next-most top-level module after future.backports / future.moves: + return modules[2] + + +def from_import(module_name, *symbol_names, **kwargs): + """ + Example use: + >>> HTTPConnection = from_import('http.client', 'HTTPConnection') + >>> HTTPServer = from_import('http.server', 'HTTPServer') + >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse') + + Equivalent to this on Py3: + + >>> from module_name import symbol_names[0], symbol_names[1], ... + + and this on Py2: + + >>> from future.moves.module_name import symbol_names[0], ... + + or: + + >>> from future.backports.module_name import symbol_names[0], ... 
+ + except that it also handles dotted module names such as ``http.client``. + """ + + if PY3: + return __import__(module_name) + else: + if 'backport' in kwargs and bool(kwargs['backport']): + prefix = 'future.backports' + else: + prefix = 'future.moves' + parts = prefix.split('.') + module_name.split('.') + module = importlib.import_module(prefix + '.' + module_name) + output = [getattr(module, name) for name in symbol_names] + if len(output) == 1: + return output[0] + else: + return output + + +class exclude_local_folder_imports(object): + """ + A context-manager that prevents standard library modules like configparser + from being imported from the local python-future source folder on Py3. + + (This was need prior to v0.16.0 because the presence of a configparser + folder would otherwise have prevented setuptools from running on Py3. Maybe + it's not needed any more?) + """ + def __init__(self, *args): + assert len(args) > 0 + self.module_names = args + # Disallow dotted module names like http.client: + if any(['.' in m for m in self.module_names]): + raise NotImplementedError('Dotted module names are not supported') + + def __enter__(self): + self.old_sys_path = copy.copy(sys.path) + self.old_sys_modules = copy.copy(sys.modules) + if sys.version_info[0] < 3: + return + # The presence of all these indicates we've found our source folder, + # because `builtins` won't have been installed in site-packages by setup.py: + FUTURE_SOURCE_SUBFOLDERS = ['future', 'past', 'libfuturize', 'libpasteurize', 'builtins'] + + # Look for the future source folder: + for folder in self.old_sys_path: + if all([os.path.exists(os.path.join(folder, subfolder)) + for subfolder in FUTURE_SOURCE_SUBFOLDERS]): + # Found it. Remove it. + sys.path.remove(folder) + + # Ensure we import the system module: + for m in self.module_names: + # Delete the module and any submodules from sys.modules: + # for key in list(sys.modules): + # if key == m or key.startswith(m + '.'): + # try: + # del sys.modules[key] + # except KeyError: + # pass + try: + module = __import__(m, level=0) + except ImportError: + # There's a problem importing the system module. E.g. the + # winreg module is not available except on Windows. + pass + + def __exit__(self, *args): + # Restore sys.path and sys.modules: + sys.path = self.old_sys_path + for m in set(self.old_sys_modules.keys()) - set(sys.modules.keys()): + sys.modules[m] = self.old_sys_modules[m] + +TOP_LEVEL_MODULES = ['builtins', + 'copyreg', + 'html', + 'http', + 'queue', + 'reprlib', + 'socketserver', + 'test', + 'tkinter', + 'winreg', + 'xmlrpc', + '_dummy_thread', + '_markupbase', + '_thread', + ] + +def import_top_level_modules(): + with exclude_local_folder_imports(*TOP_LEVEL_MODULES): + for m in TOP_LEVEL_MODULES: + try: + __import__(m) + except ImportError: # e.g. 
winreg + pass diff --git a/pype/modules/ftrack/python2_vendor/future/tests/__init__.py b/pype/modules/ftrack/python2_vendor/future/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/modules/ftrack/python2_vendor/future/tests/base.py b/pype/modules/ftrack/python2_vendor/future/tests/base.py new file mode 100644 index 0000000000..4ef437baa6 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/tests/base.py @@ -0,0 +1,539 @@ +from __future__ import print_function, absolute_import +import os +import tempfile +import unittest +import sys +import re +import warnings +import io +from textwrap import dedent + +from future.utils import bind_method, PY26, PY3, PY2, PY27 +from future.moves.subprocess import check_output, STDOUT, CalledProcessError + +if PY26: + import unittest2 as unittest + + +def reformat_code(code): + """ + Removes any leading \n and dedents. + """ + if code.startswith('\n'): + code = code[1:] + return dedent(code) + + +def order_future_lines(code): + """ + Returns the code block with any ``__future__`` import lines sorted, and + then any ``future`` import lines sorted, then any ``builtins`` import lines + sorted. + + This only sorts the lines within the expected blocks. + + See test_order_future_lines() for an example. + """ + + # We need .splitlines(keepends=True), which doesn't exist on Py2, + # so we use this instead: + lines = code.split('\n') + + uufuture_line_numbers = [i for i, line in enumerate(lines) + if line.startswith('from __future__ import ')] + + future_line_numbers = [i for i, line in enumerate(lines) + if line.startswith('from future') + or line.startswith('from past')] + + builtins_line_numbers = [i for i, line in enumerate(lines) + if line.startswith('from builtins')] + + assert code.lstrip() == code, ('internal usage error: ' + 'dedent the code before calling order_future_lines()') + + def mymax(numbers): + return max(numbers) if len(numbers) > 0 else 0 + + def mymin(numbers): + return min(numbers) if len(numbers) > 0 else float('inf') + + assert mymax(uufuture_line_numbers) <= mymin(future_line_numbers), \ + 'the __future__ and future imports are out of order' + + # assert mymax(future_line_numbers) <= mymin(builtins_line_numbers), \ + # 'the future and builtins imports are out of order' + + uul = sorted([lines[i] for i in uufuture_line_numbers]) + sorted_uufuture_lines = dict(zip(uufuture_line_numbers, uul)) + + fl = sorted([lines[i] for i in future_line_numbers]) + sorted_future_lines = dict(zip(future_line_numbers, fl)) + + bl = sorted([lines[i] for i in builtins_line_numbers]) + sorted_builtins_lines = dict(zip(builtins_line_numbers, bl)) + + # Replace the old unsorted "from __future__ import ..." lines with the + # new sorted ones: + new_lines = [] + for i in range(len(lines)): + if i in uufuture_line_numbers: + new_lines.append(sorted_uufuture_lines[i]) + elif i in future_line_numbers: + new_lines.append(sorted_future_lines[i]) + elif i in builtins_line_numbers: + new_lines.append(sorted_builtins_lines[i]) + else: + new_lines.append(lines[i]) + return '\n'.join(new_lines) + + +class VerboseCalledProcessError(CalledProcessError): + """ + Like CalledProcessError, but it displays more information (message and + script output) for diagnosing test failures etc. 
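+
+    For example (the command and exit status below are made-up values)::
+
+        err = VerboseCalledProcessError(
+            'stage1 conversion failed', 1, 'futurize --stage1 -w x.py',
+            output=b'')
+        print(err)   # shows the command, exit status, message and output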
+ """ + def __init__(self, msg, returncode, cmd, output=None): + self.msg = msg + self.returncode = returncode + self.cmd = cmd + self.output = output + + def __str__(self): + return ("Command '%s' failed with exit status %d\nMessage: %s\nOutput: %s" + % (self.cmd, self.returncode, self.msg, self.output)) + +class FuturizeError(VerboseCalledProcessError): + pass + +class PasteurizeError(VerboseCalledProcessError): + pass + + +class CodeHandler(unittest.TestCase): + """ + Handy mixin for test classes for writing / reading / futurizing / + running .py files in the test suite. + """ + def setUp(self): + """ + The outputs from the various futurize stages should have the + following headers: + """ + # After stage1: + # TODO: use this form after implementing a fixer to consolidate + # __future__ imports into a single line: + # self.headers1 = """ + # from __future__ import absolute_import, division, print_function + # """ + self.headers1 = reformat_code(""" + from __future__ import absolute_import + from __future__ import division + from __future__ import print_function + """) + + # After stage2 --all-imports: + # TODO: use this form after implementing a fixer to consolidate + # __future__ imports into a single line: + # self.headers2 = """ + # from __future__ import (absolute_import, division, + # print_function, unicode_literals) + # from future import standard_library + # from future.builtins import * + # """ + self.headers2 = reformat_code(""" + from __future__ import absolute_import + from __future__ import division + from __future__ import print_function + from __future__ import unicode_literals + from future import standard_library + standard_library.install_aliases() + from builtins import * + """) + self.interpreters = [sys.executable] + self.tempdir = tempfile.mkdtemp() + os.path.sep + pypath = os.getenv('PYTHONPATH') + if pypath: + self.env = {'PYTHONPATH': os.getcwd() + os.pathsep + pypath} + else: + self.env = {'PYTHONPATH': os.getcwd()} + + def convert(self, code, stages=(1, 2), all_imports=False, from3=False, + reformat=True, run=True, conservative=False): + """ + Converts the code block using ``futurize`` and returns the + resulting code. + + Passing stages=[1] or stages=[2] passes the flag ``--stage1`` or + ``stage2`` to ``futurize``. Passing both stages runs ``futurize`` + with both stages by default. + + If from3 is False, runs ``futurize``, converting from Python 2 to + both 2 and 3. If from3 is True, runs ``pasteurize`` to convert + from Python 3 to both 2 and 3. + + Optionally reformats the code block first using the reformat() function. + + If run is True, runs the resulting code under all Python + interpreters in self.interpreters. + """ + if reformat: + code = reformat_code(code) + self._write_test_script(code) + self._futurize_test_script(stages=stages, all_imports=all_imports, + from3=from3, conservative=conservative) + output = self._read_test_script() + if run: + for interpreter in self.interpreters: + _ = self._run_test_script(interpreter=interpreter) + return output + + def compare(self, output, expected, ignore_imports=True): + """ + Compares whether the code blocks are equal. If not, raises an + exception so the test fails. Ignores any trailing whitespace like + blank lines. + + If ignore_imports is True, passes the code blocks into the + strip_future_imports method. + + If one code block is a unicode string and the other a + byte-string, it assumes the byte-string is encoded as utf-8. 
+ """ + if ignore_imports: + output = self.strip_future_imports(output) + expected = self.strip_future_imports(expected) + if isinstance(output, bytes) and not isinstance(expected, bytes): + output = output.decode('utf-8') + if isinstance(expected, bytes) and not isinstance(output, bytes): + expected = expected.decode('utf-8') + self.assertEqual(order_future_lines(output.rstrip()), + expected.rstrip()) + + def strip_future_imports(self, code): + """ + Strips any of these import lines: + + from __future__ import + from future + from future. + from builtins + + or any line containing: + install_hooks() + or: + install_aliases() + + Limitation: doesn't handle imports split across multiple lines like + this: + + from __future__ import (absolute_import, division, print_function, + unicode_literals) + """ + output = [] + # We need .splitlines(keepends=True), which doesn't exist on Py2, + # so we use this instead: + for line in code.split('\n'): + if not (line.startswith('from __future__ import ') + or line.startswith('from future ') + or line.startswith('from builtins ') + or 'install_hooks()' in line + or 'install_aliases()' in line + # but don't match "from future_builtins" :) + or line.startswith('from future.')): + output.append(line) + return '\n'.join(output) + + def convert_check(self, before, expected, stages=(1, 2), all_imports=False, + ignore_imports=True, from3=False, run=True, + conservative=False): + """ + Convenience method that calls convert() and compare(). + + Reformats the code blocks automatically using the reformat_code() + function. + + If all_imports is passed, we add the appropriate import headers + for the stage(s) selected to the ``expected`` code-block, so they + needn't appear repeatedly in the test code. + + If ignore_imports is True, ignores the presence of any lines + beginning: + + from __future__ import ... + from future import ... + + for the purpose of the comparison. + """ + output = self.convert(before, stages=stages, all_imports=all_imports, + from3=from3, run=run, conservative=conservative) + if all_imports: + headers = self.headers2 if 2 in stages else self.headers1 + else: + headers = '' + + reformatted = reformat_code(expected) + if headers in reformatted: + headers = '' + + self.compare(output, headers + reformatted, + ignore_imports=ignore_imports) + + def unchanged(self, code, **kwargs): + """ + Convenience method to ensure the code is unchanged by the + futurize process. + """ + self.convert_check(code, code, **kwargs) + + def _write_test_script(self, code, filename='mytestscript.py'): + """ + Dedents the given code (a multiline string) and writes it out to + a file in a temporary folder like /tmp/tmpUDCn7x/mytestscript.py. 
+ """ + if isinstance(code, bytes): + code = code.decode('utf-8') + # Be explicit about encoding the temp file as UTF-8 (issue #63): + with io.open(self.tempdir + filename, 'wt', encoding='utf-8') as f: + f.write(dedent(code)) + + def _read_test_script(self, filename='mytestscript.py'): + with io.open(self.tempdir + filename, 'rt', encoding='utf-8') as f: + newsource = f.read() + return newsource + + def _futurize_test_script(self, filename='mytestscript.py', stages=(1, 2), + all_imports=False, from3=False, + conservative=False): + params = [] + stages = list(stages) + if all_imports: + params.append('--all-imports') + if from3: + script = 'pasteurize.py' + else: + script = 'futurize.py' + if stages == [1]: + params.append('--stage1') + elif stages == [2]: + params.append('--stage2') + else: + assert stages == [1, 2] + if conservative: + params.append('--conservative') + # No extra params needed + + # Absolute file path: + fn = self.tempdir + filename + call_args = [sys.executable, script] + params + ['-w', fn] + try: + output = check_output(call_args, stderr=STDOUT, env=self.env) + except CalledProcessError as e: + with open(fn) as f: + msg = ( + 'Error running the command %s\n' + '%s\n' + 'Contents of file %s:\n' + '\n' + '%s') % ( + ' '.join(call_args), + 'env=%s' % self.env, + fn, + '----\n%s\n----' % f.read(), + ) + ErrorClass = (FuturizeError if 'futurize' in script else PasteurizeError) + + if not hasattr(e, 'output'): + # The attribute CalledProcessError.output doesn't exist on Py2.6 + e.output = None + raise ErrorClass(msg, e.returncode, e.cmd, output=e.output) + return output + + def _run_test_script(self, filename='mytestscript.py', + interpreter=sys.executable): + # Absolute file path: + fn = self.tempdir + filename + try: + output = check_output([interpreter, fn], + env=self.env, stderr=STDOUT) + except CalledProcessError as e: + with open(fn) as f: + msg = ( + 'Error running the command %s\n' + '%s\n' + 'Contents of file %s:\n' + '\n' + '%s') % ( + ' '.join([interpreter, fn]), + 'env=%s' % self.env, + fn, + '----\n%s\n----' % f.read(), + ) + if not hasattr(e, 'output'): + # The attribute CalledProcessError.output doesn't exist on Py2.6 + e.output = None + raise VerboseCalledProcessError(msg, e.returncode, e.cmd, output=e.output) + return output + + +# Decorator to skip some tests on Python 2.6 ... +skip26 = unittest.skipIf(PY26, "this test is known to fail on Py2.6") + + +def expectedFailurePY3(func): + if not PY3: + return func + return unittest.expectedFailure(func) + +def expectedFailurePY26(func): + if not PY26: + return func + return unittest.expectedFailure(func) + + +def expectedFailurePY27(func): + if not PY27: + return func + return unittest.expectedFailure(func) + + +def expectedFailurePY2(func): + if not PY2: + return func + return unittest.expectedFailure(func) + + +# Renamed in Py3.3: +if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + +# From Py3.3: +def assertRegex(self, text, expected_regex, msg=None): + """Fail the test unless the text matches the regular expression.""" + if isinstance(expected_regex, (str, unicode)): + assert expected_regex, "expected_regex must not be empty." 
+ expected_regex = re.compile(expected_regex) + if not expected_regex.search(text): + msg = msg or "Regex didn't match" + msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text) + raise self.failureException(msg) + +if not hasattr(unittest.TestCase, 'assertRegex'): + bind_method(unittest.TestCase, 'assertRegex', assertRegex) + +class _AssertRaisesBaseContext(object): + + def __init__(self, expected, test_case, callable_obj=None, + expected_regex=None): + self.expected = expected + self.test_case = test_case + if callable_obj is not None: + try: + self.obj_name = callable_obj.__name__ + except AttributeError: + self.obj_name = str(callable_obj) + else: + self.obj_name = None + if isinstance(expected_regex, (bytes, str)): + expected_regex = re.compile(expected_regex) + self.expected_regex = expected_regex + self.msg = None + + def _raiseFailure(self, standardMsg): + msg = self.test_case._formatMessage(self.msg, standardMsg) + raise self.test_case.failureException(msg) + + def handle(self, name, callable_obj, args, kwargs): + """ + If callable_obj is None, assertRaises/Warns is being used as a + context manager, so check for a 'msg' kwarg and return self. + If callable_obj is not None, call it passing args and kwargs. + """ + if callable_obj is None: + self.msg = kwargs.pop('msg', None) + return self + with self: + callable_obj(*args, **kwargs) + +class _AssertWarnsContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertWarns* methods.""" + + def __enter__(self): + # The __warningregistry__'s need to be in a pristine state for tests + # to work properly. + for v in sys.modules.values(): + if getattr(v, '__warningregistry__', None): + v.__warningregistry__ = {} + self.warnings_manager = warnings.catch_warnings(record=True) + self.warnings = self.warnings_manager.__enter__() + warnings.simplefilter("always", self.expected) + return self + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + first_matching = None + for m in self.warnings: + w = m.message + if not isinstance(w, self.expected): + continue + if first_matching is None: + first_matching = w + if (self.expected_regex is not None and + not self.expected_regex.search(str(w))): + continue + # store warning for later retrieval + self.warning = w + self.filename = m.filename + self.lineno = m.lineno + return + # Now we simply try to choose a helpful failure message + if first_matching is not None: + self._raiseFailure('"{}" does not match "{}"'.format( + self.expected_regex.pattern, str(first_matching))) + if self.obj_name: + self._raiseFailure("{} not triggered by {}".format(exc_name, + self.obj_name)) + else: + self._raiseFailure("{} not triggered".format(exc_name)) + + +def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs): + """Fail unless a warning of class warnClass is triggered + by callable_obj when invoked with arguments args and keyword + arguments kwargs. If a different type of warning is + triggered, it will not be handled: depending on the other + warning filtering rules in effect, it might be silenced, printed + out, or raised as an exception. 
+ + If called with callable_obj omitted or None, will return a + context object used like this:: + + with self.assertWarns(SomeWarning): + do_something() + + An optional keyword argument 'msg' can be provided when assertWarns + is used as a context object. + + The context manager keeps a reference to the first matching + warning as the 'warning' attribute; similarly, the 'filename' + and 'lineno' attributes give you information about the line + of Python code from which the warning was triggered. + This allows you to inspect the warning after the assertion:: + + with self.assertWarns(SomeWarning) as cm: + do_something() + the_warning = cm.warning + self.assertEqual(the_warning.some_attribute, 147) + """ + context = _AssertWarnsContext(expected_warning, self, callable_obj) + return context.handle('assertWarns', callable_obj, args, kwargs) + +if not hasattr(unittest.TestCase, 'assertWarns'): + bind_method(unittest.TestCase, 'assertWarns', assertWarns) diff --git a/pype/modules/ftrack/python2_vendor/future/types/__init__.py b/pype/modules/ftrack/python2_vendor/future/types/__init__.py new file mode 100644 index 0000000000..062507703e --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/__init__.py @@ -0,0 +1,257 @@ +""" +This module contains backports the data types that were significantly changed +in the transition from Python 2 to Python 3. + +- an implementation of Python 3's bytes object (pure Python subclass of + Python 2's builtin 8-bit str type) +- an implementation of Python 3's str object (pure Python subclass of + Python 2's builtin unicode type) +- a backport of the range iterator from Py3 with slicing support + +It is used as follows:: + + from __future__ import division, absolute_import, print_function + from builtins import bytes, dict, int, range, str + +to bring in the new semantics for these functions from Python 3. And +then, for example:: + + b = bytes(b'ABCD') + assert list(b) == [65, 66, 67, 68] + assert repr(b) == "b'ABCD'" + assert [65, 66] in b + + # These raise TypeErrors: + # b + u'EFGH' + # b.split(u'B') + # bytes(b',').join([u'Fred', u'Bill']) + + + s = str(u'ABCD') + + # These raise TypeErrors: + # s.join([b'Fred', b'Bill']) + # s.startswith(b'A') + # b'B' in s + # s.find(b'A') + # s.replace(u'A', b'a') + + # This raises an AttributeError: + # s.decode('utf-8') + + assert repr(s) == 'ABCD' # consistent repr with Py3 (no u prefix) + + + for i in range(10**11)[:10]: + pass + +and:: + + class VerboseList(list): + def append(self, item): + print('Adding an item') + super().append(item) # new simpler super() function + +For more information: +--------------------- + +- future.types.newbytes +- future.types.newdict +- future.types.newint +- future.types.newobject +- future.types.newrange +- future.types.newstr + + +Notes +===== + +range() +------- +``range`` is a custom class that backports the slicing behaviour from +Python 3 (based on the ``xrange`` module by Dan Crosta). See the +``newrange`` module docstring for more details. + + +super() +------- +``super()`` is based on Ryan Kelly's ``magicsuper`` module. See the +``newsuper`` module docstring for more details. + + +round() +------- +Python 3 modifies the behaviour of ``round()`` to use "Banker's Rounding". +See http://stackoverflow.com/a/10825998. See the ``newround`` module +docstring for more details. 
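+
+For example, with the Py3 semantics (which ``from builtins import round``
+aims to match on Py2)::
+
+    >>> round(2.5)   # ties go to the nearest even integer
+    2
+    >>> round(3.5)
+    4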
+ +""" + +from __future__ import absolute_import, division, print_function + +import functools +from numbers import Integral + +from future import utils + + +# Some utility functions to enforce strict type-separation of unicode str and +# bytes: +def disallow_types(argnums, disallowed_types): + """ + A decorator that raises a TypeError if any of the given numbered + arguments is of the corresponding given type (e.g. bytes or unicode + string). + + For example: + + @disallow_types([0, 1], [unicode, bytes]) + def f(a, b): + pass + + raises a TypeError when f is called if a unicode object is passed as + `a` or a bytes object is passed as `b`. + + This also skips over keyword arguments, so + + @disallow_types([0, 1], [unicode, bytes]) + def g(a, b=None): + pass + + doesn't raise an exception if g is called with only one argument a, + e.g.: + + g(b'Byte string') + + Example use: + + >>> class newbytes(object): + ... @disallow_types([1], [unicode]) + ... def __add__(self, other): + ... pass + + >>> newbytes('1234') + u'1234' #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + TypeError: can't concat 'bytes' to (unicode) str + """ + + def decorator(function): + + @functools.wraps(function) + def wrapper(*args, **kwargs): + # These imports are just for this decorator, and are defined here + # to prevent circular imports: + from .newbytes import newbytes + from .newint import newint + from .newstr import newstr + + errmsg = "argument can't be {0}" + for (argnum, mytype) in zip(argnums, disallowed_types): + # Handle the case where the type is passed as a string like 'newbytes'. + if isinstance(mytype, str) or isinstance(mytype, bytes): + mytype = locals()[mytype] + + # Only restrict kw args only if they are passed: + if len(args) <= argnum: + break + + # Here we use type() rather than isinstance() because + # __instancecheck__ is being overridden. E.g. + # isinstance(b'abc', newbytes) is True on Py2. + if type(args[argnum]) == mytype: + raise TypeError(errmsg.format(mytype)) + + return function(*args, **kwargs) + return wrapper + return decorator + + +def no(mytype, argnums=(1,)): + """ + A shortcut for the disallow_types decorator that disallows only one type + (in any position in argnums). + + Example use: + + >>> class newstr(object): + ... @no('bytes') + ... def __add__(self, other): + ... pass + + >>> newstr(u'1234') + b'1234' #doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + TypeError: argument can't be bytes + + The object can also be passed directly, but passing the string helps + to prevent circular import problems. 
+ """ + if isinstance(argnums, Integral): + argnums = (argnums,) + disallowed_types = [mytype] * len(argnums) + return disallow_types(argnums, disallowed_types) + + +def issubset(list1, list2): + """ + Examples: + + >>> issubset([], [65, 66, 67]) + True + >>> issubset([65], [65, 66, 67]) + True + >>> issubset([65, 66], [65, 66, 67]) + True + >>> issubset([65, 67], [65, 66, 67]) + False + """ + n = len(list1) + for startpos in range(len(list2) - n + 1): + if list2[startpos:startpos+n] == list1: + return True + return False + + +if utils.PY3: + import builtins + bytes = builtins.bytes + dict = builtins.dict + int = builtins.int + list = builtins.list + object = builtins.object + range = builtins.range + str = builtins.str + + # The identity mapping + newtypes = {bytes: bytes, + dict: dict, + int: int, + list: list, + object: object, + range: range, + str: str} + + __all__ = ['newtypes'] + +else: + + from .newbytes import newbytes + from .newdict import newdict + from .newint import newint + from .newlist import newlist + from .newrange import newrange + from .newobject import newobject + from .newstr import newstr + + newtypes = {bytes: newbytes, + dict: newdict, + int: newint, + long: newint, + list: newlist, + object: newobject, + range: newrange, + str: newbytes, + unicode: newstr} + + __all__ = ['newbytes', 'newdict', 'newint', 'newlist', 'newrange', 'newstr', 'newtypes'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newbytes.py b/pype/modules/ftrack/python2_vendor/future/types/newbytes.py new file mode 100644 index 0000000000..c9d584a7ca --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newbytes.py @@ -0,0 +1,460 @@ +""" +Pure-Python implementation of a Python 3-like bytes object for Python 2. + +Why do this? Without it, the Python 2 bytes object is a very, very +different beast to the Python 3 bytes object. +""" + +from numbers import Integral +import string +import copy + +from future.utils import istext, isbytes, PY2, PY3, with_metaclass +from future.types import no, issubset +from future.types.newobject import newobject + +if PY2: + from collections import Iterable +else: + from collections.abc import Iterable + + +_builtin_bytes = bytes + +if PY3: + # We'll probably never use newstr on Py3 anyway... + unicode = str + + +class BaseNewBytes(type): + def __instancecheck__(cls, instance): + if cls == newbytes: + return isinstance(instance, _builtin_bytes) + else: + return issubclass(instance.__class__, cls) + + +def _newchr(x): + if isinstance(x, str): # this happens on pypy + return x.encode('ascii') + else: + return chr(x) + + +class newbytes(with_metaclass(BaseNewBytes, _builtin_bytes)): + """ + A backport of the Python 3 bytes object to Py2 + """ + def __new__(cls, *args, **kwargs): + """ + From the Py3 bytes docstring: + + bytes(iterable_of_ints) -> bytes + bytes(string, encoding[, errors]) -> bytes + bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer + bytes(int) -> bytes object of size given by the parameter initialized with null bytes + bytes() -> empty bytes object + + Construct an immutable array of bytes from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - any object implementing the buffer API. 
+ - an integer + """ + + encoding = None + errors = None + + if len(args) == 0: + return super(newbytes, cls).__new__(cls) + elif len(args) >= 2: + args = list(args) + if len(args) == 3: + errors = args.pop() + encoding=args.pop() + # Was: elif isinstance(args[0], newbytes): + # We use type() instead of the above because we're redefining + # this to be True for all unicode string subclasses. Warning: + # This may render newstr un-subclassable. + if type(args[0]) == newbytes: + # Special-case: for consistency with Py3.3, we return the same object + # (with the same id) if a newbytes object is passed into the + # newbytes constructor. + return args[0] + elif isinstance(args[0], _builtin_bytes): + value = args[0] + elif isinstance(args[0], unicode): + try: + if 'encoding' in kwargs: + assert encoding is None + encoding = kwargs['encoding'] + if 'errors' in kwargs: + assert errors is None + errors = kwargs['errors'] + except AssertionError: + raise TypeError('Argument given by name and position') + if encoding is None: + raise TypeError('unicode string argument without an encoding') + ### + # Was: value = args[0].encode(**kwargs) + # Python 2.6 string encode() method doesn't take kwargs: + # Use this instead: + newargs = [encoding] + if errors is not None: + newargs.append(errors) + value = args[0].encode(*newargs) + ### + elif hasattr(args[0], '__bytes__'): + value = args[0].__bytes__() + elif isinstance(args[0], Iterable): + if len(args[0]) == 0: + # This could be an empty list or tuple. Return b'' as on Py3. + value = b'' + else: + # Was: elif len(args[0])>0 and isinstance(args[0][0], Integral): + # # It's a list of integers + # But then we can't index into e.g. frozensets. Try to proceed + # anyway. + try: + value = bytearray([_newchr(x) for x in args[0]]) + except: + raise ValueError('bytes must be in range(0, 256)') + elif isinstance(args[0], Integral): + if args[0] < 0: + raise ValueError('negative count') + value = b'\x00' * args[0] + else: + value = args[0] + if type(value) == newbytes: + # Above we use type(...) rather than isinstance(...) because the + # newbytes metaclass overrides __instancecheck__. + # oldbytes(value) gives the wrong thing on Py2: the same + # result as str(value) on Py3, e.g. "b'abc'". (Issue #193). 
+ # So we handle this case separately: + return copy.copy(value) + else: + return super(newbytes, cls).__new__(cls, value) + + def __repr__(self): + return 'b' + super(newbytes, self).__repr__() + + def __str__(self): + return 'b' + "'{0}'".format(super(newbytes, self).__str__()) + + def __getitem__(self, y): + value = super(newbytes, self).__getitem__(y) + if isinstance(y, Integral): + return ord(value) + else: + return newbytes(value) + + def __getslice__(self, *args): + return self.__getitem__(slice(*args)) + + def __contains__(self, key): + if isinstance(key, int): + newbyteskey = newbytes([key]) + # Don't use isinstance() here because we only want to catch + # newbytes, not Python 2 str: + elif type(key) == newbytes: + newbyteskey = key + else: + newbyteskey = newbytes(key) + return issubset(list(newbyteskey), list(self)) + + @no(unicode) + def __add__(self, other): + return newbytes(super(newbytes, self).__add__(other)) + + @no(unicode) + def __radd__(self, left): + return newbytes(left) + self + + @no(unicode) + def __mul__(self, other): + return newbytes(super(newbytes, self).__mul__(other)) + + @no(unicode) + def __rmul__(self, other): + return newbytes(super(newbytes, self).__rmul__(other)) + + def __mod__(self, vals): + if isinstance(vals, newbytes): + vals = _builtin_bytes.__str__(vals) + + elif isinstance(vals, tuple): + newvals = [] + for v in vals: + if isinstance(v, newbytes): + v = _builtin_bytes.__str__(v) + newvals.append(v) + vals = tuple(newvals) + + elif (hasattr(vals.__class__, '__getitem__') and + hasattr(vals.__class__, 'iteritems')): + for k, v in vals.iteritems(): + if isinstance(v, newbytes): + vals[k] = _builtin_bytes.__str__(v) + + return _builtin_bytes.__mod__(self, vals) + + def __imod__(self, other): + return self.__mod__(other) + + def join(self, iterable_of_bytes): + errmsg = 'sequence item {0}: expected bytes, {1} found' + if isbytes(iterable_of_bytes) or istext(iterable_of_bytes): + raise TypeError(errmsg.format(0, type(iterable_of_bytes))) + for i, item in enumerate(iterable_of_bytes): + if istext(item): + raise TypeError(errmsg.format(i, type(item))) + return newbytes(super(newbytes, self).join(iterable_of_bytes)) + + @classmethod + def fromhex(cls, string): + # Only on Py2: + return cls(string.replace(' ', '').decode('hex')) + + @no(unicode) + def find(self, sub, *args): + return super(newbytes, self).find(sub, *args) + + @no(unicode) + def rfind(self, sub, *args): + return super(newbytes, self).rfind(sub, *args) + + @no(unicode, (1, 2)) + def replace(self, old, new, *args): + return newbytes(super(newbytes, self).replace(old, new, *args)) + + def encode(self, *args): + raise AttributeError("encode method has been disabled in newbytes") + + def decode(self, encoding='utf-8', errors='strict'): + """ + Returns a newstr (i.e. unicode subclass) + + Decode B using the codec registered for encoding. Default encoding + is 'utf-8'. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + # Py2 str.encode() takes encoding and errors as optional parameter, + # not keyword arguments as in Python 3 str. 
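+        # For example (illustrative): newbytes(b'caf\xc3\xa9').decode('utf-8')
+        # returns newstr(u'caf\xe9').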
+
+        from future.types.newstr import newstr
+
+        if errors == 'surrogateescape':
+            from future.utils.surrogateescape import register_surrogateescape
+            register_surrogateescape()
+
+        return newstr(super(newbytes, self).decode(encoding, errors))
+
+        # This is currently broken:
+        # # We implement surrogateescape error handling here in addition rather
+        # # than relying on the custom error handler from
+        # # future.utils.surrogateescape to be registered globally, even though
+        # # that is fine in the case of decoding. (But not encoding: see the
+        # # comments in newstr.encode()``.)
+        #
+        # if errors == 'surrogateescape':
+        #     # Decode char by char
+        #     mybytes = []
+        #     for code in self:
+        #         # Code is an int
+        #         if 0x80 <= code <= 0xFF:
+        #             b = 0xDC00 + code
+        #         elif code <= 0x7F:
+        #             b = _unichr(c).decode(encoding=encoding)
+        #         else:
+        #             # # It may be a bad byte
+        #             # FIXME: What to do in this case? See the Py3 docs / tests.
+        #             # # Try swallowing it.
+        #             # continue
+        #             # print("RAISE!")
+        #             raise NotASurrogateError
+        #         mybytes.append(b)
+        #     return newbytes(mybytes)
+        # return newbytes(super(newstr, self).decode(encoding, errors))
+
+    @no(unicode)
+    def startswith(self, prefix, *args):
+        return super(newbytes, self).startswith(prefix, *args)
+
+    @no(unicode)
+    def endswith(self, prefix, *args):
+        return super(newbytes, self).endswith(prefix, *args)
+
+    @no(unicode)
+    def split(self, sep=None, maxsplit=-1):
+        # Py2 str.split() takes maxsplit as an optional parameter, not as a
+        # keyword argument as in Python 3 bytes.
+        parts = super(newbytes, self).split(sep, maxsplit)
+        return [newbytes(part) for part in parts]
+
+    def splitlines(self, keepends=False):
+        """
+        B.splitlines([keepends]) -> list of lines
+
+        Return a list of the lines in B, breaking at line boundaries.
+        Line breaks are not included in the resulting list unless keepends
+        is given and true.
+        """
+        # Py2 str.splitlines() takes keepends as an optional parameter,
+        # not as a keyword argument as in Python 3 bytes.
+        parts = super(newbytes, self).splitlines(keepends)
+        return [newbytes(part) for part in parts]
+
+    @no(unicode)
+    def rsplit(self, sep=None, maxsplit=-1):
+        # Py2 str.rsplit() takes maxsplit as an optional parameter, not as a
+        # keyword argument as in Python 3 bytes.
+        parts = super(newbytes, self).rsplit(sep, maxsplit)
+        return [newbytes(part) for part in parts]
+
+    @no(unicode)
+    def partition(self, sep):
+        parts = super(newbytes, self).partition(sep)
+        return tuple(newbytes(part) for part in parts)
+
+    @no(unicode)
+    def rpartition(self, sep):
+        parts = super(newbytes, self).rpartition(sep)
+        return tuple(newbytes(part) for part in parts)
+
+    @no(unicode, (1,))
+    def rindex(self, sub, *args):
+        '''
+        S.rindex(sub [,start [,end]]) -> int
+
+        Like S.rfind() but raise ValueError when the substring is not found.
+        '''
+        pos = self.rfind(sub, *args)
+        if pos == -1:
+            raise ValueError('substring not found')
+        return pos
+
+    @no(unicode)
+    def index(self, sub, *args):
+        '''
+        Returns index of sub in bytes.
+        Raises ValueError if sub is not found in bytes and TypeError if it
+        can't be converted to bytes or its length is not 1.
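+
+        For example::
+
+            newbytes(b'ABCA').index(66)     # -> 1  (int argument)
+            newbytes(b'ABCA').index(b'CA')  # -> 2  (bytes argument)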
+ ''' + if isinstance(sub, int): + if len(args) == 0: + start, end = 0, len(self) + elif len(args) == 1: + start = args[0] + elif len(args) == 2: + start, end = args + else: + raise TypeError('takes at most 3 arguments') + return list(self)[start:end].index(sub) + if not isinstance(sub, bytes): + try: + sub = self.__class__(sub) + except (TypeError, ValueError): + raise TypeError("can't convert sub to bytes") + try: + return super(newbytes, self).index(sub, *args) + except ValueError: + raise ValueError('substring not found') + + def __eq__(self, other): + if isinstance(other, (_builtin_bytes, bytearray)): + return super(newbytes, self).__eq__(other) + else: + return False + + def __ne__(self, other): + if isinstance(other, _builtin_bytes): + return super(newbytes, self).__ne__(other) + else: + return True + + unorderable_err = 'unorderable types: bytes() and {0}' + + def __lt__(self, other): + if isinstance(other, _builtin_bytes): + return super(newbytes, self).__lt__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __le__(self, other): + if isinstance(other, _builtin_bytes): + return super(newbytes, self).__le__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __gt__(self, other): + if isinstance(other, _builtin_bytes): + return super(newbytes, self).__gt__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __ge__(self, other): + if isinstance(other, _builtin_bytes): + return super(newbytes, self).__ge__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __native__(self): + # We can't just feed a newbytes object into str(), because + # newbytes.__str__() returns e.g. "b'blah'", consistent with Py3 bytes. + return super(newbytes, self).__str__() + + def __getattribute__(self, name): + """ + A trick to cause the ``hasattr`` builtin-fn to return False for + the 'encode' method on Py2. + """ + if name in ['encode', u'encode']: + raise AttributeError("encode method has been disabled in newbytes") + return super(newbytes, self).__getattribute__(name) + + @no(unicode) + def rstrip(self, bytes_to_strip=None): + """ + Strip trailing bytes contained in the argument. + If the argument is omitted, strip trailing ASCII whitespace. + """ + return newbytes(super(newbytes, self).rstrip(bytes_to_strip)) + + @no(unicode) + def strip(self, bytes_to_strip=None): + """ + Strip leading and trailing bytes contained in the argument. + If the argument is omitted, strip trailing ASCII whitespace. + """ + return newbytes(super(newbytes, self).strip(bytes_to_strip)) + + def lower(self): + """ + b.lower() -> copy of b + + Return a copy of b with all ASCII characters converted to lowercase. + """ + return newbytes(super(newbytes, self).lower()) + + @no(unicode) + def upper(self): + """ + b.upper() -> copy of b + + Return a copy of b with all ASCII characters converted to uppercase. + """ + return newbytes(super(newbytes, self).upper()) + + @classmethod + @no(unicode) + def maketrans(cls, frm, to): + """ + B.maketrans(frm, to) -> translation table + + Return a translation table (a bytes object of length 256) suitable + for use in the bytes or bytearray translate method where each byte + in frm is mapped to the byte at the same position in to. + The bytes objects frm and to must be of the same length. 
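+
+        A hedged doctest-style sketch (added for illustration; behaviour
+        follows the Py2 ``string.maketrans`` this delegates to):
+
+            >>> table = bytes.maketrans(b'abc', b'xyz')
+            >>> bytes(b'aabbcc').translate(table) == b'xxyyzz'
+            True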
+ """ + return newbytes(string.maketrans(frm, to)) + + +__all__ = ['newbytes'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newdict.py b/pype/modules/ftrack/python2_vendor/future/types/newdict.py new file mode 100644 index 0000000000..3f3a559dd5 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newdict.py @@ -0,0 +1,111 @@ +""" +A dict subclass for Python 2 that behaves like Python 3's dict + +Example use: + +>>> from builtins import dict +>>> d1 = dict() # instead of {} for an empty dict +>>> d2 = dict(key1='value1', key2='value2') + +The keys, values and items methods now return iterators on Python 2.x +(with set-like behaviour on Python 2.7). + +>>> for d in (d1, d2): +... assert not isinstance(d.keys(), list) +... assert not isinstance(d.values(), list) +... assert not isinstance(d.items(), list) +""" + +import sys + +from future.utils import with_metaclass +from future.types.newobject import newobject + + +_builtin_dict = dict +ver = sys.version_info[:2] + + +class BaseNewDict(type): + def __instancecheck__(cls, instance): + if cls == newdict: + return isinstance(instance, _builtin_dict) + else: + return issubclass(instance.__class__, cls) + + +class newdict(with_metaclass(BaseNewDict, _builtin_dict)): + """ + A backport of the Python 3 dict object to Py2 + """ + def items(self): + """ + On Python 2.7+: + D.items() -> a set-like object providing a view on D's items + On Python 2.6: + D.items() -> an iterator over D's items + """ + if ver == (2, 7): + return self.viewitems() + elif ver == (2, 6): + return self.iteritems() + elif ver >= (3, 0): + return self.items() + + def keys(self): + """ + On Python 2.7+: + D.keys() -> a set-like object providing a view on D's keys + On Python 2.6: + D.keys() -> an iterator over D's keys + """ + if ver == (2, 7): + return self.viewkeys() + elif ver == (2, 6): + return self.iterkeys() + elif ver >= (3, 0): + return self.keys() + + def values(self): + """ + On Python 2.7+: + D.values() -> a set-like object providing a view on D's values + On Python 2.6: + D.values() -> an iterator over D's values + """ + if ver == (2, 7): + return self.viewvalues() + elif ver == (2, 6): + return self.itervalues() + elif ver >= (3, 0): + return self.values() + + def __new__(cls, *args, **kwargs): + """ + dict() -> new empty dictionary + dict(mapping) -> new dictionary initialized from a mapping object's + (key, value) pairs + dict(iterable) -> new dictionary initialized as if via: + d = {} + for k, v in iterable: + d[k] = v + dict(**kwargs) -> new dictionary initialized with the name=value pairs + in the keyword argument list. For example: dict(one=1, two=2) + """ + + if len(args) == 0: + return super(newdict, cls).__new__(cls) + elif type(args[0]) == newdict: + value = args[0] + else: + value = args[0] + return super(newdict, cls).__new__(cls, value) + + def __native__(self): + """ + Hook for the future.utils.native() function + """ + return dict(self) + + +__all__ = ['newdict'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newint.py b/pype/modules/ftrack/python2_vendor/future/types/newint.py new file mode 100644 index 0000000000..748dba9d23 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newint.py @@ -0,0 +1,381 @@ +""" +Backport of Python 3's int, based on Py2's long. + +They are very similar. 
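+For instance (a hedged sketch, added for illustration), arithmetic on a
+``newint`` stays a ``newint``:
+
+    >>> from builtins import int
+    >>> type(int(2) ** 100).__name__
+    'newint'
+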
The most notable difference is: + +- representation: trailing L in Python 2 removed in Python 3 +""" +from __future__ import division + +import struct + +from future.types.newbytes import newbytes +from future.types.newobject import newobject +from future.utils import PY3, isint, istext, isbytes, with_metaclass, native + + +if PY3: + long = int + from collections.abc import Iterable +else: + from collections import Iterable + + +class BaseNewInt(type): + def __instancecheck__(cls, instance): + if cls == newint: + # Special case for Py2 short or long int + return isinstance(instance, (int, long)) + else: + return issubclass(instance.__class__, cls) + + +class newint(with_metaclass(BaseNewInt, long)): + """ + A backport of the Python 3 int object to Py2 + """ + def __new__(cls, x=0, base=10): + """ + From the Py3 int docstring: + + | int(x=0) -> integer + | int(x, base=10) -> integer + | + | Convert a number or string to an integer, or return 0 if no + | arguments are given. If x is a number, return x.__int__(). For + | floating point numbers, this truncates towards zero. + | + | If x is not a number or if base is given, then x must be a string, + | bytes, or bytearray instance representing an integer literal in the + | given base. The literal can be preceded by '+' or '-' and be + | surrounded by whitespace. The base defaults to 10. Valid bases are + | 0 and 2-36. Base 0 means to interpret the base from the string as an + | integer literal. + | >>> int('0b100', base=0) + | 4 + + """ + try: + val = x.__int__() + except AttributeError: + val = x + else: + if not isint(val): + raise TypeError('__int__ returned non-int ({0})'.format( + type(val))) + + if base != 10: + # Explicit base + if not (istext(val) or isbytes(val) or isinstance(val, bytearray)): + raise TypeError( + "int() can't convert non-string with explicit base") + try: + return super(newint, cls).__new__(cls, val, base) + except TypeError: + return super(newint, cls).__new__(cls, newbytes(val), base) + # After here, base is 10 + try: + return super(newint, cls).__new__(cls, val) + except TypeError: + # Py2 long doesn't handle bytearray input with an explicit base, so + # handle this here. 
+ # Py3: int(bytearray(b'10'), 2) == 2 + # Py2: int(bytearray(b'10'), 2) == 2 raises TypeError + # Py2: long(bytearray(b'10'), 2) == 2 raises TypeError + try: + return super(newint, cls).__new__(cls, newbytes(val)) + except: + raise TypeError("newint argument must be a string or a number," + "not '{0}'".format(type(val))) + + def __repr__(self): + """ + Without the L suffix + """ + value = super(newint, self).__repr__() + assert value[-1] == 'L' + return value[:-1] + + def __add__(self, other): + value = super(newint, self).__add__(other) + if value is NotImplemented: + return long(self) + other + return newint(value) + + def __radd__(self, other): + value = super(newint, self).__radd__(other) + if value is NotImplemented: + return other + long(self) + return newint(value) + + def __sub__(self, other): + value = super(newint, self).__sub__(other) + if value is NotImplemented: + return long(self) - other + return newint(value) + + def __rsub__(self, other): + value = super(newint, self).__rsub__(other) + if value is NotImplemented: + return other - long(self) + return newint(value) + + def __mul__(self, other): + value = super(newint, self).__mul__(other) + if isint(value): + return newint(value) + elif value is NotImplemented: + return long(self) * other + return value + + def __rmul__(self, other): + value = super(newint, self).__rmul__(other) + if isint(value): + return newint(value) + elif value is NotImplemented: + return other * long(self) + return value + + def __div__(self, other): + # We override this rather than e.g. relying on object.__div__ or + # long.__div__ because we want to wrap the value in a newint() + # call if other is another int + value = long(self) / other + if isinstance(other, (int, long)): + return newint(value) + else: + return value + + def __rdiv__(self, other): + value = other / long(self) + if isinstance(other, (int, long)): + return newint(value) + else: + return value + + def __idiv__(self, other): + # long has no __idiv__ method. 
Use __itruediv__ and cast back to + # newint: + value = self.__itruediv__(other) + if isinstance(other, (int, long)): + return newint(value) + else: + return value + + def __truediv__(self, other): + value = super(newint, self).__truediv__(other) + if value is NotImplemented: + value = long(self) / other + return value + + def __rtruediv__(self, other): + return super(newint, self).__rtruediv__(other) + + def __itruediv__(self, other): + # long has no __itruediv__ method + mylong = long(self) + mylong /= other + return mylong + + def __floordiv__(self, other): + return newint(super(newint, self).__floordiv__(other)) + + def __rfloordiv__(self, other): + return newint(super(newint, self).__rfloordiv__(other)) + + def __ifloordiv__(self, other): + # long has no __ifloordiv__ method + mylong = long(self) + mylong //= other + return newint(mylong) + + def __mod__(self, other): + value = super(newint, self).__mod__(other) + if value is NotImplemented: + return long(self) % other + return newint(value) + + def __rmod__(self, other): + value = super(newint, self).__rmod__(other) + if value is NotImplemented: + return other % long(self) + return newint(value) + + def __divmod__(self, other): + value = super(newint, self).__divmod__(other) + if value is NotImplemented: + mylong = long(self) + return (mylong // other, mylong % other) + return (newint(value[0]), newint(value[1])) + + def __rdivmod__(self, other): + value = super(newint, self).__rdivmod__(other) + if value is NotImplemented: + mylong = long(self) + return (other // mylong, other % mylong) + return (newint(value[0]), newint(value[1])) + + def __pow__(self, other): + value = super(newint, self).__pow__(other) + if value is NotImplemented: + return long(self) ** other + return newint(value) + + def __rpow__(self, other): + value = super(newint, self).__rpow__(other) + if value is NotImplemented: + return other ** long(self) + return newint(value) + + def __lshift__(self, other): + if not isint(other): + raise TypeError( + "unsupported operand type(s) for <<: '%s' and '%s'" % + (type(self).__name__, type(other).__name__)) + return newint(super(newint, self).__lshift__(other)) + + def __rshift__(self, other): + if not isint(other): + raise TypeError( + "unsupported operand type(s) for >>: '%s' and '%s'" % + (type(self).__name__, type(other).__name__)) + return newint(super(newint, self).__rshift__(other)) + + def __and__(self, other): + if not isint(other): + raise TypeError( + "unsupported operand type(s) for &: '%s' and '%s'" % + (type(self).__name__, type(other).__name__)) + return newint(super(newint, self).__and__(other)) + + def __or__(self, other): + if not isint(other): + raise TypeError( + "unsupported operand type(s) for |: '%s' and '%s'" % + (type(self).__name__, type(other).__name__)) + return newint(super(newint, self).__or__(other)) + + def __xor__(self, other): + if not isint(other): + raise TypeError( + "unsupported operand type(s) for ^: '%s' and '%s'" % + (type(self).__name__, type(other).__name__)) + return newint(super(newint, self).__xor__(other)) + + def __neg__(self): + return newint(super(newint, self).__neg__()) + + def __pos__(self): + return newint(super(newint, self).__pos__()) + + def __abs__(self): + return newint(super(newint, self).__abs__()) + + def __invert__(self): + return newint(super(newint, self).__invert__()) + + def __int__(self): + return self + + def __nonzero__(self): + return self.__bool__() + + def __bool__(self): + """ + So subclasses can override this, Py3-style + """ + return super(newint, 
self).__nonzero__() + + def __native__(self): + return long(self) + + def to_bytes(self, length, byteorder='big', signed=False): + """ + Return an array of bytes representing an integer. + + The integer is represented using length bytes. An OverflowError is + raised if the integer is not representable with the given number of + bytes. + + The byteorder argument determines the byte order used to represent the + integer. If byteorder is 'big', the most significant byte is at the + beginning of the byte array. If byteorder is 'little', the most + significant byte is at the end of the byte array. To request the native + byte order of the host system, use `sys.byteorder' as the byte order value. + + The signed keyword-only argument determines whether two's complement is + used to represent the integer. If signed is False and a negative integer + is given, an OverflowError is raised. + """ + if length < 0: + raise ValueError("length argument must be non-negative") + if length == 0 and self == 0: + return newbytes() + if signed and self < 0: + bits = length * 8 + num = (2**bits) + self + if num <= 0: + raise OverflowError("int too smal to convert") + else: + if self < 0: + raise OverflowError("can't convert negative int to unsigned") + num = self + if byteorder not in ('little', 'big'): + raise ValueError("byteorder must be either 'little' or 'big'") + h = b'%x' % num + s = newbytes((b'0'*(len(h) % 2) + h).zfill(length*2).decode('hex')) + if signed: + high_set = s[0] & 0x80 + if self > 0 and high_set: + raise OverflowError("int too big to convert") + if self < 0 and not high_set: + raise OverflowError("int too small to convert") + if len(s) > length: + raise OverflowError("int too big to convert") + return s if byteorder == 'big' else s[::-1] + + @classmethod + def from_bytes(cls, mybytes, byteorder='big', signed=False): + """ + Return the integer represented by the given array of bytes. + + The mybytes argument must either support the buffer protocol or be an + iterable object producing bytes. Bytes and bytearray are examples of + built-in objects that support the buffer protocol. + + The byteorder argument determines the byte order used to represent the + integer. If byteorder is 'big', the most significant byte is at the + beginning of the byte array. If byteorder is 'little', the most + significant byte is at the end of the byte array. To request the native + byte order of the host system, use `sys.byteorder' as the byte order value. + + The signed keyword-only argument indicates whether two's complement is + used to represent the integer. + """ + if byteorder not in ('little', 'big'): + raise ValueError("byteorder must be either 'little' or 'big'") + if isinstance(mybytes, unicode): + raise TypeError("cannot convert unicode objects to bytes") + # mybytes can also be passed as a sequence of integers on Py3. 
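+        # A hedged illustration (added, not upstream):
+        #     int.from_bytes([1, 0], byteorder='big') == 256
+        # because the sequence is wrapped in newbytes below.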
+ # Test for this: + elif isinstance(mybytes, Iterable): + mybytes = newbytes(mybytes) + b = mybytes if byteorder == 'big' else mybytes[::-1] + if len(b) == 0: + b = b'\x00' + # The encode() method has been disabled by newbytes, but Py2's + # str has it: + num = int(native(b).encode('hex'), 16) + if signed and (b[0] & 0x80): + num = num - (2 ** (len(b)*8)) + return cls(num) + + +# def _twos_comp(val, bits): +# """compute the 2's compliment of int value val""" +# if( (val&(1<<(bits-1))) != 0 ): +# val = val - (1<>> from builtins import list +>>> l1 = list() # instead of {} for an empty list +>>> l1.append('hello') +>>> l2 = l1.copy() + +""" + +import sys +import copy + +from future.utils import with_metaclass +from future.types.newobject import newobject + + +_builtin_list = list +ver = sys.version_info[:2] + + +class BaseNewList(type): + def __instancecheck__(cls, instance): + if cls == newlist: + return isinstance(instance, _builtin_list) + else: + return issubclass(instance.__class__, cls) + + +class newlist(with_metaclass(BaseNewList, _builtin_list)): + """ + A backport of the Python 3 list object to Py2 + """ + def copy(self): + """ + L.copy() -> list -- a shallow copy of L + """ + return copy.copy(self) + + def clear(self): + """L.clear() -> None -- remove all items from L""" + for i in range(len(self)): + self.pop() + + def __new__(cls, *args, **kwargs): + """ + list() -> new empty list + list(iterable) -> new list initialized from iterable's items + """ + + if len(args) == 0: + return super(newlist, cls).__new__(cls) + elif type(args[0]) == newlist: + value = args[0] + else: + value = args[0] + return super(newlist, cls).__new__(cls, value) + + def __add__(self, value): + return newlist(super(newlist, self).__add__(value)) + + def __radd__(self, left): + " left + self " + try: + return newlist(left) + self + except: + return NotImplemented + + def __getitem__(self, y): + """ + x.__getitem__(y) <==> x[y] + + Warning: a bug in Python 2.x prevents indexing via a slice from + returning a newlist object. + """ + if isinstance(y, slice): + return newlist(super(newlist, self).__getitem__(y)) + else: + return super(newlist, self).__getitem__(y) + + def __native__(self): + """ + Hook for the future.utils.native() function + """ + return list(self) + + def __nonzero__(self): + return len(self) > 0 + + +__all__ = ['newlist'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newmemoryview.py b/pype/modules/ftrack/python2_vendor/future/types/newmemoryview.py new file mode 100644 index 0000000000..09f804dcf4 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newmemoryview.py @@ -0,0 +1,29 @@ +""" +A pretty lame implementation of a memoryview object for Python 2.6. +""" +from numbers import Integral +import string + +from future.utils import istext, isbytes, PY2, with_metaclass +from future.types import no, issubset + +if PY2: + from collections import Iterable +else: + from collections.abc import Iterable + +# class BaseNewBytes(type): +# def __instancecheck__(cls, instance): +# return isinstance(instance, _builtin_bytes) + + +class newmemoryview(object): # with_metaclass(BaseNewBytes, _builtin_bytes)): + """ + A pretty lame backport of the Python 2.7 and Python 3.x + memoryviewview object to Py2.6. 
+ """ + def __init__(self, obj): + return obj + + +__all__ = ['newmemoryview'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newobject.py b/pype/modules/ftrack/python2_vendor/future/types/newobject.py new file mode 100644 index 0000000000..31b84fc12c --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newobject.py @@ -0,0 +1,117 @@ +""" +An object subclass for Python 2 that gives new-style classes written in the +style of Python 3 (with ``__next__`` and unicode-returning ``__str__`` methods) +the appropriate Python 2-style ``next`` and ``__unicode__`` methods for compatible. + +Example use:: + + from builtins import object + + my_unicode_str = u'Unicode string: \u5b54\u5b50' + + class A(object): + def __str__(self): + return my_unicode_str + + a = A() + print(str(a)) + + # On Python 2, these relations hold: + assert unicode(a) == my_unicode_string + assert str(a) == my_unicode_string.encode('utf-8') + + +Another example:: + + from builtins import object + + class Upper(object): + def __init__(self, iterable): + self._iter = iter(iterable) + def __next__(self): # note the Py3 interface + return next(self._iter).upper() + def __iter__(self): + return self + + assert list(Upper('hello')) == list('HELLO') + +""" + + +class newobject(object): + """ + A magical object class that provides Python 2 compatibility methods:: + next + __unicode__ + __nonzero__ + + Subclasses of this class can merely define the Python 3 methods (__next__, + __str__, and __bool__). + """ + def next(self): + if hasattr(self, '__next__'): + return type(self).__next__(self) + raise TypeError('newobject is not an iterator') + + def __unicode__(self): + # All subclasses of the builtin object should have __str__ defined. + # Note that old-style classes do not have __str__ defined. + if hasattr(self, '__str__'): + s = type(self).__str__(self) + else: + s = str(self) + if isinstance(s, unicode): + return s + else: + return s.decode('utf-8') + + def __nonzero__(self): + if hasattr(self, '__bool__'): + return type(self).__bool__(self) + if hasattr(self, '__len__'): + return type(self).__len__(self) + # object has no __nonzero__ method + return True + + # Are these ever needed? + # def __div__(self): + # return self.__truediv__() + + # def __idiv__(self, other): + # return self.__itruediv__(other) + + def __long__(self): + if not hasattr(self, '__int__'): + return NotImplemented + return self.__int__() # not type(self).__int__(self) + + # def __new__(cls, *args, **kwargs): + # """ + # dict() -> new empty dictionary + # dict(mapping) -> new dictionary initialized from a mapping object's + # (key, value) pairs + # dict(iterable) -> new dictionary initialized as if via: + # d = {} + # for k, v in iterable: + # d[k] = v + # dict(**kwargs) -> new dictionary initialized with the name=value pairs + # in the keyword argument list. 
For example: dict(one=1, two=2) + # """ + + # if len(args) == 0: + # return super(newdict, cls).__new__(cls) + # elif type(args[0]) == newdict: + # return args[0] + # else: + # value = args[0] + # return super(newdict, cls).__new__(cls, value) + + def __native__(self): + """ + Hook for the future.utils.native() function + """ + return object(self) + + __slots__ = [] + +__all__ = ['newobject'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newopen.py b/pype/modules/ftrack/python2_vendor/future/types/newopen.py new file mode 100644 index 0000000000..b75d45afb2 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newopen.py @@ -0,0 +1,32 @@ +""" +A substitute for the Python 3 open() function. + +Note that io.open() is more complete but maybe slower. Even so, the +completeness may be a better default. TODO: compare these +""" + +_builtin_open = open + +class newopen(object): + """Wrapper providing key part of Python 3 open() interface. + + From IPython's py3compat.py module. License: BSD. + """ + def __init__(self, fname, mode="r", encoding="utf-8"): + self.f = _builtin_open(fname, mode) + self.enc = encoding + + def write(self, s): + return self.f.write(s.encode(self.enc)) + + def read(self, size=-1): + return self.f.read(size).decode(self.enc) + + def close(self): + return self.f.close() + + def __enter__(self): + return self + + def __exit__(self, etype, value, traceback): + self.f.close() diff --git a/pype/modules/ftrack/python2_vendor/future/types/newrange.py b/pype/modules/ftrack/python2_vendor/future/types/newrange.py new file mode 100644 index 0000000000..eda01a5a50 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newrange.py @@ -0,0 +1,170 @@ +""" +Nearly identical to xrange.py, by Dan Crosta, from + + https://github.com/dcrosta/xrange.git + +This is included here in the ``future`` package rather than pointed to as +a dependency because there is no package for ``xrange`` on PyPI. It is +also tweaked to appear like a regular Python 3 ``range`` object rather +than a Python 2 xrange. + +From Dan Crosta's README: + + "A pure-Python implementation of Python 2.7's xrange built-in, with + some features backported from the Python 3.x range built-in (which + replaced xrange) in that version." + + Read more at + https://late.am/post/2012/06/18/what-the-heck-is-an-xrange +""" +from __future__ import absolute_import + +from future.utils import PY2 + +if PY2: + from collections import Sequence, Iterator +else: + from collections.abc import Sequence, Iterator +from itertools import islice + +from future.backports.misc import count # with step parameter on Py2.6 +# For backward compatibility with python-future versions < 0.14.4: +_count = count + + +class newrange(Sequence): + """ + Pure-Python backport of Python 3's range object. 
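+    A hedged doctest-style sketch (added for illustration):
+
+        >>> r = newrange(0, 10, 2)
+        >>> len(r), r[2], 6 in r
+        (5, 4, True)
+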
See `the CPython + documentation for details: + `_ + """ + + def __init__(self, *args): + if len(args) == 1: + start, stop, step = 0, args[0], 1 + elif len(args) == 2: + start, stop, step = args[0], args[1], 1 + elif len(args) == 3: + start, stop, step = args + else: + raise TypeError('range() requires 1-3 int arguments') + + try: + start, stop, step = int(start), int(stop), int(step) + except ValueError: + raise TypeError('an integer is required') + + if step == 0: + raise ValueError('range() arg 3 must not be zero') + elif step < 0: + stop = min(stop, start) + else: + stop = max(stop, start) + + self._start = start + self._stop = stop + self._step = step + self._len = (stop - start) // step + bool((stop - start) % step) + + @property + def start(self): + return self._start + + @property + def stop(self): + return self._stop + + @property + def step(self): + return self._step + + def __repr__(self): + if self._step == 1: + return 'range(%d, %d)' % (self._start, self._stop) + return 'range(%d, %d, %d)' % (self._start, self._stop, self._step) + + def __eq__(self, other): + return (isinstance(other, newrange) and + (self._len == 0 == other._len or + (self._start, self._step, self._len) == + (other._start, other._step, self._len))) + + def __len__(self): + return self._len + + def index(self, value): + """Return the 0-based position of integer `value` in + the sequence this range represents.""" + try: + diff = value - self._start + except TypeError: + raise ValueError('%r is not in range' % value) + quotient, remainder = divmod(diff, self._step) + if remainder == 0 and 0 <= quotient < self._len: + return abs(quotient) + raise ValueError('%r is not in range' % value) + + def count(self, value): + """Return the number of ocurrences of integer `value` + in the sequence this range represents.""" + # a value can occur exactly zero or one times + return int(value in self) + + def __contains__(self, value): + """Return ``True`` if the integer `value` occurs in + the sequence this range represents.""" + try: + self.index(value) + return True + except ValueError: + return False + + def __reversed__(self): + return iter(self[::-1]) + + def __getitem__(self, index): + """Return the element at position ``index`` in the sequence + this range represents, or raise :class:`IndexError` if the + position is out of range.""" + if isinstance(index, slice): + return self.__getitem_slice(index) + if index < 0: + # negative indexes access from the end + index = self._len + index + if index < 0 or index >= self._len: + raise IndexError('range object index out of range') + return self._start + index * self._step + + def __getitem_slice(self, slce): + """Return a range which represents the requested slce + of the sequence represented by this range. + """ + scaled_indices = (self._step * n for n in slce.indices(self._len)) + start_offset, stop_offset, new_step = scaled_indices + return newrange(self._start + start_offset, + self._start + stop_offset, + new_step) + + def __iter__(self): + """Return an iterator which enumerates the elements of the + sequence this range represents.""" + return range_iterator(self) + + +class range_iterator(Iterator): + """An iterator for a :class:`range`. 
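+
+    A hedged usage sketch (added for illustration):
+
+        >>> it = iter(newrange(3))
+        >>> next(it), next(it)
+        (0, 1)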
+ """ + def __init__(self, range_): + self._stepper = islice(count(range_.start, range_.step), len(range_)) + + def __iter__(self): + return self + + def __next__(self): + return next(self._stepper) + + def next(self): + return next(self._stepper) + + +__all__ = ['newrange'] diff --git a/pype/modules/ftrack/python2_vendor/future/types/newstr.py b/pype/modules/ftrack/python2_vendor/future/types/newstr.py new file mode 100644 index 0000000000..8ca191f978 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/types/newstr.py @@ -0,0 +1,426 @@ +""" +This module redefines ``str`` on Python 2.x to be a subclass of the Py2 +``unicode`` type that behaves like the Python 3.x ``str``. + +The main differences between ``newstr`` and Python 2.x's ``unicode`` type are +the stricter type-checking and absence of a `u''` prefix in the representation. + +It is designed to be used together with the ``unicode_literals`` import +as follows: + + >>> from __future__ import unicode_literals + >>> from builtins import str, isinstance + +On Python 3.x and normally on Python 2.x, these expressions hold + + >>> str('blah') is 'blah' + True + >>> isinstance('blah', str) + True + +However, on Python 2.x, with this import: + + >>> from __future__ import unicode_literals + +the same expressions are False: + + >>> str('blah') is 'blah' + False + >>> isinstance('blah', str) + False + +This module is designed to be imported together with ``unicode_literals`` on +Python 2 to bring the meaning of ``str`` back into alignment with unprefixed +string literals (i.e. ``unicode`` subclasses). + +Note that ``str()`` (and ``print()``) would then normally call the +``__unicode__`` method on objects in Python 2. To define string +representations of your objects portably across Py3 and Py2, use the +:func:`python_2_unicode_compatible` decorator in :mod:`future.utils`. + +""" + +from numbers import Number + +from future.utils import PY3, istext, with_metaclass, isnewbytes +from future.types import no, issubset +from future.types.newobject import newobject + + +if PY3: + # We'll probably never use newstr on Py3 anyway... + unicode = str + from collections.abc import Iterable +else: + from collections import Iterable + + +class BaseNewStr(type): + def __instancecheck__(cls, instance): + if cls == newstr: + return isinstance(instance, unicode) + else: + return issubclass(instance.__class__, cls) + + +class newstr(with_metaclass(BaseNewStr, unicode)): + """ + A backport of the Python 3 str object to Py2 + """ + no_convert_msg = "Can't convert '{0}' object to str implicitly" + + def __new__(cls, *args, **kwargs): + """ + From the Py3 str docstring: + + str(object='') -> str + str(bytes_or_buffer[, encoding[, errors]]) -> str + + Create a new string object from the given object. If encoding or + errors is specified, then the object must expose a data buffer + that will be decoded using the given encoding and error handler. + Otherwise, returns the result of object.__str__() (if defined) + or repr(object). + encoding defaults to sys.getdefaultencoding(). + errors defaults to 'strict'. + + """ + if len(args) == 0: + return super(newstr, cls).__new__(cls) + # Special case: If someone requests str(str(u'abc')), return the same + # object (same id) for consistency with Py3.3. This is not true for + # other objects like list or dict. + elif type(args[0]) == newstr and cls == newstr: + return args[0] + elif isinstance(args[0], unicode): + value = args[0] + elif isinstance(args[0], bytes): # i.e. 
Py2 bytes or newbytes + if 'encoding' in kwargs or len(args) > 1: + value = args[0].decode(*args[1:], **kwargs) + else: + value = args[0].__str__() + else: + value = args[0] + return super(newstr, cls).__new__(cls, value) + + def __repr__(self): + """ + Without the u prefix + """ + + value = super(newstr, self).__repr__() + # assert value[0] == u'u' + return value[1:] + + def __getitem__(self, y): + """ + Warning: Python <= 2.7.6 has a bug that causes this method never to be called + when y is a slice object. Therefore the type of newstr()[:2] is wrong + (unicode instead of newstr). + """ + return newstr(super(newstr, self).__getitem__(y)) + + def __contains__(self, key): + errmsg = "'in ' requires string as left operand, not {0}" + # Don't use isinstance() here because we only want to catch + # newstr, not Python 2 unicode: + if type(key) == newstr: + newkey = key + elif isinstance(key, unicode) or isinstance(key, bytes) and not isnewbytes(key): + newkey = newstr(key) + else: + raise TypeError(errmsg.format(type(key))) + return issubset(list(newkey), list(self)) + + @no('newbytes') + def __add__(self, other): + return newstr(super(newstr, self).__add__(other)) + + @no('newbytes') + def __radd__(self, left): + " left + self " + try: + return newstr(left) + self + except: + return NotImplemented + + def __mul__(self, other): + return newstr(super(newstr, self).__mul__(other)) + + def __rmul__(self, other): + return newstr(super(newstr, self).__rmul__(other)) + + def join(self, iterable): + errmsg = 'sequence item {0}: expected unicode string, found bytes' + for i, item in enumerate(iterable): + # Here we use type() rather than isinstance() because + # __instancecheck__ is being overridden. E.g. + # isinstance(b'abc', newbytes) is True on Py2. + if isnewbytes(item): + raise TypeError(errmsg.format(i)) + # Support use as a staticmethod: str.join('-', ['a', 'b']) + if type(self) == newstr: + return newstr(super(newstr, self).join(iterable)) + else: + return newstr(super(newstr, newstr(self)).join(iterable)) + + @no('newbytes') + def find(self, sub, *args): + return super(newstr, self).find(sub, *args) + + @no('newbytes') + def rfind(self, sub, *args): + return super(newstr, self).rfind(sub, *args) + + @no('newbytes', (1, 2)) + def replace(self, old, new, *args): + return newstr(super(newstr, self).replace(old, new, *args)) + + def decode(self, *args): + raise AttributeError("decode method has been disabled in newstr") + + def encode(self, encoding='utf-8', errors='strict'): + """ + Returns bytes + + Encode S using the codec registered for encoding. Default encoding + is 'utf-8'. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and + 'xmlcharrefreplace' as well as any other name registered with + codecs.register_error that can handle UnicodeEncodeErrors. + """ + from future.types.newbytes import newbytes + # Py2 unicode.encode() takes encoding and errors as optional parameter, + # not keyword arguments as in Python 3 str. + + # For the surrogateescape error handling mechanism, the + # codecs.register_error() function seems to be inadequate for an + # implementation of it when encoding. (Decoding seems fine, however.) 
+ # For example, in the case of + # u'\udcc3'.encode('ascii', 'surrogateescape_handler') + # after registering the ``surrogateescape_handler`` function in + # future.utils.surrogateescape, both Python 2.x and 3.x raise an + # exception anyway after the function is called because the unicode + # string it has to return isn't encodable strictly as ASCII. + + if errors == 'surrogateescape': + if encoding == 'utf-16': + # Known to fail here. See test_encoding_works_normally() + raise NotImplementedError('FIXME: surrogateescape handling is ' + 'not yet implemented properly') + # Encode char by char, building up list of byte-strings + mybytes = [] + for c in self: + code = ord(c) + if 0xD800 <= code <= 0xDCFF: + mybytes.append(newbytes([code - 0xDC00])) + else: + mybytes.append(c.encode(encoding=encoding)) + return newbytes(b'').join(mybytes) + return newbytes(super(newstr, self).encode(encoding, errors)) + + @no('newbytes', 1) + def startswith(self, prefix, *args): + if isinstance(prefix, Iterable): + for thing in prefix: + if isnewbytes(thing): + raise TypeError(self.no_convert_msg.format(type(thing))) + return super(newstr, self).startswith(prefix, *args) + + @no('newbytes', 1) + def endswith(self, prefix, *args): + # Note we need the decorator above as well as the isnewbytes() + # check because prefix can be either a bytes object or e.g. a + # tuple of possible prefixes. (If it's a bytes object, each item + # in it is an int.) + if isinstance(prefix, Iterable): + for thing in prefix: + if isnewbytes(thing): + raise TypeError(self.no_convert_msg.format(type(thing))) + return super(newstr, self).endswith(prefix, *args) + + @no('newbytes', 1) + def split(self, sep=None, maxsplit=-1): + # Py2 unicode.split() takes maxsplit as an optional parameter, + # not as a keyword argument as in Python 3 str. + parts = super(newstr, self).split(sep, maxsplit) + return [newstr(part) for part in parts] + + @no('newbytes', 1) + def rsplit(self, sep=None, maxsplit=-1): + # Py2 unicode.rsplit() takes maxsplit as an optional parameter, + # not as a keyword argument as in Python 3 str. + parts = super(newstr, self).rsplit(sep, maxsplit) + return [newstr(part) for part in parts] + + @no('newbytes', 1) + def partition(self, sep): + parts = super(newstr, self).partition(sep) + return tuple(newstr(part) for part in parts) + + @no('newbytes', 1) + def rpartition(self, sep): + parts = super(newstr, self).rpartition(sep) + return tuple(newstr(part) for part in parts) + + @no('newbytes', 1) + def index(self, sub, *args): + """ + Like newstr.find() but raise ValueError when the substring is not + found. + """ + pos = self.find(sub, *args) + if pos == -1: + raise ValueError('substring not found') + return pos + + def splitlines(self, keepends=False): + """ + S.splitlines(keepends=False) -> list of strings + + Return a list of the lines in S, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + # Py2 unicode.splitlines() takes keepends as an optional parameter, + # not as a keyword argument as in Python 3 str. 
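+        # A hedged illustration (added, not upstream):
+        #     >>> str(u'a\nb\n').splitlines()
+        #     ['a', 'b']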
+ parts = super(newstr, self).splitlines(keepends) + return [newstr(part) for part in parts] + + def __eq__(self, other): + if (isinstance(other, unicode) or + isinstance(other, bytes) and not isnewbytes(other)): + return super(newstr, self).__eq__(other) + else: + return NotImplemented + + def __hash__(self): + if (isinstance(self, unicode) or + isinstance(self, bytes) and not isnewbytes(self)): + return super(newstr, self).__hash__() + else: + raise NotImplementedError() + + def __ne__(self, other): + if (isinstance(other, unicode) or + isinstance(other, bytes) and not isnewbytes(other)): + return super(newstr, self).__ne__(other) + else: + return True + + unorderable_err = 'unorderable types: str() and {0}' + + def __lt__(self, other): + if (isinstance(other, unicode) or + isinstance(other, bytes) and not isnewbytes(other)): + return super(newstr, self).__lt__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __le__(self, other): + if (isinstance(other, unicode) or + isinstance(other, bytes) and not isnewbytes(other)): + return super(newstr, self).__le__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __gt__(self, other): + if (isinstance(other, unicode) or + isinstance(other, bytes) and not isnewbytes(other)): + return super(newstr, self).__gt__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __ge__(self, other): + if (isinstance(other, unicode) or + isinstance(other, bytes) and not isnewbytes(other)): + return super(newstr, self).__ge__(other) + raise TypeError(self.unorderable_err.format(type(other))) + + def __getattribute__(self, name): + """ + A trick to cause the ``hasattr`` builtin-fn to return False for + the 'decode' method on Py2. + """ + if name in ['decode', u'decode']: + raise AttributeError("decode method has been disabled in newstr") + return super(newstr, self).__getattribute__(name) + + def __native__(self): + """ + A hook for the future.utils.native() function. + """ + return unicode(self) + + @staticmethod + def maketrans(x, y=None, z=None): + """ + Return a translation table usable for str.translate(). + + If there is only one argument, it must be a dictionary mapping Unicode + ordinals (integers) or characters to Unicode ordinals, strings or None. + Character keys will be then converted to ordinals. + If there are two arguments, they must be strings of equal length, and + in the resulting dictionary, each character in x will be mapped to the + character at the same position in y. If there is a third argument, it + must be a string, whose characters will be mapped to None in the result. 
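+
+        A hedged doctest-style sketch (added for illustration):
+
+            >>> table = str.maketrans(u'ab', u'xy', u'c')
+            >>> str(u'abcab').translate(table)
+            'xyxy'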
+ """ + + if y is None: + assert z is None + if not isinstance(x, dict): + raise TypeError('if you give only one argument to maketrans it must be a dict') + result = {} + for (key, value) in x.items(): + if len(key) > 1: + raise ValueError('keys in translate table must be strings or integers') + result[ord(key)] = value + else: + if not isinstance(x, unicode) and isinstance(y, unicode): + raise TypeError('x and y must be unicode strings') + if not len(x) == len(y): + raise ValueError('the first two maketrans arguments must have equal length') + result = {} + for (xi, yi) in zip(x, y): + if len(xi) > 1: + raise ValueError('keys in translate table must be strings or integers') + result[ord(xi)] = ord(yi) + + if z is not None: + for char in z: + result[ord(char)] = None + return result + + def translate(self, table): + """ + S.translate(table) -> str + + Return a copy of the string S, where all characters have been mapped + through the given translation table, which must be a mapping of + Unicode ordinals to Unicode ordinals, strings, or None. + Unmapped characters are left untouched. Characters mapped to None + are deleted. + """ + l = [] + for c in self: + if ord(c) in table: + val = table[ord(c)] + if val is None: + continue + elif isinstance(val, unicode): + l.append(val) + else: + l.append(chr(val)) + else: + l.append(c) + return ''.join(l) + + def isprintable(self): + raise NotImplementedError('fixme') + + def isidentifier(self): + raise NotImplementedError('fixme') + + def format_map(self): + raise NotImplementedError('fixme') + + +__all__ = ['newstr'] diff --git a/pype/modules/ftrack/python2_vendor/future/utils/__init__.py b/pype/modules/ftrack/python2_vendor/future/utils/__init__.py new file mode 100644 index 0000000000..46bd96def3 --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/future/utils/__init__.py @@ -0,0 +1,767 @@ +""" +A selection of cross-compatible functions for Python 2 and 3. + +This module exports useful functions for 2/3 compatible code: + + * bind_method: binds functions to classes + * ``native_str_to_bytes`` and ``bytes_to_native_str`` + * ``native_str``: always equal to the native platform string object (because + this may be shadowed by imports from future.builtins) + * lists: lrange(), lmap(), lzip(), lfilter() + * iterable method compatibility: + - iteritems, iterkeys, itervalues + - viewitems, viewkeys, viewvalues + + These use the original method if available, otherwise they use items, + keys, values. + + * types: + + * text_type: unicode in Python 2, str in Python 3 + * string_types: basestring in Python 2, str in Python 3 + * binary_type: str in Python 2, bytes in Python 3 + * integer_types: (int, long) in Python 2, int in Python 3 + * class_types: (type, types.ClassType) in Python 2, type in Python 3 + + * bchr(c): + Take an integer and make a 1-character byte string + * bord(c) + Take the result of indexing on a byte string and make an integer + * tobytes(s) + Take a text string, a byte string, or a sequence of characters taken + from a byte string, and make a byte string. 
+ + * raise_from() + * raise_with_traceback() + +This module also defines these decorators: + + * ``python_2_unicode_compatible`` + * ``with_metaclass`` + * ``implements_iterator`` + +Some of the functions in this module come from the following sources: + + * Jinja2 (BSD licensed: see + https://github.com/mitsuhiko/jinja2/blob/master/LICENSE) + * Pandas compatibility module pandas.compat + * six.py by Benjamin Peterson + * Django +""" + +import types +import sys +import numbers +import functools +import copy +import inspect + + +PY3 = sys.version_info[0] >= 3 +PY34_PLUS = sys.version_info[0:2] >= (3, 4) +PY35_PLUS = sys.version_info[0:2] >= (3, 5) +PY36_PLUS = sys.version_info[0:2] >= (3, 6) +PY2 = sys.version_info[0] == 2 +PY26 = sys.version_info[0:2] == (2, 6) +PY27 = sys.version_info[0:2] == (2, 7) +PYPY = hasattr(sys, 'pypy_translation_info') + + +def python_2_unicode_compatible(cls): + """ + A decorator that defines __unicode__ and __str__ methods under Python + 2. Under Python 3, this decorator is a no-op. + + To support Python 2 and 3 with a single code base, define a __str__ + method returning unicode text and apply this decorator to the class, like + this:: + + >>> from future.utils import python_2_unicode_compatible + + >>> @python_2_unicode_compatible + ... class MyClass(object): + ... def __str__(self): + ... return u'Unicode string: \u5b54\u5b50' + + >>> a = MyClass() + + Then, after this import: + + >>> from future.builtins import str + + the following is ``True`` on both Python 3 and 2:: + + >>> str(a) == a.encode('utf-8').decode('utf-8') + True + + and, on a Unicode-enabled terminal with the right fonts, these both print the + Chinese characters for Confucius:: + + >>> print(a) + >>> print(str(a)) + + The implementation comes from django.utils.encoding. + """ + if not PY3: + cls.__unicode__ = cls.__str__ + cls.__str__ = lambda self: self.__unicode__().encode('utf-8') + return cls + + +def with_metaclass(meta, *bases): + """ + Function from jinja2/_compat.py. License: BSD. + + Use it like this:: + + class BaseForm(object): + pass + + class FormType(type): + pass + + class Form(with_metaclass(FormType, BaseForm)): + pass + + This requires a bit of explanation: the basic idea is to make a + dummy metaclass for one level of class instantiation that replaces + itself with the actual metaclass. Because of internal type checks + we also need to make sure that we downgrade the custom metaclass + for one level to something closer to type (that's why __call__ and + __init__ comes back from type etc.). + + This has the advantage over six.with_metaclass of not introducing + dummy classes into the final MRO. 
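+
+    A hedged sketch of the resulting behaviour (added for illustration;
+    ``Meta`` and ``C`` are hypothetical names):
+
+        class Meta(type):
+            pass
+
+        class C(with_metaclass(Meta, object)):
+            pass
+
+        assert type(C) is Meta            # the real metaclass is applied
+        assert C.__mro__ == (C, object)   # no dummy class left in the MRO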
+ """ + class metaclass(meta): + __call__ = type.__call__ + __init__ = type.__init__ + def __new__(cls, name, this_bases, d): + if this_bases is None: + return type.__new__(cls, name, (), d) + return meta(name, bases, d) + return metaclass('temporary_class', None, {}) + + +# Definitions from pandas.compat and six.py follow: +if PY3: + def bchr(s): + return bytes([s]) + def bstr(s): + if isinstance(s, str): + return bytes(s, 'latin-1') + else: + return bytes(s) + def bord(s): + return s + + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + +else: + # Python 2 + def bchr(s): + return chr(s) + def bstr(s): + return str(s) + def bord(s): + return ord(s) + + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + +### + +if PY3: + def tobytes(s): + if isinstance(s, bytes): + return s + else: + if isinstance(s, str): + return s.encode('latin-1') + else: + return bytes(s) +else: + # Python 2 + def tobytes(s): + if isinstance(s, unicode): + return s.encode('latin-1') + else: + return ''.join(s) + +tobytes.__doc__ = """ + Encodes to latin-1 (where the first 256 chars are the same as + ASCII.) + """ + +if PY3: + def native_str_to_bytes(s, encoding='utf-8'): + return s.encode(encoding) + + def bytes_to_native_str(b, encoding='utf-8'): + return b.decode(encoding) + + def text_to_native_str(t, encoding=None): + return t +else: + # Python 2 + def native_str_to_bytes(s, encoding=None): + from future.types import newbytes # to avoid a circular import + return newbytes(s) + + def bytes_to_native_str(b, encoding=None): + return native(b) + + def text_to_native_str(t, encoding='ascii'): + """ + Use this to create a Py2 native string when "from __future__ import + unicode_literals" is in effect. + """ + return unicode(t).encode(encoding) + +native_str_to_bytes.__doc__ = """ + On Py3, returns an encoded string. + On Py2, returns a newbytes type, ignoring the ``encoding`` argument. + """ + +if PY3: + # list-producing versions of the major Python iterating functions + def lrange(*args, **kwargs): + return list(range(*args, **kwargs)) + + def lzip(*args, **kwargs): + return list(zip(*args, **kwargs)) + + def lmap(*args, **kwargs): + return list(map(*args, **kwargs)) + + def lfilter(*args, **kwargs): + return list(filter(*args, **kwargs)) +else: + import __builtin__ + # Python 2-builtin ranges produce lists + lrange = __builtin__.range + lzip = __builtin__.zip + lmap = __builtin__.map + lfilter = __builtin__.filter + + +def isidentifier(s, dotted=False): + ''' + A function equivalent to the str.isidentifier method on Py3 + ''' + if dotted: + return all(isidentifier(a) for a in s.split('.')) + if PY3: + return s.isidentifier() + else: + import re + _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") + return bool(_name_re.match(s)) + + +def viewitems(obj, **kwargs): + """ + Function for iterating over dictionary items with the same set-like + behaviour on Py2.7 as on Py3. + + Passes kwargs to method.""" + func = getattr(obj, "viewitems", None) + if not func: + func = obj.items + return func(**kwargs) + + +def viewkeys(obj, **kwargs): + """ + Function for iterating over dictionary keys with the same set-like + behaviour on Py2.7 as on Py3. 
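+    A hedged example (added for illustration):
+
+        >>> 'a' in viewkeys({'a': 1, 'b': 2})
+        True
+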
+ + Passes kwargs to method.""" + func = getattr(obj, "viewkeys", None) + if not func: + func = obj.keys + return func(**kwargs) + + +def viewvalues(obj, **kwargs): + """ + Function for iterating over dictionary values with the same set-like + behaviour on Py2.7 as on Py3. + + Passes kwargs to method.""" + func = getattr(obj, "viewvalues", None) + if not func: + func = obj.values + return func(**kwargs) + + +def iteritems(obj, **kwargs): + """Use this only if compatibility with Python versions before 2.7 is + required. Otherwise, prefer viewitems(). + """ + func = getattr(obj, "iteritems", None) + if not func: + func = obj.items + return func(**kwargs) + + +def iterkeys(obj, **kwargs): + """Use this only if compatibility with Python versions before 2.7 is + required. Otherwise, prefer viewkeys(). + """ + func = getattr(obj, "iterkeys", None) + if not func: + func = obj.keys + return func(**kwargs) + + +def itervalues(obj, **kwargs): + """Use this only if compatibility with Python versions before 2.7 is + required. Otherwise, prefer viewvalues(). + """ + func = getattr(obj, "itervalues", None) + if not func: + func = obj.values + return func(**kwargs) + + +def bind_method(cls, name, func): + """Bind a method to class, python 2 and python 3 compatible. + + Parameters + ---------- + + cls : type + class to receive bound method + name : basestring + name of method on class instance + func : function + function to be bound as method + + Returns + ------- + None + """ + # only python 2 has an issue with bound/unbound methods + if not PY3: + setattr(cls, name, types.MethodType(func, None, cls)) + else: + setattr(cls, name, func) + + +def getexception(): + return sys.exc_info()[1] + + +def _get_caller_globals_and_locals(): + """ + Returns the globals and locals of the calling frame. + + Is there an alternative to frame hacking here? + """ + caller_frame = inspect.stack()[2] + myglobals = caller_frame[0].f_globals + mylocals = caller_frame[0].f_locals + return myglobals, mylocals + + +def _repr_strip(mystring): + """ + Returns the string without any initial or final quotes. + """ + r = repr(mystring) + if r.startswith("'") and r.endswith("'"): + return r[1:-1] + else: + return r + + +if PY3: + def raise_from(exc, cause): + """ + Equivalent to: + + raise EXCEPTION from CAUSE + + on Python 3. (See PEP 3134). + """ + myglobals, mylocals = _get_caller_globals_and_locals() + + # We pass the exception and cause along with other globals + # when we exec(): + myglobals = myglobals.copy() + myglobals['__python_future_raise_from_exc'] = exc + myglobals['__python_future_raise_from_cause'] = cause + execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause" + exec(execstr, myglobals, mylocals) + + def raise_(tp, value=None, tb=None): + """ + A function that matches the Python 2.x ``raise`` statement. This + allows re-raising exceptions with the cls value and traceback on + Python 2 and 3. + """ + if isinstance(tp, BaseException): + # If the first object is an instance, the type of the exception + # is the class of the instance, the instance itself is the value, + # and the second object must be None. + if value is not None: + raise TypeError("instance exception may not have a separate value") + exc = tp + elif isinstance(tp, type) and not issubclass(tp, BaseException): + # If the first object is a class, it becomes the type of the + # exception. 
+ raise TypeError("class must derive from BaseException, not %s" % tp.__name__) + else: + # The second object is used to determine the exception value: If it + # is an instance of the class, the instance becomes the exception + # value. If the second object is a tuple, it is used as the argument + # list for the class constructor; if it is None, an empty argument + # list is used, and any other object is treated as a single argument + # to the constructor. The instance so created by calling the + # constructor is used as the exception value. + if isinstance(value, tp): + exc = value + elif isinstance(value, tuple): + exc = tp(*value) + elif value is None: + exc = tp() + else: + exc = tp(value) + + if exc.__traceback__ is not tb: + raise exc.with_traceback(tb) + raise exc + + def raise_with_traceback(exc, traceback=Ellipsis): + if traceback == Ellipsis: + _, _, traceback = sys.exc_info() + raise exc.with_traceback(traceback) + +else: + def raise_from(exc, cause): + """ + Equivalent to: + + raise EXCEPTION from CAUSE + + on Python 3. (See PEP 3134). + """ + # Is either arg an exception class (e.g. IndexError) rather than + # instance (e.g. IndexError('my message here')? If so, pass the + # name of the class undisturbed through to "raise ... from ...". + if isinstance(exc, type) and issubclass(exc, Exception): + e = exc() + # exc = exc.__name__ + # execstr = "e = " + _repr_strip(exc) + "()" + # myglobals, mylocals = _get_caller_globals_and_locals() + # exec(execstr, myglobals, mylocals) + else: + e = exc + e.__suppress_context__ = False + if isinstance(cause, type) and issubclass(cause, Exception): + e.__cause__ = cause() + e.__cause__.__traceback__ = sys.exc_info()[2] + e.__suppress_context__ = True + elif cause is None: + e.__cause__ = None + e.__suppress_context__ = True + elif isinstance(cause, BaseException): + e.__cause__ = cause + object.__setattr__(e.__cause__, '__traceback__', sys.exc_info()[2]) + e.__suppress_context__ = True + else: + raise TypeError("exception causes must derive from BaseException") + e.__context__ = sys.exc_info()[1] + raise e + + exec(''' +def raise_(tp, value=None, tb=None): + raise tp, value, tb + +def raise_with_traceback(exc, traceback=Ellipsis): + if traceback == Ellipsis: + _, _, traceback = sys.exc_info() + raise exc, None, traceback +'''.strip()) + + +raise_with_traceback.__doc__ = ( +"""Raise exception with existing traceback. +If traceback is not passed, uses sys.exc_info() to get traceback.""" +) + + +# Deprecated alias for backward compatibility with ``future`` versions < 0.11: +reraise = raise_ + + +def implements_iterator(cls): + ''' + From jinja2/_compat.py. License: BSD. + + Use as a decorator like this:: + + @implements_iterator + class UppercasingIterator(object): + def __init__(self, iterable): + self._iter = iter(iterable) + def __iter__(self): + return self + def __next__(self): + return next(self._iter).upper() + + ''' + if PY3: + return cls + else: + cls.next = cls.__next__ + del cls.__next__ + return cls + +if PY3: + get_next = lambda x: x.next +else: + get_next = lambda x: x.__next__ + + +def encode_filename(filename): + if PY3: + return filename + else: + if isinstance(filename, unicode): + return filename.encode('utf-8') + return filename + + +def is_new_style(cls): + """ + Python 2.7 has both new-style and old-style classes. Old-style classes can + be pesky in some circumstances, such as when using inheritance. Use this + function to test for whether a class is new-style. (Python 3 only has + new-style classes.) 
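+
+    A hedged Py2 sketch (added for illustration; ``Old`` and ``New`` are
+    hypothetical names):
+
+        class Old: pass               # old-style: is_new_style(Old) -> False
+        class New(object): pass       # new-style: is_new_style(New) -> True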
+    """
+    return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
+                                          or hasattr(cls, '__slots__'))
+
+# The native platform string and bytes types. Useful because ``str`` and
+# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
+native_str = str
+native_bytes = bytes
+
+
+def istext(obj):
+    """
+    Deprecated. Use::
+        >>> isinstance(obj, str)
+    after this import:
+        >>> from future.builtins import str
+    """
+    return isinstance(obj, type(u''))
+
+
+def isbytes(obj):
+    """
+    Deprecated. Use::
+        >>> isinstance(obj, bytes)
+    after this import:
+        >>> from future.builtins import bytes
+    """
+    return isinstance(obj, type(b''))
+
+
+def isnewbytes(obj):
+    """
+    Equivalent to the result of ``type(obj) == newbytes``; in other words,
+    is the object REALLY a newbytes instance, not a Py2 native str object?
+
+    Note that this does not cover subclasses of newbytes, and it is not
+    equivalent to isinstance(obj, newbytes)
+    """
+    return type(obj).__name__ == 'newbytes'
+
+
+def isint(obj):
+    """
+    Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
+    ``long``.
+
+    Instead of using this function, you can use:
+
+    >>> from future.builtins import int
+    >>> isinstance(obj, int)
+
+    The following idiom is equivalent:
+
+    >>> from numbers import Integral
+    >>> isinstance(obj, Integral)
+    """
+
+    return isinstance(obj, numbers.Integral)
+
+
+def native(obj):
+    """
+    On Py3, this is a no-op: native(obj) -> obj
+
+    On Py2, returns the corresponding native Py2 types that are
+    superclasses for backported objects from Py3:
+
+    >>> from builtins import str, bytes, int
+
+    >>> native(str(u'ABC'))
+    u'ABC'
+    >>> type(native(str(u'ABC')))
+    unicode
+
+    >>> native(bytes(b'ABC'))
+    b'ABC'
+    >>> type(native(bytes(b'ABC')))
+    bytes
+
+    >>> native(int(10**20))
+    100000000000000000000L
+    >>> type(native(int(10**20)))
+    long
+
+    Existing native types on Py2 will be returned unchanged:
+
+    >>> type(native(u'ABC'))
+    unicode
+    """
+    if hasattr(obj, '__native__'):
+        return obj.__native__()
+    else:
+        return obj
+
+
+# Implementation of exec_ is from ``six``:
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+else:
+    def exec_(code, globs=None, locs=None):
+        """Execute code in a namespace."""
+        if globs is None:
+            frame = sys._getframe(1)
+            globs = frame.f_globals
+            if locs is None:
+                locs = frame.f_locals
+            del frame
+        elif locs is None:
+            locs = globs
+        exec("""exec code in globs, locs""")
+
+
+# Defined here for backward compatibility:
+def old_div(a, b):
+    """
+    DEPRECATED: import ``old_div`` from ``past.utils`` instead.
+
+    Equivalent to ``a / b`` on Python 2 without ``from __future__ import
+    division``.
+
+    TODO: generalize this to other objects (like arrays etc.)
+    """
+    if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
+        return a // b
+    else:
+        return a / b
+
+
+def as_native_str(encoding='utf-8'):
+    '''
+    A decorator to turn a function or method call that returns text, i.e.
+    unicode, into one that returns a native platform str.
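+
+    On Py3 the decorated function is returned unchanged; on Py2 the unicode
+    return value is encoded to a byte string with the given ``encoding``
+    (default ``utf-8``), so callers get a native ``str`` on both platforms.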
+
+    Use it as a decorator like this::
+
+        from __future__ import unicode_literals
+
+        class MyClass(object):
+            @as_native_str(encoding='ascii')
+            def __repr__(self):
+                return next(self._iter).upper()
+    '''
+    if PY3:
+        return lambda f: f
+    else:
+        def encoder(f):
+            @functools.wraps(f)
+            def wrapper(*args, **kwargs):
+                return f(*args, **kwargs).encode(encoding=encoding)
+            return wrapper
+        return encoder
+
+# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
+# PEP 469:
+try:
+    dict.iteritems
+except AttributeError:
+    # Python 3
+    def listvalues(d):
+        return list(d.values())
+    def listitems(d):
+        return list(d.items())
+else:
+    # Python 2
+    def listvalues(d):
+        return d.values()
+    def listitems(d):
+        return d.items()
+
+if PY3:
+    def ensure_new_type(obj):
+        return obj
+else:
+    def ensure_new_type(obj):
+        from future.types.newbytes import newbytes
+        from future.types.newstr import newstr
+        from future.types.newint import newint
+        from future.types.newdict import newdict
+
+        native_type = type(native(obj))
+
+        # Upcast only if the type is already a native (non-future) type
+        if issubclass(native_type, type(obj)):
+            # Upcast
+            if native_type == str:  # i.e. Py2 8-bit str
+                return newbytes(obj)
+            elif native_type == unicode:
+                return newstr(obj)
+            elif native_type == int:
+                return newint(obj)
+            elif native_type == long:
+                return newint(obj)
+            elif native_type == dict:
+                return newdict(obj)
+            else:
+                return obj
+        else:
+            # Already a new type
+            assert type(obj) in [newbytes, newstr]
+            return obj
+
+
+__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
+           'as_native_str', 'binary_type', 'bind_method', 'bord', 'bstr',
+           'bytes_to_native_str', 'class_types', 'encode_filename',
+           'ensure_new_type', 'exec_', 'get_next', 'getexception',
+           'implements_iterator', 'integer_types', 'is_new_style', 'isbytes',
+           'isidentifier', 'isint', 'isnewbytes', 'istext', 'iteritems',
+           'iterkeys', 'itervalues', 'lfilter', 'listitems', 'listvalues',
+           'lmap', 'lrange', 'lzip', 'native', 'native_bytes', 'native_str',
+           'native_str_to_bytes', 'old_div',
+           'python_2_unicode_compatible', 'raise_',
+           'raise_with_traceback', 'reraise', 'string_types',
+           'text_to_native_str', 'text_type', 'tobytes', 'viewitems',
+           'viewkeys', 'viewvalues', 'with_metaclass'
+           ]
diff --git a/pype/modules/ftrack/python2_vendor/future/utils/surrogateescape.py b/pype/modules/ftrack/python2_vendor/future/utils/surrogateescape.py
new file mode 100644
index 0000000000..0dcc9fa6e6
--- /dev/null
+++ b/pype/modules/ftrack/python2_vendor/future/utils/surrogateescape.py
@@ -0,0 +1,198 @@
+"""
+This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error
+handler of Python 3.
+
+Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc
+"""
+
+# This code is released under the Python license and the BSD 2-clause license
+
+import codecs
+import sys
+
+from future import utils
+
+
+FS_ERRORS = 'surrogateescape'
+
+# # -- Python 2/3 compatibility -------------------------------------
+# FS_ERRORS = 'my_surrogateescape'
+
+def u(text):
+    if utils.PY3:
+        return text
+    else:
+        return text.decode('unicode_escape')
+
+def b(data):
+    if utils.PY3:
+        return data.encode('latin1')
+    else:
+        return data
+
+if utils.PY3:
+    _unichr = chr
+    bytes_chr = lambda code: bytes((code,))
+else:
+    _unichr = unichr
+    bytes_chr = chr
+
+def surrogateescape_handler(exc):
+    """
+    Pure Python implementation of PEP 383: the "surrogateescape" error
+    handler of Python 3.
Undecodable bytes will be replaced by a Unicode + character U+DCxx on decoding, and these are translated into the + original bytes on encoding. + """ + mystring = exc.object[exc.start:exc.end] + + try: + if isinstance(exc, UnicodeDecodeError): + # mystring is a byte-string in this case + decoded = replace_surrogate_decode(mystring) + elif isinstance(exc, UnicodeEncodeError): + # In the case of u'\udcc3'.encode('ascii', + # 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an + # exception anyway after this function is called, even though I think + # it's doing what it should. It seems that the strict encoder is called + # to encode the unicode string that this function returns ... + decoded = replace_surrogate_encode(mystring) + else: + raise exc + except NotASurrogateError: + raise exc + return (decoded, exc.end) + + +class NotASurrogateError(Exception): + pass + + +def replace_surrogate_encode(mystring): + """ + Returns a (unicode) string, not the more logical bytes, because the codecs + register_error functionality expects this. + """ + decoded = [] + for ch in mystring: + # if utils.PY3: + # code = ch + # else: + code = ord(ch) + + # The following magic comes from Py3.3's Python/codecs.c file: + if not 0xD800 <= code <= 0xDCFF: + # Not a surrogate. Fail with the original exception. + raise NotASurrogateError + # mybytes = [0xe0 | (code >> 12), + # 0x80 | ((code >> 6) & 0x3f), + # 0x80 | (code & 0x3f)] + # Is this a good idea? + if 0xDC00 <= code <= 0xDC7F: + decoded.append(_unichr(code - 0xDC00)) + elif code <= 0xDCFF: + decoded.append(_unichr(code - 0xDC00)) + else: + raise NotASurrogateError + return str().join(decoded) + + +def replace_surrogate_decode(mybytes): + """ + Returns a (unicode) string + """ + decoded = [] + for ch in mybytes: + # We may be parsing newbytes (in which case ch is an int) or a native + # str on Py2 + if isinstance(ch, int): + code = ch + else: + code = ord(ch) + if 0x80 <= code <= 0xFF: + decoded.append(_unichr(0xDC00 + code)) + elif code <= 0x7F: + decoded.append(_unichr(code)) + else: + # # It may be a bad byte + # # Try swallowing it. + # continue + # print("RAISE!") + raise NotASurrogateError + return str().join(decoded) + + +def encodefilename(fn): + if FS_ENCODING == 'ascii': + # ASCII encoder of Python 2 expects that the error handler returns a + # Unicode string encodable to ASCII, whereas our surrogateescape error + # handler has to return bytes in 0x80-0xFF range. 
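+        # Each lone surrogate U+DC80..U+DCFF therefore maps back to the single
+        # byte 0x80..0xFF it was decoded from (code - 0xDC00); any other
+        # non-ASCII character is a genuine encoding error.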
+ encoded = [] + for index, ch in enumerate(fn): + code = ord(ch) + if code < 128: + ch = bytes_chr(code) + elif 0xDC80 <= code <= 0xDCFF: + ch = bytes_chr(code - 0xDC00) + else: + raise UnicodeEncodeError(FS_ENCODING, + fn, index, index+1, + 'ordinal not in range(128)') + encoded.append(ch) + return bytes().join(encoded) + elif FS_ENCODING == 'utf-8': + # UTF-8 encoder of Python 2 encodes surrogates, so U+DC80-U+DCFF + # doesn't go through our error handler + encoded = [] + for index, ch in enumerate(fn): + code = ord(ch) + if 0xD800 <= code <= 0xDFFF: + if 0xDC80 <= code <= 0xDCFF: + ch = bytes_chr(code - 0xDC00) + encoded.append(ch) + else: + raise UnicodeEncodeError( + FS_ENCODING, + fn, index, index+1, 'surrogates not allowed') + else: + ch_utf8 = ch.encode('utf-8') + encoded.append(ch_utf8) + return bytes().join(encoded) + else: + return fn.encode(FS_ENCODING, FS_ERRORS) + +def decodefilename(fn): + return fn.decode(FS_ENCODING, FS_ERRORS) + +FS_ENCODING = 'ascii'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]') +# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]') +# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]') + + +# normalize the filesystem encoding name. +# For example, we expect "utf-8", not "UTF8". +FS_ENCODING = codecs.lookup(FS_ENCODING).name + + +def register_surrogateescape(): + """ + Registers the surrogateescape error handler on Python 2 (only) + """ + if utils.PY3: + return + try: + codecs.lookup_error(FS_ERRORS) + except LookupError: + codecs.register_error(FS_ERRORS, surrogateescape_handler) + + +if __name__ == '__main__': + pass + # # Tests: + # register_surrogateescape() + + # b = decodefilename(fn) + # assert b == encoded, "%r != %r" % (b, encoded) + # c = encodefilename(b) + # assert c == fn, '%r != %r' % (c, fn) + # # print("ok") diff --git a/pype/modules/ftrack/python2_vendor/reprlib/__init__.py b/pype/modules/ftrack/python2_vendor/reprlib/__init__.py new file mode 100644 index 0000000000..6ccf9c006f --- /dev/null +++ b/pype/modules/ftrack/python2_vendor/reprlib/__init__.py @@ -0,0 +1,9 @@ +from __future__ import absolute_import +import sys + +if sys.version_info[0] < 3: + from repr import * +else: + raise ImportError('This package should not be accessible on Python 3. ' + 'Either you are trying to run from the python-future src folder ' + 'or your installation of python-future is corrupted.') diff --git a/pype/vendor/backports/configparser/__init__.py b/pype/vendor/backports/configparser/__init__.py deleted file mode 100644 index 06d7a0855f..0000000000 --- a/pype/vendor/backports/configparser/__init__.py +++ /dev/null @@ -1,1390 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -"""Configuration file parser. - -A configuration file consists of sections, lead by a "[section]" header, -and followed by "name: value" entries, with continuations and such in -the style of RFC 822. - -Intrinsic defaults can be specified by passing them into the -ConfigParser constructor as a dictionary. - -class: - -ConfigParser -- responsible for parsing a list of - configuration files, and managing the parsed database. - - methods: - - __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, - delimiters=('=', ':'), comment_prefixes=('#', ';'), - inline_comment_prefixes=None, strict=True, - empty_lines_in_values=True, default_section='DEFAULT', - interpolation=, converters=): - Create the parser. When `defaults' is given, it is initialized into the - dictionary or intrinsic defaults. 
The keys must be strings, the values - must be appropriate for %()s string interpolation. - - When `dict_type' is given, it will be used to create the dictionary - objects for the list of sections, for the options within a section, and - for the default values. - - When `delimiters' is given, it will be used as the set of substrings - that divide keys from values. - - When `comment_prefixes' is given, it will be used as the set of - substrings that prefix comments in empty lines. Comments can be - indented. - - When `inline_comment_prefixes' is given, it will be used as the set of - substrings that prefix comments in non-empty lines. - - When `strict` is True, the parser won't allow for any section or option - duplicates while reading from a single source (file, string or - dictionary). Default is True. - - When `empty_lines_in_values' is False (default: True), each empty line - marks the end of an option. Otherwise, internal empty lines of - a multiline option are kept as part of the value. - - When `allow_no_value' is True (default: False), options without - values are accepted; the value presented for these is None. - - sections() - Return all the configuration section names, sans DEFAULT. - - has_section(section) - Return whether the given section exists. - - has_option(section, option) - Return whether the given option exists in the given section. - - options(section) - Return list of configuration options for the named section. - - read(filenames, encoding=None) - Read and parse the list of named configuration files, given by - name. A single filename is also allowed. Non-existing files - are ignored. Return list of successfully read files. - - read_file(f, filename=None) - Read and parse one configuration file, given as a file object. - The filename defaults to f.name; it is only used in error - messages (if f has no `name' attribute, the string `' is used). - - read_string(string) - Read configuration from a given string. - - read_dict(dictionary) - Read configuration from a dictionary. Keys are section names, - values are dictionaries with keys and values that should be present - in the section. If the used dictionary type preserves order, sections - and their keys will be added in order. Values are automatically - converted to strings. - - get(section, option, raw=False, vars=None, fallback=_UNSET) - Return a string value for the named option. All % interpolations are - expanded in the return values, based on the defaults passed into the - constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. If `option' is a key in - `vars', the value from `vars' is used. - - getint(section, options, raw=False, vars=None, fallback=_UNSET) - Like get(), but convert value to an integer. - - getfloat(section, options, raw=False, vars=None, fallback=_UNSET) - Like get(), but convert value to a float. - - getboolean(section, options, raw=False, vars=None, fallback=_UNSET) - Like get(), but convert value to a boolean (currently case - insensitively defined as 0, false, no, off for False, and 1, true, - yes, on for True). Returns False or True. - - items(section=_UNSET, raw=False, vars=None) - If section is given, return a list of tuples with (name, value) for - each option in the section. Otherwise, return a list of tuples with - (section_name, section_proxy) for each section, including DEFAULTSECT. 
- - remove_section(section) - Remove the given file section and all its options. - - remove_option(section, option) - Remove the given option from the given section. - - set(section, option, value) - Set the given option. - - write(fp, space_around_delimiters=True) - Write the configuration state in .ini format. If - `space_around_delimiters' is True (the default), delimiters - between keys and values are surrounded by spaces. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from collections import MutableMapping -import functools -import io -import itertools -import re -import sys -import warnings - -from backports.configparser.helpers import OrderedDict as _default_dict -from backports.configparser.helpers import ChainMap as _ChainMap -from backports.configparser.helpers import from_none, open, str, PY2 - -__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", - "NoOptionError", "InterpolationError", "InterpolationDepthError", - "InterpolationMissingOptionError", "InterpolationSyntaxError", - "ParsingError", "MissingSectionHeaderError", - "ConfigParser", "SafeConfigParser", "RawConfigParser", - "Interpolation", "BasicInterpolation", "ExtendedInterpolation", - "LegacyInterpolation", "SectionProxy", "ConverterMapping", - "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] - -DEFAULTSECT = "DEFAULT" - -MAX_INTERPOLATION_DEPTH = 10 - - -# exception classes -class Error(Exception): - """Base class for ConfigParser exceptions.""" - - def __init__(self, msg=''): - self.message = msg - Exception.__init__(self, msg) - - def __repr__(self): - return self.message - - __str__ = __repr__ - - -class NoSectionError(Error): - """Raised when no section matches a requested option.""" - - def __init__(self, section): - Error.__init__(self, 'No section: %r' % (section,)) - self.section = section - self.args = (section, ) - - -class DuplicateSectionError(Error): - """Raised when a section is repeated in an input source. - - Possible repetitions that raise this exception are: multiple creation - using the API or in strict parsers when a section is found more than once - in a single input file, string or dictionary. - """ - - def __init__(self, section, source=None, lineno=None): - msg = [repr(section), " already exists"] - if source is not None: - message = ["While reading from ", repr(source)] - if lineno is not None: - message.append(" [line {0:2d}]".format(lineno)) - message.append(": section ") - message.extend(msg) - msg = message - else: - msg.insert(0, "Section ") - Error.__init__(self, "".join(msg)) - self.section = section - self.source = source - self.lineno = lineno - self.args = (section, source, lineno) - - -class DuplicateOptionError(Error): - """Raised by strict parsers when an option is repeated in an input source. - - Current implementation raises this exception only when an option is found - more than once in a single file, string or dictionary. 
- """ - - def __init__(self, section, option, source=None, lineno=None): - msg = [repr(option), " in section ", repr(section), - " already exists"] - if source is not None: - message = ["While reading from ", repr(source)] - if lineno is not None: - message.append(" [line {0:2d}]".format(lineno)) - message.append(": option ") - message.extend(msg) - msg = message - else: - msg.insert(0, "Option ") - Error.__init__(self, "".join(msg)) - self.section = section - self.option = option - self.source = source - self.lineno = lineno - self.args = (section, option, source, lineno) - - -class NoOptionError(Error): - """A requested option was not found.""" - - def __init__(self, option, section): - Error.__init__(self, "No option %r in section: %r" % - (option, section)) - self.option = option - self.section = section - self.args = (option, section) - - -class InterpolationError(Error): - """Base class for interpolation-related exceptions.""" - - def __init__(self, option, section, msg): - Error.__init__(self, msg) - self.option = option - self.section = section - self.args = (option, section, msg) - - -class InterpolationMissingOptionError(InterpolationError): - """A string substitution required a setting which was not available.""" - - def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution: option {0!r} in section {1!r} contains " - "an interpolation key {2!r} which is not a valid option name. " - "Raw value: {3!r}".format(option, section, reference, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.reference = reference - self.args = (option, section, rawval, reference) - - -class InterpolationSyntaxError(InterpolationError): - """Raised when the source text contains invalid syntax. - - Current implementation raises this exception when the source text into - which substitutions are made does not conform to the required syntax. - """ - - -class InterpolationDepthError(InterpolationError): - """Raised when substitutions are nested too deeply.""" - - def __init__(self, option, section, rawval): - msg = ("Recursion limit exceeded in value substitution: option {0!r} " - "in section {1!r} contains an interpolation key which " - "cannot be substituted in {2} steps. Raw value: {3!r}" - "".format(option, section, MAX_INTERPOLATION_DEPTH, - rawval)) - InterpolationError.__init__(self, option, section, msg) - self.args = (option, section, rawval) - - -class ParsingError(Error): - """Raised when a configuration file does not follow legal syntax.""" - - def __init__(self, source=None, filename=None): - # Exactly one of `source'/`filename' arguments has to be given. - # `filename' kept for compatibility. - if filename and source: - raise ValueError("Cannot specify both `filename' and `source'. " - "Use `source'.") - elif not filename and not source: - raise ValueError("Required argument `source' not given.") - elif filename: - source = filename - Error.__init__(self, 'Source contains parsing errors: %r' % source) - self.source = source - self.errors = [] - self.args = (source, ) - - @property - def filename(self): - """Deprecated, use `source'.""" - warnings.warn( - "The 'filename' attribute will be removed in future versions. " - "Use 'source' instead.", - DeprecationWarning, stacklevel=2 - ) - return self.source - - @filename.setter - def filename(self, value): - """Deprecated, user `source'.""" - warnings.warn( - "The 'filename' attribute will be removed in future versions. 
" - "Use 'source' instead.", - DeprecationWarning, stacklevel=2 - ) - self.source = value - - def append(self, lineno, line): - self.errors.append((lineno, line)) - self.message += '\n\t[line %2d]: %s' % (lineno, line) - - -class MissingSectionHeaderError(ParsingError): - """Raised when a key-value pair is found before any section header.""" - - def __init__(self, filename, lineno, line): - Error.__init__( - self, - 'File contains no section headers.\nfile: %r, line: %d\n%r' % - (filename, lineno, line)) - self.source = filename - self.lineno = lineno - self.line = line - self.args = (filename, lineno, line) - - -# Used in parser getters to indicate the default behaviour when a specific -# option is not found it to raise an exception. Created to enable `None' as -# a valid fallback value. -_UNSET = object() - - -class Interpolation(object): - """Dummy interpolation that passes the value through with no changes.""" - - def before_get(self, parser, section, option, value, defaults): - return value - - def before_set(self, parser, section, option, value): - return value - - def before_read(self, parser, section, option, value): - return value - - def before_write(self, parser, section, option, value): - return value - - -class BasicInterpolation(Interpolation): - """Interpolation as implemented in the classic ConfigParser. - - The option values can contain format strings which refer to other values in - the same section, or values in the special default section. - - For example: - - something: %(dir)s/whatever - - would resolve the "%(dir)s" to the value of dir. All reference - expansions are done late, on demand. If a user needs to use a bare % in - a configuration file, she can escape it by writing %%. Other % usage - is considered a user error and raises `InterpolationSyntaxError'.""" - - _KEYCRE = re.compile(r"%\(([^)]+)\)s") - - def before_get(self, parser, section, option, value, defaults): - L = [] - self._interpolate_some(parser, option, L, value, section, defaults, 1) - return ''.join(L) - - def before_set(self, parser, section, option, value): - tmp_value = value.replace('%%', '') # escaped percent signs - tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax - if '%' in tmp_value: - raise ValueError("invalid interpolation syntax in %r at " - "position %d" % (value, tmp_value.find('%'))) - return value - - def _interpolate_some(self, parser, option, accum, rest, section, map, - depth): - rawval = parser.get(section, option, raw=True, fallback=rest) - if depth > MAX_INTERPOLATION_DEPTH: - raise InterpolationDepthError(option, section, rawval) - while rest: - p = rest.find("%") - if p < 0: - accum.append(rest) - return - if p > 0: - accum.append(rest[:p]) - rest = rest[p:] - # p is no longer used - c = rest[1:2] - if c == "%": - accum.append("%") - rest = rest[2:] - elif c == "(": - m = self._KEYCRE.match(rest) - if m is None: - raise InterpolationSyntaxError(option, section, - "bad interpolation variable reference %r" % rest) - var = parser.optionxform(m.group(1)) - rest = rest[m.end():] - try: - v = map[var] - except KeyError: - raise from_none(InterpolationMissingOptionError( - option, section, rawval, var)) - if "%" in v: - self._interpolate_some(parser, option, accum, v, - section, map, depth + 1) - else: - accum.append(v) - else: - raise InterpolationSyntaxError( - option, section, - "'%%' must be followed by '%%' or '(', " - "found: %r" % (rest,)) - - -class ExtendedInterpolation(Interpolation): - """Advanced variant of interpolation, supports the syntax used by - 
`zc.buildout'. Enables interpolation between sections.""" - - _KEYCRE = re.compile(r"\$\{([^}]+)\}") - - def before_get(self, parser, section, option, value, defaults): - L = [] - self._interpolate_some(parser, option, L, value, section, defaults, 1) - return ''.join(L) - - def before_set(self, parser, section, option, value): - tmp_value = value.replace('$$', '') # escaped dollar signs - tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax - if '$' in tmp_value: - raise ValueError("invalid interpolation syntax in %r at " - "position %d" % (value, tmp_value.find('$'))) - return value - - def _interpolate_some(self, parser, option, accum, rest, section, map, - depth): - rawval = parser.get(section, option, raw=True, fallback=rest) - if depth > MAX_INTERPOLATION_DEPTH: - raise InterpolationDepthError(option, section, rawval) - while rest: - p = rest.find("$") - if p < 0: - accum.append(rest) - return - if p > 0: - accum.append(rest[:p]) - rest = rest[p:] - # p is no longer used - c = rest[1:2] - if c == "$": - accum.append("$") - rest = rest[2:] - elif c == "{": - m = self._KEYCRE.match(rest) - if m is None: - raise InterpolationSyntaxError(option, section, - "bad interpolation variable reference %r" % rest) - path = m.group(1).split(':') - rest = rest[m.end():] - sect = section - opt = option - try: - if len(path) == 1: - opt = parser.optionxform(path[0]) - v = map[opt] - elif len(path) == 2: - sect = path[0] - opt = parser.optionxform(path[1]) - v = parser.get(sect, opt, raw=True) - else: - raise InterpolationSyntaxError( - option, section, - "More than one ':' found: %r" % (rest,)) - except (KeyError, NoSectionError, NoOptionError): - raise from_none(InterpolationMissingOptionError( - option, section, rawval, ":".join(path))) - if "$" in v: - self._interpolate_some(parser, opt, accum, v, sect, - dict(parser.items(sect, raw=True)), - depth + 1) - else: - accum.append(v) - else: - raise InterpolationSyntaxError( - option, section, - "'$' must be followed by '$' or '{', " - "found: %r" % (rest,)) - - -class LegacyInterpolation(Interpolation): - """Deprecated interpolation used in old versions of ConfigParser. - Use BasicInterpolation or ExtendedInterpolation instead.""" - - _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") - - def before_get(self, parser, section, option, value, vars): - rawval = value - depth = MAX_INTERPOLATION_DEPTH - while depth: # Loop through this until it's done - depth -= 1 - if value and "%(" in value: - replace = functools.partial(self._interpolation_replace, - parser=parser) - value = self._KEYCRE.sub(replace, value) - try: - value = value % vars - except KeyError as e: - raise from_none(InterpolationMissingOptionError( - option, section, rawval, e.args[0])) - else: - break - if value and "%(" in value: - raise InterpolationDepthError(option, section, rawval) - return value - - def before_set(self, parser, section, option, value): - return value - - @staticmethod - def _interpolation_replace(match, parser): - s = match.group(1) - if s is None: - return match.group() - else: - return "%%(%s)s" % parser.optionxform(s) - - -class RawConfigParser(MutableMapping): - """ConfigParser that does not do interpolation.""" - - # Regular expressions for parsing section headers and options - _SECT_TMPL = r""" - \[ # [ - (?P
<header>[^]]+)                  # very permissive!
-        \]                                 # ]
-        """
-    _OPT_TMPL = r"""
-        (?P