diff --git a/.github/workflows/test_build.yml b/.github/workflows/test_build.yml index 6e1e38d0b2..ac7279117a 100644 --- a/.github/workflows/test_build.yml +++ b/.github/workflows/test_build.yml @@ -37,6 +37,7 @@ jobs: - name: 🔨 Build shell: pwsh run: | + $env:SKIP_THIRD_PARTY_VALIDATION="1" ./tools/build.ps1 Ubuntu-latest: @@ -61,6 +62,7 @@ jobs: - name: 🔨 Build run: | + export SKIP_THIRD_PARTY_VALIDATION="1" ./tools/build.sh # MacOS-latest: diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index cd67f8ca9e..74d38751e1 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -3,7 +3,6 @@ import re import tempfile import attr -from avalon import aftereffects import pyblish.api from openpype.settings import get_project_settings @@ -159,7 +158,7 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender): in url Returns: - (list) of absolut urls to rendered file + (list) of absolute urls to rendered file """ start = render_instance.frameStart end = render_instance.frameEnd diff --git a/openpype/hosts/blender/api/action.py b/openpype/hosts/blender/api/action.py index f3426ac3cf..09ef76326e 100644 --- a/openpype/hosts/blender/api/action.py +++ b/openpype/hosts/blender/api/action.py @@ -25,7 +25,7 @@ class SelectInvalidAction(pyblish.api.Action): invalid.extend(invalid_nodes) else: self.log.warning( - "Failed plug-in doens't have any selectable objects." + "Failed plug-in doesn't have any selectable objects." ) bpy.ops.object.select_all(action='DESELECT') diff --git a/openpype/hosts/blender/api/lib.py b/openpype/hosts/blender/api/lib.py index fe5d3f93e9..e7210f7e31 100644 --- a/openpype/hosts/blender/api/lib.py +++ b/openpype/hosts/blender/api/lib.py @@ -9,7 +9,7 @@ import addon_utils def load_scripts(paths): """Copy of `load_scripts` from Blender's implementation. 
- It is possible that whis function will be changed in future and usage will + It is possible that this function will be changed in future and usage will be based on Blender version. """ import bpy_types diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py index e2a419c8ef..a37f8f0379 100644 --- a/openpype/hosts/blender/hooks/pre_pyside_install.py +++ b/openpype/hosts/blender/hooks/pre_pyside_install.py @@ -21,7 +21,7 @@ class InstallPySideToBlender(PreLaunchHook): platforms = ["windows"] def execute(self): - # Prelaunch hook is not crutial + # Prelaunch hook is not crucial try: self.inner_execute() except Exception: @@ -156,7 +156,7 @@ class InstallPySideToBlender(PreLaunchHook): except pywintypes.error: pass - self.log.warning("Failed to instal PySide2 module to blender.") + self.log.warning("Failed to install PySide2 module to blender.") def is_pyside_installed(self, python_executable): """Check if PySide2 module is in blender's pip list. 
diff --git a/openpype/hosts/blender/plugins/create/create_animation.py b/openpype/hosts/blender/plugins/create/create_animation.py index f7887b7e80..3b4cabe8ec 100644 --- a/openpype/hosts/blender/plugins/create/create_animation.py +++ b/openpype/hosts/blender/plugins/create/create_animation.py @@ -22,7 +22,7 @@ class CreateAnimation(plugin.Creator): ops.execute_in_main_thread(mti) def _process(self): - # Get Instance Containter or create it if it does not exist + # Get Instance Container or create it if it does not exist instances = bpy.data.collections.get(AVALON_INSTANCES) if not instances: instances = bpy.data.collections.new(name=AVALON_INSTANCES) diff --git a/openpype/hosts/blender/plugins/create/create_camera.py b/openpype/hosts/blender/plugins/create/create_camera.py index 98ccca313c..6fa80b5a5d 100644 --- a/openpype/hosts/blender/plugins/create/create_camera.py +++ b/openpype/hosts/blender/plugins/create/create_camera.py @@ -22,7 +22,7 @@ class CreateCamera(plugin.Creator): ops.execute_in_main_thread(mti) def _process(self): - # Get Instance Containter or create it if it does not exist + # Get Instance Container or create it if it does not exist instances = bpy.data.collections.get(AVALON_INSTANCES) if not instances: instances = bpy.data.collections.new(name=AVALON_INSTANCES) diff --git a/openpype/hosts/blender/plugins/create/create_layout.py b/openpype/hosts/blender/plugins/create/create_layout.py index 831261f027..dac12e19b1 100644 --- a/openpype/hosts/blender/plugins/create/create_layout.py +++ b/openpype/hosts/blender/plugins/create/create_layout.py @@ -22,7 +22,7 @@ class CreateLayout(plugin.Creator): ops.execute_in_main_thread(mti) def _process(self): - # Get Instance Containter or create it if it does not exist + # Get Instance Container or create it if it does not exist instances = bpy.data.collections.get(AVALON_INSTANCES) if not instances: instances = bpy.data.collections.new(name=AVALON_INSTANCES) diff --git 
a/openpype/hosts/blender/plugins/create/create_model.py b/openpype/hosts/blender/plugins/create/create_model.py index e778f5b74f..903b70033b 100644 --- a/openpype/hosts/blender/plugins/create/create_model.py +++ b/openpype/hosts/blender/plugins/create/create_model.py @@ -22,7 +22,7 @@ class CreateModel(plugin.Creator): ops.execute_in_main_thread(mti) def _process(self): - # Get Instance Containter or create it if it does not exist + # Get Instance Container or create it if it does not exist instances = bpy.data.collections.get(AVALON_INSTANCES) if not instances: instances = bpy.data.collections.new(name=AVALON_INSTANCES) diff --git a/openpype/hosts/blender/plugins/create/create_rig.py b/openpype/hosts/blender/plugins/create/create_rig.py index 2e1c71f570..ec74e279c6 100644 --- a/openpype/hosts/blender/plugins/create/create_rig.py +++ b/openpype/hosts/blender/plugins/create/create_rig.py @@ -22,7 +22,7 @@ class CreateRig(plugin.Creator): ops.execute_in_main_thread(mti) def _process(self): - # Get Instance Containter or create it if it does not exist + # Get Instance Container or create it if it does not exist instances = bpy.data.collections.get(AVALON_INSTANCES) if not instances: instances = bpy.data.collections.new(name=AVALON_INSTANCES) diff --git a/openpype/hosts/blender/plugins/publish/increment_workfile_version.py b/openpype/hosts/blender/plugins/publish/increment_workfile_version.py index db73842323..b81e1111ea 100644 --- a/openpype/hosts/blender/plugins/publish/increment_workfile_version.py +++ b/openpype/hosts/blender/plugins/publish/increment_workfile_version.py @@ -14,7 +14,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin): def process(self, context): assert all(result["success"] for result in context.data["results"]), ( - "Publishing not succesfull so version is not increased.") + "Publishing not successful so version is not increased.") from openpype.lib import version_up path = context.data["currentFile"] diff --git 
a/openpype/hosts/celaction/plugins/publish/collect_audio.py b/openpype/hosts/celaction/plugins/publish/collect_audio.py index 8d3c1568e6..80c1c37d7e 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_audio.py +++ b/openpype/hosts/celaction/plugins/publish/collect_audio.py @@ -32,7 +32,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): repr = next((r for r in reprs), None) if not repr: raise "Missing `audioMain` representation" - self.log.info(f"represetation is: {repr}") + self.log.info(f"representation is: {repr}") audio_file = repr.get('data', {}).get('path', "") @@ -56,7 +56,7 @@ class AppendCelactionAudio(pyblish.api.ContextPlugin): representations (list): list for all representations Returns: - dict: subsets with version and representaions in keys + dict: subsets with version and representations in keys """ # Query all subsets for asset diff --git a/openpype/hosts/flame/api/__init__.py b/openpype/hosts/flame/api/__init__.py index dc47488dc1..8e5418c78b 100644 --- a/openpype/hosts/flame/api/__init__.py +++ b/openpype/hosts/flame/api/__init__.py @@ -23,10 +23,17 @@ from .lib import ( get_sequence_segments, maintained_segment_selection, reset_segment_selection, - get_segment_attributes + get_segment_attributes, + get_clips_in_reels, + get_reformated_filename, + get_frame_from_filename, + get_padding_from_filename, + maintained_object_duplication ) from .utils import ( - setup + setup, + get_flame_version, + get_flame_install_root ) from .pipeline import ( install, @@ -55,6 +62,10 @@ from .workio import ( file_extensions, work_root ) +from .render_utils import ( + export_clip, + get_preset_path_by_xml_name +) __all__ = [ # constants @@ -80,6 +91,11 @@ __all__ = [ "maintained_segment_selection", "reset_segment_selection", "get_segment_attributes", + "get_clips_in_reels", + "get_reformated_filename", + "get_frame_from_filename", + "get_padding_from_filename", + "maintained_object_duplication", # pipeline "install", @@ -96,6 +112,8 @@ __all__ = [ 
# utils "setup", + "get_flame_version", + "get_flame_install_root", # menu "FlameMenuProjectConnect", @@ -111,5 +129,9 @@ __all__ = [ "current_file", "has_unsaved_changes", "file_extensions", - "work_root" + "work_root", + + # render utils + "export_clip", + "get_preset_path_by_xml_name" ] diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index 7788a6b3f4..f3c918caab 100644 --- a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -16,6 +16,7 @@ from openpype.api import Logger log = Logger.get_logger(__name__) +FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") class CTX: # singleton used for passing data between api modules @@ -445,6 +446,8 @@ def get_sequence_segments(sequence, selected=False): for segment in track.segments: if segment.name.get_value() == "": continue + if segment.hidden.get_value() is True: + continue if ( selected is True and segment.selected.get_value() is not True @@ -519,7 +522,7 @@ def _get_shot_tokens_values(clip, tokens): def get_segment_attributes(segment): - if str(segment.name)[1:-1] == "": + if segment.name.get_value() == "": return None # Add timeline segment to tree @@ -532,6 +535,12 @@ def get_segment_attributes(segment): "PySegment": segment } + # head and tail with forward compatibility + if segment.head: + clip_data["segment_head"] = int(segment.head) + if segment.tail: + clip_data["segment_tail"] = int(segment.tail) + # add all available shot tokens shot_tokens = _get_shot_tokens_values(segment, [ "", "", "", "", "", @@ -551,7 +560,7 @@ def get_segment_attributes(segment): attr = getattr(segment, attr_name) segment_attrs_data[attr] = str(attr).replace("+", ":") - if attr in ["record_in", "record_out"]: + if attr_name in ["record_in", "record_out"]: clip_data[attr_name] = attr.relative_frame else: clip_data[attr_name] = attr.frame @@ -559,3 +568,127 @@ def get_segment_attributes(segment): clip_data["segment_timecodes"] = segment_attrs_data return clip_data + + +def 
get_clips_in_reels(project): + output_clips = [] + project_desktop = project.current_workspace.desktop + + for reel_group in project_desktop.reel_groups: + for reel in reel_group.reels: + for clip in reel.clips: + clip_data = { + "PyClip": clip, + "fps": float(str(clip.frame_rate)[:-4]) + } + + attrs = [ + "name", "width", "height", + "ratio", "sample_rate", "bit_depth" + ] + + for attr in attrs: + val = getattr(clip, attr) + clip_data[attr] = val + + version = clip.versions[-1] + track = version.tracks[-1] + for segment in track.segments: + segment_data = get_segment_attributes(segment) + clip_data.update(segment_data) + + output_clips.append(clip_data) + + return output_clips + + +def get_reformated_filename(filename, padded=True): + """ + Return fixed python expression path + + Args: + filename (str): file name + + Returns: + type: string with reformated path + + Example: + get_reformated_filename("plate.1001.exr") > plate.%04d.exr + + """ + found = FRAME_PATTERN.search(filename) + + if not found: + log.info("File name is not sequence: {}".format(filename)) + return filename + + padding = get_padding_from_filename(filename) + + replacement = "%0{}d".format(padding) if padded else "%d" + start_idx, end_idx = found.span(1) + + return replacement.join( + [filename[:start_idx], filename[end_idx:]] + ) + + +def get_padding_from_filename(filename): + """ + Return padding number from Flame path style + + Args: + filename (str): file name + + Returns: + int: padding number + + Example: + get_padding_from_filename("plate.0001.exr") > 4 + + """ + found = get_frame_from_filename(filename) + + return len(found) if found else None + + +def get_frame_from_filename(filename): + """ + Return sequence number from Flame path style + + Args: + filename (str): file name + + Returns: + int: sequence frame number + + Example: + def get_frame_from_filename(path): + ("plate.0001.exr") > 0001 + + """ + + found = re.findall(FRAME_PATTERN, filename) + + return found.pop() if found else 
None + + +@contextlib.contextmanager +def maintained_object_duplication(item): + """Maintain input item duplication + + Attributes: + item (any flame.PyObject): python api object + + Yield: + duplicate input PyObject type + """ + import flame + # Duplicate the clip to avoid modifying the original clip + duplicate = flame.duplicate(item) + + try: + # do the operation on selected segments + yield duplicate + finally: + # delete the item at the end + flame.delete(duplicate) diff --git a/openpype/hosts/flame/api/menu.py b/openpype/hosts/flame/api/menu.py index b7a94e7866..8b4bfd4550 100644 --- a/openpype/hosts/flame/api/menu.py +++ b/openpype/hosts/flame/api/menu.py @@ -44,7 +44,7 @@ class _FlameMenuApp(object): self.menu_group_name = menu_group_name self.dynamic_menu_data = {} - # flame module is only avaliable when a + # flame module is only available when a # flame project is loaded and initialized self.flame = None try: diff --git a/openpype/hosts/flame/api/render_utils.py b/openpype/hosts/flame/api/render_utils.py new file mode 100644 index 0000000000..1b086646cc --- /dev/null +++ b/openpype/hosts/flame/api/render_utils.py @@ -0,0 +1,125 @@ +import os + + +def export_clip(export_path, clip, preset_path, **kwargs): + """Flame exported wrapper + + Args: + export_path (str): exporting directory path + clip (PyClip): flame api object + preset_path (str): full export path to xml file + + Kwargs: + thumb_frame_number (int)[optional]: source frame number + in_mark (int)[optional]: cut in mark + out_mark (int)[optional]: cut out mark + + Raises: + KeyError: Missing input kwarg `thumb_frame_number` + in case `thumbnail` in `export_preset` + FileExistsError: Missing export preset in shared folder + """ + import flame + + in_mark = out_mark = None + + # Set exporter + exporter = flame.PyExporter() + exporter.foreground = True + exporter.export_between_marks = True + + if kwargs.get("thumb_frame_number"): + thumb_frame_number = kwargs["thumb_frame_number"] + # make sure it 
exists in kwargs + if not thumb_frame_number: + raise KeyError( + "Missing key `thumb_frame_number` in input kwargs") + + in_mark = int(thumb_frame_number) + out_mark = int(thumb_frame_number) + 1 + + elif kwargs.get("in_mark") and kwargs.get("out_mark"): + in_mark = int(kwargs["in_mark"]) + out_mark = int(kwargs["out_mark"]) + else: + exporter.export_between_marks = False + + try: + # set in and out marks if they are available + if in_mark and out_mark: + clip.in_mark = in_mark + clip.out_mark = out_mark + + # export with exporter + exporter.export(clip, preset_path, export_path) + finally: + print('Exported: {} at {}-{}'.format( + clip.name.get_value(), + clip.in_mark, + clip.out_mark + )) + + +def get_preset_path_by_xml_name(xml_preset_name): + def _search_path(root): + output = [] + for root, _dirs, files in os.walk(root): + for f in files: + if f != xml_preset_name: + continue + file_path = os.path.join(root, f) + output.append(file_path) + return output + + def _validate_results(results): + if results and len(results) == 1: + return results.pop() + elif results and len(results) > 1: + print(( + "More matching presets for `{}`: \n" + "{}").format(xml_preset_name, results)) + return results.pop() + else: + return None + + from .utils import ( + get_flame_install_root, + get_flame_version + ) + + # get actual flame version and install path + _version = get_flame_version()["full"] + _install_root = get_flame_install_root() + + # search path templates + shared_search_root = "{install_root}/shared/export/presets" + install_search_root = ( + "{install_root}/presets/{version}/export/presets/flame") + + # fill templates + shared_search_root = shared_search_root.format( + install_root=_install_root + ) + install_search_root = install_search_root.format( + install_root=_install_root, + version=_version + ) + + # get search results + shared_results = _search_path(shared_search_root) + installed_results = _search_path(install_search_root) + + # first try to return shared
results + shared_preset_path = _validate_results(shared_results) + + if shared_preset_path: + return os.path.dirname(shared_preset_path) + + # then try installed results + installed_preset_path = _validate_results(installed_results) + + if installed_preset_path: + return os.path.dirname(installed_preset_path) + + # if nothing found then return False + return False diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index 2cd9a46184..c864399608 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -25,7 +25,7 @@ class WireTapCom(object): This way we are able to set new project with settings and correct colorspace policy. Also we are able to create new user - or get actuall user with similar name (users are usually cloning + or get actual user with similar name (users are usually cloning their profiles and adding date stamp into suffix). """ @@ -214,7 +214,7 @@ class WireTapCom(object): volumes = [] - # go trough all children and get volume names + # go through all children and get volume names child_obj = WireTapNodeHandle() for child_idx in range(children_num): @@ -254,7 +254,7 @@ class WireTapCom(object): filtered_users = [user for user in used_names if user_name in user] if filtered_users: - # todo: need to find lastly created following regex patern for + # todo: need to find lastly created following regex pattern for # date used in name return filtered_users.pop() @@ -299,7 +299,7 @@ class WireTapCom(object): usernames = [] - # go trough all children and get volume names + # go through all children and get volume names child_obj = WireTapNodeHandle() for child_idx in range(children_num): @@ -346,7 +346,7 @@ class WireTapCom(object): if not requested: raise AttributeError(( "Error: Cannot request number of " - "childrens from the node {}. Make sure your " + "children from the node {}. 
Make sure your " "wiretap service is running: {}").format( parent_path, parent.lastError()) ) diff --git a/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/ftrack_lib.py b/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/ftrack_lib.py index 26b197ee1d..c2168016c6 100644 --- a/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/ftrack_lib.py +++ b/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/ftrack_lib.py @@ -234,7 +234,7 @@ class FtrackComponentCreator: ).first() if component_entity: - # overwrite existing members in component enity + # overwrite existing members in component entity # - get data for member from `ftrack.origin` location self._overwrite_members(component_entity, comp_data) diff --git a/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/panel_app.py b/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/panel_app.py index 9e39147776..648f902872 100644 --- a/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/panel_app.py +++ b/openpype/hosts/flame/api/utility_scripts/openpype_flame_to_ftrack/modules/panel_app.py @@ -304,7 +304,7 @@ class FlameToFtrackPanel(object): self._resolve_project_entity() self._save_ui_state_to_cfg() - # get hanldes from gui input + # get handles from gui input handles = self.handles_input.text() # get frame start from gui input @@ -517,7 +517,7 @@ class FlameToFtrackPanel(object): if self.temp_data_dir: shutil.rmtree(self.temp_data_dir) self.temp_data_dir = None - print("All Temp data were destroied ...") + print("All Temp data were destroyed ...") def close(self): self._save_ui_state_to_cfg() diff --git a/openpype/hosts/flame/api/utility_scripts/openpype_in_flame.py b/openpype/hosts/flame/api/utility_scripts/openpype_in_flame.py index 72614f2b5d..931c5a1b79 100644 --- a/openpype/hosts/flame/api/utility_scripts/openpype_in_flame.py +++ 
b/openpype/hosts/flame/api/utility_scripts/openpype_in_flame.py @@ -13,7 +13,7 @@ def openpype_install(): """ openpype.install() avalon.api.install(opfapi) - print("Avalon registred hosts: {}".format( + print("Avalon registered hosts: {}".format( avalon.api.registered_host())) @@ -101,7 +101,7 @@ def app_initialized(parent=None): """ Initialisation of the hook is starting from here -First it needs to test if it can import the flame modul. +First it needs to test if it can import the flame module. This will happen only in case a project has been loaded. Then `app_initialized` will load main Framework which will load all menu objects as flame_apps. diff --git a/openpype/hosts/flame/api/utils.py b/openpype/hosts/flame/api/utils.py index b9899900f5..2dfdfa8f48 100644 --- a/openpype/hosts/flame/api/utils.py +++ b/openpype/hosts/flame/api/utils.py @@ -65,7 +65,7 @@ def _sync_utility_scripts(env=None): if _itm not in remove_black_list: skip = True - # do not skyp if pyc in extension + # do not skip if pyc in extension if not os.path.isdir(_itm) and "pyc" in os.path.splitext(_itm)[-1]: skip = False @@ -125,3 +125,18 @@ def setup(env=None): _sync_utility_scripts(env) log.info("Flame OpenPype wrapper has been installed") + + +def get_flame_version(): + import flame + + return { + "full": flame.get_version(), + "major": flame.get_version_major(), + "minor": flame.get_version_minor(), + "patch": flame.get_version_patch() + } + + +def get_flame_install_root(): + return "/opt/Autodesk" diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py index fe8acda257..e8bdd840f4 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/openpype/hosts/flame/hooks/pre_flame_setup.py @@ -14,7 +14,7 @@ from pprint import pformat class FlamePrelaunch(PreLaunchHook): """ Flame prelaunch hook - Will make sure flame_script_dirs are coppied to user's folder defined + Will make sure flame_script_dirs are copied to user's folder defined in 
environment var FLAME_SCRIPT_DIR. """ app_groups = ["flame"] diff --git a/openpype/hosts/flame/otio/flame_export.py b/openpype/hosts/flame/otio/flame_export.py index aea1f387e8..8c240fc9d5 100644 --- a/openpype/hosts/flame/otio/flame_export.py +++ b/openpype/hosts/flame/otio/flame_export.py @@ -127,7 +127,7 @@ def create_time_effects(otio_clip, item): # # add otio effect to clip effects # otio_clip.effects.append(otio_effect) - # # loop trought and get all Timewarps + # # loop through and get all Timewarps # for effect in subTrackItems: # if ((track_item not in effect.linkedItems()) # and (len(effect.linkedItems()) > 0)): @@ -284,23 +284,20 @@ def create_otio_reference(clip_data): # get padding and other file infos log.debug("_ path: {}".format(path)) - is_sequence = padding = utils.get_frame_from_path(path) - if is_sequence: - number = utils.get_frame_from_path(path) - file_head = file_name.split(number)[:-1] - frame_start = int(number) - frame_duration = clip_data["source_duration"] + otio_ex_ref_item = None + is_sequence = frame_number = utils.get_frame_from_filename(file_name) if is_sequence: + file_head = file_name.split(frame_number)[:-1] + frame_start = int(frame_number) + padding = len(frame_number) + metadata.update({ "isSequence": True, "padding": padding }) - otio_ex_ref_item = None - - if is_sequence: # if it is file sequence try to create `ImageSequenceReference` # the OTIO might not be compatible so return nothing and do it old way try: @@ -322,10 +319,12 @@ def create_otio_reference(clip_data): pass if not otio_ex_ref_item: - reformat_path = utils.get_reformated_path(path, padded=False) + dirname, file_name = os.path.split(path) + file_name = utils.get_reformated_filename(file_name, padded=False) + reformated_path = os.path.join(dirname, file_name) # in case old OTIO or video file create `ExternalReference` otio_ex_ref_item = otio.schema.ExternalReference( - target_url=reformat_path, + target_url=reformated_path, 
available_range=create_otio_time_range( frame_start, frame_duration, @@ -346,7 +345,7 @@ def create_otio_clip(clip_data): media_reference = create_otio_reference(clip_data) # calculate source in - first_frame = utils.get_frame_from_path(clip_data["fpath"]) or 0 + first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0 source_in = int(clip_data["source_in"]) - int(first_frame) # creatae source range @@ -615,11 +614,11 @@ def create_otio_timeline(sequence): # Add Gap if needed if itemindex == 0: # if it is first track item at track then add - # it to previouse item + # it to previous item prev_item = segment_data else: - # get previouse item + # get previous item prev_item = segments_ordered[itemindex - 1] log.debug("_ segment_data: {}".format(segment_data)) diff --git a/openpype/hosts/flame/otio/utils.py b/openpype/hosts/flame/otio/utils.py index 229946343b..7ded8e55d8 100644 --- a/openpype/hosts/flame/otio/utils.py +++ b/openpype/hosts/flame/otio/utils.py @@ -3,6 +3,8 @@ import opentimelineio as otio import logging log = logging.getLogger(__name__) +FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]") + def timecode_to_frames(timecode, framerate): rt = otio.opentime.from_timecode(timecode, framerate) @@ -19,77 +21,71 @@ def frames_to_seconds(frames, framerate): return otio.opentime.to_seconds(rt) -def get_reformated_path(path, padded=True): +def get_reformated_filename(filename, padded=True): """ Return fixed python expression path Args: - path (str): path url or simple file name + filename (str): file name Returns: type: string with reformated path Example: - get_reformated_path("plate.1001.exr") > plate.%04d.exr + get_reformated_filename("plate.1001.exr") > plate.%04d.exr """ - padding = get_padding_from_path(path) - found = get_frame_from_path(path) + found = FRAME_PATTERN.search(filename) if not found: - log.info("Path is not sequence: {}".format(path)) - return path + log.info("File name is not sequence: {}".format(filename)) + return filename - if 
padded: - path = path.replace(found, "%0{}d".format(padding)) - else: - path = path.replace(found, "%d") + padding = get_padding_from_filename(filename) - return path + replacement = "%0{}d".format(padding) if padded else "%d" + start_idx, end_idx = found.span(1) + + return replacement.join( + [filename[:start_idx], filename[end_idx:]] + ) -def get_padding_from_path(path): +def get_padding_from_filename(filename): """ Return padding number from Flame path style Args: - path (str): path url or simple file name + filename (str): file name Returns: int: padding number Example: - get_padding_from_path("plate.0001.exr") > 4 + get_padding_from_filename("plate.0001.exr") > 4 """ - found = get_frame_from_path(path) + found = get_frame_from_filename(filename) - if found: - return len(found) - else: - return None + return len(found) if found else None -def get_frame_from_path(path): +def get_frame_from_filename(filename): """ Return sequence number from Flame path style Args: - path (str): path url or simple file name + filename (str): file name Returns: int: sequence frame number Example: - def get_frame_from_path(path): + def get_frame_from_filename(path): ("plate.0001.exr") > 0001 """ - frame_pattern = re.compile(r"[._](\d+)[.]") - found = re.findall(frame_pattern, path) + found = re.findall(FRAME_PATTERN, filename) - if found: - return found.pop() - else: - return None + return found.pop() if found else None diff --git a/openpype/hosts/flame/plugins/publish/collect_test_selection.py b/openpype/hosts/flame/plugins/publish/collect_test_selection.py index 73401368b1..9f982321cc 100644 --- a/openpype/hosts/flame/plugins/publish/collect_test_selection.py +++ b/openpype/hosts/flame/plugins/publish/collect_test_selection.py @@ -16,6 +16,7 @@ class CollectTestSelection(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder label = "test selection" hosts = ["flame"] + active = False def process(self, context): self.log.info( diff --git 
a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py new file mode 100644 index 0000000000..6424bce3bc --- /dev/null +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -0,0 +1,256 @@ +import pyblish +import openpype +import openpype.hosts.flame.api as opfapi +from openpype.hosts.flame.otio import flame_export + +# # developer reload modules +from pprint import pformat + + +class CollectTimelineInstances(pyblish.api.ContextPlugin): + """Collect all Timeline segment selection.""" + + order = pyblish.api.CollectorOrder - 0.09 + label = "Collect timeline Instances" + hosts = ["flame"] + + audio_track_items = [] + + def process(self, context): + project = context.data["flameProject"] + sequence = context.data["flameSequence"] + self.otio_timeline = context.data["otioTimeline"] + self.clips_in_reels = opfapi.get_clips_in_reels(project) + self.fps = context.data["fps"] + + # process all selected + with opfapi.maintained_segment_selection(sequence) as segments: + for segment in segments: + clip_data = opfapi.get_segment_attributes(segment) + clip_name = clip_data["segment_name"] + self.log.debug("clip_name: {}".format(clip_name)) + + # get openpype tag data + marker_data = opfapi.get_segment_data_marker(segment) + self.log.debug("__ marker_data: {}".format( + pformat(marker_data))) + + if not marker_data: + continue + + if marker_data.get("id") != "pyblish.avalon.instance": + continue + + # get file path + file_path = clip_data["fpath"] + + # get source clip + source_clip = self._get_reel_clip(file_path) + + first_frame = opfapi.get_frame_from_filename(file_path) or 0 + + head, tail = self._get_head_tail(clip_data, first_frame) + + # solve handles length + marker_data["handleStart"] = min( + marker_data["handleStart"], head) + marker_data["handleEnd"] = min( + marker_data["handleEnd"], tail) + + with_audio = bool(marker_data.pop("audio")) + + # add marker data to
instance data + inst_data = dict(marker_data.items()) + + asset = marker_data["asset"] + subset = marker_data["subset"] + + # insert family into families + family = marker_data["family"] + families = [str(f) for f in marker_data["families"]] + families.insert(0, str(family)) + + # form label + label = asset + if asset != clip_name: + label += " ({})".format(clip_name) + label += " {}".format(subset) + label += " {}".format("[" + ", ".join(families) + "]") + + inst_data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "asset": asset, + "item": segment, + "families": families, + "publish": marker_data["publish"], + "fps": self.fps, + "flameSourceClip": source_clip, + "sourceFirstFrame": int(first_frame), + "path": file_path + }) + + # get otio clip data + otio_data = self._get_otio_clip_instance_data(clip_data) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + + # add to instance data + inst_data.update(otio_data) + self.log.debug("__ inst_data: {}".format(pformat(inst_data))) + + # add resolution + self._get_resolution_to_data(inst_data, context) + + # create instance + instance = context.create_instance(**inst_data) + + # add colorspace data + instance.data.update({ + "versionData": { + "colorspace": clip_data["colour_space"], + } + }) + + # create shot instance for shot attributes create/update + self._create_shot_instance(context, clip_name, **inst_data) + + self.log.info("Creating instance: {}".format(instance)) + self.log.info( + "_ instance.data: {}".format(pformat(instance.data))) + + if not with_audio: + continue + + # add audioReview attribute to plate instance data + # if reviewTrack is on + if marker_data.get("reviewTrack") is not None: + instance.data["reviewAudio"] = True + + def _get_head_tail(self, clip_data, first_frame): + # calculate head and tail with forward compatibility + head = clip_data.get("segment_head") + tail = clip_data.get("segment_tail") + + if not head: + head = int(clip_data["source_in"]) - 
int(first_frame) + if not tail: + tail = int( + clip_data["source_duration"] - ( + head + clip_data["record_duration"] + ) + ) + return head, tail + + def _get_reel_clip(self, path): + match_reel_clip = [ + clip for clip in self.clips_in_reels + if clip["fpath"] == path + ] + if match_reel_clip: + return match_reel_clip.pop() + + def _get_resolution_to_data(self, data, context): + assert data.get("otioClip"), "Missing `otioClip` data" + + # solve source resolution option + if data.get("sourceResolution", None): + otio_clip_metadata = data[ + "otioClip"].media_reference.metadata + data.update({ + "resolutionWidth": otio_clip_metadata[ + "openpype.source.width"], + "resolutionHeight": otio_clip_metadata[ + "openpype.source.height"], + "pixelAspect": otio_clip_metadata[ + "openpype.source.pixelAspect"] + }) + else: + otio_tl_metadata = context.data["otioTimeline"].metadata + data.update({ + "resolutionWidth": otio_tl_metadata["openpype.timeline.width"], + "resolutionHeight": otio_tl_metadata[ + "openpype.timeline.height"], + "pixelAspect": otio_tl_metadata[ + "openpype.timeline.pixelAspect"] + }) + + def _create_shot_instance(self, context, clip_name, **data): + master_layer = data.get("heroTrack") + hierarchy_data = data.get("hierarchyData") + asset = data.get("asset") + + if not master_layer: + return + + if not hierarchy_data: + return + + asset = data["asset"] + subset = "shotMain" + + # insert family into families + family = "shot" + + # form label + label = asset + if asset != clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + label += " [{}]".format(family) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "asset": asset, + "family": family, + "families": [] + }) + + instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def _get_otio_clip_instance_data(self, 
clip_data): + """ + Return otio objects for timeline, track and clip + + Args: + timeline_item_data (dict): timeline_item_data from list returned by + resolve.get_current_timeline_items() + otio_timeline (otio.schema.Timeline): otio object + + Returns: + dict: otio clip object + + """ + segment = clip_data["PySegment"] + s_track_name = segment.parent.name.get_value() + timeline_range = self._create_otio_time_range_from_timeline_item_data( + clip_data) + + for otio_clip in self.otio_timeline.each_clip(): + track_name = otio_clip.parent().name + parent_range = otio_clip.range_in_parent() + if s_track_name not in track_name: + continue + if otio_clip.name not in segment.name.get_value(): + continue + if openpype.lib.is_overlapping_otio_ranges( + parent_range, timeline_range, strict=True): + + # add pypedata marker to otio_clip metadata + for marker in otio_clip.markers: + if opfapi.MARKER_NAME in marker.name: + otio_clip.metadata.update(marker.metadata) + return {"otioClip": otio_clip} + + return None + + def _create_otio_time_range_from_timeline_item_data(self, clip_data): + frame_start = int(clip_data["record_in"]) + frame_duration = int(clip_data["record_duration"]) + + return flame_export.create_otio_time_range( + frame_start, frame_duration, self.fps) diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py new file mode 100644 index 0000000000..faa5be9d68 --- /dev/null +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_otio.py @@ -0,0 +1,57 @@ +import pyblish.api +import avalon.api as avalon +import openpype.lib as oplib +import openpype.hosts.flame.api as opfapi +from openpype.hosts.flame.otio import flame_export + + +class CollecTimelineOTIO(pyblish.api.ContextPlugin): + """Inject the current working context into publish context""" + + label = "Collect Timeline OTIO" + order = pyblish.api.CollectorOrder - 0.099 + + def process(self, context): + # plugin defined + family 
= "workfile" + variant = "otioTimeline" + + # main + asset_doc = context.data["assetEntity"] + task_name = avalon.Session["AVALON_TASK"] + project = opfapi.get_current_project() + sequence = opfapi.get_current_sequence(opfapi.CTX.selection) + + # create subset name + subset_name = oplib.get_subset_name_with_asset_doc( + family, + variant, + task_name, + asset_doc, + ) + + # adding otio timeline to context + with opfapi.maintained_segment_selection(sequence): + otio_timeline = flame_export.create_otio_timeline(sequence) + + instance_data = { + "name": subset_name, + "asset": asset_doc["name"], + "subset": subset_name, + "family": "workfile" + } + + # create instance with workfile + instance = context.create_instance(**instance_data) + self.log.info("Creating instance: {}".format(instance)) + + # update context with main project attributes + context.data.update({ + "flameProject": project, + "flameSequence": sequence, + "otioTimeline": otio_timeline, + "currentFile": "Flame/{}/{}".format( + project.name, sequence.name + ), + "fps": float(str(sequence.frame_rate)[:-4]) + }) diff --git a/openpype/hosts/flame/plugins/publish/extract_otio_file.py b/openpype/hosts/flame/plugins/publish/extract_otio_file.py new file mode 100644 index 0000000000..7dd75974fc --- /dev/null +++ b/openpype/hosts/flame/plugins/publish/extract_otio_file.py @@ -0,0 +1,43 @@ +import os +import pyblish.api +import openpype.api +import opentimelineio as otio + + +class ExtractOTIOFile(openpype.api.Extractor): + """ + Extractor export OTIO file + """ + + label = "Extract OTIO file" + order = pyblish.api.ExtractorOrder - 0.45 + families = ["workfile"] + hosts = ["flame"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + name = instance.data["name"] + staging_dir = self.staging_dir(instance) + + otio_timeline = instance.context.data["otioTimeline"] + # create otio timeline representation + 
otio_file_name = name + ".otio" + otio_file_path = os.path.join(staging_dir, otio_file_name) + + # export otio file to temp dir + otio.adapters.write_to_file(otio_timeline, otio_file_path) + + representation_otio = { + 'name': "otio", + 'ext': "otio", + 'files': otio_file_name, + "stagingDir": staging_dir, + } + + instance.data["representations"].append(representation_otio) + + self.log.info("Added OTIO file representation: {}".format( + representation_otio)) diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py new file mode 100644 index 0000000000..291e440cbe --- /dev/null +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -0,0 +1,172 @@ +import os +from pprint import pformat +from copy import deepcopy +import pyblish.api +import openpype.api +from openpype.hosts.flame import api as opfapi + + +class ExtractSubsetResources(openpype.api.Extractor): + """ + Extractor for transcoding files from Flame clip + """ + + label = "Extract subset resources" + order = pyblish.api.ExtractorOrder + families = ["clip"] + hosts = ["flame"] + + # plugin defaults + default_presets = { + "thumbnail": { + "ext": "jpg", + "xml_preset_file": "Jpeg (8-bit).xml", + "xml_preset_dir": "", + "representation_add_range": False, + "representation_tags": ["thumbnail"] + }, + "ftrackpreview": { + "ext": "mov", + "xml_preset_file": "Apple iPad (1920x1080).xml", + "xml_preset_dir": "", + "representation_add_range": True, + "representation_tags": [ + "review", + "delete" + ] + } + } + keep_original_representation = False + + # hide publisher during exporting + hide_ui_on_process = True + + # settings + export_presets_mapping = {} + + def process(self, instance): + + if ( + self.keep_original_representation + and "representations" not in instance.data + or not self.keep_original_representation + ): + instance.data["representations"] = [] + + frame_start = instance.data["frameStart"] + 
handle_start = instance.data["handleStart"] + frame_start_handle = frame_start - handle_start + source_first_frame = instance.data["sourceFirstFrame"] + source_start_handles = instance.data["sourceStartH"] + source_end_handles = instance.data["sourceEndH"] + source_duration_handles = ( + source_end_handles - source_start_handles) + 1 + + clip_data = instance.data["flameSourceClip"] + clip = clip_data["PyClip"] + + in_mark = (source_start_handles - source_first_frame) + 1 + out_mark = in_mark + source_duration_handles + + staging_dir = self.staging_dir(instance) + + # add default preset type for thumbnail and reviewable video + # update them with settings and override in case the same + # are found in there + export_presets = deepcopy(self.default_presets) + export_presets.update(self.export_presets_mapping) + + # with maintained duplication loop all presets + with opfapi.maintained_object_duplication(clip) as duplclip: + # loop all preset names and + for unique_name, preset_config in export_presets.items(): + kwargs = {} + preset_file = preset_config["xml_preset_file"] + preset_dir = preset_config["xml_preset_dir"] + repre_tags = preset_config["representation_tags"] + + # validate xml preset file is filled + if preset_file == "": + raise ValueError( + ("Check Settings for {} preset: " + "`XML preset file` is not filled").format( + unique_name) + ) + + # resolve xml preset dir if not filled + if preset_dir == "": + preset_dir = opfapi.get_preset_path_by_xml_name( + preset_file) + + if not preset_dir: + raise ValueError( + ("Check Settings for {} preset: " + "`XML preset file` {} is not found").format( + unique_name, preset_file) + ) + + # create preset path + preset_path = str(os.path.join( + preset_dir, preset_file + )) + + # define kwargs based on preset type + if "thumbnail" in unique_name: + kwargs["thumb_frame_number"] = in_mark + ( + source_duration_handles / 2) + else: + kwargs.update({ + "in_mark": in_mark, + "out_mark": out_mark + }) + + export_dir_path = 
str(os.path.join( + staging_dir, unique_name + )) + os.makedirs(export_dir_path) + + # export + opfapi.export_clip( + export_dir_path, duplclip, preset_path, **kwargs) + + # create representation data + representation_data = { + "name": unique_name, + "outputName": unique_name, + "ext": preset_config["ext"], + "stagingDir": export_dir_path, + "tags": repre_tags + } + + files = os.listdir(export_dir_path) + + # add files to representation but add + # imagesequence as list + if ( + "movie_file" in preset_path + or unique_name == "thumbnail" + ): + representation_data["files"] = files.pop() + else: + representation_data["files"] = files + + # add frame range + if preset_config["representation_add_range"]: + representation_data.update({ + "frameStart": frame_start_handle, + "frameEnd": ( + frame_start_handle + source_duration_handles), + "fps": instance.data["fps"] + }) + + instance.data["representations"].append(representation_data) + + # add review family if found in tags + if "review" in repre_tags: + instance.data["families"].append("review") + + self.log.info("Added representation: {}".format( + representation_data)) + + self.log.debug("All representations: {}".format( + pformat(instance.data["representations"]))) diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index c721146830..6b16339e53 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -52,7 +52,7 @@ def install(): def uninstall(): - """Uninstall all tha was installed + """Uninstall all that was installed This is where you undo everything that was done in `install()`. 
That means, removing menus, deregistering families and data diff --git a/openpype/hosts/fusion/hooks/pre_fusion_setup.py b/openpype/hosts/fusion/hooks/pre_fusion_setup.py index a0c16a6700..4a9dfaec15 100644 --- a/openpype/hosts/fusion/hooks/pre_fusion_setup.py +++ b/openpype/hosts/fusion/hooks/pre_fusion_setup.py @@ -1,6 +1,6 @@ import os import importlib -from openpype.lib import PreLaunchHook +from openpype.lib import PreLaunchHook, ApplicationLaunchFailed from openpype.hosts.fusion.api import utils @@ -12,27 +12,29 @@ class FusionPrelaunch(PreLaunchHook): app_groups = ["fusion"] def execute(self): - # making sure pyton 3.6 is installed at provided path + # making sure python 3.6 is installed at provided path py36_dir = os.path.normpath(self.launch_context.env.get("PYTHON36", "")) - assert os.path.isdir(py36_dir), ( - "Python 3.6 is not installed at the provided folder path. Either " - "make sure the `environments\resolve.json` is having correctly " - "set `PYTHON36` or make sure Python 3.6 is installed " - f"in given path. \nPYTHON36E: `{py36_dir}`" - ) - self.log.info(f"Path to Fusion Python folder: `{py36_dir}`...") + if not os.path.isdir(py36_dir): + raise ApplicationLaunchFailed( + "Python 3.6 is not installed at the provided path.\n" + "Either make sure the 'environments/fusion.json' has " + "'PYTHON36' set correctly or make sure Python 3.6 is installed " + f"in the given path.\n\nPYTHON36: {py36_dir}" + ) + self.log.info(f"Path to Fusion Python folder: '{py36_dir}'...") self.launch_context.env["PYTHON36"] = py36_dir # setting utility scripts dir for scripts syncing us_dir = os.path.normpath( self.launch_context.env.get("FUSION_UTILITY_SCRIPTS_DIR", "") ) - assert os.path.isdir(us_dir), ( - "Fusion utility script dir does not exists. Either make sure " - "the `environments\fusion.json` is having correctly set " - "`FUSION_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. 
\n" - f"FUSION_UTILITY_SCRIPTS_DIR: `{us_dir}`" - ) + if not os.path.isdir(us_dir): + raise ApplicationLaunchFailed( + "Fusion utility script dir does not exist. Either make sure " + "the 'environments/fusion.json' has " + "'FUSION_UTILITY_SCRIPTS_DIR' set correctly or reinstall " + f"Fusion.\n\nFUSION_UTILITY_SCRIPTS_DIR: '{us_dir}'" + ) try: __import__("avalon.fusion") diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py index 24d48fb9da..8f5be75484 100644 --- a/openpype/hosts/fusion/plugins/load/load_sequence.py +++ b/openpype/hosts/fusion/plugins/load/load_sequence.py @@ -185,22 +185,22 @@ class FusionLoadSequence(api.Loader): - We do the same like Fusion - allow fusion to take control. - HoldFirstFrame: Fusion resets this to 0 - - We preverse the value. + - We preserve the value. - HoldLastFrame: Fusion resets this to 0 - - We preverse the value. + - We preserve the value. - Reverse: Fusion resets to disabled if "Loop" is not enabled. - We preserve the value. - Depth: Fusion resets to "Format" - - We preverse the value. + - We preserve the value. - KeyCode: Fusion resets to "" - - We preverse the value. + - We preserve the value. - TimeCodeOffset: Fusion resets to 0 - - We preverse the value. + - We preserve the value. """ diff --git a/openpype/hosts/fusion/plugins/publish/submit_deadline.py b/openpype/hosts/fusion/plugins/publish/submit_deadline.py index 050e558d2e..28671295ab 100644 --- a/openpype/hosts/fusion/plugins/publish/submit_deadline.py +++ b/openpype/hosts/fusion/plugins/publish/submit_deadline.py @@ -124,7 +124,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): # Include critical variables with submission keys = [ - # TODO: This won't work if the slaves don't have accesss to + # TODO: This won't work if the slaves don't have access to # these paths, such as if slaves are running Linux and the # submitter is on Windows. 
"PYTHONPATH", diff --git a/openpype/hosts/fusion/scripts/fusion_switch_shot.py b/openpype/hosts/fusion/scripts/fusion_switch_shot.py index 05b577c8ba..efb3cad800 100644 --- a/openpype/hosts/fusion/scripts/fusion_switch_shot.py +++ b/openpype/hosts/fusion/scripts/fusion_switch_shot.py @@ -85,7 +85,7 @@ def _format_filepath(session): new_filename = "{}_{}_slapcomp_v001.comp".format(project, asset) new_filepath = os.path.join(slapcomp_dir, new_filename) - # Create new unqiue filepath + # Create new unique filepath if os.path.exists(new_filepath): new_filepath = pype.version_up(new_filepath) diff --git a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py index 81df2bc31d..4f804f9bce 100644 --- a/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py +++ b/openpype/hosts/fusion/utility_scripts/__OpenPype_Menu__.py @@ -16,7 +16,7 @@ def main(env): # activate resolve from pype avalon.api.install(avalon.fusion) - log.info(f"Avalon registred hosts: {avalon.api.registered_host()}") + log.info(f"Avalon registered hosts: {avalon.api.registered_host()}") menu.launch_openpype_menu() diff --git a/openpype/hosts/harmony/js/README.md b/openpype/hosts/harmony/js/README.md index ca610e49f5..1265a38305 100644 --- a/openpype/hosts/harmony/js/README.md +++ b/openpype/hosts/harmony/js/README.md @@ -2,13 +2,13 @@ ### Development -#### Setting up ESLint as linter for javasript code +#### Setting up ESLint as linter for javascript code You nee [node.js](https://nodejs.org/en/) installed. All you need to do then is to run: ```sh -npm intall +npm install ``` in **js** directory. This will install eslint and all requirements locally. 
diff --git a/openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js b/openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js index d809c350ab..cf8a9a29ca 100644 --- a/openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js +++ b/openpype/hosts/harmony/js/loaders/ImageSequenceLoader.js @@ -18,11 +18,11 @@ if (typeof $ === 'undefined'){ * @classdesc Image Sequence loader JS code. */ var ImageSequenceLoader = function() { - this.PNGTransparencyMode = 0; // Premultiplied wih Black - this.TGATransparencyMode = 0; // Premultiplied wih Black - this.SGITransparencyMode = 0; // Premultiplied wih Black + this.PNGTransparencyMode = 0; // Premultiplied with Black + this.TGATransparencyMode = 0; // Premultiplied with Black + this.SGITransparencyMode = 0; // Premultiplied with Black this.LayeredPSDTransparencyMode = 1; // Straight - this.FlatPSDTransparencyMode = 2; // Premultiplied wih White + this.FlatPSDTransparencyMode = 2; // Premultiplied with White }; @@ -84,7 +84,7 @@ ImageSequenceLoader.getUniqueColumnName = function(columnPrefix) { * @return {string} Read node name * * @example - * // Agrguments are in following order: + * // Arguments are in following order: * var args = [ * files, // Files in file sequences. * asset, // Asset name. 
@@ -97,11 +97,11 @@ ImageSequenceLoader.prototype.importFiles = function(args) { MessageLog.trace("ImageSequence:: " + typeof PypeHarmony); MessageLog.trace("ImageSequence $:: " + typeof $); MessageLog.trace("ImageSequence OH:: " + typeof PypeHarmony.OpenHarmony); - var PNGTransparencyMode = 0; // Premultiplied wih Black - var TGATransparencyMode = 0; // Premultiplied wih Black - var SGITransparencyMode = 0; // Premultiplied wih Black + var PNGTransparencyMode = 0; // Premultiplied with Black + var TGATransparencyMode = 0; // Premultiplied with Black + var SGITransparencyMode = 0; // Premultiplied with Black var LayeredPSDTransparencyMode = 1; // Straight - var FlatPSDTransparencyMode = 2; // Premultiplied wih White + var FlatPSDTransparencyMode = 2; // Premultiplied with White var doc = $.scn; var files = args[0]; @@ -224,7 +224,7 @@ ImageSequenceLoader.prototype.importFiles = function(args) { * @return {string} Read node name * * @example - * // Agrguments are in following order: + * // Arguments are in following order: * var args = [ * files, // Files in file sequences * name, // Node name diff --git a/openpype/hosts/harmony/plugins/load/load_background.py b/openpype/hosts/harmony/plugins/load/load_background.py index 946090f6e6..993a09e042 100644 --- a/openpype/hosts/harmony/plugins/load/load_background.py +++ b/openpype/hosts/harmony/plugins/load/load_background.py @@ -13,11 +13,11 @@ copy_files = """function copyFile(srcFilename, dstFilename) } """ -import_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black -var TGATransparencyMode = 0; //Premultiplied wih Black -var SGITransparencyMode = 0; //Premultiplied wih Black +import_files = """var PNGTransparencyMode = 1; //Premultiplied with Black +var TGATransparencyMode = 0; //Premultiplied with Black +var SGITransparencyMode = 0; //Premultiplied with Black var LayeredPSDTransparencyMode = 1; //Straight -var FlatPSDTransparencyMode = 2; //Premultiplied wih White +var FlatPSDTransparencyMode = 2; 
//Premultiplied with White function getUniqueColumnName( column_prefix ) { @@ -140,11 +140,11 @@ function import_files(args) import_files """ -replace_files = """var PNGTransparencyMode = 1; //Premultiplied wih Black -var TGATransparencyMode = 0; //Premultiplied wih Black -var SGITransparencyMode = 0; //Premultiplied wih Black +replace_files = """var PNGTransparencyMode = 1; //Premultiplied with Black +var TGATransparencyMode = 0; //Premultiplied with Black +var SGITransparencyMode = 0; //Premultiplied with Black var LayeredPSDTransparencyMode = 1; //Straight -var FlatPSDTransparencyMode = 2; //Premultiplied wih White +var FlatPSDTransparencyMode = 2; //Premultiplied with White function replace_files(args) { diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index 3df095f9e4..7563503593 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -31,7 +31,7 @@ def beforeNewProjectCreated(event): def afterNewProjectCreated(event): log.info("after new project created event...") - # sync avalon data to project properities + # sync avalon data to project properties sync_avalon_data_to_workfile() # add tags from preset @@ -51,7 +51,7 @@ def beforeProjectLoad(event): def afterProjectLoad(event): log.info("after project load event...") - # sync avalon data to project properities + # sync avalon data to project properties sync_avalon_data_to_workfile() # add tags from preset diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index 9a22d8cf27..a9467ae5a4 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -299,7 +299,7 @@ def get_track_item_pype_data(track_item): if not tag: return None - # get tag metadata attribut + # get tag metadata attribute tag_data = tag.metadata() # convert tag metadata to normal keys names and values to correct types for k, v in dict(tag_data).items(): @@ -402,7 +402,7 @@ def sync_avalon_data_to_workfile(): try: 
project.setProjectDirectory(active_project_root) except Exception: - # old way of seting it + # old way of setting it project.setProjectRoot(active_project_root) # get project data from avalon db @@ -614,7 +614,7 @@ def create_nuke_workfile_clips(nuke_workfiles, seq=None): if not seq: seq = hiero.core.Sequence('NewSequences') root.addItem(hiero.core.BinItem(seq)) - # todo will ned to define this better + # todo will need to define this better # track = seq[1] # lazy example to get a destination# track clips_lst = [] for nk in nuke_workfiles: @@ -838,7 +838,7 @@ def apply_colorspace_project(): # remove the TEMP file as we dont need it os.remove(copy_current_file_tmp) - # use the code from bellow for changing xml hrox Attributes + # use the code from below for changing xml hrox Attributes presets.update({"name": os.path.basename(copy_current_file)}) # read HROX in as QDomSocument @@ -874,7 +874,7 @@ def apply_colorspace_clips(): if "default" in clip_colorspace: continue - # check if any colorspace presets for read is mathing + # check if any colorspace presets for read is matching preset_clrsp = None for k in presets: if not bool(re.search(k["regex"], clip_media_source_path)): @@ -931,7 +931,7 @@ def get_sequence_pattern_and_padding(file): Can find file.0001.ext, file.%02d.ext, file.####.ext Return: - string: any matching sequence patern + string: any matching sequence pattern int: padding of sequnce numbering """ foundall = re.findall( @@ -950,7 +950,7 @@ def get_sequence_pattern_and_padding(file): def sync_clip_name_to_data_asset(track_items_list): - # loop trough all selected clips + # loop through all selected clips for track_item in track_items_list: # ignore if parent track is locked or disabled if track_item.parent().isLocked(): diff --git a/openpype/hosts/hiero/api/otio/hiero_export.py b/openpype/hosts/hiero/api/otio/hiero_export.py index abf510403e..1e4088d9c0 100644 --- a/openpype/hosts/hiero/api/otio/hiero_export.py +++ 
b/openpype/hosts/hiero/api/otio/hiero_export.py @@ -92,7 +92,7 @@ def create_time_effects(otio_clip, track_item): # add otio effect to clip effects otio_clip.effects.append(otio_effect) - # loop trought and get all Timewarps + # loop through and get all Timewarps for effect in subTrackItems: if ((track_item not in effect.linkedItems()) and (len(effect.linkedItems()) > 0)): @@ -388,11 +388,11 @@ def create_otio_timeline(): # Add Gap if needed if itemindex == 0: # if it is first track item at track then add - # it to previouse item + # it to previous item return track_item else: - # get previouse item + # get previous item return track_item.parent().items()[itemindex - 1] # get current timeline @@ -416,11 +416,11 @@ def create_otio_timeline(): # Add Gap if needed if itemindex == 0: # if it is first track item at track then add - # it to previouse item + # it to previous item prev_item = track_item else: - # get previouse item + # get previous item prev_item = track_item.parent().items()[itemindex - 1] # calculate clip frame range difference from each other diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 2bbb1df8c1..3506af2d6a 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -146,7 +146,7 @@ class CreatorWidget(QtWidgets.QDialog): # convert label text to normal capitalized text with spaces label_text = self.camel_case_split(text) - # assign the new text to lable widget + # assign the new text to label widget label = QtWidgets.QLabel(label_text) label.setObjectName("LineLabel") @@ -337,7 +337,7 @@ class SequenceLoader(avalon.Loader): "Sequentially in order" ], default="Original timing", - help="Would you like to place it at orignal timing?" + help="Would you like to place it at original timing?" 
) ] @@ -475,7 +475,7 @@ class ClipLoader: def _get_asset_data(self): """ Get all available asset data - joint `data` key with asset.data dict into the representaion + joint `data` key with asset.data dict into the representation """ asset_name = self.context["representation"]["context"]["asset"] @@ -550,7 +550,7 @@ class ClipLoader: (self.timeline_out - self.timeline_in + 1) + self.handle_start + self.handle_end) < self.media_duration) - # if slate is on then remove the slate frame from begining + # if slate is on then remove the slate frame from beginning if slate_on: self.media_duration -= 1 self.handle_start += 1 @@ -634,8 +634,8 @@ class PublishClip: "track": "sequence", } - # parents search patern - parents_search_patern = r"\{([a-z]*?)\}" + # parents search pattern + parents_search_pattern = r"\{([a-z]*?)\}" # default templates for non-ui use rename_default = False @@ -719,7 +719,7 @@ class PublishClip: return self.track_item def _populate_track_item_default_data(self): - """ Populate default formating data from track item. """ + """ Populate default formatting data from track item. 
""" self.track_item_default_data = { "_folder_": "shots", @@ -814,7 +814,7 @@ class PublishClip: # mark review layer if self.review_track and ( self.review_track not in self.review_track_default): - # if review layer is defined and not the same as defalut + # if review layer is defined and not the same as default self.review_layer = self.review_track # shot num calculate if self.rename_index == 0: @@ -863,7 +863,7 @@ class PublishClip: # in case track name and subset name is the same then add if self.subset_name == self.track_name: hero_data["subset"] = self.subset - # assing data to return hierarchy data to tag + # assign data to return hierarchy data to tag tag_hierarchy_data = hero_data # add data to return data dict @@ -897,7 +897,7 @@ class PublishClip: type ) - # first collect formating data to use for formating template + # first collect formatting data to use for formatting template formating_data = {} for _k, _v in self.hierarchy_data.items(): value = _v["value"].format( @@ -915,9 +915,9 @@ class PublishClip: """ Create parents and return it in list. """ self.parents = [] - patern = re.compile(self.parents_search_patern) + pattern = re.compile(self.parents_search_pattern) - par_split = [(patern.findall(t).pop(), t) + par_split = [(pattern.findall(t).pop(), t) for t in self.hierarchy.split("/")] for type, template in par_split: diff --git a/openpype/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py b/openpype/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py index 39a65045a7..b8dfb07b47 100644 --- a/openpype/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py +++ b/openpype/hosts/hiero/api/startup/Python/StartupUI/PimpMySpreadsheet.py @@ -1,5 +1,5 @@ # PimpMySpreadsheet 1.0, Antony Nasce, 23/05/13. -# Adds custom spreadsheet columns and right-click menu for setting the Shot Status, and Artist Shot Assignement. +# Adds custom spreadsheet columns and right-click menu for setting the Shot Status, and Artist Shot Assignment. 
# gStatusTags is a global dictionary of key(status)-value(icon) pairs, which can be overridden with custom icons if required # Requires Hiero 1.7v2 or later. # Install Instructions: Copy to ~/.hiero/Python/StartupUI diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index 68f8d35106..fe5c0d5257 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -172,7 +172,7 @@ def add_tags_to_workfile(): } } - # loop trough tag data dict and create deep tag structure + # loop through tag data dict and create deep tag structure for _k, _val in nks_pres_tags.items(): # check if key is not decorated with [] so it is defined as bin bin_find = None diff --git a/openpype/hosts/hiero/plugins/create/create_shot_clip.py b/openpype/hosts/hiero/plugins/create/create_shot_clip.py index 0c5bf93a3f..d0c81cffa2 100644 --- a/openpype/hosts/hiero/plugins/create/create_shot_clip.py +++ b/openpype/hosts/hiero/plugins/create/create_shot_clip.py @@ -139,7 +139,7 @@ class CreateShotClip(phiero.Creator): "type": "QComboBox", "label": "Subset Name", "target": "ui", - "toolTip": "chose subset name patern, if is selected, name of track layer will be used", # noqa + "toolTip": "chose subset name pattern, if is selected, name of track layer will be used", # noqa "order": 0}, "subsetFamily": { "value": ["plate", "take"], diff --git a/openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py b/openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py index 80c6abbaef..9ade7603e0 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py @@ -34,7 +34,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin): if clip_effect_items: tracks_effect_items[track_index] = clip_effect_items - # process all effects and devide them to instance + # process all effects and divide them to instance for _track_index, sub_track_items in 
tracks_effect_items.items(): # skip if track index is the same as review track index if review and review_track_index == _track_index: @@ -156,7 +156,7 @@ class PreCollectClipEffects(pyblish.api.InstancePlugin): 'postage_stamp_frame', 'maskChannel', 'export_cc', 'select_cccid', 'mix', 'version', 'matrix'] - # loop trough all knobs and collect not ignored + # loop through all knobs and collect not ignored # and any with any value for knob in node.knobs().keys(): # skip nodes in ignore keys diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index bf3a779ab1..4eac6a008a 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -264,7 +264,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): timeline_range = self.create_otio_time_range_from_timeline_item_data( track_item) - # loop trough audio track items and search for overlaping clip + # loop through audio track items and search for overlapping clip for otio_audio in self.audio_track_items: parent_range = otio_audio.range_in_parent() diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py index fea36d00fb..1d0727d0af 100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py @@ -5,7 +5,7 @@ class CollectClipResolution(pyblish.api.InstancePlugin): """Collect clip geometry resolution""" order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Clip Resoluton" + label = "Collect Clip Resolution" hosts = ["hiero"] families = ["clip"] diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py index f0e0f1a1a3..2f65a8bd4f 
100644 --- a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_retime.py @@ -52,7 +52,7 @@ class PrecollectRetime(api.InstancePlugin): handle_end )) - # loop withing subtrack items + # loop within subtrack items time_warp_nodes = [] source_in_change = 0 source_out_change = 0 @@ -76,7 +76,7 @@ class PrecollectRetime(api.InstancePlugin): (timeline_in - handle_start), (timeline_out + handle_end) + 1) ] - # calculate differnce + # calculate difference diff_in = (node["lookup"].getValueAt( timeline_in)) - timeline_in diff_out = (node["lookup"].getValueAt( diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py index ac081ac297..12d118f0cc 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_instances.py +++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py @@ -74,6 +74,9 @@ class CollectInstances(pyblish.api.ContextPlugin): instance = context.create_instance(label) + # Include `families` using `family` data + instance.data["families"] = [instance.data["family"]] + instance[:] = [node] instance.data.update(data) diff --git a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py index 78794acc97..113e1b0bcb 100644 --- a/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py +++ b/openpype/hosts/houdini/plugins/publish/extract_vdb_cache.py @@ -37,5 +37,7 @@ class ExtractVDBCache(openpype.api.Extractor): "ext": "vdb", "files": output, "stagingDir": staging_dir, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/api/customize.py b/openpype/hosts/maya/api/customize.py index 8474262626..c7fb042ead 100644 --- a/openpype/hosts/maya/api/customize.py +++ b/openpype/hosts/maya/api/customize.py @@ -93,31 
+93,20 @@ def override_toolbox_ui(): return # Create our controls - background_color = (0.267, 0.267, 0.267) controls = [] - look_assigner = None - try: - look_assigner = host_tools.get_tool_by_name( - "lookassigner", - parent=pipeline._parent - ) - except Exception: - log.warning("Couldn't create Look assigner window.", exc_info=True) - if look_assigner is not None: - controls.append( - mc.iconTextButton( - "pype_toolbox_lookmanager", - annotation="Look Manager", - label="Look Manager", - image=os.path.join(icons, "lookmanager.png"), - command=host_tools.show_look_assigner, - bgc=background_color, - width=icon_size, - height=icon_size, - parent=parent - ) + controls.append( + mc.iconTextButton( + "pype_toolbox_lookmanager", + annotation="Look Manager", + label="Look Manager", + image=os.path.join(icons, "lookmanager.png"), + command=host_tools.show_look_assigner, + width=icon_size, + height=icon_size, + parent=parent ) + ) controls.append( mc.iconTextButton( @@ -128,7 +117,6 @@ def override_toolbox_ui(): command=lambda: host_tools.show_workfiles( parent=pipeline._parent ), - bgc=background_color, width=icon_size, height=icon_size, parent=parent @@ -144,7 +132,6 @@ def override_toolbox_ui(): command=lambda: host_tools.show_loader( parent=pipeline._parent, use_context=True ), - bgc=background_color, width=icon_size, height=icon_size, parent=parent @@ -160,7 +147,6 @@ def override_toolbox_ui(): command=lambda: host_tools.show_scene_inventory( parent=pipeline._parent ), - bgc=background_color, width=icon_size, height=icon_size, parent=parent diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 3f93bc2ab5..41528f20ba 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -184,7 +184,7 @@ def uv_from_element(element): parent = element.split(".", 1)[0] # Maya is funny in that when the transform of the shape - # of the component elemen has children, the name returned + # of the component element has children, the 
name returned # by that elementection is the shape. Otherwise, it is # the transform. So lets see what type we're dealing with here. if cmds.nodeType(parent) in supported: @@ -733,7 +733,7 @@ def namespaced(namespace, new=True): str: The namespace that is used during the context """ - original = cmds.namespaceInfo(cur=True) + original = cmds.namespaceInfo(cur=True, absoluteName=True) if new: namespace = avalon.maya.lib.unique_namespace(namespace) cmds.namespace(add=namespace) @@ -1630,7 +1630,7 @@ def get_container_transforms(container, members=None, root=False): Args: container (dict): the container members (list): optional and convenience argument - root (bool): return highest node in hierachy if True + root (bool): return highest node in hierarchy if True Returns: root (list / str): @@ -2517,7 +2517,7 @@ class shelf(): def _get_render_instances(): """Return all 'render-like' instances. - This returns list of instance sets that needs to receive informations + This returns list of instance sets that needs to receive information about render layer changes. 
Returns: diff --git a/openpype/hosts/maya/api/menu.json b/openpype/hosts/maya/api/menu.json index bf4d812d33..a2efd5233c 100644 --- a/openpype/hosts/maya/api/menu.json +++ b/openpype/hosts/maya/api/menu.json @@ -506,8 +506,8 @@ "transforms", "local" ], - "title": "# Copy Local Transfroms", - "tooltip": "Copy local transfroms" + "title": "# Copy Local Transforms", + "tooltip": "Copy local transforms" }, { "type": "action", @@ -520,8 +520,8 @@ "transforms", "matrix" ], - "title": "# Copy Matrix Transfroms", - "tooltip": "Copy Matrix transfroms" + "title": "# Copy Matrix Transforms", + "tooltip": "Copy Matrix transforms" }, { "type": "action", @@ -842,7 +842,7 @@ "sourcetype": "file", "tags": ["cleanup", "remove_user_defined_attributes"], "title": "# Remove User Defined Attributes", - "tooltip": "Remove all user-defined attributs from all nodes" + "tooltip": "Remove all user-defined attributes from all nodes" }, { "type": "action", diff --git a/openpype/hosts/maya/api/menu_backup.json b/openpype/hosts/maya/api/menu_backup.json index 731a33a630..e2a558aedc 100644 --- a/openpype/hosts/maya/api/menu_backup.json +++ b/openpype/hosts/maya/api/menu_backup.json @@ -794,8 +794,8 @@ "transforms", "local" ], - "title": "Copy Local Transfroms", - "tooltip": "Copy local transfroms" + "title": "Copy Local Transforms", + "tooltip": "Copy local transforms" }, { "type": "action", @@ -808,8 +808,8 @@ "transforms", "matrix" ], - "title": "Copy Matrix Transfroms", - "tooltip": "Copy Matrix transfroms" + "title": "Copy Matrix Transforms", + "tooltip": "Copy Matrix transforms" }, { "type": "action", @@ -1274,7 +1274,7 @@ "sourcetype": "file", "tags": ["cleanup", "remove_user_defined_attributes"], "title": "Remove User Defined Attributes", - "tooltip": "Remove all user-defined attributs from all nodes" + "tooltip": "Remove all user-defined attributes from all nodes" }, { "type": "action", diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index 
3537fa3837..4f826b8fde 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -341,7 +341,7 @@ def update_package(set_container, representation): def update_scene(set_container, containers, current_data, new_data, new_file): """Updates the hierarchy, assets and their matrix - Updates the following withing the scene: + Updates the following within the scene: * Setdress hierarchy alembic * Matrix * Parenting diff --git a/openpype/hosts/maya/api/shader_definition_editor.py b/openpype/hosts/maya/api/shader_definition_editor.py index ed425f4718..911db48ac2 100644 --- a/openpype/hosts/maya/api/shader_definition_editor.py +++ b/openpype/hosts/maya/api/shader_definition_editor.py @@ -92,7 +92,7 @@ class ShaderDefinitionsEditor(QtWidgets.QWidget): def _write_definition_file(self, content, force=False): """Write content as definition to file in database. - Before file is writen, check is made if its content has not + Before file is written, check is made if its content has not changed. If is changed, warning is issued to user if he wants it to overwrite. Note: GridFs doesn't allow changing file content. You need to delete existing file and create new one. diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 85919d1166..fa5e73f3ed 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -53,8 +53,8 @@ class CreateRender(plugin.Creator): renderer. ass (bool): Submit as ``ass`` file for standalone Arnold renderer. tileRendering (bool): Instance is set to tile rendering mode. We - won't submit actuall render, but we'll make publish job to wait - for Tile Assemly job done and then publish. + won't submit actual render, but we'll make publish job to wait + for Tile Assembly job done and then publish. 
See Also: https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py index b7d44dd431..891f21916c 100644 --- a/openpype/hosts/maya/plugins/load/load_ass.py +++ b/openpype/hosts/maya/plugins/load/load_ass.py @@ -1,5 +1,6 @@ from avalon import api import openpype.hosts.maya.api.plugin +from openpype.hosts.maya.api.plugin import get_reference_node import os from openpype.api import get_project_settings import clique @@ -111,7 +112,7 @@ class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): # Get reference node from container members members = cmds.sets(node, query=True, nodesOnly=True) - reference_node = self._get_reference_node(members) + reference_node = get_reference_node(members) assert os.path.exists(proxyPath), "%s does not exist." % proxyPath diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index 73a2a4f448..0611dcc189 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -3,16 +3,16 @@ from avalon.maya.pipeline import containerise from avalon.maya import lib from maya import cmds, mel + class AudioLoader(api.Loader): """Specific loader of audio.""" families = ["audio"] - label = "Import audio." 
+ label = "Import audio" representations = ["wav"] icon = "volume-up" color = "orange" - def load(self, context, name, namespace, data): start_frame = cmds.playbackOptions(query=True, min=True) diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index d0a83b8177..444f98f22e 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -2,6 +2,7 @@ import os from avalon import api from openpype.api import get_project_settings + class GpuCacheLoader(api.Loader): """Load model Alembic as gpuCache""" diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py index eea5844e8b..0652147324 100644 --- a/openpype/hosts/maya/plugins/load/load_image_plane.py +++ b/openpype/hosts/maya/plugins/load/load_image_plane.py @@ -77,7 +77,7 @@ class ImagePlaneLoader(api.Loader): """Specific loader of plate for image planes on selected camera.""" families = ["image", "plate", "render"] - label = "Load imagePlane." 
+ label = "Load imagePlane" representations = ["mov", "exr", "preview", "png"] icon = "image" color = "orange" @@ -118,7 +118,7 @@ class ImagePlaneLoader(api.Loader): camera = pm.createNode("camera") if camera is None: - return + return try: camera.displayResolution.set(1) diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py index dd64fd0a16..2cc24f1360 100644 --- a/openpype/hosts/maya/plugins/load/load_reference.py +++ b/openpype/hosts/maya/plugins/load/load_reference.py @@ -63,6 +63,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): if current_namespace != ":": group_name = current_namespace + ":" + group_name + group_name = "|" + group_name + self[:] = new_nodes if attach_to_root: diff --git a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py index 80b453bd13..ed561e1131 100644 --- a/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py +++ b/openpype/hosts/maya/plugins/load/load_vdb_to_vray.py @@ -2,6 +2,72 @@ from avalon import api from openpype.api import get_project_settings import os +from maya import cmds + +# List of 3rd Party Channels Mapping names for VRayVolumeGrid +# See: https://docs.chaosgroup.com/display/VRAY4MAYA/Input +# #Input-3rdPartyChannelsMapping +THIRD_PARTY_CHANNELS = { + 2: "Smoke", + 1: "Temperature", + 10: "Fuel", + 4: "Velocity.x", + 5: "Velocity.y", + 6: "Velocity.z", + 7: "Red", + 8: "Green", + 9: "Blue", + 14: "Wavelet Energy", + 19: "Wavelet.u", + 20: "Wavelet.v", + 21: "Wavelet.w", + # These are not in UI or documentation but V-Ray does seem to set these. + 15: "AdvectionOrigin.x", + 16: "AdvectionOrigin.y", + 17: "AdvectionOrigin.z", + +} + + +def _fix_duplicate_vvg_callbacks(): + """Workaround to kill duplicate VRayVolumeGrids attribute callbacks. 
+ + This fixes a huge lag in Maya on switching 3rd Party Channels Mappings + or to different .vdb file paths because it spams an attribute changed + callback: `vvgUserChannelMappingsUpdateUI`. + + ChaosGroup bug ticket: 154-008-9890 + + Found with: + - Maya 2019.2 on Windows 10 + - V-Ray: V-Ray Next for Maya, update 1 version 4.12.01.00001 + + Bug still present in: + - Maya 2022.1 on Windows 10 + - V-Ray 5 for Maya, Update 2.1 (v5.20.01 from Dec 16 2021) + + """ + # todo(roy): Remove when new V-Ray release fixes duplicate calls + + jobs = cmds.scriptJob(listJobs=True) + + matched = set() + for entry in jobs: + # Remove the number + index, callback = entry.split(":", 1) + callback = callback.strip() + + # Detect whether it is a `vvgUserChannelMappingsUpdateUI` + # attribute change callback + if callback.startswith('"-runOnce" 1 "-attributeChange" "'): + if '"vvgUserChannelMappingsUpdateUI(' in callback: + if callback in matched: + # If we've seen this callback before then + # delete the duplicate callback + cmds.scriptJob(kill=int(index)) + else: + matched.add(callback) + class LoadVDBtoVRay(api.Loader): @@ -14,15 +80,24 @@ class LoadVDBtoVRay(api.Loader): def load(self, context, name, namespace, data): - from maya import cmds import avalon.maya.lib as lib from avalon.maya.pipeline import containerise + assert os.path.exists(self.fname), ( + "Path does not exist: %s" % self.fname + ) + try: family = context["representation"]["context"]["family"] except ValueError: family = "vdbcache" + # Ensure V-ray is loaded with the vrayvolumegrid + if not cmds.pluginInfo("vrayformaya", query=True, loaded=True): + cmds.loadPlugin("vrayformaya") + if not cmds.pluginInfo("vrayvolumegrid", query=True, loaded=True): + cmds.loadPlugin("vrayvolumegrid") + # Check if viewport drawing engine is Open GL Core (compat) render_engine = None compatible = "OpenGLCoreProfileCompat" @@ -30,13 +105,11 @@ class LoadVDBtoVRay(api.Loader): render_engine = cmds.optionVar(query="vp2RenderingEngine") 
if not render_engine or render_engine != compatible: - raise RuntimeError("Current scene's settings are incompatible." - "See Preferences > Display > Viewport 2.0 to " - "set the render engine to '%s'" % compatible) + self.log.warning("Current scene's settings are incompatible." + "See Preferences > Display > Viewport 2.0 to " + "set the render engine to '%s'" % compatible) asset = context['asset'] - version = context["version"] - asset_name = asset["name"] namespace = namespace or lib.unique_namespace( asset_name + "_", @@ -45,7 +118,7 @@ class LoadVDBtoVRay(api.Loader): ) # Root group - label = "{}:{}".format(namespace, name) + label = "{}:{}_VDB".format(namespace, name) root = cmds.group(name=label, empty=True) settings = get_project_settings(os.environ['AVALON_PROJECT']) @@ -55,20 +128,24 @@ class LoadVDBtoVRay(api.Loader): if c is not None: cmds.setAttr(root + ".useOutlinerColor", 1) cmds.setAttr(root + ".outlinerColor", - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) - ) + float(c[0]) / 255, + float(c[1]) / 255, + float(c[2]) / 255) - # Create VR + # Create VRayVolumeGrid grid_node = cmds.createNode("VRayVolumeGrid", - name="{}VVGShape".format(label), + name="{}Shape".format(label), parent=root) - # Set attributes - cmds.setAttr("{}.inFile".format(grid_node), self.fname, type="string") - cmds.setAttr("{}.inReadOffset".format(grid_node), - version["startFrames"]) + # Ensure .currentTime is connected to time1.outTime + cmds.connectAttr("time1.outTime", grid_node + ".currentTime") + + # Set path + self._set_path(grid_node, self.fname, show_preset_popup=True) + + # Lock the shape node so the user can't delete the transform/shape + # as if it was referenced + cmds.lockNode(grid_node, lock=True) nodes = [root, grid_node] self[:] = nodes @@ -79,3 +156,132 @@ class LoadVDBtoVRay(api.Loader): nodes=nodes, context=context, loader=self.__class__.__name__) + + def _set_path(self, grid_node, path, show_preset_popup=True): + + from openpype.hosts.maya.api.lib 
import attribute_values + from maya import cmds + + def _get_filename_from_folder(path): + # Using the sequence of .vdb files we check the frame range, etc. + # to set the filename with #### padding. + files = sorted(x for x in os.listdir(path) if x.endswith(".vdb")) + if not files: + raise RuntimeError("Couldn't find .vdb files in: %s" % path) + + if len(files) == 1: + # Ensure check for single file is also done in folder + fname = files[0] + else: + # Sequence + from avalon.vendor import clique + # todo: check support for negative frames as input + collections, remainder = clique.assemble(files) + assert len(collections) == 1, ( + "Must find a single image sequence, " + "found: %s" % (collections,) + ) + collection = collections[0] + + fname = collection.format('{head}{{padding}}{tail}') + padding = collection.padding + if padding == 0: + # Clique doesn't provide padding if the frame number never + # starts with a zero and thus has never any visual padding. + # So we fall back to the smallest frame number as padding. + padding = min(len(str(i)) for i in collection.indexes) + + # Supply frame/padding with # signs + padding_str = "#" * padding + fname = fname.format(padding=padding_str) + + return os.path.join(path, fname) + + # The path is either a single file or sequence in a folder so + # we do a quick lookup for our files + if os.path.isfile(path): + path = os.path.dirname(path) + path = _get_filename_from_folder(path) + + # Even when not applying a preset V-Ray will reset the 3rd Party + # Channels Mapping of the VRayVolumeGrid when setting the .inPath + # value. As such we try and preserve the values ourselves. + # Reported as ChaosGroup bug ticket: 154-011-2909  + # todo(roy): Remove when new V-Ray release preserves values + original_user_mapping = cmds.getAttr(grid_node + ".usrchmap") or "" + + # Workaround for V-Ray bug: fix lag on path change, see function + _fix_duplicate_vvg_callbacks() + + # Suppress preset pop-up if we want. 
+ popup_attr = "{0}.inDontOfferPresets".format(grid_node) + popup = {popup_attr: not show_preset_popup} + with attribute_values(popup): + cmds.setAttr(grid_node + ".inPath", path, type="string") + + # Reapply the 3rd Party channels user mapping when no preset popup + # was shown to the user + if not show_preset_popup: + channels = cmds.getAttr(grid_node + ".usrchmapallch").split(";") + channels = set(channels) # optimize lookup + restored_mapping = "" + for entry in original_user_mapping.split(";"): + if not entry: + # Ignore empty entries + continue + + # If 3rd Party Channels selection channel still exists then + # add it again. + index, channel = entry.split(",") + attr = THIRD_PARTY_CHANNELS.get(int(index), + # Fallback for when a mapping + # was set that is not in the + # documentation + "???") + if channel in channels: + restored_mapping += entry + ";" + else: + self.log.warning("Can't preserve '%s' mapping due to " + "missing channel '%s' on node: " + "%s" % (attr, channel, grid_node)) + + if restored_mapping: + cmds.setAttr(grid_node + ".usrchmap", + restored_mapping, + type="string") + + def update(self, container, representation): + + path = api.get_representation_path(representation) + + # Find VRayVolumeGrid + members = cmds.sets(container['objectName'], query=True) + grid_nodes = cmds.ls(members, type="VRayVolumeGrid", long=True) + assert len(grid_nodes) > 0, "This is a bug" + + # Update the VRayVolumeGrid + for grid_node in grid_nodes: + self._set_path(grid_node, path=path, show_preset_popup=False) + + # Update container representation + cmds.setAttr(container["objectName"] + ".representation", + str(representation["_id"]), + type="string") + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + + # Get all members of the avalon container, ensure they are unlocked + # and delete everything + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) 
+ cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass diff --git a/openpype/hosts/maya/plugins/publish/collect_assembly.py b/openpype/hosts/maya/plugins/publish/collect_assembly.py index 22af1239b1..313636793b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_assembly.py +++ b/openpype/hosts/maya/plugins/publish/collect_assembly.py @@ -24,7 +24,7 @@ class CollectAssembly(pyblish.api.InstancePlugin): """ order = pyblish.api.CollectorOrder + 0.49 - label = "Assemby" + label = "Assembly" families = ["assembly"] def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index ac1e495f08..cbddb86e53 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -126,7 +126,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): r"^.+:(.*)", layer).group(1) except IndexError: msg = "Invalid layer name in set [ {} ]".format(layer) - self.log.warnig(msg) + self.log.warning(msg) continue self.log.info("processing %s" % layer) diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py index 7097d7ce9c..e5c182c908 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayscene.py +++ b/openpype/hosts/maya/plugins/publish/collect_vrayscene.py @@ -48,7 +48,7 @@ class CollectVrayScene(pyblish.api.InstancePlugin): expected_layer_name = re.search(r"^.+:(.*)", layer).group(1) except IndexError: msg = "Invalid layer name in set [ {} ]".format(layer) - self.log.warnig(msg) + self.log.warning(msg) continue self.log.info("processing %s" % layer) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index 953539f65c..bf79ddbf44 100644 --- 
a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -22,6 +22,11 @@ COPY = 1 HARDLINK = 2 +def escape_space(path): + """Ensure path is enclosed by quotes to allow paths with spaces""" + return '"{}"'.format(path) if " " in path else path + + def find_paths_by_hash(texture_hash): """Find the texture hash key in the dictionary. @@ -76,7 +81,7 @@ def maketx(source, destination, *args): ] cmd.extend(args) - cmd.extend(["-o", destination, source]) + cmd.extend(["-o", escape_space(destination), escape_space(source)]) cmd = " ".join(cmd) @@ -314,7 +319,6 @@ class ExtractLook(openpype.api.Extractor): do_maketx = instance.data.get("maketx", False) # Collect all unique files used in the resources - files = set() files_metadata = {} for resource in resources: # Preserve color space values (force value after filepath change) @@ -325,7 +329,6 @@ class ExtractLook(openpype.api.Extractor): for f in resource["files"]: files_metadata[os.path.normpath(f)] = { "color_space": color_space} - # files.update(os.path.normpath(f)) # Process the resource files transfers = [] @@ -333,7 +336,6 @@ class ExtractLook(openpype.api.Extractor): hashes = {} force_copy = instance.data.get("forceCopy", False) - self.log.info(files) for filepath in files_metadata: linearize = False @@ -492,7 +494,7 @@ class ExtractLook(openpype.api.Extractor): # Include `source-hash` as string metadata "-sattrib", "sourceHash", - texture_hash, + escape_space(texture_hash), colorconvert, ) diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayscene.py b/openpype/hosts/maya/plugins/publish/extract_vrayscene.py index c9edfc8343..1d7c0fa717 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayscene.py +++ b/openpype/hosts/maya/plugins/publish/extract_vrayscene.py @@ -36,7 +36,7 @@ class ExtractVrayscene(openpype.api.Extractor): else: node = vray_settings[0] - # setMembers on vrayscene_layer shoudl contain layer name. 
+ # setMembers on vrayscene_layer should contain layer name. layer_name = instance.data.get("layer") staging_dir = self.staging_dir(instance) @@ -111,7 +111,7 @@ class ExtractVrayscene(openpype.api.Extractor): layer (str): layer name. template (str): token template. start_frame (int, optional): start frame - if set we use - mutliple files export mode. + multiple files export mode. Returns: str: formatted path. diff --git a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py index ac3de4114c..f852904580 100644 --- a/openpype/hosts/maya/plugins/publish/submit_maya_muster.py +++ b/openpype/hosts/maya/plugins/publish/submit_maya_muster.py @@ -331,7 +331,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): # but dispatcher (Server) and not render clients. Render clients # inherit environment from publisher including PATH, so there's # no problem finding PYPE, but there is now way (as far as I know) - # to set environment dynamically for dispatcher. Therefor this hack. + # to set environment dynamically for dispatcher. Therefore this hack. args = [muster_python, _get_script().replace('\\', '\\\\'), "--paths", @@ -478,7 +478,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): # such that proper initialisation happens the same # way as it does on a local machine. # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are + # have access to these paths, such as if slaves are # running Linux and the submitter is on Windows. 
"PYTHONPATH", "PATH", diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py index 57cf0803a4..5ce422239d 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py @@ -78,7 +78,7 @@ class GetOverlappingUVs(object): if len(uarray) == 0 or len(varray) == 0: return (False, None, None) - # loop throught all vertices to construct edges/rays + # loop through all vertices to construct edges/rays u = uarray[-1] v = varray[-1] for i in xrange(len(uarray)): # noqa: F821 diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_contents.py b/openpype/hosts/maya/plugins/publish/validate_rig_contents.py index 4a6914ef90..6fe51d7b51 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_contents.py @@ -9,7 +9,7 @@ class ValidateRigContents(pyblish.api.InstancePlugin): Every rig must contain at least two object sets: "controls_SET" - Set of all animatable controls - "out_SET" - Set of all cachable meshes + "out_SET" - Set of all cacheable meshes """ diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py index 99d6cfd4c5..00f1fda2d3 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_staticmesh_naming.py @@ -10,7 +10,7 @@ import re class ValidateUnrealStaticmeshName(pyblish.api.InstancePlugin): """Validate name of Unreal Static Mesh - Unreals naming convention states that staticMesh sould start with `SM` + Unreals naming convention states that staticMesh should start with `SM` prefix - SM_[Name]_## (Eg. SM_sube_01). 
This plugin also validates other types of meshes - collision meshes: diff --git a/openpype/hosts/nuke/__init__.py b/openpype/hosts/nuke/__init__.py index 366f704dd8..60b37ce1dd 100644 --- a/openpype/hosts/nuke/__init__.py +++ b/openpype/hosts/nuke/__init__.py @@ -6,10 +6,7 @@ def add_implementation_envs(env, _app): # Add requirements to NUKE_PATH pype_root = os.environ["OPENPYPE_REPOS_ROOT"] new_nuke_paths = [ - os.path.join(pype_root, "openpype", "hosts", "nuke", "startup"), - os.path.join( - pype_root, "repos", "avalon-core", "setup", "nuke", "nuke_path" - ) + os.path.join(pype_root, "openpype", "hosts", "nuke", "startup") ] old_nuke_path = env.get("NUKE_PATH") or "" for path in old_nuke_path.split(os.pathsep): diff --git a/openpype/hosts/nuke/api/__init__.py b/openpype/hosts/nuke/api/__init__.py index 1567189ed1..f7ebcb41da 100644 --- a/openpype/hosts/nuke/api/__init__.py +++ b/openpype/hosts/nuke/api/__init__.py @@ -1,130 +1,57 @@ -import os -import nuke +from .workio import ( + file_extensions, + has_unsaved_changes, + save_file, + open_file, + current_file, + work_root, +) -import avalon.api -import pyblish.api -import openpype -from . 
import lib, menu +from .command import ( + reset_frame_range, + get_handles, + reset_resolution, + viewer_update_and_undo_stop +) -log = openpype.api.Logger().get_logger(__name__) +from .plugin import OpenPypeCreator +from .pipeline import ( + install, + uninstall, -AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + ls, + + containerise, + parse_container, + update_container, +) +from .lib import ( + maintained_selection +) -# registering pyblish gui regarding settings in presets -if os.getenv("PYBLISH_GUI", None): - pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None)) +__all__ = ( + "file_extensions", + "has_unsaved_changes", + "save_file", + "open_file", + "current_file", + "work_root", + "reset_frame_range", + "get_handles", + "reset_resolution", + "viewer_update_and_undo_stop", -def reload_config(): - """Attempt to reload pipeline at run-time. + "OpenPypeCreator", + "install", + "uninstall", - CAUTION: This is primarily for development and debugging purposes. 
+ "ls", - """ + "containerise", + "parse_container", + "update_container", - import importlib - - for module in ( - "{}.api".format(AVALON_CONFIG), - "{}.hosts.nuke.api.actions".format(AVALON_CONFIG), - "{}.hosts.nuke.api.menu".format(AVALON_CONFIG), - "{}.hosts.nuke.api.plugin".format(AVALON_CONFIG), - "{}.hosts.nuke.api.lib".format(AVALON_CONFIG), - ): - log.info("Reloading module: {}...".format(module)) - - module = importlib.import_module(module) - - try: - importlib.reload(module) - except AttributeError as e: - from importlib import reload - log.warning("Cannot reload module: {}".format(e)) - reload(module) - - -def install(): - ''' Installing all requarements for Nuke host - ''' - - # remove all registred callbacks form avalon.nuke - from avalon import pipeline - pipeline._registered_event_handlers.clear() - - log.info("Registering Nuke plug-ins..") - pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) - avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) - - # Register Avalon event for workfiles loading. - avalon.api.on("workio.open_file", lib.check_inventory_versions) - avalon.api.on("taskChanged", menu.change_context_label) - - pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled) - workfile_settings = lib.WorkfileSettings() - # Disable all families except for the ones we explicitly want to see - family_states = [ - "write", - "review", - "nukenodes", - "model", - "gizmo" - ] - - avalon.api.data["familiesStateDefault"] = False - avalon.api.data["familiesStateToggled"] = family_states - - # Set context settings. 
- nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") - nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") - nuke.addOnCreate(lib.process_workfile_builder, nodeClass="Root") - nuke.addOnCreate(lib.launch_workfiles_app, nodeClass="Root") - menu.install() - - -def uninstall(): - '''Uninstalling host's integration - ''' - log.info("Deregistering Nuke plug-ins..") - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) - avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) - - pyblish.api.deregister_callback( - "instanceToggled", on_pyblish_instance_toggled) - - reload_config() - menu.uninstall() - - -def on_pyblish_instance_toggled(instance, old_value, new_value): - """Toggle node passthrough states on instance toggles.""" - - log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( - instance, old_value, new_value)) - - from avalon.nuke import ( - viewer_update_and_undo_stop, - add_publish_knob - ) - - # Whether instances should be passthrough based on new value - - with viewer_update_and_undo_stop(): - n = instance[0] - try: - n["publish"].value() - except ValueError: - n = add_publish_knob(n) - log.info(" `Publish` knob was added to write node..") - - n["publish"].setValue(new_value) + "maintained_selection", +) diff --git a/openpype/hosts/nuke/api/actions.py b/openpype/hosts/nuke/api/actions.py index fd18c787c4..c4a6f0fb84 100644 --- a/openpype/hosts/nuke/api/actions.py +++ b/openpype/hosts/nuke/api/actions.py @@ -1,12 +1,11 @@ import pyblish.api -from avalon.nuke.lib import ( +from openpype.api import get_errored_instances_from_context +from .lib import ( reset_selection, select_nodes ) -from openpype.api import get_errored_instances_from_context - class SelectInvalidAction(pyblish.api.Action): """Select invalid nodes in Nuke when plug-in failed. 
diff --git a/openpype/hosts/nuke/api/command.py b/openpype/hosts/nuke/api/command.py new file mode 100644 index 0000000000..212d4757c6 --- /dev/null +++ b/openpype/hosts/nuke/api/command.py @@ -0,0 +1,135 @@ +import logging +import contextlib +import nuke + +from avalon import api, io + + +log = logging.getLogger(__name__) + + +def reset_frame_range(): + """ Set frame range to current asset + Also it will set a Viewer range with + displayed handles + """ + + fps = float(api.Session.get("AVALON_FPS", 25)) + + nuke.root()["fps"].setValue(fps) + name = api.Session["AVALON_ASSET"] + asset = io.find_one({"name": name, "type": "asset"}) + asset_data = asset["data"] + + handles = get_handles(asset) + + frame_start = int(asset_data.get( + "frameStart", + asset_data.get("edit_in"))) + + frame_end = int(asset_data.get( + "frameEnd", + asset_data.get("edit_out"))) + + if not all([frame_start, frame_end]): + missing = ", ".join(["frame_start", "frame_end"]) + msg = "'{}' are not set for asset '{}'!".format(missing, name) + log.warning(msg) + nuke.message(msg) + return + + frame_start -= handles + frame_end += handles + + nuke.root()["first_frame"].setValue(frame_start) + nuke.root()["last_frame"].setValue(frame_end) + + # setting active viewers + vv = nuke.activeViewer().node() + vv["frame_range_lock"].setValue(True) + vv["frame_range"].setValue("{0}-{1}".format( + int(asset_data["frameStart"]), + int(asset_data["frameEnd"])) + ) + + +def get_handles(asset): + """ Gets handles data + + Arguments: + asset (dict): avalon asset entity + + Returns: + handles (int) + """ + data = asset["data"] + if "handles" in data and data["handles"] is not None: + return int(data["handles"]) + + parent_asset = None + if "visualParent" in data: + vp = data["visualParent"] + if vp is not None: + parent_asset = io.find_one({"_id": io.ObjectId(vp)}) + + if parent_asset is None: + parent_asset = io.find_one({"_id": io.ObjectId(asset["parent"])}) + + if parent_asset is not None: + return 
get_handles(parent_asset) + else: + return 0 + + +def reset_resolution(): + """Set resolution to project resolution.""" + project = io.find_one({"type": "project"}) + p_data = project["data"] + + width = p_data.get("resolution_width", + p_data.get("resolutionWidth")) + height = p_data.get("resolution_height", + p_data.get("resolutionHeight")) + + if not all([width, height]): + missing = ", ".join(["width", "height"]) + msg = "No resolution information `{0}` found for '{1}'.".format( + missing, + project["name"]) + log.warning(msg) + nuke.message(msg) + return + + current_width = nuke.root()["format"].value().width() + current_height = nuke.root()["format"].value().height() + + if width != current_width or height != current_height: + + fmt = None + for f in nuke.formats(): + if f.width() == width and f.height() == height: + fmt = f.name() + + if not fmt: + nuke.addFormat( + "{0} {1} {2}".format(int(width), int(height), project["name"]) + ) + fmt = project["name"] + + nuke.root()["format"].setValue(fmt) + + +@contextlib.contextmanager +def viewer_update_and_undo_stop(): + """Lock viewer from updating and stop recording undo steps""" + try: + # stop active viewer to update any change + viewer = nuke.activeViewer() + if viewer: + viewer.stop() + else: + log.warning("No available active Viewer") + nuke.Undo.disable() + yield + finally: + nuke.Undo.enable() diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index e36a5aa5ba..26dab9a2bc 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -3,15 +3,15 @@ import re import sys import six import platform +import contextlib from collections import OrderedDict +import clique + +import nuke from avalon import api, io, lib -import avalon.nuke -from avalon.nuke import lib as anlib -from avalon.nuke import ( - save_file, open_file -) + from openpype.api import ( Logger, Anatomy, @@ -28,21 +28,476 @@ from openpype.lib.path_tools import HostDirmap from openpype.settings import 
get_project_settings from openpype.modules import ModulesManager -import nuke +from .workio import ( + save_file, + open_file +) -from .utils import set_context_favorites +log = Logger.get_logger(__name__) -log = Logger().get_logger(__name__) +_NODE_TAB_NAME = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") +AVALON_LABEL = os.getenv("AVALON_LABEL") or "Avalon" +AVALON_TAB = "{}".format(AVALON_LABEL) +AVALON_DATA_GROUP = "{}DataGroup".format(AVALON_LABEL.capitalize()) +EXCLUDED_KNOB_TYPE_ON_READ = ( + 20, # Tab Knob + 26, # Text Knob (But for backward compatibility, still be read + # if value is not an empty string.) +) -opnl = sys.modules[__name__] -opnl._project = None -opnl.project_name = os.getenv("AVALON_PROJECT") -opnl.workfiles_launched = False -opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") + +class Context: + main_window = None + context_label = None + project_name = os.getenv("AVALON_PROJECT") + workfiles_launched = False + # Seems unused + _project_doc = None + + +class Knobby(object): + """For creating knob which it's type isn't mapped in `create_knobs` + + Args: + type (string): Nuke knob type name + value: Value to be set with `Knob.setValue`, put `None` if not required + flags (list, optional): Knob flags to be set with `Knob.setFlag` + *args: Args other than knob name for initializing knob class + + """ + + def __init__(self, type, value, flags=None, *args): + self.type = type + self.value = value + self.flags = flags or [] + self.args = args + + def create(self, name, nice=None): + knob_cls = getattr(nuke, self.type) + knob = knob_cls(name, nice, *self.args) + if self.value is not None: + knob.setValue(self.value) + for flag in self.flags: + knob.setFlag(flag) + return knob + + +def create_knobs(data, tab=None): + """Create knobs by data + + Depending on the type of each dict value and creates the correct Knob. 
+ + Mapped types: + bool: nuke.Boolean_Knob + int: nuke.Int_Knob + float: nuke.Double_Knob + list: nuke.Enumeration_Knob + six.string_types: nuke.String_Knob + + dict: If it's a nested dict (all values are dict), will turn into + A tabs group. Or just a knobs group. + + Args: + data (dict): collection of attributes and their value + tab (string, optional): Knobs' tab name + + Returns: + list: A list of `nuke.Knob` objects + + """ + def nice_naming(key): + """Convert camelCase name into UI Display Name""" + words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:]) + return " ".join(words) + + # Turn key-value pairs into knobs + knobs = list() + + if tab: + knobs.append(nuke.Tab_Knob(tab)) + + for key, value in data.items(): + # Knob name + if isinstance(key, tuple): + name, nice = key + else: + name, nice = key, nice_naming(key) + + # Create knob by value type + if isinstance(value, Knobby): + knobby = value + knob = knobby.create(name, nice) + + elif isinstance(value, float): + knob = nuke.Double_Knob(name, nice) + knob.setValue(value) + + elif isinstance(value, bool): + knob = nuke.Boolean_Knob(name, nice) + knob.setValue(value) + knob.setFlag(nuke.STARTLINE) + + elif isinstance(value, int): + knob = nuke.Int_Knob(name, nice) + knob.setValue(value) + + elif isinstance(value, six.string_types): + knob = nuke.String_Knob(name, nice) + knob.setValue(value) + + elif isinstance(value, list): + knob = nuke.Enumeration_Knob(name, nice, value) + + elif isinstance(value, dict): + if all(isinstance(v, dict) for v in value.values()): + # Create a group of tabs + begain = nuke.BeginTabGroup_Knob() + end = nuke.EndTabGroup_Knob() + begain.setName(name) + end.setName(name + "_End") + knobs.append(begain) + for k, v in value.items(): + knobs += create_knobs(v, tab=k) + knobs.append(end) + else: + # Create a group of knobs + knobs.append(nuke.Tab_Knob( + name, nice, nuke.TABBEGINCLOSEDGROUP)) + knobs += create_knobs(value) + knobs.append( + nuke.Tab_Knob(name + "_End", nice, 
nuke.TABENDGROUP)) + continue + + else: + raise TypeError("Unsupported type: %r" % type(value)) + + knobs.append(knob) + + return knobs + + +def imprint(node, data, tab=None): + """Store attributes with value on node + + Parse user data into Node knobs. + Use `collections.OrderedDict` to ensure knob order. + + Args: + node(nuke.Node): node object from Nuke + data(dict): collection of attributes and their value + + Returns: + None + + Examples: + ``` + import nuke + from avalon.nuke import lib + + node = nuke.createNode("NoOp") + data = { + # Regular type of attributes + "myList": ["x", "y", "z"], + "myBool": True, + "myFloat": 0.1, + "myInt": 5, + + # Creating non-default imprint type of knob + "MyFilePath": lib.Knobby("File_Knob", "/file/path"), + "divider": lib.Knobby("Text_Knob", ""), + + # Manual nice knob naming + ("my_knob", "Nice Knob Name"): "some text", + + # dict type will be created as knob group + "KnobGroup": { + "knob1": 5, + "knob2": "hello", + "knob3": ["a", "b"], + }, + + # Nested dict will be created as tab group + "TabGroup": { + "tab1": {"count": 5}, + "tab2": {"isGood": True}, + "tab3": {"direction": ["Left", "Right"]}, + }, + } + lib.imprint(node, data, tab="Demo") + + ``` + + """ + for knob in create_knobs(data, tab): + node.addKnob(knob) + + +def add_publish_knob(node): + """Add Publish knob to node + + Arguments: + node (nuke.Node): nuke node to be processed + + Returns: + node (nuke.Node): processed nuke node + + """ + if "publish" not in node.knobs(): + body = OrderedDict() + body[("divd", "Publishing")] = Knobby("Text_Knob", '') + body["publish"] = True + imprint(node, body) + return node + + +def set_avalon_knob_data(node, data=None, prefix="avalon:"): + """ Sets data into nodes's avalon knob + + Arguments: + node (nuke.Node): Nuke node to imprint with data, + data (dict, optional): Data to be imprinted into AvalonTab + prefix (str, optional): filtering prefix + + Returns: + node (nuke.Node) + + Examples: + data = { + 'asset': 
'sq020sh0280', + 'family': 'render', + 'subset': 'subsetMain' + } + """ + data = data or dict() + create = OrderedDict() + + tab_name = AVALON_TAB + editable = ["asset", "subset", "name", "namespace"] + + existed_knobs = node.knobs() + + for key, value in data.items(): + knob_name = prefix + key + gui_name = key + + if knob_name in existed_knobs: + # Set value + try: + node[knob_name].setValue(value) + except TypeError: + node[knob_name].setValue(str(value)) + else: + # New knob + name = (knob_name, gui_name) # Hide prefix on GUI + if key in editable: + create[name] = value + else: + create[name] = Knobby("String_Knob", + str(value), + flags=[nuke.READ_ONLY]) + if tab_name in existed_knobs: + tab_name = None + else: + tab = OrderedDict() + warn = Knobby("Text_Knob", "Warning! Do not change following data!") + divd = Knobby("Text_Knob", "") + head = [ + (("warn", ""), warn), + (("divd", ""), divd), + ] + tab[AVALON_DATA_GROUP] = OrderedDict(head + list(create.items())) + create = tab + + imprint(node, create, tab=tab_name) + return node + + +def get_avalon_knob_data(node, prefix="avalon:"): + """ Gets a data from nodes's avalon knob + + Arguments: + node (obj): Nuke node to search for data, + prefix (str, optional): filtering prefix + + Returns: + data (dict) + """ + + # check if lists + if not isinstance(prefix, list): + prefix = list([prefix]) + + data = dict() + + # loop prefix + for p in prefix: + # check if the node is avalon tracked + if AVALON_TAB not in node.knobs(): + continue + try: + # check if data available on the node + test = node[AVALON_DATA_GROUP].value() + log.debug("Only testing if data avalable: `{}`".format(test)) + except NameError as e: + # if it doesn't then create it + log.debug("Creating avalon knob: `{}`".format(e)) + node = set_avalon_knob_data(node) + return get_avalon_knob_data(node) + + # get data from filtered knobs + data.update({k.replace(p, ''): node[k].value() + for k in node.knobs().keys() + if p in k}) + + return data + + +def 
fix_data_for_node_create(data): + """Fixing data to be used for nuke knobs + """ + for k, v in data.items(): + if isinstance(v, six.text_type): + data[k] = str(v) + if str(v).startswith("0x"): + data[k] = int(v, 16) + return data + + +def add_write_node(name, **kwarg): + """Adding nuke write node + + Arguments: + name (str): nuke node name + kwarg (attrs): data for nuke knobs + + Returns: + node (obj): nuke write node + """ + frame_range = kwarg.get("frame_range", None) + + w = nuke.createNode( + "Write", + "name {}".format(name)) + + w["file"].setValue(kwarg["file"]) + + for k, v in kwarg.items(): + if "frame_range" in k: + continue + log.info([k, v]) + try: + w[k].setValue(v) + except KeyError as e: + log.debug(e) + continue + + if frame_range: + w["use_limit"].setValue(True) + w["first"].setValue(frame_range[0]) + w["last"].setValue(frame_range[1]) + + return w + + +def read(node): + """Return user-defined knobs from given `node` + + Args: + node (nuke.Node): Nuke node object + + Returns: + list: A list of nuke.Knob object + + """ + def compat_prefixed(knob_name): + if knob_name.startswith("avalon:"): + return knob_name[len("avalon:"):] + elif knob_name.startswith("ak:"): + return knob_name[len("ak:"):] + else: + return knob_name + + data = dict() + + pattern = ("(?<=addUserKnob {)" + "([0-9]*) (\\S*)" # Matching knob type and knob name + "(?=[ |}])") + tcl_script = node.writeKnobs(nuke.WRITE_USER_KNOB_DEFS) + result = re.search(pattern, tcl_script) + + if result: + first_user_knob = result.group(2) + # Collect user knobs from the end of the knob list + for knob in reversed(node.allKnobs()): + knob_name = knob.name() + if not knob_name: + # Ignore unnamed knob + continue + + knob_type = nuke.knob(knob.fullyQualifiedName(), type=True) + value = knob.value() + + if ( + knob_type not in EXCLUDED_KNOB_TYPE_ON_READ or + # For compating read-only string data that imprinted + # by `nuke.Text_Knob`. 
+ (knob_type == 26 and value) + ): + key = compat_prefixed(knob_name) + data[key] = value + + if knob_name == first_user_knob: + break + + return data + + +def get_node_path(path, padding=4): + """Get filename for the Nuke write with padded number as '#' + + Arguments: + path (str): The path to render to. + + Returns: + tuple: head, padding, tail (extension) + + Examples: + >>> get_frame_path("test.exr") + ('test', 4, '.exr') + + >>> get_frame_path("filename.#####.tif") + ('filename.', 5, '.tif') + + >>> get_frame_path("foobar##.tif") + ('foobar', 2, '.tif') + + >>> get_frame_path("foobar_%08d.tif") + ('foobar_', 8, '.tif') + """ + filename, ext = os.path.splitext(path) + + # Find a final number group + if '%' in filename: + match = re.match('.*?(%[0-9]+d)$', filename) + if match: + padding = int(match.group(1).replace('%', '').replace('d', '')) + # remove number from end since fusion + # will swap it with the frame number + filename = filename.replace(match.group(1), '') + elif '#' in filename: + match = re.match('.*?(#+)$', filename) + + if match: + padding = len(match.group(1)) + # remove number from end since fusion + # will swap it with the frame number + filename = filename.replace(match.group(1), '') + + return filename, padding, ext def get_nuke_imageio_settings(): - return get_anatomy_settings(opnl.project_name)["imageio"]["nuke"] + return get_anatomy_settings(Context.project_name)["imageio"]["nuke"] def get_created_node_imageio_setting(**kwarg): @@ -103,14 +558,15 @@ def check_inventory_versions(): and check if the node is having actual version. If not then it will color it to red. 
""" + from .pipeline import parse_container + # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): - container = avalon.nuke.parse_container(each) + container = parse_container(each) if container: node = nuke.toNode(container["objectName"]) - avalon_knob_data = avalon.nuke.read( - node) + avalon_knob_data = read(node) # get representation from io representation = io.find_one({ @@ -141,7 +597,7 @@ def check_inventory_versions(): max_version = max(versions) # check the available version and do match - # change color of node if not max verion + # change color of node if not max version if version.get("name") not in [max_version]: node["tile_color"].setValue(int("0xd84f20ff", 16)) else: @@ -163,11 +619,10 @@ def writes_version_sync(): for each in nuke.allNodes(filter="Write"): # check if the node is avalon tracked - if opnl._node_tab_name not in each.knobs(): + if _NODE_TAB_NAME not in each.knobs(): continue - avalon_knob_data = avalon.nuke.read( - each) + avalon_knob_data = read(each) try: if avalon_knob_data['families'] not in ["render"]: @@ -209,14 +664,14 @@ def check_subsetname_exists(nodes, subset_name): bool: True of False """ return next((True for n in nodes - if subset_name in avalon.nuke.read(n).get("subset", "")), + if subset_name in read(n).get("subset", "")), False) def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' - data = {'avalon': avalon.nuke.read(node)} + data = {'avalon': read(node)} data_preset = { "nodeclass": data['avalon']['family'], "families": [data['avalon']['families']], @@ -236,10 +691,10 @@ def get_render_path(node): def format_anatomy(data): - ''' Helping function for formating of anatomy paths + ''' Helping function for formatting of anatomy paths Arguments: - data (dict): dictionary with attributes used for formating + data (dict): dictionary with attributes used for formatting Return: path (str) @@ -385,7 +840,7 @@ def create_write_node(name, data, input=None, 
prenodes=None, for knob in imageio_writes["knobs"]: _data.update({knob["name"]: knob["value"]}) - _data = anlib.fix_data_for_node_create(_data) + _data = fix_data_for_node_create(_data) log.debug("_data: `{}`".format(_data)) @@ -462,11 +917,11 @@ def create_write_node(name, data, input=None, prenodes=None, else: now_node.setInput(0, prev_node) - # swith actual node to previous + # switch actual node to previous prev_node = now_node # creating write node - write_node = now_node = anlib.add_write_node( + write_node = now_node = add_write_node( "inside_{}".format(name), **_data ) @@ -474,7 +929,7 @@ def create_write_node(name, data, input=None, prenodes=None, # connect to previous node now_node.setInput(0, prev_node) - # swith actual node to previous + # switch actual node to previous prev_node = now_node now_node = nuke.createNode("Output", "name Output1") @@ -484,8 +939,8 @@ def create_write_node(name, data, input=None, prenodes=None, now_node.setInput(0, prev_node) # imprinting group node - anlib.set_avalon_knob_data(GN, data["avalon"]) - anlib.add_publish_knob(GN) + set_avalon_knob_data(GN, data["avalon"]) + add_publish_knob(GN) add_rendering_knobs(GN, farm) if review: @@ -516,7 +971,7 @@ def create_write_node(name, data, input=None, prenodes=None, GN.addKnob(knob) else: if "___" in _k_name: - # add devider + # add divider GN.addKnob(nuke.Text_Knob("")) else: # add linked knob by _k_name @@ -537,7 +992,7 @@ def create_write_node(name, data, input=None, prenodes=None, add_deadline_tab(GN) # open the our Tab as default - GN[opnl._node_tab_name].setFlag(0) + GN[_NODE_TAB_NAME].setFlag(0) # set tile color tile_color = _data.get("tile_color", "0xff0000ff") @@ -663,7 +1118,7 @@ class WorkfileSettings(object): root_node=None, nodes=None, **kwargs): - opnl._project = kwargs.get( + Context._project_doc = kwargs.get( "project") or io.find_one({"type": "project"}) self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"] self._asset_entity = 
get_asset(self._asset) @@ -725,7 +1180,7 @@ class WorkfileSettings(object): for i, n in enumerate(copy_inputs): nv.setInput(i, n) - # set coppied knobs + # set copied knobs for k, v in copy_knobs.items(): print(k, v) nv[k].setValue(v) @@ -804,8 +1259,6 @@ class WorkfileSettings(object): ''' Adds correct colorspace to write node dict ''' - from avalon.nuke import read - for node in nuke.allNodes(filter="Group"): # get data from avalon knob @@ -862,7 +1315,7 @@ class WorkfileSettings(object): def set_reads_colorspace(self, read_clrs_inputs): """ Setting colorspace to Read nodes - Looping trought all read nodes and tries to set colorspace based + Looping through all read nodes and tries to set colorspace based on regex rules in presets """ changes = {} @@ -871,7 +1324,7 @@ class WorkfileSettings(object): if n.Class() != "Read": continue - # check if any colorspace presets for read is mathing + # check if any colorspace presets for read is matching preset_clrsp = None for input in read_clrs_inputs: @@ -1005,7 +1458,7 @@ class WorkfileSettings(object): node['frame_range_lock'].setValue(True) # adding handle_start/end to root avalon knob - if not anlib.set_avalon_knob_data(self._root_node, { + if not set_avalon_knob_data(self._root_node, { "handleStart": int(handle_start), "handleEnd": int(handle_end) }): @@ -1013,7 +1466,7 @@ class WorkfileSettings(object): def reset_resolution(self): """Set resolution to project resolution.""" - log.info("Reseting resolution") + log.info("Resetting resolution") project = io.find_one({"type": "project"}) asset = api.Session["AVALON_ASSET"] asset = io.find_one({"name": asset, "type": "asset"}) @@ -1089,6 +1542,8 @@ class WorkfileSettings(object): self.set_colorspace() def set_favorites(self): + from .utils import set_context_favorites + work_dir = os.getenv("AVALON_WORKDIR") asset = os.getenv("AVALON_ASSET") favorite_items = OrderedDict() @@ -1096,9 +1551,9 @@ class WorkfileSettings(object): # project # get project's root and split to 
parts projects_root = os.path.normpath(work_dir.split( - opnl.project_name)[0]) + Context.project_name)[0]) # add project name - project_dir = os.path.join(projects_root, opnl.project_name) + "/" + project_dir = os.path.join(projects_root, Context.project_name) + "/" # add to favorites favorite_items.update({"Project dir": project_dir.replace("\\", "/")}) @@ -1145,8 +1600,7 @@ def get_write_node_template_attr(node): ''' # get avalon data from node data = dict() - data['avalon'] = avalon.nuke.read( - node) + data['avalon'] = read(node) data_preset = { "nodeclass": data['avalon']['family'], "families": [data['avalon']['families']], @@ -1167,7 +1621,7 @@ def get_write_node_template_attr(node): if k not in ["_id", "_previous"]} # fix badly encoded data - return anlib.fix_data_for_node_create(correct_data) + return fix_data_for_node_create(correct_data) def get_dependent_nodes(nodes): @@ -1274,13 +1728,53 @@ def find_free_space_to_paste_nodes( return xpos, ypos +@contextlib.contextmanager +def maintained_selection(): + """Maintain selection during context + + Example: + >>> with maintained_selection(): + ... 
node['selected'].setValue(True) + >>> print(node['selected'].value()) + False + """ + previous_selection = nuke.selectedNodes() + try: + yield + finally: + # unselect all selection in case there is some + current_seletion = nuke.selectedNodes() + [n['selected'].setValue(False) for n in current_seletion] + # and select all previously selected nodes + if previous_selection: + [n['selected'].setValue(True) for n in previous_selection] + + +def reset_selection(): + """Deselect all selected nodes""" + for node in nuke.selectedNodes(): + node["selected"].setValue(False) + + +def select_nodes(nodes): + """Selects all inputed nodes + + Arguments: + nodes (list): nuke nodes to be selected + """ + assert isinstance(nodes, (list, tuple)), "nodes has to be list or tuple" + + for node in nodes: + node["selected"].setValue(True) + + def launch_workfiles_app(): '''Function letting start workfiles after start of host ''' from openpype.lib import ( env_value_to_bool ) - from avalon.nuke.pipeline import get_main_window + from .pipeline import get_main_window # get all imortant settings open_at_start = env_value_to_bool( @@ -1291,8 +1785,8 @@ def launch_workfiles_app(): if not open_at_start: return - if not opnl.workfiles_launched: - opnl.workfiles_launched = True + if not Context.workfiles_launched: + Context.workfiles_launched = True main_window = get_main_window() host_tools.show_workfiles(parent=main_window) @@ -1378,7 +1872,7 @@ def recreate_instance(origin_node, avalon_data=None): knobs_wl = ["render", "publish", "review", "ypos", "use_limit", "first", "last"] # get data from avalon knobs - data = anlib.get_avalon_knob_data( + data = get_avalon_knob_data( origin_node) # add input data to avalon data @@ -1494,3 +1988,45 @@ def dirmap_file_name_filter(file_name): if os.path.exists(dirmap_processor.file_name): return dirmap_processor.file_name return file_name + + +# ------------------------------------ +# This function seems to be deprecated +# 
------------------------------------ +def ls_img_sequence(path): + """Listing all available coherent image sequence from path + + Arguments: + path (str): A nuke's node object + + Returns: + data (dict): with nuke formated path and frameranges + """ + file = os.path.basename(path) + dirpath = os.path.dirname(path) + base, ext = os.path.splitext(file) + name, padding = os.path.splitext(base) + + # populate list of files + files = [ + f for f in os.listdir(dirpath) + if name in f + if ext in f + ] + + # create collection from list of files + collections, reminder = clique.assemble(files) + + if len(collections) > 0: + head = collections[0].format("{head}") + padding = collections[0].format("{padding}") % 1 + padding = "#" * len(padding) + tail = collections[0].format("{tail}") + file = head + padding + tail + + return { + "path": os.path.join(dirpath, file).replace("\\", "/"), + "frames": collections[0].format("[{ranges}]") + } + + return False diff --git a/openpype/hosts/nuke/api/menu.py b/openpype/hosts/nuke/api/menu.py deleted file mode 100644 index 86293edb99..0000000000 --- a/openpype/hosts/nuke/api/menu.py +++ /dev/null @@ -1,166 +0,0 @@ -import os -import nuke -from avalon.nuke.pipeline import get_main_window - -from .lib import WorkfileSettings -from openpype.api import Logger, BuildWorkfile, get_current_project_settings -from openpype.tools.utils import host_tools - - -log = Logger().get_logger(__name__) - -menu_label = os.environ["AVALON_LABEL"] -context_label = None - - -def change_context_label(*args): - global context_label - menubar = nuke.menu("Nuke") - menu = menubar.findItem(menu_label) - - label = "{0}, {1}".format( - os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"] - ) - - rm_item = [ - (i, item) for i, item in enumerate(menu.items()) - if context_label in item.name() - ][0] - - menu.removeItem(rm_item[1].name()) - - context_action = menu.addCommand( - label, - index=(rm_item[0]) - ) - context_action.setEnabled(False) - - log.info("Task label 
changed from `{}` to `{}`".format( - context_label, label)) - - context_label = label - - - -def install(): - from openpype.hosts.nuke.api import reload_config - - global context_label - - # uninstall original avalon menu - uninstall() - - main_window = get_main_window() - menubar = nuke.menu("Nuke") - menu = menubar.addMenu(menu_label) - - label = "{0}, {1}".format( - os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"] - ) - context_label = label - context_action = menu.addCommand(label) - context_action.setEnabled(False) - - menu.addSeparator() - menu.addCommand( - "Work Files...", - lambda: host_tools.show_workfiles(parent=main_window) - ) - - menu.addSeparator() - menu.addCommand( - "Create...", - lambda: host_tools.show_creator(parent=main_window) - ) - menu.addCommand( - "Load...", - lambda: host_tools.show_loader( - parent=main_window, - use_context=True - ) - ) - menu.addCommand( - "Publish...", - lambda: host_tools.show_publish(parent=main_window) - ) - menu.addCommand( - "Manage...", - lambda: host_tools.show_scene_inventory(parent=main_window) - ) - - menu.addSeparator() - menu.addCommand( - "Set Resolution", - lambda: WorkfileSettings().reset_resolution() - ) - menu.addCommand( - "Set Frame Range", - lambda: WorkfileSettings().reset_frame_range_handles() - ) - menu.addCommand( - "Set Colorspace", - lambda: WorkfileSettings().set_colorspace() - ) - menu.addCommand( - "Apply All Settings", - lambda: WorkfileSettings().set_context_settings() - ) - - menu.addSeparator() - menu.addCommand( - "Build Workfile", - lambda: BuildWorkfile().process() - ) - - menu.addSeparator() - menu.addCommand( - "Experimental tools...", - lambda: host_tools.show_experimental_tools_dialog(parent=main_window) - ) - - # add reload pipeline only in debug mode - if bool(os.getenv("NUKE_DEBUG")): - menu.addSeparator() - menu.addCommand("Reload Pipeline", reload_config) - - # adding shortcuts - add_shortcuts_from_presets() - - -def uninstall(): - - menubar = nuke.menu("Nuke") - menu 
= menubar.findItem(menu_label) - - for item in menu.items(): - log.info("Removing menu item: {}".format(item.name())) - menu.removeItem(item.name()) - - -def add_shortcuts_from_presets(): - menubar = nuke.menu("Nuke") - nuke_presets = get_current_project_settings()["nuke"]["general"] - - if nuke_presets.get("menu"): - menu_label_mapping = { - "manage": "Manage...", - "create": "Create...", - "load": "Load...", - "build_workfile": "Build Workfile", - "publish": "Publish..." - } - - for command_name, shortcut_str in nuke_presets.get("menu").items(): - log.info("menu_name `{}` | menu_label `{}`".format( - command_name, menu_label - )) - log.info("Adding Shortcut `{}` to `{}`".format( - shortcut_str, command_name - )) - try: - menu = menubar.findItem(menu_label) - item_label = menu_label_mapping[command_name] - menuitem = menu.findItem(item_label) - menuitem.setShortcut(shortcut_str) - except AttributeError as e: - log.error(e) diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py new file mode 100644 index 0000000000..c47187666b --- /dev/null +++ b/openpype/hosts/nuke/api/pipeline.py @@ -0,0 +1,421 @@ +import os +import importlib +from collections import OrderedDict + +import nuke + +import pyblish.api +import avalon.api +from avalon import pipeline + +import openpype +from openpype.api import ( + Logger, + BuildWorkfile, + get_current_project_settings +) +from openpype.tools.utils import host_tools + +from .command import viewer_update_and_undo_stop +from .lib import ( + add_publish_knob, + WorkfileSettings, + process_workfile_builder, + launch_workfiles_app, + check_inventory_versions, + set_avalon_knob_data, + read, + Context +) + +log = Logger.get_logger(__name__) + +AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") +HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.nuke.__file__)) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, 
"load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + +MENU_LABEL = os.environ["AVALON_LABEL"] + + +# registering pyblish gui regarding settings in presets +if os.getenv("PYBLISH_GUI", None): + pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None)) + + +def get_main_window(): + """Acquire Nuke's main window""" + if Context.main_window is None: + from Qt import QtWidgets + + top_widgets = QtWidgets.QApplication.topLevelWidgets() + name = "Foundry::UI::DockMainWindow" + for widget in top_widgets: + if ( + widget.inherits("QMainWindow") + and widget.metaObject().className() == name + ): + Context.main_window = widget + break + return Context.main_window + + +def reload_config(): + """Attempt to reload pipeline at run-time. + + CAUTION: This is primarily for development and debugging purposes. + + """ + + for module in ( + "{}.api".format(AVALON_CONFIG), + "{}.hosts.nuke.api.actions".format(AVALON_CONFIG), + "{}.hosts.nuke.api.menu".format(AVALON_CONFIG), + "{}.hosts.nuke.api.plugin".format(AVALON_CONFIG), + "{}.hosts.nuke.api.lib".format(AVALON_CONFIG), + ): + log.info("Reloading module: {}...".format(module)) + + module = importlib.import_module(module) + + try: + importlib.reload(module) + except AttributeError as e: + from importlib import reload + log.warning("Cannot reload module: {}".format(e)) + reload(module) + + +def install(): + ''' Installing all requarements for Nuke host + ''' + + pyblish.api.register_host("nuke") + + log.info("Registering Nuke plug-ins..") + pyblish.api.register_plugin_path(PUBLISH_PATH) + avalon.api.register_plugin_path(avalon.api.Loader, LOAD_PATH) + avalon.api.register_plugin_path(avalon.api.Creator, CREATE_PATH) + avalon.api.register_plugin_path(avalon.api.InventoryAction, INVENTORY_PATH) + + # Register Avalon event for workfiles loading. 
+ avalon.api.on("workio.open_file", check_inventory_versions) + avalon.api.on("taskChanged", change_context_label) + + pyblish.api.register_callback( + "instanceToggled", on_pyblish_instance_toggled) + workfile_settings = WorkfileSettings() + # Disable all families except for the ones we explicitly want to see + family_states = [ + "write", + "review", + "nukenodes", + "model", + "gizmo" + ] + + avalon.api.data["familiesStateDefault"] = False + avalon.api.data["familiesStateToggled"] = family_states + + # Set context settings. + nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") + nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") + nuke.addOnCreate(process_workfile_builder, nodeClass="Root") + nuke.addOnCreate(launch_workfiles_app, nodeClass="Root") + _install_menu() + + +def uninstall(): + '''Uninstalling host's integration + ''' + log.info("Deregistering Nuke plug-ins..") + pyblish.deregister_host("nuke") + pyblish.api.deregister_plugin_path(PUBLISH_PATH) + avalon.api.deregister_plugin_path(avalon.api.Loader, LOAD_PATH) + avalon.api.deregister_plugin_path(avalon.api.Creator, CREATE_PATH) + + pyblish.api.deregister_callback( + "instanceToggled", on_pyblish_instance_toggled) + + reload_config() + _uninstall_menu() + + +def _install_menu(): + # uninstall original avalon menu + main_window = get_main_window() + menubar = nuke.menu("Nuke") + menu = menubar.addMenu(MENU_LABEL) + + label = "{0}, {1}".format( + os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"] + ) + Context.context_label = label + context_action = menu.addCommand(label) + context_action.setEnabled(False) + + menu.addSeparator() + menu.addCommand( + "Work Files...", + lambda: host_tools.show_workfiles(parent=main_window) + ) + + menu.addSeparator() + menu.addCommand( + "Create...", + lambda: host_tools.show_creator(parent=main_window) + ) + menu.addCommand( + "Load...", + lambda: host_tools.show_loader( + parent=main_window, + use_context=True + ) + ) + 
menu.addCommand( + "Publish...", + lambda: host_tools.show_publish(parent=main_window) + ) + menu.addCommand( + "Manage...", + lambda: host_tools.show_scene_inventory(parent=main_window) + ) + + menu.addSeparator() + menu.addCommand( + "Set Resolution", + lambda: WorkfileSettings().reset_resolution() + ) + menu.addCommand( + "Set Frame Range", + lambda: WorkfileSettings().reset_frame_range_handles() + ) + menu.addCommand( + "Set Colorspace", + lambda: WorkfileSettings().set_colorspace() + ) + menu.addCommand( + "Apply All Settings", + lambda: WorkfileSettings().set_context_settings() + ) + + menu.addSeparator() + menu.addCommand( + "Build Workfile", + lambda: BuildWorkfile().process() + ) + + menu.addSeparator() + menu.addCommand( + "Experimental tools...", + lambda: host_tools.show_experimental_tools_dialog(parent=main_window) + ) + + # add reload pipeline only in debug mode + if bool(os.getenv("NUKE_DEBUG")): + menu.addSeparator() + menu.addCommand("Reload Pipeline", reload_config) + + # adding shortcuts + add_shortcuts_from_presets() + + +def _uninstall_menu(): + menubar = nuke.menu("Nuke") + menu = menubar.findItem(MENU_LABEL) + + for item in menu.items(): + log.info("Removing menu item: {}".format(item.name())) + menu.removeItem(item.name()) + + +def change_context_label(*args): + menubar = nuke.menu("Nuke") + menu = menubar.findItem(MENU_LABEL) + + label = "{0}, {1}".format( + os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"] + ) + + rm_item = [ + (i, item) for i, item in enumerate(menu.items()) + if Context.context_label in item.name() + ][0] + + menu.removeItem(rm_item[1].name()) + + context_action = menu.addCommand( + label, + index=(rm_item[0]) + ) + context_action.setEnabled(False) + + log.info("Task label changed from `{}` to `{}`".format( + Context.context_label, label)) + + +def add_shortcuts_from_presets(): + menubar = nuke.menu("Nuke") + nuke_presets = get_current_project_settings()["nuke"]["general"] + + if nuke_presets.get("menu"): + 
menu_label_mapping = { + "manage": "Manage...", + "create": "Create...", + "load": "Load...", + "build_workfile": "Build Workfile", + "publish": "Publish..." + } + + for command_name, shortcut_str in nuke_presets.get("menu").items(): + log.info("menu_name `{}` | menu_label `{}`".format( + command_name, MENU_LABEL + )) + log.info("Adding Shortcut `{}` to `{}`".format( + shortcut_str, command_name + )) + try: + menu = menubar.findItem(MENU_LABEL) + item_label = menu_label_mapping[command_name] + menuitem = menu.findItem(item_label) + menuitem.setShortcut(shortcut_str) + except AttributeError as e: + log.error(e) + + +def on_pyblish_instance_toggled(instance, old_value, new_value): + """Toggle node passthrough states on instance toggles.""" + + log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( + instance, old_value, new_value)) + + # Whether instances should be passthrough based on new value + + with viewer_update_and_undo_stop(): + n = instance[0] + try: + n["publish"].value() + except ValueError: + n = add_publish_knob(n) + log.info(" `Publish` knob was added to write node..") + + n["publish"].setValue(new_value) + + +def containerise(node, + name, + namespace, + context, + loader=None, + data=None): + """Bundle `node` into an assembly and imprint it with metadata + + Containerisation enables a tracking of version, author and origin + for loaded assets. + + Arguments: + node (nuke.Node): Nuke's node object to imprint as container + name (str): Name of resulting assembly + namespace (str): Namespace under which to host container + context (dict): Asset information + loader (str, optional): Name of node used to produce this container. 
+ + Returns: + node (nuke.Node): containerised nuke's node object + + """ + data = OrderedDict( + [ + ("schema", "openpype:container-2.0"), + ("id", pipeline.AVALON_CONTAINER_ID), + ("name", name), + ("namespace", namespace), + ("loader", str(loader)), + ("representation", context["representation"]["_id"]), + ], + + **data or dict() + ) + + set_avalon_knob_data(node, data) + + return node + + +def parse_container(node): + """Returns containerised data of a node + + Reads the imprinted data from `containerise`. + + Arguments: + node (nuke.Node): Nuke's node object to read imprinted data + + Returns: + dict: The container schema data for this container node. + + """ + data = read(node) + + # (TODO) Remove key validation when `ls` has been re-implemented. + # + # If not all required data return the empty container + required = ["schema", "id", "name", + "namespace", "loader", "representation"] + if not all(key in data for key in required): + return + + # Store the node's name + data["objectName"] = node["name"].value() + + return data + + +def update_container(node, keys=None): + """Returns node with updated container data + + Arguments: + node (nuke.Node): The node in Nuke to imprint as container, + keys (dict, optional): data which should be updated + + Returns: + node (nuke.Node): nuke node with updated container data + + Raises: + TypeError on given an invalid container node + + """ + keys = keys or dict() + + container = parse_container(node) + if not container: + raise TypeError("Not a valid container node.") + + container.update(keys) + node = set_avalon_knob_data(node, container) + + return node + + +def ls(): + """List available containers. + + This function is used by the Container Manager in Nuke. You'll + need to implement a for-loop that then *yields* one Container at + a time.
+ + See the `container.json` schema for details on how it should look, + and the Maya equivalent, which is in `avalon.maya.pipeline` + """ + all_nodes = nuke.allNodes(recurseGroups=False) + + # TODO: add readgeo, readcamera, readimage + nodes = [n for n in all_nodes] + + for n in nodes: + log.debug("name: `{}`".format(n.name())) + container = parse_container(n) + if container: + yield container diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py index 82299dd354..fd754203d4 100644 --- a/openpype/hosts/nuke/api/plugin.py +++ b/openpype/hosts/nuke/api/plugin.py @@ -2,23 +2,30 @@ import os import random import string -import avalon.nuke -from avalon.nuke import lib as anlib -from avalon import api +import nuke + +import avalon.api from openpype.api import ( get_current_project_settings, PypeCreatorMixin ) -from .lib import check_subsetname_exists -import nuke +from .lib import ( + Knobby, + check_subsetname_exists, + reset_selection, + maintained_selection, + set_avalon_knob_data, + add_publish_knob +) -class PypeCreator(PypeCreatorMixin, avalon.nuke.pipeline.Creator): - """Pype Nuke Creator class wrapper - """ +class OpenPypeCreator(PypeCreatorMixin, avalon.api.Creator): + """Pype Nuke Creator class wrapper""" + node_color = "0xdfea5dff" + def __init__(self, *args, **kwargs): - super(PypeCreator, self).__init__(*args, **kwargs) + super(OpenPypeCreator, self).__init__(*args, **kwargs) self.presets = get_current_project_settings()["nuke"]["create"].get( self.__class__.__name__, {} ) @@ -31,6 +38,38 @@ class PypeCreator(PypeCreatorMixin, avalon.nuke.pipeline.Creator): raise NameError("`{0}: {1}".format(__name__, msg)) return + def process(self): + from nukescripts import autoBackdrop + + instance = None + + if (self.options or {}).get("useSelection"): + + nodes = nuke.selectedNodes() + if not nodes: + nuke.message("Please select nodes that you " + "wish to add to a container") + return + + elif len(nodes) == 1: + # only one node is 
selected + instance = nodes[0] + + if not instance: + # Not using selection or multiple nodes selected + bckd_node = autoBackdrop() + bckd_node["tile_color"].setValue(int(self.node_color, 16)) + bckd_node["note_font_size"].setValue(24) + bckd_node["label"].setValue("[{}]".format(self.name)) + + instance = bckd_node + + # add avalon knobs + set_avalon_knob_data(instance, self.data) + add_publish_knob(instance) + + return instance + def get_review_presets_config(): settings = get_current_project_settings() @@ -48,7 +87,7 @@ def get_review_presets_config(): return [str(name) for name, _prop in outputs.items()] -class NukeLoader(api.Loader): +class NukeLoader(avalon.api.Loader): container_id_knob = "containerId" container_id = None @@ -74,7 +113,7 @@ class NukeLoader(api.Loader): node[self.container_id_knob].setValue(source_id) else: HIDEN_FLAG = 0x00040000 - _knob = anlib.Knobby( + _knob = Knobby( "String_Knob", self.container_id, flags=[ @@ -183,7 +222,7 @@ class ExporterReview(object): Returns: nuke.Node: copy node of Input Process node """ - anlib.reset_selection() + reset_selection() ipn_orig = None for v in nuke.allNodes(filter="Viewer"): ip = v["input_process"].getValue() @@ -196,7 +235,7 @@ class ExporterReview(object): # copy selected to clipboard nuke.nodeCopy("%clipboard%") # reset selection - anlib.reset_selection() + reset_selection() # paste node and selection is on it only nuke.nodePaste("%clipboard%") # assign to variable @@ -209,7 +248,7 @@ class ExporterReview(object): nuke_imageio = opnlib.get_nuke_imageio_settings() # TODO: this is only securing backward compatibility lets remove - # this once all projects's anotomy are upated to newer config + # this once all projects's anotomy are updated to newer config if "baking" in nuke_imageio.keys(): return nuke_imageio["baking"]["viewerProcess"] else: @@ -396,7 +435,7 @@ class ExporterReviewMov(ExporterReview): def save_file(self): import shutil - with anlib.maintained_selection(): + with 
maintained_selection(): self.log.info("Saving nodes as file... ") # create nk path path = os.path.splitext(self.path)[0] + ".nk" @@ -477,7 +516,7 @@ class ExporterReviewMov(ExporterReview): write_node["file_type"].setValue(str(self.ext)) # Knobs `meta_codec` and `mov64_codec` are not available on centos. - # TODO should't this come from settings on outputs? + # TODO shouldn't this come from settings on outputs? try: write_node["meta_codec"].setValue("ap4h") except Exception: diff --git a/openpype/hosts/nuke/api/utils.py b/openpype/hosts/nuke/api/utils.py index e43c11a380..0ed84f9560 100644 --- a/openpype/hosts/nuke/api/utils.py +++ b/openpype/hosts/nuke/api/utils.py @@ -1,13 +1,14 @@ import os import nuke -from avalon.nuke import lib as anlib + from openpype.api import resources +from .lib import maintained_selection def set_context_favorites(favorites=None): - """ Addig favorite folders to nuke's browser + """ Adding favorite folders to nuke's browser - Argumets: + Arguments: favorites (dict): couples of {name:path} """ favorites = favorites or {} @@ -48,14 +49,16 @@ def gizmo_is_nuke_default(gizmo): return gizmo.filename().startswith(plug_dir) -def bake_gizmos_recursively(in_group=nuke.Root()): +def bake_gizmos_recursively(in_group=None): """Converting a gizmo to group - Argumets: + Arguments: is_group (nuke.Node)[optonal]: group node or all nodes """ + if in_group is None: + in_group = nuke.Root() # preserve selection after all is done - with anlib.maintained_selection(): + with maintained_selection(): # jump to the group with in_group: for node in nuke.allNodes(): diff --git a/openpype/hosts/nuke/api/workio.py b/openpype/hosts/nuke/api/workio.py new file mode 100644 index 0000000000..dbc24fdc9b --- /dev/null +++ b/openpype/hosts/nuke/api/workio.py @@ -0,0 +1,55 @@ +"""Host API required Work Files tool""" +import os +import nuke +import avalon.api + + +def file_extensions(): + return avalon.api.HOST_WORKFILE_EXTENSIONS["nuke"] + + +def has_unsaved_changes(): + 
return nuke.root().modified() + + +def save_file(filepath): + path = filepath.replace("\\", "/") + nuke.scriptSaveAs(path) + nuke.Root()["name"].setValue(path) + nuke.Root()["project_directory"].setValue(os.path.dirname(path)) + nuke.Root().setModified(False) + + +def open_file(filepath): + filepath = filepath.replace("\\", "/") + + # To remain in the same window, we have to clear the script and read + # in the contents of the workfile. + nuke.scriptClear() + nuke.scriptReadFile(filepath) + nuke.Root()["name"].setValue(filepath) + nuke.Root()["project_directory"].setValue(os.path.dirname(filepath)) + nuke.Root().setModified(False) + return True + + +def current_file(): + current_file = nuke.root().name() + + # Unsaved current file + if current_file == 'Root': + return None + + return os.path.normpath(current_file).replace("\\", "/") + + +def work_root(session): + + work_dir = session["AVALON_WORKDIR"] + scene_dir = session.get("AVALON_SCENEDIR") + if scene_dir: + path = os.path.join(work_dir, scene_dir) + else: + path = work_dir + + return os.path.normpath(path).replace("\\", "/") diff --git a/openpype/hosts/nuke/plugins/create/create_backdrop.py b/openpype/hosts/nuke/plugins/create/create_backdrop.py index cda2629587..0c11b3f274 100644 --- a/openpype/hosts/nuke/plugins/create/create_backdrop.py +++ b/openpype/hosts/nuke/plugins/create/create_backdrop.py @@ -1,9 +1,12 @@ -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import plugin import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import ( + select_nodes, + set_avalon_knob_data +) -class CreateBackdrop(plugin.PypeCreator): +class CreateBackdrop(plugin.OpenPypeCreator): """Add Publishable Backdrop""" name = "nukenodes" @@ -25,14 +28,14 @@ class CreateBackdrop(plugin.PypeCreator): nodes = self.nodes if len(nodes) >= 1: - anlib.select_nodes(nodes) + select_nodes(nodes) bckd_node = autoBackdrop() bckd_node["name"].setValue("{}_BDN".format(self.name)) 
bckd_node["tile_color"].setValue(int(self.node_color, 16)) bckd_node["note_font_size"].setValue(24) bckd_node["label"].setValue("[{}]".format(self.name)) # add avalon knobs - instance = anlib.set_avalon_knob_data(bckd_node, self.data) + instance = set_avalon_knob_data(bckd_node, self.data) return instance else: @@ -48,6 +51,6 @@ class CreateBackdrop(plugin.PypeCreator): bckd_node["note_font_size"].setValue(24) bckd_node["label"].setValue("[{}]".format(self.name)) # add avalon knobs - instance = anlib.set_avalon_knob_data(bckd_node, self.data) + instance = set_avalon_knob_data(bckd_node, self.data) return instance diff --git a/openpype/hosts/nuke/plugins/create/create_camera.py b/openpype/hosts/nuke/plugins/create/create_camera.py index 359086d48f..3b13c80dc4 100644 --- a/openpype/hosts/nuke/plugins/create/create_camera.py +++ b/openpype/hosts/nuke/plugins/create/create_camera.py @@ -1,9 +1,11 @@ -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import plugin import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import ( + set_avalon_knob_data +) -class CreateCamera(plugin.PypeCreator): +class CreateCamera(plugin.OpenPypeCreator): """Add Publishable Backdrop""" name = "camera" @@ -36,7 +38,7 @@ class CreateCamera(plugin.PypeCreator): # change node color n["tile_color"].setValue(int(self.node_color, 16)) # add avalon knobs - anlib.set_avalon_knob_data(n, data) + set_avalon_knob_data(n, data) return True else: msg = str("Please select nodes you " @@ -49,5 +51,5 @@ class CreateCamera(plugin.PypeCreator): camera_node = nuke.createNode("Camera2") camera_node["tile_color"].setValue(int(self.node_color, 16)) # add avalon knobs - instance = anlib.set_avalon_knob_data(camera_node, self.data) + instance = set_avalon_knob_data(camera_node, self.data) return instance diff --git a/openpype/hosts/nuke/plugins/create/create_gizmo.py b/openpype/hosts/nuke/plugins/create/create_gizmo.py index c59713cff1..d616f6f7ad 100644 --- 
a/openpype/hosts/nuke/plugins/create/create_gizmo.py +++ b/openpype/hosts/nuke/plugins/create/create_gizmo.py @@ -1,9 +1,14 @@ -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import plugin import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import ( + maintained_selection, + select_nodes, + set_avalon_knob_data +) -class CreateGizmo(plugin.PypeCreator): + +class CreateGizmo(plugin.OpenPypeCreator): """Add Publishable "gizmo" group The name is symbolically gizmo as presumably @@ -28,13 +33,13 @@ class CreateGizmo(plugin.PypeCreator): nodes = self.nodes self.log.info(len(nodes)) if len(nodes) == 1: - anlib.select_nodes(nodes) + select_nodes(nodes) node = nodes[-1] # check if Group node if node.Class() in "Group": node["name"].setValue("{}_GZM".format(self.name)) node["tile_color"].setValue(int(self.node_color, 16)) - return anlib.set_avalon_knob_data(node, self.data) + return set_avalon_knob_data(node, self.data) else: msg = ("Please select a group node " "you wish to publish as the gizmo") @@ -42,13 +47,13 @@ class CreateGizmo(plugin.PypeCreator): nuke.message(msg) if len(nodes) >= 2: - anlib.select_nodes(nodes) + select_nodes(nodes) nuke.makeGroup() gizmo_node = nuke.selectedNode() gizmo_node["name"].setValue("{}_GZM".format(self.name)) gizmo_node["tile_color"].setValue(int(self.node_color, 16)) - # add sticky node wit guide + # add sticky node with guide with gizmo_node: sticky = nuke.createNode("StickyNote") sticky["label"].setValue( @@ -57,21 +62,20 @@ class CreateGizmo(plugin.PypeCreator): "- create User knobs on the group") # add avalon knobs - return anlib.set_avalon_knob_data(gizmo_node, self.data) + return set_avalon_knob_data(gizmo_node, self.data) else: - msg = ("Please select nodes you " - "wish to add to the gizmo") + msg = "Please select nodes you wish to add to the gizmo" self.log.error(msg) nuke.message(msg) return else: - with anlib.maintained_selection(): + with maintained_selection(): 
gizmo_node = nuke.createNode("Group") gizmo_node["name"].setValue("{}_GZM".format(self.name)) gizmo_node["tile_color"].setValue(int(self.node_color, 16)) - # add sticky node wit guide + # add sticky node with guide with gizmo_node: sticky = nuke.createNode("StickyNote") sticky["label"].setValue( @@ -80,4 +84,4 @@ class CreateGizmo(plugin.PypeCreator): "- create User knobs on the group") # add avalon knobs - return anlib.set_avalon_knob_data(gizmo_node, self.data) + return set_avalon_knob_data(gizmo_node, self.data) diff --git a/openpype/hosts/nuke/plugins/create/create_model.py b/openpype/hosts/nuke/plugins/create/create_model.py index 4e30860e05..15a4e3ab8a 100644 --- a/openpype/hosts/nuke/plugins/create/create_model.py +++ b/openpype/hosts/nuke/plugins/create/create_model.py @@ -1,9 +1,11 @@ -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import plugin import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import ( + set_avalon_knob_data +) -class CreateModel(plugin.PypeCreator): +class CreateModel(plugin.OpenPypeCreator): """Add Publishable Model Geometry""" name = "model" @@ -68,7 +70,7 @@ class CreateModel(plugin.PypeCreator): # change node color n["tile_color"].setValue(int(self.node_color, 16)) # add avalon knobs - anlib.set_avalon_knob_data(n, data) + set_avalon_knob_data(n, data) return True else: msg = str("Please select nodes you " @@ -81,5 +83,5 @@ class CreateModel(plugin.PypeCreator): model_node = nuke.createNode("WriteGeo") model_node["tile_color"].setValue(int(self.node_color, 16)) # add avalon knobs - instance = anlib.set_avalon_knob_data(model_node, self.data) + instance = set_avalon_knob_data(model_node, self.data) return instance diff --git a/openpype/hosts/nuke/plugins/create/create_read.py b/openpype/hosts/nuke/plugins/create/create_read.py index bf5de23346..bdc67add42 100644 --- a/openpype/hosts/nuke/plugins/create/create_read.py +++ b/openpype/hosts/nuke/plugins/create/create_read.py @@ 
-1,13 +1,16 @@ from collections import OrderedDict -import avalon.api -import avalon.nuke -from openpype import api as pype -from openpype.hosts.nuke.api import plugin import nuke +import avalon.api +from openpype import api as pype +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import ( + set_avalon_knob_data +) -class CrateRead(plugin.PypeCreator): + +class CrateRead(plugin.OpenPypeCreator): # change this to template preset name = "ReadCopy" label = "Create Read Copy" @@ -45,7 +48,7 @@ class CrateRead(plugin.PypeCreator): continue avalon_data = self.data avalon_data['subset'] = "{}".format(self.name) - avalon.nuke.lib.set_avalon_knob_data(node, avalon_data) + set_avalon_knob_data(node, avalon_data) node['tile_color'].setValue(16744935) count_reads += 1 diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 1b925014ad..3285e5f92d 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -1,11 +1,12 @@ from collections import OrderedDict -from openpype.hosts.nuke.api import ( - plugin, - lib) + import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import create_write_node -class CreateWritePrerender(plugin.PypeCreator): + +class CreateWritePrerender(plugin.OpenPypeCreator): # change this to template preset name = "WritePrerender" label = "Create Write Prerender" @@ -98,7 +99,7 @@ class CreateWritePrerender(plugin.PypeCreator): self.log.info("write_data: {}".format(write_data)) - write_node = lib.create_write_node( + write_node = create_write_node( self.data["subset"], write_data, input=selected_node, diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index 5f13fddf4e..a9c4b5341e 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py 
+++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -1,11 +1,12 @@ from collections import OrderedDict -from openpype.hosts.nuke.api import ( - plugin, - lib) + import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import create_write_node -class CreateWriteRender(plugin.PypeCreator): + +class CreateWriteRender(plugin.OpenPypeCreator): # change this to template preset name = "WriteRender" label = "Create Write Render" @@ -119,7 +120,7 @@ class CreateWriteRender(plugin.PypeCreator): } ] - write_node = lib.create_write_node( + write_node = create_write_node( self.data["subset"], write_data, input=selected_node, diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py index eebb5613c3..0037b64ce3 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_still.py +++ b/openpype/hosts/nuke/plugins/create/create_write_still.py @@ -1,11 +1,12 @@ from collections import OrderedDict -from openpype.hosts.nuke.api import ( - plugin, - lib) + import nuke +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import create_write_node -class CreateWriteStill(plugin.PypeCreator): + +class CreateWriteStill(plugin.OpenPypeCreator): # change this to template preset name = "WriteStillFrame" label = "Create Write Still Image" @@ -108,7 +109,7 @@ class CreateWriteStill(plugin.PypeCreator): } ] - write_node = lib.create_write_node( + write_node = create_write_node( self.name, write_data, input=selected_node, diff --git a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py index e7ae51fa86..49405fd213 100644 --- a/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py +++ b/openpype/hosts/nuke/plugins/inventory/repair_old_loaders.py @@ -1,7 +1,6 @@ from avalon import api, style -from avalon.nuke import lib as anlib -from openpype.api import ( - Logger) +from 
openpype.api import Logger +from openpype.hosts.nuke.api.lib import set_avalon_knob_data class RepairOldLoaders(api.InventoryAction): @@ -10,7 +9,7 @@ class RepairOldLoaders(api.InventoryAction): icon = "gears" color = style.colors.alert - log = Logger().get_logger(__name__) + log = Logger.get_logger(__name__) def process(self, containers): import nuke @@ -34,4 +33,4 @@ class RepairOldLoaders(api.InventoryAction): }) node["name"].setValue(new_name) # get data from avalon knob - anlib.set_avalon_knob_data(node, cdata) + set_avalon_knob_data(node, cdata) diff --git a/openpype/hosts/nuke/plugins/inventory/select_containers.py b/openpype/hosts/nuke/plugins/inventory/select_containers.py index bd00983172..3f174b3562 100644 --- a/openpype/hosts/nuke/plugins/inventory/select_containers.py +++ b/openpype/hosts/nuke/plugins/inventory/select_containers.py @@ -1,4 +1,5 @@ from avalon import api +from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop class SelectContainers(api.InventoryAction): @@ -9,11 +10,10 @@ class SelectContainers(api.InventoryAction): def process(self, containers): import nuke - import avalon.nuke nodes = [nuke.toNode(i["objectName"]) for i in containers] - with avalon.nuke.viewer_update_and_undo_stop(): + with viewer_update_and_undo_stop(): # clear previous_selection [n['selected'].setValue(False) for n in nodes] # Select tool diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index 9148260e9e..6619cfb414 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -1,9 +1,18 @@ from avalon import api, style, io import nuke import nukescripts -from openpype.hosts.nuke.api import lib as pnlib -from avalon.nuke import lib as anlib -from avalon.nuke import containerise, update_container + +from openpype.hosts.nuke.api.lib import ( + find_free_space_to_paste_nodes, + maintained_selection, + reset_selection, + 
select_nodes, + get_avalon_knob_data, + set_avalon_knob_data +) +from openpype.hosts.nuke.api.commands import viewer_update_and_undo_stop +from openpype.hosts.nuke.api import containerise, update_container + class LoadBackdropNodes(api.Loader): """Loading Published Backdrop nodes (workfile, nukenodes)""" @@ -66,12 +75,12 @@ class LoadBackdropNodes(api.Loader): # Get mouse position n = nuke.createNode("NoOp") xcursor, ycursor = (n.xpos(), n.ypos()) - anlib.reset_selection() + reset_selection() nuke.delete(n) bdn_frame = 50 - with anlib.maintained_selection(): + with maintained_selection(): # add group from nk nuke.nodePaste(file) @@ -81,11 +90,13 @@ class LoadBackdropNodes(api.Loader): nodes = nuke.selectedNodes() # get pointer position in DAG - xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame) + xpointer, ypointer = find_free_space_to_paste_nodes( + nodes, direction="right", offset=200 + bdn_frame + ) # reset position to all nodes and replace inputs and output for n in nodes: - anlib.reset_selection() + reset_selection() xpos = (n.xpos() - xcursor) + xpointer ypos = (n.ypos() - ycursor) + ypointer n.setXYpos(xpos, ypos) @@ -108,7 +119,7 @@ class LoadBackdropNodes(api.Loader): d.setInput(index, dot) # remove Input node - anlib.reset_selection() + reset_selection() nuke.delete(n) continue @@ -127,15 +138,15 @@ class LoadBackdropNodes(api.Loader): dot.setInput(0, dep) # remove Input node - anlib.reset_selection() + reset_selection() nuke.delete(n) continue else: new_nodes.append(n) # reselect nodes with new Dot instead of Inputs and Output - anlib.reset_selection() - anlib.select_nodes(new_nodes) + reset_selection() + select_nodes(new_nodes) # place on backdrop bdn = nukescripts.autoBackdrop() @@ -208,16 +219,16 @@ class LoadBackdropNodes(api.Loader): # just in case we are in group lets jump out of it nuke.endGroup() - with anlib.maintained_selection(): + with maintained_selection(): xpos = GN.xpos() ypos = 
GN.ypos() - avalon_data = anlib.get_avalon_knob_data(GN) + avalon_data = get_avalon_knob_data(GN) nuke.delete(GN) # add group from nk nuke.nodePaste(file) GN = nuke.selectedNode() - anlib.set_avalon_knob_data(GN, avalon_data) + set_avalon_knob_data(GN, avalon_data) GN.setXYpos(xpos, ypos) GN["name"].setValue(object_name) @@ -235,7 +246,7 @@ class LoadBackdropNodes(api.Loader): else: GN["tile_color"].setValue(int(self.node_color, 16)) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) return update_container(GN, data_imprint) @@ -243,7 +254,6 @@ class LoadBackdropNodes(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index 377d60e84b..9610940619 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -1,8 +1,15 @@ -from avalon import api, io -from avalon.nuke import lib as anlib -from avalon.nuke import containerise, update_container import nuke +from avalon import api, io +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) +from openpype.hosts.nuke.api.lib import ( + maintained_selection +) + class AlembicCameraLoader(api.Loader): """ @@ -43,7 +50,7 @@ class AlembicCameraLoader(api.Loader): # getting file path file = self.fname.replace("\\", "/") - with anlib.maintained_selection(): + with maintained_selection(): camera_node = nuke.createNode( "Camera2", "name {} file {} read_from_file True".format( @@ -122,7 +129,7 @@ class AlembicCameraLoader(api.Loader): # getting file path file = api.get_representation_path(representation).replace("\\", "/") - with 
anlib.maintained_selection(): + with maintained_selection(): camera_node = nuke.toNode(object_name) camera_node['selected'].setValue(True) @@ -156,7 +163,7 @@ class AlembicCameraLoader(api.Loader): # color node by correct color by actual version self.node_version_color(version, camera_node) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) return update_container(camera_node, data_imprint) @@ -181,7 +188,6 @@ class AlembicCameraLoader(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index 9ce72c0519..3a9c9ca691 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -3,13 +3,13 @@ from avalon.vendor import qargparse from avalon import api, io from openpype.hosts.nuke.api.lib import ( - get_imageio_input_colorspace + get_imageio_input_colorspace, + maintained_selection ) -from avalon.nuke import ( +from openpype.hosts.nuke.api import ( containerise, update_container, - viewer_update_and_undo_stop, - maintained_selection + viewer_update_and_undo_stop ) from openpype.hosts.nuke.api import plugin @@ -270,7 +270,7 @@ class LoadClip(plugin.NukeLoader): read_node, updated_dict ) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) if version_data.get("retime", None): self._make_retimes(read_node, version_data) @@ -280,9 +280,6 @@ class LoadClip(plugin.NukeLoader): self.set_as_member(read_node) def remove(self, container): - - from avalon.nuke import viewer_update_and_undo_stop - read_node = nuke.toNode(container['objectName']) assert read_node.Class() == 
"Read", "Must be Read" @@ -302,7 +299,7 @@ class LoadClip(plugin.NukeLoader): self._loader_shift(read_node, start_at_workfile) def _make_retimes(self, parent_node, version_data): - ''' Create all retime and timewarping nodes with coppied animation ''' + ''' Create all retime and timewarping nodes with copied animation ''' speed = version_data.get('speed', 1) time_warp_nodes = version_data.get('timewarps', []) last_node = None @@ -378,4 +375,4 @@ class LoadClip(plugin.NukeLoader): "class_name": self.__class__.__name__ } - return self.node_name_template.format(**name_data) \ No newline at end of file + return self.node_name_template.format(**name_data) diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index 8ba1b6b7c1..f636c6b510 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -1,7 +1,12 @@ -from avalon import api, style, io -import nuke import json from collections import OrderedDict +import nuke +from avalon import api, style, io +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class LoadEffects(api.Loader): @@ -30,9 +35,6 @@ class LoadEffects(api.Loader): Returns: nuke node: containerised nuke node object """ - # import dependencies - from avalon.nuke import containerise - # get main variables version = context['version'] version_data = version.get("data", {}) @@ -138,10 +140,6 @@ class LoadEffects(api.Loader): inputs: """ - - from avalon.nuke import ( - update_container - ) # get main variables # Get version from io version = io.find_one({ @@ -253,7 +251,7 @@ class LoadEffects(api.Loader): else: GN["tile_color"].setValue(int("0x3469ffff", 16)) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) def connect_read_node(self, group_node, asset, subset): """ @@ -314,7 +312,7 @@ class 
LoadEffects(api.Loader): def byteify(self, input): """ Converts unicode strings to strings - It goes trought all dictionary + It goes through all dictionary Arguments: input (dict/str): input @@ -338,7 +336,6 @@ class LoadEffects(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index d0cab26842..990bce54f1 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -1,8 +1,15 @@ -from avalon import api, style, io -import nuke import json from collections import OrderedDict + +import nuke + +from avalon import api, style, io from openpype.hosts.nuke.api import lib +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class LoadEffectsInputProcess(api.Loader): @@ -30,8 +37,6 @@ class LoadEffectsInputProcess(api.Loader): Returns: nuke node: containerised nuke node object """ - # import dependencies - from avalon.nuke import containerise # get main variables version = context['version'] @@ -142,9 +147,6 @@ class LoadEffectsInputProcess(api.Loader): """ - from avalon.nuke import ( - update_container - ) # get main variables # Get version from io version = io.find_one({ @@ -258,7 +260,7 @@ class LoadEffectsInputProcess(api.Loader): else: GN["tile_color"].setValue(int("0x3469ffff", 16)) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) def connect_active_viewer(self, group_node): """ @@ -331,7 +333,7 @@ class LoadEffectsInputProcess(api.Loader): def byteify(self, input): """ Converts unicode strings to strings - It goes trought all dictionary + It goes through all 
dictionary Arguments: input (dict/str): input @@ -355,7 +357,6 @@ class LoadEffectsInputProcess(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index c6228b95f6..659977d789 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -1,7 +1,15 @@ -from avalon import api, style, io import nuke -from avalon.nuke import lib as anlib -from avalon.nuke import containerise, update_container +from avalon import api, style, io +from openpype.hosts.nuke.api.lib import ( + maintained_selection, + get_avalon_knob_data, + set_avalon_knob_data +) +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class LoadGizmo(api.Loader): @@ -61,7 +69,7 @@ class LoadGizmo(api.Loader): # just in case we are in group lets jump out of it nuke.endGroup() - with anlib.maintained_selection(): + with maintained_selection(): # add group from nk nuke.nodePaste(file) @@ -122,16 +130,16 @@ class LoadGizmo(api.Loader): # just in case we are in group lets jump out of it nuke.endGroup() - with anlib.maintained_selection(): + with maintained_selection(): xpos = GN.xpos() ypos = GN.ypos() - avalon_data = anlib.get_avalon_knob_data(GN) + avalon_data = get_avalon_knob_data(GN) nuke.delete(GN) # add group from nk nuke.nodePaste(file) GN = nuke.selectedNode() - anlib.set_avalon_knob_data(GN, avalon_data) + set_avalon_knob_data(GN, avalon_data) GN.setXYpos(xpos, ypos) GN["name"].setValue(object_name) @@ -149,7 +157,7 @@ class LoadGizmo(api.Loader): else: GN["tile_color"].setValue(int(self.node_color, 16)) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: 
{}".format(version.get("name"))) return update_container(GN, data_imprint) @@ -157,7 +165,6 @@ class LoadGizmo(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 5ca101d6cb..240bfd467d 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -1,8 +1,16 @@ from avalon import api, style, io import nuke -from openpype.hosts.nuke.api import lib as pnlib -from avalon.nuke import lib as anlib -from avalon.nuke import containerise, update_container +from openpype.hosts.nuke.api.lib import ( + maintained_selection, + create_backdrop, + get_avalon_knob_data, + set_avalon_knob_data +) +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class LoadGizmoInputProcess(api.Loader): @@ -62,7 +70,7 @@ class LoadGizmoInputProcess(api.Loader): # just in case we are in group lets jump out of it nuke.endGroup() - with anlib.maintained_selection(): + with maintained_selection(): # add group from nk nuke.nodePaste(file) @@ -128,16 +136,16 @@ class LoadGizmoInputProcess(api.Loader): # just in case we are in group lets jump out of it nuke.endGroup() - with anlib.maintained_selection(): + with maintained_selection(): xpos = GN.xpos() ypos = GN.ypos() - avalon_data = anlib.get_avalon_knob_data(GN) + avalon_data = get_avalon_knob_data(GN) nuke.delete(GN) # add group from nk nuke.nodePaste(file) GN = nuke.selectedNode() - anlib.set_avalon_knob_data(GN, avalon_data) + set_avalon_knob_data(GN, avalon_data) GN.setXYpos(xpos, ypos) GN["name"].setValue(object_name) @@ -155,7 +163,7 @@ class LoadGizmoInputProcess(api.Loader): else: GN["tile_color"].setValue(int(self.node_color, 16)) - 
self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) return update_container(GN, data_imprint) @@ -197,8 +205,12 @@ class LoadGizmoInputProcess(api.Loader): viewer["input_process_node"].setValue(group_node_name) # put backdrop under - pnlib.create_backdrop(label="Input Process", layer=2, - nodes=[viewer, group_node], color="0x7c7faaff") + create_backdrop( + label="Input Process", + layer=2, + nodes=[viewer, group_node], + color="0x7c7faaff" + ) return True @@ -210,7 +222,7 @@ class LoadGizmoInputProcess(api.Loader): def byteify(self, input): """ Converts unicode strings to strings - It goes trought all dictionary + It goes through all dictionary Arguments: input (dict/str): input @@ -234,7 +246,6 @@ class LoadGizmoInputProcess(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 02a5b55c18..d36226b139 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -7,6 +7,11 @@ from avalon import api, io from openpype.hosts.nuke.api.lib import ( get_imageio_input_colorspace ) +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class LoadImage(api.Loader): @@ -46,10 +51,6 @@ class LoadImage(api.Loader): return cls.representations + cls._representations def load(self, context, name, namespace, options): - from avalon.nuke import ( - containerise, - viewer_update_and_undo_stop - ) self.log.info("__ options: `{}`".format(options)) frame_number = options.get("frame_number", 1) @@ -154,11 +155,6 @@ class LoadImage(api.Loader): inputs: """ - - from avalon.nuke import ( - update_container - ) - 
node = nuke.toNode(container["objectName"]) frame_number = node["first"].value() @@ -231,12 +227,9 @@ class LoadImage(api.Loader): node, updated_dict ) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) def remove(self, container): - - from avalon.nuke import viewer_update_and_undo_stop - node = nuke.toNode(container['objectName']) assert node.Class() == "Read", "Must be Read" diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index 15fa4fa35c..2b52bbf00f 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -1,7 +1,11 @@ -from avalon import api, io -from avalon.nuke import lib as anlib -from avalon.nuke import containerise, update_container import nuke +from avalon import api, io +from openpype.hosts.nuke.api.lib import maintained_selection +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class AlembicModelLoader(api.Loader): @@ -43,7 +47,7 @@ class AlembicModelLoader(api.Loader): # getting file path file = self.fname.replace("\\", "/") - with anlib.maintained_selection(): + with maintained_selection(): model_node = nuke.createNode( "ReadGeo2", "name {} file {} ".format( @@ -122,7 +126,7 @@ class AlembicModelLoader(api.Loader): # getting file path file = api.get_representation_path(representation).replace("\\", "/") - with anlib.maintained_selection(): + with maintained_selection(): model_node = nuke.toNode(object_name) model_node['selected'].setValue(True) @@ -156,7 +160,7 @@ class AlembicModelLoader(api.Loader): # color node by correct color by actual version self.node_version_color(version, model_node) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) return update_container(model_node, data_imprint) @@ -181,7 
+185,6 @@ class AlembicModelLoader(api.Loader): self.update(container, representation) def remove(self, container): - from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index 7444dd6e96..aa48b631c5 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -1,6 +1,11 @@ -from avalon import api, style, io -from avalon.nuke import get_avalon_knob_data import nuke +from avalon import api, style, io +from openpype.hosts.nuke.api.lib import get_avalon_knob_data +from openpype.hosts.nuke.api import ( + containerise, + update_container, + viewer_update_and_undo_stop +) class LinkAsGroup(api.Loader): @@ -15,8 +20,6 @@ class LinkAsGroup(api.Loader): color = style.colors.alert def load(self, context, name, namespace, data): - - from avalon.nuke import containerise # for k, v in context.items(): # log.info("key: `{}`, value: {}\n".format(k, v)) version = context['version'] @@ -67,7 +70,7 @@ class LinkAsGroup(api.Loader): P["useOutput"].setValue(True) with P: - # iterate trough all nodes in group node and find pype writes + # iterate through all nodes in group node and find pype writes writes = [n.name() for n in nuke.allNodes() if n.Class() == "Group" if get_avalon_knob_data(n)] @@ -103,11 +106,6 @@ class LinkAsGroup(api.Loader): inputs: """ - - from avalon.nuke import ( - update_container - ) - node = nuke.toNode(container['objectName']) root = api.get_representation_path(representation).replace("\\", "/") @@ -152,10 +150,9 @@ class LinkAsGroup(api.Loader): else: node["tile_color"].setValue(int("0xff0ff0ff", 16)) - self.log.info("udated to version: {}".format(version.get("name"))) + self.log.info("updated to version: {}".format(version.get("name"))) def remove(self, container): - 
from avalon.nuke import viewer_update_and_undo_stop node = nuke.toNode(container['objectName']) with viewer_update_and_undo_stop(): nuke.delete(node) diff --git a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py index 0747c15ea7..0a2df0898e 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py @@ -1,9 +1,16 @@ -import pyblish.api -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import lib as pnlib -import nuke import os + +import nuke + +import pyblish.api + import openpype +from openpype.hosts.nuke.api.lib import ( + maintained_selection, + reset_selection, + select_nodes +) + class ExtractBackdropNode(openpype.api.Extractor): """Extracting content of backdrop nodes @@ -27,7 +34,7 @@ class ExtractBackdropNode(openpype.api.Extractor): path = os.path.join(stagingdir, filename) # maintain selection - with anlib.maintained_selection(): + with maintained_selection(): # all connections outside of backdrop connections_in = instance.data["nodeConnectionsIn"] connections_out = instance.data["nodeConnectionsOut"] @@ -44,7 +51,7 @@ class ExtractBackdropNode(openpype.api.Extractor): nodes.append(inpn) tmp_nodes.append(inpn) - anlib.reset_selection() + reset_selection() # connect output node for n, output in connections_out.items(): @@ -58,11 +65,11 @@ class ExtractBackdropNode(openpype.api.Extractor): opn.autoplace() nodes.append(opn) tmp_nodes.append(opn) - anlib.reset_selection() + reset_selection() # select nodes to copy - anlib.reset_selection() - anlib.select_nodes(nodes) + reset_selection() + select_nodes(nodes) # create tmp nk file # save file to the path nuke.nodeCopy(path) diff --git a/openpype/hosts/nuke/plugins/publish/extract_camera.py b/openpype/hosts/nuke/plugins/publish/extract_camera.py index bc50dac108..54f65a0be3 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_camera.py +++ 
b/openpype/hosts/nuke/plugins/publish/extract_camera.py @@ -1,10 +1,12 @@ -import nuke import os import math +from pprint import pformat + +import nuke + import pyblish.api import openpype.api -from avalon.nuke import lib as anlib -from pprint import pformat +from openpype.hosts.nuke.api.lib import maintained_selection class ExtractCamera(openpype.api.Extractor): @@ -52,7 +54,7 @@ class ExtractCamera(openpype.api.Extractor): filename = subset + ".{}".format(extension) file_path = os.path.join(staging_dir, filename).replace("\\", "/") - with anlib.maintained_selection(): + with maintained_selection(): # bake camera with axeses onto word coordinate XYZ rm_n = bakeCameraWithAxeses( nuke.toNode(instance.data["name"]), output_range) @@ -113,7 +115,7 @@ class ExtractCamera(openpype.api.Extractor): def bakeCameraWithAxeses(camera_node, output_range): - """ Baking all perent hiearchy of axeses into camera + """ Baking all perent hierarchy of axeses into camera with transposition onto word XYZ coordinance """ bakeFocal = False diff --git a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py b/openpype/hosts/nuke/plugins/publish/extract_gizmo.py index 78bf9c998d..2d5bfdeb5e 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py +++ b/openpype/hosts/nuke/plugins/publish/extract_gizmo.py @@ -1,9 +1,15 @@ -import pyblish.api -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import utils as pnutils -import nuke import os +import nuke + +import pyblish.api + import openpype +from openpype.hosts.nuke.api import utils as pnutils +from openpype.hosts.nuke.api.lib import ( + maintained_selection, + reset_selection, + select_nodes +) class ExtractGizmo(openpype.api.Extractor): @@ -26,17 +32,17 @@ class ExtractGizmo(openpype.api.Extractor): path = os.path.join(stagingdir, filename) # maintain selection - with anlib.maintained_selection(): + with maintained_selection(): orig_grpn_name = orig_grpn.name() tmp_grpn_name = orig_grpn_name + "_tmp" # select 
original group node - anlib.select_nodes([orig_grpn]) + select_nodes([orig_grpn]) # copy to clipboard nuke.nodeCopy("%clipboard%") # reset selection to none - anlib.reset_selection() + reset_selection() # paste clipboard nuke.nodePaste("%clipboard%") diff --git a/openpype/hosts/nuke/plugins/publish/extract_model.py b/openpype/hosts/nuke/plugins/publish/extract_model.py index 43214bf3e9..0375263338 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_model.py +++ b/openpype/hosts/nuke/plugins/publish/extract_model.py @@ -1,9 +1,12 @@ -import nuke import os +from pprint import pformat +import nuke import pyblish.api import openpype.api -from avalon.nuke import lib as anlib -from pprint import pformat +from openpype.hosts.nuke.api.lib import ( + maintained_selection, + select_nodes +) class ExtractModel(openpype.api.Extractor): @@ -49,9 +52,9 @@ class ExtractModel(openpype.api.Extractor): filename = subset + ".{}".format(extension) file_path = os.path.join(staging_dir, filename).replace("\\", "/") - with anlib.maintained_selection(): + with maintained_selection(): # select model node - anlib.select_nodes([model_node]) + select_nodes([model_node]) # create write geo node wg_n = nuke.createNode("WriteGeo") diff --git a/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py b/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py index c3a6a3b167..eb9bc0b429 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py +++ b/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py @@ -1,10 +1,10 @@ import nuke import pyblish.api -from avalon.nuke import maintained_selection +from openpype.hosts.nuke.api.lib import maintained_selection class CreateOutputNode(pyblish.api.ContextPlugin): - """Adding output node for each ouput write node + """Adding output node for each output write node So when latly user will want to Load .nk as LifeGroup or Precomp Nuke will not complain about missing Output node """ diff --git 
a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py index 8ba746a3c4..4cf2fd7d9f 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py @@ -1,8 +1,8 @@ import os import pyblish.api -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import plugin import openpype +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import maintained_selection class ExtractReviewDataLut(openpype.api.Extractor): @@ -37,7 +37,7 @@ class ExtractReviewDataLut(openpype.api.Extractor): "StagingDir `{0}`...".format(instance.data["stagingDir"])) # generate data - with anlib.maintained_selection(): + with maintained_selection(): exporter = plugin.ExporterReviewLut( self, instance ) diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py index 32962b57a6..5bbc88266a 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py +++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py @@ -1,8 +1,8 @@ import os import pyblish.api -from avalon.nuke import lib as anlib -from openpype.hosts.nuke.api import plugin import openpype +from openpype.hosts.nuke.api import plugin +from openpype.hosts.nuke.api.lib import maintained_selection class ExtractReviewDataMov(openpype.api.Extractor): @@ -41,7 +41,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): self.log.info(self.outputs) # generate data - with anlib.maintained_selection(): + with maintained_selection(): generated_repres = [] for o_name, o_data in self.outputs.items(): f_families = o_data["filter"]["families"] @@ -49,7 +49,7 @@ class ExtractReviewDataMov(openpype.api.Extractor): # test if family found in context test_families = any([ - # first if exact family set is mathing + # first if exact family set is matching # make sure 
only interesetion of list is correct bool(set(families).intersection(f_families)), # and if famiies are set at all diff --git a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py index 0f68680742..50e5f995f4 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py +++ b/openpype/hosts/nuke/plugins/publish/extract_slate_frame.py @@ -1,8 +1,8 @@ import os import nuke -from avalon.nuke import lib as anlib import pyblish.api import openpype +from openpype.hosts.nuke.api.lib import maintained_selection class ExtractSlateFrame(openpype.api.Extractor): @@ -25,7 +25,7 @@ class ExtractSlateFrame(openpype.api.Extractor): else: self.viewer_lut_raw = False - with anlib.maintained_selection(): + with maintained_selection(): self.log.debug("instance: {}".format(instance)) self.log.debug("instance.data[families]: {}".format( instance.data["families"])) diff --git a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py index 0c9af66435..ef6d486ca2 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py @@ -1,9 +1,9 @@ import sys import os import nuke -from avalon.nuke import lib as anlib import pyblish.api import openpype +from openpype.hosts.nuke.api.lib import maintained_selection if sys.version_info[0] >= 3: @@ -30,7 +30,7 @@ class ExtractThumbnail(openpype.api.Extractor): if "render.farm" in instance.data["families"]: return - with anlib.maintained_selection(): + with maintained_selection(): self.log.debug("instance: {}".format(instance)) self.log.debug("instance.data[families]: {}".format( instance.data["families"])) diff --git a/openpype/hosts/nuke/plugins/publish/increment_script_version.py b/openpype/hosts/nuke/plugins/publish/increment_script_version.py index f55ed21ee2..b854dc0aa1 100644 --- 
a/openpype/hosts/nuke/plugins/publish/increment_script_version.py +++ b/openpype/hosts/nuke/plugins/publish/increment_script_version.py @@ -15,7 +15,7 @@ class IncrementScriptVersion(pyblish.api.ContextPlugin): def process(self, context): assert all(result["success"] for result in context.data["results"]), ( - "Publishing not succesfull so version is not increased.") + "Publishing not successful so version is not increased.") from openpype.lib import version_up path = context.data["currentFile"] diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py index 5c30df9a62..97ddef0a59 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_instances.py @@ -1,7 +1,10 @@ import nuke import pyblish.api from avalon import io, api -from avalon.nuke import lib as anlib +from openpype.hosts.nuke.api.lib import ( + add_publish_knob, + get_avalon_knob_data +) @pyblish.api.log @@ -39,7 +42,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): self.log.warning(E) # get data from avalon knob - avalon_knob_data = anlib.get_avalon_knob_data( + avalon_knob_data = get_avalon_knob_data( node, ["avalon:", "ak:"]) self.log.debug("avalon_knob_data: {}".format(avalon_knob_data)) @@ -115,7 +118,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin): # get publish knob value if "publish" not in node.knobs(): - anlib.add_publish_knob(node) + add_publish_knob(node) # sync workfile version _families_test = [family] + families diff --git a/openpype/hosts/nuke/plugins/publish/precollect_workfile.py b/openpype/hosts/nuke/plugins/publish/precollect_workfile.py index 0e27273ceb..a2d1c80628 100644 --- a/openpype/hosts/nuke/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/nuke/plugins/publish/precollect_workfile.py @@ -1,8 +1,13 @@ -import nuke -import pyblish.api import os + +import nuke + +import pyblish.api import openpype.api 
as pype -from avalon.nuke import lib as anlib +from openpype.hosts.nuke.api.lib import ( + add_publish_knob, + get_avalon_knob_data +) class CollectWorkfile(pyblish.api.ContextPlugin): @@ -17,9 +22,9 @@ class CollectWorkfile(pyblish.api.ContextPlugin): current_file = os.path.normpath(nuke.root().name()) - knob_data = anlib.get_avalon_knob_data(root) + knob_data = get_avalon_knob_data(root) - anlib.add_publish_knob(root) + add_publish_knob(root) family = "workfile" task = os.getenv("AVALON_TASK", None) diff --git a/openpype/hosts/nuke/plugins/publish/remove_ouput_node.py b/openpype/hosts/nuke/plugins/publish/remove_ouput_node.py index 12361595fe..fb77e8638c 100644 --- a/openpype/hosts/nuke/plugins/publish/remove_ouput_node.py +++ b/openpype/hosts/nuke/plugins/publish/remove_ouput_node.py @@ -3,7 +3,7 @@ import pyblish.api class RemoveOutputNode(pyblish.api.ContextPlugin): - """Removing output node for each ouput write node + """Removing output node for each output write node """ label = 'Output Node Remove' diff --git a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py index f280ad4af1..e2843d146e 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py @@ -1,6 +1,6 @@ -import pyblish -from avalon.nuke import lib as anlib import nuke +import pyblish +from openpype.hosts.nuke.api.lib import maintained_selection class SelectCenterInNodeGraph(pyblish.api.Action): @@ -28,7 +28,7 @@ class SelectCenterInNodeGraph(pyblish.api.Action): all_yC = list() # maintain selection - with anlib.maintained_selection(): + with maintained_selection(): # collect all failed nodes xpos and ypos for instance in instances: bdn = instance[0] @@ -48,7 +48,7 @@ class SelectCenterInNodeGraph(pyblish.api.Action): @pyblish.api.log class ValidateBackdrop(pyblish.api.InstancePlugin): """Validate amount of nodes on backdrop node in case user - forgoten to add 
nodes above the publishing backdrop node""" + forgotten to add nodes above the publishing backdrop node""" order = pyblish.api.ValidatorOrder optional = True diff --git a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py b/openpype/hosts/nuke/plugins/publish/validate_gizmo.py index 9c94ea88ef..d0d930f50c 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py +++ b/openpype/hosts/nuke/plugins/publish/validate_gizmo.py @@ -1,6 +1,6 @@ -import pyblish -from avalon.nuke import lib as anlib import nuke +import pyblish +from openpype.hosts.nuke.api.lib import maintained_selection class OpenFailedGroupNode(pyblish.api.Action): @@ -25,7 +25,7 @@ class OpenFailedGroupNode(pyblish.api.Action): instances = pyblish.api.instances_by_plugin(failed, plugin) # maintain selection - with anlib.maintained_selection(): + with maintained_selection(): # collect all failed nodes xpos and ypos for instance in instances: grpn = instance[0] diff --git a/openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py b/openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py index ddf46a0873..842f74b6f6 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py +++ b/openpype/hosts/nuke/plugins/publish/validate_instance_in_context.py @@ -6,8 +6,11 @@ import nuke import pyblish.api import openpype.api -import avalon.nuke.lib -import openpype.hosts.nuke.api as nuke_api +from openpype.hosts.nuke.api.lib import ( + recreate_instance, + reset_selection, + select_nodes +) class SelectInvalidInstances(pyblish.api.Action): @@ -47,12 +50,12 @@ class SelectInvalidInstances(pyblish.api.Action): self.deselect() def select(self, instances): - avalon.nuke.lib.select_nodes( + select_nodes( [nuke.toNode(str(x)) for x in instances] ) def deselect(self): - avalon.nuke.lib.reset_selection() + reset_selection() class RepairSelectInvalidInstances(pyblish.api.Action): @@ -82,7 +85,7 @@ class RepairSelectInvalidInstances(pyblish.api.Action): context_asset = 
context.data["assetEntity"]["name"] for instance in instances: origin_node = instance[0] - nuke_api.lib.recreate_instance( + recreate_instance( origin_node, avalon_data={"asset": context_asset} ) diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py index 72fd51a900..5ee93403d0 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py @@ -23,7 +23,7 @@ class RepairNukeWriteDeadlineTab(pyblish.api.Action): for instance in instances: group_node = [x for x in instance if x.Class() == "Group"][0] - # Remove exising knobs. + # Remove existing knobs. knob_names = openpype.hosts.nuke.lib.get_deadline_knob_names() for name, knob in group_node.knobs().iteritems(): if name in knob_names: diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py index ba34ec8338..a73bed8edd 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py @@ -1,13 +1,12 @@ -import toml import os +import toml import nuke from avalon import api -import re import pyblish.api import openpype.api -from avalon.nuke import get_avalon_knob_data +from openpype.hosts.nuke.api.lib import get_avalon_knob_data class ValidateWriteLegacy(pyblish.api.InstancePlugin): diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py b/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py index 732f321b85..c0d5c8f402 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py @@ -1,8 +1,11 @@ import os import pyblish.api import openpype.utils -import openpype.hosts.nuke.lib as nukelib -import avalon.nuke +from openpype.hosts.nuke.api.lib import ( + get_write_node_template_attr, + 
get_node_path
)
+

@pyblish.api.log
class RepairNukeWriteNodeAction(pyblish.api.Action):
@@ -15,7 +18,7 @@ class RepairNukeWriteNodeAction(pyblish.api.Action):
 
         for instance in instances:
             node = instance[1]
-            correct_data = nukelib.get_write_node_template_attr(node)
+            correct_data = get_write_node_template_attr(node)
             for k, v in correct_data.items():
                 node[k].setValue(v)
             self.log.info("Node attributes were fixed")
@@ -34,14 +37,14 @@ class ValidateNukeWriteNode(pyblish.api.InstancePlugin):
 
     def process(self, instance):
         node = instance[1]
-        correct_data = nukelib.get_write_node_template_attr(node)
+        correct_data = get_write_node_template_attr(node)
         check = []
 
         for k, v in correct_data.items():
             if k is 'file':
                 padding = len(v.split('#'))
-                ref_path = avalon.nuke.lib.get_node_path(v, padding)
-                n_path = avalon.nuke.lib.get_node_path(node[k].value(), padding)
+                ref_path = get_node_path(v, padding)
+                n_path = get_node_path(node[k].value(), padding)
 
                 isnt = False
                 for i, p in enumerate(ref_path):
                     if str(n_path[i]) not in str(p):
diff --git a/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py b/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py
index f03067aa4b..368ee64e32 100644
--- a/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py
+++ b/openpype/hosts/nuke/startup/KnobScripter/knob_scripter.py
@@ -1,6 +1,6 @@
 # -------------------------------------------------
 # KnobScripter by Adrian Pueyo
-# Complete python sript editor for Nuke
+# Complete python script editor for Nuke
 # adrianpueyo.com, 2016-2019
 import string
 import traceback
@@ -2539,7 +2539,7 @@ class KnobScripterTextEdit(QtWidgets.QPlainTextEdit):
         if self.noSelection:
             self.cursor.setPosition(self.lastChar)
 
-        # check whether the the orignal selection was from top to bottom or vice versa
+        # check whether the original selection was from top to bottom or vice versa
         else:
             if self.originalPosition == self.firstChar:
                 first = self.lastChar
@@ -3012,7 +3012,7 @@ class 
KnobScripterTextEditMain(KnobScripterTextEdit): return match_key, match_snippet def placeholderToEnd(self, text, placeholder): - '''Returns distance (int) from the first ocurrence of the placeholder, to the end of the string with placeholders removed''' + '''Returns distance (int) from the first occurrence of the placeholder, to the end of the string with placeholders removed''' search = re.search(placeholder, text) if not search: return -1 @@ -3671,7 +3671,7 @@ class KnobScripterPrefs(QtWidgets.QDialog): def updateContext(): ''' - Get the current selection of nodes with their appropiate context + Get the current selection of nodes with their appropriate context Doing this outside the KnobScripter -> forces context update inside groups when needed ''' global knobScripterSelectedNodes diff --git a/openpype/hosts/nuke/startup/init.py b/openpype/hosts/nuke/startup/init.py index 0ea5d1ad7d..d7560814bf 100644 --- a/openpype/hosts/nuke/startup/init.py +++ b/openpype/hosts/nuke/startup/init.py @@ -1,2 +1,4 @@ +import nuke + # default write mov nuke.knobDefault('Write.mov.colorspace', 'sRGB') diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py index b7ed35b3b4..2cac6d09e7 100644 --- a/openpype/hosts/nuke/startup/menu.py +++ b/openpype/hosts/nuke/startup/menu.py @@ -1,14 +1,19 @@ +import nuke +import avalon.api + +from openpype.api import Logger +from openpype.hosts.nuke import api from openpype.hosts.nuke.api.lib import ( on_script_load, check_inventory_versions, - WorkfileSettings + WorkfileSettings, + dirmap_file_name_filter ) -import nuke -from openpype.api import Logger -from openpype.hosts.nuke.api.lib import dirmap_file_name_filter +log = Logger.get_logger(__name__) -log = Logger().get_logger(__name__) + +avalon.api.install(api) # fix ffmpeg settings on script nuke.addOnScriptLoad(on_script_load) diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py 
b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py index ab4682e63e..6627aded51 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py +++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py @@ -8,7 +8,7 @@ from openpype.hosts.photoshop.api import get_unique_layer_name class ImageFromSequenceLoader(photoshop.PhotoshopLoader): - """ Load specifing image from sequence + """ Load specific image from sequence Used only as quick load of reference file from a sequence. @@ -92,3 +92,4 @@ class ImageFromSequenceLoader(photoshop.PhotoshopLoader): def remove(self, container): """No update possible, not containerized.""" pass + diff --git a/openpype/hosts/resolve/README.markdown b/openpype/hosts/resolve/README.markdown index 50664fbd21..8c9f72fb0c 100644 --- a/openpype/hosts/resolve/README.markdown +++ b/openpype/hosts/resolve/README.markdown @@ -4,10 +4,10 @@ - add absolute path to ffmpeg into openpype settings ![image](https://user-images.githubusercontent.com/40640033/102630786-43294f00-414d-11eb-98de-f0ae51f62077.png) - install Python 3.6 into `%LOCALAPPDATA%/Programs/Python/Python36` (only respected path by Resolve) -- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move builded files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. 
I was building it on Win10 machine with Visual Studio Community 2019 and +- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. I was building it on Win10 machine with Visual Studio Community 2019 and ![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with installed CMake in PATH. - install PySide2 for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2` -- make sure Resovle Fusion (Fusion Tab/menu/Fusion/Fusion Setings) is set to Python 3.6 +- make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6 ![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png) #### Editorial setup diff --git a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt b/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt index a24a053cd7..f1b8b81a71 100644 --- a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt +++ b/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt @@ -366,7 +366,7 @@ TimelineItem DeleteTakeByIndex(idx) --> Bool # Deletes a take by index, 1 <= idx <= number of takes. SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes. FinalizeTake() --> Bool # Finalizes take selection. - CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occured. + CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred. 
List and Dict Data Structures diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py index aa4b2e7219..22f83c6eed 100644 --- a/openpype/hosts/resolve/api/lib.py +++ b/openpype/hosts/resolve/api/lib.py @@ -16,7 +16,7 @@ self = sys.modules[__name__] self.project_manager = None self.media_storage = None -# OpenPype sequencial rename variables +# OpenPype sequential rename variables self.rename_index = 0 self.rename_add = 0 @@ -59,7 +59,7 @@ def maintain_current_timeline(to_timeline: object, project = get_current_project() working_timeline = from_timeline or project.GetCurrentTimeline() - # swith to the input timeline + # switch to the input timeline project.SetCurrentTimeline(to_timeline) try: @@ -566,7 +566,7 @@ def create_compound_clip(clip_data, name, folder): mp_in_rc = opentime.RationalTime((ci_l_offset), rate) mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate) - # get frame in and out for clip swaping + # get frame in and out for clip swapping in_frame = opentime.to_frames(mp_in_rc) out_frame = opentime.to_frames(mp_out_rc) @@ -628,7 +628,7 @@ def create_compound_clip(clip_data, name, folder): def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame): """ - Swaping clips on timeline in timelineItem + Swapping clips on timeline in timelineItem It will add take and activate it to the frame range which is inputted @@ -699,7 +699,7 @@ def get_pype_clip_metadata(clip): def get_clip_attributes(clip): """ - Collect basic atrributes from resolve timeline item + Collect basic attributes from resolve timeline item Args: clip (resolve.TimelineItem): timeline item object diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index ce95cfe02a..8b7e2a6c6a 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -64,7 +64,7 @@ def install(): def uninstall(): - """Uninstall all tha was installed + """Uninstall all that was installed This is 
where you undo everything that was done in `install()`. That means, removing menus, deregistering families and data diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index f1c55a6180..8612cf82ec 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -133,7 +133,7 @@ class CreatorWidget(QtWidgets.QDialog): # convert label text to normal capitalized text with spaces label_text = self.camel_case_split(text) - # assign the new text to lable widget + # assign the new text to label widget label = QtWidgets.QLabel(label_text) label.setObjectName("LineLabel") @@ -367,7 +367,7 @@ class ClipLoader: def _get_asset_data(self): """ Get all available asset data - joint `data` key with asset.data dict into the representaion + joint `data` key with asset.data dict into the representation """ asset_name = self.context["representation"]["context"]["asset"] @@ -540,8 +540,8 @@ class PublishClip: "track": "sequence", } - # parents search patern - parents_search_patern = r"\{([a-z]*?)\}" + # parents search pattern + parents_search_pattern = r"\{([a-z]*?)\}" # default templates for non-ui use rename_default = False @@ -630,7 +630,7 @@ class PublishClip: return self.timeline_item def _populate_timeline_item_default_data(self): - """ Populate default formating data from track item. """ + """ Populate default formatting data from track item. 
""" self.timeline_item_default_data = { "_folder_": "shots", @@ -722,7 +722,7 @@ class PublishClip: # mark review layer if self.review_track and ( self.review_track not in self.review_track_default): - # if review layer is defined and not the same as defalut + # if review layer is defined and not the same as default self.review_layer = self.review_track # shot num calculate if self.rename_index == 0: @@ -771,7 +771,7 @@ class PublishClip: # in case track name and subset name is the same then add if self.subset_name == self.track_name: hero_data["subset"] = self.subset - # assing data to return hierarchy data to tag + # assign data to return hierarchy data to tag tag_hierarchy_data = hero_data # add data to return data dict @@ -823,8 +823,8 @@ class PublishClip: """ Create parents and return it in list. """ self.parents = [] - patern = re.compile(self.parents_search_patern) - par_split = [patern.findall(t).pop() + pattern = re.compile(self.parents_search_pattern) + par_split = [pattern.findall(t).pop() for t in self.hierarchy.split("/")] for key in par_split: diff --git a/openpype/hosts/resolve/api/testing_utils.py b/openpype/hosts/resolve/api/testing_utils.py index 98ad6abcf1..4aac66f4b7 100644 --- a/openpype/hosts/resolve/api/testing_utils.py +++ b/openpype/hosts/resolve/api/testing_utils.py @@ -25,12 +25,12 @@ class TestGUI: ui.Button( { "ID": "inputTestSourcesFolder", - "Text": "Select folder with testing medias", + "Text": "Select folder with testing media", "Weight": 1.25, "ToolTip": ( "Chose folder with videos, sequences, " "single images, nested folders with " - "medias" + "media" ), "Flat": False } diff --git a/openpype/hosts/resolve/hooks/pre_resolve_setup.py b/openpype/hosts/resolve/hooks/pre_resolve_setup.py index bcb27e24fc..978e3760fd 100644 --- a/openpype/hosts/resolve/hooks/pre_resolve_setup.py +++ b/openpype/hosts/resolve/hooks/pre_resolve_setup.py @@ -15,7 +15,7 @@ class ResolvePrelaunch(PreLaunchHook): def execute(self): # TODO: add OTIO 
installation from `openpype/requirements.py` - # making sure pyton 3.6 is installed at provided path + # making sure python 3.6 is installed at provided path py36_dir = os.path.normpath( self.launch_context.env.get("PYTHON36_RESOLVE", "")) assert os.path.isdir(py36_dir), ( diff --git a/openpype/hosts/resolve/otio/davinci_export.py b/openpype/hosts/resolve/otio/davinci_export.py index 2c276d9888..5f11c81fc5 100644 --- a/openpype/hosts/resolve/otio/davinci_export.py +++ b/openpype/hosts/resolve/otio/davinci_export.py @@ -306,7 +306,7 @@ def create_otio_timeline(resolve_project): if index == 0: otio_track.append(clip) else: - # add previouse otio track to timeline + # add previous otio track to timeline otio_timeline.tracks.append(otio_track) # convert track to otio otio_track = create_otio_track( diff --git a/openpype/hosts/resolve/plugins/create/create_shot_clip.py b/openpype/hosts/resolve/plugins/create/create_shot_clip.py index 41fdbf5c61..62d5557a50 100644 --- a/openpype/hosts/resolve/plugins/create/create_shot_clip.py +++ b/openpype/hosts/resolve/plugins/create/create_shot_clip.py @@ -135,7 +135,7 @@ class CreateShotClip(resolve.Creator): "type": "QComboBox", "label": "Subset Name", "target": "ui", - "toolTip": "chose subset name patern, if is selected, name of track layer will be used", # noqa + "toolTip": "chose subset name pattern, if is selected, name of track layer will be used", # noqa "order": 0}, "subsetFamily": { "value": ["plate", "take"], diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py index b1037a9c93..b0cef1838a 100644 --- a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py +++ b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py @@ -16,7 +16,7 @@ def main(env): # activate resolve from openpype avalon.install(bmdvr) - log.info(f"Avalon registred hosts: {avalon.registered_host()}") + log.info(f"Avalon registered hosts: 
{avalon.registered_host()}") bmdvr.launch_pype_menu() diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py index 45c6a264dd..d0d36bb717 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py @@ -83,7 +83,7 @@ class CollectInstances(pyblish.api.InstancePlugin): if isinstance(clip, otio.schema.Gap): continue - # skip all generators like black ampty + # skip all generators like black empty if isinstance( clip.media_reference, otio.schema.GeneratorReference): @@ -142,7 +142,7 @@ class CollectInstances(pyblish.api.InstancePlugin): "item": clip, "clipName": clip_name, - # parent time properities + # parent time properties "trackStartFrame": track_start_frame, "handleStart": handle_start, "handleEnd": handle_end, @@ -180,14 +180,14 @@ class CollectInstances(pyblish.api.InstancePlugin): "families": [] } }) - for subset, properities in self.subsets.items(): - version = properities.get("version") + for subset, properties in self.subsets.items(): + version = properties.get("version") if version == 0: - properities.pop("version") + properties.pop("version") # adding Review-able instance subset_instance_data = deepcopy(instance_data) - subset_instance_data.update(deepcopy(properities)) + subset_instance_data.update(deepcopy(properties)) subset_instance_data.update({ # unique attributes "name": f"{name}_{subset}", diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py index 36bacceb1c..4d7a13fcf2 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_resources.py @@ -31,7 +31,7 @@ class 
CollectInstanceResources(pyblish.api.InstancePlugin):
         editorial_source_root = instance.data["editorialSourceRoot"]
         editorial_source_path = instance.data["editorialSourcePath"]
 
-        # if `editorial_source_path` then loop trough
+        # if `editorial_source_path` then loop through
         if editorial_source_path:
             # add family if mov or mp4 found which is longer for
             # cutting `trimming` to enable `ExtractTrimmingVideoAudio` plugin
@@ -42,7 +42,7 @@ class CollectInstanceResources(pyblish.api.InstancePlugin):
                 instance.data["families"] += ["trimming"]
             return
 
-        # if template patern in path then fill it with `anatomy_data`
+        # if template pattern in path then fill it with `anatomy_data`
         if "{" in editorial_source_root:
             editorial_source_root = editorial_source_root.format(
                 **anatomy_data)
@@ -86,7 +86,7 @@ class CollectInstanceResources(pyblish.api.InstancePlugin):
                 subset_files.update({clip_dir_path: subset_files_items})
 
             # break the loop if correct_clip_dir was captured
-            # no need to cary on if corect folder was found
+            # no need to carry on if correct folder was found
             if correct_clip_dir:
                 break
 
@@ -113,10 +113,10 @@ class CollectInstanceResources(pyblish.api.InstancePlugin):
         unique_subset_names = list()
         root_dir = list(subset_files.keys()).pop()
         files_list = subset_files[root_dir]
-        search_patern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
+        search_pattern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
         for _file in files_list:
-            patern = re.compile(search_patern)
-            match = patern.findall(_file)
+            pattern = re.compile(search_pattern)
+            match = pattern.findall(_file)
             if not match:
                 continue
             match_subset = match.pop()
@@ -175,7 +175,7 @@ class CollectInstanceResources(pyblish.api.InstancePlugin):
         instance_data["representations"] = list()
         collection_head_name = None
 
-        # loop trough collections and create representations
+        # loop through collections and create representations
         for _collection in collections:
             ext = _collection.tail[1:]
             collection_head_name = _collection.head
@@ -210,7 +210,7 @@ class 
CollectInstanceResources(pyblish.api.InstancePlugin): frames.append(frame_start) frames.append(frame_end) - # loop trough reminders and create representations + # loop through reminders and create representations for _reminding_file in remainder: ext = os.path.splitext(_reminding_file)[-1][1:] if ext not in instance_data["extensions"]: diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py index acad98d784..b2735f3428 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py @@ -99,7 +99,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): # in case SP context is set to the same folder if (_index == 0) and ("folder" in parent_key) \ and (parents[-1]["entity_name"] == parent_filled): - self.log.debug(f" skiping : {parent_filled}") + self.log.debug(f" skipping : {parent_filled}") continue # in case first parent is project then start parents from start @@ -119,7 +119,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): # convert hierarchy to string hierarchy = "/".join(hierarchy) - # assing to instance data + # assign to instance data instance.data["hierarchy"] = hierarchy instance.data["parents"] = parents @@ -202,7 +202,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): class CollectHierarchyContext(pyblish.api.ContextPlugin): - '''Collecting Hierarchy from instaces and building + '''Collecting Hierarchy from instances and building context hierarchy tree ''' diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py index c9063c22ed..82dbba3345 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py +++ 
b/openpype/hosts/standalonepublisher/plugins/publish/collect_representation_names.py
@@ -8,7 +8,7 @@ class CollectRepresentationNames(pyblish.api.InstancePlugin):
         Sets the representation names for given families based on RegEx filter
     """
 
-    label = "Collect Representaion Names"
+    label = "Collect Representation Names"
     order = pyblish.api.CollectorOrder
     families = []
     hosts = ["standalonepublisher"]
diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py
index f210be3631..4bafe81020 100644
--- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py
+++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py
@@ -16,7 +16,7 @@ class ValidateTextureBatchNaming(pyblish.api.InstancePlugin):
         if isinstance(file_name, list):
             file_name = file_name[0]
 
-        msg = "Couldnt find asset name in '{}'\n".format(file_name) + \
+        msg = "Couldn't find asset name in '{}'\n".format(file_name) + \
               "File name doesn't follow configured pattern.\n" + \
               "Please rename the file."
         assert "NOT_AVAIL" not in instance.data["asset_build"], msg
diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py
index 6c8aca5445..c8d6d3b458 100644
--- a/openpype/hosts/tvpaint/api/communication_server.py
+++ b/openpype/hosts/tvpaint/api/communication_server.py
@@ -351,7 +351,7 @@ class QtTVPaintRpc(BaseTVPaintRpc):
     async def scene_inventory_tool(self):
         """Open Scene Inventory tool.
 
-        Funciton can't confirm if tool was opened becauise one part of
+        Function can't confirm if tool was opened because one part of
         SceneInventory initialization is calling websocket request to host but
         host can't response because is waiting for response from this call.
        
""" @@ -578,7 +578,7 @@ class BaseCommunicator: # Folder for right windows plugin files source_plugins_dir = os.path.join(plugin_files_path, subfolder) - # Path to libraies (.dll) required for plugin library + # Path to libraries (.dll) required for plugin library # - additional libraries can be copied to TVPaint installation folder # (next to executable) or added to PATH environment variable additional_libs_folder = os.path.join( diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index 654aff19d8..9e6404e72f 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -159,7 +159,7 @@ def get_layers_data(layer_ids=None, communicator=None): def parse_group_data(data): - """Paser group data collected in 'get_groups_data'.""" + """Parse group data collected in 'get_groups_data'.""" output = [] groups_raw = data.split("\n") for group_raw in groups_raw: diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index e7c5159bbc..6b4632e2f2 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -112,7 +112,7 @@ def containerise( members (list): List of members that were loaded and belongs to the container (layer names). current_containers (list): Preloaded containers. Should be used only - on update/switch when containers were modified durring the process. + on update/switch when containers were modified during the process. Returns: dict: Container data stored to workfile metadata. @@ -166,7 +166,7 @@ def split_metadata_string(text, chunk_length=None): set to global variable `TVPAINT_CHUNK_LENGTH`. Returns: - list: List of strings wil at least one item. + list: List of strings with at least one item. 
""" if chunk_length is None: chunk_length = TVPAINT_CHUNK_LENGTH diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index e65c25b8d1..af80c9eae2 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -35,7 +35,7 @@ class Creator(PypeCreatorMixin, avalon.api.Creator): def are_instances_same(instance_1, instance_2): """Compare instances but skip keys with unique values. - During compare are skiped keys that will be 100% sure + During compare are skipped keys that will be 100% sure different on new instance, like "id". Returns: diff --git a/openpype/hosts/tvpaint/lib.py b/openpype/hosts/tvpaint/lib.py index 513bb2d952..715ebb4a9d 100644 --- a/openpype/hosts/tvpaint/lib.py +++ b/openpype/hosts/tvpaint/lib.py @@ -278,7 +278,7 @@ def _cleanup_out_range_frames(output_idx_by_frame_idx, range_start, range_end): } // Result { - 2: 2, // Redirect to self as is first that refence out range + 2: 2, // Redirect to self as is first that reference out range 3: 2 // Redirect to first redirected frame } ``` @@ -593,7 +593,7 @@ def composite_rendered_layers( transparent_filepaths.add(dst_filepath) continue - # Store first destionation filepath to be used for transparent images + # Store first destination filepath to be used for transparent images if first_dst_filepath is None: first_dst_filepath = dst_filepath @@ -657,7 +657,7 @@ def rename_filepaths_by_frame_start( max(range_end, new_frame_end) ) - # Use differnet ranges based on Mark In and output Frame Start values + # Use different ranges based on Mark In and output Frame Start values # - this is to make sure that filename renaming won't affect files that # are not renamed yet if range_start < new_frame_start: diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 31d2fd1fd5..9cbfb61550 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ 
b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -77,7 +77,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Project name from workfile context project_name = context.data["workfile_context"]["project"] - # Host name from environemnt variable + # Host name from environment variable host_name = os.environ["AVALON_APP"] # Use empty variant value variant = "" diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py index 68ba350a85..89348037d3 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py @@ -35,7 +35,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): # Project name from workfile context project_name = context.data["workfile_context"]["project"] - # Host name from environemnt variable + # Host name from environment variable host_name = os.environ["AVALON_APP"] # Use empty variant value variant = "" diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index b6b8bd0d9e..729c545545 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -168,7 +168,7 @@ class ExtractSequence(pyblish.api.Extractor): if single_file: repre_files = repre_files[0] - # Extension is harcoded + # Extension is hardcoded # - changing extension would require change code new_repre = { "name": "png", @@ -235,7 +235,7 @@ class ExtractSequence(pyblish.api.Extractor): scene_bg_color (list): Bg color set in scene. Result of george script command `tv_background`. - Retruns: + Returns: tuple: With 2 items first is list of filenames second is path to thumbnail. """ @@ -311,7 +311,7 @@ class ExtractSequence(pyblish.api.Extractor): mark_out (int): On which frame index export will end. layers (list): List of layers to be exported. 
- Retruns: + Returns: tuple: With 2 items first is list of filenames second is path to thumbnail. """ diff --git a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py index c9f2434cef..24d6558168 100644 --- a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py +++ b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py @@ -15,7 +15,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin): def process(self, context): assert all(result["success"] for result in context.data["results"]), ( - "Publishing not succesfull so version is not increased.") + "Publishing not successful so version is not increased.") path = context.data["currentFile"] workio.save_file(version_up(path)) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index 9d55bb21a9..f45247ceac 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -44,7 +44,7 @@ class ValidateMarks(pyblish.api.ContextPlugin): handle_start = context.data["handleStart"] handle_end = context.data["handleEnd"] - # Calculate expeted Mark out (Mark In + duration - 1) + # Calculate expected Mark out (Mark In + duration - 1) expected_mark_out = ( scene_mark_in + (frame_end - frame_start) diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md index 03b0a31f51..70a96b2919 100644 --- a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md +++ b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/README.md @@ -3,7 +3,7 @@ README for TVPaint Avalon plugin Introduction ------------ This project is dedicated to integrate Avalon functionality to TVPaint. -This implementaiton is using TVPaint plugin (C/C++) which can communicate with python process. 
The communication should allow to trigger tools or pipeline functions from TVPaint and accept requests from python process at the same time. +This implementation is using TVPaint plugin (C/C++) which can communicate with python process. The communication should allow to trigger tools or pipeline functions from TVPaint and accept requests from python process at the same time. Current implementation is based on websocket protocol, using json-rpc communication (specification 2.0). Project is in beta stage, tested only on Windows. diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp index a57124084b..bb67715cbd 100644 --- a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp +++ b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp @@ -41,7 +41,7 @@ static struct { nlohmann::json menuItemsById; std::list menuItemsIds; // Messages from server before processing. - // - messages can't be process at the moment of recieve as client is running in thread + // - messages can't be process at the moment of receive as client is running in thread std::queue messages; // Responses to requests mapped by request id std::map responses; @@ -694,7 +694,7 @@ int newMenuItemsProcess(PIFilter* iFilter) { return 1; } /**************************************************************************************/ -// something happenned that needs our attention. +// something happened that needs our attention. 
// Global variable where current button up data are stored std::string button_up_item_id_str; int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iArgs ) diff --git a/openpype/hosts/tvpaint/worker/worker_job.py b/openpype/hosts/tvpaint/worker/worker_job.py index 519d42ce73..1c785ab2ee 100644 --- a/openpype/hosts/tvpaint/worker/worker_job.py +++ b/openpype/hosts/tvpaint/worker/worker_job.py @@ -41,7 +41,7 @@ class BaseCommand: Command also have id which is created on command creation. The idea is that command is just a data container on sender side send - througth server to a worker where is replicated one by one, executed and + through server to a worker where is replicated one by one, executed and result sent back to sender through server. """ @abstractproperty @@ -248,7 +248,7 @@ class ExecuteGeorgeScript(BaseCommand): class CollectSceneData(BaseCommand): - """Helper command which will collect all usefull info about workfile. + """Helper command which will collect all useful info about workfile. Result is dictionary with all layers data, exposure frames by layer ids pre/post behavior of layers by their ids, group information and scene data. diff --git a/openpype/hosts/unreal/api/lib.py b/openpype/hosts/unreal/api/lib.py index c0fafbb667..61dac46fac 100644 --- a/openpype/hosts/unreal/api/lib.py +++ b/openpype/hosts/unreal/api/lib.py @@ -115,7 +115,7 @@ def _darwin_get_engine_version() -> dict: Returns: dict: version as a key and path as a value. - See Aslo: + See Also: :func:`_win_get_engine_versions`. 
""" diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py index ad37a7a068..e2023e8b47 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py +++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py @@ -98,7 +98,7 @@ class PointCacheAlembicLoader(api.Loader): frame_start = context.get('asset').get('data').get('frameStart') frame_end = context.get('asset').get('data').get('frameEnd') - # If frame start and end are the same, we increse the end frame by + # If frame start and end are the same, we increase the end frame by # one, otherwise Unreal will not import it if frame_start == frame_end: frame_end += 1 diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py index a710fcb3e8..062c5ce0da 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py @@ -12,7 +12,7 @@ from openpype.lib.plugin_tools import ( parse_json, get_batch_asset_task_info ) -from openpype.lib.remote_publish import get_webpublish_conn +from openpype.lib.remote_publish import get_webpublish_conn, IN_PROGRESS_STATUS class CollectBatchData(pyblish.api.ContextPlugin): @@ -74,7 +74,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): dbcon.update_one( { "batch_id": batch_id, - "status": "in_progress" + "status": IN_PROGRESS_STATUS }, { "$set": { diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index d2754b3df3..c1b1d66cb8 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -21,6 +21,11 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): This collector will try to find json 
files in provided `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + This covers 'basic' webpublishes, eg artists uses Standalone Publisher to + publish rendered frames or assets. + + This is not applicable for 'studio' processing where host application is + called to process uploaded workfile and render frames itself. """ # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.490 diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py index 976a14e808..92f581be5f 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py @@ -28,7 +28,7 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): render_layer_pass_name = "beauty" # Set by settings - # Regex must constain 'layer' and 'variant' groups which are extracted from + # Regex must contain 'layer' and 'variant' groups which are extracted from # name when instances are created layer_name_regex = r"(?PL[0-9]{3}_\w+)_(?P.+)" diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py b/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py index 85c8526c83..2142d740a5 100644 --- a/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py +++ b/openpype/hosts/webpublisher/plugins/publish/extract_tvpaint_workfile.py @@ -286,7 +286,7 @@ class ExtractTVPaintSequences(pyblish.api.Extractor): if single_file: repre_files = repre_files[0] - # Extension is harcoded + # Extension is hardcoded # - changing extension would require change code new_repre = { "name": "png", @@ -407,7 +407,7 @@ class ExtractTVPaintSequences(pyblish.api.Extractor): mark_out (int): On which frame index export will end. layers (list): List of layers to be exported. 
- Retruns: + Returns: tuple: With 2 items first is list of filenames second is path to thumbnail. """ diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py index 30399a6ba7..e2d041b512 100644 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py @@ -11,10 +11,14 @@ from avalon.api import AvalonMongoDB from openpype.lib import OpenPypeMongoConnection from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint -from openpype.lib.remote_publish import get_task_data from openpype.settings import get_project_settings from openpype.lib import PypeLogger +from openpype.lib.remote_publish import ( + get_task_data, + ERROR_STATUS, + REPROCESS_STATUS +) log = PypeLogger.get_logger("WebServer") @@ -61,7 +65,7 @@ class OpenPypeRestApiResource(RestApiResource): self.dbcon = mongo_client[database_name]["webpublishes"] -class WebpublisherProjectsEndpoint(_RestApiEndpoint): +class ProjectsEndpoint(_RestApiEndpoint): """Returns list of dict with project info (id, name).""" async def get(self) -> Response: output = [] @@ -82,7 +86,7 @@ class WebpublisherProjectsEndpoint(_RestApiEndpoint): ) -class WebpublisherHiearchyEndpoint(_RestApiEndpoint): +class HiearchyEndpoint(_RestApiEndpoint): """Returns dictionary with context tree from assets.""" async def get(self, project_name) -> Response: query_projection = { @@ -181,7 +185,7 @@ class TaskNode(Node): self["attributes"] = {} -class WebpublisherBatchPublishEndpoint(_RestApiEndpoint): +class BatchPublishEndpoint(_RestApiEndpoint): """Triggers headless publishing of batch.""" async def post(self, request) -> Response: # Validate existence of openpype executable @@ -190,7 +194,7 @@ class WebpublisherBatchPublishEndpoint(_RestApiEndpoint): msg = "Non existent OpenPype executable {}".format(openpype_app) raise RuntimeError(msg) - 
log.info("WebpublisherBatchPublishEndpoint called") + log.info("BatchPublishEndpoint called") content = await request.json() # Each filter have extensions which are checked on first task item @@ -286,7 +290,7 @@ class WebpublisherBatchPublishEndpoint(_RestApiEndpoint): ) -class WebpublisherTaskPublishEndpoint(_RestApiEndpoint): +class TaskPublishEndpoint(_RestApiEndpoint): """Prepared endpoint triggered after each task - for future development.""" async def post(self, request) -> Response: return Response( @@ -301,21 +305,37 @@ class BatchStatusEndpoint(_RestApiEndpoint): async def get(self, batch_id) -> Response: output = self.dbcon.find_one({"batch_id": batch_id}) + if output: + status = 200 + else: + output = {"msg": "Batch id {} not found".format(batch_id), + "status": "queued", + "progress": 0} + status = 404 + body = self.resource.encode(output) return Response( - status=200, - body=self.resource.encode(output), + status=status, + body=body, content_type="application/json" ) -class PublishesStatusEndpoint(_RestApiEndpoint): +class UserReportEndpoint(_RestApiEndpoint): """Returns list of dict with batch info for user (email address).""" async def get(self, user) -> Response: - output = list(self.dbcon.find({"user": user})) + output = list(self.dbcon.find({"user": user}, + projection={"log": False})) + + if output: + status = 200 + else: + output = {"msg": "User {} not found".format(user)} + status = 404 + body = self.resource.encode(output) return Response( - status=200, - body=self.resource.encode(output), + status=status, + body=body, content_type="application/json" ) @@ -335,7 +355,7 @@ class ConfiguredExtensionsEndpoint(_RestApiEndpoint): configured = { "file_exts": set(), "sequence_exts": set(), - # workfiles that could have "Studio Procesing" hardcoded for now + # workfiles that could have "Studio Processing" hardcoded for now "studio_exts": set(["psd", "psb", "tvpp", "tvp"]) } collect_conf = sett["webpublisher"]["publish"]["CollectPublishedFiles"] @@ 
-351,3 +371,28 @@ class ConfiguredExtensionsEndpoint(_RestApiEndpoint): body=self.resource.encode(dict(configured)), content_type="application/json" ) + + +class BatchReprocessEndpoint(_RestApiEndpoint): + """Marks latest 'batch_id' for reprocessing, returns 404 if not found.""" + async def post(self, batch_id) -> Response: + batches = self.dbcon.find({"batch_id": batch_id, + "status": ERROR_STATUS}).sort("_id", -1) + + if batches: + self.dbcon.update_one( + {"_id": batches[0]["_id"]}, + {"$set": {"status": REPROCESS_STATUS}} + ) + output = [{"msg": "Batch id {} set to reprocess".format(batch_id)}] + status = 200 + else: + output = [{"msg": "Batch id {} not found".format(batch_id)}] + status = 404 + body = self.resource.encode(output) + + return Response( + status=status, + body=body, + content_type="application/json" + ) diff --git a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py index c96ad8e110..909ea38bc6 100644 --- a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py +++ b/openpype/hosts/webpublisher/webserver_service/webserver_cli.py @@ -11,13 +11,19 @@ from openpype.lib import PypeLogger from .webpublish_routes import ( RestApiResource, OpenPypeRestApiResource, - WebpublisherBatchPublishEndpoint, - WebpublisherTaskPublishEndpoint, - WebpublisherHiearchyEndpoint, - WebpublisherProjectsEndpoint, + HiearchyEndpoint, + ProjectsEndpoint, + ConfiguredExtensionsEndpoint, + BatchPublishEndpoint, + BatchReprocessEndpoint, BatchStatusEndpoint, - PublishesStatusEndpoint, - ConfiguredExtensionsEndpoint + TaskPublishEndpoint, + UserReportEndpoint +) +from openpype.lib.remote_publish import ( + ERROR_STATUS, + REPROCESS_STATUS, + SENT_REPROCESSING_STATUS ) @@ -41,14 +47,14 @@ def run_webserver(*args, **kwargs): upload_dir=kwargs["upload_dir"], executable=kwargs["executable"], studio_task_queue=studio_task_queue) - projects_endpoint = WebpublisherProjectsEndpoint(resource) + 
projects_endpoint = ProjectsEndpoint(resource) server_manager.add_route( "GET", "/api/projects", projects_endpoint.dispatch ) - hiearchy_endpoint = WebpublisherHiearchyEndpoint(resource) + hiearchy_endpoint = HiearchyEndpoint(resource) server_manager.add_route( "GET", "/api/hierarchy/{project_name}", @@ -64,7 +70,7 @@ def run_webserver(*args, **kwargs): # triggers publish webpublisher_task_publish_endpoint = \ - WebpublisherBatchPublishEndpoint(resource) + BatchPublishEndpoint(resource) server_manager.add_route( "POST", "/api/webpublish/batch", @@ -72,7 +78,7 @@ def run_webserver(*args, **kwargs): ) webpublisher_batch_publish_endpoint = \ - WebpublisherTaskPublishEndpoint(resource) + TaskPublishEndpoint(resource) server_manager.add_route( "POST", "/api/webpublish/task", @@ -88,13 +94,21 @@ def run_webserver(*args, **kwargs): batch_status_endpoint.dispatch ) - user_status_endpoint = PublishesStatusEndpoint(openpype_resource) + user_status_endpoint = UserReportEndpoint(openpype_resource) server_manager.add_route( "GET", "/api/publishes/{user}", user_status_endpoint.dispatch ) + webpublisher_batch_reprocess_endpoint = \ + BatchReprocessEndpoint(openpype_resource) + server_manager.add_route( + "POST", + "/api/webpublish/reprocess/{batch_id}", + webpublisher_batch_reprocess_endpoint.dispatch + ) + server_manager.start_server() last_reprocessed = time.time() while True: @@ -116,8 +130,12 @@ def reprocess_failed(upload_dir, webserver_url): database_name = os.environ["OPENPYPE_DATABASE_NAME"] dbcon = mongo_client[database_name]["webpublishes"] - results = dbcon.find({"status": "reprocess"}) + results = dbcon.find({"status": REPROCESS_STATUS}) + reprocessed_batches = set() for batch in results: + if batch["batch_id"] in reprocessed_batches: + continue + batch_url = os.path.join(upload_dir, batch["batch_id"], "manifest.json") @@ -130,8 +148,8 @@ def reprocess_failed(upload_dir, webserver_url): {"$set": { "finish_date": datetime.now(), - "status": "error", - "progress": 1, + 
"status": ERROR_STATUS, + "progress": 100, "log": batch.get("log") + msg }} ) @@ -141,18 +159,24 @@ def reprocess_failed(upload_dir, webserver_url): with open(batch_url) as f: data = json.loads(f.read()) + dbcon.update_many( + { + "batch_id": batch["batch_id"], + "status": {"$in": [ERROR_STATUS, REPROCESS_STATUS]} + }, + { + "$set": { + "finish_date": datetime.now(), + "status": SENT_REPROCESSING_STATUS, + "progress": 100 + } + } + ) + try: r = requests.post(server_url, json=data) log.info("response{}".format(r)) except Exception: log.info("exception", exc_info=True) - dbcon.update_one( - {"_id": batch["_id"]}, - {"$set": - { - "finish_date": datetime.now(), - "status": "sent_for_reprocessing", - "progress": 1 - }} - ) + reprocessed_batches.add(batch["batch_id"]) diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index 62d204186d..1c8f7a57af 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -169,6 +169,7 @@ from .editorial import ( ) from .openpype_version import ( + op_version_control_available, get_openpype_version, get_build_version, get_expected_version, @@ -306,6 +307,7 @@ __all__ = [ "create_workdir_extra_folders", "get_project_basic_paths", + "op_version_control_available", "get_openpype_version", "get_build_version", "get_expected_version", diff --git a/openpype/lib/abstract_collect_render.py b/openpype/lib/abstract_collect_render.py index 2ac0fe434d..d9c8a0993d 100644 --- a/openpype/lib/abstract_collect_render.py +++ b/openpype/lib/abstract_collect_render.py @@ -49,7 +49,7 @@ class RenderInstance(object): handleStart = attr.ib(default=None) # start frame handleEnd = attr.ib(default=None) # start frame - # for softwares (like Harmony) where frame range cannot be set by DB + # for software (like Harmony) where frame range cannot be set by DB # handles need to be propagated if exist ignoreFrameHandleCheck = attr.ib(default=False) @@ -57,7 +57,7 @@ class RenderInstance(object): # With default values # metadata renderer = 
attr.ib(default="") # renderer - can be used in Deadline - review = attr.ib(default=False) # genereate review from instance (bool) + review = attr.ib(default=False) # generate review from instance (bool) priority = attr.ib(default=50) # job priority on farm family = attr.ib(default="renderlayer") diff --git a/openpype/lib/abstract_submit_deadline.py b/openpype/lib/abstract_submit_deadline.py index 5b6e1743e0..a0925283ac 100644 --- a/openpype/lib/abstract_submit_deadline.py +++ b/openpype/lib/abstract_submit_deadline.py @@ -485,7 +485,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): def get_aux_files(self): """Return list of auxiliary files for Deadline job. - If needed this should be overriden, otherwise return empty list as + If needed this should be overridden, otherwise return empty list as that field even empty must be present on Deadline submission. Returns: diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py index 5f7285fe6c..fa81a18ff7 100644 --- a/openpype/lib/anatomy.py +++ b/openpype/lib/anatomy.py @@ -125,7 +125,7 @@ class Anatomy: @staticmethod def _prepare_anatomy_data(anatomy_data): - """Prepare anatomy data for futher processing. + """Prepare anatomy data for further processing. Method added to replace `{task}` with `{task[name]}` in templates. """ @@ -722,7 +722,7 @@ class Templates: First is collecting all global keys (keys in top hierarchy where value is not dictionary). All global keys are set for all group keys (keys in top hierarchy where value is dictionary). Value of a key is not - overriden in group if already contain value for the key. + overridden in group if already contain value for the key. In second part all keys with "at" symbol in value are replaced with value of the key afterward "at" symbol from the group. 
@@ -802,7 +802,7 @@ class Templates: Result: tuple: Contain origin template without missing optional keys and - withoud optional keys identificator ("<" and ">"), information + without optional keys identificator ("<" and ">"), information about missing optional keys and invalid types of optional keys. """ @@ -1628,7 +1628,7 @@ class Roots: This property returns roots for current project or default root values. Warning: Default roots value may cause issues when project use different - roots settings. That may happend when project use multiroot + roots settings. That may happen when project use multiroot templates but default roots miss their keys. """ if self.project_name != self.loaded_project: diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index d0438e12a6..0e1f44391e 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -47,7 +47,7 @@ DEFAULT_ENV_SUBGROUP = "standard" def parse_environments(env_data, env_group=None, platform_name=None): - """Parse environment values from settings byt group and platfrom. + """Parse environment values from settings byt group and platform. Data may contain up to 2 hierarchical levels of dictionaries. At the end of the last level must be string or list. List is joined using platform @@ -261,7 +261,7 @@ class Application: data (dict): Data for the version containing information about executables, variant label or if is enabled. Only required key is `executables`. - group (ApplicationGroup): App group object that created the applicaiton + group (ApplicationGroup): App group object that created the application and under which application belongs. """ @@ -775,7 +775,7 @@ class PostLaunchHook(LaunchHook): class ApplicationLaunchContext: """Context of launching application. - Main purpose of context is to prepare launch arguments and keword arguments + Main purpose of context is to prepare launch arguments and keyword arguments for new process. 
Most important part of keyword arguments preparations are environment variables. @@ -969,7 +969,7 @@ class ApplicationLaunchContext: hook = klass(self) if not hook.is_valid: self.log.debug( - "Hook is not valid for curent launch context." + "Hook is not valid for current launch context." ) continue @@ -1113,7 +1113,7 @@ class ApplicationLaunchContext: )) # TODO how to handle errors? - # - store to variable to let them accesible? + # - store to variable to let them accessible? try: postlaunch_hook.execute() @@ -1357,11 +1357,11 @@ def apply_project_environments_value( ): """Apply project specific environments on passed environments. - The enviornments are applied on passed `env` argument value so it is not + The environments are applied on passed `env` argument value so it is not required to apply changes back. Args: - project_name (str): Name of project for which environemnts should be + project_name (str): Name of project for which environments should be received. env (dict): Environment values on which project specific environments will be applied. @@ -1391,7 +1391,7 @@ def apply_project_environments_value( def prepare_context_environments(data, env_group=None): - """Modify launch environemnts with context data for launched host. + """Modify launch environments with context data for launched host. Args: data (EnvironmentPrepData): Dictionary where result and intermediate @@ -1463,7 +1463,7 @@ def prepare_context_environments(data, env_group=None): "AVALON_WORKDIR": workdir } log.debug( - "Context environemnts set:\n{}".format( + "Context environments set:\n{}".format( json.dumps(context_env, indent=4) ) ) @@ -1567,7 +1567,7 @@ def should_start_last_workfile( ): """Define if host should start last version workfile if possible. - Default output is `False`. Can be overriden with environment variable + Default output is `False`. 
Can be overridden with environment variable `AVALON_OPEN_LAST_WORKFILE`, valid values without case sensitivity are `"0", "1", "true", "false", "yes", "no"`. @@ -1617,7 +1617,7 @@ def should_workfile_tool_start( ): """Define if host should start workfile tool at host launch. - Default output is `False`. Can be overriden with environment variable + Default output is `False`. Can be overridden with environment variable `OPENPYPE_WORKFILE_TOOL_ON_START`, valid values without case sensitivity are `"0", "1", "true", "false", "yes", "no"`. diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 8180e416a9..1254580657 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -443,7 +443,7 @@ def get_workfile_template_key( Function is using profiles from project settings to return right template for passet task type and host name. - One of 'project_name' or 'project_settings' must be passed it is preffered + One of 'project_name' or 'project_settings' must be passed it is preferred to pass settings if are already available. Args: @@ -545,7 +545,7 @@ def get_workdir_with_workdir_data( """Fill workdir path from entered data and project's anatomy. It is possible to pass only project's name instead of project's anatomy but - one of them **must** be entered. It is preffered to enter anatomy if is + one of them **must** be entered. It is preferred to enter anatomy if is available as initialization of a new Anatomy object may be time consuming. Args: @@ -582,7 +582,7 @@ def get_workdir_with_workdir_data( ) anatomy_filled = anatomy.format(workdir_data) - # Output is TemplateResult object which contain usefull data + # Output is TemplateResult object which contain useful data return anatomy_filled[template_key]["folder"] @@ -604,7 +604,7 @@ def get_workdir( because workdir template may contain `{app}` key. In `Session` is stored under `AVALON_APP` key. anatomy (Anatomy): Optional argument. 
Anatomy object is created using - project name from `project_doc`. It is preffered to pass this + project name from `project_doc`. It is preferred to pass this argument as initialization of a new Anatomy object may be time consuming. template_key (str): Key of work templates in anatomy templates. Default @@ -619,7 +619,7 @@ def get_workdir( workdir_data = get_workdir_data( project_doc, asset_doc, task_name, host_name ) - # Output is TemplateResult object which contain usefull data + # Output is TemplateResult object which contain useful data return get_workdir_with_workdir_data( workdir_data, anatomy, template_key=template_key ) @@ -1036,7 +1036,7 @@ class BuildWorkfile: return valid_profiles def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset byt it's data. + """Select profile for each subset by it's data. Profiles are filtered for each subset individually. Profile is filtered by subset's family, optionally by name regex and @@ -1197,7 +1197,7 @@ class BuildWorkfile: Representations are tried to load by names defined in configuration. If subset has representation matching representation name each loader is tried to load it until any is successful. If none of them was - successful then next reprensentation name is tried. + successful then next representation name is tried. Subset process loop ends when any representation is loaded or all matching representations were already tried. @@ -1240,7 +1240,7 @@ class BuildWorkfile: print("representations", representations) - # Load ordered reprensentations. + # Load ordered representations. 
for subset_id, repres in representations_ordered: subset_name = subsets_by_id[subset_id]["name"] diff --git a/openpype/lib/editorial.py b/openpype/lib/editorial.py index 8e8e365bdb..bf868953ea 100644 --- a/openpype/lib/editorial.py +++ b/openpype/lib/editorial.py @@ -116,7 +116,7 @@ def range_from_frames(start, duration, fps): fps (float): frame range Returns: - otio._ot._ot.TimeRange: crated range + otio._ot._ot.TimeRange: created range """ return _ot.TimeRange( @@ -131,7 +131,7 @@ def frames_to_secons(frames, framerate): Args: frames (int): frame - framerate (flaot): frame rate + framerate (float): frame rate Returns: float: second value @@ -257,7 +257,7 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end): ((source_range.duration.value - 1) * abs( time_scalar)) + offset_out)) - # calculate available hanles + # calculate available handles if (media_in_trimmed - media_in) < handle_start: handle_start = (media_in_trimmed - media_in) if (media_out - media_out_trimmed) < handle_end: diff --git a/openpype/lib/env_tools.py b/openpype/lib/env_tools.py index ede14e00b2..6521d20f1e 100644 --- a/openpype/lib/env_tools.py +++ b/openpype/lib/env_tools.py @@ -28,11 +28,11 @@ def env_value_to_bool(env_key=None, value=None, default=False): def get_paths_from_environ(env_key=None, env_value=None, return_first=False): - """Return existing paths from specific envirnment variable. + """Return existing paths from specific environment variable. Args: env_key (str): Environment key where should look for paths. - env_value (str): Value of environemnt variable. Argument `env_key` is + env_value (str): Value of environment variable. Argument `env_key` is skipped if this argument is entered. return_first (bool): Return first found value or return list of found paths. `None` or empty list returned if nothing found. 
diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index 3cf67a379c..afde844f2d 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -79,7 +79,7 @@ def run_subprocess(*args, **kwargs): Args: *args: Variable length arument list passed to Popen. - **kwargs : Arbitary keyword arguments passed to Popen. Is possible to + **kwargs : Arbitrary keyword arguments passed to Popen. Is possible to pass `logging.Logger` object under "logger" if want to use different than lib's logger. @@ -119,7 +119,7 @@ def run_subprocess(*args, **kwargs): if _stderr: _stderr = _stderr.decode("utf-8") - # Add additional line break if output already containt stdout + # Add additional line break if output already contains stdout if full_output: full_output += "\n" full_output += _stderr diff --git a/openpype/lib/git_progress.py b/openpype/lib/git_progress.py index e9cf9a12e1..331b7b6745 100644 --- a/openpype/lib/git_progress.py +++ b/openpype/lib/git_progress.py @@ -33,7 +33,7 @@ class _GitProgress(git.remote.RemoteProgress): self._t.close() def _detroy_tqdm(self): - """ Used to close tqdm when opration ended. + """ Used to close tqdm when operation ended. """ if self._t is not None: diff --git a/openpype/lib/import_utils.py b/openpype/lib/import_utils.py index 4e72618803..e88c07fca6 100644 --- a/openpype/lib/import_utils.py +++ b/openpype/lib/import_utils.py @@ -14,7 +14,7 @@ def discover_host_vendor_module(module_name): pype_root, "hosts", host, "vendor", main_module) log.debug( - "Importing moduel from host vendor path: `{}`".format(module_path)) + "Importing module from host vendor path: `{}`".format(module_path)) if not os.path.exists(module_path): log.warning( diff --git a/openpype/lib/mongo.py b/openpype/lib/mongo.py index 7e0bd4f796..c08e76c75c 100644 --- a/openpype/lib/mongo.py +++ b/openpype/lib/mongo.py @@ -24,7 +24,7 @@ def _decompose_url(url): validation pass. 
""" # Use first url from passed url - # - this is beacuse it is possible to pass multiple urls for multiple + # - this is because it is possible to pass multiple urls for multiple # replica sets which would crash on urlparse otherwise # - please don't use comma in username of password url = url.split(",")[0] diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py index 12e9e2db9c..c0b78c5724 100644 --- a/openpype/lib/path_tools.py +++ b/openpype/lib/path_tools.py @@ -116,10 +116,10 @@ def get_last_version_from_path(path_dir, filter): filtred_files = list() # form regex for filtering - patern = r".*".join(filter) + pattern = r".*".join(filter) for file in os.listdir(path_dir): - if not re.findall(patern, file): + if not re.findall(pattern, file): continue filtred_files.append(file) diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 7c66f9760d..183aad939a 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -164,7 +164,7 @@ def prepare_template_data(fill_pairs): """ Prepares formatted data for filling template. - It produces mutliple variants of keys (key, Key, KEY) to control + It produces multiple variants of keys (key, Key, KEY) to control format of filled template. Args: @@ -288,7 +288,7 @@ def set_plugin_attributes_from_settings( if project_name is None: project_name = os.environ.get("AVALON_PROJECT") - # map plugin superclass to preset json. Currenly suppoted is load and + # map plugin superclass to preset json. 
Currently supported is load and # create (avalon.api.Loader and avalon.api.Creator) plugin_type = None if superclass.__name__.split(".")[-1] in ("Loader", "SubsetLoader"): diff --git a/openpype/lib/remote_publish.py b/openpype/lib/remote_publish.py index 8074b2d112..9632e63ea0 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/lib/remote_publish.py @@ -11,6 +11,13 @@ from openpype import uninstall from openpype.lib.mongo import OpenPypeMongoConnection from openpype.lib.plugin_tools import parse_json +ERROR_STATUS = "error" +IN_PROGRESS_STATUS = "in_progress" +REPROCESS_STATUS = "reprocess" +SENT_REPROCESSING_STATUS = "sent_for_reprocessing" +FINISHED_REPROCESS_STATUS = "republishing_finished" +FINISHED_OK_STATUS = "finished_ok" + def headless_publish(log, close_plugin_name=None, is_test=False): """Runs publish in a opened host with a context and closes Python process. @@ -26,7 +33,7 @@ def headless_publish(log, close_plugin_name=None, is_test=False): "batch will be unfinished!") return - publish_and_log(dbcon, _id, log, close_plugin_name) + publish_and_log(dbcon, _id, log, close_plugin_name=close_plugin_name) else: publish(log, close_plugin_name) @@ -52,8 +59,8 @@ def start_webpublish_log(dbcon, batch_id, user): "batch_id": batch_id, "start_date": datetime.now(), "user": user, - "status": "in_progress", - "progress": 0.0 + "status": IN_PROGRESS_STATUS, + "progress": 0 # integer 0-100, percentage }).inserted_id @@ -84,18 +91,20 @@ def publish(log, close_plugin_name=None): sys.exit(1) -def publish_and_log(dbcon, _id, log, close_plugin_name=None): +def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): """Loops through all plugins, logs ok and fails into OP DB. Args: dbcon (OpenPypeMongoConnection) - _id (str) + _id (str) - id of current job in DB log (OpenPypeLogger) + batch_id (str) - id sent from frontend close_plugin_name (str): name of plugin with responsibility to close host app """ # Error exit as soon as any error occurs. 
- error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" + error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}\n" + error_format += "-" * 80 + "\n" close_plugin = _get_close_plugin(close_plugin_name, log) @@ -103,21 +112,24 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None): _id = ObjectId(_id) log_lines = [] + processed = 0 + log_every = 5 for result in pyblish.util.publish_iter(): for record in result["records"]: log_lines.append("{}: {}".format( result["plugin"].label, record.msg)) + processed += 1 if result["error"]: log.error(error_format.format(**result)) uninstall() - log_lines.append(error_format.format(**result)) + log_lines = [error_format.format(**result)] + log_lines dbcon.update_one( {"_id": _id}, {"$set": { "finish_date": datetime.now(), - "status": "error", + "status": ERROR_STATUS, "log": os.linesep.join(log_lines) }} @@ -126,26 +138,42 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None): context = pyblish.api.Context() close_plugin().process(context) sys.exit(1) - else: + elif processed % log_every == 0: + # pyblish returns progress in 0.0 - 2.0 + progress = min(round(result["progress"] / 2 * 100), 99) dbcon.update_one( {"_id": _id}, {"$set": { - "progress": max(result["progress"], 0.95), + "progress": progress, "log": os.linesep.join(log_lines) }} ) # final update + if batch_id: + dbcon.update_many( + {"batch_id": batch_id, "status": SENT_REPROCESSING_STATUS}, + { + "$set": + { + "finish_date": datetime.now(), + "status": FINISHED_REPROCESS_STATUS, + } + } + ) + dbcon.update_one( {"_id": _id}, - {"$set": - { - "finish_date": datetime.now(), - "status": "finished_ok", - "progress": 1, - "log": os.linesep.join(log_lines) - }} + { + "$set": + { + "finish_date": datetime.now(), + "status": FINISHED_OK_STATUS, + "progress": 100, + "log": os.linesep.join(log_lines) + } + } ) @@ -162,7 +190,7 @@ def fail_batch(_id, batches_in_progress, dbcon): {"$set": { "finish_date": datetime.now(), - 
"status": "error", + "status": ERROR_STATUS, "log": msg }} diff --git a/openpype/lib/terminal.py b/openpype/lib/terminal.py index ddc917ac4e..bc0744931a 100644 --- a/openpype/lib/terminal.py +++ b/openpype/lib/terminal.py @@ -130,7 +130,7 @@ class Terminal: def _multiple_replace(text, adict): """Replace multiple tokens defined in dict. - Find and replace all occurances of strings defined in dict is + Find and replace all occurrences of strings defined in dict is supplied string. Args: diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py index d3cc0ad971..676dd80e93 100644 --- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_delete_asset.py @@ -163,24 +163,27 @@ class DeleteAssetSubset(BaseAction): if not selected_av_entities: return { - "success": False, - "message": "Didn't found entities in avalon" + "success": True, + "message": ( + "Didn't found entities in avalon." + " You can use Ftrack's Delete button for the selection." 
+ ) } # Remove cached action older than 2 minutes old_action_ids = [] - for id, data in self.action_data_by_id.items(): + for action_id, data in self.action_data_by_id.items(): created_at = data.get("created_at") if not created_at: - old_action_ids.append(id) + old_action_ids.append(action_id) continue cur_time = datetime.now() existing_in_sec = (created_at - cur_time).total_seconds() if existing_in_sec > 60 * 2: - old_action_ids.append(id) + old_action_ids.append(action_id) - for id in old_action_ids: - self.action_data_by_id.pop(id, None) + for action_id in old_action_ids: + self.action_data_by_id.pop(action_id, None) # Store data for action id action_id = str(uuid.uuid1()) @@ -439,7 +442,11 @@ class DeleteAssetSubset(BaseAction): subsets_to_delete = to_delete.get("subsets") or [] # Convert asset ids to ObjectId obj - assets_to_delete = [ObjectId(id) for id in assets_to_delete if id] + assets_to_delete = [ + ObjectId(asset_id) + for asset_id in assets_to_delete + if asset_id + ] subset_ids_by_parent = spec_data["subset_ids_by_parent"] subset_ids_by_name = spec_data["subset_ids_by_name"] @@ -468,9 +475,8 @@ class DeleteAssetSubset(BaseAction): if not ftrack_id: ftrack_id = asset["data"].get("ftrackId") - if not ftrack_id: - continue - ftrack_ids_to_delete.append(ftrack_id) + if ftrack_id: + ftrack_ids_to_delete.append(ftrack_id) children_queue = collections.deque() for mongo_id in assets_to_delete: @@ -569,12 +575,12 @@ class DeleteAssetSubset(BaseAction): exc_info=True ) - if not_deleted_entities_id: - joined_not_deleted = ", ".join([ + if not_deleted_entities_id and asset_names_to_delete: + joined_not_deleted = ",".join([ "\"{}\"".format(ftrack_id) for ftrack_id in not_deleted_entities_id ]) - joined_asset_names = ", ".join([ + joined_asset_names = ",".join([ "\"{}\"".format(name) for name in asset_names_to_delete ]) @@ -613,6 +619,25 @@ class DeleteAssetSubset(BaseAction): joined_ids_to_delete ) ).all() + # Find all children entities and add them to list + # - 
Delete tasks first then their parents and continue + parent_ids_to_delete = [ + entity["id"] + for entity in to_delete_entities + ] + while parent_ids_to_delete: + joined_parent_ids_to_delete = ",".join([ + "\"{}\"".format(ftrack_id) + for ftrack_id in parent_ids_to_delete + ]) + _to_delete = session.query(( + "select id, link from TypedContext where parent_id in ({})" + ).format(joined_parent_ids_to_delete)).all() + parent_ids_to_delete = [] + for entity in _to_delete: + parent_ids_to_delete.append(entity["id"]) + to_delete_entities.append(entity) + entities_by_link_len = collections.defaultdict(list) for entity in to_delete_entities: entities_by_link_len[len(entity["link"])].append(entity) diff --git a/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py b/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py index c603a2d200..334519b4bb 100644 --- a/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py +++ b/openpype/modules/default_modules/ftrack/event_handlers_user/action_djvview.py @@ -1,6 +1,8 @@ import os +import time import subprocess from operator import itemgetter +from openpype.lib import ApplicationManager from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -23,15 +25,25 @@ class DJVViewAction(BaseAction): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.djv_path = self.find_djv_path() + self.application_manager = ApplicationManager() + self._last_check = time.time() + self._check_interval = 10 - def preregister(self): - if self.djv_path is None: - return ( - 'DJV View is not installed' - ' or paths in presets are not set correctly' - ) - return True + def _get_djv_apps(self): + app_group = self.application_manager.app_groups["djvview"] + + output = [] + for app in app_group: + executable = app.find_executable() + if executable is not None: + output.append(app) + return output + + def get_djv_apps(self): + cur_time = time.time() + if 
(cur_time - self._last_check) > self._check_interval: + self.application_manager.refresh() + return self._get_djv_apps() def discover(self, session, entities, event): """Return available actions based on *event*. """ @@ -40,15 +52,13 @@ class DJVViewAction(BaseAction): return False entityType = selection[0].get("entityType", None) - if entityType in ["assetversion", "task"]: + if entityType not in ["assetversion", "task"]: + return False + + if self.get_djv_apps(): return True return False - def find_djv_path(self): - for path in (os.environ.get("DJV_PATH") or "").split(os.pathsep): - if os.path.exists(path): - return path - def interface(self, session, entities, event): if event['data'].get('values', {}): return @@ -88,7 +98,37 @@ class DJVViewAction(BaseAction): 'message': 'There are no Asset Versions to open.' } - items = [] + # TODO sort them (somehow?) + enum_items = [] + first_value = None + for app in self.get_djv_apps(): + if first_value is None: + first_value = app.full_name + enum_items.append({ + "value": app.full_name, + "label": app.full_label + }) + + if not enum_items: + return { + "success": False, + "message": "Couldn't find DJV executable." 
+ } + + items = [ + { + "type": "enumerator", + "label": "DJV version:", + "name": "djv_app_name", + "data": enum_items, + "value": first_value + }, + { + "type": "label", + "value": "---" + } + ] + version_items = [] base_label = "v{0} - {1} - {2}" default_component = None last_available = None @@ -115,11 +155,11 @@ class DJVViewAction(BaseAction): last_available = file_path if component['name'] == default_component: select_value = file_path - items.append( + version_items.append( {'label': label, 'value': file_path} ) - if len(items) == 0: + if len(version_items) == 0: return { 'success': False, 'message': ( @@ -132,7 +172,7 @@ class DJVViewAction(BaseAction): 'type': 'enumerator', 'name': 'path', 'data': sorted( - items, + version_items, key=itemgetter('label'), reverse=True ) @@ -142,21 +182,37 @@ class DJVViewAction(BaseAction): else: item['value'] = last_available - return {'items': [item]} + items.append(item) + + return {'items': items} def launch(self, session, entities, event): """Callback method for DJVView action.""" # Launching application - if "values" not in event["data"]: + event_data = event["data"] + if "values" not in event_data: return - filpath = event['data']['values']['path'] + + djv_app_name = event_data["djv_app_name"] + app = self.applicaion_manager.applications.get(djv_app_name) + executable = None + if app is not None: + executable = app.find_executable() + + if not executable: + return { + "success": False, + "message": "Couldn't find DJV executable." 
+ } + + filpath = os.path.normpath(event_data["values"]["path"]) cmd = [ # DJV path - os.path.normpath(self.djv_path), + executable, # PATH TO COMPONENT - os.path.normpath(filpath) + filpath ] try: @@ -164,8 +220,8 @@ class DJVViewAction(BaseAction): subprocess.Popen(cmd) except FileNotFoundError: return { - 'success': False, - 'message': 'File "{}" was not found.'.format( + "success": False, + "message": "File \"{}\" was not found.".format( os.path.basename(filpath) ) } diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py index 7ea1c1f323..303490189b 100644 --- a/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py +++ b/openpype/modules/default_modules/ftrack/plugins/publish/collect_username.py @@ -37,16 +37,27 @@ class CollectUsername(pyblish.api.ContextPlugin): os.environ["FTRACK_API_USER"] = os.environ["FTRACK_BOT_API_USER"] os.environ["FTRACK_API_KEY"] = os.environ["FTRACK_BOT_API_KEY"] - for instance in context: - email = instance.data["user_email"] - self.log.info("email:: {}".format(email)) - session = ftrack_api.Session(auto_connect_event_hub=False) - user = session.query("User where email like '{}'".format( - email)) + # for publishes with studio processing + user_email = os.environ.get("USER_EMAIL") + self.log.debug("Email from env:: {}".format(user_email)) + if not user_email: + # for basic webpublishes + for instance in context: + user_email = instance.data.get("user_email") + self.log.debug("Email from instance:: {}".format(user_email)) + break - if not user: - raise ValueError( - "Couldnt find user with {} email".format(email)) + if not user_email: + self.log.info("No email found") + return - os.environ["FTRACK_API_USER"] = user[0].get("username") - break + session = ftrack_api.Session(auto_connect_event_hub=False) + user = session.query("User where email like '{}'".format(user_email)) + + if not user: + raise 
ValueError( + "Couldn't find user with {} email".format(user_email)) + + username = user[0].get("username") + self.log.debug("Resolved ftrack username:: {}".format(username)) + os.environ["FTRACK_API_USER"] = username diff --git a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py index fbd64d9f70..61892240d7 100644 --- a/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py +++ b/openpype/modules/default_modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py @@ -63,7 +63,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate Hierarchy To Ftrack' families = ["shot"] - hosts = ["hiero", "resolve", "standalonepublisher"] + hosts = ["hiero", "resolve", "standalonepublisher", "flame"] optional = False def process(self, context): diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index f7d1c6b4be..efb40407d9 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -13,9 +13,9 @@ class CollectHierarchy(pyblish.api.ContextPlugin): """ label = "Collect Hierarchy" - order = pyblish.api.CollectorOrder - 0.47 + order = pyblish.api.CollectorOrder - 0.076 families = ["shot"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "flame"] def process(self, context): temp_context = {} diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index a35ef47e79..ee7b7957ad 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -12,15 +12,15 @@ import openpype.lib from pprint import pformat -class CollectOcioFrameRanges(pyblish.api.InstancePlugin): +class 
CollectOtioFrameRanges(pyblish.api.InstancePlugin): """Getting otio ranges from otio_clip Adding timeline and source ranges to instance data""" label = "Collect OTIO Frame Ranges" - order = pyblish.api.CollectorOrder - 0.48 + order = pyblish.api.CollectorOrder - 0.08 families = ["shot", "clip"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "flame"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_review.py b/openpype/plugins/publish/collect_otio_review.py index 10ceafdcca..35c77a24cb 100644 --- a/openpype/plugins/publish/collect_otio_review.py +++ b/openpype/plugins/publish/collect_otio_review.py @@ -16,13 +16,13 @@ import pyblish.api from pprint import pformat -class CollectOcioReview(pyblish.api.InstancePlugin): +class CollectOtioReview(pyblish.api.InstancePlugin): """Get matching otio track from defined review layer""" label = "Collect OTIO Review" - order = pyblish.api.CollectorOrder - 0.47 + order = pyblish.api.CollectorOrder - 0.078 families = ["clip"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "flame"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index 571d0d56a4..7c11462ef0 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -14,13 +14,13 @@ import openpype from openpype.lib import editorial -class CollectOcioSubsetResources(pyblish.api.InstancePlugin): +class CollectOtioSubsetResources(pyblish.api.InstancePlugin): """Get Resources for a subset version""" label = "Collect OTIO Subset Resources" - order = pyblish.api.CollectorOrder - 0.47 + order = pyblish.api.CollectorOrder - 0.077 families = ["clip"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "flame"] def process(self, instance): @@ -64,7 +64,7 @@ class 
CollectOcioSubsetResources(pyblish.api.InstancePlugin): a_frame_start_h = media_in - handle_start a_frame_end_h = media_out + handle_end - # create trimmed ocio time range + # create trimmed otio time range trimmed_media_range_h = editorial.range_from_frames( a_frame_start_h, (a_frame_end_h - a_frame_start_h + 1), media_fps diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index 459c66ee43..7ff1b24689 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -48,7 +48,8 @@ class ExtractBurnin(openpype.api.Extractor): "tvpaint", "webpublisher", "aftereffects", - "photoshop" + "photoshop", + "flame" # "resolve" ] optional = True diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py index be0bae5cdc..00c1748cdc 100644 --- a/openpype/plugins/publish/extract_otio_audio_tracks.py +++ b/openpype/plugins/publish/extract_otio_audio_tracks.py @@ -19,7 +19,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): order = pyblish.api.ExtractorOrder - 0.44 label = "Extract OTIO Audio Tracks" - hosts = ["hiero", "resolve"] + hosts = ["hiero", "resolve", "flame"] # FFmpeg tools paths ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") diff --git a/openpype/plugins/publish/extract_otio_review.py b/openpype/plugins/publish/extract_otio_review.py index ed2ba017d5..78570488b3 100644 --- a/openpype/plugins/publish/extract_otio_review.py +++ b/openpype/plugins/publish/extract_otio_review.py @@ -41,7 +41,7 @@ class ExtractOTIOReview(openpype.api.Extractor): order = api.ExtractorOrder - 0.45 label = "Extract OTIO review" families = ["review"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "flame"] # plugin default attributes temp_file_head = "tempFile." 
@@ -85,6 +85,28 @@ class ExtractOTIOReview(openpype.api.Extractor): for index, r_otio_cl in enumerate(otio_review_clips): # QUESTION: what if transition on clip? + # check if resolution is the same + width = self.to_width + height = self.to_height + otio_media = r_otio_cl.media_reference + media_metadata = otio_media.metadata + + # get from media reference metadata source + if media_metadata.get("openpype.source.width"): + width = int(media_metadata.get("openpype.source.width")) + if media_metadata.get("openpype.source.height"): + height = int(media_metadata.get("openpype.source.height")) + + # compare and reset + if width != self.to_width: + self.to_width = width + if height != self.to_height: + self.to_height = height + + self.log.debug("> self.to_width x self.to_height: {} x {}".format( + self.to_width, self.to_height + )) + # get frame range values src_range = r_otio_cl.source_range start = src_range.start_time.value diff --git a/openpype/plugins/publish/extract_otio_trimming_video.py b/openpype/plugins/publish/extract_otio_trimming_video.py index 3e2d39c99c..30b57e2c69 100644 --- a/openpype/plugins/publish/extract_otio_trimming_video.py +++ b/openpype/plugins/publish/extract_otio_trimming_video.py @@ -19,7 +19,7 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): order = api.ExtractorOrder label = "Extract OTIO trim longer video" families = ["trim"] - hosts = ["resolve", "hiero"] + hosts = ["resolve", "hiero", "flame"] def process(self, instance): self.staging_dir = self.staging_dir(instance) diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index be29c7bf9c..d223c31291 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -51,7 +51,8 @@ class ExtractReview(pyblish.api.InstancePlugin): "tvpaint", "resolve", "webpublisher", - "aftereffects" + "aftereffects", + "flame" ] # Supported extensions @@ -292,6 +293,21 @@ class 
ExtractReview(pyblish.api.InstancePlugin): temp_data["frame_start"], temp_data["frame_end"]) + # create or update outputName + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + # add outputName to anatomy format fill_data + fill_data.update({ + "output": output_name, + "ext": output_ext + }) + try: # temporary until oiiotool is supported cross platform ffmpeg_args = self._ffmpeg_arguments( output_def, instance, new_repre, temp_data, fill_data @@ -317,14 +333,6 @@ class ExtractReview(pyblish.api.InstancePlugin): for f in files_to_clean: os.unlink(f) - output_name = new_repre.get("outputName", "") - output_ext = new_repre["ext"] - if output_name: - output_name += "_" - output_name += output_def["filename_suffix"] - if temp_data["without_handles"]: - output_name += "_noHandles" - new_repre.update({ "name": "{}_{}".format(output_name, output_ext), "outputName": output_name, diff --git a/openpype/plugins/publish/validate_version.py b/openpype/plugins/publish/validate_version.py index e48ce6e3c3..b94152ef2d 100644 --- a/openpype/plugins/publish/validate_version.py +++ b/openpype/plugins/publish/validate_version.py @@ -10,7 +10,7 @@ class ValidateVersion(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder label = "Validate Version" - hosts = ["nuke", "maya", "blender", "standalonepublisher"] + hosts = ["nuke", "maya", "houdini", "blender", "standalonepublisher"] optional = False active = True diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index e25b56744e..de0336be2b 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -14,7 +14,8 @@ from openpype.lib.remote_publish import ( publish_and_log, fail_batch, find_variant_key, - get_task_data + get_task_data, + IN_PROGRESS_STATUS ) @@ -161,21 +162,32 @@ class PypeCommands: log.info("Publish 
finished.") @staticmethod - def remotepublishfromapp(project, batch_dir, host_name, - user, targets=None): + def remotepublishfromapp(project, batch_path, host_name, + user_email, targets=None): """Opens installed variant of 'host' and run remote publish there. - Currently implemented and tested for Photoshop where customer - wants to process uploaded .psd file and publish collected layers - from there. + Currently implemented and tested for Photoshop where customer + wants to process uploaded .psd file and publish collected layers + from there. - Checks if no other batches are running (status =='in_progress). If - so, it sleeps for SLEEP (this is separate process), - waits for WAIT_FOR seconds altogether. + Checks if no other batches are running (status =='in_progress). If + so, it sleeps for SLEEP (this is separate process), + waits for WAIT_FOR seconds altogether. - Requires installed host application on the machine. + Requires installed host application on the machine. - Runs publish process as user would, in automatic fashion. + Runs publish process as user would, in automatic fashion. + + Args: + project (str): project to publish (only single context is expected + per call of remotepublish + batch_path (str): Path batch folder. Contains subfolders with + resources (workfile, another subfolder 'renders' etc.) 
+ host_name (str): 'photoshop' + user_email (string): email address for webpublisher - used to + find Ftrack user with same email + targets (list): Pyblish targets + (to choose validator for example) """ import pyblish.api from openpype.api import Logger @@ -185,9 +197,9 @@ class PypeCommands: log.info("remotepublishphotoshop command") - task_data = get_task_data(batch_dir) + task_data = get_task_data(batch_path) - workfile_path = os.path.join(batch_dir, + workfile_path = os.path.join(batch_path, task_data["task"], task_data["files"][0]) @@ -196,9 +208,9 @@ class PypeCommands: batch_id = task_data["batch"] dbcon = get_webpublish_conn() # safer to start logging here, launch might be broken altogether - _id = start_webpublish_log(dbcon, batch_id, user) + _id = start_webpublish_log(dbcon, batch_id, user_email) - batches_in_progress = list(dbcon.find({"status": "in_progress"})) + batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) if len(batches_in_progress) > 1: fail_batch(_id, batches_in_progress, dbcon) print("Another batch running, probably stuck, ask admin for help") @@ -219,10 +231,11 @@ class PypeCommands: print("env:: {}".format(env)) os.environ.update(env) - os.environ["OPENPYPE_PUBLISH_DATA"] = batch_dir + os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path # must pass identifier to update log lines for a batch os.environ["BATCH_LOG_ID"] = str(_id) os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib + os.environ["USER_EMAIL"] = user_email pyblish.api.register_host(host_name) if targets: @@ -247,7 +260,7 @@ class PypeCommands: time.sleep(0.5) @staticmethod - def remotepublish(project, batch_path, user, targets=None): + def remotepublish(project, batch_path, user_email, targets=None): """Start headless publishing. Used to publish rendered assets, workfiles etc. @@ -259,7 +272,8 @@ class PypeCommands: per call of remotepublish batch_path (str): Path batch folder. 
Contains subfolders with resources (workfile, another subfolder 'renders' etc.) - user (string): email address for webpublisher + user_email (string): email address for webpublisher - used to + find Ftrack user with same email targets (list): Pyblish targets (to choose validator for example) @@ -283,6 +297,7 @@ class PypeCommands: os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path os.environ["AVALON_PROJECT"] = project os.environ["AVALON_APP"] = host_name + os.environ["USER_EMAIL"] = user_email pyblish.api.register_host(host_name) @@ -298,9 +313,9 @@ class PypeCommands: _, batch_id = os.path.split(batch_path) dbcon = get_webpublish_conn() - _id = start_webpublish_log(dbcon, batch_id, user) + _id = start_webpublish_log(dbcon, batch_id, user_email) - publish_and_log(dbcon, _id, log) + publish_and_log(dbcon, _id, log, batch_id=batch_id) log.info("Publish finished.") diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index b6fbdecc95..c81069ef5c 100644 --- a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -3,18 +3,33 @@ "CreateShotClip": { "hierarchy": "{folder}/{sequence}", "clipRename": true, - "clipName": "{track}{sequence}{shot}", + "clipName": "{sequence}{shot}", + "segmentIndex": true, "countFrom": 10, "countSteps": 10, "folder": "shots", "episode": "ep01", - "sequence": "sq01", + "sequence": "a", "track": "{_track_}", - "shot": "sh###", + "shot": "####", "vSyncOn": false, "workfileFrameStart": 1001, - "handleStart": 10, - "handleEnd": 10 + "handleStart": 5, + "handleEnd": 5 + } + }, + "publish": { + "ExtractSubsetResources": { + "keep_original_representation": false, + "export_presets_mapping": { + "exr16fpdwaa": { + "ext": "exr", + "xml_preset_dir": "", + "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml", + "representation_add_range": true, + "representation_tags": [] + } + } } } } \ No newline at end of file diff --git 
a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index b3ea77a584..513611ebfb 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -318,6 +318,19 @@ "tasks": [], "add_ftrack_family": true, "advanced_filtering": [] + }, + { + "hosts": [ + "flame" + ], + "families": [ + "plate", + "take" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index d713c37620..76576ebf73 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -38,6 +38,11 @@ "key": "clipName", "label": "Clip name template" }, + { + "type": "boolean", + "key": "segmentIndex", + "label": "Accept segment order" + }, { "type": "number", "key": "countFrom", @@ -119,6 +124,71 @@ ] } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "ExtractSubsetResources", + "label": "Extract Subset Resources", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "keep_original_representation", + "label": "Publish clip's original media" + }, + { + "key": "export_presets_mapping", + "label": "Export presets mapping", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "ext", + "label": "Output extension", + "type": "text" + }, + { + "key": "xml_preset_file", + "label": "XML preset file (with ext)", + "type": "text" + }, + { + "key": "xml_preset_dir", + "label": "XML preset folder (optional)", + "type": "text" + }, + { + "type": "separator" + 
}, + { + "type": "boolean", + "key": "representation_add_range", + "label": "Add frame range to representation" + }, + { + "type": "list", + "key": "representation_tags", + "label": "Add representation tags", + "object_type": { + "type": "text", + "multiline": false + } + } + ] + } + } + ] + } + ] } ] } diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index fb99333f87..31bb455f95 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -24,7 +24,6 @@ from .commands import ( ) from .vray_proxies import vrayproxy_assign_look - module = sys.modules[__name__] module.window = None @@ -210,7 +209,7 @@ class App(QtWidgets.QWidget): # Assign the first matching look relevant for this asset # (since assigning multiple to the same nodes makes no sense) assign_look = next((subset for subset in item["looks"] - if subset["name"] in looks), None) + if subset["name"] in looks), None) if not assign_look: self.echo("{} No matching selected " "look for {}".format(prefix, asset)) @@ -229,11 +228,14 @@ class App(QtWidgets.QWidget): if cmds.pluginInfo('vrayformaya', query=True, loaded=True): self.echo("Getting vray proxy nodes ...") - vray_proxies = set(cmds.ls(type="VRayProxy")) - nodes = list(set(item["nodes"]).difference(vray_proxies)) + vray_proxies = set(cmds.ls(type="VRayProxy", long=True)) + if vray_proxies: for vp in vray_proxies: - vrayproxy_assign_look(vp, subset_name) + if vp in nodes: + vrayproxy_assign_look(vp, subset_name) + + nodes = list(set(item["nodes"]).difference(vray_proxies)) # Assign look if nodes: diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index f7d26f9adb..b9402d8ea1 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -8,7 +8,6 @@ from openpype.hosts.maya.api import lib from avalon import io, api - from .vray_proxies import get_alembic_ids_cache log = 
logging.getLogger(__name__) @@ -90,6 +89,7 @@ def get_all_asset_nodes(): container_name = container["objectName"] nodes += cmds.sets(container_name, query=True, nodesOnly=True) or [] + nodes = list(set(nodes)) return nodes @@ -106,9 +106,19 @@ def create_asset_id_hash(nodes): # iterate over content of reference node if cmds.nodeType(node) == "reference": ref_hashes = create_asset_id_hash( - cmds.referenceQuery(node, nodes=True)) + list(set(cmds.referenceQuery(node, nodes=True, dp=True)))) for asset_id, ref_nodes in ref_hashes.items(): node_id_hash[asset_id] += ref_nodes + elif cmds.pluginInfo('vrayformaya', query=True, + loaded=True) and cmds.nodeType( + node) == "VRayProxy": + path = cmds.getAttr("{}.fileName".format(node)) + ids = get_alembic_ids_cache(path) + for k, _ in ids.items(): + pid = k.split(":")[0] + if node not in node_id_hash[pid]: + node_id_hash[pid].append(node) + else: value = lib.get_id(node) if value is None: @@ -141,22 +151,8 @@ def create_items_from_nodes(nodes): id_hashes = create_asset_id_hash(nodes) - # get ids from alembic - if cmds.pluginInfo('vrayformaya', query=True, loaded=True): - vray_proxy_nodes = cmds.ls(nodes, type="VRayProxy") - for vp in vray_proxy_nodes: - path = cmds.getAttr("{}.fileName".format(vp)) - ids = get_alembic_ids_cache(path) - parent_id = {} - for k, _ in ids.items(): - pid = k.split(":")[0] - if not parent_id.get(pid): - parent_id.update({pid: [vp]}) - - print("Adding ids from alembic {}".format(path)) - id_hashes.update(parent_id) - if not id_hashes: + log.warning("No id hashes") return asset_view_items for _id, id_nodes in id_hashes.items(): diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index d2f345e628..ecc9a2a873 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -41,7 +41,12 @@ def get_alembic_paths_by_property(filename, attr, verbose=False): filename = filename.replace("\\", "/") 
filename = str(filename) # path must be string - archive = alembic.Abc.IArchive(filename) + try: + archive = alembic.Abc.IArchive(filename) + except RuntimeError: + # invalid alembic file - probably vrmesh + log.warning("{} is not an alembic file".format(filename)) + return {} root = archive.getTop() iterator = list(root.children) @@ -201,9 +206,7 @@ def load_look(version_id): with avalon.maya.maintained_selection(): container_node = api.load(loader, look_representation) - # Get container members - shader_nodes = cmds.sets(container_node, query=True) - return shader_nodes + return cmds.sets(container_node, query=True) def get_latest_version(asset_id, subset): diff --git a/openpype/tools/mayalookassigner/widgets.py b/openpype/tools/mayalookassigner/widgets.py index 625e9ef8c6..fceaf27244 100644 --- a/openpype/tools/mayalookassigner/widgets.py +++ b/openpype/tools/mayalookassigner/widgets.py @@ -20,7 +20,6 @@ MODELINDEX = QtCore.QModelIndex() class AssetOutliner(QtWidgets.QWidget): - refreshed = QtCore.Signal() selection_changed = QtCore.Signal() @@ -84,14 +83,13 @@ class AssetOutliner(QtWidgets.QWidget): """ selection_model = self.view.selectionModel() - items = [row.data(TreeModel.ItemRole) for row in - selection_model.selectedRows(0)] - - return items + return [row.data(TreeModel.ItemRole) + for row in selection_model.selectedRows(0)] def get_all_assets(self): """Add all items from the current scene""" + items = [] with lib.preserve_expanded_rows(self.view): with lib.preserve_selection(self.view): self.clear() @@ -118,7 +116,7 @@ class AssetOutliner(QtWidgets.QWidget): # Collect all nodes by hash (optimization) if not selection: - nodes = cmds.ls(dag=True, long=True) + nodes = cmds.ls(dag=True, long=True) else: nodes = commands.get_selected_nodes() id_nodes = commands.create_asset_id_hash(nodes) @@ -187,7 +185,6 @@ class AssetOutliner(QtWidgets.QWidget): class LookOutliner(QtWidgets.QWidget): - menu_apply_action = QtCore.Signal() def __init__(self, parent=None): 
@@ -237,9 +234,7 @@ class LookOutliner(QtWidgets.QWidget): """ datas = [i.data(TreeModel.ItemRole) for i in self.view.get_indices()] - items = [d for d in datas if d is not None] # filter Nones - - return items + return [d for d in datas if d is not None] def right_mouse_menu(self, pos): """Build RMB menu for look view""" diff --git a/openpype/tools/pyblish_pype/window.py b/openpype/tools/pyblish_pype/window.py index fdd2d80e23..edcf6f53b6 100644 --- a/openpype/tools/pyblish_pype/window.py +++ b/openpype/tools/pyblish_pype/window.py @@ -909,6 +909,13 @@ class Window(QtWidgets.QDialog): self.tr("Processing"), plugin_item.data(QtCore.Qt.DisplayRole) )) + visibility = True + if hasattr(plugin, "hide_ui_on_process") and plugin.hide_ui_on_process: + visibility = False + + if self.isVisible() != visibility: + self.setVisible(visibility) + def on_plugin_action_menu_requested(self, pos): """The user right-clicked on a plug-in __________ diff --git a/openpype/tools/tray/pype_tray.py b/openpype/tools/tray/pype_tray.py index 0d3e7ae04c..c9b8aaa842 100644 --- a/openpype/tools/tray/pype_tray.py +++ b/openpype/tools/tray/pype_tray.py @@ -16,6 +16,7 @@ from openpype.api import ( ) from openpype.lib import ( get_openpype_execute_args, + op_version_control_available, is_current_version_studio_latest, is_running_from_build, is_running_staging, @@ -218,7 +219,7 @@ class TrayManager: def _on_version_check_timer(self): # Check if is running from build and stop future validations if yes - if not is_running_from_build(): + if not is_running_from_build() or not op_version_control_available(): self._version_check_timer.stop() return diff --git a/website/yarn.lock b/website/yarn.lock index 89da2289de..098aed5e85 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -2250,9 +2250,9 @@ bail@^1.0.0: integrity sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ== balanced-match@^1.0.0: - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== base16@^1.0.0: version "1.0.0" @@ -3983,9 +3983,9 @@ flux@^4.0.1: fbjs "^3.0.0" follow-redirects@^1.0.0, follow-redirects@^1.14.0: - version "1.14.4" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.4.tgz#838fdf48a8bbdd79e52ee51fb1c94e3ed98b9379" - integrity sha512-zwGkiSXC1MUJG/qmeIFH2HBJx9u0V46QGUe3YR1fXG8bXQxq7fLj0RjLZQ5nubr9qNJUZrH+xUcwXEoXNpfS+g== + version "1.14.7" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.7.tgz#2004c02eb9436eee9a21446a6477debf17e81685" + integrity sha512-+hbxoLbFMbRKDwohX8GkTataGqO6Jb7jGwpAlwgy2bIz25XtRm7KEzJM76R1WiNT5SwZkX4Y75SwBolkpmE7iQ== for-in@^1.0.2: version "1.0.2" @@ -4136,9 +4136,9 @@ glob-to-regexp@^0.4.1: integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== glob@^7.0.0, glob@^7.0.3, glob@^7.1.3: - version "7.1.6" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" - integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== + version "7.2.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" + integrity sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q== dependencies: fs.realpath "^1.0.0" inflight "^1.0.4" @@ -4825,6 +4825,13 @@ is-core-module@^2.2.0: dependencies: has "^1.0.3" +is-core-module@^2.8.0: + version "2.8.1" + resolved 
"https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.8.1.tgz#f59fdfca701d5879d0a6b100a40aa1560ce27211" + integrity sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA== + dependencies: + has "^1.0.3" + is-data-descriptor@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" @@ -6167,7 +6174,7 @@ path-key@^3.0.0, path-key@^3.1.0: resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== -path-parse@^1.0.6: +path-parse@^1.0.6, path-parse@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== @@ -7208,7 +7215,16 @@ resolve-url@^0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= -resolve@^1.1.6, resolve@^1.14.2, resolve@^1.3.2: +resolve@^1.1.6: + version "1.21.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.21.0.tgz#b51adc97f3472e6a5cf4444d34bc9d6b9037591f" + integrity sha512-3wCbTpk5WJlyE4mSOtDLhqQmGFi0/TD9VPwmiolnk8U0wRgMEktqCXd3vy5buTO3tljvalNvKrjHEfrd2WpEKA== + dependencies: + is-core-module "^2.8.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +resolve@^1.14.2, resolve@^1.3.2: version "1.20.0" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== @@ -7533,9 +7549,9 @@ shell-quote@1.7.2: integrity 
sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg== shelljs@^0.8.4: - version "0.8.4" - resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.4.tgz#de7684feeb767f8716b326078a8a00875890e3c2" - integrity sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ== + version "0.8.5" + resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.5.tgz#de055408d8361bed66c669d2f000538ced8ee20c" + integrity sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow== dependencies: glob "^7.0.0" interpret "^1.0.0" @@ -7896,6 +7912,11 @@ supports-color@^7.0.0, supports-color@^7.1.0: dependencies: has-flag "^4.0.0" +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + svg-parser@^2.0.2: version "2.0.4" resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5"