diff --git a/.gitignore b/.gitignore index ba8805e013..ebb47e55d2 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,6 @@ coverage.xml .hypothesis/ .pytest_cache/ - # Node JS packages ################## node_modules diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index f624b96125..754a2d2e25 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -285,7 +285,7 @@ class BootstrapRepos: """Get version of local OpenPype.""" version = {} - path = Path(os.path.dirname(__file__)).parent / "openpype" / "version.py" + path = Path(os.environ["OPENPYPE_ROOT"]) / "openpype" / "version.py" with open(path, "r") as fp: exec(fp.read(), version) return version["__version__"] diff --git a/igniter/openpype.icns b/igniter/openpype.icns new file mode 100644 index 0000000000..792f819ad9 Binary files /dev/null and b/igniter/openpype.icns differ diff --git a/igniter/tools.py b/igniter/tools.py index ff2db6bc7e..368e9a2b3d 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -130,7 +130,7 @@ def validate_mongo_connection(cnx: str) -> (bool, str): mongo_args["port"] = int(port) try: - client = MongoClient(**mongo_args) + client = MongoClient(cnx) client.server_info() client.close() except ServerSelectionTimeoutError as e: diff --git a/inno_setup.iss b/inno_setup.iss new file mode 100644 index 0000000000..ead9907955 --- /dev/null +++ b/inno_setup.iss @@ -0,0 +1,50 @@ +; Script generated by the Inno Setup Script Wizard. +; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! + + +#define MyAppName "OpenPype" +#define Build GetEnv("BUILD_DIR") +#define AppVer GetEnv("BUILD_VERSION") + + +[Setup] +; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications. +; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) +AppId={{B9E9DF6A-5BDA-42DD-9F35-C09D564C4D93} +AppName={#MyAppName} +AppVersion={#AppVer} +AppVerName={#MyAppName} version {#AppVer} +AppPublisher=Orbi Tools s.r.o +AppPublisherURL=http://pype.club +AppSupportURL=http://pype.club +AppUpdatesURL=http://pype.club +DefaultDirName={autopf}\{#MyAppName} +DisableProgramGroupPage=yes +OutputBaseFilename={#MyAppName}-{#AppVer}-install +AllowCancelDuringInstall=yes +; Uncomment the following line to run in non administrative install mode (install for current user only.) 
+;PrivilegesRequired=lowest
+PrivilegesRequiredOverridesAllowed=dialog
+SetupIconFile=igniter\openpype.ico
+OutputDir=build\
+Compression=lzma
+SolidCompression=yes
+WizardStyle=modern
+
+[Languages]
+Name: "english"; MessagesFile: "compiler:Default.isl"
+
+[Tasks]
+Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
+
+[Files]
+Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
+; NOTE: Don't use "Flags: ignoreversion" on any shared system files
+
+[Icons]
+Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"
+Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon
+
+[Run]
+Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent
+
diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py
index 9a80801652..e914c26435 100644
--- a/openpype/hosts/aftereffects/api/__init__.py
+++ b/openpype/hosts/aftereffects/api/__init__.py
@@ -5,7 +5,7 @@ import logging
 from avalon import io
 from avalon import api as avalon
 from avalon.vendor import Qt
-from openpype import lib
+from openpype import lib, api
 import pyblish.api as pyblish
 
 import openpype.hosts.aftereffects
@@ -81,3 +81,69 @@ def uninstall():
 def on_pyblish_instance_toggled(instance, old_value, new_value):
     """Toggle layer visibility on instance toggles."""
     instance[0].Visible = new_value
+
+
+def get_asset_settings():
+    """Get settings on current asset from database.
+
+    Returns:
+        dict: Scene data.
+
+    """
+    asset_data = lib.get_asset()["data"]
+    fps = asset_data.get("fps")
+    frame_start = asset_data.get("frameStart")
+    frame_end = asset_data.get("frameEnd")
+    handle_start = asset_data.get("handleStart")
+    handle_end = asset_data.get("handleEnd")
+    resolution_width = asset_data.get("resolutionWidth")
+    resolution_height = asset_data.get("resolutionHeight")
+    duration = (frame_end - frame_start + 1) + handle_start + handle_end
+    entity_type = asset_data.get("entityType")
+
+    scene_data = {
+        "fps": fps,
+        "frameStart": frame_start,
+        "frameEnd": frame_end,
+        "handleStart": handle_start,
+        "handleEnd": handle_end,
+        "resolutionWidth": resolution_width,
+        "resolutionHeight": resolution_height,
+        "duration": duration
+    }
+
+    try:
+        # skip lists are configured per project in project settings
+        skip_resolution_check = (
+            api.get_current_project_settings()
+            ["plugins"]
+            ["aftereffects"]
+            ["publish"]
+            ["ValidateSceneSettings"]
+            ["skip_resolution_check"]
+        )
+        skip_timelines_check = (
+            api.get_current_project_settings()
+            ["plugins"]
+            ["aftereffects"]
+            ["publish"]
+            ["ValidateSceneSettings"]
+            ["skip_timelines_check"]
+        )
+    except KeyError:
+        skip_resolution_check = ['*']
+        skip_timelines_check = ['*']
+
+    if os.getenv('AVALON_TASK') in skip_resolution_check or \
+            '*' in skip_resolution_check:
+        scene_data.pop("resolutionWidth")
+        scene_data.pop("resolutionHeight")
+
+    if entity_type in skip_timelines_check or '*' in skip_timelines_check:
+        scene_data.pop('fps', None)
+        scene_data.pop('frameStart', None)
+        scene_data.pop('frameEnd', None)
+        scene_data.pop('handleStart', None)
+        scene_data.pop('handleEnd', None)
+
+    return scene_data
diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py
index ba64551283..baac64ed0c 100644
--- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py
+++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py
@@ -12,6 +12,7 @@ class AERenderInstance(RenderInstance):
     # extend generic, composition name is needed
     comp_name = attr.ib(default=None)
     comp_id = attr.ib(default=None)
+    fps = attr.ib(default=None)
 
 
 class CollectAERender(abstract_collect_render.AbstractCollectRender):
@@ -45,6 +46,7 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
                 raise ValueError("Couldn't find id, unable to publish. " +
                                  "Please recreate instance.")
             item_id = inst["members"][0]
+
             work_area_info = self.stub.get_work_area(int(item_id))
 
             if not work_area_info:
@@ -57,6 +59,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
             frameEnd = round(work_area_info.workAreaStart +
                              float(work_area_info.workAreaDuration) *
                              float(work_area_info.frameRate)) - 1
+            fps = work_area_info.frameRate
+
             # TODO add resolution when supported by extension
             if inst["family"] == "render" and inst["active"]:
                 instance = AERenderInstance(
@@ -86,7 +90,8 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
                     frameStart=frameStart,
                     frameEnd=frameEnd,
                     frameStep=1,
-                    toBeRenderedOn='deadline'
+                    toBeRenderedOn='deadline',
+                    fps=fps
                 )
 
                 comp = compositions_by_id.get(int(item_id))
@@ -102,7 +107,6 @@ class CollectAERender(abstract_collect_render.AbstractCollectRender):
 
             instances.append(instance)
 
-        self.log.debug("instances::{}".format(instances))
         return instances
 
     def get_expected_files(self, render_instance):
diff --git a/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
new file mode 100644
index 0000000000..cc7db3141f
--- /dev/null
+++ b/openpype/hosts/aftereffects/plugins/publish/validate_scene_settings.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+"""Validate scene settings."""
+import os
+
+import pyblish.api
+
+from avalon import aftereffects
+
+import openpype.hosts.aftereffects.api as api
+
+stub = aftereffects.stub()
+
+
+class ValidateSceneSettings(pyblish.api.InstancePlugin):
+    """
+       Ensures that Composition Settings (right mouse on the composition)
+       match the task attributes in FTrack.
+
+       By default checks only duration - how many frames should be rendered.
+       Compares:
+           Frame end - Frame start + 1 from FTrack
+               against
+           Duration in Composition Settings.
+
+       If this complains:
+           Check the error message to see where the discrepancy is.
+           Check the 'pype' section of the FTrack task attributes for the
+           expected values.
+           Check/modify rendered Composition Settings.
+
+       If you know what you are doing, run publishing again with this
+       validation unchecked before the Validation phase.
+    """
+
+    """
+    Dev docu:
+    Could be configured by 'presets/plugins/aftereffects/publish'
+
+    skip_timelines_check - fill in task names to skip validation of
+        frameStart
+        frameEnd
+        fps
+        handleStart
+        handleEnd
+    skip_resolution_check - fill in entity type ('asset') to skip validation of
+        resolutionWidth
+        resolutionHeight
+        TODO support in extension is missing for now
+
+    By default validates duration (how many frames should be published)
+    """
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Scene Settings"
+    families = ["render.farm"]
+    hosts = ["aftereffects"]
+    optional = True
+
+    skip_timelines_check = ["*"]  # * >> skip for all
+    skip_resolution_check = ["*"]
+
+    def process(self, instance):
+        """Plugin entry point."""
+        expected_settings = api.get_asset_settings()
+        self.log.info("expected_settings::{}".format(expected_settings))
+
+        # handle case where ftrack uses only two decimal places
+        # 23.976023976023978 vs. 23.98
+        fps = instance.data.get("fps")
+        if fps:
+            if isinstance(fps, float):
+                fps = float(
+                    "{:.2f}".format(fps))
+            expected_settings["fps"] = fps
+
+        duration = instance.data.get("frameEndHandle") - \
+            instance.data.get("frameStartHandle") + 1
+
+        current_settings = {
+            "fps": fps,
+            "frameStartHandle": instance.data.get("frameStartHandle"),
+            "frameEndHandle": instance.data.get("frameEndHandle"),
+            "resolutionWidth": instance.data.get("resolutionWidth"),
+            "resolutionHeight": instance.data.get("resolutionHeight"),
+            "duration": duration
+        }
+        self.log.info("current_settings:: {}".format(current_settings))
+
+        invalid_settings = []
+        for key, value in expected_settings.items():
+            if value != current_settings[key]:
+                invalid_settings.append(
+                    "{} expected: {} found: {}".format(key, value,
+                                                       current_settings[key])
+                )
+
+        if ((expected_settings.get("handleStart")
+                or expected_settings.get("handleEnd"))
+                and invalid_settings):
+            msg = "Handles included in calculation. Remove handles in DB " +\
+                  "or extend frame range in Composition Setting."
+            invalid_settings.append(msg)
+
+        msg = "Found invalid settings:\n{}".format(
+            "\n".join(invalid_settings)
+        )
+        assert not invalid_settings, msg
+        assert os.path.exists(instance.data.get("source")), (
+            "Scene file not found (saved under wrong name)"
+        )
diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py
index eb88e7af63..de30da3319 100644
--- a/openpype/hosts/blender/api/plugin.py
+++ b/openpype/hosts/blender/api/plugin.py
@@ -9,7 +9,7 @@ from avalon import api
 import avalon.blender
 from openpype.api import PypeCreatorMixin
 
-VALID_EXTENSIONS = [".blend", ".json"]
+VALID_EXTENSIONS = [".blend", ".json", ".abc"]
 
 
 def asset_name(
diff --git a/openpype/hosts/blender/hooks/pre_pyside_install.py b/openpype/hosts/blender/hooks/pre_pyside_install.py
index 088a27566d..6d253300d9 100644
--- a/openpype/hosts/blender/hooks/pre_pyside_install.py
+++ b/openpype/hosts/blender/hooks/pre_pyside_install.py
@@ -1,4 +1,5 @@
 import os
+import re
 import subprocess
 from openpype.lib import PreLaunchHook
 
@@ -31,10 +32,46 @@ class InstallPySideToBlender(PreLaunchHook):
 
     def inner_execute(self):
         # Get blender's python directory
+        version_regex = re.compile(r"^2\.[0-9]{2}$")
+
         executable = self.launch_context.executable.executable_path
-        # Blender installation contain subfolder named with it's version where
-        # python binaries are stored.
-        version_subfolder = self.launch_context.app_name.split("_")[1]
+        if os.path.basename(executable).lower() != "blender.exe":
+            self.log.info((
+                "Executable does not lead to blender.exe file. Can't determine"
+                " blender's python to check/install PySide2."
+            ))
+            return
+
+        executable_dir = os.path.dirname(executable)
+        version_subfolders = []
+        for name in os.listdir(executable_dir):
+            fullpath = os.path.join(executable_dir, name)
+            if not os.path.isdir(fullpath):
+                continue
+
+            if not version_regex.match(name):
+                continue
+
+            version_subfolders.append(name)
+
+        if not version_subfolders:
+            self.log.info(
+                "Didn't find version subfolder next to Blender executable"
+            )
+            return
+
+        if len(version_subfolders) > 1:
+            self.log.info((
+                "Found more than one version subfolder next"
+                " to blender executable. {}"
+            ).format(", ".join([
+                '"./{}"'.format(name)
+                for name in version_subfolders
+            ])))
+            return
+
+        version_subfolder = version_subfolders[0]
+
         pythond_dir = os.path.join(
             os.path.dirname(executable),
             version_subfolder,
@@ -65,6 +102,7 @@ class InstallPySideToBlender(PreLaunchHook):
 
         # Check if PySide2 is installed and skip if yes
         if self.is_pyside_installed(python_executable):
+            self.log.debug("Blender has already installed PySide2.")
             return
 
         # Install PySide2 in blender's python
diff --git a/openpype/hosts/blender/plugins/create/create_pointcache.py b/openpype/hosts/blender/plugins/create/create_pointcache.py
new file mode 100644
index 0000000000..03a468f82e
--- /dev/null
+++ b/openpype/hosts/blender/plugins/create/create_pointcache.py
@@ -0,0 +1,35 @@
+"""Create a pointcache asset."""
+
+import bpy
+
+from avalon import api
+from avalon.blender import lib
+import openpype.hosts.blender.api.plugin
+
+
+class CreatePointcache(openpype.hosts.blender.api.plugin.Creator):
+    """Point cache for animated geometry"""
+
+    name = "pointcacheMain"
+    label = "Point Cache"
+    family = "pointcache"
+    icon = "gears"
+
+    def process(self):
+
+        asset = self.data["asset"]
+        subset = self.data["subset"]
+        name = openpype.hosts.blender.api.plugin.asset_name(asset, subset)
+        collection = bpy.data.collections.new(name=name)
+        bpy.context.scene.collection.children.link(collection)
+        self.data['task'] = api.Session.get('AVALON_TASK')
+        lib.imprint(collection, self.data)
+
+        if (self.options or {}).get("useSelection"):
+            objects = lib.get_selection()
+            for obj in objects:
+                collection.objects.link(obj)
+                if obj.type == 'EMPTY':
+                    objects.extend(obj.children)
+
+        return collection
diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py
new file mode 100644
index 0000000000..4248cffd69
--- /dev/null
+++ b/openpype/hosts/blender/plugins/load/load_abc.py
@@ -0,0 +1,246 @@
+"""Load an asset in Blender from an Alembic file."""
+
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+from avalon import api, blender
+import bpy
+import openpype.hosts.blender.api.plugin as plugin
+
+
+class CacheModelLoader(plugin.AssetLoader):
+    """Load cache models.
+
+    Stores the imported asset in a collection named after the asset.
+
+    Note:
+        At least for now it only supports Alembic files.
+ """ + + families = ["model", "pointcache"] + representations = ["abc"] + + label = "Link Alembic" + icon = "code-fork" + color = "orange" + + def _remove(self, objects, container): + for obj in list(objects): + if obj.type == 'MESH': + bpy.data.meshes.remove(obj.data) + elif obj.type == 'EMPTY': + bpy.data.objects.remove(obj) + + bpy.data.collections.remove(container) + + def _process(self, libpath, container_name, parent_collection): + bpy.ops.object.select_all(action='DESELECT') + + view_layer = bpy.context.view_layer + view_layer_collection = view_layer.active_layer_collection.collection + + relative = bpy.context.preferences.filepaths.use_relative_paths + bpy.ops.wm.alembic_import( + filepath=libpath, + relative_path=relative + ) + + parent = parent_collection + + if parent is None: + parent = bpy.context.scene.collection + + model_container = bpy.data.collections.new(container_name) + parent.children.link(model_container) + for obj in bpy.context.selected_objects: + model_container.objects.link(obj) + view_layer_collection.objects.unlink(obj) + + name = obj.name + obj.name = f"{name}:{container_name}" + + # Groups are imported as Empty objects in Blender + if obj.type == 'MESH': + data_name = obj.data.name + obj.data.name = f"{data_name}:{container_name}" + + if not obj.get(blender.pipeline.AVALON_PROPERTY): + obj[blender.pipeline.AVALON_PROPERTY] = dict() + + avalon_info = obj[blender.pipeline.AVALON_PROPERTY] + avalon_info.update({"container_name": container_name}) + + bpy.ops.object.select_all(action='DESELECT') + + return model_container + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + lib_container = plugin.asset_name( + asset, subset + ) + unique_number = plugin.get_unique_number( + asset, subset + ) + namespace = namespace or f"{asset}_{unique_number}" + container_name = plugin.asset_name( + asset, subset, unique_number + ) + + container = bpy.data.collections.new(lib_container) + container.name = container_name + blender.pipeline.containerise_existing( + container, + name, + namespace, + context, + self.__class__.__name__, + ) + + container_metadata = container.get( + blender.pipeline.AVALON_PROPERTY) + + container_metadata["libpath"] = libpath + container_metadata["lib_container"] = lib_container + + obj_container = self._process( + libpath, container_name, None) + + container_metadata["obj_container"] = obj_container + + # Save the list of objects in the metadata container + container_metadata["objects"] = obj_container.all_objects + + nodes = list(container.objects) + nodes.append(container) + self[:] = nodes + return nodes + + def update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! 
+ """ + collection = bpy.data.collections.get( + container["objectName"] + ) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert collection, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert not (collection.children), ( + "Nested collections are not supported." + ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + + collection_metadata = collection.get( + blender.pipeline.AVALON_PROPERTY) + collection_libpath = collection_metadata["libpath"] + + obj_container = plugin.get_local_collection_with_name( + collection_metadata["obj_container"].name + ) + objects = obj_container.all_objects + + container_name = obj_container.name + + normalized_collection_libpath = ( + str(Path(bpy.path.abspath(collection_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + self.log.debug( + "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_collection_libpath, + normalized_libpath, + ) + if normalized_collection_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + parent = plugin.get_parent_collection(obj_container) + + self._remove(objects, obj_container) + + obj_container = self._process( + str(libpath), container_name, parent) + + collection_metadata["obj_container"] = obj_container + collection_metadata["objects"] = obj_container.all_objects + collection_metadata["libpath"] = str(libpath) + collection_metadata["representation"] = str(representation["_id"]) + + def remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! + """ + collection = bpy.data.collections.get( + container["objectName"] + ) + if not collection: + return False + assert not (collection.children), ( + "Nested collections are not supported." + ) + + collection_metadata = collection.get( + blender.pipeline.AVALON_PROPERTY) + + obj_container = plugin.get_local_collection_with_name( + collection_metadata["obj_container"].name + ) + objects = obj_container.all_objects + + self._remove(objects, obj_container) + + bpy.data.collections.remove(collection) + + return True diff --git a/openpype/hosts/blender/plugins/load/load_model.py b/openpype/hosts/blender/plugins/load/load_model.py index 7297e459a6..d645bedfcc 100644 --- a/openpype/hosts/blender/plugins/load/load_model.py +++ b/openpype/hosts/blender/plugins/load/load_model.py @@ -242,65 +242,3 @@ class BlendModelLoader(plugin.AssetLoader): bpy.data.collections.remove(collection) return True - - -class CacheModelLoader(plugin.AssetLoader): - """Load cache models. - - Stores the imported asset in a collection named after the asset. - - Note: - At least for now it only supports Alembic files. 
- """ - - families = ["model"] - representations = ["abc"] - - label = "Link Model" - icon = "code-fork" - color = "orange" - - def process_asset( - self, context: dict, name: str, namespace: Optional[str] = None, - options: Optional[Dict] = None - ) -> Optional[List]: - """ - Arguments: - name: Use pre-defined name - namespace: Use pre-defined namespace - context: Full parenthood of representation to load - options: Additional settings dictionary - """ - raise NotImplementedError( - "Loading of Alembic files is not yet implemented.") - # TODO (jasper): implement Alembic import. - - libpath = self.fname - asset = context["asset"]["name"] - subset = context["subset"]["name"] - # TODO (jasper): evaluate use of namespace which is 'alien' to Blender. - lib_container = container_name = ( - plugin.asset_name(asset, subset, namespace) - ) - relative = bpy.context.preferences.filepaths.use_relative_paths - - with bpy.data.libraries.load( - libpath, link=True, relative=relative - ) as (data_from, data_to): - data_to.collections = [lib_container] - - scene = bpy.context.scene - instance_empty = bpy.data.objects.new( - container_name, None - ) - scene.collection.objects.link(instance_empty) - instance_empty.instance_type = 'COLLECTION' - collection = bpy.data.collections[lib_container] - collection.name = container_name - instance_empty.instance_collection = collection - - nodes = list(collection.objects) - nodes.append(collection) - nodes.append(instance_empty) - self[:] = nodes - return nodes diff --git a/openpype/hosts/blender/plugins/publish/extract_abc.py b/openpype/hosts/blender/plugins/publish/extract_abc.py index 6a89c6019b..a6315908fc 100644 --- a/openpype/hosts/blender/plugins/publish/extract_abc.py +++ b/openpype/hosts/blender/plugins/publish/extract_abc.py @@ -11,14 +11,14 @@ class ExtractABC(openpype.api.Extractor): label = "Extract ABC" hosts = ["blender"] - families = ["model"] + families = ["model", "pointcache"] optional = True def process(self, instance): # Define extract output file path stagingdir = self.staging_dir(instance) - filename = f"{instance.name}.fbx" + filename = f"{instance.name}.abc" filepath = os.path.join(stagingdir, filename) context = bpy.context @@ -52,6 +52,8 @@ class ExtractABC(openpype.api.Extractor): old_scale = scene.unit_settings.scale_length + bpy.ops.object.select_all(action='DESELECT') + selected = list() for obj in instance: @@ -67,14 +69,11 @@ class ExtractABC(openpype.api.Extractor): # We set the scale of the scene for the export scene.unit_settings.scale_length = 0.01 - self.log.info(new_context) - # We export the abc bpy.ops.wm.alembic_export( new_context, filepath=filepath, - start=1, - end=1 + selected=True ) view_layer.active_layer_collection = old_active_layer_collection diff --git a/openpype/hosts/hiero/api/__init__.py b/openpype/hosts/hiero/api/__init__.py index fcb1d50ea8..8d0105ae5f 100644 --- a/openpype/hosts/hiero/api/__init__.py +++ b/openpype/hosts/hiero/api/__init__.py @@ -22,6 +22,7 @@ from .pipeline import ( ) from .lib import ( + pype_tag_name, get_track_items, get_current_project, get_current_sequence, @@ -73,6 +74,7 @@ __all__ = [ "work_root", # Lib functions + "pype_tag_name", "get_track_items", "get_current_project", "get_current_sequence", diff --git a/openpype/hosts/hiero/api/events.py b/openpype/hosts/hiero/api/events.py index c02e3e2ac4..3df095f9e4 100644 --- a/openpype/hosts/hiero/api/events.py +++ b/openpype/hosts/hiero/api/events.py @@ -2,7 +2,12 @@ import os import hiero.core.events import avalon.api as avalon from 
openpype.api import Logger -from .lib import sync_avalon_data_to_workfile, launch_workfiles_app +from .lib import ( + sync_avalon_data_to_workfile, + launch_workfiles_app, + selection_changed_timeline, + before_project_save +) from .tags import add_tags_to_workfile from .menu import update_menu_task_label @@ -78,7 +83,7 @@ def register_hiero_events(): "Registering events for: kBeforeNewProjectCreated, " "kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, " "kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, " - "kAfterProjectClose, kShutdown, kStartup" + "kAfterProjectClose, kShutdown, kStartup, kSelectionChanged" ) # hiero.core.events.registerInterest( @@ -91,8 +96,8 @@ def register_hiero_events(): hiero.core.events.registerInterest( "kAfterProjectLoad", afterProjectLoad) - # hiero.core.events.registerInterest( - # "kBeforeProjectSave", beforeProjectSaved) + hiero.core.events.registerInterest( + "kBeforeProjectSave", before_project_save) # hiero.core.events.registerInterest( # "kAfterProjectSave", afterProjectSaved) # @@ -104,10 +109,16 @@ def register_hiero_events(): # hiero.core.events.registerInterest("kShutdown", shutDown) # hiero.core.events.registerInterest("kStartup", startupCompleted) - # workfiles - hiero.core.events.registerEventType("kStartWorkfiles") - hiero.core.events.registerInterest("kStartWorkfiles", launch_workfiles_app) + hiero.core.events.registerInterest( + ("kSelectionChanged", "kTimeline"), selection_changed_timeline) + # workfiles + try: + hiero.core.events.registerEventType("kStartWorkfiles") + hiero.core.events.registerInterest( + "kStartWorkfiles", launch_workfiles_app) + except RuntimeError: + pass def register_events(): """ diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index b74e70cae3..a9982d96c4 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -9,7 +9,7 @@ import hiero import avalon.api as avalon import avalon.io from avalon.vendor.Qt import QtWidgets -from openpype.api import (Logger, Anatomy, config) +from openpype.api import (Logger, Anatomy, get_anatomy_settings) from . 
import tags import shutil from compiler.ast import flatten @@ -30,9 +30,9 @@ self = sys.modules[__name__] self._has_been_setup = False self._has_menu = False self._registered_gui = None -self.pype_tag_name = "Pype Data" -self.default_sequence_name = "PypeSequence" -self.default_bin_name = "PypeBin" +self.pype_tag_name = "openpypeData" +self.default_sequence_name = "openpypeSequence" +self.default_bin_name = "openpypeBin" AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype") @@ -150,15 +150,27 @@ def get_track_items( # get selected track items or all in active sequence if selected: - selected_items = list(hiero.selection) - for item in selected_items: - if track_name and track_name in item.parent().name(): - # filter only items fitting input track name - track_items.append(item) - elif not track_name: - # or add all if no track_name was defined - track_items.append(item) - else: + try: + selected_items = list(hiero.selection) + for item in selected_items: + if track_name and track_name in item.parent().name(): + # filter only items fitting input track name + track_items.append(item) + elif not track_name: + # or add all if no track_name was defined + track_items.append(item) + except AttributeError: + pass + + # check if any collected track items are + # `core.Hiero.Python.TrackItem` instance + if track_items: + any_track_item = track_items[0] + if not isinstance(any_track_item, hiero.core.TrackItem): + selected_items = [] + + # collect all available active sequence track items + if not track_items: sequence = get_current_sequence(name=sequence_name) # get all available tracks from sequence tracks = list(sequence.audioTracks()) + list(sequence.videoTracks()) @@ -240,7 +252,7 @@ def set_track_item_pype_tag(track_item, data=None): # basic Tag's attribute tag_data = { "editable": "0", - "note": "Pype data holder", + "note": "OpenPype data container", "icon": "openpype_icon.png", "metadata": {k: v for k, v in data.items()} } @@ -744,10 +756,13 @@ def _set_hrox_project_knobs(doc, **knobs): # set attributes to Project Tag proj_elem = doc.documentElement().firstChildElement("Project") for k, v in knobs.items(): - proj_elem.setAttribute(k, v) + if isinstance(v, dict): + continue + proj_elem.setAttribute(str(k), v) def apply_colorspace_project(): + project_name = os.getenv("AVALON_PROJECT") # get path the the active projects project = get_current_project(remove_untitled=True) current_file = project.path() @@ -756,9 +771,9 @@ def apply_colorspace_project(): project.close() # get presets for hiero - presets = config.get_init_presets() - colorspace = presets["colorspace"] - hiero_project_clrs = colorspace.get("hiero", {}).get("project", {}) + imageio = get_anatomy_settings( + project_name)["imageio"].get("hiero", None) + presets = imageio.get("workfile") # save the workfile as subversion "comment:_colorspaceChange" split_current_file = os.path.splitext(current_file) @@ -789,13 +804,13 @@ def apply_colorspace_project(): os.remove(copy_current_file_tmp) # use the code from bellow for changing xml hrox Attributes - hiero_project_clrs.update({"name": os.path.basename(copy_current_file)}) + presets.update({"name": os.path.basename(copy_current_file)}) # read HROX in as QDomSocument doc = _read_doc_from_path(copy_current_file) # apply project colorspace properties - _set_hrox_project_knobs(doc, **hiero_project_clrs) + _set_hrox_project_knobs(doc, **presets) # write QDomSocument back as HROX _write_doc_to_path(doc, copy_current_file) @@ -805,14 +820,17 @@ def apply_colorspace_project(): def 
apply_colorspace_clips(): + project_name = os.getenv("AVALON_PROJECT") project = get_current_project(remove_untitled=True) clips = project.clips() # get presets for hiero - presets = config.get_init_presets() - colorspace = presets["colorspace"] - hiero_clips_clrs = colorspace.get("hiero", {}).get("clips", {}) + imageio = get_anatomy_settings( + project_name)["imageio"].get("hiero", None) + from pprint import pprint + presets = imageio.get("regexInputs", {}).get("inputs", {}) + pprint(presets) for clip in clips: clip_media_source_path = clip.mediaSource().firstpath() clip_name = clip.name() @@ -822,10 +840,11 @@ def apply_colorspace_clips(): continue # check if any colorspace presets for read is mathing - preset_clrsp = next((hiero_clips_clrs[k] - for k in hiero_clips_clrs - if bool(re.search(k, clip_media_source_path))), - None) + preset_clrsp = None + for k in presets: + if not bool(re.search(k["regex"], clip_media_source_path)): + continue + preset_clrsp = k["colorspace"] if preset_clrsp: log.debug("Changing clip.path: {}".format(clip_media_source_path)) @@ -893,3 +912,61 @@ def get_sequence_pattern_and_padding(file): return found, padding else: return None, None + + +def sync_clip_name_to_data_asset(track_items_list): + # loop trough all selected clips + for track_item in track_items_list: + # ignore if parent track is locked or disabled + if track_item.parent().isLocked(): + continue + if not track_item.parent().isEnabled(): + continue + # ignore if the track item is disabled + if not track_item.isEnabled(): + continue + + # get name and data + ti_name = track_item.name() + data = get_track_item_pype_data(track_item) + + # ignore if no data on the clip or not publish instance + if not data: + continue + if data.get("id") != "pyblish.avalon.instance": + continue + + # fix data if wrong name + if data["asset"] != ti_name: + data["asset"] = ti_name + # remove the original tag + tag = get_track_item_pype_tag(track_item) + track_item.removeTag(tag) + # create new tag with updated data + set_track_item_pype_tag(track_item, data) + print("asset was changed in clip: {}".format(ti_name)) + + +def selection_changed_timeline(event): + """Callback on timeline to check if asset in data is the same as clip name. 
+ + Args: + event (hiero.core.Event): timeline event + """ + timeline_editor = event.sender + selection = timeline_editor.selection() + + # run checking function + sync_clip_name_to_data_asset(selection) + + +def before_project_save(event): + track_items = get_track_items( + selected=False, + track_type="video", + check_enabled=True, + check_locked=True, + check_tagged=True) + + # run checking function + sync_clip_name_to_data_asset(track_items) diff --git a/openpype/hosts/hiero/api/menu.py b/openpype/hosts/hiero/api/menu.py index 9ccf5e39d1..ab49251093 100644 --- a/openpype/hosts/hiero/api/menu.py +++ b/openpype/hosts/hiero/api/menu.py @@ -68,50 +68,45 @@ def menu_install(): menu.addSeparator() - workfiles_action = menu.addAction("Work Files...") + workfiles_action = menu.addAction("Work Files ...") workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) workfiles_action.triggered.connect(launch_workfiles_app) - default_tags_action = menu.addAction("Create Default Tags...") + default_tags_action = menu.addAction("Create Default Tags") default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) default_tags_action.triggered.connect(tags.add_tags_to_workfile) menu.addSeparator() - publish_action = menu.addAction("Publish...") + publish_action = menu.addAction("Publish ...") publish_action.setIcon(QtGui.QIcon("icons:Output.png")) publish_action.triggered.connect( lambda *args: publish(hiero.ui.mainWindow()) ) - creator_action = menu.addAction("Create...") + creator_action = menu.addAction("Create ...") creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) creator_action.triggered.connect(creator.show) - loader_action = menu.addAction("Load...") + loader_action = menu.addAction("Load ...") loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) loader_action.triggered.connect(cbloader.show) - sceneinventory_action = menu.addAction("Manage...") + sceneinventory_action = menu.addAction("Manage ...") sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) sceneinventory_action.triggered.connect(sceneinventory.show) menu.addSeparator() - reload_action = menu.addAction("Reload pipeline...") - reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) - reload_action.triggered.connect(reload_config) + if os.getenv("OPENPYPE_DEVELOP"): + reload_action = menu.addAction("Reload pipeline") + reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + reload_action.triggered.connect(reload_config) menu.addSeparator() - apply_colorspace_p_action = menu.addAction("Apply Colorspace Project...") + apply_colorspace_p_action = menu.addAction("Apply Colorspace Project") apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) apply_colorspace_p_action.triggered.connect(apply_colorspace_project) - apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips...") + apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips") apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) apply_colorspace_c_action.triggered.connect(apply_colorspace_clips) - - self.context_label_action = context_label_action - self.workfile_actions = workfiles_action - self.default_tags_action = default_tags_action - self.publish_action = publish_action - self.reload_action = reload_action diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 92e15cfae4..c46ef9abfa 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -4,10 +4,10 @@ import hiero from Qt import QtWidgets, QtCore from 
avalon.vendor import qargparse import avalon.api as avalon -import openpype.api as pype +import openpype.api as openpype from . import lib -log = pype.Logger().get_logger(__name__) +log = openpype.Logger().get_logger(__name__) def load_stylesheet(): @@ -266,7 +266,8 @@ class CreatorWidget(QtWidgets.QDialog): elif v["type"] == "QSpinBox": data[k]["value"] = self.create_row( content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMaximum=10000, setToolTip=tool_tip) + setValue=v["value"], setMinimum=0, + setMaximum=100000, setToolTip=tool_tip) return data @@ -387,7 +388,8 @@ class ClipLoader: # try to get value from options or evaluate key value for `load_to` self.new_sequence = options.get("newSequence") or bool( "New timeline" in options.get("load_to", "")) - + self.clip_name_template = options.get( + "clipNameTemplate") or "{asset}_{subset}_{representation}" assert self._populate_data(), str( "Cannot Load selected data, look into database " "or call your supervisor") @@ -432,7 +434,7 @@ class ClipLoader: asset = str(repr_cntx["asset"]) subset = str(repr_cntx["subset"]) representation = str(repr_cntx["representation"]) - self.data["clip_name"] = "_".join([asset, subset, representation]) + self.data["clip_name"] = self.clip_name_template.format(**repr_cntx) self.data["track_name"] = "_".join([subset, representation]) self.data["versionData"] = self.context["version"]["data"] # gets file path @@ -476,7 +478,7 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - self.data["assetData"] = pype.get_asset(asset_name)["data"] + self.data["assetData"] = openpype.get_asset(asset_name)["data"] def _make_track_item(self, source_bin_item, audio=False): """ Create track item with """ @@ -543,15 +545,9 @@ class ClipLoader: if "slate" in f), # if nothing was found then use default None # so other bool could be used - None) or bool((( - # put together duration of clip attributes - self.timeline_out - self.timeline_in + 1) \ - + self.handle_start \ - + self.handle_end - # and compare it with meda duration - ) > self.media_duration) - - print("__ slate_on: `{}`".format(slate_on)) + None) or bool(int( + (self.timeline_out - self.timeline_in + 1) + + self.handle_start + self.handle_end) < self.media_duration) # if slate is on then remove the slate frame from begining if slate_on: @@ -592,7 +588,7 @@ class ClipLoader: return track_item -class Creator(pype.Creator): +class Creator(openpype.Creator): """Creator class wrapper """ clip_color = "Purple" @@ -601,7 +597,7 @@ class Creator(pype.Creator): def __init__(self, *args, **kwargs): import openpype.hosts.hiero.api as phiero super(Creator, self).__init__(*args, **kwargs) - self.presets = pype.get_current_project_settings()[ + self.presets = openpype.get_current_project_settings()[ "hiero"]["create"].get(self.__class__.__name__, {}) # adding basic current context resolve objects @@ -674,6 +670,9 @@ class PublishClip: if kwargs.get("avalon"): self.tag_data.update(kwargs["avalon"]) + # add publish attribute to tag data + self.tag_data.update({"publish": True}) + # adding ui inputs if any self.ui_inputs = kwargs.get("ui_inputs", {}) @@ -687,6 +686,7 @@ class PublishClip: self._create_parents() def convert(self): + # solve track item data and add them to tag data self._convert_to_tag_data() @@ -705,6 +705,12 @@ class PublishClip: self.tag_data["asset"] = new_name else: self.tag_data["asset"] = self.ti_name + self.tag_data["hierarchyData"]["shot"] = self.ti_name + + if self.tag_data["heroTrack"] and self.review_layer: + 
self.tag_data.update({"reviewTrack": self.review_layer}) + else: + self.tag_data.update({"reviewTrack": None}) # create pype tag on track_item and add data lib.imprint(self.track_item, self.tag_data) @@ -773,8 +779,8 @@ class PublishClip: _spl = text.split("#") _len = (len(_spl) - 1) _repl = "{{{0}:0>{1}}}".format(name, _len) - new_text = text.replace(("#" * _len), _repl) - return new_text + return text.replace(("#" * _len), _repl) + def _convert_to_tag_data(self): """ Convert internal data to tag data. @@ -782,13 +788,13 @@ class PublishClip: Populating the tag data into internal variable self.tag_data """ # define vertical sync attributes - master_layer = True + hero_track = True self.review_layer = "" if self.vertical_sync: # check if track name is not in driving layer if self.track_name not in self.driving_layer: # if it is not then define vertical sync as None - master_layer = False + hero_track = False # increasing steps by index of rename iteration self.count_steps *= self.rename_index @@ -802,7 +808,7 @@ class PublishClip: self.tag_data[_k] = _v["value"] # driving layer is set as positive match - if master_layer or self.vertical_sync: + if hero_track or self.vertical_sync: # mark review layer if self.review_track and ( self.review_track not in self.review_track_default): @@ -836,40 +842,40 @@ class PublishClip: hierarchy_formating_data ) - tag_hierarchy_data.update({"masterLayer": True}) - if master_layer and self.vertical_sync: + tag_hierarchy_data.update({"heroTrack": True}) + if hero_track and self.vertical_sync: self.vertical_clip_match.update({ (self.clip_in, self.clip_out): tag_hierarchy_data }) - if not master_layer and self.vertical_sync: + if not hero_track and self.vertical_sync: # driving layer is set as negative match - for (_in, _out), master_data in self.vertical_clip_match.items(): - master_data.update({"masterLayer": False}) + for (_in, _out), hero_data in self.vertical_clip_match.items(): + hero_data.update({"heroTrack": False}) if _in == self.clip_in and _out == self.clip_out: - data_subset = master_data["subset"] - # add track index in case duplicity of names in master data + data_subset = hero_data["subset"] + # add track index in case duplicity of names in hero data if self.subset in data_subset: - master_data["subset"] = self.subset + str( + hero_data["subset"] = self.subset + str( self.track_index) # in case track name and subset name is the same then add if self.subset_name == self.track_name: - master_data["subset"] = self.subset + hero_data["subset"] = self.subset # assing data to return hierarchy data to tag - tag_hierarchy_data = master_data + tag_hierarchy_data = hero_data # add data to return data dict self.tag_data.update(tag_hierarchy_data) - if master_layer and self.review_layer: - self.tag_data.update({"reviewTrack": self.review_layer}) - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): """ Solve tag data from hierarchy data and templates. 
""" # fill up clip name and hierarchy keys hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + # remove shot from hierarchy data: is not needed anymore + hierarchy_formating_data.pop("shot") + return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, diff --git a/openpype/hosts/hiero/api/tags.py b/openpype/hosts/hiero/api/tags.py index 06fa655a2e..d2502f3c71 100644 --- a/openpype/hosts/hiero/api/tags.py +++ b/openpype/hosts/hiero/api/tags.py @@ -84,6 +84,13 @@ def update_tag(tag, data): mtd = tag.metadata() # get metadata key from data data_mtd = data.get("metadata", {}) + + # due to hiero bug we have to make sure keys which are not existent in + # data are cleared of value by `None` + for _mk in mtd.keys(): + if _mk.replace("tag.", "") not in data_mtd.keys(): + mtd.setValue(_mk, str(None)) + # set all data metadata to tag metadata for k, v in data_mtd.items(): mtd.setValue( diff --git a/openpype/hosts/hiero/otio/__init__.py b/openpype/hosts/hiero/otio/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/hosts/hiero/otio/hiero_export.py b/openpype/hosts/hiero/otio/hiero_export.py new file mode 100644 index 0000000000..6e751d3aa4 --- /dev/null +++ b/openpype/hosts/hiero/otio/hiero_export.py @@ -0,0 +1,366 @@ +""" compatibility OpenTimelineIO 0.12.0 and newer +""" + +import os +import re +import sys +import ast +from compiler.ast import flatten +import opentimelineio as otio +from . import utils +import hiero.core +import hiero.ui + +self = sys.modules[__name__] +self.track_types = { + hiero.core.VideoTrack: otio.schema.TrackKind.Video, + hiero.core.AudioTrack: otio.schema.TrackKind.Audio +} +self.project_fps = None +self.marker_color_map = { + "magenta": otio.schema.MarkerColor.MAGENTA, + "red": otio.schema.MarkerColor.RED, + "yellow": otio.schema.MarkerColor.YELLOW, + "green": otio.schema.MarkerColor.GREEN, + "cyan": otio.schema.MarkerColor.CYAN, + "blue": otio.schema.MarkerColor.BLUE, +} +self.timeline = None +self.include_tags = True + + +def get_current_hiero_project(remove_untitled=False): + projects = flatten(hiero.core.projects()) + if not remove_untitled: + return next(iter(projects)) + + # if remove_untitled + for proj in projects: + if "Untitled" in proj.name(): + proj.close() + else: + return proj + + +def create_otio_rational_time(frame, fps): + return otio.opentime.RationalTime( + float(frame), + float(fps) + ) + + +def create_otio_time_range(start_frame, frame_duration, fps): + return otio.opentime.TimeRange( + start_time=create_otio_rational_time(start_frame, fps), + duration=create_otio_rational_time(frame_duration, fps) + ) + + +def _get_metadata(item): + if hasattr(item, 'metadata'): + return {key: value for key, value in dict(item.metadata()).items()} + return {} + + +def create_otio_reference(clip): + metadata = _get_metadata(clip) + media_source = clip.mediaSource() + + # get file info for path and start frame + file_info = media_source.fileinfos().pop() + frame_start = file_info.startFrame() + path = file_info.filename() + + # get padding and other file infos + padding = media_source.filenamePadding() + file_head = media_source.filenameHead() + is_sequence = not media_source.singleFile() + frame_duration = media_source.duration() + fps = utils.get_rate(clip) or self.project_fps + extension = os.path.splitext(path)[-1] + + if is_sequence: + metadata.update({ + "isSequence": True, + "padding": padding + }) + + # add resolution metadata + 
metadata.update({ + "openpype.source.colourtransform": clip.sourceMediaColourTransform(), + "openpype.source.width": int(media_source.width()), + "openpype.source.height": int(media_source.height()), + "openpype.source.pixelAspect": float(media_source.pixelAspect()) + }) + + otio_ex_ref_item = None + + if is_sequence: + # if it is file sequence try to create `ImageSequenceReference` + # the OTIO might not be compatible so return nothing and do it old way + try: + dirname = os.path.dirname(path) + otio_ex_ref_item = otio.schema.ImageSequenceReference( + target_url_base=dirname + os.sep, + name_prefix=file_head, + name_suffix=extension, + start_frame=frame_start, + frame_zero_padding=padding, + rate=fps, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps + ) + ) + except AttributeError: + pass + + if not otio_ex_ref_item: + reformat_path = utils.get_reformated_path(path, padded=False) + # in case old OTIO or video file create `ExternalReference` + otio_ex_ref_item = otio.schema.ExternalReference( + target_url=reformat_path, + available_range=create_otio_time_range( + frame_start, + frame_duration, + fps + ) + ) + + # add metadata to otio item + add_otio_metadata(otio_ex_ref_item, media_source, **metadata) + + return otio_ex_ref_item + + +def get_marker_color(tag): + icon = tag.icon() + pat = r'icons:Tag(?P\w+)\.\w+' + + res = re.search(pat, icon) + if res: + color = res.groupdict().get('color') + if color.lower() in self.marker_color_map: + return self.marker_color_map[color.lower()] + + return otio.schema.MarkerColor.RED + + +def create_otio_markers(otio_item, item): + for tag in item.tags(): + if not tag.visible(): + continue + + if tag.name() == 'Copy': + # Hiero adds this tag to a lot of clips + continue + + frame_rate = utils.get_rate(item) or self.project_fps + + marked_range = otio.opentime.TimeRange( + start_time=otio.opentime.RationalTime( + tag.inTime(), + frame_rate + ), + duration=otio.opentime.RationalTime( + int(tag.metadata().dict().get('tag.length', '0')), + frame_rate + ) + ) + # add tag metadata but remove "tag." 
string + metadata = {} + + for key, value in tag.metadata().dict().items(): + _key = key.replace("tag.", "") + + try: + # capture exceptions which are related to strings only + _value = ast.literal_eval(value) + except (ValueError, SyntaxError): + _value = value + + metadata.update({_key: _value}) + + # Store the source item for future import assignment + metadata['hiero_source_type'] = item.__class__.__name__ + + marker = otio.schema.Marker( + name=tag.name(), + color=get_marker_color(tag), + marked_range=marked_range, + metadata=metadata + ) + + otio_item.markers.append(marker) + + +def create_otio_clip(track_item): + clip = track_item.source() + source_in = track_item.sourceIn() + duration = track_item.sourceDuration() + fps = utils.get_rate(track_item) or self.project_fps + name = track_item.name() + + media_reference = create_otio_reference(clip) + source_range = create_otio_time_range( + int(source_in), + int(duration), + fps + ) + + otio_clip = otio.schema.Clip( + name=name, + source_range=source_range, + media_reference=media_reference + ) + + # Add tags as markers + if self.include_tags: + create_otio_markers(otio_clip, track_item) + create_otio_markers(otio_clip, track_item.source()) + + return otio_clip + + +def create_otio_gap(gap_start, clip_start, tl_start_frame, fps): + return otio.schema.Gap( + source_range=create_otio_time_range( + gap_start, + (clip_start - tl_start_frame) - gap_start, + fps + ) + ) + + +def _create_otio_timeline(): + project = get_current_hiero_project(remove_untitled=False) + metadata = _get_metadata(self.timeline) + + metadata.update({ + "openpype.timeline.width": int(self.timeline.format().width()), + "openpype.timeline.height": int(self.timeline.format().height()), + "openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()), # noqa + "openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(), # noqa + "openpype.project.lutSetting16Bit": project.lutSetting16Bit(), + "openpype.project.lutSetting8Bit": project.lutSetting8Bit(), + "openpype.project.lutSettingFloat": project.lutSettingFloat(), + "openpype.project.lutSettingLog": project.lutSettingLog(), + "openpype.project.lutSettingViewer": project.lutSettingViewer(), + "openpype.project.lutSettingWorkingSpace": project.lutSettingWorkingSpace(), # noqa + "openpype.project.lutUseOCIOForExport": project.lutUseOCIOForExport(), + "openpype.project.ocioConfigName": project.ocioConfigName(), + "openpype.project.ocioConfigPath": project.ocioConfigPath() + }) + + start_time = create_otio_rational_time( + self.timeline.timecodeStart(), self.project_fps) + + return otio.schema.Timeline( + name=self.timeline.name(), + global_start_time=start_time, + metadata=metadata + ) + + +def create_otio_track(track_type, track_name): + return otio.schema.Track( + name=track_name, + kind=self.track_types[track_type] + ) + + +def add_otio_gap(track_item, otio_track, prev_out): + gap_length = track_item.timelineIn() - prev_out + if prev_out != 0: + gap_length -= 1 + + gap = otio.opentime.TimeRange( + duration=otio.opentime.RationalTime( + gap_length, + self.project_fps + ) + ) + otio_gap = otio.schema.Gap(source_range=gap) + otio_track.append(otio_gap) + + +def add_otio_metadata(otio_item, media_source, **kwargs): + metadata = _get_metadata(media_source) + + # add additional metadata from kwargs + if kwargs: + metadata.update(kwargs) + + # add metadata to otio item metadata + for key, value in metadata.items(): + otio_item.metadata.update({key: value}) + + +def create_otio_timeline(): + + 
# get current timeline + self.timeline = hiero.ui.activeSequence() + self.project_fps = self.timeline.framerate().toFloat() + + # convert timeline to otio + otio_timeline = _create_otio_timeline() + + # loop all defined track types + for track in self.timeline.items(): + # skip if track is disabled + if not track.isEnabled(): + continue + + # convert track to otio + otio_track = create_otio_track( + type(track), track.name()) + + for itemindex, track_item in enumerate(track): + # skip offline track items + if not track_item.isMediaPresent(): + continue + + # skip if track item is disabled + if not track_item.isEnabled(): + continue + + # Add Gap if needed + if itemindex == 0: + # if it is first track item at track then add + # it to previouse item + prev_item = track_item + + else: + # get previouse item + prev_item = track_item.parent().items()[itemindex - 1] + + # calculate clip frame range difference from each other + clip_diff = track_item.timelineIn() - prev_item.timelineOut() + + # add gap if first track item is not starting + # at first timeline frame + if itemindex == 0 and track_item.timelineIn() > 0: + add_otio_gap(track_item, otio_track, 0) + + # or add gap if following track items are having + # frame range differences from each other + elif itemindex and clip_diff != 1: + add_otio_gap(track_item, otio_track, prev_item.timelineOut()) + + # create otio clip and add it to track + otio_clip = create_otio_clip(track_item) + otio_track.append(otio_clip) + + # Add tags as markers + if self.include_tags: + create_otio_markers(otio_track, track) + + # add track to otio timeline + otio_timeline.tracks.append(otio_track) + + return otio_timeline + + +def write_to_file(otio_timeline, path): + otio.adapters.write_to_file(otio_timeline, path) diff --git a/openpype/hosts/hiero/otio/hiero_import.py b/openpype/hosts/hiero/otio/hiero_import.py new file mode 100644 index 0000000000..257c434011 --- /dev/null +++ b/openpype/hosts/hiero/otio/hiero_import.py @@ -0,0 +1,545 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] + + +import os +import hiero.core +import hiero.ui + +import PySide2.QtWidgets as qw + +try: + from urllib import unquote + +except ImportError: + from urllib.parse import unquote # lint:ok + +import opentimelineio as otio + +_otio_old = False + + +def inform(messages): + if isinstance(messages, type('')): + messages = [messages] + + qw.QMessageBox.information( + hiero.ui.mainWindow(), + 'OTIO Import', + '\n'.join(messages), + qw.QMessageBox.StandardButton.Ok + ) + + +def get_transition_type(otio_item, otio_track): + _in, _out = otio_track.neighbors_of(otio_item) + + if isinstance(_in, otio.schema.Gap): + _in = None + + if isinstance(_out, otio.schema.Gap): + _out = None + + if _in and _out: + return 'dissolve' + + elif _in and not _out: + return 'fade_out' + + elif not _in and _out: + return 'fade_in' + + else: + return 'unknown' + + +def find_trackitem(otio_clip, hiero_track): + for item in hiero_track.items(): + if item.timelineIn() == otio_clip.range_in_parent().start_time.value: + if item.name() == otio_clip.name: + return item + + return None + + +def get_neighboring_trackitems(otio_item, otio_track, hiero_track): + _in, _out = otio_track.neighbors_of(otio_item) + trackitem_in = None + trackitem_out = None + + if _in: + trackitem_in = find_trackitem(_in, hiero_track) + + if _out: + trackitem_out = find_trackitem(_out, hiero_track) + + return trackitem_in, trackitem_out + + +def 
apply_transition(otio_track, otio_item, track): + warning = None + + # Figure out type of transition + transition_type = get_transition_type(otio_item, otio_track) + + # Figure out track kind for getattr below + kind = '' + if isinstance(track, hiero.core.AudioTrack): + kind = 'Audio' + + # Gather TrackItems involved in trasition + item_in, item_out = get_neighboring_trackitems( + otio_item, + otio_track, + track + ) + + # Create transition object + if transition_type == 'dissolve': + transition_func = getattr( + hiero.core.Transition, + 'create{kind}DissolveTransition'.format(kind=kind) + ) + + try: + transition = transition_func( + item_in, + item_out, + otio_item.in_offset.value, + otio_item.out_offset.value + ) + + # Catch error raised if transition is bigger than TrackItem source + except RuntimeError as e: + transition = None + warning = ( + "Unable to apply transition \"{t.name}\": {e} " + "Ignoring the transition.").format(t=otio_item, e=str(e)) + + elif transition_type == 'fade_in': + transition_func = getattr( + hiero.core.Transition, + 'create{kind}FadeInTransition'.format(kind=kind) + ) + + # Warn user if part of fade is outside of clip + if otio_item.in_offset.value: + warning = \ + 'Fist half of transition "{t.name}" is outside of clip and ' \ + 'not valid in Hiero. Only applied second half.' \ + .format(t=otio_item) + + transition = transition_func( + item_out, + otio_item.out_offset.value + ) + + elif transition_type == 'fade_out': + transition_func = getattr( + hiero.core.Transition, + 'create{kind}FadeOutTransition'.format(kind=kind) + ) + transition = transition_func( + item_in, + otio_item.in_offset.value + ) + + # Warn user if part of fade is outside of clip + if otio_item.out_offset.value: + warning = \ + 'Second half of transition "{t.name}" is outside of clip ' \ + 'and not valid in Hiero. Only applied first half.' 
\ + .format(t=otio_item) + + else: + # Unknown transition + return + + # Apply transition to track + if transition: + track.addTransition(transition) + + # Inform user about missing or adjusted transitions + return warning + + +def prep_url(url_in): + url = unquote(url_in) + + if url.startswith('file://localhost/'): + return url + + url = 'file://localhost{sep}{url}'.format( + sep=url.startswith(os.sep) and '' or os.sep, + url=url.startswith(os.sep) and url[1:] or url + ) + + return url + + +def create_offline_mediasource(otio_clip, path=None): + global _otio_old + + hiero_rate = hiero.core.TimeBase( + otio_clip.source_range.start_time.rate + ) + + try: + legal_media_refs = ( + otio.schema.ExternalReference, + otio.schema.ImageSequenceReference + ) + except AttributeError: + _otio_old = True + legal_media_refs = ( + otio.schema.ExternalReference + ) + + if isinstance(otio_clip.media_reference, legal_media_refs): + source_range = otio_clip.available_range() + + else: + source_range = otio_clip.source_range + + if path is None: + path = otio_clip.name + + media = hiero.core.MediaSource.createOfflineVideoMediaSource( + prep_url(path), + source_range.start_time.value, + source_range.duration.value, + hiero_rate, + source_range.start_time.value + ) + + return media + + +def load_otio(otio_file, project=None, sequence=None): + otio_timeline = otio.adapters.read_from_file(otio_file) + build_sequence(otio_timeline, project=project, sequence=sequence) + + +marker_color_map = { + "PINK": "Magenta", + "RED": "Red", + "ORANGE": "Yellow", + "YELLOW": "Yellow", + "GREEN": "Green", + "CYAN": "Cyan", + "BLUE": "Blue", + "PURPLE": "Magenta", + "MAGENTA": "Magenta", + "BLACK": "Blue", + "WHITE": "Green" +} + + +def get_tag(tagname, tagsbin): + for tag in tagsbin.items(): + if tag.name() == tagname: + return tag + + if isinstance(tag, hiero.core.Bin): + tag = get_tag(tagname, tag) + + if tag is not None: + return tag + + return None + + +def add_metadata(metadata, hiero_item): + for key, value in metadata.get('Hiero', dict()).items(): + if key == 'source_type': + # Only used internally to reassign tag to correct Hiero item + continue + + if isinstance(value, dict): + add_metadata(value, hiero_item) + continue + + if value is not None: + if not key.startswith('tag.'): + key = 'tag.' 
+ key + + hiero_item.metadata().setValue(key, str(value)) + + +def add_markers(otio_item, hiero_item, tagsbin): + if isinstance(otio_item, (otio.schema.Stack, otio.schema.Clip)): + markers = otio_item.markers + + elif isinstance(otio_item, otio.schema.Timeline): + markers = otio_item.tracks.markers + + else: + markers = [] + + for marker in markers: + meta = marker.metadata.get('Hiero', dict()) + if 'source_type' in meta: + if hiero_item.__class__.__name__ != meta.get('source_type'): + continue + + marker_color = marker.color + + _tag = get_tag(marker.name, tagsbin) + if _tag is None: + _tag = get_tag(marker_color_map[marker_color], tagsbin) + + if _tag is None: + _tag = hiero.core.Tag(marker_color_map[marker.color]) + + start = marker.marked_range.start_time.value + end = ( + marker.marked_range.start_time.value + + marker.marked_range.duration.value + ) + + if hasattr(hiero_item, 'addTagToRange'): + tag = hiero_item.addTagToRange(_tag, start, end) + + else: + tag = hiero_item.addTag(_tag) + + tag.setName(marker.name or marker_color_map[marker_color]) + # tag.setNote(meta.get('tag.note', '')) + + # Add metadata + add_metadata(marker.metadata, tag) + + +def create_track(otio_track, tracknum, track_kind): + if track_kind is None and hasattr(otio_track, 'kind'): + track_kind = otio_track.kind + + # Create a Track + if track_kind == otio.schema.TrackKind.Video: + track = hiero.core.VideoTrack( + otio_track.name or 'Video{n}'.format(n=tracknum) + ) + + else: + track = hiero.core.AudioTrack( + otio_track.name or 'Audio{n}'.format(n=tracknum) + ) + + return track + + +def create_clip(otio_clip, tagsbin, sequencebin): + # Create MediaSource + url = None + media = None + otio_media = otio_clip.media_reference + + if isinstance(otio_media, otio.schema.ExternalReference): + url = prep_url(otio_media.target_url) + media = hiero.core.MediaSource(url) + + elif not _otio_old: + if isinstance(otio_media, otio.schema.ImageSequenceReference): + url = prep_url(otio_media.abstract_target_url('#')) + media = hiero.core.MediaSource(url) + + if media is None or media.isOffline(): + media = create_offline_mediasource(otio_clip, url) + + # Reuse previous clip if possible + clip = None + for item in sequencebin.clips(): + if item.activeItem().mediaSource() == media: + clip = item.activeItem() + break + + if not clip: + # Create new Clip + clip = hiero.core.Clip(media) + + # Add Clip to a Bin + sequencebin.addItem(hiero.core.BinItem(clip)) + + # Add markers + add_markers(otio_clip, clip, tagsbin) + + return clip + + +def create_trackitem(playhead, track, otio_clip, clip): + source_range = otio_clip.source_range + + trackitem = track.createTrackItem(otio_clip.name) + trackitem.setPlaybackSpeed(source_range.start_time.rate) + trackitem.setSource(clip) + + time_scalar = 1. + + # Check for speed effects and adjust playback speed accordingly + for effect in otio_clip.effects: + if isinstance(effect, otio.schema.LinearTimeWarp): + time_scalar = effect.time_scalar + # Only reverse effect can be applied here + if abs(time_scalar) == 1.: + trackitem.setPlaybackSpeed( + trackitem.playbackSpeed() * time_scalar) + + elif isinstance(effect, otio.schema.FreezeFrame): + # For freeze frame, playback speed must be set after range + time_scalar = 0. 
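+ # a time_scalar of 0 would zero out the playback speed if it were + # multiplied in here, so it is applied only after setTimes() below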
+ + # If reverse playback speed swap source in and out + if trackitem.playbackSpeed() < 0: + source_out = source_range.start_time.value + source_in = source_range.end_time_inclusive().value + + timeline_in = playhead + source_out + timeline_out = ( + timeline_in + + source_range.duration.value + ) - 1 + else: + # Normal playback speed + source_in = source_range.start_time.value + source_out = source_range.end_time_inclusive().value + + timeline_in = playhead + timeline_out = ( + timeline_in + + source_range.duration.value + ) - 1 + + # Set source and timeline in/out points + trackitem.setTimes( + timeline_in, + timeline_out, + source_in, + source_out + + ) + + # Apply playback speed for freeze frames + if abs(time_scalar) != 1.: + trackitem.setPlaybackSpeed(trackitem.playbackSpeed() * time_scalar) + + # Link audio to video when possible + if isinstance(track, hiero.core.AudioTrack): + for other in track.parent().trackItemsAt(playhead): + if other.source() == clip: + trackitem.link(other) + + return trackitem + + +def build_sequence( + otio_timeline, project=None, sequence=None, track_kind=None): + if project is None: + if sequence: + project = sequence.project() + + else: + # Per version 12.1v2 there is no way of getting active project + project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1] + + projectbin = project.clipsBin() + + if not sequence: + # Create a Sequence + sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence') + + # Set sequence settings from otio timeline if available + if ( + hasattr(otio_timeline, 'global_start_time') + and otio_timeline.global_start_time + ): + start_time = otio_timeline.global_start_time + sequence.setFramerate(start_time.rate) + sequence.setTimecodeStart(start_time.value) + + # Create a Bin to hold clips + projectbin.addItem(hiero.core.BinItem(sequence)) + + sequencebin = hiero.core.Bin(sequence.name()) + projectbin.addItem(sequencebin) + + else: + sequencebin = projectbin + + # Get tagsBin + tagsbin = hiero.core.project("Tag Presets").tagsBin() + + # Add timeline markers + add_markers(otio_timeline, sequence, tagsbin) + + if isinstance(otio_timeline, otio.schema.Timeline): + tracks = otio_timeline.tracks + + else: + tracks = [otio_timeline] + + for tracknum, otio_track in enumerate(tracks): + playhead = 0 + _transitions = [] + + # Add track to sequence + track = create_track(otio_track, tracknum, track_kind) + sequence.addTrack(track) + + # iterate over items in track + for _itemnum, otio_clip in enumerate(otio_track): + if isinstance(otio_clip, (otio.schema.Track, otio.schema.Stack)): + inform('Nested sequences/tracks are created separately.') + + # Add gap where the nested sequence would have been + playhead += otio_clip.source_range.duration.value + + # Process nested sequence + build_sequence( + otio_clip, + project=project, + track_kind=otio_track.kind + ) + + elif isinstance(otio_clip, otio.schema.Clip): + # Create a Clip + clip = create_clip(otio_clip, tagsbin, sequencebin) + + # Create TrackItem + trackitem = create_trackitem( + playhead, + track, + otio_clip, + clip + ) + + # Add markers + add_markers(otio_clip, trackitem, tagsbin) + + # Add trackitem to track + track.addTrackItem(trackitem) + + # Update playhead + playhead = trackitem.timelineOut() + 1 + + elif isinstance(otio_clip, otio.schema.Transition): + # Store transitions for when all clips in the track are created + _transitions.append((otio_track, otio_clip)) + + elif isinstance(otio_clip, otio.schema.Gap): + # Hiero has no fillers, slugs or blanks at the 
moment + playhead += otio_clip.source_range.duration.value + + # Apply transitions we stored earlier now that all clips are present + warnings = [] + for otio_track, otio_item in _transitions: + # Catch warnings from transitions in case + # of unsupported transitions + warning = apply_transition(otio_track, otio_item, track) + if warning: + warnings.append(warning) + + if warnings: + inform(warnings) diff --git a/openpype/hosts/hiero/otio/utils.py b/openpype/hosts/hiero/otio/utils.py new file mode 100644 index 0000000000..f882a5d1f2 --- /dev/null +++ b/openpype/hosts/hiero/otio/utils.py @@ -0,0 +1,76 @@ +import re +import opentimelineio as otio + + +def timecode_to_frames(timecode, framerate): + rt = otio.opentime.from_timecode(timecode, framerate) + return int(otio.opentime.to_frames(rt)) + + +def frames_to_timecode(frames, framerate): + rt = otio.opentime.from_frames(frames, framerate) + return otio.opentime.to_timecode(rt) + + +def frames_to_secons(frames, framerate): + rt = otio.opentime.from_frames(frames, framerate) + return otio.opentime.to_seconds(rt) + + +def get_reformated_path(path, padded=True): + """ + Return path with the frame expression reformatted + + Args: + path (str): path url or simple file name + + Returns: + str: string with reformatted path + + Example: + get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr + + """ + if "%" in path: + padding_pattern = r"(\d+)" + padding = int(re.findall(padding_pattern, path).pop()) + num_pattern = r"(%\d+d)" + if padded: + path = re.sub(num_pattern, "%0{}d".format(padding), path) + else: + path = re.sub(num_pattern, "%d", path) + return path + + +def get_padding_from_path(path): + """ + Return padding number from DaVinci Resolve sequence path style + + Args: + path (str): path url or simple file name + + Returns: + int: padding number + + Example: + get_padding_from_path("plate.[0001-1008].exr") > 4 + + """ + padding_pattern = "(\\d+)(?=-)" + if "[" in path: + return len(re.findall(padding_pattern, path).pop()) + + return None + + +def get_rate(item): + if not hasattr(item, 'framerate'): + return None + + num, den = item.framerate().toRational() + rate = float(num) / float(den) + + if rate.is_integer(): + return rate + + return round(rate, 4) diff --git a/openpype/hosts/hiero/plugins/create/create_shot_clip.py b/openpype/hosts/hiero/plugins/create/create_shot_clip.py index 07b7a62b2a..25be9f090b 100644 --- a/openpype/hosts/hiero/plugins/create/create_shot_clip.py +++ b/openpype/hosts/hiero/plugins/create/create_shot_clip.py @@ -120,9 +120,9 @@ class CreateShotClip(phiero.Creator): "vSyncTrack": { "value": gui_tracks, # noqa "type": "QComboBox", - "label": "Master track", + "label": "Hero track", "target": "ui", - "toolTip": "Select driving track name which should be mastering all others", # noqa + "toolTip": "Select driving track name which should be hero for all others", # noqa "order": 1} } }, diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index 4eadf28956..9e12fa360e 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -29,13 +29,19 @@ class LoadClip(phiero.SequenceLoader): clip_color_last = "green" clip_color = "red" - def load(self, context, name, namespace, options): + clip_name_template = "{asset}_{subset}_{representation}" + def load(self, context, name, namespace, options): + # add clip name template to options + options.update({ + "clipNameTemplate": self.clip_name_template + }) # in case loader uses 
multiselection if self.track and self.sequence: options.update({ "sequence": self.sequence, - "track": self.track + "track": self.track, + "clipNameTemplate": self.clip_name_template }) # load clip to timeline and get main variables @@ -45,7 +51,8 @@ class LoadClip(phiero.SequenceLoader): version_data = version.get("data", {}) version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) - object_name = "{}_{}".format(name, namespace) + object_name = self.clip_name_template.format( + **context["representation"]["context"]) # add additional metadata from the version to imprint Avalon knob add_keys = [ diff --git a/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..d12e7665bf --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish/extract_thumbnail.py @@ -0,0 +1,59 @@ +import os +import pyblish.api +import openpype.api + + +class ExtractThumnail(openpype.api.Extractor): + """ + Extractor for track item's tumnails + """ + + label = "Extract Thumnail" + order = pyblish.api.ExtractorOrder + families = ["plate", "take"] + hosts = ["hiero"] + + def process(self, instance): + # create representation data + if "representations" not in instance.data: + instance.data["representations"] = [] + + staging_dir = self.staging_dir(instance) + + self.create_thumbnail(staging_dir, instance) + + def create_thumbnail(self, staging_dir, instance): + track_item = instance.data["item"] + track_item_name = track_item.name() + + # frames + duration = track_item.sourceDuration() + frame_start = track_item.sourceIn() + self.log.debug( + "__ frame_start: `{}`, duration: `{}`".format( + frame_start, duration)) + + # get thumbnail frame from the middle + thumb_frame = int(frame_start + (duration / 2)) + + thumb_file = "{}thumbnail{}{}".format( + track_item_name, thumb_frame, ".png") + thumb_path = os.path.join(staging_dir, thumb_file) + + thumbnail = track_item.thumbnail(thumb_frame).save( + thumb_path, + format='png' + ) + self.log.debug( + "__ thumb_path: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) + + self.log.info("Thumnail was generated to: {}".format(thumb_path)) + thumb_representation = { + 'files': thumb_file, + 'stagingDir': staging_dir, + 'name': "thumbnail", + 'thumbnail': True, + 'ext': "png" + } + instance.data["representations"].append( + thumb_representation) diff --git a/openpype/hosts/hiero/plugins/publish/version_up_workfile.py b/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py similarity index 90% rename from openpype/hosts/hiero/plugins/publish/version_up_workfile.py rename to openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py index ae03513d78..934e7112fa 100644 --- a/openpype/hosts/hiero/plugins/publish/version_up_workfile.py +++ b/openpype/hosts/hiero/plugins/publish/integrate_version_up_workfile.py @@ -2,7 +2,7 @@ from pyblish import api import openpype.api as pype -class VersionUpWorkfile(api.ContextPlugin): +class IntegrateVersionUpWorkfile(api.ContextPlugin): """Save as new workfile version""" order = api.IntegratorOrder + 10.1 diff --git a/openpype/hosts/hiero/plugins/publish/precollect_instances.py b/openpype/hosts/hiero/plugins/publish/precollect_instances.py index bdf007de06..a1dee711b7 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_instances.py +++ b/openpype/hosts/hiero/plugins/publish/precollect_instances.py @@ -1,221 +1,204 @@ -from compiler.ast import flatten -from pyblish import api +import 
pyblish +import openpype from openpype.hosts.hiero import api as phiero -import hiero -# from openpype.hosts.hiero.api import lib -# reload(lib) -# reload(phiero) +from openpype.hosts.hiero.otio import hiero_export + +# # developer reload modules +from pprint import pformat -class PreCollectInstances(api.ContextPlugin): +class PrecollectInstances(pyblish.api.ContextPlugin): """Collect all Track items selection.""" - order = api.CollectorOrder - 0.509 - label = "Pre-collect Instances" + order = pyblish.api.CollectorOrder - 0.59 + label = "Precollect Instances" hosts = ["hiero"] def process(self, context): - track_items = phiero.get_track_items( - selected=True, check_tagged=True, check_enabled=True) - # only return enabled track items - if not track_items: - track_items = phiero.get_track_items( - check_enabled=True, check_tagged=True) - # get sequence and video tracks - sequence = context.data["activeSequence"] - tracks = sequence.videoTracks() - - # add collection to context - tracks_effect_items = self.collect_sub_track_items(tracks) - - context.data["tracksEffectItems"] = tracks_effect_items - + otio_timeline = context.data["otioTimeline"] + selected_timeline_items = phiero.get_track_items( + selected=True, check_enabled=True, check_tagged=True) self.log.info( - "Processing enabled track items: {}".format(len(track_items))) + "Processing enabled track items: {}".format( + selected_timeline_items)) + + for track_item in selected_timeline_items: - for _ti in track_items: data = dict() - clip = _ti.source() + clip_name = track_item.name() - # get clips subtracks and anotations - annotations = self.clip_annotations(clip) - subtracks = self.clip_subtrack(_ti) - self.log.debug("Annotations: {}".format(annotations)) - self.log.debug(">> Subtracks: {}".format(subtracks)) + # get openpype tag data + tag_data = phiero.get_track_item_pype_data(track_item) + self.log.debug("__ tag_data: {}".format(pformat(tag_data))) - # get pype tag data - tag_parsed_data = phiero.get_track_item_pype_data(_ti) - # self.log.debug(pformat(tag_parsed_data)) - - if not tag_parsed_data: + if not tag_data: continue - if tag_parsed_data.get("id") != "pyblish.avalon.instance": + if tag_data.get("id") != "pyblish.avalon.instance": continue + + # solve handles length + tag_data["handleStart"] = min( + tag_data["handleStart"], int(track_item.handleInLength())) + tag_data["handleEnd"] = min( + tag_data["handleEnd"], int(track_item.handleOutLength())) + # add tag data to instance data data.update({ - k: v for k, v in tag_parsed_data.items() + k: v for k, v in tag_data.items() if k not in ("id", "applieswhole", "label") }) - asset = tag_parsed_data["asset"] - subset = tag_parsed_data["subset"] - review = tag_parsed_data.get("review") - audio = tag_parsed_data.get("audio") - - # remove audio attribute from data - data.pop("audio") + asset = tag_data["asset"] + subset = tag_data["subset"] # insert family into families - family = tag_parsed_data["family"] - families = [str(f) for f in tag_parsed_data["families"]] + family = tag_data["family"] + families = [str(f) for f in tag_data["families"]] families.insert(0, str(family)) - track = _ti.parent() - media_source = _ti.source().mediaSource() - source_path = media_source.firstpath() - file_head = media_source.filenameHead() - file_info = media_source.fileinfos().pop() - source_first_frame = int(file_info.startFrame()) - - # apply only for feview and master track instance - if review: - families += ["review", "ftrack"] + # form label + label = asset + if asset != clip_name: + label 
+= " ({})".format(clip_name) + label += " {}".format(subset) + label += " {}".format("[" + ", ".join(families) + "]") data.update({ - "name": "{} {} {}".format(asset, subset, families), + "name": "{}_{}".format(asset, subset), + "label": label, "asset": asset, - "item": _ti, + "item": track_item, "families": families, - - # tags - "tags": _ti.tags(), - - # track item attributes - "track": track.name(), - "trackItem": track, - - # version data - "versionData": { - "colorspace": _ti.sourceMediaColourTransform() - }, - - # source attribute - "source": source_path, - "sourceMedia": media_source, - "sourcePath": source_path, - "sourceFileHead": file_head, - "sourceFirst": source_first_frame, - - # clip's effect - "clipEffectItems": subtracks + "publish": tag_data["publish"], + "fps": context.data["fps"] }) + # otio clip data + otio_data = self.get_otio_clip_instance_data( + otio_timeline, track_item) or {} + self.log.debug("__ otio_data: {}".format(pformat(otio_data))) + data.update(otio_data) + self.log.debug("__ data: {}".format(pformat(data))) + + # add resolution + self.get_resolution_to_data(data, context) + + # create instance instance = context.create_instance(**data) + # create shot instance for shot attributes create/update + self.create_shot_instance(context, **data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) - if audio: - a_data = dict() + def get_resolution_to_data(self, data, context): + assert data.get("otioClip"), "Missing `otioClip` data" - # add tag data to instance data - a_data.update({ - k: v for k, v in tag_parsed_data.items() - if k not in ("id", "applieswhole", "label") - }) + # solve source resolution option + if data.get("sourceResolution", None): + otio_clip_metadata = data[ + "otioClip"].media_reference.metadata + data.update({ + "resolutionWidth": otio_clip_metadata[ + "openpype.source.width"], + "resolutionHeight": otio_clip_metadata[ + "openpype.source.height"], + "pixelAspect": otio_clip_metadata[ + "openpype.source.pixelAspect"] + }) + else: + otio_tl_metadata = context.data["otioTimeline"].metadata + data.update({ + "resolutionWidth": otio_tl_metadata["openpype.timeline.width"], + "resolutionHeight": otio_tl_metadata[ + "openpype.timeline.height"], + "pixelAspect": otio_tl_metadata[ + "openpype.timeline.pixelAspect"] + }) - # create main attributes - subset = "audioMain" - family = "audio" - families = ["clip", "ftrack"] - families.insert(0, str(family)) + def create_shot_instance(self, context, **data): + master_layer = data.get("heroTrack") + hierarchy_data = data.get("hierarchyData") + asset = data.get("asset") + item = data.get("item") + clip_name = item.name() - name = "{} {} {}".format(asset, subset, families) + if not master_layer: + return - a_data.update({ - "name": name, - "subset": subset, - "asset": asset, - "family": family, - "families": families, - "item": _ti, + if not hierarchy_data: + return - # tags - "tags": _ti.tags(), - }) + asset = data["asset"] + subset = "shotMain" - a_instance = context.create_instance(**a_data) - self.log.info("Creating audio instance: {}".format(a_instance)) + # insert family into families + family = "shot" + + # form label + label = asset + if asset != clip_name: + label += " ({}) ".format(clip_name) + label += " {}".format(subset) + label += " [{}]".format(family) + + data.update({ + "name": "{}_{}".format(asset, subset), + "label": label, + "subset": subset, + "asset": asset, + "family": family, + "families": [] + }) + + 
instance = context.create_instance(**data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug( + "_ instance.data: {}".format(pformat(instance.data))) + + def get_otio_clip_instance_data(self, otio_timeline, track_item): + """ + Return otio objects for timeline, track and clip + + Args: + timeline_item_data (dict): timeline_item_data from list returned by + resolve.get_current_timeline_items() + otio_timeline (otio.schema.Timeline): otio object + + Returns: + dict: otio clip object + + """ + ti_track_name = track_item.parent().name() + timeline_range = self.create_otio_time_range_from_timeline_item_data( + track_item) + for otio_clip in otio_timeline.each_clip(): + track_name = otio_clip.parent().name + parent_range = otio_clip.range_in_parent() + if ti_track_name not in track_name: + continue + if otio_clip.name not in track_item.name(): + continue + if openpype.lib.is_overlapping_otio_ranges( + parent_range, timeline_range, strict=True): + + # add pypedata marker to otio_clip metadata + for marker in otio_clip.markers: + if phiero.pype_tag_name in marker.name: + otio_clip.metadata.update(marker.metadata) + return {"otioClip": otio_clip} + + return None @staticmethod - def clip_annotations(clip): - """ - Returns list of Clip's hiero.core.Annotation - """ - annotations = [] - subTrackItems = flatten(clip.subTrackItems()) - annotations += [item for item in subTrackItems if isinstance( - item, hiero.core.Annotation)] - return annotations + def create_otio_time_range_from_timeline_item_data(track_item): + timeline = phiero.get_current_sequence() + frame_start = int(track_item.timelineIn()) + frame_duration = int(track_item.sourceDuration()) + fps = timeline.framerate().toFloat() - @staticmethod - def clip_subtrack(clip): - """ - Returns list of Clip's hiero.core.SubTrackItem - """ - subtracks = [] - subTrackItems = flatten(clip.parent().subTrackItems()) - for item in subTrackItems: - # avoid all anotation - if isinstance(item, hiero.core.Annotation): - continue - # # avoid all not anaibled - if not item.isEnabled(): - continue - subtracks.append(item) - return subtracks - - @staticmethod - def collect_sub_track_items(tracks): - """ - Returns dictionary with track index as key and list of subtracks - """ - # collect all subtrack items - sub_track_items = dict() - for track in tracks: - items = track.items() - - # skip if no clips on track > need track with effect only - if items: - continue - - # skip all disabled tracks - if not track.isEnabled(): - continue - - track_index = track.trackIndex() - _sub_track_items = flatten(track.subTrackItems()) - - # continue only if any subtrack items are collected - if len(_sub_track_items) < 1: - continue - - enabled_sti = list() - # loop all found subtrack items and check if they are enabled - for _sti in _sub_track_items: - # checking if not enabled - if not _sti.isEnabled(): - continue - if isinstance(_sti, hiero.core.Annotation): - continue - # collect the subtrack item - enabled_sti.append(_sti) - - # continue only if any subtrack items are collected - if len(enabled_sti) < 1: - continue - - # add collection of subtrackitems to dict - sub_track_items[track_index] = enabled_sti - - return sub_track_items + return hiero_export.create_otio_time_range( + frame_start, frame_duration, fps) diff --git a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py index ef7d07421b..bc4ef7e150 100644 --- a/openpype/hosts/hiero/plugins/publish/precollect_workfile.py +++ 
b/openpype/hosts/hiero/plugins/publish/precollect_workfile.py @@ -1,52 +1,57 @@ import os import pyblish.api +import hiero.ui from openpype.hosts.hiero import api as phiero from avalon import api as avalon +from pprint import pformat +from openpype.hosts.hiero.otio import hiero_export +from Qt.QtGui import QPixmap +import tempfile - -class PreCollectWorkfile(pyblish.api.ContextPlugin): +class PrecollectWorkfile(pyblish.api.ContextPlugin): """Inject the current working file into context""" - label = "Pre-collect Workfile" - order = pyblish.api.CollectorOrder - 0.51 + label = "Precollect Workfile" + order = pyblish.api.CollectorOrder - 0.6 def process(self, context): + asset = avalon.Session["AVALON_ASSET"] subset = "workfile" - project = phiero.get_current_project() - active_sequence = phiero.get_current_sequence() - video_tracks = active_sequence.videoTracks() - audio_tracks = active_sequence.audioTracks() - current_file = project.path() - staging_dir = os.path.dirname(current_file) - base_name = os.path.basename(current_file) + active_timeline = hiero.ui.activeSequence() + fps = active_timeline.framerate().toFloat() - # get workfile's colorspace properties - _clrs = {} - _clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() # noqa - _clrs["lutSetting16Bit"] = project.lutSetting16Bit() - _clrs["lutSetting8Bit"] = project.lutSetting8Bit() - _clrs["lutSettingFloat"] = project.lutSettingFloat() - _clrs["lutSettingLog"] = project.lutSettingLog() - _clrs["lutSettingViewer"] = project.lutSettingViewer() - _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace() - _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport() - _clrs["ocioConfigName"] = project.ocioConfigName() - _clrs["ocioConfigPath"] = project.ocioConfigPath() + # adding otio timeline to context + otio_timeline = hiero_export.create_otio_timeline() - # set main project attributes to context - context.data["activeProject"] = project - context.data["activeSequence"] = active_sequence - context.data["videoTracks"] = video_tracks - context.data["audioTracks"] = audio_tracks - context.data["currentFile"] = current_file - context.data["colorspace"] = _clrs + # get workfile thumnail paths + tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + thumbnail_name = "workfile_thumbnail.png" + thumbnail_path = os.path.join(tmp_staging, thumbnail_name) - self.log.info("currentFile: {}".format(current_file)) + # search for all windows with name of actual sequence + _windows = [w for w in hiero.ui.windowManager().windows() + if active_timeline.name() in w.windowTitle()] + + # export window to thumb path + QPixmap.grabWidget(_windows[-1]).save(thumbnail_path, 'png') + + # thumbnail + thumb_representation = { + 'files': thumbnail_name, + 'stagingDir': tmp_staging, + 'name': "thumbnail", + 'thumbnail': True, + 'ext': "png" + } + + # get workfile paths + curent_file = project.path() + staging_dir, base_name = os.path.split(curent_file) # creating workfile representation - representation = { + workfile_representation = { 'name': 'hrox', 'ext': 'hrox', 'files': base_name, @@ -59,16 +64,21 @@ class PreCollectWorkfile(pyblish.api.ContextPlugin): "subset": "{}{}".format(asset, subset.capitalize()), "item": project, "family": "workfile", - - # version data - "versionData": { - "colorspace": _clrs - }, - - # source attribute - "sourcePath": current_file, - "representations": [representation] + "representations": [workfile_representation, thumb_representation] } + # create instance with workfile instance = 
context.create_instance(**instance_data) + + # update context with main project attributes + context_data = { + "activeProject": project, + "otioTimeline": otio_timeline, + "currentFile": curent_file, + "fps": fps, + } + context.data.update(context_data) + self.log.info("Creating instance: {}".format(instance)) + self.log.debug("__ instance.data: {}".format(pformat(instance.data))) + self.log.debug("__ context_data: {}".format(pformat(context_data))) diff --git a/openpype/hosts/hiero/plugins/publish/collect_assetbuilds.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_assetbuilds.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_clip_resolution.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_clip_resolution.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_clip_resolution.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_frame_ranges.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_frame_ranges.py similarity index 97% rename from openpype/hosts/hiero/plugins/publish/collect_frame_ranges.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_frame_ranges.py index 39387578d2..21e12e89fa 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_frame_ranges.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_frame_ranges.py @@ -5,7 +5,7 @@ class CollectFrameRanges(pyblish.api.InstancePlugin): """ Collect all framranges. """ - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.1 label = "Collect Frame Ranges" hosts = ["hiero"] families = ["clip", "effect"] diff --git a/openpype/hosts/hiero/plugins/publish/collect_hierarchy_context.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_hierarchy_context.py similarity index 97% rename from openpype/hosts/hiero/plugins/publish/collect_hierarchy_context.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_hierarchy_context.py index ba3e388c53..0696a58e39 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_hierarchy_context.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_hierarchy_context.py @@ -39,8 +39,8 @@ class CollectHierarchy(pyblish.api.ContextPlugin): if not set(self.families).intersection(families): continue - # exclude if not masterLayer True - if not instance.data.get("masterLayer"): + # exclude if not heroTrack True + if not instance.data.get("heroTrack"): continue # update families to include `shot` for hierarchy integration diff --git a/openpype/hosts/hiero/plugins/publish/collect_host_version.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_host_version.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_host_version.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_plates.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_plates.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_plates.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_plates.py diff --git a/openpype/hosts/hiero/plugins/publish/collect_review.py 
b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_review.py similarity index 99% rename from openpype/hosts/hiero/plugins/publish/collect_review.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_review.py index a0ab00b355..b1d97a71d7 100644 --- a/openpype/hosts/hiero/plugins/publish/collect_review.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_review.py @@ -29,7 +29,7 @@ class CollectReview(api.InstancePlugin): Exception: description """ - review_track = instance.data.get("review") + review_track = instance.data.get("reviewTrack") video_tracks = instance.context.data["videoTracks"] for track in video_tracks: if review_track not in track.name(): diff --git a/openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py b/openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/collect_tag_tasks.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/collect_tag_tasks.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_audio.py b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_audio.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/extract_audio.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/extract_audio.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_clip_effects.py b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_clip_effects.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/extract_clip_effects.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/extract_clip_effects.py diff --git a/openpype/hosts/hiero/plugins/publish/extract_review_preparation.py b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_review_preparation.py similarity index 98% rename from openpype/hosts/hiero/plugins/publish/extract_review_preparation.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/extract_review_preparation.py index 5456ddc3c4..aac476e27a 100644 --- a/openpype/hosts/hiero/plugins/publish/extract_review_preparation.py +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/extract_review_preparation.py @@ -132,7 +132,7 @@ class ExtractReviewPreparation(openpype.api.Extractor): ).format(**locals()) self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) - audio_check_output = openpype.api.subprocess(ffprob_cmd) + audio_check_output = openpype.api.run_subprocess(ffprob_cmd) self.log.debug( "audio_check_output: {}".format(audio_check_output)) @@ -167,7 +167,7 @@ class ExtractReviewPreparation(openpype.api.Extractor): # try to get video native resolution data try: - resolution_output = openpype.api.subprocess(( + resolution_output = openpype.api.run_subprocess(( "\"{ffprobe_path}\" -i \"{full_input_path}\"" " -v error " "-select_streams v:0 -show_entries " @@ -280,7 +280,7 @@ class ExtractReviewPreparation(openpype.api.Extractor): # run subprocess self.log.debug("Executing: {}".format(subprcs_cmd)) - output = openpype.api.subprocess(subprcs_cmd) + output = openpype.api.run_subprocess(subprcs_cmd) self.log.debug("Output: {}".format(output)) repre_new = { diff --git a/openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_clip_effects.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/precollect_clip_effects.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/precollect_clip_effects.py diff --git 
a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py new file mode 100644 index 0000000000..f9cc158e79 --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_instances.py @@ -0,0 +1,223 @@ +from compiler.ast import flatten +from pyblish import api +from openpype.hosts.hiero import api as phiero +import hiero +# from openpype.hosts.hiero.api import lib +# reload(lib) +# reload(phiero) + + +class PreCollectInstances(api.ContextPlugin): + """Collect all Track items selection.""" + + order = api.CollectorOrder - 0.509 + label = "Pre-collect Instances" + hosts = ["hiero"] + + def process(self, context): + track_items = phiero.get_track_items( + selected=True, check_tagged=True, check_enabled=True) + # only return enabled track items + if not track_items: + track_items = phiero.get_track_items( + check_enabled=True, check_tagged=True) + # get sequence and video tracks + sequence = context.data["activeSequence"] + tracks = sequence.videoTracks() + + # add collection to context + tracks_effect_items = self.collect_sub_track_items(tracks) + + context.data["tracksEffectItems"] = tracks_effect_items + + self.log.info( + "Processing enabled track items: {}".format(len(track_items))) + + for _ti in track_items: + data = {} + clip = _ti.source() + + # get clips subtracks and anotations + annotations = self.clip_annotations(clip) + subtracks = self.clip_subtrack(_ti) + self.log.debug("Annotations: {}".format(annotations)) + self.log.debug(">> Subtracks: {}".format(subtracks)) + + # get pype tag data + tag_parsed_data = phiero.get_track_item_pype_data(_ti) + # self.log.debug(pformat(tag_parsed_data)) + + if not tag_parsed_data: + continue + + if tag_parsed_data.get("id") != "pyblish.avalon.instance": + continue + # add tag data to instance data + data.update({ + k: v for k, v in tag_parsed_data.items() + if k not in ("id", "applieswhole", "label") + }) + + asset = tag_parsed_data["asset"] + subset = tag_parsed_data["subset"] + review_track = tag_parsed_data.get("reviewTrack") + hiero_track = tag_parsed_data.get("heroTrack") + audio = tag_parsed_data.get("audio") + + # remove audio attribute from data + data.pop("audio") + + # insert family into families + family = tag_parsed_data["family"] + families = [str(f) for f in tag_parsed_data["families"]] + families.insert(0, str(family)) + + track = _ti.parent() + media_source = _ti.source().mediaSource() + source_path = media_source.firstpath() + file_head = media_source.filenameHead() + file_info = media_source.fileinfos().pop() + source_first_frame = int(file_info.startFrame()) + + # apply only for review and master track instance + if review_track and hiero_track: + families += ["review", "ftrack"] + + data.update({ + "name": "{} {} {}".format(asset, subset, families), + "asset": asset, + "item": _ti, + "families": families, + + # tags + "tags": _ti.tags(), + + # track item attributes + "track": track.name(), + "trackItem": track, + "reviewTrack": review_track, + + # version data + "versionData": { + "colorspace": _ti.sourceMediaColourTransform() + }, + + # source attribute + "source": source_path, + "sourceMedia": media_source, + "sourcePath": source_path, + "sourceFileHead": file_head, + "sourceFirst": source_first_frame, + + # clip's effect + "clipEffectItems": subtracks + }) + + instance = context.create_instance(**data) + + self.log.info("Creating instance.data: {}".format(instance.data)) + + if audio: + a_data = dict() + + # add 
tag data to instance data + a_data.update({ + k: v for k, v in tag_parsed_data.items() + if k not in ("id", "applieswhole", "label") + }) + + # create main attributes + subset = "audioMain" + family = "audio" + families = ["clip", "ftrack"] + families.insert(0, str(family)) + + name = "{} {} {}".format(asset, subset, families) + + a_data.update({ + "name": name, + "subset": subset, + "asset": asset, + "family": family, + "families": families, + "item": _ti, + + # tags + "tags": _ti.tags(), + }) + + a_instance = context.create_instance(**a_data) + self.log.info("Creating audio instance: {}".format(a_instance)) + + @staticmethod + def clip_annotations(clip): + """ + Returns list of Clip's hiero.core.Annotation + """ + annotations = [] + subTrackItems = flatten(clip.subTrackItems()) + annotations += [item for item in subTrackItems if isinstance( + item, hiero.core.Annotation)] + return annotations + + @staticmethod + def clip_subtrack(clip): + """ + Returns list of Clip's hiero.core.SubTrackItem + """ + subtracks = [] + subTrackItems = flatten(clip.parent().subTrackItems()) + for item in subTrackItems: + # avoid all anotation + if isinstance(item, hiero.core.Annotation): + continue + # # avoid all not anaibled + if not item.isEnabled(): + continue + subtracks.append(item) + return subtracks + + @staticmethod + def collect_sub_track_items(tracks): + """ + Returns dictionary with track index as key and list of subtracks + """ + # collect all subtrack items + sub_track_items = dict() + for track in tracks: + items = track.items() + + # skip if no clips on track > need track with effect only + if items: + continue + + # skip all disabled tracks + if not track.isEnabled(): + continue + + track_index = track.trackIndex() + _sub_track_items = flatten(track.subTrackItems()) + + # continue only if any subtrack items are collected + if len(_sub_track_items) < 1: + continue + + enabled_sti = list() + # loop all found subtrack items and check if they are enabled + for _sti in _sub_track_items: + # checking if not enabled + if not _sti.isEnabled(): + continue + if isinstance(_sti, hiero.core.Annotation): + continue + # collect the subtrack item + enabled_sti.append(_sti) + + # continue only if any subtrack items are collected + if len(enabled_sti) < 1: + continue + + # add collection of subtrackitems to dict + sub_track_items[track_index] = enabled_sti + + return sub_track_items diff --git a/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py new file mode 100644 index 0000000000..ef7d07421b --- /dev/null +++ b/openpype/hosts/hiero/plugins/publish_old_workflow/precollect_workfile.py @@ -0,0 +1,74 @@ +import os +import pyblish.api +from openpype.hosts.hiero import api as phiero +from avalon import api as avalon + + +class PreCollectWorkfile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + label = "Pre-collect Workfile" + order = pyblish.api.CollectorOrder - 0.51 + + def process(self, context): + asset = avalon.Session["AVALON_ASSET"] + subset = "workfile" + + project = phiero.get_current_project() + active_sequence = phiero.get_current_sequence() + video_tracks = active_sequence.videoTracks() + audio_tracks = active_sequence.audioTracks() + current_file = project.path() + staging_dir = os.path.dirname(current_file) + base_name = os.path.basename(current_file) + + # get workfile's colorspace properties + _clrs = {} + _clrs["useOCIOEnvironmentOverride"] = 
project.useOCIOEnvironmentOverride() # noqa + _clrs["lutSetting16Bit"] = project.lutSetting16Bit() + _clrs["lutSetting8Bit"] = project.lutSetting8Bit() + _clrs["lutSettingFloat"] = project.lutSettingFloat() + _clrs["lutSettingLog"] = project.lutSettingLog() + _clrs["lutSettingViewer"] = project.lutSettingViewer() + _clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace() + _clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport() + _clrs["ocioConfigName"] = project.ocioConfigName() + _clrs["ocioConfigPath"] = project.ocioConfigPath() + + # set main project attributes to context + context.data["activeProject"] = project + context.data["activeSequence"] = active_sequence + context.data["videoTracks"] = video_tracks + context.data["audioTracks"] = audio_tracks + context.data["currentFile"] = current_file + context.data["colorspace"] = _clrs + + self.log.info("currentFile: {}".format(current_file)) + + # creating workfile representation + representation = { + 'name': 'hrox', + 'ext': 'hrox', + 'files': base_name, + "stagingDir": staging_dir, + } + + instance_data = { + "name": "{}_{}".format(asset, subset), + "asset": asset, + "subset": "{}{}".format(asset, subset.capitalize()), + "item": project, + "family": "workfile", + + # version data + "versionData": { + "colorspace": _clrs + }, + + # source attribute + "sourcePath": current_file, + "representations": [representation] + } + + instance = context.create_instance(**instance_data) + self.log.info("Creating instance: {}".format(instance)) diff --git a/openpype/hosts/hiero/plugins/publish/validate_audio.py b/openpype/hosts/hiero/plugins/publish_old_workflow/validate_audio.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/validate_audio.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/validate_audio.py diff --git a/openpype/hosts/hiero/plugins/publish/validate_hierarchy.py b/openpype/hosts/hiero/plugins/publish_old_workflow/validate_hierarchy.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/validate_hierarchy.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/validate_hierarchy.py diff --git a/openpype/hosts/hiero/plugins/publish/validate_names.py b/openpype/hosts/hiero/plugins/publish_old_workflow/validate_names.py similarity index 100% rename from openpype/hosts/hiero/plugins/publish/validate_names.py rename to openpype/hosts/hiero/plugins/publish_old_workflow/validate_names.py diff --git a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py index 90504ccd18..7e1a8df2dc 100644 --- a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py +++ b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportTask.py @@ -1,338 +1,28 @@ -# MIT License -# -# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. +#!/usr/bin/env python +# -*- coding: utf-8 -*- +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] import os -import re import hiero.core from hiero.core import util import opentimelineio as otio - - -marker_color_map = { - "magenta": otio.schema.MarkerColor.MAGENTA, - "red": otio.schema.MarkerColor.RED, - "yellow": otio.schema.MarkerColor.YELLOW, - "green": otio.schema.MarkerColor.GREEN, - "cyan": otio.schema.MarkerColor.CYAN, - "blue": otio.schema.MarkerColor.BLUE, -} - +from openpype.hosts.hiero.otio import hiero_export class OTIOExportTask(hiero.core.TaskBase): def __init__(self, initDict): """Initialize""" hiero.core.TaskBase.__init__(self, initDict) + self.otio_timeline = None def name(self): return str(type(self)) - def get_rate(self, item): - if not hasattr(item, 'framerate'): - item = item.sequence() - - num, den = item.framerate().toRational() - rate = float(num) / float(den) - - if rate.is_integer(): - return rate - - return round(rate, 2) - - def get_clip_ranges(self, trackitem): - # Get rate from source or sequence - if trackitem.source().mediaSource().hasVideo(): - rate_item = trackitem.source() - - else: - rate_item = trackitem.sequence() - - source_rate = self.get_rate(rate_item) - - # Reversed video/audio - if trackitem.playbackSpeed() < 0: - start = trackitem.sourceOut() - - else: - start = trackitem.sourceIn() - - source_start_time = otio.opentime.RationalTime( - start, - source_rate - ) - source_duration = otio.opentime.RationalTime( - trackitem.duration(), - source_rate - ) - - source_range = otio.opentime.TimeRange( - start_time=source_start_time, - duration=source_duration - ) - - hiero_clip = trackitem.source() - - available_range = None - if hiero_clip.mediaSource().isMediaPresent(): - start_time = otio.opentime.RationalTime( - hiero_clip.mediaSource().startTime(), - source_rate - ) - duration = otio.opentime.RationalTime( - hiero_clip.mediaSource().duration(), - source_rate - ) - available_range = otio.opentime.TimeRange( - start_time=start_time, - duration=duration - ) - - return source_range, available_range - - def add_gap(self, trackitem, otio_track, prev_out): - gap_length = trackitem.timelineIn() - prev_out - if prev_out != 0: - gap_length -= 1 - - rate = self.get_rate(trackitem.sequence()) - gap = otio.opentime.TimeRange( - duration=otio.opentime.RationalTime( - gap_length, - rate - ) - ) - otio_gap = otio.schema.Gap(source_range=gap) - otio_track.append(otio_gap) - - def get_marker_color(self, tag): - icon = tag.icon() - pat = r'icons:Tag(?P\w+)\.\w+' - - res = re.search(pat, icon) - if res: - color = res.groupdict().get('color') - if color.lower() in marker_color_map: - return marker_color_map[color.lower()] - - return otio.schema.MarkerColor.RED - - def add_markers(self, hiero_item, otio_item): - for tag in hiero_item.tags(): - if not tag.visible(): - continue - - if tag.name() == 'Copy': - # Hiero adds this tag to a lot of clips - continue - - frame_rate = self.get_rate(hiero_item) - - marked_range = otio.opentime.TimeRange( - 
start_time=otio.opentime.RationalTime( - tag.inTime(), - frame_rate - ), - duration=otio.opentime.RationalTime( - int(tag.metadata().dict().get('tag.length', '0')), - frame_rate - ) - ) - - metadata = dict( - Hiero=tag.metadata().dict() - ) - # Store the source item for future import assignment - metadata['Hiero']['source_type'] = hiero_item.__class__.__name__ - - marker = otio.schema.Marker( - name=tag.name(), - color=self.get_marker_color(tag), - marked_range=marked_range, - metadata=metadata - ) - - otio_item.markers.append(marker) - - def add_clip(self, trackitem, otio_track, itemindex): - hiero_clip = trackitem.source() - - # Add Gap if needed - if itemindex == 0: - prev_item = trackitem - - else: - prev_item = trackitem.parent().items()[itemindex - 1] - - clip_diff = trackitem.timelineIn() - prev_item.timelineOut() - - if itemindex == 0 and trackitem.timelineIn() > 0: - self.add_gap(trackitem, otio_track, 0) - - elif itemindex and clip_diff != 1: - self.add_gap(trackitem, otio_track, prev_item.timelineOut()) - - # Create Clip - source_range, available_range = self.get_clip_ranges(trackitem) - - otio_clip = otio.schema.Clip( - name=trackitem.name(), - source_range=source_range - ) - - # Add media reference - media_reference = otio.schema.MissingReference() - if hiero_clip.mediaSource().isMediaPresent(): - source = hiero_clip.mediaSource() - first_file = source.fileinfos()[0] - path = first_file.filename() - - if "%" in path: - path = re.sub(r"%\d+d", "%d", path) - if "#" in path: - path = re.sub(r"#+", "%d", path) - - media_reference = otio.schema.ExternalReference( - target_url=u'{}'.format(path), - available_range=available_range - ) - - otio_clip.media_reference = media_reference - - # Add Time Effects - playbackspeed = trackitem.playbackSpeed() - if playbackspeed != 1: - if playbackspeed == 0: - time_effect = otio.schema.FreezeFrame() - - else: - time_effect = otio.schema.LinearTimeWarp( - time_scalar=playbackspeed - ) - otio_clip.effects.append(time_effect) - - # Add tags as markers - if self._preset.properties()["includeTags"]: - self.add_markers(trackitem, otio_clip) - self.add_markers(trackitem.source(), otio_clip) - - otio_track.append(otio_clip) - - # Add Transition if needed - if trackitem.inTransition() or trackitem.outTransition(): - self.add_transition(trackitem, otio_track) - - def add_transition(self, trackitem, otio_track): - transitions = [] - - if trackitem.inTransition(): - if trackitem.inTransition().alignment().name == 'kFadeIn': - transitions.append(trackitem.inTransition()) - - if trackitem.outTransition(): - transitions.append(trackitem.outTransition()) - - for transition in transitions: - alignment = transition.alignment().name - - if alignment == 'kFadeIn': - in_offset_frames = 0 - out_offset_frames = ( - transition.timelineOut() - transition.timelineIn() - ) + 1 - - elif alignment == 'kFadeOut': - in_offset_frames = ( - trackitem.timelineOut() - transition.timelineIn() - ) + 1 - out_offset_frames = 0 - - elif alignment == 'kDissolve': - in_offset_frames = ( - transition.inTrackItem().timelineOut() - - transition.timelineIn() - ) - out_offset_frames = ( - transition.timelineOut() - - transition.outTrackItem().timelineIn() - ) - - else: - # kUnknown transition is ignored - continue - - rate = trackitem.source().framerate().toFloat() - in_time = otio.opentime.RationalTime(in_offset_frames, rate) - out_time = otio.opentime.RationalTime(out_offset_frames, rate) - - otio_transition = otio.schema.Transition( - name=alignment, # Consider placing Hiero name in 
metadata - transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve, - in_offset=in_time, - out_offset=out_time - ) - - if alignment == 'kFadeIn': - otio_track.insert(-1, otio_transition) - - else: - otio_track.append(otio_transition) - - - def add_tracks(self): - for track in self._sequence.items(): - if isinstance(track, hiero.core.AudioTrack): - kind = otio.schema.TrackKind.Audio - - else: - kind = otio.schema.TrackKind.Video - - otio_track = otio.schema.Track(name=track.name(), kind=kind) - - for itemindex, trackitem in enumerate(track): - if isinstance(trackitem.source(), hiero.core.Clip): - self.add_clip(trackitem, otio_track, itemindex) - - self.otio_timeline.tracks.append(otio_track) - - # Add tags as markers - if self._preset.properties()["includeTags"]: - self.add_markers(self._sequence, self.otio_timeline.tracks) - - def create_OTIO(self): - self.otio_timeline = otio.schema.Timeline() - - # Set global start time based on sequence - self.otio_timeline.global_start_time = otio.opentime.RationalTime( - self._sequence.timecodeStart(), - self._sequence.framerate().toFloat() - ) - self.otio_timeline.name = self._sequence.name() - - self.add_tracks() - def startTask(self): - self.create_OTIO() + self.otio_timeline = hiero_export.create_otio_timeline() def taskStep(self): return False @@ -350,7 +40,7 @@ class OTIOExportTask(hiero.core.TaskBase): util.filesystem.makeDirs(dirname) # write otio file - otio.adapters.write_to_file(self.otio_timeline, exportPath) + hiero_export.write_to_file(self.otio_timeline, exportPath) # Catch all exceptions and log error except Exception as e: @@ -370,7 +60,7 @@ class OTIOExportPreset(hiero.core.TaskPresetBase): """Initialise presets to default values""" hiero.core.TaskPresetBase.__init__(self, OTIOExportTask, name) - self.properties()["includeTags"] = True + self.properties()["includeTags"] = hiero_export.include_tags = True self.properties().update(properties) def supportedItems(self): diff --git a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py index 887ff05ec8..9b83eefedf 100644 --- a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py +++ b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/OTIOExportUI.py @@ -1,3 +1,9 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] + import hiero.ui import OTIOExportTask @@ -14,6 +20,7 @@ except ImportError: FormLayout = QFormLayout # lint:ok +from openpype.hosts.hiero.otio import hiero_export class OTIOExportUI(hiero.ui.TaskUIBase): def __init__(self, preset): @@ -27,7 +34,7 @@ class OTIOExportUI(hiero.ui.TaskUIBase): def includeMarkersCheckboxChanged(self, state): # Slot to handle change of checkbox state - self._preset.properties()["includeTags"] = state == QtCore.Qt.Checked + hiero_export.include_tags = state == QtCore.Qt.Checked def populateUI(self, widget, exportTemplate): layout = widget.layout() diff --git a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py index 67e6e78d35..3c09655f01 100644 --- a/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py +++ b/openpype/hosts/hiero/startup/Python/Startup/otioexporter/__init__.py @@ -1,25 +1,3 @@ -# MIT License -# -# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios) -# -# Permission is hereby granted, free of charge, to any person 
obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - from OTIOExportTask import OTIOExportTask from OTIOExportUI import OTIOExportUI diff --git a/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py b/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py index 1503a9e9ac..0f0a643909 100644 --- a/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py +++ b/openpype/hosts/hiero/startup/Python/StartupUI/otioimporter/__init__.py @@ -1,42 +1,91 @@ -# MIT License -# -# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = "Daniel Flehner Heen" +__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"] import hiero.ui import hiero.core -from otioimporter.OTIOImport import load_otio +import PySide2.QtWidgets as qw + +from openpype.hosts.hiero.otio.hiero_import import load_otio + + +class OTIOProjectSelect(qw.QDialog): + + def __init__(self, projects, *args, **kwargs): + super(OTIOProjectSelect, self).__init__(*args, **kwargs) + self.setWindowTitle('Please select active project') + self.layout = qw.QVBoxLayout() + + self.label = qw.QLabel( + 'Unable to determine which project to import sequence to.\n' + 'Please select one.' 
+ ) + self.layout.addWidget(self.label) + + self.projects = qw.QComboBox() + self.projects.addItems(map(lambda p: p.name(), projects)) + self.layout.addWidget(self.projects) + + QBtn = qw.QDialogButtonBox.Ok | qw.QDialogButtonBox.Cancel + self.buttonBox = qw.QDialogButtonBox(QBtn) + self.buttonBox.accepted.connect(self.accept) + self.buttonBox.rejected.connect(self.reject) + + self.layout.addWidget(self.buttonBox) + self.setLayout(self.layout) + + +def get_sequence(view): + sequence = None + if isinstance(view, hiero.ui.TimelineEditor): + sequence = view.sequence() + + elif isinstance(view, hiero.ui.BinView): + for item in view.selection(): + if not hasattr(item, 'acitveItem'): + continue + + if isinstance(item.activeItem(), hiero.core.Sequence): + sequence = item.activeItem() + + return sequence def OTIO_menu_action(event): - otio_action = hiero.ui.createMenuAction( - 'Import OTIO', + # Menu actions + otio_import_action = hiero.ui.createMenuAction( + 'Import OTIO...', open_otio_file, icon=None ) - hiero.ui.registerAction(otio_action) + + otio_add_track_action = hiero.ui.createMenuAction( + 'New Track(s) from OTIO...', + open_otio_file, + icon=None + ) + otio_add_track_action.setEnabled(False) + + hiero.ui.registerAction(otio_import_action) + hiero.ui.registerAction(otio_add_track_action) + + view = hiero.ui.currentContextMenuView() + + if view: + sequence = get_sequence(view) + if sequence: + otio_add_track_action.setEnabled(True) + for action in event.menu.actions(): if action.text() == 'Import': - action.menu().addAction(otio_action) - break + action.menu().addAction(otio_import_action) + action.menu().addAction(otio_add_track_action) + + elif action.text() == 'New Track': + action.menu().addAction(otio_add_track_action) def open_otio_file(): @@ -45,8 +94,39 @@ def open_otio_file(): pattern='*.otio', requiredExtension='.otio' ) + + selection = None + sequence = None + + view = hiero.ui.currentContextMenuView() + if view: + sequence = get_sequence(view) + selection = view.selection() + + if sequence: + project = sequence.project() + + elif selection: + project = selection[0].project() + + elif len(hiero.core.projects()) > 1: + dialog = OTIOProjectSelect(hiero.core.projects()) + if dialog.exec_(): + project = hiero.core.projects()[dialog.projects.currentIndex()] + + else: + bar = hiero.ui.mainWindow().statusBar() + bar.showMessage( + 'OTIO Import aborted by user', + timeout=3000 + ) + return + + else: + project = hiero.core.projects()[-1] + for otio_file in files: - load_otio(otio_file) + load_otio(otio_file, project, sequence) # HieroPlayer is quite limited and can't create transitions etc. 
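For reference, a minimal sketch of driving the same import path without the context menu, reusing the fallback above of taking the most recently opened project when no timeline or bin selection is available (the helper name and the meaning of passing sequence=None are assumptions, not part of this changeset):

import hiero.core
from openpype.hosts.hiero.otio.hiero_import import load_otio


def import_otio_headless(otio_path):
    # Mirror the menu fallback: when there is no timeline or bin view to
    # resolve a project from, fall back to the last opened project.
    projects = hiero.core.projects()
    if not projects:
        raise RuntimeError("No Hiero project is open.")

    # sequence=None follows the no-selection branch of open_otio_file();
    # presumably the importer then builds a new sequence rather than
    # adding tracks to an existing one (assumption, not verified here).
    load_otio(otio_path, projects[-1], None)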
@@ -55,3 +135,7 @@ if not hiero.core.isHieroPlayer(): "kShowContextMenu/kBin", OTIO_menu_action ) + hiero.core.events.registerInterest( + "kShowContextMenu/kTimeline", + OTIO_menu_action + ) diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index dd586ca02d..1f0f90811f 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -210,7 +210,7 @@ def validate_fps(): if current_fps != fps: - from ...widgets import popup + from openpype.widgets import popup # Find main window parent = hou.ui.mainQtWindow() @@ -219,8 +219,8 @@ def validate_fps(): else: dialog = popup.Popup2(parent=parent) dialog.setModal(True) - dialog.setWindowTitle("Maya scene not in line with project") - dialog.setMessage("The FPS is out of sync, please fix") + dialog.setWindowTitle("Houdini scene not in line with project") + dialog.setMessage("The FPS is out of sync, please fix it") # Set new text for button (add optional argument for the popup?) toggle = dialog.widgets["toggle"] diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.XML b/openpype/hosts/houdini/startup/MainMenuCommon.xml similarity index 100% rename from openpype/hosts/houdini/startup/MainMenuCommon.XML rename to openpype/hosts/houdini/startup/MainMenuCommon.xml diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index ae2d329a97..a83ff98c99 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -1872,7 +1872,7 @@ def set_context_settings(): # Set project fps fps = asset_data.get("fps", project_data.get("fps", 25)) - api.Session["AVALON_FPS"] = fps + api.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) # Set project resolution diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index 238213c000..bf24b463ac 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -348,6 +348,13 @@ class CollectLook(pyblish.api.InstancePlugin): history = [] for material in materials: history.extend(cmds.listHistory(material)) + + # handle VrayPluginNodeMtl node - see #1397 + vray_plugin_nodes = cmds.ls( + history, type="VRayPluginNodeMtl", long=True) + for vray_node in vray_plugin_nodes: + history.extend(cmds.listHistory(vray_node)) + files = cmds.ls(history, type="file", long=True) files.extend(cmds.ls(history, type="aiImage", long=True)) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index 75749a952e..647a46e240 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -358,9 +358,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): options["extendFrames"] = extend_frames options["overrideExistingFrame"] = override_frames - maya_render_plugin = "MayaPype" - if attributes.get("useMayaBatch", True): - maya_render_plugin = "MayaBatch" + maya_render_plugin = "MayaBatch" options["mayaRenderPlugin"] = maya_render_plugin diff --git a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py index d0c6c4eb14..7c9e201986 100644 --- a/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_redshift_proxy.py @@ -74,6 +74,8 @@ class ExtractRedshiftProxy(openpype.api.Extractor): 'files': repr_files, "stagingDir": staging_dir, } + if anim_on: + 
representation["frameStart"] = instance.data["proxyFrameStart"] instance.data["representations"].append(representation) self.log.info("Extracted instance '%s' to: %s" diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py index d556a89fa3..6d27c66882 100644 --- a/openpype/hosts/maya/startup/userSetup.py +++ b/openpype/hosts/maya/startup/userSetup.py @@ -10,7 +10,6 @@ print("starting OpenPype usersetup") settings = get_project_settings(os.environ['AVALON_PROJECT']) shelf_preset = settings['maya'].get('project_shelf') - if shelf_preset: project = os.environ["AVALON_PROJECT"] @@ -23,7 +22,7 @@ if shelf_preset: print(import_string) exec(import_string) -cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") + cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") print("finished OpenPype usersetup") diff --git a/openpype/hosts/nuke/api/__init__.py b/openpype/hosts/nuke/api/__init__.py index c80507e7ea..bd7a95f916 100644 --- a/openpype/hosts/nuke/api/__init__.py +++ b/openpype/hosts/nuke/api/__init__.py @@ -106,7 +106,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( instance, old_value, new_value)) - from avalon.api.nuke import ( + from avalon.nuke import ( viewer_update_and_undo_stop, add_publish_knob ) diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 34337f726f..7ef5401292 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -1,6 +1,8 @@ import os import re import sys +import six +import platform from collections import OrderedDict @@ -19,7 +21,6 @@ from openpype.api import ( get_hierarchy, get_asset, get_current_project_settings, - config, ApplicationManager ) @@ -29,36 +30,34 @@ from .utils import set_context_favorites log = Logger().get_logger(__name__) -self = sys.modules[__name__] -self._project = None -self.workfiles_launched = False -self._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") +opnl = sys.modules[__name__] +opnl._project = None +opnl.project_name = os.getenv("AVALON_PROJECT") +opnl.workfiles_launched = False +opnl._node_tab_name = "{}".format(os.getenv("AVALON_LABEL") or "Avalon") -def get_node_imageio_setting(**kwarg): +def get_created_node_imageio_setting(**kwarg): ''' Get preset data for dataflow (fileType, compression, bitDepth) ''' - log.info(kwarg) - host = str(kwarg.get("host", "nuke")) + log.debug(kwarg) nodeclass = kwarg.get("nodeclass", None) creator = kwarg.get("creator", None) - project_name = os.getenv("AVALON_PROJECT") - assert any([host, nodeclass]), nuke.message( + assert any([creator, nodeclass]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) - imageio_nodes = (get_anatomy_settings(project_name) - ["imageio"] - .get(host, None) - ["nodes"] - ["requiredNodes"] - ) + imageio = get_anatomy_settings(opnl.project_name)["imageio"] + imageio_nodes = imageio["nuke"]["nodes"]["requiredNodes"] + imageio_node = None for node in imageio_nodes: log.info(node) - if node["nukeNodeClass"] == nodeclass: - if creator in node["plugins"]: - imageio_node = node + if (node["nukeNodeClass"] != nodeclass) and ( + creator not in node["plugins"]): + continue + + imageio_node = node log.info("ImageIO node: {}".format(imageio_node)) return imageio_node @@ -67,12 +66,9 @@ def get_node_imageio_setting(**kwarg): def get_imageio_input_colorspace(filename): 
''' Get input file colorspace based on regex in settings. ''' - imageio_regex_inputs = (get_anatomy_settings(os.getenv("AVALON_PROJECT")) - ["imageio"] - ["nuke"] - ["regexInputs"] - ["inputs"] - ) + imageio_regex_inputs = ( + get_anatomy_settings(opnl.project_name) + ["imageio"]["nuke"]["regexInputs"]["inputs"]) preset_clrsp = None for regexInput in imageio_regex_inputs: @@ -104,40 +100,39 @@ def check_inventory_versions(): """ # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): - if each.Class() == 'Read': - container = avalon.nuke.parse_container(each) + container = avalon.nuke.parse_container(each) - if container: - node = nuke.toNode(container["objectName"]) - avalon_knob_data = avalon.nuke.read( - node) + if container: + node = nuke.toNode(container["objectName"]) + avalon_knob_data = avalon.nuke.read( + node) - # get representation from io - representation = io.find_one({ - "type": "representation", - "_id": io.ObjectId(avalon_knob_data["representation"]) - }) + # get representation from io + representation = io.find_one({ + "type": "representation", + "_id": io.ObjectId(avalon_knob_data["representation"]) + }) - # Get start frame from version data - version = io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + # Get start frame from version data + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) - # get all versions in list - versions = io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') - max_version = max(versions) + max_version = max(versions) - # check the available version and do match - # change color of node if not max verion - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) - else: - node["tile_color"].setValue(int("0x4ecd25ff", 16)) + # check the available version and do match + # change color of node if not max verion + if version.get("name") not in [max_version]: + node["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + node["tile_color"].setValue(int("0x4ecd25ff", 16)) def writes_version_sync(): @@ -153,34 +148,33 @@ def writes_version_sync(): except Exception: return - for each in nuke.allNodes(): - if each.Class() == 'Write': - # check if the node is avalon tracked - if self._node_tab_name not in each.knobs(): + for each in nuke.allNodes(filter="Write"): + # check if the node is avalon tracked + if opnl._node_tab_name not in each.knobs(): + continue + + avalon_knob_data = avalon.nuke.read( + each) + + try: + if avalon_knob_data['families'] not in ["render"]: + log.debug(avalon_knob_data['families']) continue - avalon_knob_data = avalon.nuke.read( - each) + node_file = each['file'].value() - try: - if avalon_knob_data['families'] not in ["render"]: - log.debug(avalon_knob_data['families']) - continue + node_version = "v" + get_version_from_path(node_file) + log.debug("node_version: {}".format(node_version)) - node_file = each['file'].value() - - node_version = "v" + get_version_from_path(node_file) - log.debug("node_version: {}".format(node_version)) - - node_new_file = node_file.replace(node_version, new_version) - each['file'].setValue(node_new_file) - if not os.path.isdir(os.path.dirname(node_new_file)): - log.warning("Path does not exist! 
I am creating it.") - os.makedirs(os.path.dirname(node_new_file)) - except Exception as e: - log.warning( - "Write node: `{}` has no version in path: {}".format( - each.name(), e)) + node_new_file = node_file.replace(node_version, new_version) + each['file'].setValue(node_new_file) + if not os.path.isdir(os.path.dirname(node_new_file)): + log.warning("Path does not exist! I am creating it.") + os.makedirs(os.path.dirname(node_new_file)) + except Exception as e: + log.warning( + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -201,24 +195,22 @@ def check_subsetname_exists(nodes, subset_name): Returns: bool: True of False """ - result = next((True for n in nodes - if subset_name in avalon.nuke.read(n).get("subset", "")), False) - return result + return next((True for n in nodes + if subset_name in avalon.nuke.read(n).get("subset", "")), + False) def get_render_path(node): ''' Generate Render path from presets regarding avalon knob data ''' - data = dict() - data['avalon'] = avalon.nuke.read( - node) - + data = {'avalon': avalon.nuke.read(node)} data_preset = { - "class": data['avalon']['family'], - "preset": data['avalon']['families'] + "nodeclass": data['avalon']['family'], + "families": [data['avalon']['families']], + "creator": data['avalon']['creator'] } - nuke_imageio_writes = get_node_imageio_setting(**data_preset) + nuke_imageio_writes = get_created_node_imageio_setting(**data_preset) application = lib.get_application(os.environ["AVALON_APP_NAME"]) data.update({ @@ -324,7 +316,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): node (obj): group node with avalon data as Knobs ''' - imageio_writes = get_node_imageio_setting(**data) + imageio_writes = get_created_node_imageio_setting(**data) app_manager = ApplicationManager() app_name = os.environ.get("AVALON_APP_NAME") if app_name: @@ -367,8 +359,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): # adding dataflow template log.debug("imageio_writes: `{}`".format(imageio_writes)) for knob in imageio_writes["knobs"]: - if knob["name"] not in ["_id", "_previous"]: - _data.update({knob["name"]: knob["value"]}) + _data.update({knob["name"]: knob["value"]}) _data = anlib.fix_data_for_node_create(_data) @@ -506,7 +497,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): add_deadline_tab(GN) # open the our Tab as default - GN[self._node_tab_name].setFlag(0) + GN[opnl._node_tab_name].setFlag(0) # set tile color tile_color = _data.get("tile_color", "0xff0000ff") @@ -629,7 +620,7 @@ class WorkfileSettings(object): root_node=None, nodes=None, **kwargs): - self._project = kwargs.get( + opnl._project = kwargs.get( "project") or io.find_one({"type": "project"}) self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"] self._asset_entity = get_asset(self._asset) @@ -672,7 +663,7 @@ class WorkfileSettings(object): ] erased_viewers = [] - for v in [n for n in nuke.allNodes(filter="Viewer")]: + for v in nuke.allNodes(filter="Viewer"): v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"])) if str(viewer_dict["viewerProcess"]) \ not in v['viewerProcess'].value(): @@ -716,7 +707,7 @@ class WorkfileSettings(object): log.error(msg) nuke.message(msg) - log.debug(">> root_dict: {}".format(root_dict)) + log.warning(">> root_dict: {}".format(root_dict)) # first set OCIO if self._root_node["colorManagement"].value() \ @@ -738,41 +729,41 @@ class WorkfileSettings(object): # third set ocio custom path if 
root_dict.get("customOCIOConfigPath"): - self._root_node["customOCIOConfigPath"].setValue( - str(root_dict["customOCIOConfigPath"]).format( - **os.environ - ).replace("\\", "/") - ) - log.debug("nuke.root()['{}'] changed to: {}".format( - "customOCIOConfigPath", root_dict["customOCIOConfigPath"])) - root_dict.pop("customOCIOConfigPath") + unresolved_path = root_dict["customOCIOConfigPath"] + ocio_paths = unresolved_path[platform.system().lower()] + + resolved_path = None + for ocio_p in ocio_paths: + resolved_path = str(ocio_p).format(**os.environ) + if not os.path.exists(resolved_path): + continue + + if resolved_path: + self._root_node["customOCIOConfigPath"].setValue( + str(resolved_path).replace("\\", "/") + ) + log.debug("nuke.root()['{}'] changed to: {}".format( + "customOCIOConfigPath", resolved_path)) + root_dict.pop("customOCIOConfigPath") # then set the rest for knob, value in root_dict.items(): + # skip unfilled ocio config path + # it will be dict in value + if isinstance(value, dict): + continue if self._root_node[knob].value() not in value: self._root_node[knob].setValue(str(value)) log.debug("nuke.root()['{}'] changed to: {}".format( knob, value)) - def set_writes_colorspace(self, write_dict): + def set_writes_colorspace(self): ''' Adds correct colorspace to write node dict - Arguments: - write_dict (dict): nuke write node as dictionary - ''' - # scene will have fixed colorspace following presets for the project - if not isinstance(write_dict, dict): - msg = "set_root_colorspace(): argument should be dictionary" - log.error(msg) - return - from avalon.nuke import read - for node in nuke.allNodes(): - - if node.Class() in ["Viewer", "Dot"]: - continue + for node in nuke.allNodes(filter="Group"): # get data from avalon knob avalon_knob_data = read(node) @@ -788,49 +779,63 @@ class WorkfileSettings(object): if avalon_knob_data.get("families"): families.append(avalon_knob_data.get("families")) - # except disabled nodes but exclude backdrops in test - for fmly, knob in write_dict.items(): - write = None - if (fmly in families): - # Add all nodes in group instances. 
- if node.Class() == "Group": - node.begin() - for x in nuke.allNodes(): - if x.Class() == "Write": - write = x - node.end() - elif node.Class() == "Write": - write = node - else: - log.warning("Wrong write node Class") + data_preset = { + "nodeclass": avalon_knob_data["family"], + "families": families, + "creator": avalon_knob_data['creator'] + } - write["colorspace"].setValue(str(knob["colorspace"])) - log.info( - "Setting `{0}` to `{1}`".format( - write.name(), - knob["colorspace"])) + nuke_imageio_writes = get_created_node_imageio_setting( + **data_preset) - def set_reads_colorspace(self, reads): + log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes)) + + if not nuke_imageio_writes: + return + + write_node = None + + # get into the group node + node.begin() + for x in nuke.allNodes(): + if x.Class() == "Write": + write_node = x + node.end() + + if not write_node: + return + + # write all knobs to node + for knob in nuke_imageio_writes["knobs"]: + value = knob["value"] + if isinstance(value, six.text_type): + value = str(value) + if str(value).startswith("0x"): + value = int(value, 16) + + write_node[knob["name"]].setValue(value) + + + def set_reads_colorspace(self, read_clrs_inputs): """ Setting colorspace to Read nodes Looping trought all read nodes and tries to set colorspace based on regex rules in presets """ - changes = dict() + changes = {} for n in nuke.allNodes(): file = nuke.filename(n) - if not n.Class() == "Read": + if n.Class() != "Read": continue - # load nuke presets for Read's colorspace - read_clrs_presets = config.get_init_presets()["colorspace"].get( - "nuke", {}).get("read", {}) - # check if any colorspace presets for read is mathing - preset_clrsp = next((read_clrs_presets[k] - for k in read_clrs_presets - if bool(re.search(k, file))), - None) + preset_clrsp = None + + for input in read_clrs_inputs: + if not bool(re.search(input["regex"], file)): + continue + preset_clrsp = input["colorspace"] + log.debug(preset_clrsp) if preset_clrsp is not None: current = n["colorspace"].value() @@ -864,13 +869,15 @@ class WorkfileSettings(object): def set_colorspace(self): ''' Setting colorpace following presets ''' - nuke_colorspace = config.get_init_presets( - )["colorspace"].get("nuke", None) + # get imageio + imageio = get_anatomy_settings(opnl.project_name)["imageio"] + nuke_colorspace = imageio["nuke"] try: - self.set_root_colorspace(nuke_colorspace["root"]) + self.set_root_colorspace(nuke_colorspace["workfile"]) except AttributeError: - msg = "set_colorspace(): missing `root` settings in template" + msg = "set_colorspace(): missing `workfile` settings in template" + nuke.message(msg) try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) @@ -880,15 +887,14 @@ class WorkfileSettings(object): log.error(msg) try: - self.set_writes_colorspace(nuke_colorspace["write"]) - except AttributeError: - msg = "set_colorspace(): missing `write` settings in template" - nuke.message(msg) - log.error(msg) + self.set_writes_colorspace() + except AttributeError as _error: + nuke.message(_error) + log.error(_error) - reads = nuke_colorspace.get("read") - if reads: - self.set_reads_colorspace(reads) + read_clrs_inputs = nuke_colorspace["regexInputs"].get("inputs", []) + if read_clrs_inputs: + self.set_reads_colorspace(read_clrs_inputs) try: for key in nuke_colorspace: @@ -1070,15 +1076,14 @@ class WorkfileSettings(object): def set_favorites(self): work_dir = os.getenv("AVALON_WORKDIR") asset = os.getenv("AVALON_ASSET") - project = os.getenv("AVALON_PROJECT") favorite_items = 
OrderedDict() # project # get project's root and split to parts projects_root = os.path.normpath(work_dir.split( - project)[0]) + opnl.project_name)[0]) # add project name - project_dir = os.path.join(projects_root, project) + "/" + project_dir = os.path.join(projects_root, opnl.project_name) + "/" # add to favorites favorite_items.update({"Project dir": project_dir.replace("\\", "/")}) @@ -1128,13 +1133,13 @@ def get_write_node_template_attr(node): data['avalon'] = avalon.nuke.read( node) data_preset = { - "class": data['avalon']['family'], - "families": data['avalon']['families'], - "preset": data['avalon']['families'] # omit < 2.0.0v + "nodeclass": data['avalon']['family'], + "families": [data['avalon']['families']], + "creator": data['avalon']['creator'] } # get template data - nuke_imageio_writes = get_node_imageio_setting(**data_preset) + nuke_imageio_writes = get_created_node_imageio_setting(**data_preset) # collecting correct data correct_data = OrderedDict({ @@ -1230,8 +1235,7 @@ class ExporterReview: """ anlib.reset_selection() ipn_orig = None - for v in [n for n in nuke.allNodes() - if "Viewer" == n.Class()]: + for v in nuke.allNodes(filter="Viewer"): ip = v['input_process'].getValue() ipn = v['input_process_node'].getValue() if "VIEWER_INPUT" not in ipn and ip: @@ -1644,8 +1648,8 @@ def launch_workfiles_app(): if not open_at_start: return - if not self.workfiles_launched: - self.workfiles_launched = True + if not opnl.workfiles_launched: + opnl.workfiles_launched = True workfiles.show(os.environ["AVALON_WORKDIR"]) diff --git a/openpype/hosts/nuke/api/menu.py b/openpype/hosts/nuke/api/menu.py index 2317066528..021ea04159 100644 --- a/openpype/hosts/nuke/api/menu.py +++ b/openpype/hosts/nuke/api/menu.py @@ -26,9 +26,9 @@ def install(): menu.addCommand( name, workfiles.show, - index=(rm_item[0]) + index=2 ) - + menu.addSeparator(index=3) # replace reset resolution from avalon core to pype's name = "Reset Resolution" new_name = "Set Resolution" @@ -63,16 +63,7 @@ def install(): # add colorspace menu item name = "Set Colorspace" menu.addCommand( - name, lambda: WorkfileSettings().set_colorspace(), - index=(rm_item[0] + 2) - ) - log.debug("Adding menu item: {}".format(name)) - - # add workfile builder menu item - name = "Build Workfile" - menu.addCommand( - name, lambda: BuildWorkfile().process(), - index=(rm_item[0] + 7) + name, lambda: WorkfileSettings().set_colorspace() ) log.debug("Adding menu item: {}".format(name)) @@ -80,11 +71,20 @@ def install(): name = "Apply All Settings" menu.addCommand( name, - lambda: WorkfileSettings().set_context_settings(), - index=(rm_item[0] + 3) + lambda: WorkfileSettings().set_context_settings() ) log.debug("Adding menu item: {}".format(name)) + menu.addSeparator() + + # add workfile builder menu item + name = "Build Workfile" + menu.addCommand( + name, lambda: BuildWorkfile().process() + ) + log.debug("Adding menu item: {}".format(name)) + + # adding shortcuts add_shortcuts_from_presets() diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index 38d1a0c2ed..6e1a2ddd96 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -77,10 +77,14 @@ class CreateWritePrerender(plugin.PypeCreator): write_data = { "nodeclass": self.n_class, "families": [self.family], - "avalon": self.data, - "creator": self.__class__.__name__ + "avalon": self.data } + # add creator data + creator_data = 
{"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + if self.presets.get('fpath_template'): self.log.info("Adding template path from preset") write_data.update( diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index 72f851f19c..04983e9c75 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -80,10 +80,14 @@ class CreateWriteRender(plugin.PypeCreator): write_data = { "nodeclass": self.n_class, "families": [self.family], - "avalon": self.data, - "creator": self.__class__.__name__ + "avalon": self.data } + # add creator data + creator_data = {"creator": self.__class__.__name__} + self.data.update(creator_data) + write_data.update(creator_data) + if self.presets.get('fpath_template'): self.log.info("Adding template path from preset") write_data.update( diff --git a/openpype/hosts/nuke/plugins/load/load_mov.py b/openpype/hosts/nuke/plugins/load/load_mov.py index 92726913af..8b8c5d0c10 100644 --- a/openpype/hosts/nuke/plugins/load/load_mov.py +++ b/openpype/hosts/nuke/plugins/load/load_mov.py @@ -135,12 +135,14 @@ class LoadMov(api.Loader): read_name = self.node_name_template.format(**name_data) - # Create the Loader with the filename path set + read_node = nuke.createNode( + "Read", + "name {}".format(read_name) + ) + + # to avoid multiple undo steps for rest of process + # we will switch off undo-ing with viewer_update_and_undo_stop(): - read_node = nuke.createNode( - "Read", - "name {}".format(read_name) - ) read_node["file"].setValue(file) read_node["origfirst"].setValue(first) diff --git a/openpype/hosts/nuke/plugins/load/load_sequence.py b/openpype/hosts/nuke/plugins/load/load_sequence.py index df7aa55cd1..71f0b8c298 100644 --- a/openpype/hosts/nuke/plugins/load/load_sequence.py +++ b/openpype/hosts/nuke/plugins/load/load_sequence.py @@ -139,11 +139,15 @@ class LoadSequence(api.Loader): read_name = self.node_name_template.format(**name_data) # Create the Loader with the filename path set + + # TODO: it might be universal read to img/geo/camera + r = nuke.createNode( + "Read", + "name {}".format(read_name)) + + # to avoid multiple undo steps for rest of process + # we will switch off undo-ing with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera - r = nuke.createNode( - "Read", - "name {}".format(read_name)) r["file"].setValue(file) # Set colorspace defined in version data diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index 9c7f1b5e95..4257ed3131 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -34,7 +34,8 @@ class CollectSlate(pyblish.api.InstancePlugin): if slate_node: instance.data["slateNode"] = slate_node instance.data["families"].append("slate") + instance.data["versionData"]["families"].append("slate") self.log.info( "Slate node is in node graph: `{}`".format(slate.name())) self.log.debug( - "__ instance: `{}`".format(instance)) + "__ instance.data: `{}`".format(instance.data)) diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index cbc86f7b03..539cebe646 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -77,8 +77,9 @@ def set_context_settings(asset_doc=None): handle_start = handles 
handle_end = handles - frame_start -= int(handle_start) - frame_end += int(handle_end) + # Always start from 0 Mark In and set only Mark Out + mark_in = 0 + mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end - execute_george("tv_markin {} set".format(frame_start - 1)) - execute_george("tv_markout {} set".format(frame_end - 1)) + execute_george("tv_markin {} set".format(mark_in)) + execute_george("tv_markout {} set".format(mark_out)) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py new file mode 100644 index 0000000000..f291c363b8 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -0,0 +1,37 @@ +import pyblish.api + + +class CollectOutputFrameRange(pyblish.api.ContextPlugin): + """Collect frame start/end from context. + + When instances are collected context does not contain `frameStart` and + `frameEnd` keys yet. They are collected in global plugin + `CollectAvalonEntities`. + """ + label = "Collect output frame range" + order = pyblish.api.CollectorOrder + hosts = ["tvpaint"] + + def process(self, context): + for instance in context: + frame_start = instance.data.get("frameStart") + frame_end = instance.data.get("frameEnd") + if frame_start is not None and frame_end is not None: + self.log.debug( + "Instance {} already has set frames {}-{}".format( + str(instance), frame_start, frame_end + ) + ) + return + + frame_start = context.data.get("frameStart") + frame_end = context.data.get("frameEnd") + + instance.data["frameStart"] = frame_start + instance.data["frameEnd"] = frame_end + + self.log.info( + "Set frames {}-{} on instance {} ".format( + frame_start, frame_end, str(instance) + ) + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index cc236734e5..27bd8e9ede 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -86,9 +86,6 @@ class CollectInstances(pyblish.api.ContextPlugin): instance.data["publish"] = any_visible - instance.data["frameStart"] = context.data["sceneMarkIn"] + 1 - instance.data["frameEnd"] = context.data["sceneMarkOut"] + 1 - self.log.debug("Created instance: {}\n{}".format( instance, json.dumps(instance.data, indent=4) )) diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 0d125a1a50..007b5c41f1 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -1,8 +1,6 @@ import os import shutil -import time import tempfile -import multiprocessing import pyblish.api from avalon.tvpaint import lib @@ -45,10 +43,64 @@ class ExtractSequence(pyblish.api.Extractor): ) family_lowered = instance.data["family"].lower() - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] + mark_in = instance.context.data["sceneMarkIn"] + mark_out = instance.context.data["sceneMarkOut"] + # Frame start/end may be stored as float + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) - filename_template = self._get_filename_template(frame_end) + # Handles are not stored per instance but on Context + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + + # --- Fallbacks 
---------------------------------------------------- + # This is required if validations of ranges are ignored. + # - all of this code won't change processing if range to render + # match to range of expected output + + # Prepare output frames + output_frame_start = frame_start - handle_start + output_frame_end = frame_end + handle_end + + # Change output frame start to 0 if handles cause it's negative number + if output_frame_start < 0: + self.log.warning(( + "Frame start with handles has negative value." + " Changed to \"0\". Frames start: {}, Handle Start: {}" + ).format(frame_start, handle_start)) + output_frame_start = 0 + + # Check Marks range and output range + output_range = output_frame_end - output_frame_start + marks_range = mark_out - mark_in + + # Lower Mark Out if mark range is bigger than output + # - do not rendered not used frames + if output_range < marks_range: + new_mark_out = mark_out - (marks_range - output_range) + self.log.warning(( + "Lowering render range to {} frames. Changed Mark Out {} -> {}" + ).format(marks_range + 1, mark_out, new_mark_out)) + # Assign new mark out to variable + mark_out = new_mark_out + + # Lower output frame end so representation has right `frameEnd` value + elif output_range > marks_range: + new_output_frame_end = ( + output_frame_end - (output_range - marks_range) + ) + self.log.warning(( + "Lowering representation range to {} frames." + " Changed frame end {} -> {}" + ).format(output_range + 1, mark_out, new_mark_out)) + output_frame_end = new_output_frame_end + + # ------------------------------------------------------------------- + + filename_template = self._get_filename_template( + # Use the biggest number + max(mark_out, frame_end) + ) ext = os.path.splitext(filename_template)[1].replace(".", "") self.log.debug("Using file template \"{}\"".format(filename_template)) @@ -57,7 +109,9 @@ class ExtractSequence(pyblish.api.Extractor): output_dir = instance.data.get("stagingDir") if not output_dir: # Create temp folder if staging dir is not set - output_dir = tempfile.mkdtemp().replace("\\", "/") + output_dir = ( + tempfile.mkdtemp(prefix="tvpaint_render_") + ).replace("\\", "/") instance.data["stagingDir"] = output_dir self.log.debug( @@ -65,23 +119,36 @@ class ExtractSequence(pyblish.api.Extractor): ) if instance.data["family"] == "review": - repre_files, thumbnail_fullpath = self.render_review( - filename_template, output_dir, frame_start, frame_end + output_filenames, thumbnail_fullpath = self.render_review( + filename_template, output_dir, mark_in, mark_out ) else: # Render output - repre_files, thumbnail_fullpath = self.render( - filename_template, output_dir, frame_start, frame_end, + output_filenames, thumbnail_fullpath = self.render( + filename_template, output_dir, + mark_in, mark_out, filtered_layers ) + # Sequence of one frame + if not output_filenames: + self.log.warning("Extractor did not create any output.") + return + + repre_files = self._rename_output_files( + filename_template, output_dir, + mark_in, mark_out, + output_frame_start, output_frame_end + ) + # Fill tags and new families tags = [] if family_lowered in ("review", "renderlayer"): tags.append("review") # Sequence of one frame - if len(repre_files) == 1: + single_file = len(repre_files) == 1 + if single_file: repre_files = repre_files[0] new_repre = { @@ -89,10 +156,13 @@ class ExtractSequence(pyblish.api.Extractor): "ext": ext, "files": repre_files, "stagingDir": output_dir, - "frameStart": frame_start, - "frameEnd": frame_end, "tags": tags } + + if not 
single_file: + new_repre["frameStart"] = output_frame_start + new_repre["frameEnd"] = output_frame_end + self.log.debug("Creating new representation: {}".format(new_repre)) instance.data["representations"].append(new_repre) @@ -133,9 +203,45 @@ class ExtractSequence(pyblish.api.Extractor): return "{{frame:0>{}}}".format(frame_padding) + ".png" - def render_review( - self, filename_template, output_dir, frame_start, frame_end + def _rename_output_files( + self, filename_template, output_dir, + mark_in, mark_out, output_frame_start, output_frame_end ): + # Use differnet ranges based on Mark In and output Frame Start values + # - this is to make sure that filename renaming won't affect files that + # are not renamed yet + mark_start_is_less = bool(mark_in < output_frame_start) + if mark_start_is_less: + marks_range = range(mark_out, mark_in - 1, -1) + frames_range = range(output_frame_end, output_frame_start - 1, -1) + else: + # This is less possible situation as frame start will be in most + # cases higher than Mark In. + marks_range = range(mark_in, mark_out + 1) + frames_range = range(output_frame_start, output_frame_end + 1) + + repre_filepaths = [] + for mark, frame in zip(marks_range, frames_range): + new_filename = filename_template.format(frame=frame) + new_filepath = os.path.join(output_dir, new_filename) + + repre_filepaths.append(new_filepath) + + if mark != frame: + old_filename = filename_template.format(frame=mark) + old_filepath = os.path.join(output_dir, old_filename) + os.rename(old_filepath, new_filepath) + + # Reverse repre files order if output + if mark_start_is_less: + repre_filepaths = list(reversed(repre_filepaths)) + + return [ + os.path.basename(path) + for path in repre_filepaths + ] + + def render_review(self, filename_template, output_dir, mark_in, mark_out): """ Export images from TVPaint using `tv_savesequence` command. Args: @@ -144,8 +250,8 @@ class ExtractSequence(pyblish.api.Extractor): keyword argument `{frame}` or index argument (for same value). Extension in template must match `save_mode`. output_dir (str): Directory where files will be stored. - first_frame (int): Starting frame from which export will begin. - last_frame (int): On which frame export will end. + mark_in (int): Starting frame index from which export will begin. + mark_out (int): On which frame index export will end. Retruns: tuple: With 2 items first is list of filenames second is path to @@ -154,10 +260,8 @@ class ExtractSequence(pyblish.api.Extractor): self.log.debug("Preparing data for rendering.") first_frame_filepath = os.path.join( output_dir, - filename_template.format(frame=frame_start) + filename_template.format(frame=mark_in) ) - mark_in = frame_start - 1 - mark_out = frame_end - 1 george_script_lines = [ "tv_SaveMode \"PNG\"", @@ -170,13 +274,22 @@ class ExtractSequence(pyblish.api.Extractor): ] lib.execute_george_through_file("\n".join(george_script_lines)) - output = [] first_frame_filepath = None - for frame in range(frame_start, frame_end + 1): + output_filenames = [] + for frame in range(mark_in, mark_out + 1): filename = filename_template.format(frame=frame) - output.append(filename) + output_filenames.append(filename) + + filepath = os.path.join(output_dir, filename) + if not os.path.exists(filepath): + raise AssertionError( + "Output was not rendered. 
File was not found {}".format( + filepath + ) + ) + if first_frame_filepath is None: - first_frame_filepath = os.path.join(output_dir, filename) + first_frame_filepath = filepath thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") if first_frame_filepath and os.path.exists(first_frame_filepath): @@ -184,11 +297,10 @@ class ExtractSequence(pyblish.api.Extractor): thumbnail_obj = Image.new("RGB", source_img.size, (255, 255, 255)) thumbnail_obj.paste(source_img) thumbnail_obj.save(thumbnail_filepath) - return output, thumbnail_filepath - def render( - self, filename_template, output_dir, frame_start, frame_end, layers - ): + return output_filenames, thumbnail_filepath + + def render(self, filename_template, output_dir, mark_in, mark_out, layers): """ Export images from TVPaint. Args: @@ -197,8 +309,8 @@ class ExtractSequence(pyblish.api.Extractor): keyword argument `{frame}` or index argument (for same value). Extension in template must match `save_mode`. output_dir (str): Directory where files will be stored. - first_frame (int): Starting frame from which export will begin. - last_frame (int): On which frame export will end. + mark_in (int): Starting frame index from which export will begin. + mark_out (int): On which frame index export will end. layers (list): List of layers to be exported. Retruns: @@ -219,14 +331,11 @@ class ExtractSequence(pyblish.api.Extractor): # Sort layer positions in reverse order sorted_positions = list(reversed(sorted(layers_by_position.keys()))) if not sorted_positions: - return + return [], None self.log.debug("Collecting pre/post behavior of individual layers.") behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids) - mark_in_index = frame_start - 1 - mark_out_index = frame_end - 1 - tmp_filename_template = "pos_{pos}." + filename_template files_by_position = {} @@ -239,25 +348,47 @@ class ExtractSequence(pyblish.api.Extractor): tmp_filename_template, output_dir, behavior, - mark_in_index, - mark_out_index + mark_in, + mark_out ) - files_by_position[position] = files_by_frames + if files_by_frames: + files_by_position[position] = files_by_frames + else: + self.log.warning(( + "Skipped layer \"{}\". Probably out of Mark In/Out range." 
+ ).format(layer["name"])) + + if not files_by_position: + layer_names = set(layer["name"] for layer in layers) + joined_names = ", ".join( + ["\"{}\"".format(name) for name in layer_names] + ) + self.log.warning( + "Layers {} do not have content in range {} - {}".format( + joined_names, mark_in, mark_out + ) + ) + return [], None output_filepaths = self._composite_files( files_by_position, - mark_in_index, - mark_out_index, + mark_in, + mark_out, filename_template, output_dir ) self._cleanup_tmp_files(files_by_position) - thumbnail_src_filepath = None - thumbnail_filepath = None - if output_filepaths: - thumbnail_src_filepath = tuple(sorted(output_filepaths))[0] + output_filenames = [ + os.path.basename(filepath) + for filepath in output_filepaths + ] + thumbnail_src_filepath = None + if output_filepaths: + thumbnail_src_filepath = output_filepaths[0] + + thumbnail_filepath = None if thumbnail_src_filepath and os.path.exists(thumbnail_src_filepath): source_img = Image.open(thumbnail_src_filepath) thumbnail_filepath = os.path.join(output_dir, "thumbnail.jpg") @@ -265,11 +396,7 @@ class ExtractSequence(pyblish.api.Extractor): thumbnail_obj.paste(source_img) thumbnail_obj.save(thumbnail_filepath) - repre_files = [ - os.path.basename(path) - for path in output_filepaths - ] - return repre_files, thumbnail_filepath + return output_filenames, thumbnail_filepath def _render_layer( self, @@ -283,6 +410,22 @@ class ExtractSequence(pyblish.api.Extractor): layer_id = layer["layer_id"] frame_start_index = layer["frame_start"] frame_end_index = layer["frame_end"] + + pre_behavior = behavior["pre"] + post_behavior = behavior["post"] + + # Check if layer is before mark in + if frame_end_index < mark_in_index: + # Skip layer if post behavior is "none" + if post_behavior == "none": + return {} + + # Check if layer is after mark out + elif frame_start_index > mark_out_index: + # Skip layer if pre behavior is "none" + if pre_behavior == "none": + return {} + exposure_frames = lib.get_exposure_frames( layer_id, frame_start_index, frame_end_index ) @@ -341,8 +484,6 @@ class ExtractSequence(pyblish.api.Extractor): self.log.debug("Filled frames {}".format(str(_debug_filled_frames))) # Fill frames by pre/post behavior of layer - pre_behavior = behavior["pre"] - post_behavior = behavior["post"] self.log.debug(( "Completing image sequence of layer by pre/post behavior." 
" PRE: {} | POST: {}" @@ -530,17 +671,12 @@ class ExtractSequence(pyblish.api.Extractor): filepath = position_data[frame_idx] images_by_frame[frame_idx].append(filepath) - process_count = os.cpu_count() - if process_count > 1: - process_count -= 1 - - processes = {} output_filepaths = [] missing_frame_paths = [] random_frame_path = None for frame_idx in sorted(images_by_frame.keys()): image_filepaths = images_by_frame[frame_idx] - output_filename = filename_template.format(frame=frame_idx + 1) + output_filename = filename_template.format(frame=frame_idx) output_filepath = os.path.join(output_dir, output_filename) output_filepaths.append(output_filepath) @@ -553,45 +689,15 @@ class ExtractSequence(pyblish.api.Extractor): if len(image_filepaths) == 1: os.rename(image_filepaths[0], output_filepath) - # Prepare process for compositing of images + # Composite images else: - processes[frame_idx] = multiprocessing.Process( - target=composite_images, - args=(image_filepaths, output_filepath) - ) + composite_images(image_filepaths, output_filepath) # Store path of random output image that will 100% exist after all # multiprocessing as mockup for missing frames if random_frame_path is None: random_frame_path = output_filepath - self.log.info( - "Running {} compositing processes - this mey take a while.".format( - len(processes) - ) - ) - # Wait until all compositing processes are done - running_processes = {} - while True: - for idx in tuple(running_processes.keys()): - process = running_processes[idx] - if not process.is_alive(): - running_processes.pop(idx).join() - - if processes and len(running_processes) != process_count: - indexes = list(processes.keys()) - for _ in range(process_count - len(running_processes)): - if not indexes: - break - idx = indexes.pop(0) - running_processes[idx] = processes.pop(idx) - running_processes[idx].start() - - if not running_processes and not processes: - break - - time.sleep(0.01) - self.log.debug( "Creating transparent images for frames without render {}.".format( str(missing_frame_paths) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index 73486d1005..e2ef81e4a4 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -14,37 +14,54 @@ class ValidateMarksRepair(pyblish.api.Action): def process(self, context, plugin): expected_data = ValidateMarks.get_expected_data(context) - expected_data["markIn"] -= 1 - expected_data["markOut"] -= 1 - - lib.execute_george("tv_markin {} set".format(expected_data["markIn"])) + lib.execute_george( + "tv_markin {} set".format(expected_data["markIn"]) + ) lib.execute_george( "tv_markout {} set".format(expected_data["markOut"]) ) class ValidateMarks(pyblish.api.ContextPlugin): - """Validate mark in and out are enabled.""" + """Validate mark in and out are enabled and it's duration. - label = "Validate Marks" + Mark In/Out does not have to match frameStart and frameEnd but duration is + important. 
+ """ + + label = "Validate Mark In/Out" order = pyblish.api.ValidatorOrder optional = True actions = [ValidateMarksRepair] @staticmethod def get_expected_data(context): + scene_mark_in = context.data["sceneMarkIn"] + + # Data collected in `CollectAvalonEntities` + frame_end = context.data["frameEnd"] + frame_start = context.data["frameStart"] + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + + # Calculate expeted Mark out (Mark In + duration - 1) + expected_mark_out = ( + scene_mark_in + + (frame_end - frame_start) + + handle_start + handle_end + ) return { - "markIn": int(context.data["frameStart"]), + "markIn": scene_mark_in, "markInState": True, - "markOut": int(context.data["frameEnd"]), + "markOut": expected_mark_out, "markOutState": True } def process(self, context): current_data = { - "markIn": context.data["sceneMarkIn"] + 1, + "markIn": context.data["sceneMarkIn"], "markInState": context.data["sceneMarkInState"], - "markOut": context.data["sceneMarkOut"] + 1, + "markOut": context.data["sceneMarkOut"], "markOutState": context.data["sceneMarkOutState"] } expected_data = self.get_expected_data(context) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py new file mode 100644 index 0000000000..a9279bf6e0 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py @@ -0,0 +1,162 @@ +import os + +from avalon import api, pipeline +from avalon.unreal import lib +from avalon.unreal import pipeline as unreal_pipeline +import unreal + + +class PointCacheAlembicLoader(api.Loader): + """Load Point Cache from Alembic""" + + families = ["model", "pointcache"] + label = "Import Alembic Point Cache" + representations = ["abc"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. 
+ + Returns: + list(str): list of container content + """ + + # Create directory for asset and avalon container + root = "/Game/Avalon/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE) + + options.geometry_cache_settings.set_editor_property( + 'flatten_tracks', False) + + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + + # Create Asset Container + lib.create_avalon_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + name = container["asset_name"] + source_path = api.get_representation_path(representation) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.GEOMETRY_CACHE) + + options.geometry_cache_settings.set_editor_property( + 'flatten_tracks', False) + + task.options = options + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py new file mode 100644 index 0000000000..b652af0b89 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py @@ -0,0 +1,156 @@ +import os + +from avalon import api, pipeline +from avalon.unreal import lib +from avalon.unreal import pipeline as unreal_pipeline +import unreal + + +class SkeletalMeshAlembicLoader(api.Loader): + """Load Unreal SkeletalMesh from Alembic""" + + families = ["pointcache"] + label = "Import Alembic Skeletal Mesh" + representations = ["abc"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + + # Create directory for asset and avalon container + root = "/Game/Avalon/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.SKELETAL) + + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + + # Create Asset Container + lib.create_avalon_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + name = container["asset_name"] + source_path = api.get_representation_path(representation) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.SKELETAL) + + task.options = options + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py new file mode 100644 index 0000000000..12b9320f72 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py @@ -0,0 +1,156 @@ +import os + +from avalon import api, pipeline +from avalon.unreal import lib +from avalon.unreal import pipeline as unreal_pipeline +import unreal + + +class StaticMeshAlembicLoader(api.Loader): + """Load Unreal StaticMesh from Alembic""" + + families = ["model"] + label = "Import Alembic Static Mesh" + representations = ["abc"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, data): + """ + Load and containerise representation into Content Browser. + + This is two step process. 
First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + + # Create directory for asset and avalon container + root = "/Game/Avalon/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.STATIC_MESH) + + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + + # Create Asset Container + lib.create_avalon_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": pipeline.AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + name = container["asset_name"] + source_path = api.get_representation_path(representation) + destination_path = container["namespace"] + + task = unreal.AssetImportTask() + + task.set_editor_property('filename', source_path) + task.set_editor_property('destination_path', destination_path) + # strip suffix + task.set_editor_property('destination_name', name) + task.set_editor_property('replace_existing', True) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 + options = unreal.AbcImportSettings() + options.set_editor_property( + 'import_type', unreal.AlembicImportType.STATIC_MESH) + + task.options = options + # do import fbx and replace existing data + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + destination_path, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = os.path.dirname(path) + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py index dbea1d5951..dcb566fa4c 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py @@ -1,7 +1,6 @@ import os from avalon import api, pipeline -from avalon import unreal as avalon_unreal from avalon.unreal import lib from avalon.unreal import pipeline as unreal_pipeline import unreal diff --git a/openpype/launcher_actions.py b/openpype/launcher_actions.py deleted file mode 100644 index cf68dfb5c1..0000000000 --- a/openpype/launcher_actions.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import sys - -from avalon import api, pipeline - -PACKAGE_DIR = os.path.dirname(__file__) -PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins", "launcher") -ACTIONS_DIR = os.path.join(PLUGINS_DIR, "actions") - - -def register_launcher_actions(): - """Register specific actions which should be accessible in the launcher""" - - actions = [] - ext = ".py" - sys.path.append(ACTIONS_DIR) - - for f in os.listdir(ACTIONS_DIR): - file, extention = os.path.splitext(f) - if ext in extention: - module = __import__(file) - klass = getattr(module, file) - actions.append(klass) - - if actions is []: - return - - for action in actions: - print("Using launcher action from config @ '{}'".format(action.name)) - pipeline.register_plugin(api.Action, action) diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index f46c81bf7a..895d11601f 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -79,6 +79,16 @@ from .avalon_context import ( change_timer_to_current_context ) +from .local_settings import ( + IniSettingRegistry, + JSONSettingRegistry, + OpenPypeSecureRegistry, + OpenPypeSettingsRegistry, + get_local_site_id, + change_openpype_mongo_url, + get_openpype_username +) + from .applications import ( ApplicationLaunchFailed, ApplictionExecutableNotFound, @@ -112,15 +122,6 @@ from .plugin_tools import ( should_decompress ) -from .local_settings import ( - IniSettingRegistry, - JSONSettingRegistry, - OpenPypeSecureRegistry, - OpenPypeSettingsRegistry, - get_local_site_id, - change_openpype_mongo_url -) - from .path_tools import ( version_up, get_version_from_path, @@ -179,6 +180,14 @@ __all__ = [ "change_timer_to_current_context", + "IniSettingRegistry", + "JSONSettingRegistry", + "OpenPypeSecureRegistry", + "OpenPypeSettingsRegistry", + 
"get_local_site_id", + "change_openpype_mongo_url", + "get_openpype_username", + "ApplicationLaunchFailed", "ApplictionExecutableNotFound", "ApplicationNotFound", @@ -224,13 +233,6 @@ __all__ = [ "validate_mongo_connection", "OpenPypeMongoConnection", - "IniSettingRegistry", - "JSONSettingRegistry", - "OpenPypeSecureRegistry", - "OpenPypeSettingsRegistry", - "get_local_site_id", - "change_openpype_mongo_url", - "timeit", "is_overlapping_otio_ranges", diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 730d4230b6..c5c192f51b 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -25,6 +25,7 @@ from . import ( PypeLogger, Anatomy ) +from .local_settings import get_openpype_username from .avalon_context import ( get_workdir_data, get_workdir_with_workdir_data @@ -262,14 +263,32 @@ class Application: class ApplicationManager: - def __init__(self): - self.log = PypeLogger().get_logger(self.__class__.__name__) + """Load applications and tools and store them by their full name. + + Args: + system_settings (dict): Preloaded system settings. When passed manager + will always use these values. Gives ability to create manager + using different settings. + """ + def __init__(self, system_settings=None): + self.log = PypeLogger.get_logger(self.__class__.__name__) self.app_groups = {} self.applications = {} self.tool_groups = {} self.tools = {} + self._system_settings = system_settings + + self.refresh() + + def set_system_settings(self, system_settings): + """Ability to change init system settings. + + This will trigger refresh of manager. + """ + self._system_settings = system_settings + self.refresh() def refresh(self): @@ -279,9 +298,12 @@ class ApplicationManager: self.tool_groups.clear() self.tools.clear() - settings = get_system_settings( - clear_metadata=False, exclude_locals=False - ) + if self._system_settings is not None: + settings = copy.deepcopy(self._system_settings) + else: + settings = get_system_settings( + clear_metadata=False, exclude_locals=False + ) app_defs = settings["applications"] for group_name, variant_defs in app_defs.items(): @@ -1225,7 +1247,7 @@ def _prepare_last_workfile(data, workdir): file_template = anatomy.templates["work"]["file"] workdir_data.update({ "version": 1, - "user": os.environ.get("OPENPYPE_USERNAME") or getpass.getuser(), + "user": get_openpype_username(), "ext": extensions[0] }) diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 56bdd047c9..67845c77cf 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- """Package to deal with saving and retrieving user specific settings.""" import os +import json +import getpass +import platform from datetime import datetime from abc import ABCMeta, abstractmethod -import json # TODO Use pype igniter logic instead of using duplicated code # disable lru cache in Python 2 @@ -24,11 +26,11 @@ try: except ImportError: import ConfigParser as configparser -import platform - import six import appdirs +from openpype.settings import get_local_settings + from .import validate_mongo_connection _PLACEHOLDER = object() @@ -538,3 +540,25 @@ def change_openpype_mongo_url(new_mongo_url): if existing_value is not None: registry.delete_item(key) registry.set_item(key, new_mongo_url) + + +def get_openpype_username(): + """OpenPype username used for templates and publishing. + + May be different than machine's username. 
+ + Always returns "OPENPYPE_USERNAME" environment if is set then tries local + settings and last option is to use `getpass.getuser()` which returns + machine username. + """ + username = os.environ.get("OPENPYPE_USERNAME") + if not username: + local_settings = get_local_settings() + username = ( + local_settings + .get("general", {}) + .get("username") + ) + if not username: + username = getpass.getuser() + return username diff --git a/openpype/lib/log.py b/openpype/lib/log.py index 9745279e28..39b6c67080 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -123,6 +123,8 @@ class PypeFormatter(logging.Formatter): if record.exc_info is not None: line_len = len(str(record.exc_info[1])) + if line_len > 30: + line_len = 30 out = "{}\n{}\n{}\n{}\n{}".format( out, line_len * "=", diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py index d7c6d99fe6..bae48c540b 100644 --- a/openpype/modules/__init__.py +++ b/openpype/modules/__init__.py @@ -18,10 +18,6 @@ from .webserver import ( WebServerModule, IWebServerRoutes ) -from .user import ( - UserModule, - IUserModule -) from .idle_manager import ( IdleManager, IIdleManager @@ -60,9 +56,6 @@ __all__ = ( "WebServerModule", "IWebServerRoutes", - "UserModule", - "IUserModule", - "IdleManager", "IIdleManager", diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 38a6b9b246..69159fda1a 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -64,7 +64,6 @@ class AfterEffectsSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS" ] diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index ba1ffdcf30..37041a84b1 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -273,7 +273,6 @@ class HarmonySubmitDeadline( "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS" ] diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 3aea837bb1..a5841f406c 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -47,7 +47,7 @@ payload_skeleton_template = { "BatchName": None, # Top-level group name "Name": None, # Job name, as seen in Monitor "UserName": None, - "Plugin": "MayaPype", + "Plugin": "MayaBatch", "Frames": "{start}-{end}x{step}", "Comment": None, "Priority": 50, @@ -396,7 +396,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): step=int(self._instance.data["byFrameStep"])) self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get( - "mayaRenderPlugin", "MayaPype") + "mayaRenderPlugin", "MayaBatch") self.payload_skeleton["JobInfo"]["BatchName"] = filename # Job name, as seen in Monitor @@ -441,7 +441,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AVALON_ASSET", "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS" ] diff --git 
a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index 2e30e624ef..7faa3393e5 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -31,6 +31,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): group = "" department = "" limit_groups = {} + use_gpu = False def process(self, instance): instance.data["toBeRenderedOn"] = "deadline" @@ -206,6 +207,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Resolve relative references "ProjectPath": script_path, "AWSAssetFile0": render_path, + + # using GPU by default + "UseGpu": self.use_gpu, + # Only the specific write node is rendered. "WriteNode": exe_node_name }, @@ -375,7 +380,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): list: captured groups list """ captured_groups = [] - for lg_name, list_node_class in self.deadline_limit_groups.items(): + for lg_name, list_node_class in self.limit_groups.items(): for node_class in list_node_class: for node in nuke.allNodes(recurseGroups=True): # ignore all nodes not member of defined class diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 8248bf532e..12d687bbf2 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -2,9 +2,9 @@ import json from openpype.api import ProjectSettings -from openpype.modules.ftrack.lib import ServerAction -from openpype.modules.ftrack.lib.avalon_sync import ( - get_pype_attr, +from openpype.modules.ftrack.lib import ( + ServerAction, + get_openpype_attr, CUST_ATTR_AUTO_SYNC ) @@ -159,7 +159,7 @@ class PrepareProjectServer(ServerAction): for key, entity in project_anatom_settings["attributes"].items(): attribute_values_by_key[key] = entity.value - cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True) + cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True) for attr in hier_cust_attrs: key = attr["key"] diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 347b227dd3..3bb01798e4 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -18,12 +18,15 @@ from avalon import schema from avalon.api import AvalonMongoDB from openpype.modules.ftrack.lib import ( + get_openpype_attr, + CUST_ATTR_ID_KEY, + CUST_ATTR_AUTO_SYNC, + avalon_sync, + BaseEvent ) from openpype.modules.ftrack.lib.avalon_sync import ( - CUST_ATTR_ID_KEY, - CUST_ATTR_AUTO_SYNC, EntitySchemas ) @@ -125,7 +128,7 @@ class SyncToAvalonEvent(BaseEvent): @property def avalon_cust_attrs(self): if self._avalon_cust_attrs is None: - self._avalon_cust_attrs = avalon_sync.get_pype_attr( + self._avalon_cust_attrs = get_openpype_attr( self.process_session, query_keys=self.cust_attr_query_keys ) return self._avalon_cust_attrs diff --git a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py b/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py index c326c56a7c..45cc9adf55 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py +++ 
b/openpype/modules/ftrack/event_handlers_user/action_clean_hierarchical_attributes.py @@ -1,7 +1,10 @@ import collections import ftrack_api -from openpype.modules.ftrack.lib import BaseAction, statics_icon -from openpype.modules.ftrack.lib.avalon_sync import get_pype_attr +from openpype.modules.ftrack.lib import ( + BaseAction, + statics_icon, + get_openpype_attr +) class CleanHierarchicalAttrsAction(BaseAction): @@ -52,7 +55,7 @@ class CleanHierarchicalAttrsAction(BaseAction): ) entity_ids_joined = ", ".join(all_entities_ids) - attrs, hier_attrs = get_pype_attr(session) + attrs, hier_attrs = get_openpype_attr(session) for attr in hier_attrs: configuration_key = attr["key"] diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index 63025d35b3..63605eda5e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -2,10 +2,20 @@ import collections import json import arrow import ftrack_api -from openpype.modules.ftrack.lib import BaseAction, statics_icon -from openpype.modules.ftrack.lib.avalon_sync import ( - CUST_ATTR_ID_KEY, CUST_ATTR_GROUP, default_custom_attributes_definition +from openpype.modules.ftrack.lib import ( + BaseAction, + statics_icon, + + CUST_ATTR_ID_KEY, + CUST_ATTR_GROUP, + CUST_ATTR_TOOLS, + CUST_ATTR_APPLICATIONS, + + default_custom_attributes_definition, + app_definitions_from_app_manager, + tool_definitions_from_app_manager ) + from openpype.api import get_system_settings from openpype.lib import ApplicationManager @@ -370,24 +380,12 @@ class CustomAttributes(BaseAction): exc_info=True ) - def app_defs_from_app_manager(self): - app_definitions = [] - for app_name, app in self.app_manager.applications.items(): - if app.enabled and app.is_host: - app_definitions.append({ - app_name: app.full_label - }) - - if not app_definitions: - app_definitions.append({"empty": "< Empty >"}) - return app_definitions - def applications_attribute(self, event): - apps_data = self.app_defs_from_app_manager() + apps_data = app_definitions_from_app_manager(self.app_manager) applications_custom_attr_data = { "label": "Applications", - "key": "applications", + "key": CUST_ATTR_APPLICATIONS, "type": "enumerator", "entity_type": "show", "group": CUST_ATTR_GROUP, @@ -399,19 +397,11 @@ class CustomAttributes(BaseAction): self.process_attr_data(applications_custom_attr_data, event) def tools_attribute(self, event): - tools_data = [] - for tool_name, tool in self.app_manager.tools.items(): - tools_data.append({ - tool_name: tool.label - }) - - # Make sure there is at least one item - if not tools_data: - tools_data.append({"empty": "< Empty >"}) + tools_data = tool_definitions_from_app_manager(self.app_manager) tools_custom_attr_data = { "label": "Tools", - "key": "tools_env", + "key": CUST_ATTR_TOOLS, "type": "enumerator", "is_hierarchical": True, "group": CUST_ATTR_GROUP, diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index bd25f995fe..5298c06371 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -4,10 +4,8 @@ from openpype.api import ProjectSettings from openpype.modules.ftrack.lib import ( BaseAction, - statics_icon -) -from openpype.modules.ftrack.lib.avalon_sync 
import ( - get_pype_attr, + statics_icon, + get_openpype_attr, CUST_ATTR_AUTO_SYNC ) @@ -162,7 +160,7 @@ class PrepareProjectLocal(BaseAction): for key, entity in project_anatom_settings["attributes"].items(): attribute_values_by_key[key] = entity.value - cust_attrs, hier_cust_attrs = get_pype_attr(self.session, True) + cust_attrs, hier_cust_attrs = get_openpype_attr(self.session, True) for attr in hier_cust_attrs: key = attr["key"] diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py index d242268048..af578de86b 100644 --- a/openpype/modules/ftrack/ftrack_module.py +++ b/openpype/modules/ftrack/ftrack_module.py @@ -1,4 +1,5 @@ import os +import json import collections from abc import ABCMeta, abstractmethod import six @@ -8,10 +9,10 @@ from openpype.modules import ( ITrayModule, IPluginPaths, ITimersManager, - IUserModule, ILaunchHookPaths, ISettingsChangeListener ) +from openpype.settings import SaveWarningExc FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -32,7 +33,6 @@ class FtrackModule( ITrayModule, IPluginPaths, ITimersManager, - IUserModule, ILaunchHookPaths, ISettingsChangeListener ): @@ -123,15 +123,86 @@ class FtrackModule( if self.tray_module: self.tray_module.stop_timer_manager() - def on_pype_user_change(self, username): - """Implementation of IUserModule interface.""" - if self.tray_module: - self.tray_module.changed_user() - - def on_system_settings_save(self, *_args, **_kwargs): + def on_system_settings_save( + self, old_value, new_value, changes, new_value_metadata + ): """Implementation of ISettingsChangeListener interface.""" - # Ignore - return + try: + session = self.create_ftrack_session() + except Exception: + self.log.warning("Couldn't create ftrack session.", exc_info=True) + raise SaveWarningExc(( + "Saving of attributes to ftrack wasn't successful," + " try running Create/Update Avalon Attributes in ftrack." 
+ )) + + from .lib import ( + get_openpype_attr, + CUST_ATTR_APPLICATIONS, + CUST_ATTR_TOOLS, + app_definitions_from_app_manager, + tool_definitions_from_app_manager + ) + from openpype.api import ApplicationManager + query_keys = [ + "id", + "key", + "config" + ] + custom_attributes = get_openpype_attr( + session, + split_hierarchical=False, + query_keys=query_keys + ) + app_attribute = None + tool_attribute = None + for custom_attribute in custom_attributes: + key = custom_attribute["key"] + if key == CUST_ATTR_APPLICATIONS: + app_attribute = custom_attribute + elif key == CUST_ATTR_TOOLS: + tool_attribute = custom_attribute + + app_manager = ApplicationManager(new_value_metadata) + missing_attributes = [] + if not app_attribute: + missing_attributes.append(CUST_ATTR_APPLICATIONS) + else: + config = json.loads(app_attribute["config"]) + new_data = app_definitions_from_app_manager(app_manager) + prepared_data = [] + for item in new_data: + for key, label in item.items(): + prepared_data.append({ + "menu": label, + "value": key + }) + + config["data"] = json.dumps(prepared_data) + app_attribute["config"] = json.dumps(config) + + if not tool_attribute: + missing_attributes.append(CUST_ATTR_TOOLS) + else: + config = json.loads(tool_attribute["config"]) + new_data = tool_definitions_from_app_manager(app_manager) + prepared_data = [] + for item in new_data: + for key, label in item.items(): + prepared_data.append({ + "menu": label, + "value": key + }) + config["data"] = json.dumps(prepared_data) + tool_attribute["config"] = json.dumps(config) + + session.commit() + + if missing_attributes: + raise SaveWarningExc(( + "Couldn't find custom attribute/s ({}) to update." + " Try running Create/Update Avalon Attributes in ftrack." + ).format(", ".join(missing_attributes))) def on_project_settings_save(self, *_args, **_kwargs): """Implementation of ISettingsChangeListener interface.""" @@ -139,7 +210,7 @@ class FtrackModule( return def on_project_anatomy_save( - self, old_value, new_value, changes, project_name + self, old_value, new_value, changes, project_name, new_value_metadata ): """Implementation of ISettingsChangeListener interface.""" if not project_name: @@ -150,32 +221,49 @@ class FtrackModule( return import ftrack_api - from openpype.modules.ftrack.lib import avalon_sync + from openpype.modules.ftrack.lib import get_openpype_attr + + try: + session = self.create_ftrack_session() + except Exception: + self.log.warning("Couldn't create ftrack session.", exc_info=True) + raise SaveWarningExc(( + "Saving of attributes to ftrack wasn't successful," + " try running Create/Update Avalon Attributes in ftrack." + )) - session = self.create_ftrack_session() project_entity = session.query( "Project where full_name is \"{}\"".format(project_name) ).first() if not project_entity: - self.log.warning(( - "Ftrack project with names \"{}\" was not found." - " Skipping settings attributes change callback." - )) - return + msg = ( + "Ftrack project with name \"{}\" was not found in Ftrack." + " Can't push attribute changes." 
+ ).format(project_name) + self.log.warning(msg) + raise SaveWarningExc(msg) project_id = project_entity["id"] - cust_attr, hier_attr = avalon_sync.get_pype_attr(session) + cust_attr, hier_attr = get_openpype_attr(session) cust_attr_by_key = {attr["key"]: attr for attr in cust_attr} hier_attrs_by_key = {attr["key"]: attr for attr in hier_attr} + + failed = {} + missing = {} for key, value in attributes_changes.items(): configuration = hier_attrs_by_key.get(key) if not configuration: configuration = cust_attr_by_key.get(key) if not configuration: + self.log.warning( + "Custom attribute \"{}\" was not found.".format(key) + ) + missing[key] = value continue + # TODO add add permissions check # TODO add value validations # - value type and list items entity_key = collections.OrderedDict() @@ -189,10 +277,45 @@ class FtrackModule( "value", ftrack_api.symbol.NOT_SET, value - ) ) - session.commit() + try: + session.commit() + self.log.debug( + "Changed project custom attribute \"{}\" to \"{}\"".format( + key, value + ) + ) + except Exception: + self.log.warning( + "Failed to set \"{}\" to \"{}\"".format(key, value), + exc_info=True + ) + session.rollback() + failed[key] = value + + if not failed and not missing: + return + + error_msg = ( + "Values were not updated on Ftrack which may cause issues." + " try running Create/Update Avalon Attributes in ftrack " + " and resave project settings." + ) + if missing: + error_msg += "\nMissing Custom attributes on Ftrack: {}.".format( + ", ".join([ + '"{}"'.format(key) + for key in missing.keys() + ]) + ) + if failed: + joined_failed = ", ".join([ + '"{}": "{}"'.format(key, value) + for key, value in failed.items() + ]) + error_msg += "\nFailed to set: {}".format(joined_failed) + raise SaveWarningExc(error_msg) def create_ftrack_session(self, **session_kwargs): import ftrack_api diff --git a/openpype/modules/ftrack/lib/__init__.py b/openpype/modules/ftrack/lib/__init__.py index 82b6875590..ce6d5284b6 100644 --- a/openpype/modules/ftrack/lib/__init__.py +++ b/openpype/modules/ftrack/lib/__init__.py @@ -1,7 +1,21 @@ +from .constants import ( + CUST_ATTR_ID_KEY, + CUST_ATTR_AUTO_SYNC, + CUST_ATTR_GROUP, + CUST_ATTR_TOOLS, + CUST_ATTR_APPLICATIONS +) from . settings import ( get_ftrack_url_from_settings, get_ftrack_event_mongo_info ) +from .custom_attributes import ( + default_custom_attributes_definition, + app_definitions_from_app_manager, + tool_definitions_from_app_manager, + get_openpype_attr +) + from . import avalon_sync from . 
import credentials from .ftrack_base_handler import BaseHandler @@ -10,9 +24,20 @@ from .ftrack_action_handler import BaseAction, ServerAction, statics_icon __all__ = ( + "CUST_ATTR_ID_KEY", + "CUST_ATTR_AUTO_SYNC", + "CUST_ATTR_GROUP", + "CUST_ATTR_TOOLS", + "CUST_ATTR_APPLICATIONS", + "get_ftrack_url_from_settings", "get_ftrack_event_mongo_info", + "default_custom_attributes_definition", + "app_definitions_from_app_manager", + "tool_definitions_from_app_manager", + "get_openpype_attr", + "avalon_sync", "credentials", diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 79e1366a0d..f58e858a5a 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -14,17 +14,21 @@ else: from avalon.api import AvalonMongoDB import avalon + from openpype.api import ( Logger, Anatomy, get_anatomy_settings ) +from openpype.lib import ApplicationManager + +from .constants import CUST_ATTR_ID_KEY +from .custom_attributes import get_openpype_attr from bson.objectid import ObjectId from bson.errors import InvalidId from pymongo import UpdateOne import ftrack_api -from openpype.lib import ApplicationManager log = Logger.get_logger(__name__) @@ -36,23 +40,6 @@ EntitySchemas = { "config": "openpype:config-2.0" } -# Group name of custom attributes -CUST_ATTR_GROUP = "openpype" - -# name of Custom attribute that stores mongo_id from avalon db -CUST_ATTR_ID_KEY = "avalon_mongo_id" -CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" - - -def default_custom_attributes_definition(): - json_file_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "custom_attributes.json" - ) - with open(json_file_path, "r") as json_stream: - data = json.load(json_stream) - return data - def check_regex(name, entity_type, in_schema=None, schema_patterns=None): schema_name = "asset-3.0" @@ -91,39 +78,6 @@ def join_query_keys(keys): return ",".join(["\"{}\"".format(key) for key in keys]) -def get_pype_attr(session, split_hierarchical=True, query_keys=None): - custom_attributes = [] - hier_custom_attributes = [] - if not query_keys: - query_keys = [ - "id", - "entity_type", - "object_type_id", - "is_hierarchical", - "default" - ] - # TODO remove deprecated "pype" group from query - cust_attrs_query = ( - "select {}" - " from CustomAttributeConfiguration" - # Kept `pype` for Backwards Compatiblity - " where group.name in (\"pype\", \"{}\")" - ).format(", ".join(query_keys), CUST_ATTR_GROUP) - all_avalon_attr = session.query(cust_attrs_query).all() - for cust_attr in all_avalon_attr: - if split_hierarchical and cust_attr["is_hierarchical"]: - hier_custom_attributes.append(cust_attr) - continue - - custom_attributes.append(cust_attr) - - if split_hierarchical: - # return tuple - return custom_attributes, hier_custom_attributes - - return custom_attributes - - def get_python_type_for_custom_attribute(cust_attr, cust_attr_type_name=None): """Python type that should value of custom attribute have. 
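Aside: a hypothetical usage sketch (not part of the patch) of the relocated helper. After this refactor, get_openpype_attr and the custom-attribute constants come from openpype.modules.ftrack.lib instead of lib.avalon_sync. The session is assumed to be an authenticated ftrack_api.Session, and the extra query_keys are passed only so the example can read "key" without relying on lazy loading.

import ftrack_api

from openpype.modules.ftrack.lib import get_openpype_attr, CUST_ATTR_ID_KEY

# credentials are read from FTRACK_SERVER / FTRACK_API_USER / FTRACK_API_KEY
session = ftrack_api.Session()

cust_attrs, hier_attrs = get_openpype_attr(
    session,
    split_hierarchical=True,
    query_keys=["id", "key", "is_hierarchical", "default"]
)
mongo_id_attr = next(
    (attr for attr in hier_attrs if attr["key"] == CUST_ATTR_ID_KEY),
    None
)
print("avalon_mongo_id attribute found:", mongo_id_attr is not None)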
@@ -921,7 +875,7 @@ class SyncEntitiesFactory: def set_cutom_attributes(self): self.log.debug("* Preparing custom attributes") # Get custom attributes and values - custom_attrs, hier_attrs = get_pype_attr( + custom_attrs, hier_attrs = get_openpype_attr( self.session, query_keys=self.cust_attr_query_keys ) ent_types = self.session.query("select id, name from ObjectType").all() @@ -2508,7 +2462,7 @@ class SyncEntitiesFactory: if new_entity_id not in p_chilren: self.entities_dict[parent_id]["children"].append(new_entity_id) - cust_attr, _ = get_pype_attr(self.session) + cust_attr, _ = get_openpype_attr(self.session) for _attr in cust_attr: key = _attr["key"] if key not in av_entity["data"]: diff --git a/openpype/modules/ftrack/lib/constants.py b/openpype/modules/ftrack/lib/constants.py new file mode 100644 index 0000000000..73d5112e6d --- /dev/null +++ b/openpype/modules/ftrack/lib/constants.py @@ -0,0 +1,12 @@ +# Group name of custom attributes +CUST_ATTR_GROUP = "openpype" + +# name of Custom attribute that stores mongo_id from avalon db +CUST_ATTR_ID_KEY = "avalon_mongo_id" +# Auto sync of project +CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" + +# Applications custom attribute name +CUST_ATTR_APPLICATIONS = "applications" +# Environment tools custom attribute +CUST_ATTR_TOOLS = "tools_env" diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py new file mode 100644 index 0000000000..33eea32baa --- /dev/null +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -0,0 +1,73 @@ +import os +import json + +from .constants import CUST_ATTR_GROUP + + +def default_custom_attributes_definition(): + json_file_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "custom_attributes.json" + ) + with open(json_file_path, "r") as json_stream: + data = json.load(json_stream) + return data + + +def app_definitions_from_app_manager(app_manager): + app_definitions = [] + for app_name, app in app_manager.applications.items(): + if app.enabled and app.is_host: + app_definitions.append({ + app_name: app.full_label + }) + + if not app_definitions: + app_definitions.append({"empty": "< Empty >"}) + return app_definitions + + +def tool_definitions_from_app_manager(app_manager): + tools_data = [] + for tool_name, tool in app_manager.tools.items(): + tools_data.append({ + tool_name: tool.label + }) + + # Make sure there is at least one item + if not tools_data: + tools_data.append({"empty": "< Empty >"}) + return tools_data + + +def get_openpype_attr(session, split_hierarchical=True, query_keys=None): + custom_attributes = [] + hier_custom_attributes = [] + if not query_keys: + query_keys = [ + "id", + "entity_type", + "object_type_id", + "is_hierarchical", + "default" + ] + # TODO remove deprecated "pype" group from query + cust_attrs_query = ( + "select {}" + " from CustomAttributeConfiguration" + # Kept `pype` for Backwards Compatiblity + " where group.name in (\"pype\", \"{}\")" + ).format(", ".join(query_keys), CUST_ATTR_GROUP) + all_avalon_attr = session.query(cust_attrs_query).all() + for cust_attr in all_avalon_attr: + if split_hierarchical and cust_attr["is_hierarchical"]: + hier_custom_attributes.append(cust_attr) + continue + + custom_attributes.append(cust_attr) + + if split_hierarchical: + # return tuple + return custom_attributes, hier_custom_attributes + + return custom_attributes diff --git a/openpype/modules/launcher_action.py b/openpype/modules/launcher_action.py index da0468d495..5ed8585b6a 100644 --- 
a/openpype/modules/launcher_action.py +++ b/openpype/modules/launcher_action.py @@ -22,7 +22,6 @@ class LauncherAction(PypeModule, ITrayAction): # Register actions if self.tray_initialized: from openpype.tools.launcher import actions - # actions.register_default_actions() actions.register_config_actions() actions_paths = self.manager.collect_plugin_paths()["actions"] actions.register_actions_from_paths(actions_paths) diff --git a/openpype/modules/settings_action.py b/openpype/modules/settings_action.py index 371e190c12..3f7cb8c3ba 100644 --- a/openpype/modules/settings_action.py +++ b/openpype/modules/settings_action.py @@ -16,18 +16,20 @@ class ISettingsChangeListener: } """ @abstractmethod - def on_system_settings_save(self, old_value, new_value, changes): + def on_system_settings_save( + self, old_value, new_value, changes, new_value_metadata + ): pass @abstractmethod def on_project_settings_save( - self, old_value, new_value, changes, project_name + self, old_value, new_value, changes, project_name, new_value_metadata ): pass @abstractmethod def on_project_anatomy_save( - self, old_value, new_value, changes, project_name + self, old_value, new_value, changes, project_name, new_value_metadata ): pass diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index f1ea24f601..b67e5a6cfa 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ b/openpype/modules/sync_server/providers/gdrive.py @@ -7,7 +7,7 @@ from .abstract_provider import AbstractProvider from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload from openpype.api import Logger from openpype.api import get_system_settings -from ..utils import time_function +from ..utils import time_function, ResumableError import time @@ -63,7 +63,14 @@ class GDriveHandler(AbstractProvider): return self.service = self._get_gd_service() - self.root = self._prepare_root_info() + try: + self.root = self._prepare_root_info() + except errors.HttpError: + log.warning("HttpError in sync loop, " + "trying next loop", + exc_info=True) + raise ResumableError + self._tree = tree self.active = True diff --git a/openpype/modules/sync_server/providers/lib.py b/openpype/modules/sync_server/providers/lib.py index 58947e115d..01a5d50ba5 100644 --- a/openpype/modules/sync_server/providers/lib.py +++ b/openpype/modules/sync_server/providers/lib.py @@ -92,4 +92,4 @@ factory = ProviderFactory() # 7 denotes number of files that could be synced in single loop - learned by # trial and error factory.register_provider('gdrive', GDriveHandler, 7) -factory.register_provider('local_drive', LocalDriveHandler, 10) +factory.register_provider('local_drive', LocalDriveHandler, 50) diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index e97c0e8844..9b305a1b2e 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -8,7 +8,7 @@ from concurrent.futures._base import CancelledError from .providers import lib from openpype.lib import PypeLogger -from .utils import SyncStatus +from .utils import SyncStatus, ResumableError log = PypeLogger().get_logger("SyncServer") @@ -232,6 +232,7 @@ class SyncServerThread(threading.Thread): self.loop = None self.is_running = False self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3) + self.timer = None def run(self): self.is_running = True @@ -266,8 +267,8 @@ class SyncServerThread(threading.Thread): Returns: """ - try: - while self.is_running and 
not self.module.is_paused(): + while self.is_running and not self.module.is_paused(): + try: import time start_time = None self.module.set_sync_project_settings() # clean cache @@ -384,17 +385,27 @@ class SyncServerThread(threading.Thread): duration = time.time() - start_time log.debug("One loop took {:.2f}s".format(duration)) - await asyncio.sleep(self.module.get_loop_delay(collection)) - except ConnectionResetError: - log.warning("ConnectionResetError in sync loop, trying next loop", - exc_info=True) - except CancelledError: - # just stopping server - pass - except Exception: - self.stop() - log.warning("Unhandled exception in sync loop, stopping server", - exc_info=True) + + delay = self.module.get_loop_delay(collection) + log.debug("Waiting for {} seconds to new loop".format(delay)) + self.timer = asyncio.create_task(self.run_timer(delay)) + await asyncio.gather(self.timer) + + except ConnectionResetError: + log.warning("ConnectionResetError in sync loop, " + "trying next loop", + exc_info=True) + except CancelledError: + # just stopping server + pass + except ResumableError: + log.warning("ResumableError in sync loop, " + "trying next loop", + exc_info=True) + except Exception: + self.stop() + log.warning("Unhandled except. in sync loop, stopping server", + exc_info=True) def stop(self): """Sets is_running flag to false, 'check_shutdown' shuts server down""" @@ -417,6 +428,17 @@ class SyncServerThread(threading.Thread): await asyncio.sleep(0.07) self.loop.stop() + async def run_timer(self, delay): + """Wait for 'delay' seconds to start next loop""" + await asyncio.sleep(delay) + + def reset_timer(self): + """Called when waiting for next loop should be skipped""" + log.debug("Resetting timer") + if self.timer: + self.timer.cancel() + self.timer = None + def _working_sites(self, collection): if self.module.is_project_paused(collection): log.debug("Both sites same, skipping") diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index 59c3787789..a434af9fea 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -401,6 +401,24 @@ class SyncServerModule(PypeModule, ITrayModule): return remote_site + def reset_timer(self): + """ + Called when waiting for next loop should be skipped. + + In case of user's involvement (reset site), start that right away. 
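Aside: a simplified, self-contained sketch (not part of the patch) of the pattern behind run_timer/reset_timer above: the inter-loop delay is wrapped in an asyncio task so that cancelling it wakes the sync loop immediately instead of waiting out the full delay. Class and function names below are illustrative only.

import asyncio


class CancellableDelay:
    """Sleep that a waiting loop can be woken out of early."""

    def __init__(self):
        self._task = None

    async def wait(self, delay):
        self._task = asyncio.create_task(asyncio.sleep(delay))
        try:
            await self._task
        except asyncio.CancelledError:
            pass  # cancelled: skip the rest of the delay
        self._task = None

    def reset(self):
        # called from elsewhere, e.g. when a user resets a site
        if self._task:
            self._task.cancel()


async def main():
    delay = CancellableDelay()
    # simulate a user action half a second into a 60 second wait
    asyncio.get_running_loop().call_later(0.5, delay.reset)
    await delay.wait(60)
    print("next sync loop starts now")


asyncio.run(main())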
+ """ + self.sync_server_thread.reset_timer() + + def get_enabled_projects(self): + """Returns list of projects which have SyncServer enabled.""" + enabled_projects = [] + for project in self.connection.projects(): + project_name = project["name"] + project_settings = self.get_sync_project_setting(project_name) + if project_settings: + enabled_projects.append(project_name) + + return enabled_projects """ End of Public API """ def get_local_file_path(self, collection, site_name, file_path): @@ -413,7 +431,7 @@ class SyncServerModule(PypeModule, ITrayModule): return local_file_path def _get_remote_sites_from_settings(self, sync_settings): - if not self.enabled or not sync_settings['enabled']: + if not self.enabled or not sync_settings.get('enabled'): return [] remote_sites = [self.DEFAULT_SITE, self.LOCAL_SITE] @@ -424,7 +442,7 @@ class SyncServerModule(PypeModule, ITrayModule): def _get_enabled_sites_from_settings(self, sync_settings): sites = [self.DEFAULT_SITE] - if self.enabled and sync_settings['enabled']: + if self.enabled and sync_settings.get('enabled'): sites.append(self.LOCAL_SITE) return sites @@ -445,6 +463,11 @@ class SyncServerModule(PypeModule, ITrayModule): if not self.enabled: return + enabled_projects = self.get_enabled_projects() + if not enabled_projects: + self.enabled = False + return + self.lock = threading.Lock() try: diff --git a/openpype/modules/sync_server/tray/app.py b/openpype/modules/sync_server/tray/app.py index 25fbf0e49a..2538675c51 100644 --- a/openpype/modules/sync_server/tray/app.py +++ b/openpype/modules/sync_server/tray/app.py @@ -7,7 +7,7 @@ from openpype import resources from openpype.modules.sync_server.tray.widgets import ( SyncProjectListWidget, - SyncRepresentationWidget + SyncRepresentationSummaryWidget ) log = PypeLogger().get_logger("SyncServer") @@ -47,7 +47,7 @@ class SyncServerWindow(QtWidgets.QDialog): left_column_layout.addWidget(self.pause_btn) left_column.setLayout(left_column_layout) - repres = SyncRepresentationWidget( + repres = SyncRepresentationSummaryWidget( sync_server, project=self.projects.current_project, parent=self) @@ -78,7 +78,7 @@ class SyncServerWindow(QtWidgets.QDialog): layout.addWidget(footer) self.setLayout(body_layout) - self.setWindowTitle("Sync Server") + self.setWindowTitle("Sync Queue") self.projects.project_changed.connect( lambda: repres.table_view.model().set_project( diff --git a/openpype/modules/sync_server/tray/lib.py b/openpype/modules/sync_server/tray/lib.py index 0282d79ea1..04bd1f568e 100644 --- a/openpype/modules/sync_server/tray/lib.py +++ b/openpype/modules/sync_server/tray/lib.py @@ -1,4 +1,7 @@ from Qt import QtCore +import attr +import abc +import six from openpype.lib import PypeLogger @@ -20,6 +23,111 @@ ProviderRole = QtCore.Qt.UserRole + 2 ProgressRole = QtCore.Qt.UserRole + 4 DateRole = QtCore.Qt.UserRole + 6 FailedRole = QtCore.Qt.UserRole + 8 +HeaderNameRole = QtCore.Qt.UserRole + 10 +FullItemRole = QtCore.Qt.UserRole + 12 + + +@six.add_metaclass(abc.ABCMeta) +class AbstractColumnFilter: + + def __init__(self, column_name, dbcon=None): + self.column_name = column_name + self.dbcon = dbcon + self._search_variants = [] + + def search_variants(self): + """ + Returns all flavors of search available for this column, + """ + return self._search_variants + + @abc.abstractmethod + def values(self): + """ + Returns dict of available values for filter {'label':'value'} + """ + pass + + @abc.abstractmethod + def prepare_match_part(self, values): + """ + Prepares format valid for $match part from 
'values + + Args: + values (dict): {'label': 'value'} + Returns: + (dict): {'COLUMN_NAME': {'$in': ['val1', 'val2']}} + """ + pass + + +class PredefinedSetFilter(AbstractColumnFilter): + + def __init__(self, column_name, values): + super().__init__(column_name) + self._search_variants = ['checkbox'] + self._values = values + if self._values and \ + list(self._values.keys())[0] == list(self._values.values())[0]: + self._search_variants.append('text') + + def values(self): + return {k: v for k, v in self._values.items()} + + def prepare_match_part(self, values): + return {'$in': list(values.keys())} + + +class RegexTextFilter(AbstractColumnFilter): + + def __init__(self, column_name): + super().__init__(column_name) + self._search_variants = ['text'] + + def values(self): + return {} + + def prepare_match_part(self, values): + """ values = {'text1 text2': 'text1 text2'} """ + if not values: + return {} + + regex_strs = set() + text = list(values.keys())[0] # only single key always expected + for word in text.split(): + regex_strs.add('.*{}.*'.format(word)) + + return {"$regex": "|".join(regex_strs), + "$options": 'i'} + + +class MultiSelectFilter(AbstractColumnFilter): + + def __init__(self, column_name, values=None, dbcon=None): + super().__init__(column_name) + self._values = values + self.dbcon = dbcon + self._search_variants = ['checkbox'] + + def values(self): + if self._values: + return {k: v for k, v in self._values.items()} + + recs = self.dbcon.find({'type': self.column_name}, {"name": 1, + "_id": -1}) + values = {} + for item in recs: + values[item["name"]] = item["name"] + return dict(sorted(values.items(), key=lambda it: it[1])) + + def prepare_match_part(self, values): + return {'$in': list(values.keys())} + + +@attr.s +class FilterDefinition: + type = attr.ib() + values = attr.ib(factory=list) def pretty_size(value, suffix='B'): @@ -50,3 +158,9 @@ def translate_provider_for_icon(sync_server, project, site): if site == sync_server.DEFAULT_SITE: return sync_server.DEFAULT_SITE return sync_server.get_provider_for_site(project, site) + + +def get_item_by_id(model, object_id): + index = model.get_index(object_id) + item = model.data(index, FullItemRole) + return item diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 3cc53c6ec4..8fdd9487a4 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -56,17 +56,31 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): """Returns project""" return self._project + @property + def column_filtering(self): + return self._column_filtering + def rowCount(self, _index): return len(self._data) - def columnCount(self, _index): + def columnCount(self, _index=None): return len(self._header) - def headerData(self, section, orientation, role): + def headerData(self, section, orientation, role=Qt.DisplayRole): + if section >= len(self.COLUMN_LABELS): + return + if role == Qt.DisplayRole: if orientation == Qt.Horizontal: return self.COLUMN_LABELS[section][1] + if role == lib.HeaderNameRole: + if orientation == Qt.Horizontal: + return self.COLUMN_LABELS[section][0] # return name + + def get_column(self, index): + return self.COLUMN_LABELS[index] + def get_header_index(self, value): """ Returns index of 'value' in headers @@ -103,10 +117,10 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): self._rec_loaded = 0 if not representations: - self.query = self.get_default_query(load_records) + self.query = 
self.get_query(load_records) representations = self.dbcon.aggregate(self.query) - self.add_page_records(self.local_site, self.remote_site, + self.add_page_records(self.active_site, self.remote_site, representations) self.endResetModel() self.refresh_finished.emit() @@ -138,13 +152,13 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): log.debug("fetchMore") items_to_fetch = min(self._total_records - self._rec_loaded, self.PAGE_SIZE) - self.query = self.get_default_query(self._rec_loaded) + self.query = self.get_query(self._rec_loaded) representations = self.dbcon.aggregate(self.query) self.beginInsertRows(index, self._rec_loaded, self._rec_loaded + items_to_fetch - 1) - self.add_page_records(self.local_site, self.remote_site, + self.add_page_records(self.active_site, self.remote_site, representations) self.endInsertRows() @@ -156,6 +170,8 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): Sort is happening on a DB side, model is reset, db queried again. + It remembers one last sort, adds it as secondary after new sort. + Args: index (int): column index order (int): 0| @@ -170,8 +186,18 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): else: order = -1 - self.sort = {self.SORT_BY_COLUMN[index]: order, '_id': 1} - self.query = self.get_default_query() + backup_sort = dict(self.sort) + + self.sort = {self.SORT_BY_COLUMN[index]: order} # reset + # add last one + for key, val in backup_sort.items(): + if key != '_id': + self.sort[key] = val + break + # add default one + self.sort['_id'] = 1 + + self.query = self.get_query() # import json # log.debug(json.dumps(self.query, indent=4).\ # replace('False', 'false').\ @@ -180,16 +206,86 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): representations = self.dbcon.aggregate(self.query) self.refresh(representations) - def set_filter(self, word_filter): + def set_word_filter(self, word_filter): """ Adds text value filtering Args: word_filter (str): string inputted by user """ - self.word_filter = word_filter + self._word_filter = word_filter self.refresh() + def get_filters(self): + """ + Returns all available filter editors per column_name keys. 
+ """ + filters = {} + for column_name, _ in self.COLUMN_LABELS: + filter_rec = self.COLUMN_FILTERS.get(column_name) + if filter_rec: + filter_rec.dbcon = self.dbcon + filters[column_name] = filter_rec + + return filters + + def get_column_filter(self, index): + """ + Returns filter object for column 'index + + Args: + index(int): index of column in header + + Returns: + (AbstractColumnFilter) + """ + column_name = self._header[index] + + filter_rec = self.COLUMN_FILTERS.get(column_name) + if filter_rec: + filter_rec.dbcon = self.dbcon # up-to-date db connection + + return filter_rec + + def set_column_filtering(self, checked_values): + """ + Sets dictionary used in '$match' part of MongoDB aggregate + + Args: + checked_values(dict): key:values ({'status':{1:"Foo",3:"Bar"}} + + Modifies: + self._column_filtering : {'status': {'$in': [1, 2, 3]}} + """ + filtering = {} + for column_name, dict_value in checked_values.items(): + column_f = self.COLUMN_FILTERS.get(column_name) + if not column_f: + continue + column_f.dbcon = self.dbcon + filtering[column_name] = column_f.prepare_match_part(dict_value) + + self._column_filtering = filtering + + def get_column_filter_values(self, index): + """ + Returns list of available values for filtering in the column + + Args: + index(int): index of column in header + + Returns: + (dict) of value: label shown in filtering menu + 'value' is used in MongoDB query, 'label' is human readable for + menu + for some columns ('subset') might be 'value' and 'label' same + """ + filter_rec = self.get_column_filter(index) + if not filter_rec: + return {} + + return filter_rec.values() + def set_project(self, project): """ Changes project, called after project selection is changed @@ -199,7 +295,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): """ self._project = project self.sync_server.set_sync_project_settings() - self.local_site = self.sync_server.get_active_site(self.project) + self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) self.refresh() @@ -251,7 +347,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): ("files_count", "Files"), ("files_size", "Size"), ("priority", "Priority"), - ("state", "Status") + ("status", "Status") ] DEFAULT_SORT = { @@ -259,18 +355,25 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): "_id": 1 } SORT_BY_COLUMN = [ - "context.asset", # asset - "context.subset", # subset - "context.version", # version - "context.representation", # representation + "asset", # asset + "subset", # subset + "version", # version + "representation", # representation "updated_dt_local", # local created_dt "updated_dt_remote", # remote created_dt "files_count", # count of files "files_size", # file size of all files "context.asset", # priority TODO - "status" # state + "status" # status ] + COLUMN_FILTERS = { + 'status': lib.PredefinedSetFilter('status', lib.STATUS), + 'subset': lib.RegexTextFilter('subset'), + 'asset': lib.RegexTextFilter('asset'), + 'representation': lib.MultiSelectFilter('representation') + } + refresh_started = QtCore.Signal() refresh_finished = QtCore.Signal() @@ -297,7 +400,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): files_count = attr.ib(default=None) files_size = attr.ib(default=None) priority = attr.ib(default=None) - state = attr.ib(default=None) + status = attr.ib(default=None) path = attr.ib(default=None) def __init__(self, sync_server, header, project=None): @@ -307,7 +410,10 @@ 
class SyncRepresentationSummaryModel(_SyncRepresentationModel): self._project = project self._rec_loaded = 0 self._total_records = 0 # how many documents query actually found - self.word_filter = None + self._word_filter = None + self._column_filtering = {} + + self._word_filter = None self._initialized = False if not self._project or self._project == lib.DUMMY_PROJECT: @@ -316,15 +422,13 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): self.sync_server = sync_server # TODO think about admin mode # this is for regular user, always only single local and single remote - self.local_site = self.sync_server.get_active_site(self.project) + self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) - self.projection = self.get_default_projection() - self.sort = self.DEFAULT_SORT - self.query = self.get_default_query() - self.default_query = list(self.get_default_query()) + self.query = self.get_query() + self.default_query = list(self.get_query()) representations = self.dbcon.aggregate(self.query) self.refresh(representations) @@ -336,6 +440,9 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): def data(self, index, role): item = self._data[index.row()] + if role == lib.FullItemRole: + return item + header_value = self._header[index.column()] if role == lib.ProviderRole: if header_value == 'local_site': @@ -359,9 +466,11 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if role == lib.FailedRole: if header_value == 'local_site': - return item.state == lib.STATUS[2] and item.local_progress < 1 + return item.status == lib.STATUS[2] and \ + item.local_progress < 1 if header_value == 'remote_site': - return item.state == lib.STATUS[2] and item.remote_progress < 1 + return item.status == lib.STATUS[2] and \ + item.remote_progress < 1 if role == Qt.DisplayRole: # because of ImageDelegate @@ -397,7 +506,6 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): remote_site) for repre in result.get("paginatedResults"): - context = repre.get("context").pop() files = repre.get("files", []) if isinstance(files, dict): # aggregate returns dictionary files = [files] @@ -420,17 +528,17 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): avg_progress_local = lib.convert_progress( repre.get('avg_progress_local', '0')) - if context.get("version"): - version = "v{:0>3d}".format(context.get("version")) + if repre.get("version"): + version = "v{:0>3d}".format(repre.get("version")) else: version = "master" item = self.SyncRepresentation( repre.get("_id"), - context.get("asset"), - context.get("subset"), + repre.get("asset"), + repre.get("subset"), version, - context.get("representation"), + repre.get("representation"), local_updated, remote_updated, local_site, @@ -449,7 +557,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): self._data.append(item) self._rec_loaded += 1 - def get_default_query(self, limit=0): + def get_query(self, limit=0): """ Returns basic aggregate query for main table. 
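# add_page_records() above builds the version label shown in the table:
# integer versions are zero-padded to three digits ("v001", "v012", ...) and
# documents without a version fall back to the "master" label.  A tiny
# standalone illustration of that formatting (the helper name is made up):

def version_label(version):
    if version:
        return "v{:0>3d}".format(version)
    return "master"


for value in (1, 12, 1234, None):
    print(version_label(value))     # v001, v012, v1234, master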
@@ -461,7 +569,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): 'sync_dt' - same for remote side 'local_site' - progress of repr on local side, 1 = finished 'remote_site' - progress on remote side, calculates from files - 'state' - + 'status' - 0 - in progress 1 - failed 2 - queued @@ -481,7 +589,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE - return [ + aggr = [ {"$match": self.get_match_part()}, {'$unwind': '$files'}, # merge potentially unwinded records back to single per repre @@ -492,7 +600,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): }}, 'order_local': { '$filter': {'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', self.local_site]} + 'cond': {'$eq': ['$$p.name', self.active_site]} }} }}, {'$addFields': { @@ -584,16 +692,26 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): 'paused_local': {'$sum': '$paused_local'}, 'updated_dt_local': {'$max': "$updated_dt_local"} }}, - {"$project": self.projection}, - {"$sort": self.sort}, - { + {"$project": self.projection} + ] + + if self.column_filtering: + aggr.append( + {"$match": self.column_filtering} + ) + + aggr.extend( + [{"$sort": self.sort}, + { '$facet': { 'paginatedResults': [{'$skip': self._rec_loaded}, {'$limit': limit}], 'totalCount': [{'$count': 'count'}] } - } - ] + }] + ) + + return aggr def get_match_part(self): """ @@ -611,25 +729,26 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): """ base_match = { "type": "representation", - 'files.sites.name': {'$all': [self.local_site, + 'files.sites.name': {'$all': [self.active_site, self.remote_site]} } - if not self.word_filter: + if not self._word_filter: return base_match else: - regex_str = '.*{}.*'.format(self.word_filter) + regex_str = '.*{}.*'.format(self._word_filter) base_match['$or'] = [ {'context.subset': {'$regex': regex_str, '$options': 'i'}}, {'context.asset': {'$regex': regex_str, '$options': 'i'}}, {'context.representation': {'$regex': regex_str, '$options': 'i'}}] - if ObjectId.is_valid(self.word_filter): - base_match['$or'] = [{'_id': ObjectId(self.word_filter)}] + if ObjectId.is_valid(self._word_filter): + base_match['$or'] = [{'_id': ObjectId(self._word_filter)}] return base_match - def get_default_projection(self): + @property + def projection(self): """ Projection part for aggregate query. 
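# get_query() pages the summary table with a single aggregate call: an optional
# column '$match' is followed by '$sort' and a '$facet' stage that returns one
# page of documents plus the total count in the same round trip.  The sketch
# below only builds and prints that tail of the pipeline (running it would need
# a live MongoDB connection); the sort/filter values are illustrative.

from pprint import pprint

def paging_stages(sort, rec_loaded, page_size, column_filtering=None):
    stages = []
    if column_filtering:                       # only added when a filter is set
        stages.append({"$match": column_filtering})
    stages.append({"$sort": sort})
    stages.append({
        "$facet": {
            "paginatedResults": [{"$skip": rec_loaded},
                                 {"$limit": page_size}],
            "totalCount": [{"$count": "count"}]
        }
    })
    return stages


pprint(paging_stages(sort={"updated_dt_remote": -1, "_id": 1},
                     rec_loaded=0,
                     page_size=30,
                     column_filtering={"status": {"$in": [1, 3]}}))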
@@ -639,10 +758,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): (dict) """ return { - "context.subset": 1, - "context.asset": 1, - "context.version": 1, - "context.representation": 1, + "subset": {"$first": "$context.subset"}, + "asset": {"$first": "$context.asset"}, + "version": {"$first": "$context.version"}, + "representation": {"$first": "$context.representation"}, "data.path": 1, "files": 1, 'files_count': 1, @@ -721,7 +840,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): ("remote_site", "Remote site"), ("files_size", "Size"), ("priority", "Priority"), - ("state", "Status") + ("status", "Status") ] PAGE_SIZE = 30 @@ -733,10 +852,15 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): "updated_dt_local", # local created_dt "updated_dt_remote", # remote created_dt "size", # remote progress - "context.asset", # priority TODO - "status" # state + "size", # priority TODO + "status" # status ] + COLUMN_FILTERS = { + 'status': lib.PredefinedSetFilter('status', lib.STATUS), + 'file': lib.RegexTextFilter('file'), + } + refresh_started = QtCore.Signal() refresh_finished = QtCore.Signal() @@ -759,7 +883,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): remote_progress = attr.ib(default=None) size = attr.ib(default=None) priority = attr.ib(default=None) - state = attr.ib(default=None) + status = attr.ib(default=None) tries = attr.ib(default=None) error = attr.ib(default=None) path = attr.ib(default=None) @@ -772,22 +896,20 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self._project = project self._rec_loaded = 0 self._total_records = 0 # how many documents query actually found - self.word_filter = None + self._word_filter = None self._id = _id self._initialized = False + self._column_filtering = {} self.sync_server = sync_server # TODO think about admin mode # this is for regular user, always only single local and single remote - self.local_site = self.sync_server.get_active_site(self.project) + self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) self.sort = self.DEFAULT_SORT - # in case we would like to hide/show some columns - self.projection = self.get_default_projection() - - self.query = self.get_default_query() + self.query = self.get_query() representations = self.dbcon.aggregate(self.query) self.refresh(representations) @@ -798,6 +920,9 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): def data(self, index, role): item = self._data[index.row()] + if role == lib.FullItemRole: + return item + header_value = self._header[index.column()] if role == lib.ProviderRole: if header_value == 'local_site': @@ -821,9 +946,11 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if role == lib.FailedRole: if header_value == 'local_site': - return item.state == lib.STATUS[2] and item.local_progress < 1 + return item.status == lib.STATUS[2] and \ + item.local_progress < 1 if header_value == 'remote_site': - return item.state == lib.STATUS[2] and item.remote_progress < 1 + return item.status == lib.STATUS[2] and \ + item.remote_progress < 1 if role == Qt.DisplayRole: # because of ImageDelegate @@ -909,7 +1036,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self._data.append(item) self._rec_loaded += 1 - def get_default_query(self, limit=0): + def get_query(self, limit=0): """ Gets query that gets used when no extra sorting, filtering or projecting is needed. 
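# The projection above replaces the plain "context.*": 1 keys with '$first'
# expressions so the query exposes top-level 'asset'/'subset'/... fields that
# the new SORT_BY_COLUMN keys and column filters can work on.  '$first' here is
# the aggregation expression (MongoDB 4.4+) that picks one element, presumably
# because the preceding '$group' stage (not fully shown) leaves 'context' as an
# array -- that part is an assumption.  The snippet only constructs and prints
# a cut-down version of the stage:

context_fields = ("subset", "asset", "version", "representation")

projection = {field: {"$first": "$context.{}".format(field)}
              for field in context_fields}
projection.update({"data.path": 1, "files": 1, "files_count": 1})

print({"$project": projection})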
@@ -923,7 +1050,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE - return [ + aggr = [ {"$match": self.get_match_part()}, {"$unwind": "$files"}, {'$addFields': { @@ -933,7 +1060,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): }}, 'order_local': { '$filter': {'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', self.local_site]} + 'cond': {'$eq': ['$$p.name', self.active_site]} }} }}, {'$addFields': { @@ -1019,7 +1146,16 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): ]} ]}} }}, - {"$project": self.projection}, + {"$project": self.projection} + ] + + if self.column_filtering: + aggr.append( + {"$match": self.column_filtering} + ) + print(self.column_filtering) + + aggr.extend([ {"$sort": self.sort}, { '$facet': { @@ -1028,7 +1164,9 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): 'totalCount': [{'$count': 'count'}] } } - ] + ]) + + return aggr def get_match_part(self): """ @@ -1038,20 +1176,21 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): Returns: (dict) """ - if not self.word_filter: + if not self._word_filter: return { "type": "representation", "_id": self._id } else: - regex_str = '.*{}.*'.format(self.word_filter) + regex_str = '.*{}.*'.format(self._word_filter) return { "type": "representation", "_id": self._id, '$or': [{'files.path': {'$regex': regex_str, '$options': 'i'}}] } - def get_default_projection(self): + @property + def projection(self): """ Projection part for aggregate query. diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/sync_server/tray/widgets.py index 5071ffa2b0..106fc4b8a8 100644 --- a/openpype/modules/sync_server/tray/widgets.py +++ b/openpype/modules/sync_server/tray/widgets.py @@ -1,6 +1,7 @@ import os import subprocess import sys +from functools import partial from Qt import QtWidgets, QtCore, QtGui from Qt.QtCore import Qt @@ -14,6 +15,7 @@ from openpype.api import get_local_site_id from openpype.lib import PypeLogger from avalon.tools.delegates import pretty_timestamp +from avalon.vendor import qtawesome from openpype.modules.sync_server.tray.models import ( SyncRepresentationSummaryModel, @@ -40,6 +42,8 @@ class SyncProjectListWidget(ProjectListWidget): self.local_site = None self.icons = {} + self.layout().setContentsMargins(0, 0, 0, 0) + def validate_context_change(self): return True @@ -91,7 +95,6 @@ class SyncProjectListWidget(ProjectListWidget): self.project_name = point_index.data(QtCore.Qt.DisplayRole) menu = QtWidgets.QMenu() - menu.setStyleSheet(style.load_stylesheet()) actions_mapping = {} if self.sync_server.is_project_paused(self.project_name): @@ -132,7 +135,7 @@ class SyncProjectListWidget(ProjectListWidget): self.refresh() -class SyncRepresentationWidget(QtWidgets.QWidget): +class _SyncRepresentationWidget(QtWidgets.QWidget): """ Summary dialog with list of representations that matches current settings 'local_site' and 'remote_site'. 
@@ -140,87 +143,12 @@ class SyncRepresentationWidget(QtWidgets.QWidget): active_changed = QtCore.Signal() # active index changed message_generated = QtCore.Signal(str) - default_widths = ( - ("asset", 220), - ("subset", 190), - ("version", 55), - ("representation", 95), - ("local_site", 170), - ("remote_site", 170), - ("files_count", 50), - ("files_size", 60), - ("priority", 50), - ("state", 110) - ) + def _selection_changed(self, _new_selected, _all_selected): + idxs = self.selection_model.selectedRows() + self._selected_ids = [] - def __init__(self, sync_server, project=None, parent=None): - super(SyncRepresentationWidget, self).__init__(parent) - - self.sync_server = sync_server - - self._selected_id = None # keep last selected _id - self.representation_id = None - self.site_name = None # to pause/unpause representation - - self.filter = QtWidgets.QLineEdit() - self.filter.setPlaceholderText("Filter representations..") - - self._scrollbar_pos = None - - top_bar_layout = QtWidgets.QHBoxLayout() - top_bar_layout.addWidget(self.filter) - - self.table_view = QtWidgets.QTableView() - headers = [item[0] for item in self.default_widths] - - model = SyncRepresentationSummaryModel(sync_server, headers, project) - self.table_view.setModel(model) - self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - self.table_view.setSelectionMode( - QtWidgets.QAbstractItemView.SingleSelection) - self.table_view.setSelectionBehavior( - QtWidgets.QAbstractItemView.SelectRows) - self.table_view.horizontalHeader().setSortIndicator( - -1, Qt.AscendingOrder) - self.table_view.setSortingEnabled(True) - self.table_view.horizontalHeader().setSortIndicatorShown(True) - self.table_view.setAlternatingRowColors(True) - self.table_view.verticalHeader().hide() - - column = self.table_view.model().get_header_index("local_site") - delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) - - column = self.table_view.model().get_header_index("remote_site") - delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) - - for column_name, width in self.default_widths: - idx = model.get_header_index(column_name) - self.table_view.setColumnWidth(idx, width) - - layout = QtWidgets.QVBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addLayout(top_bar_layout) - layout.addWidget(self.table_view) - - self.table_view.doubleClicked.connect(self._double_clicked) - self.filter.textChanged.connect(lambda: model.set_filter( - self.filter.text())) - self.table_view.customContextMenuRequested.connect( - self._on_context_menu) - - model.refresh_started.connect(self._save_scrollbar) - model.refresh_finished.connect(self._set_scrollbar) - self.table_view.model().modelReset.connect(self._set_selection) - - self.selection_model = self.table_view.selectionModel() - self.selection_model.selectionChanged.connect(self._selection_changed) - - def _selection_changed(self, _new_selection): - index = self.selection_model.currentIndex() - self._selected_id = \ - self.table_view.model().data(index, Qt.UserRole) + for index in idxs: + self._selected_ids.append(self.model.data(index, Qt.UserRole)) def _set_selection(self): """ @@ -228,151 +156,169 @@ class SyncRepresentationWidget(QtWidgets.QWidget): Keep selection during model refresh. 
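# _on_context_menu()/_prepare_menu() below use a small dispatch pattern: every
# QAction is stored in two dicts -- one mapping the action to its handler and
# one mapping it to the kwargs that handler needs (selected ids + site name) --
# and whatever menu.exec_() returns is simply looked up and called.  The
# Qt-free sketch below shows only that pattern; the handler bodies, site names
# and the "chosen" entry are made up for the illustration.

def reset_site(selected_ids=None, site_name=None):
    print("re-sync {} on {}".format(selected_ids, site_name))

def remove_site(selected_ids=None, site_name=None):
    print("remove {} from {}".format(selected_ids, site_name))


selected_ids = ["607f1f77bcf86cd799439011"]
actions_mapping = {
    "Re-sync Active site": reset_site,
    "Completely remove from local": remove_site,
}
action_kwarg_map = {
    "Re-sync Active site": {"selected_ids": selected_ids,
                            "site_name": "studio"},
    "Completely remove from local": {"selected_ids": selected_ids,
                                     "site_name": "local_XYZ"},
}

result = "Re-sync Active site"            # stand-in for menu.exec_()
to_run = actions_mapping[result]
to_run(**action_kwarg_map.get(result, {}))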
""" - if self._selected_id: - index = self.table_view.model().get_index(self._selected_id) + existing_ids = [] + for selected_id in self._selected_ids: + index = self.model.get_index(selected_id) if index and index.isValid(): mode = QtCore.QItemSelectionModel.Select | \ QtCore.QItemSelectionModel.Rows - self.selection_model.setCurrentIndex(index, mode) - else: - self._selected_id = None + self.selection_model.select(index, mode) + existing_ids.append(selected_id) + + self._selected_ids = existing_ids def _double_clicked(self, index): """ Opens representation dialog with all files after doubleclick """ - _id = self.table_view.model().data(index, Qt.UserRole) + _id = self.model.data(index, Qt.UserRole) detail_window = SyncServerDetailWindow( - self.sync_server, _id, self.table_view.model().project) + self.sync_server, _id, self.model.project) detail_window.exec() - + def _on_context_menu(self, point): """ Shows menu with loader actions on Right-click. + + Supports multiple selects - adds all available actions, each + action handles if it appropriate for item itself, if not it skips. """ + is_multi = len(self._selected_ids) > 1 point_index = self.table_view.indexAt(point) - if not point_index.isValid(): + if not point_index.isValid() and not is_multi: return - self.item = self.table_view.model()._data[point_index.row()] - self.representation_id = self.item._id - log.debug("menu representation _id:: {}". - format(self.representation_id)) + if is_multi: + index = self.model.get_index(self._selected_ids[0]) + item = self.model.data(index, lib.FullItemRole) + else: + item = self.model.data(point_index, lib.FullItemRole) + action_kwarg_map, actions_mapping, menu = self._prepare_menu(item, + is_multi) + + result = menu.exec_(QtGui.QCursor.pos()) + if result: + to_run = actions_mapping[result] + to_run_kwargs = action_kwarg_map.get(result, {}) + if to_run: + to_run(**to_run_kwargs) + + self.model.refresh() + + def _prepare_menu(self, item, is_multi): menu = QtWidgets.QMenu() - menu.setStyleSheet(style.load_stylesheet()) + actions_mapping = {} - actions_kwargs_mapping = {} + action_kwarg_map = {} - local_site = self.item.local_site - local_progress = self.item.local_progress - remote_site = self.item.remote_site - remote_progress = self.item.remote_progress + active_site = self.model.active_site + remote_site = self.model.remote_site - for site, progress in {local_site: local_progress, + local_progress = item.local_progress + remote_progress = item.remote_progress + + project = self.model.project + + for site, progress in {active_site: local_progress, remote_site: remote_progress}.items(): - project = self.table_view.model().project - provider = self.sync_server.get_provider_for_site(project, - site) + provider = self.sync_server.get_provider_for_site(project, site) if provider == 'local_drive': if 'studio' in site: txt = " studio version" else: txt = " local version" action = QtWidgets.QAction("Open in explorer" + txt) - if progress == 1.0: + if progress == 1.0 or is_multi: actions_mapping[action] = self._open_in_explorer - actions_kwargs_mapping[action] = {'site': site} + action_kwarg_map[action] = \ + self._get_action_kwargs(site) menu.addAction(action) - # progress smaller then 1.0 --> in progress or queued - if local_progress < 1.0: - self.site_name = local_site - else: - self.site_name = remote_site - - if self.item.state in [lib.STATUS[0], lib.STATUS[1]]: - action = QtWidgets.QAction("Pause") - actions_mapping[action] = self._pause - menu.addAction(action) - - if self.item.state == 
lib.STATUS[3]: - action = QtWidgets.QAction("Unpause") - actions_mapping[action] = self._unpause - menu.addAction(action) - - # if self.item.state == lib.STATUS[1]: - # action = QtWidgets.QAction("Open error detail") - # actions_mapping[action] = self._show_detail - # menu.addAction(action) - - if remote_progress == 1.0: + if remote_progress == 1.0 or is_multi: action = QtWidgets.QAction("Re-sync Active site") - actions_mapping[action] = self._reset_local_site + action_kwarg_map[action] = self._get_action_kwargs(active_site) + actions_mapping[action] = self._reset_site menu.addAction(action) - if local_progress == 1.0: + if local_progress == 1.0 or is_multi: action = QtWidgets.QAction("Re-sync Remote site") - actions_mapping[action] = self._reset_remote_site + action_kwarg_map[action] = self._get_action_kwargs(remote_site) + actions_mapping[action] = self._reset_site menu.addAction(action) - if local_site != self.sync_server.DEFAULT_SITE: + if active_site == get_local_site_id(): action = QtWidgets.QAction("Completely remove from local") + action_kwarg_map[action] = self._get_action_kwargs(active_site) actions_mapping[action] = self._remove_site menu.addAction(action) - else: - action = QtWidgets.QAction("Mark for sync to local") - actions_mapping[action] = self._add_site - menu.addAction(action) + + # # temp for testing only !!! + # action = QtWidgets.QAction("Download") + # action_kwarg_map[action] = self._get_action_kwargs(active_site) + # actions_mapping[action] = self._add_site + # menu.addAction(action) if not actions_mapping: action = QtWidgets.QAction("< No action >") actions_mapping[action] = None menu.addAction(action) - result = menu.exec_(QtGui.QCursor.pos()) - if result: - to_run = actions_mapping[result] - to_run_kwargs = actions_kwargs_mapping.get(result, {}) - if to_run: - to_run(**to_run_kwargs) + return action_kwarg_map, actions_mapping, menu - self.table_view.model().refresh() + def _pause(self, selected_ids=None): + log.debug("Pause {}".format(selected_ids)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + if item.status not in [lib.STATUS[0], lib.STATUS[1]]: + continue + for site_name in [self.model.active_site, self.model.remote_site]: + check_progress = self._get_progress(item, site_name) + if check_progress < 1: + self.sync_server.pause_representation(self.model.project, + representation_id, + site_name) - def _pause(self): - self.sync_server.pause_representation(self.table_view.model().project, - self.representation_id, - self.site_name) - self.site_name = None - self.message_generated.emit("Paused {}".format(self.representation_id)) + self.message_generated.emit("Paused {}".format(representation_id)) - def _unpause(self): - self.sync_server.unpause_representation( - self.table_view.model().project, - self.representation_id, - self.site_name) - self.site_name = None - self.message_generated.emit("Unpaused {}".format( - self.representation_id)) + def _unpause(self, selected_ids=None): + log.debug("UnPause {}".format(selected_ids)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + if item.status not in lib.STATUS[3]: + continue + for site_name in [self.model.active_site, self.model.remote_site]: + check_progress = self._get_progress(item, site_name) + if check_progress < 1: + self.sync_server.unpause_representation( + self.model.project, + representation_id, + site_name) + + self.message_generated.emit("Unpause {}".format(representation_id)) # temporary here for 
testing, will be removed TODO - def _add_site(self): - log.info(self.representation_id) - project_name = self.table_view.model().project - local_site_name = get_local_site_id() - try: - self.sync_server.add_site( - project_name, - self.representation_id, - local_site_name - ) - self.message_generated.emit( - "Site {} added for {}".format(local_site_name, - self.representation_id)) - except ValueError as exp: - self.message_generated.emit("Error {}".format(str(exp))) + def _add_site(self, selected_ids=None, site_name=None): + log.debug("Add site {}:{}".format(selected_ids, site_name)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + if item.local_site == site_name or item.remote_site == site_name: + # site already exists skip + continue - def _remove_site(self): + try: + self.sync_server.add_site( + self.model.project, + representation_id, + site_name) + self.message_generated.emit( + "Site {} added for {}".format(site_name, + representation_id)) + except ValueError as exp: + self.message_generated.emit("Error {}".format(str(exp))) + self.sync_server.reset_timer() + + def _remove_site(self, selected_ids=None, site_name=None): """ Removes site record AND files. @@ -382,65 +328,90 @@ class SyncRepresentationWidget(QtWidgets.QWidget): This could only happen when artist work on local machine, not connected to studio mounted drives. """ - log.info("Removing {}".format(self.representation_id)) - try: - local_site = get_local_site_id() - self.sync_server.remove_site( - self.table_view.model().project, - self.representation_id, - local_site, - True) - self.message_generated.emit("Site {} removed".format(local_site)) - except ValueError as exp: - self.message_generated.emit("Error {}".format(str(exp))) - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + log.debug("Remove site {}:{}".format(selected_ids, site_name)) + for representation_id in selected_ids: + log.info("Removing {}".format(representation_id)) + try: + self.sync_server.remove_site( + self.model.project, + representation_id, + site_name, + True) + self.message_generated.emit( + "Site {} removed".format(site_name)) + except ValueError as exp: + self.message_generated.emit("Error {}".format(str(exp))) - def _reset_local_site(self): + self.model.refresh( + load_records=self.model._rec_loaded) + self.sync_server.reset_timer() + + def _reset_site(self, selected_ids=None, site_name=None): """ Removes errors or success metadata for particular file >> forces redo of upload/download """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'local') - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + log.debug("Reset site {}:{}".format(selected_ids, site_name)) + for representation_id in selected_ids: + item = lib.get_item_by_id(self.model, representation_id) + check_progress = self._get_progress(item, site_name, True) - def _reset_remote_site(self): - """ - Removes errors or success metadata for particular file >> forces - redo of upload/download - """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'remote') - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + # do not reset if opposite side is not fully there + if check_progress != 1: + log.debug("Not fully available {} on other side, skipping". 
+ format(check_progress)) + continue - def _open_in_explorer(self, site): - if not self.item: - return + self.sync_server.reset_provider_for_file( + self.model.project, + representation_id, + site_name=site_name, + force=True) - fpath = self.item.path - project = self.table_view.model().project - fpath = self.sync_server.get_local_file_path(project, - site, - fpath) + self.model.refresh( + load_records=self.model._rec_loaded) + self.sync_server.reset_timer() - fpath = os.path.normpath(os.path.dirname(fpath)) - if os.path.isdir(fpath): - if 'win' in sys.platform: # windows - subprocess.Popen('explorer "%s"' % fpath) - elif sys.platform == 'darwin': # macOS - subprocess.Popen(['open', fpath]) - else: # linux - try: - subprocess.Popen(['xdg-open', fpath]) - except OSError: - raise OSError('unsupported xdg-open call??') + def _open_in_explorer(self, selected_ids=None, site_name=None): + log.debug("Open in Explorer {}:{}".format(selected_ids, site_name)) + for selected_id in selected_ids: + item = lib.get_item_by_id(self.model, selected_id) + if not item: + return + + fpath = item.path + project = self.model.project + fpath = self.sync_server.get_local_file_path(project, + site_name, + fpath) + + fpath = os.path.normpath(os.path.dirname(fpath)) + if os.path.isdir(fpath): + if 'win' in sys.platform: # windows + subprocess.Popen('explorer "%s"' % fpath) + elif sys.platform == 'darwin': # macOS + subprocess.Popen(['open', fpath]) + else: # linux + try: + subprocess.Popen(['xdg-open', fpath]) + except OSError: + raise OSError('unsupported xdg-open call??') + + def _get_progress(self, item, site_name, opposite=False): + """Returns progress value according to site (side)""" + progress = {'local': item.local_progress, + 'remote': item.remote_progress} + side = 'remote' + if site_name == self.model.active_site: + side = 'local' + if opposite: + side = 'remote' if side == 'local' else 'local' + + return progress[side] + + def _get_action_kwargs(self, site_name): + """Default format of kwargs for action""" + return {"selected_ids": self._selected_ids, "site_name": site_name} def _save_scrollbar(self): self._scrollbar_pos = self.table_view.verticalScrollBar().value() @@ -450,7 +421,155 @@ class SyncRepresentationWidget(QtWidgets.QWidget): self.table_view.verticalScrollBar().setValue(self._scrollbar_pos) -class SyncRepresentationDetailWidget(QtWidgets.QWidget): +class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): + + default_widths = ( + ("asset", 190), + ("subset", 170), + ("version", 60), + ("representation", 145), + ("local_site", 160), + ("remote_site", 160), + ("files_count", 50), + ("files_size", 60), + ("priority", 70), + ("status", 110) + ) + + def __init__(self, sync_server, project=None, parent=None): + super(SyncRepresentationSummaryWidget, self).__init__(parent) + + self.sync_server = sync_server + + self._selected_ids = [] # keep last selected _id + + txt_filter = QtWidgets.QLineEdit() + txt_filter.setPlaceholderText("Quick filter representations..") + txt_filter.setClearButtonEnabled(True) + txt_filter.addAction( + qtawesome.icon("fa.filter", color="gray"), + QtWidgets.QLineEdit.LeadingPosition) + self.txt_filter = txt_filter + + self._scrollbar_pos = None + + top_bar_layout = QtWidgets.QHBoxLayout() + top_bar_layout.addWidget(self.txt_filter) + + table_view = QtWidgets.QTableView() + headers = [item[0] for item in self.default_widths] + + model = SyncRepresentationSummaryModel(sync_server, headers, project) + table_view.setModel(model) + 
table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + table_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + table_view.setSelectionBehavior( + QtWidgets.QAbstractItemView.SelectRows) + table_view.horizontalHeader().setSortIndicator( + -1, Qt.AscendingOrder) + table_view.setAlternatingRowColors(True) + table_view.verticalHeader().hide() + + column = table_view.model().get_header_index("local_site") + delegate = ImageDelegate(self) + table_view.setItemDelegateForColumn(column, delegate) + + column = table_view.model().get_header_index("remote_site") + delegate = ImageDelegate(self) + table_view.setItemDelegateForColumn(column, delegate) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addLayout(top_bar_layout) + layout.addWidget(table_view) + + self.table_view = table_view + self.model = model + + horizontal_header = HorizontalHeader(self) + + table_view.setHorizontalHeader(horizontal_header) + table_view.setSortingEnabled(True) + + for column_name, width in self.default_widths: + idx = model.get_header_index(column_name) + table_view.setColumnWidth(idx, width) + + table_view.doubleClicked.connect(self._double_clicked) + self.txt_filter.textChanged.connect(lambda: model.set_word_filter( + self.txt_filter.text())) + table_view.customContextMenuRequested.connect(self._on_context_menu) + + model.refresh_started.connect(self._save_scrollbar) + model.refresh_finished.connect(self._set_scrollbar) + model.modelReset.connect(self._set_selection) + + self.selection_model = self.table_view.selectionModel() + self.selection_model.selectionChanged.connect(self._selection_changed) + + def _prepare_menu(self, item, is_multi): + action_kwarg_map, actions_mapping, menu = \ + super()._prepare_menu(item, is_multi) + + if item.status in [lib.STATUS[0], lib.STATUS[1]] or is_multi: + action = QtWidgets.QAction("Pause in queue") + actions_mapping[action] = self._pause + # pause handles which site_name it will pause itself + action_kwarg_map[action] = {"selected_ids": self._selected_ids} + menu.addAction(action) + + if item.status == lib.STATUS[3] or is_multi: + action = QtWidgets.QAction("Unpause in queue") + actions_mapping[action] = self._unpause + action_kwarg_map[action] = {"selected_ids": self._selected_ids} + menu.addAction(action) + + return action_kwarg_map, actions_mapping, menu + + +class SyncServerDetailWindow(QtWidgets.QDialog): + """Wrapper window for SyncRepresentationDetailWidget + + Creates standalone window with list of files for selected repre_id. + """ + def __init__(self, sync_server, _id, project, parent=None): + log.debug( + "!!! 
SyncServerDetailWindow _id:: {}".format(_id)) + super(SyncServerDetailWindow, self).__init__(parent) + self.setWindowFlags(QtCore.Qt.Window) + self.setFocusPolicy(QtCore.Qt.StrongFocus) + + self.setStyleSheet(style.load_stylesheet()) + self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) + self.resize(1000, 400) + + body = QtWidgets.QWidget() + footer = QtWidgets.QWidget() + footer.setFixedHeight(20) + + container = SyncRepresentationDetailWidget(sync_server, _id, project, + parent=self) + body_layout = QtWidgets.QHBoxLayout(body) + body_layout.addWidget(container) + body_layout.setContentsMargins(0, 0, 0, 0) + + self.message = QtWidgets.QLabel() + self.message.hide() + + footer_layout = QtWidgets.QVBoxLayout(footer) + footer_layout.addWidget(self.message) + footer_layout.setContentsMargins(0, 0, 0, 0) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(body) + layout.addWidget(footer) + + self.setLayout(body_layout) + self.setWindowTitle("Sync Representation Detail") + + +class SyncRepresentationDetailWidget(_SyncRepresentationWidget): """ Widget to display list of synchronizable files for single repre. @@ -466,243 +585,197 @@ class SyncRepresentationDetailWidget(QtWidgets.QWidget): ("local_site", 185), ("remote_site", 185), ("size", 60), - ("priority", 25), - ("state", 110) + ("priority", 60), + ("status", 110) ) def __init__(self, sync_server, _id=None, project=None, parent=None): super(SyncRepresentationDetailWidget, self).__init__(parent) log.debug("Representation_id:{}".format(_id)) - self.representation_id = _id - self.item = None # set to item that mouse was clicked over self.project = project self.sync_server = sync_server - self._selected_id = None + self.representation_id = _id + self._selected_ids = [] - self.filter = QtWidgets.QLineEdit() - self.filter.setPlaceholderText("Filter representation..") + self.txt_filter = QtWidgets.QLineEdit() + self.txt_filter.setPlaceholderText("Quick filter representation..") + self.txt_filter.setClearButtonEnabled(True) + self.txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"), + QtWidgets.QLineEdit.LeadingPosition) self._scrollbar_pos = None top_bar_layout = QtWidgets.QHBoxLayout() - top_bar_layout.addWidget(self.filter) + top_bar_layout.addWidget(self.txt_filter) - self.table_view = QtWidgets.QTableView() + table_view = QtWidgets.QTableView() headers = [item[0] for item in self.default_widths] model = SyncRepresentationDetailModel(sync_server, headers, _id, project) - self.table_view.setModel(model) - self.table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - self.table_view.setSelectionMode( - QtWidgets.QAbstractItemView.SingleSelection) - self.table_view.setSelectionBehavior( + table_view.setModel(model) + table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + table_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + table_view.setSelectionBehavior( QtWidgets.QTableView.SelectRows) - self.table_view.horizontalHeader().setSortIndicator(-1, - Qt.AscendingOrder) - self.table_view.setSortingEnabled(True) - self.table_view.horizontalHeader().setSortIndicatorShown(True) - self.table_view.setAlternatingRowColors(True) - self.table_view.verticalHeader().hide() + table_view.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder) + table_view.horizontalHeader().setSortIndicatorShown(True) + table_view.setAlternatingRowColors(True) + table_view.verticalHeader().hide() - column = self.table_view.model().get_header_index("local_site") + column = model.get_header_index("local_site") delegate 
= ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) + table_view.setItemDelegateForColumn(column, delegate) - column = self.table_view.model().get_header_index("remote_site") + column = model.get_header_index("remote_site") delegate = ImageDelegate(self) - self.table_view.setItemDelegateForColumn(column, delegate) - - for column_name, width in self.default_widths: - idx = model.get_header_index(column_name) - self.table_view.setColumnWidth(idx, width) + table_view.setItemDelegateForColumn(column, delegate) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(top_bar_layout) - layout.addWidget(self.table_view) + layout.addWidget(table_view) - self.filter.textChanged.connect(lambda: model.set_filter( - self.filter.text())) - self.table_view.customContextMenuRequested.connect( - self._on_context_menu) + self.model = model + + self.selection_model = table_view.selectionModel() + self.selection_model.selectionChanged.connect(self._selection_changed) + + horizontal_header = HorizontalHeader(self) + + table_view.setHorizontalHeader(horizontal_header) + table_view.setSortingEnabled(True) + + for column_name, width in self.default_widths: + idx = model.get_header_index(column_name) + table_view.setColumnWidth(idx, width) + + self.table_view = table_view + + self.txt_filter.textChanged.connect(lambda: model.set_word_filter( + self.txt_filter.text())) + table_view.customContextMenuRequested.connect(self._on_context_menu) model.refresh_started.connect(self._save_scrollbar) model.refresh_finished.connect(self._set_scrollbar) - self.table_view.model().modelReset.connect(self._set_selection) + model.modelReset.connect(self._set_selection) - self.selection_model = self.table_view.selectionModel() - self.selection_model.selectionChanged.connect(self._selection_changed) - - def _selection_changed(self): - index = self.selection_model.currentIndex() - self._selected_id = self.table_view.model().data(index, Qt.UserRole) - - def _set_selection(self): - """ - Sets selection to 'self._selected_id' if exists. - - Keep selection during model refresh. - """ - if self._selected_id: - index = self.table_view.model().get_index(self._selected_id) - if index and index.isValid(): - mode = QtCore.QItemSelectionModel.Select | \ - QtCore.QItemSelectionModel.Rows - self.selection_model.setCurrentIndex(index, mode) - else: - self._selected_id = None - - def _show_detail(self): + def _show_detail(self, selected_ids=None): """ Shows windows with error message for failed sync of a file. """ - dt = max(self.item.created_dt, self.item.sync_dt) - detail_window = SyncRepresentationErrorWindow(self.item._id, - self.project, - dt, - self.item.tries, - self.item.error) + detail_window = SyncRepresentationErrorWindow(self.model, selected_ids) + detail_window.exec() - def _on_context_menu(self, point): - """ - Shows menu with loader actions on Right-click. 
- """ - point_index = self.table_view.indexAt(point) - if not point_index.isValid(): - return + def _prepare_menu(self, item, is_multi): + """Adds view (and model) dependent actions to default ones""" + action_kwarg_map, actions_mapping, menu = \ + super()._prepare_menu(item, is_multi) - self.item = self.table_view.model()._data[point_index.row()] - - menu = QtWidgets.QMenu() - menu.setStyleSheet(style.load_stylesheet()) - actions_mapping = {} - actions_kwargs_mapping = {} - - local_site = self.item.local_site - local_progress = self.item.local_progress - remote_site = self.item.remote_site - remote_progress = self.item.remote_progress - - for site, progress in {local_site: local_progress, - remote_site: remote_progress}.items(): - project = self.table_view.model().project - provider = self.sync_server.get_provider_for_site(project, - site) - if provider == 'local_drive': - if 'studio' in site: - txt = " studio version" - else: - txt = " local version" - action = QtWidgets.QAction("Open in explorer" + txt) - if progress == 1: - actions_mapping[action] = self._open_in_explorer - actions_kwargs_mapping[action] = {'site': site} - menu.addAction(action) - - if self.item.state == lib.STATUS[2]: + if item.status == lib.STATUS[2] or is_multi: action = QtWidgets.QAction("Open error detail") actions_mapping[action] = self._show_detail + action_kwarg_map[action] = {"selected_ids": self._selected_ids} + menu.addAction(action) - if float(remote_progress) == 1.0: - action = QtWidgets.QAction("Re-sync active site") - actions_mapping[action] = self._reset_local_site - menu.addAction(action) + return action_kwarg_map, actions_mapping, menu - if float(local_progress) == 1.0: - action = QtWidgets.QAction("Re-sync remote site") - actions_mapping[action] = self._reset_remote_site - menu.addAction(action) - - if not actions_mapping: - action = QtWidgets.QAction("< No action >") - actions_mapping[action] = None - menu.addAction(action) - - result = menu.exec_(QtGui.QCursor.pos()) - if result: - to_run = actions_mapping[result] - to_run_kwargs = actions_kwargs_mapping.get(result, {}) - if to_run: - to_run(**to_run_kwargs) - - def _reset_local_site(self): + def _reset_site(self, selected_ids=None, site_name=None): """ Removes errors or success metadata for particular file >> forces redo of upload/download """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'local', - self.item._id) - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + for file_id in selected_ids: + item = lib.get_item_by_id(self.model, file_id) + check_progress = self._get_progress(item, site_name, True) - def _reset_remote_site(self): - """ - Removes errors or success metadata for particular file >> forces - redo of upload/download - """ - self.sync_server.reset_provider_for_file( - self.table_view.model().project, - self.representation_id, - 'remote', - self.item._id) - self.table_view.model().refresh( - load_records=self.table_view.model()._rec_loaded) + # do not reset if opposite side is not fully there + if check_progress != 1: + log.debug("Not fully available {} on other side, skipping". 
+ format(check_progress)) + continue - def _open_in_explorer(self, site): - if not self.item: - return + self.sync_server.reset_provider_for_file( + self.model.project, + self.representation_id, + site_name=site_name, + file_id=file_id, + force=True) + self.model.refresh( + load_records=self.model._rec_loaded) - fpath = self.item.path - project = self.project - fpath = self.sync_server.get_local_file_path(project, site, fpath) - fpath = os.path.normpath(os.path.dirname(fpath)) - if os.path.isdir(fpath): - if 'win' in sys.platform: # windows - subprocess.Popen('explorer "%s"' % fpath) - elif sys.platform == 'darwin': # macOS - subprocess.Popen(['open', fpath]) - else: # linux - try: - subprocess.Popen(['xdg-open', fpath]) - except OSError: - raise OSError('unsupported xdg-open call??') +class SyncRepresentationErrorWindow(QtWidgets.QDialog): + """Wrapper window to show errors during sync on file(s)""" + def __init__(self, model, selected_ids, parent=None): + super(SyncRepresentationErrorWindow, self).__init__(parent) + self.setWindowFlags(QtCore.Qt.Window) + self.setFocusPolicy(QtCore.Qt.StrongFocus) - def _save_scrollbar(self): - self._scrollbar_pos = self.table_view.verticalScrollBar().value() + self.setStyleSheet(style.load_stylesheet()) + self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) + self.resize(900, 150) - def _set_scrollbar(self): - if self._scrollbar_pos: - self.table_view.verticalScrollBar().setValue(self._scrollbar_pos) + body = QtWidgets.QWidget() + + container = SyncRepresentationErrorWidget(model, + selected_ids, + parent=self) + body_layout = QtWidgets.QHBoxLayout(body) + body_layout.addWidget(container) + body_layout.setContentsMargins(0, 0, 0, 0) + + message = QtWidgets.QLabel() + message.hide() + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(body) + + self.setLayout(body_layout) + self.setWindowTitle("Sync Representation Error Detail") class SyncRepresentationErrorWidget(QtWidgets.QWidget): """ - Dialog to show when sync error happened, prints error message + Dialog to show when sync error happened, prints formatted error message """ - - def __init__(self, _id, dt, tries, msg, parent=None): + def __init__(self, model, selected_ids, parent=None): super(SyncRepresentationErrorWidget, self).__init__(parent) - layout = QtWidgets.QHBoxLayout(self) + layout = QtWidgets.QVBoxLayout(self) - txts = [] - txts.append("{}: {}".format("Last update date", pretty_timestamp(dt))) - txts.append("{}: {}".format("Retries", str(tries))) - txts.append("{}: {}".format("Error message", msg)) + no_errors = True + for file_id in selected_ids: + item = lib.get_item_by_id(model, file_id) + if not item.created_dt or not item.sync_dt or not item.error: + continue - text_area = QtWidgets.QPlainTextEdit("\n\n".join(txts)) - text_area.setReadOnly(True) - layout.addWidget(text_area) + no_errors = False + dt = max(item.created_dt, item.sync_dt) + + txts = [] + txts.append("{}: {}
".format("Last update date", + pretty_timestamp(dt))) + txts.append("{}: {}
".format("Retries", + str(item.tries))) + txts.append("{}: {}
".format("Error message", + item.error)) + + text_area = QtWidgets.QTextEdit("\n\n".join(txts)) + text_area.setReadOnly(True) + layout.addWidget(text_area) + + if no_errors: + text_area = QtWidgets.QTextEdit() + text_area.setText("

No errors located

") + text_area.setReadOnly(True) + layout.addWidget(text_area) class ImageDelegate(QtWidgets.QStyledItemDelegate): @@ -755,66 +828,276 @@ class ImageDelegate(QtWidgets.QStyledItemDelegate): QtGui.QBrush(QtGui.QColor(255, 0, 0, 35))) -class SyncServerDetailWindow(QtWidgets.QDialog): - def __init__(self, sync_server, _id, project, parent=None): - log.debug( - "!!! SyncServerDetailWindow _id:: {}".format(_id)) - super(SyncServerDetailWindow, self).__init__(parent) - self.setWindowFlags(QtCore.Qt.Window) - self.setFocusPolicy(QtCore.Qt.StrongFocus) +class TransparentWidget(QtWidgets.QWidget): + """Used for header cell for resizing to work properly""" + clicked = QtCore.Signal(str) - self.setStyleSheet(style.load_stylesheet()) - self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) - self.resize(1000, 400) + def __init__(self, column_name, *args, **kwargs): + super(TransparentWidget, self).__init__(*args, **kwargs) + self.column_name = column_name + # self.setStyleSheet("background: red;") - body = QtWidgets.QWidget() - footer = QtWidgets.QWidget() - footer.setFixedHeight(20) + def mouseReleaseEvent(self, event): + if event.button() == QtCore.Qt.LeftButton: + self.clicked.emit(self.column_name) - container = SyncRepresentationDetailWidget(sync_server, _id, project, - parent=self) - body_layout = QtWidgets.QHBoxLayout(body) - body_layout.addWidget(container) - body_layout.setContentsMargins(0, 0, 0, 0) - - self.message = QtWidgets.QLabel() - self.message.hide() - - footer_layout = QtWidgets.QVBoxLayout(footer) - footer_layout.addWidget(self.message) - footer_layout.setContentsMargins(0, 0, 0, 0) - - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(body) - layout.addWidget(footer) - - self.setLayout(body_layout) - self.setWindowTitle("Sync Representation Detail") + super(TransparentWidget, self).mouseReleaseEvent(event) -class SyncRepresentationErrorWindow(QtWidgets.QDialog): - def __init__(self, _id, project, dt, tries, msg, parent=None): - super(SyncRepresentationErrorWindow, self).__init__(parent) - self.setWindowFlags(QtCore.Qt.Window) - self.setFocusPolicy(QtCore.Qt.StrongFocus) +class HorizontalHeader(QtWidgets.QHeaderView): + """Reiplemented QHeaderView to contain clickable changeable button""" + def __init__(self, parent=None): + super(HorizontalHeader, self).__init__(QtCore.Qt.Horizontal, parent) + self._parent = parent + self.checked_values = {} - self.setStyleSheet(style.load_stylesheet()) - self.setWindowIcon(QtGui.QIcon(style.app_icon_path())) - self.resize(900, 150) + self.setModel(self._parent.model) - body = QtWidgets.QWidget() + self.setSectionsClickable(True) - container = SyncRepresentationErrorWidget(_id, dt, tries, msg, - parent=self) - body_layout = QtWidgets.QHBoxLayout(body) - body_layout.addWidget(container) - body_layout.setContentsMargins(0, 0, 0, 0) + self.menu_items_dict = {} + self.menu = None + self.header_cells = [] + self.filter_buttons = {} - message = QtWidgets.QLabel() - message.hide() + self.filter_icon = qtawesome.icon("fa.filter", color="gray") + self.filter_set_icon = qtawesome.icon("fa.filter", color="white") - layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(body) + self.init_layout() - self.setLayout(body_layout) - self.setWindowTitle("Sync Representation Error Detail") + self._resetting = False + + @property + def model(self): + """Keep model synchronized with parent widget""" + return self._parent.model + + def init_layout(self): + """Initial preparation of header's content""" + for column_idx in range(self.model.columnCount()): + 
column_name, column_label = self.model.get_column(column_idx) + filter_rec = self.model.get_filters().get(column_name) + if not filter_rec: + continue + + icon = self.filter_icon + button = QtWidgets.QPushButton(icon, "", self) + + button.setFixedSize(24, 24) + button.setStyleSheet( + "QPushButton::menu-indicator{width:0px;}" + "QPushButton{border: none;background: transparent;}") + button.clicked.connect(partial(self._get_menu, + column_name, column_idx)) + button.setFlat(True) + self.filter_buttons[column_name] = button + + def showEvent(self, event): + """Paint header""" + super(HorizontalHeader, self).showEvent(event) + + for i in range(len(self.header_cells)): + cell_content = self.header_cells[i] + cell_content.setGeometry(self.sectionViewportPosition(i), 0, + self.sectionSize(i) - 1, self.height()) + + cell_content.show() + + def _set_filter_icon(self, column_name): + """Set different states of button depending on its engagement""" + button = self.filter_buttons.get(column_name) + if button: + if self.checked_values.get(column_name): + button.setIcon(self.filter_set_icon) + else: + button.setIcon(self.filter_icon) + + def _reset_filter(self, column_name): + """ + Remove whole column from filter >> not in $match at all (faster) + """ + self._resetting = True # mark changes to consume them + if self.checked_values.get(column_name) is not None: + self.checked_values.pop(column_name) + self._set_filter_icon(column_name) + self._filter_and_refresh_model_and_menu(column_name, True, True) + self._resetting = False + + def _apply_filter(self, column_name, values, state): + """ + Sets 'values' to specific 'state' (checked/unchecked), + sends to model. + """ + if self._resetting: # event triggered by _resetting, skip it + return + + self._update_checked_values(column_name, values, state) + self._set_filter_icon(column_name) + self._filter_and_refresh_model_and_menu(column_name, True, False) + + def _apply_text_filter(self, column_name, items, line_edit): + """ + Resets all checkboxes, prefers inserted text. + """ + le_text = line_edit.text() + self._update_checked_values(column_name, items, 0) # reset other + if self.checked_values.get(column_name) is not None or \ + le_text == '': + self.checked_values.pop(column_name) # reset during typing + + if le_text: + self._update_checked_values(column_name, {le_text: le_text}, 2) + self._set_filter_icon(column_name) + self._filter_and_refresh_model_and_menu(column_name, True, True) + + def _filter_and_refresh_model_and_menu(self, column_name, + model=True, menu=True): + """ + Refresh model and its content and possibly menu for big changes. 
+ """ + if model: + self.model.set_column_filtering(self.checked_values) + self.model.refresh() + if menu: + self._menu_refresh(column_name) + + def _get_menu(self, column_name, index): + """Prepares content of menu for 'column_name'""" + menu = QtWidgets.QMenu(self) + filter_rec = self.model.get_filters()[column_name] + self.menu_items_dict[column_name] = filter_rec.values() + + # text filtering only if labels same as values, not if codes are used + if 'text' in filter_rec.search_variants(): + line_edit = QtWidgets.QLineEdit(menu) + line_edit.setClearButtonEnabled(True) + line_edit.addAction(self.filter_icon, + QtWidgets.QLineEdit.LeadingPosition) + + line_edit.setFixedHeight(line_edit.height()) + txt = "" + if self.checked_values.get(column_name): + txt = list(self.checked_values.get(column_name).keys())[0] + line_edit.setText(txt) + + action_le = QtWidgets.QWidgetAction(menu) + action_le.setDefaultWidget(line_edit) + line_edit.textChanged.connect( + partial(self._apply_text_filter, column_name, + filter_rec.values(), line_edit)) + menu.addAction(action_le) + menu.addSeparator() + + if 'checkbox' in filter_rec.search_variants(): + action_all = QtWidgets.QAction("All", self) + action_all.triggered.connect(partial(self._reset_filter, + column_name)) + menu.addAction(action_all) + + action_none = QtWidgets.QAction("Unselect all", self) + state_unchecked = 0 + action_none.triggered.connect(partial(self._apply_filter, + column_name, + filter_rec.values(), + state_unchecked)) + menu.addAction(action_none) + menu.addSeparator() + + # nothing explicitly >> ALL implicitly >> first time + if self.checked_values.get(column_name) is None: + checked_keys = self.menu_items_dict[column_name].keys() + else: + checked_keys = self.checked_values[column_name] + + for value, label in self.menu_items_dict[column_name].items(): + checkbox = QtWidgets.QCheckBox(str(label), menu) + + # temp + checkbox.setStyleSheet("QCheckBox{spacing: 5px;" + "padding:5px 5px 5px 5px;}") + if value in checked_keys: + checkbox.setChecked(True) + + action = QtWidgets.QWidgetAction(menu) + action.setDefaultWidget(checkbox) + + checkbox.stateChanged.connect(partial(self._apply_filter, + column_name, {value: label})) + menu.addAction(action) + + self.menu = menu + + self._show_menu(index, menu) + + def _show_menu(self, index, menu): + """Shows 'menu' under header column of 'index'""" + global_pos_point = self.mapToGlobal( + QtCore.QPoint(self.sectionViewportPosition(index), 0)) + menu.setMinimumWidth(self.sectionSize(index)) + menu.setMinimumHeight(self.height()) + menu.exec_(QtCore.QPoint(global_pos_point.x(), + global_pos_point.y() + self.height())) + + def _menu_refresh(self, column_name): + """ + Reset boxes after big change - word filtering or reset + """ + for action in self.menu.actions(): + if not isinstance(action, QtWidgets.QWidgetAction): + continue + + widget = action.defaultWidget() + if not isinstance(widget, QtWidgets.QCheckBox): + continue + + if not self.checked_values.get(column_name) or \ + widget.text() in self.checked_values[column_name].values(): + widget.setChecked(True) + else: + widget.setChecked(False) + + def _update_checked_values(self, column_name, values, state): + """ + Modify dictionary of set values in columns for filtering. 
+ + Modifies 'self.checked_values' + """ + copy_menu_items = dict(self.menu_items_dict[column_name]) + checked = self.checked_values.get(column_name, copy_menu_items) + set_items = dict(values.items()) # prevent dict change during loop + for value, label in set_items.items(): + if state == 2 and label: # checked + checked[value] = label + elif state == 0 and checked.get(value): + checked.pop(value) + + self.checked_values[column_name] = checked + + def paintEvent(self, event): + self._fix_size() + super(HorizontalHeader, self).paintEvent(event) + + def _fix_size(self): + for column_idx in range(self.model.columnCount()): + vis_index = self.visualIndex(column_idx) + index = self.logicalIndex(vis_index) + section_width = self.sectionSize(index) + + column_name = self.model.headerData(column_idx, + QtCore.Qt.Horizontal, + lib.HeaderNameRole) + button = self.filter_buttons.get(column_name) + if not button: + continue + + pos_x = self.sectionViewportPosition( + index) + section_width - self.height() + + pos_y = 0 + if button.height() < self.height(): + pos_y = int((self.height() - button.height()) / 2) + button.setGeometry( + pos_x, + pos_y, + self.height(), + self.height()) diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py index 36f3444399..fa6e63b029 100644 --- a/openpype/modules/sync_server/utils.py +++ b/openpype/modules/sync_server/utils.py @@ -3,6 +3,11 @@ from openpype.api import Logger log = Logger().get_logger("SyncServer") +class ResumableError(Exception): + """Error which could be temporary, skip current loop, try next time""" + pass + + class SyncStatus: DO_NOTHING = 0 DO_UPLOAD = 1 diff --git a/openpype/modules/user/__init__.py b/openpype/modules/user/__init__.py deleted file mode 100644 index a97ac0eef6..0000000000 --- a/openpype/modules/user/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .user_module import ( - UserModule, - IUserModule -) - - -__all__ = ( - "UserModule", - "IUserModule" -) diff --git a/openpype/modules/user/rest_api.py b/openpype/modules/user/rest_api.py deleted file mode 100644 index 566425a19b..0000000000 --- a/openpype/modules/user/rest_api.py +++ /dev/null @@ -1,35 +0,0 @@ -import json -from aiohttp.web_response import Response - - -class UserModuleRestApi: - def __init__(self, user_module, server_manager): - self.module = user_module - self.server_manager = server_manager - - self.prefix = "/user" - - self.register() - - def register(self): - self.server_manager.add_route( - "GET", - self.prefix + "/username", - self.get_username - ) - self.server_manager.add_route( - "GET", - self.prefix + "/show_widget", - self.show_user_widget - ) - - async def get_username(self, request): - return Response( - status=200, - body=json.dumps(self.module.cred, indent=4), - content_type="application/json" - ) - - async def show_user_widget(self, request): - self.module.action_show_widget.trigger() - return Response(status=200) diff --git a/openpype/modules/user/user_module.py b/openpype/modules/user/user_module.py deleted file mode 100644 index 7d257f1781..0000000000 --- a/openpype/modules/user/user_module.py +++ /dev/null @@ -1,169 +0,0 @@ -import os -import json -import getpass - -from abc import ABCMeta, abstractmethod - -import six -import appdirs - -from .. 
import ( - PypeModule, - ITrayModule, - IWebServerRoutes -) - - -@six.add_metaclass(ABCMeta) -class IUserModule: - """Interface for other modules to use user change callbacks.""" - - @abstractmethod - def on_pype_user_change(self, username): - """What should happen on Pype user change.""" - pass - - -class UserModule(PypeModule, ITrayModule, IWebServerRoutes): - cred_folder_path = os.path.normpath( - appdirs.user_data_dir('pype-app', 'pype') - ) - cred_filename = 'user_info.json' - env_name = "OPENPYPE_USERNAME" - - name = "user" - - def initialize(self, modules_settings): - user_settings = modules_settings[self.name] - self.enabled = user_settings["enabled"] - - self.callbacks_on_user_change = [] - self.cred = {} - self.cred_path = os.path.normpath(os.path.join( - self.cred_folder_path, self.cred_filename - )) - - # Tray attributes - self.widget_login = None - self.action_show_widget = None - - self.rest_api_obj = None - - def tray_init(self): - from .widget_user import UserWidget - self.widget_login = UserWidget(self) - - self.load_credentials() - - def register_callback_on_user_change(self, callback): - self.callbacks_on_user_change.append(callback) - - def tray_start(self): - """Store credentials to env and preset them to widget""" - username = "" - if self.cred: - username = self.cred.get("username") or "" - - os.environ[self.env_name] = username - self.widget_login.set_user(username) - - def tray_exit(self): - """Nothing special for User.""" - return - - def get_user(self): - return self.cred.get("username") or getpass.getuser() - - def webserver_initialization(self, server_manager): - """Implementation of IWebServerRoutes interface.""" - from .rest_api import UserModuleRestApi - - self.rest_api_obj = UserModuleRestApi(self, server_manager) - - def connect_with_modules(self, enabled_modules): - for module in enabled_modules: - if isinstance(module, IUserModule): - self.callbacks_on_user_change.append( - module.on_pype_user_change - ) - - # Definition of Tray menu - def tray_menu(self, parent_menu): - from Qt import QtWidgets - """Add menu or action to Tray(or parent)'s menu""" - action = QtWidgets.QAction("Username", parent_menu) - action.triggered.connect(self.show_widget) - parent_menu.addAction(action) - parent_menu.addSeparator() - - self.action_show_widget = action - - def load_credentials(self): - """Get credentials from JSON file """ - credentials = {} - try: - file = open(self.cred_path, "r") - credentials = json.load(file) - file.close() - - self.cred = credentials - username = credentials.get("username") - if username: - self.log.debug("Loaded Username \"{}\"".format(username)) - else: - self.log.debug("Pype Username is not set") - - return credentials - - except FileNotFoundError: - return self.save_credentials(getpass.getuser()) - - except json.decoder.JSONDecodeError: - self.log.warning(( - "File where users credentials should be stored" - " has invalid json format. Loading system username." 
- )) - return self.save_credentials(getpass.getuser()) - - def change_credentials(self, username): - self.save_credentials(username) - for callback in self.callbacks_on_user_change: - try: - callback(username) - except Exception: - self.log.warning( - "Failed to execute callback \"{}\".".format( - str(callback) - ), - exc_info=True - ) - - def save_credentials(self, username): - """Save credentials to JSON file, env and widget""" - if username is None: - username = "" - - username = str(username).strip() - - self.cred = {"username": username} - os.environ[self.env_name] = username - if self.widget_login: - self.widget_login.set_user(username) - try: - file = open(self.cred_path, "w") - file.write(json.dumps(self.cred)) - file.close() - self.log.debug("Username \"{}\" stored".format(username)) - except Exception: - self.log.error( - "Could not store username to file \"{}\"".format( - self.cred_path - ), - exc_info=True - ) - - return self.cred - - def show_widget(self): - """Show dialog to enter credentials""" - self.widget_login.show() diff --git a/openpype/modules/user/widget_user.py b/openpype/modules/user/widget_user.py deleted file mode 100644 index f8ecadf56b..0000000000 --- a/openpype/modules/user/widget_user.py +++ /dev/null @@ -1,88 +0,0 @@ -from Qt import QtCore, QtGui, QtWidgets -from avalon import style -from openpype import resources - - -class UserWidget(QtWidgets.QWidget): - - MIN_WIDTH = 300 - - def __init__(self, module): - - super(UserWidget, self).__init__() - - self.module = module - - # Style - icon = QtGui.QIcon(resources.pype_icon_filepath()) - self.setWindowIcon(icon) - self.setWindowTitle("Username Settings") - self.setMinimumWidth(self.MIN_WIDTH) - self.setStyleSheet(style.load_stylesheet()) - - self.setWindowFlags( - QtCore.Qt.WindowCloseButtonHint | - QtCore.Qt.WindowMinimizeButtonHint - ) - - self.setLayout(self._main()) - - def show(self, *args, **kwargs): - super().show(*args, **kwargs) - # Move widget to center of active screen on show - screen = QtWidgets.QApplication.desktop().screen() - screen_center = lambda self: ( - screen.rect().center() - self.rect().center() - ) - self.move(screen_center(self)) - - def _main(self): - main_layout = QtWidgets.QVBoxLayout() - - form_layout = QtWidgets.QFormLayout() - form_layout.setContentsMargins(10, 15, 10, 5) - - label_username = QtWidgets.QLabel("Username:") - label_username.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) - label_username.setTextFormat(QtCore.Qt.RichText) - - input_username = QtWidgets.QLineEdit() - input_username.setPlaceholderText( - QtCore.QCoreApplication.translate("main", "e.g. 
John Smith") - ) - - form_layout.addRow(label_username, input_username) - - btn_save = QtWidgets.QPushButton("Save") - btn_save.clicked.connect(self.click_save) - - btn_cancel = QtWidgets.QPushButton("Cancel") - btn_cancel.clicked.connect(self.close) - - btn_group = QtWidgets.QHBoxLayout() - btn_group.addStretch(1) - btn_group.addWidget(btn_save) - btn_group.addWidget(btn_cancel) - - main_layout.addLayout(form_layout) - main_layout.addLayout(btn_group) - - self.input_username = input_username - - return main_layout - - def set_user(self, username): - self.input_username.setText(username) - - def click_save(self): - # all what should happen - validations and saving into appsdir - username = self.input_username.text() - self.module.change_credentials(username) - self._close_widget() - - def closeEvent(self, event): - event.ignore() - self._close_widget() - - def _close_widget(self): - self.hide() diff --git a/openpype/plugins/publish/collect_current_pype_user.py b/openpype/plugins/publish/collect_current_pype_user.py index de4e950d56..003c779836 100644 --- a/openpype/plugins/publish/collect_current_pype_user.py +++ b/openpype/plugins/publish/collect_current_pype_user.py @@ -1,6 +1,7 @@ import os import getpass import pyblish.api +from openpype.lib import get_openpype_username class CollectCurrentUserPype(pyblish.api.ContextPlugin): @@ -11,9 +12,6 @@ class CollectCurrentUserPype(pyblish.api.ContextPlugin): label = "Collect Pype User" def process(self, context): - user = os.getenv("OPENPYPE_USERNAME", "").strip() - if not user: - user = context.data.get("user", getpass.getuser()) - + user = get_openpype_username() context.data["user"] = user self.log.debug("Colected user \"{}\"".format(user)) diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index 390ce443b6..1aa10fcb9b 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -15,7 +15,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): label = "Collect Hierarchy" order = pyblish.api.CollectorOrder - 0.57 families = ["shot"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, context): temp_context = {} diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index 53cc249033..e1b8b95a46 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -20,7 +20,7 @@ class CollectOcioFrameRanges(pyblish.api.InstancePlugin): label = "Collect OTIO Frame Ranges" order = pyblish.api.CollectorOrder - 0.58 families = ["shot", "clip"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): # get basic variables diff --git a/openpype/plugins/publish/collect_otio_review.py b/openpype/plugins/publish/collect_otio_review.py index 0c7eeaea44..e2375c70c9 100644 --- a/openpype/plugins/publish/collect_otio_review.py +++ b/openpype/plugins/publish/collect_otio_review.py @@ -22,7 +22,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin): label = "Collect OTIO Review" order = pyblish.api.CollectorOrder - 0.57 families = ["clip"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): # get basic variables @@ -88,6 +88,7 @@ class CollectOcioReview(pyblish.api.InstancePlugin): otio_review_clips.append(otio_gap) if otio_review_clips: + instance.data["label"] += " (review)" instance.data["families"] += ["review", "ftrack"] 
instance.data["otioReviewClips"] = otio_review_clips self.log.info( diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py b/openpype/plugins/publish/collect_otio_subset_resources.py index a0c6b9339b..d687c1920a 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -19,7 +19,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): label = "Collect OTIO Subset Resources" order = pyblish.api.CollectorOrder - 0.57 families = ["clip"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): if not instance.data.get("representations"): @@ -48,8 +48,8 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): trimmed_media_range) a_frame_start, a_frame_end = openpype.lib.otio_range_to_frame_range( otio_avalable_range) - a_frame_start_h, a_frame_end_h = openpype.lib.otio_range_to_frame_range( - trimmed_media_range_h) + a_frame_start_h, a_frame_end_h = openpype.lib.\ + otio_range_to_frame_range(trimmed_media_range_h) # fix frame_start and frame_end frame to be in range of media if a_frame_start_h < a_frame_start: @@ -80,6 +80,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): media_ref = otio_clip.media_reference metadata = media_ref.metadata + is_sequence = None # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer @@ -116,7 +117,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): # `ImageSequenceReference` path = media_ref.target_url collection_data = openpype.lib.make_sequence_collection( - path, trimmed_media_range, metadata) + path, trimmed_media_range_h, metadata) self.staging_dir, collection = collection_data self.log.debug(collection) @@ -126,7 +127,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin): dirname, filename = os.path.split(media_ref.target_url) self.staging_dir = dirname - self.log.debug(path) + self.log.debug(filename) repre = self._create_representation( frame_start, frame_end, file=filename) diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index dd1f09bafa..e263edd931 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -2,7 +2,6 @@ import pyblish.api from avalon import io from copy import deepcopy - class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): """Create entities in Avalon based on collected data.""" @@ -100,13 +99,20 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): if entity: # Do not override data, only update cur_entity_data = entity.get("data") or {} + entity_tasks = cur_entity_data["tasks"] or {} + + # create tasks as dict by default + if not entity_tasks: + cur_entity_data["tasks"] = entity_tasks + new_tasks = data.pop("tasks", {}) if "tasks" not in cur_entity_data and not new_tasks: continue for task_name in new_tasks: - if task_name in cur_entity_data["tasks"].keys(): + if task_name in entity_tasks.keys(): continue - cur_entity_data["tasks"][task_name] = new_tasks[task_name] + cur_entity_data["tasks"][task_name] = new_tasks[ + task_name] cur_entity_data.update(data) data = cur_entity_data else: diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py index 146f3b88ec..3bd217d5d4 100644 --- a/openpype/plugins/publish/extract_otio_file.py +++ b/openpype/plugins/publish/extract_otio_file.py @@ -12,7 
+12,7 @@ class ExtractOTIOFile(openpype.api.Extractor): label = "Extract OTIO file" order = pyblish.api.ExtractorOrder - 0.45 families = ["workfile"] - hosts = ["resolve"] + hosts = ["resolve", "hiero"] def process(self, instance): # create representation data diff --git a/openpype/plugins/publish/extract_otio_review.py b/openpype/plugins/publish/extract_otio_review.py index 91a680ddb0..07fe6f2731 100644 --- a/openpype/plugins/publish/extract_otio_review.py +++ b/openpype/plugins/publish/extract_otio_review.py @@ -40,8 +40,8 @@ class ExtractOTIOReview(openpype.api.Extractor): order = api.ExtractorOrder - 0.45 label = "Extract OTIO review" - hosts = ["resolve"] families = ["review"] + hosts = ["resolve", "hiero"] # plugin default attributes temp_file_head = "tempFile." @@ -188,7 +188,7 @@ class ExtractOTIOReview(openpype.api.Extractor): # creating and registering representation representation = self._create_representation(start, duration) instance.data["representations"].append(representation) - self.log.info(f"Adding representation: {representation}") + self.log.info("Adding representation: {}".format(representation)) def _create_representation(self, start, duration): """ @@ -388,7 +388,7 @@ class ExtractOTIOReview(openpype.api.Extractor): (int(end_offset + duration) + 1)): seq_number = padding.format(start_frame + index) self.log.debug( - f"index: `{index}` | seq_number: `{seq_number}`") + "index: `{}` | seq_number: `{}`".format(index, seq_number)) new_frames.append(int(seq_number)) new_frames += self.used_frames self.used_frames = new_frames diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index a71b1db66b..f341ba197f 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -333,10 +333,24 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get FFmpeg arguments from profile presets out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {} - ffmpeg_input_args = out_def_ffmpeg_args.get("input") or [] - ffmpeg_output_args = out_def_ffmpeg_args.get("output") or [] - ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] - ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] + _ffmpeg_input_args = out_def_ffmpeg_args.get("input") or [] + _ffmpeg_output_args = out_def_ffmpeg_args.get("output") or [] + _ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] + _ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] + + # Cleanup empty strings + ffmpeg_input_args = [ + value for value in _ffmpeg_input_args if value.strip() + ] + ffmpeg_output_args = [ + value for value in _ffmpeg_output_args if value.strip() + ] + ffmpeg_video_filters = [ + value for value in _ffmpeg_video_filters if value.strip() + ] + ffmpeg_audio_filters = [ + value for value in _ffmpeg_audio_filters if value.strip() + ] if isinstance(new_repre['files'], list): input_files_urls = [os.path.join(new_repre["stagingDir"], f) for f @@ -704,6 +718,105 @@ class ExtractReview(pyblish.api.InstancePlugin): return audio_in_args, audio_filters, audio_out_args + def get_letterbox_filters( + self, + letter_box_def, + input_res_ratio, + output_res_ratio, + pixel_aspect, + scale_factor_by_width, + scale_factor_by_height + ): + output = [] + + ratio = letter_box_def["ratio"] + state = letter_box_def["state"] + fill_color = letter_box_def["fill_color"] + f_red, f_green, f_blue, f_alpha = fill_color + fill_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( + f_red, f_green, f_blue + ) 
+ fill_color_alpha = float(f_alpha) / 255 + + line_thickness = letter_box_def["line_thickness"] + line_color = letter_box_def["line_color"] + l_red, l_green, l_blue, l_alpha = line_color + line_color_hex = "{0:0>2X}{1:0>2X}{2:0>2X}".format( + l_red, l_green, l_blue + ) + line_color_alpha = float(l_alpha) / 255 + + if input_res_ratio == output_res_ratio: + ratio /= pixel_aspect + elif input_res_ratio < output_res_ratio: + ratio /= scale_factor_by_width + else: + ratio /= scale_factor_by_height + + if state == "letterbox": + if fill_color_alpha > 0: + top_box = ( + "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c={}@{}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + bottom_box = ( + "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" + ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c={1}@{2}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + output.extend([top_box, bottom_box]) + + if line_color_alpha > 0 and line_thickness > 0: + top_line = ( + "drawbox=0:round((ih-(iw*(1/{0})))/2)-{1}:iw:{1}:" + "t=fill:c={2}@{3}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + bottom_line = ( + "drawbox=0:ih-round((ih-(iw*(1/{})))/2)" + ":iw:{}:t=fill:c={}@{}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + output.extend([top_line, bottom_line]) + + elif state == "pillar": + if fill_color_alpha > 0: + left_box = ( + "drawbox=0:0:round((iw-(ih*{}))/2):ih:t=fill:c={}@{}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + right_box = ( + "drawbox=iw-round((iw-(ih*{0}))/2))" + ":0:round((iw-(ih*{0}))/2):ih:t=fill:c={1}@{2}" + ).format(ratio, fill_color_hex, fill_color_alpha) + + output.extend([left_box, right_box]) + + if line_color_alpha > 0 and line_thickness > 0: + left_line = ( + "drawbox=round((iw-(ih*{}))/2):0:{}:ih:t=fill:c={}@{}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + + right_line = ( + "drawbox=iw-round((iw-(ih*{}))/2))" + ":0:{}:ih:t=fill:c={}@{}" + ).format( + ratio, line_thickness, line_color_hex, line_color_alpha + ) + + output.extend([left_line, right_line]) + + else: + raise ValueError( + "Letterbox state \"{}\" is not recognized".format(state) + ) + + return output + def rescaling_filters(self, temp_data, output_def, new_repre): """Prepare vieo filters based on tags in new representation. 
@@ -715,7 +828,8 @@ class ExtractReview(pyblish.api.InstancePlugin): """ filters = [] - letter_box = output_def.get("letter_box") + letter_box_def = output_def["letter_box"] + letter_box_enabled = letter_box_def["enabled"] # Get instance data pixel_aspect = temp_data["pixel_aspect"] @@ -795,7 +909,7 @@ class ExtractReview(pyblish.api.InstancePlugin): if ( output_width == input_width and output_height == input_height - and not letter_box + and not letter_box_enabled and pixel_aspect == 1 ): self.log.debug( @@ -834,30 +948,24 @@ class ExtractReview(pyblish.api.InstancePlugin): ) # letter_box - if letter_box: - if input_res_ratio == output_res_ratio: - letter_box /= pixel_aspect - elif input_res_ratio < output_res_ratio: - letter_box /= scale_factor_by_width - else: - letter_box /= scale_factor_by_height - - scale_filter = "scale={}x{}:flags=lanczos".format( - output_width, output_height + if letter_box_enabled: + filters.extend([ + "scale={}x{}:flags=lanczos".format( + output_width, output_height + ), + "setsar=1" + ]) + filters.extend( + self.get_letterbox_filters( + letter_box_def, + input_res_ratio, + output_res_ratio, + pixel_aspect, + scale_factor_by_width, + scale_factor_by_height + ) ) - top_box = ( - "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c=black" - ).format(letter_box) - - bottom_box = ( - "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" - ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" - ).format(letter_box) - - # Add letter box filters - filters.extend([scale_filter, "setsar=1", top_box, bottom_box]) - # scaling none square pixels and 1920 width if ( input_height != output_height diff --git a/openpype/settings/__init__.py b/openpype/settings/__init__.py index b4187829fc..b5810deef4 100644 --- a/openpype/settings/__init__.py +++ b/openpype/settings/__init__.py @@ -1,9 +1,13 @@ +from .exceptions import ( + SaveWarningExc +) from .lib import ( get_system_settings, get_project_settings, get_current_project_settings, get_anatomy_settings, - get_environments + get_environments, + get_local_settings ) from .entities import ( SystemSettings, @@ -12,11 +16,14 @@ from .entities import ( __all__ = ( + "SaveWarningExc", + "get_system_settings", "get_project_settings", "get_current_project_settings", "get_anatomy_settings", "get_environments", + "get_local_settings", "SystemSettings", "ProjectSettings" diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json index 4e98463ee4..ff16c22663 100644 --- a/openpype/settings/defaults/project_anatomy/imageio.json +++ b/openpype/settings/defaults/project_anatomy/imageio.json @@ -25,6 +25,9 @@ } }, "nuke": { + "viewer": { + "viewerProcess": "sRGB" + }, "workfile": { "colorManagement": "Nuke", "OCIO_config": "nuke-default", @@ -102,7 +105,7 @@ }, { "name": "tile_color", - "value": "0xff0000ff" + "value": "0xadab1dff" }, { "name": "channels", diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 9ff551491c..905ba68d60 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -21,7 +21,8 @@ "secondary_pool": "", "group": "", "department": "", - "limit_groups": {} + "limit_groups": {}, + "use_gpu": true }, "HarmonySubmitDeadline": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index ca1b258e72..61db35ba79 100644 --- 
a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -26,11 +26,11 @@ "ftrackreview" ], "ffmpeg_args": { - "video_filters": [], - "audio_filters": [], - "input": [ - "-gamma 2.2" + "video_filters": [ + "eq=gamma=2.2" ], + "audio_filters": [], + "input": [], "output": [ "-pix_fmt yuv420p", "-crf 18", @@ -45,7 +45,25 @@ ] }, "width": 0, - "height": 0 + "height": 0, + "letter_box": { + "enabled": false, + "ratio": 0.0, + "state": "letterbox", + "fill_color": [ + 0, + 0, + 0, + 255 + ], + "line_thickness": 0, + "line_color": [ + 255, + 0, + 0, + 255 + ] + } } } } diff --git a/openpype/settings/defaults/project_settings/hiero.json b/openpype/settings/defaults/project_settings/hiero.json index b69bc66457..a8d6472c47 100644 --- a/openpype/settings/defaults/project_settings/hiero.json +++ b/openpype/settings/defaults/project_settings/hiero.json @@ -17,6 +17,18 @@ "handleEnd": 10 } }, + "load": { + "LoadClip": { + "enabled": true, + "families": [ + "render2d", "source", "plate", "render", "review" + ], + "representations": [ + "exr", "dpx", "jpg", "jpeg", "png", "h264", "mov" + ], + "clip_name_template": "{asset}_{subset}_{representation}" + } + }, "publish": { "CollectInstanceVersion": { "enabled": false diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index dfece74f80..8600e49518 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -382,10 +382,6 @@ "optional": true, "active": true, "bake_attributes": [] - }, - "MayaSubmitDeadline": { - "enabled": true, - "tile_assembler_plugin": "DraftTileAssembler" } }, "load": { diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 852e041805..bb5232cea7 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -86,12 +86,6 @@ }, "ExtractSlateFrame": { "viewer_lut_raw": false - }, - "NukeSubmitDeadline": { - "deadline_priority": 50, - "deadline_pool": "", - "deadline_pool_secondary": "", - "deadline_chunk_size": 1 } }, "load": { diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json index 2355f39aa1..56d63ecf09 100644 --- a/openpype/settings/defaults/system_settings/applications.json +++ b/openpype/settings/defaults/system_settings/applications.json @@ -1165,6 +1165,7 @@ }, "variants": { "4-26": { + "use_python_2": false, "executables": { "windows": [], "darwin": [], diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index b3065058a1..6e4b493116 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -161,9 +161,6 @@ "log_viewer": { "enabled": true }, - "user": { - "enabled": true - }, "standalonepublish_tool": { "enabled": true } diff --git a/openpype/settings/entities/root_entities.py b/openpype/settings/entities/root_entities.py index eed3d47f46..b89473d9fb 100644 --- a/openpype/settings/entities/root_entities.py +++ b/openpype/settings/entities/root_entities.py @@ -23,6 +23,7 @@ from openpype.settings.constants import ( PROJECT_ANATOMY_KEY, KEY_REGEX ) +from openpype.settings.exceptions import SaveWarningExc from openpype.settings.lib import ( DEFAULTS_DIR, @@ 
-724,8 +725,19 @@ class ProjectSettings(RootEntity): project_settings = settings_value.get(PROJECT_SETTINGS_KEY) or {} project_anatomy = settings_value.get(PROJECT_ANATOMY_KEY) or {} - save_project_settings(self.project_name, project_settings) - save_project_anatomy(self.project_name, project_anatomy) + warnings = [] + try: + save_project_settings(self.project_name, project_settings) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) + + try: + save_project_anatomy(self.project_name, project_anatomy) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) + + if warnings: + raise SaveWarningExc(warnings) def _validate_defaults_to_save(self, value): """Valiations of default values before save.""" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index f46221ba63..1346fb3dad 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -128,6 +128,11 @@ "key": "department", "label": "Department" }, + { + "type": "boolean", + "key": "use_gpu", + "label": "Use GPU" + }, { "type": "dict-modifiable", "key": "limit_groups", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json index d2191a45a0..f717eff7dd 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json @@ -120,6 +120,45 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "load", + "label": "Loader plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "LoadClip", + "label": "Load Clip", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "text", + "key": "clip_name_template", + "label": "Clip name template" + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index b48f90bd91..edd5c18f51 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -118,6 +118,19 @@ "type": "dict", "label": "Nuke", "children": [ + { + "key": "viewer", + "type": "dict", + "label": "Viewer", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, { "key": "workfile", "type": "dict", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 3c079a130d..1bd028ac79 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -203,6 +203,69 @@ "default": 0, "minimum": 0, "maximum": 100000 + }, + { + 
"key": "letter_box", + "label": "Letter box", + "type": "dict", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled", + "default": false + }, + { + "key": "ratio", + "label": "Ratio", + "type": "number", + "decimal": 4, + "default": 0, + "minimum": 0, + "maximum": 10000 + }, + { + "key": "state", + "label": "Type", + "type": "enum", + "enum_items": [ + { + "letterbox": "Letterbox" + }, + { + "pillar": "Pillar" + } + ] + }, + { + "type": "schema_template", + "name": "template_rgba_color", + "template_data": [ + { + "label": "Fill Color", + "name": "fill_color" + } + ] + }, + { + "key": "line_thickness", + "label": "Line Thickness", + "type": "number", + "minimum": 0, + "maximum": 1000 + }, + { + "type": "schema_template", + "name": "template_rgba_color", + "template_data": [ + { + "label": "Line Color", + "name": "line_color" + } + ] + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 9d2e39edde..95b02a7936 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -283,34 +283,6 @@ "is_list": true } ] - }, - { - "type": "dict", - "collapsible": true, - "key": "MayaSubmitDeadline", - "label": "Submit maya job to deadline", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "enum", - "key": "tile_assembler_plugin", - "label": "Tile Assembler Plugin", - "multiselection": false, - "enum_items": [ - { - "DraftTileAssembler": "Draft Tile Assembler" - }, - { - "oiio": "Open Image IO" - } - ] - } - ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 0e3770ac78..087e6c13a9 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -146,35 +146,6 @@ "label": "Viewer LUT raw" } ] - }, - { - "type": "dict", - "collapsible": true, - "key": "NukeSubmitDeadline", - "label": "NukeSubmitDeadline", - "is_group": true, - "children": [ - { - "type": "number", - "key": "deadline_priority", - "label": "deadline_priority" - }, - { - "type": "text", - "key": "deadline_pool", - "label": "deadline_pool" - }, - { - "type": "text", - "key": "deadline_pool_secondary", - "label": "deadline_pool_secondary" - }, - { - "type": "number", - "key": "deadline_chunk_size", - "label": "deadline_chunk_size" - } - ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_rgba_color.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_rgba_color.json new file mode 100644 index 0000000000..ffe530175a --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_rgba_color.json @@ -0,0 +1,33 @@ +[ + { + "type": "list-strict", + "key": "{name}", + "label": "{label}", + "object_types": [ + { + "label": "R", + "type": "number", + "minimum": 0, + "maximum": 255 + }, + { + "label": "G", + "type": "number", + "minimum": 0, + "maximum": 255 + }, + { + "label": "B", + "type": "number", + "minimum": 0, + "maximum": 255 + }, + { + "label": "A", + "type": "number", + "minimum": 0, + 
"maximum": 255 + } + ] + } +] diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index a30cafd0c2..878958b12d 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ -154,20 +154,6 @@ } ] }, - { - "type": "dict", - "key": "user", - "label": "User setting", - "collapsible": true, - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - } - ] - }, { "type": "dict", "key": "standalonepublish_tool", diff --git a/openpype/settings/exceptions.py b/openpype/settings/exceptions.py new file mode 100644 index 0000000000..a06138eeaf --- /dev/null +++ b/openpype/settings/exceptions.py @@ -0,0 +1,11 @@ +class SaveSettingsValidation(Exception): + pass + + +class SaveWarningExc(SaveSettingsValidation): + def __init__(self, warnings): + if isinstance(warnings, str): + warnings = [warnings] + self.warnings = warnings + msg = " | ".join(warnings) + super(SaveWarningExc, self).__init__(msg) diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 3bf2141808..f61166fa69 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -4,6 +4,9 @@ import functools import logging import platform import copy +from .exceptions import ( + SaveWarningExc +) from .constants import ( M_OVERRIDEN_KEY, M_ENVIRONMENT_KEY, @@ -101,8 +104,14 @@ def save_studio_settings(data): For saving of data cares registered Settings handler. + Warning messages are not logged as module raising them should log it within + it's logger. + Args: data(dict): Overrides data with metadata defying studio overrides. + + Raises: + SaveWarningExc: If any module raises the exception. """ # Notify Pype modules from openpype.modules import ModulesManager, ISettingsChangeListener @@ -110,15 +119,25 @@ def save_studio_settings(data): old_data = get_system_settings() default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] new_data = apply_overrides(default_values, copy.deepcopy(data)) + new_data_with_metadata = copy.deepcopy(new_data) clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) modules_manager = ModulesManager(_system_settings=new_data) + + warnings = [] for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): - module.on_system_settings_save(old_data, new_data, changes) + try: + module.on_system_settings_save( + old_data, new_data, changes, new_data_with_metadata + ) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) - return _SETTINGS_HANDLER.save_studio_settings(data) + _SETTINGS_HANDLER.save_studio_settings(data) + if warnings: + raise SaveWarningExc(warnings) @require_handler @@ -130,10 +149,16 @@ def save_project_settings(project_name, overrides): For saving of data cares registered Settings handler. + Warning messages are not logged as module raising them should log it within + it's logger. + Args: project_name (str): Project name for which overrides are passed. Default project's value is None. overrides(dict): Overrides data with metadata defying studio overrides. + + Raises: + SaveWarningExc: If any module raises the exception. 
""" # Notify Pype modules from openpype.modules import ModulesManager, ISettingsChangeListener @@ -151,17 +176,29 @@ def save_project_settings(project_name, overrides): old_data = get_default_project_settings(exclude_locals=True) new_data = apply_overrides(default_values, copy.deepcopy(overrides)) + new_data_with_metadata = copy.deepcopy(new_data) clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) modules_manager = ModulesManager() + warnings = [] for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): - module.on_project_settings_save( - old_data, new_data, project_name, changes - ) + try: + module.on_project_settings_save( + old_data, + new_data, + project_name, + changes, + new_data_with_metadata + ) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) - return _SETTINGS_HANDLER.save_project_settings(project_name, overrides) + _SETTINGS_HANDLER.save_project_settings(project_name, overrides) + + if warnings: + raise SaveWarningExc(warnings) @require_handler @@ -173,10 +210,16 @@ def save_project_anatomy(project_name, anatomy_data): For saving of data cares registered Settings handler. + Warning messages are not logged as module raising them should log it within + it's logger. + Args: project_name (str): Project name for which overrides are passed. Default project's value is None. overrides(dict): Overrides data with metadata defying studio overrides. + + Raises: + SaveWarningExc: If any module raises the exception. """ # Notify Pype modules from openpype.modules import ModulesManager, ISettingsChangeListener @@ -194,17 +237,29 @@ def save_project_anatomy(project_name, anatomy_data): old_data = get_default_anatomy_settings(exclude_locals=True) new_data = apply_overrides(default_values, copy.deepcopy(anatomy_data)) + new_data_with_metadata = copy.deepcopy(new_data) clear_metadata_from_settings(new_data) changes = calculate_changes(old_data, new_data) modules_manager = ModulesManager() + warnings = [] for module in modules_manager.get_enabled_modules(): if isinstance(module, ISettingsChangeListener): - module.on_project_anatomy_save( - old_data, new_data, changes, project_name - ) + try: + module.on_project_anatomy_save( + old_data, + new_data, + changes, + project_name, + new_data_with_metadata + ) + except SaveWarningExc as exc: + warnings.extend(exc.warnings) - return _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data) + _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data) + + if warnings: + raise SaveWarningExc(warnings) @require_handler diff --git a/openpype/tools/launcher/actions.py b/openpype/tools/launcher/actions.py index 6261fe91ca..72c7aece72 100644 --- a/openpype/tools/launcher/actions.py +++ b/openpype/tools/launcher/actions.py @@ -1,7 +1,7 @@ import os -import importlib -from avalon import api, lib, style +from avalon import api, style +from openpype import PLUGINS_DIR from openpype.api import Logger, resources from openpype.lib import ( ApplictionExecutableNotFound, @@ -10,81 +10,6 @@ from openpype.lib import ( from Qt import QtWidgets, QtGui -class ProjectManagerAction(api.Action): - name = "projectmanager" - label = "Project Manager" - icon = "gear" - order = 999 # at the end - - def is_compatible(self, session): - return "AVALON_PROJECT" in session - - def process(self, session, **kwargs): - return lib.launch( - executable="python", - args=[ - "-u", "-m", "avalon.tools.projectmanager", - session['AVALON_PROJECT'] - ] - ) - - -class 
LoaderAction(api.Action): - name = "loader" - label = "Loader" - icon = "cloud-download" - order = 998 - - def is_compatible(self, session): - return "AVALON_PROJECT" in session - - def process(self, session, **kwargs): - return lib.launch( - executable="python", - args=[ - "-u", "-m", "avalon.tools.loader", session['AVALON_PROJECT'] - ] - ) - - -class LoaderLibrary(api.Action): - name = "loader_os" - label = "Library Loader" - icon = "book" - order = 997 # at the end - - def is_compatible(self, session): - return True - - def process(self, session, **kwargs): - return lib.launch( - executable="python", - args=["-u", "-m", "avalon.tools.libraryloader"] - ) - - -def register_default_actions(): - """Register default actions for Launcher""" - api.register_plugin(api.Action, ProjectManagerAction) - api.register_plugin(api.Action, LoaderAction) - api.register_plugin(api.Action, LoaderLibrary) - - -def register_config_actions(): - """Register actions from the configuration for Launcher""" - - module_name = os.environ["AVALON_CONFIG"] - config = importlib.import_module(module_name) - if not hasattr(config, "register_launcher_actions"): - print( - "Current configuration `%s` has no 'register_launcher_actions'" - % config.__name__ - ) - return - - config.register_launcher_actions() - - def register_actions_from_paths(paths): if not paths: return @@ -106,6 +31,13 @@ def register_actions_from_paths(paths): api.register_plugin_path(api.Action, path) +def register_config_actions(): + """Register actions from the configuration for Launcher""" + + actions_dir = os.path.join(PLUGINS_DIR, "actions") + register_actions_from_paths([actions_dir]) + + def register_environment_actions(): """Register actions from AVALON_ACTIONS for Launcher.""" diff --git a/openpype/tools/settings/local_settings/general_widget.py b/openpype/tools/settings/local_settings/general_widget.py index e820d8ab8b..78bc53fdd2 100644 --- a/openpype/tools/settings/local_settings/general_widget.py +++ b/openpype/tools/settings/local_settings/general_widget.py @@ -1,3 +1,5 @@ +import getpass + from Qt import QtWidgets @@ -5,16 +7,29 @@ class LocalGeneralWidgets(QtWidgets.QWidget): def __init__(self, parent): super(LocalGeneralWidgets, self).__init__(parent) + username_input = QtWidgets.QLineEdit(self) + username_input.setPlaceholderText(getpass.getuser()) + + layout = QtWidgets.QFormLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + + layout.addRow("OpenPype Username", username_input) + + self.username_input = username_input def update_local_settings(self, value): - return - - # RETURNING EARLY TO HIDE WIDGET WITHOUT CONTENT + username = "" + if value: + username = value.get("username", username) + self.username_input.setText(username) def settings_value(self): # Add changed # If these have changed then output = {} - # TEMPORARILY EMPTY AS THERE IS NOTHING TO PUT HERE - + username = self.username_input.text() + if username: + output["username"] = username + # Do not return output yet since we don't have mechanism to save or + # load these data through api calls return output diff --git a/openpype/tools/settings/local_settings/window.py b/openpype/tools/settings/local_settings/window.py index a12a2289b5..b6ca56d348 100644 --- a/openpype/tools/settings/local_settings/window.py +++ b/openpype/tools/settings/local_settings/window.py @@ -80,7 +80,6 @@ class LocalSettingsWidget(QtWidgets.QWidget): general_widget = LocalGeneralWidgets(general_content) general_layout.addWidget(general_widget) - general_expand_widget.hide() 
self.main_layout.addWidget(general_expand_widget) @@ -127,9 +126,9 @@ class LocalSettingsWidget(QtWidgets.QWidget): self.system_settings.reset() self.project_settings.reset() - # self.general_widget.update_local_settings( - # value.get(LOCAL_GENERAL_KEY) - # ) + self.general_widget.update_local_settings( + value.get(LOCAL_GENERAL_KEY) + ) self.app_widget.update_local_settings( value.get(LOCAL_APPS_KEY) ) @@ -139,9 +138,9 @@ class LocalSettingsWidget(QtWidgets.QWidget): def settings_value(self): output = {} - # general_value = self.general_widget.settings_value() - # if general_value: - # output[LOCAL_GENERAL_KEY] = general_value + general_value = self.general_widget.settings_value() + if general_value: + output[LOCAL_GENERAL_KEY] = general_value app_value = self.app_widget.settings_value() if app_value: diff --git a/openpype/tools/settings/settings/widgets/categories.py b/openpype/tools/settings/settings/widgets/categories.py index 9d286485a3..e4832c989a 100644 --- a/openpype/tools/settings/settings/widgets/categories.py +++ b/openpype/tools/settings/settings/widgets/categories.py @@ -27,7 +27,7 @@ from openpype.settings.entities import ( SchemaError ) -from openpype.settings.lib import get_system_settings +from openpype.settings import SaveWarningExc from .widgets import ProjectListWidget from . import lib @@ -272,6 +272,22 @@ class SettingsCategoryWidget(QtWidgets.QWidget): # not required. self.reset() + except SaveWarningExc as exc: + warnings = [ + "Settings were saved but few issues happened." + ] + for item in exc.warnings: + warnings.append(item.replace("\n", "
")) + + msg = "

".join(warnings) + + dialog = QtWidgets.QMessageBox(self) + dialog.setText(msg) + dialog.setIcon(QtWidgets.QMessageBox.Warning) + dialog.exec_() + + self.reset() + except Exception as exc: formatted_traceback = traceback.format_exception(*sys.exc_info()) dialog = QtWidgets.QMessageBox(self) diff --git a/pyproject.toml b/pyproject.toml index 12b9c4446d..88c977cd99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "OpenPype" version = "3.0.0-beta2" -description = "Multi-platform open-source pipeline built around the Avalon platform, expanding it with extra features and integrations." +description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" homepage = "https://openpype.io" diff --git a/setup.py b/setup.py index fd589e5251..c096befa34 100644 --- a/setup.py +++ b/setup.py @@ -45,7 +45,8 @@ install_requires = [ "googleapiclient", "httplib2", # Harmony implementation - "filecmp" + "filecmp", + "dns" ] includes = [] @@ -69,7 +70,11 @@ if sys.platform == "win32": "pythoncom" ]) -build_options = dict( + +icon_path = openpype_root / "igniter" / "openpype.ico" +mac_icon_path = openpype_root / "igniter" / "openpype.icns" + +build_exe_options = dict( packages=install_requires, includes=includes, excludes=excludes, @@ -78,13 +83,16 @@ build_options = dict( optimize=0 ) -icon_path = openpype_root / "igniter" / "openpype.ico" +bdist_mac_options = dict( + bundle_name="OpenPype", + iconfile=mac_icon_path +) executables = [ - Executable("start.py", base=None, - target_name="openpype_console", icon=icon_path.as_posix()), Executable("start.py", base=base, - target_name="openpype_gui", icon=icon_path.as_posix()) + target_name="openpype_gui", icon=icon_path.as_posix()), + Executable("start.py", base=None, + target_name="openpype_console", icon=icon_path.as_posix()) ] setup( @@ -93,7 +101,8 @@ setup( description="Ultimate pipeline", cmdclass={"build_sphinx": BuildDoc}, options={ - "build_exe": build_options, + "build_exe": build_exe_options, + "bdist_mac": bdist_mac_options, "build_sphinx": { "project": "OpenPype", "version": __version__, diff --git a/start.py b/start.py index a2a03f112c..05069862bf 100644 --- a/start.py +++ b/start.py @@ -115,6 +115,7 @@ else: os.path.join(OPENPYPE_ROOT, "dependencies") ) sys.path.append(frozen_libs) + sys.path.insert(0, OPENPYPE_ROOT) # add stuff from `/dependencies` to PYTHONPATH. 
pythonpath = os.getenv("PYTHONPATH", "") paths = pythonpath.split(os.pathsep) @@ -123,7 +124,10 @@ else: import igniter # noqa: E402 from igniter import BootstrapRepos # noqa: E402 -from igniter.tools import get_openpype_path_from_db # noqa +from igniter.tools import ( + get_openpype_path_from_db, + validate_mongo_connection +) # noqa from igniter.bootstrap_repos import OpenPypeVersion # noqa: E402 bootstrap = BootstrapRepos() @@ -305,20 +309,32 @@ def _determine_mongodb() -> str: openpype_mongo = os.getenv("OPENPYPE_MONGO", None) if not openpype_mongo: # try system keyring + try: + openpype_mongo = bootstrap.secure_registry.get_item( + "openPypeMongo" + ) + except ValueError: + pass + + if openpype_mongo: + result, msg = validate_mongo_connection(openpype_mongo) + if not result: + print(msg) + openpype_mongo = None + + if not openpype_mongo: + print("*** No DB connection string specified.") + print("--- launching setup UI ...") + + result = igniter.open_dialog() + if result == 0: + raise RuntimeError("MongoDB URL was not defined") + try: openpype_mongo = bootstrap.secure_registry.get_item( "openPypeMongo") except ValueError: - print("*** No DB connection string specified.") - print("--- launching setup UI ...") - import igniter - igniter.open_dialog() - - try: - openpype_mongo = bootstrap.secure_registry.get_item( - "openPypeMongo") - except ValueError: - raise RuntimeError("missing mongodb url") + raise RuntimeError("Missing MongoDB url") return openpype_mongo diff --git a/test_localsystem.txt b/test_localsystem.txt new file mode 100644 index 0000000000..dde7986af8 --- /dev/null +++ b/test_localsystem.txt @@ -0,0 +1 @@ +I have run diff --git a/tools/build.ps1 b/tools/build.ps1 index 412bb111c1..5283ee4754 100644 --- a/tools/build.ps1 +++ b/tools/build.ps1 @@ -121,6 +121,10 @@ catch { Exit-WithCode 1 } +Write-Host ">>> " -NoNewLine -ForegroundColor green +Write-Host "Making sure submodules are up-to-date ..." +git submodule update --init --recursive + Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Building OpenPype [ " -NoNewline -ForegroundColor white Write-host $openpype_version -NoNewline -ForegroundColor green diff --git a/tools/build.sh b/tools/build.sh index b95e2969c4..d0593a2b2f 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -157,10 +157,33 @@ main () { install_poetry || { echo -e "${BIRed}!!!${RST} Poetry installation failed"; return; } fi + echo -e "${BIGreen}>>>${RST} Making sure submodules are up-to-date ..." + git submodule update --init --recursive + echo -e "${BIGreen}>>>${RST} Building ..." 
- poetry run python3 "$openpype_root/setup.py" build > "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return; } + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + poetry run python3 "$openpype_root/setup.py" build > "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return; } + elif [[ "$OSTYPE" == "darwin"* ]]; then + poetry run python3 "$openpype_root/setup.py" bdist_mac > "$openpype_root/build/build.log" || { echo -e "${BIRed}!!!${RST} Build failed, see the build log."; return; } + fi poetry run python3 "$openpype_root/tools/build_dependencies.py" + if [[ "$OSTYPE" == "darwin"* ]]; then + # fix code signing issue + codesign --remove-signature "$openpype_root/build/OpenPype.app/Contents/MacOS/lib/Python" + if command -v create-dmg > /dev/null 2>&1; then + create-dmg \ + --volname "OpenPype Installer" \ + --window-pos 200 120 \ + --window-size 600 300 \ + --app-drop-link 100 50 \ + "$openpype_root/build/OpenPype-Installer.dmg" \ + "$openpype_root/build/OpenPype.app" + else + echo -e "${BIYellow}!!!${RST} ${BIWhite}create-dmg${RST} command is not available." + fi + fi + echo -e "${BICyan}>>>${RST} All done. You will find OpenPype and build log in \c" echo -e "${BIWhite}$openpype_root/build${RST} directory." } diff --git a/tools/build_dependencies.py b/tools/build_dependencies.py index e49e930a70..fb52e2b5fd 100644 --- a/tools/build_dependencies.py +++ b/tools/build_dependencies.py @@ -22,6 +22,7 @@ import os import sys import site from distutils.util import get_platform +import platform from pathlib import Path import shutil import blessed @@ -76,7 +77,14 @@ _print(f"Working with: {site_pkg}", 2) build_dir = "exe.{}-{}".format(get_platform(), sys.version[0:3]) # create full path -build_dir = Path(os.path.dirname(__file__)).parent / "build" / build_dir +if platform.system().lower() == "darwin": + build_dir = Path(os.path.dirname(__file__)).parent.joinpath( + "build", + "OpenPype.app", + "Contents", + "MacOS") +else: + build_dir = Path(os.path.dirname(__file__)).parent / "build" / build_dir _print(f"Using build at {build_dir}", 2) if not build_dir.exists(): diff --git a/tools/build_win_installer.ps1 b/tools/build_win_installer.ps1 new file mode 100644 index 0000000000..4a4d011258 --- /dev/null +++ b/tools/build_win_installer.ps1 @@ -0,0 +1,140 @@ +<# +.SYNOPSIS + Helper script to build OpenPype. + +.DESCRIPTION + This script will detect Python installation, and build OpenPype to `build` + directory using existing virtual environment created by Poetry (or + by running `/tools/create_venv.ps1`). It will then shuffle dependencies in + build folder to optimize for different Python versions (2/3) in Python host. + +.EXAMPLE + +PS> .\build.ps1 + +#> + +function Start-Progress { + param([ScriptBlock]$code) + $scroll = "/-\|/-\|" + $idx = 0 + $job = Invoke-Command -ComputerName $env:ComputerName -ScriptBlock { $code } -AsJob + + $origpos = $host.UI.RawUI.CursorPosition + + # $origpos.Y -= 1 + + while (($job.State -eq "Running") -and ($job.State -ne "NotStarted")) + { + $host.UI.RawUI.CursorPosition = $origpos + Write-Host $scroll[$idx] -NoNewline + $idx++ + if ($idx -ge $scroll.Length) + { + $idx = 0 + } + Start-Sleep -Milliseconds 100 + } + # It's over - clear the activity indicator. 
+ $host.UI.RawUI.CursorPosition = $origpos + Write-Host ' ' + <# + .SYNOPSIS + Display spinner for running job + .PARAMETER code + Job to display spinner for + #> +} + + +function Exit-WithCode($exitcode) { + # Only exit this host process if it's a child of another PowerShell parent process... + $parentPID = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$PID" | Select-Object -Property ParentProcessId).ParentProcessId + $parentProcName = (Get-CimInstance -ClassName Win32_Process -Filter "ProcessId=$parentPID" | Select-Object -Property Name).Name + if ('powershell.exe' -eq $parentProcName) { $host.SetShouldExit($exitcode) } + + exit $exitcode +} + +function Show-PSWarning() { + if ($PSVersionTable.PSVersion.Major -lt 7) { + Write-Host "!!! " -NoNewline -ForegroundColor Red + Write-Host "You are using old version of PowerShell. $($PSVersionTable.PSVersion.Major).$($PSVersionTable.PSVersion.Minor)" + Write-Host "Please update to at least 7.0 - " -NoNewline -ForegroundColor Gray + Write-Host "https://github.com/PowerShell/PowerShell/releases" -ForegroundColor White + Exit-WithCode 1 + } +} + +function Install-Poetry() { + Write-Host ">>> " -NoNewline -ForegroundColor Green + Write-Host "Installing Poetry ... " + (Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python - + # add it to PATH + $env:PATH = "$($env:PATH);$($env:USERPROFILE)\.poetry\bin" +} + +$art = @" + +▒█▀▀▀█ █▀▀█ █▀▀ █▀▀▄ ▒█▀▀█ █░░█ █▀▀█ █▀▀ ▀█▀ ▀█▀ ▀█▀ +▒█░░▒█ █░░█ █▀▀ █░░█ ▒█▄▄█ █▄▄█ █░░█ █▀▀ ▒█░ ▒█░ ▒█░ +▒█▄▄▄█ █▀▀▀ ▀▀▀ ▀░░▀ ▒█░░░ ▄▄▄█ █▀▀▀ ▀▀▀ ▄█▄ ▄█▄ ▄█▄ + .---= [ by Pype Club ] =---. + https://openpype.io + +"@ + +Write-Host $art -ForegroundColor DarkGreen + +# Enable if PS 7.x is needed. +# Show-PSWarning + +$current_dir = Get-Location +$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent +$openpype_root = (Get-Item $script_dir).parent.FullName + +Set-Location -Path $openpype_root + +$version_file = Get-Content -Path "$($openpype_root)\openpype\version.py" +$result = [regex]::Matches($version_file, '__version__ = "(?\d+\.\d+.\d+.*)"') +$openpype_version = $result[0].Groups['version'].Value +if (-not $openpype_version) { + Write-Host "!!! " -ForegroundColor yellow -NoNewline + Write-Host "Cannot determine OpenPype version." + Exit-WithCode 1 +} +$env:BUILD_VERSION = $openpype_version + +iscc + +Write-Host ">>> " -NoNewline -ForegroundColor green +Write-Host "Creating OpenPype installer ... " -ForegroundColor white + +$build_dir_command = @" +import sys +from distutils.util import get_platform +print('exe.{}-{}'.format(get_platform(), sys.version[0:3])) +"@ + +$build_dir = & python -c $build_dir_command +Write-Host "Build directory ... ${build_dir}" -ForegroundColor white +$env:BUILD_DIR = $build_dir + +if (Get-Command iscc -errorAction SilentlyContinue -ErrorVariable ProcessError) +{ + iscc "$openpype_root\inno_setup.iss" +}else { + Write-Host "!!! Cannot find Inno Setup command" -ForegroundColor red + Write-Host "!!! You can download it at https://jrsoftware.org/" -ForegroundColor red + Exit-WithCode 1 +} + + +Write-Host ">>> " -NoNewline -ForegroundColor green +Write-Host "restoring current directory" +Set-Location -Path $current_dir + +Write-Host "*** " -NoNewline -ForegroundColor Cyan +Write-Host "All done. You will find OpenPype installer in " -NoNewLine +Write-Host "'.\build'" -NoNewline -ForegroundColor Green +Write-Host " directory." 
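The installer script above hinges on two environment variables consumed by `inno_setup.iss`: `BUILD_VERSION` (parsed from `openpype/version.py`) and `BUILD_DIR`, which must match the folder name that cx_Freeze writes under `build\`. A minimal sketch of that folder-name computation, mirroring the inline `python -c` snippet in the script (illustrative only, not part of the patch):

```python
import sys
from distutils.util import get_platform

# cx_Freeze writes its output to build/exe.<platform>-<python version>,
# e.g. "exe.win-amd64-3.7". This value is exported as BUILD_DIR and read
# by inno_setup.iss through GetEnv("BUILD_DIR").
build_dir = "exe.{}-{}".format(get_platform(), sys.version[0:3])
print(build_dir)
```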
diff --git a/tools/create_env.ps1 b/tools/create_env.ps1 index 44e1799be8..e72e98e04b 100644 --- a/tools/create_env.ps1 +++ b/tools/create_env.ps1 @@ -133,7 +133,7 @@ if (-not (Test-Path -PathType Leaf -Path "$($openpype_root)\poetry.lock")) { Write-Host ">>> " -NoNewline -ForegroundColor green Write-Host "Installing virtual environment from lock." } -& poetry install $poetry_verbosity +& poetry install --no-root $poetry_verbosity if ($LASTEXITCODE -ne 0) { Write-Host "!!! " -ForegroundColor yellow -NoNewline Write-Host "Poetry command failed." diff --git a/tools/create_env.sh b/tools/create_env.sh index 7bdb8503fd..04414ddea5 100755 --- a/tools/create_env.sh +++ b/tools/create_env.sh @@ -160,7 +160,7 @@ main () { echo -e "${BIGreen}>>>${RST} Installing dependencies ..." fi - poetry install $poetry_verbosity || { echo -e "${BIRed}!!!${RST} Poetry environment installation failed"; return; } + poetry install --no-root $poetry_verbosity || { echo -e "${BIRed}!!!${RST} Poetry environment installation failed"; return; } echo -e "${BIGreen}>>>${RST} Cleaning cache files ..." clean_pyc diff --git a/tools/run_mongo.sh b/tools/run_mongo.sh index 1c788abcaf..8c94fcf881 100755 --- a/tools/run_mongo.sh +++ b/tools/run_mongo.sh @@ -82,3 +82,4 @@ main () { echo -e "${BIGreen}>>>${RST} Detached to background." } +main diff --git a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py index d1287dd213..5e64605271 100644 --- a/vendor/deadline/custom/plugins/GlobalJobPreLoad.py +++ b/vendor/deadline/custom/plugins/GlobalJobPreLoad.py @@ -60,7 +60,7 @@ def inject_openpype_environment(deadlinePlugin): with open(export_url) as fp: contents = json.load(fp) for key, value in contents.items(): - deadlinePlugin.SetEnvironmentVariable(key, value) + deadlinePlugin.SetProcessEnvironmentVariable(key, value) os.remove(export_url) @@ -162,4 +162,3 @@ def __main__(deadlinePlugin): inject_openpype_environment(deadlinePlugin) else: pype(deadlinePlugin) # backward compatibility with Pype2 - diff --git a/website/docs/artist_hosts_hiero.md b/website/docs/artist_hosts_hiero.md new file mode 100644 index 0000000000..4ada1fba2d --- /dev/null +++ b/website/docs/artist_hosts_hiero.md @@ -0,0 +1,193 @@ +--- +id: artist_hosts_hiero +title: Hiero +sidebar_label: Hiero / Nuke Studio +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note +All the information also applies to **_Nuke Studio_**(NKS), but for simplicity we only refer to Hiero/NKS. The workflows are identical for both. We are supporting versions **`11.0`** and above. +::: + + + +## OpenPype global tools + +- [Work Files](artist_tools.md#workfiles) +- [Create](artist_tools.md#creator) +- [Load](artist_tools.md#loader) +- [Manage (Inventory)](artist_tools.md#inventory) +- [Publish](artist_tools.md#publisher) + + +## Hiero specific tools + + + +
+
+ +### Create Default Tags + +This tool recreates all the OpenPype tags needed for successful publishing. It runs automatically at the start of Hiero/NKS. Use it to manually re-create the tags if you accidentally delete them, or to reset them to their default values. + +#### Result + +- Creates tags in the Tags bin in case there were none +- Resets all tags to their default values if they have been altered + +
+
+ +![Default Tags](assets/hiero_defaultTags.png) + +
+
+ +
+
+ +### Apply Colorspace Project + +This tool applies the colorspace definition from OpenPype `Settings / Project / Anatomy / Color Management and Output Formats / Hiero / Workfile` to Hiero `menu / Project / Edit Settings / Color Management tab`. + +#### Result + +- Defines the correct color management settings on the project + +
+
+ +![Apply Colorspace Project](assets/hiero_menuColorspaceProject.png) + +
+
+ + +
+
+ +### Apply Colorspace Clips + +This tool applies the colorspace definitions from OpenPype `Settings / Project / Anatomy / Color Management and Output Formats / Hiero / Colorspace on Inputs by regex detection` to any clip whose source path matches one of the defined expressions (see the illustrative sketch at the end of this section). + +#### Result + +- Sets the correct `Set Media Color Transform` on each clip of the active timeline whose source path matches a defined expression + +
+
+ +![Apply Colorspace Clips](assets/hiero_menuColorspaceClip.png) + +
+
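To make the regex detection above more tangible, here is a minimal Python sketch of the matching logic. It is only an illustration, not OpenPype's implementation; the rule list, its key names and the sample path are hypothetical.

```python
import re

# Hypothetical rules mimicking "Colorspace on Inputs by regex detection":
# each rule maps a source-path regex to a colorspace name.
COLORSPACE_RULES = [
    {"regex": r".*_plate_.*\.exr$", "colorspace": "ACES - ACEScg"},
    {"regex": r".*\.mov$", "colorspace": "Output - Rec.709"},
]


def detect_colorspace(source_path):
    """Return the colorspace of the first rule matching the clip source path."""
    for rule in COLORSPACE_RULES:
        if re.match(rule["regex"], source_path):
            return rule["colorspace"]
    return None  # no rule matched - the clip colorspace is left untouched


if __name__ == "__main__":
    print(detect_colorspace("/proj/sh010/sh010_plate_main.0001.exr"))  # ACES - ACEScg
```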
+ +## Publishing Shots + + + +
+ +With OpenPype, you can use Hiero/NKS as a starting point for creating a project's **shots** as *assets* from timeline clips, together with their *hierarchical parents* such as **episodes**, **sequences** and **folders**, and their child **tasks**. Most importantly, it creates **versions** of plate *subsets*, with or without a **reference video**. Publishing also creates clip **thumbnails** and assigns them to the shot *asset*. Hiero additionally publishes an **audio** *subset* and various **soft-effects**, either as a retiming component of the published plates or as **color transformations**, which will be available later for compositing artists to use either as a *viewport input-process* or as *loaded nodes* in the graph editor. +



+ +### Preparing the timeline for conversion to instances +We do not support on-the-fly data conversion, so raw camera sources or other formats that need to be converted for 2D/3D work should be converted beforehand and the timeline reconformed. Before any clips in the timeline can be converted to publishable instances, we recommend the following (see the sketch after this list): +1. Merge all tracks that are supposed to be one and are only split because of the editor's style. +2. Rename tracks to follow the basic structure: if there is only one layer, name it `main`; in case of multiple layers (elements) for one shot, use `main` plus the other elements, for example `bg`, `greenscreen`, `fg01`, `fg02`, `display01`, etc. Please avoid using [-/_.,%&*] or spaces. These names are later used in *subset* name creation as `{family}{trackName}`, so for example **plateMain** or **plateFg01**. +3. Define the correct `Set Media Color Transform` on all clips, as it is also published to metadata and used later for loading with the correct color transformation. +4. Reviewable video material which you wish to use as preview videos on any supported project manager platform (Ftrack) should ideally be added to a track named **review**. This can be the offline edit used as reference video for 2D/3D artists. This video material can be edited to fit the length of the **main** timeline track, or it can be one long video clip underneath all clips in the **main** track, because OpenPype will trim it to the appropriate length using FFMPEG. Please be aware that we only support MP4 (h264) or JPG sequences at the moment. +
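As referenced in point 2, the subset naming convention can be sketched in a few lines of Python. This is only an illustration of the `{family}{trackName}` rule; the helper function and sample track names are hypothetical, not OpenPype code.

```python
import re

ALLOWED_TRACK_NAME = re.compile(r"^[a-zA-Z0-9]+$")  # no [-/_.,%&*] or spaces


def subset_name(family, track_name):
    """Compose a subset name as {family}{TrackName}, e.g. plate + main -> plateMain."""
    if not ALLOWED_TRACK_NAME.match(track_name):
        raise ValueError("Track name '{}' contains forbidden characters".format(track_name))
    return family + track_name[0].upper() + track_name[1:]


if __name__ == "__main__":
    for track in ("main", "bg", "fg01"):
        print(subset_name("plate", track))  # plateMain, plateBg, plateFg01
```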
+ +![Create menu](assets/hiero_timelinePrep.png) + +
+ + +### Converting timeline clips to instances + +Every clip on the timeline which is intended to be published has to be converted to a publishable instance. + +
+ +In OpenPype this is done by tagging a clip with our own metadata. Select all the clips you wish to convert and go to `menu > OpenPype > Create`. +



+ +
+ +
+ +![Create menu](assets/hiero_menuCreate.png) + +
+ +
+ +Then choose `Create Publishable Clip` in the **Instance Creator** dialogue. +

+You can alter the Subset name here, but it will be changed dynamically and replaced with the timeline's track name. +

+ +Keep **Use selection** on. +

+ +Hit **Create** +

+
+ +
+ +![Instance Creator](assets/hiero_instanceCreator.png) + +
+
+ +The `Pype publish attributes creator` dialogue will open. Here you can define the instance properties. If you wish to rename clips dynamically during creation, keep **Rename clips** ticked. +

+In case you wish to use the *multiple elements of shots* workflow, keep **Enable vertical sync** ticked and define the correct hero track which holds the main plates; this is usually the **main** track. +
+ +
+ +![Create menu](assets/hiero_createUIRename.png) + +
+
+ +The subset name is created dynamically if the track name option is selected on **Subset name**. +

+In case you wish to publish a reviewable video as explained above, select the appropriate track from the **Use review track** drop-down menu. It is usually named `review`. +

+Hover over each input field for help. +

+Handles can be defined here too. In case you wish to set an individual clip differently, we recommend setting the default value here and later changing it in the created OpenPype tag's metadata under the `handleStart` and `handleEnd` properties (see below for details). +
+ +
+ +![Create menu](assets/hiero_createUIFrames.png) + +
+
+ +After you hit **Ok**, tags are added to the selected clips (except clips in **review** tracks). +

+If you wish to change any individual property of the shot, you can do it here. In this example we change `handleStart` and `handleEnd` to other values (see the sketch below the image). +
+ +
+ +![Create menu](assets/hiero_tagHandles.png) + +
+
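As a rough illustration of the `handleStart`/`handleEnd` properties mentioned above, the sketch below shows how handle values extend a shot's frame range. The dictionary is hypothetical example data, not the exact format OpenPype stores inside the tag.

```python
# Hypothetical shot data as it could be carried in an OpenPype tag's metadata.
shot_data = {
    "frameStart": 1001,
    "frameEnd": 1100,
    "handleStart": 10,
    "handleEnd": 10,
}


def full_range(data):
    """Return (first, last, duration) of the clip including handles."""
    first = data["frameStart"] - data["handleStart"]
    last = data["frameEnd"] + data["handleEnd"]
    duration = last - first + 1
    return first, last, duration


if __name__ == "__main__":
    print(full_range(shot_data))  # (991, 1110, 120)
```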
diff --git a/website/docs/artist_hosts_nukestudio.md b/website/docs/artist_hosts_nukestudio.md deleted file mode 100644 index 23301f53bf..0000000000 --- a/website/docs/artist_hosts_nukestudio.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -id: artist_hosts_nukestudio -title: Hiero -sidebar_label: Hiero / Nuke Studio ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -:::note -All the information also applies to **_Nuke Studio_**, but for simplicity we only refer to Hiero. The workflows are identical for both. We are supporting versions **`11.0`** and above. -::: - - -## Hiero specific tools - - - -
-
- -### Create Default Tags - -This tool will recreate all necessary OpenPype tags needed for successful publishing. It is automatically ran at start of the Hiero. Use this tool to manually re-create all the tags if you accidentaly delete them, or you want to reset them to default values. - -
-
- -![Default Tags](assets/nukestudio_defaultTags.png) - -
-
- -#### Result - -- Will create tags in Tags bin in case there were none -- Will set all tags to default values if they have been altered - -## Publishing Shots - - - -
-
- -With OpenPype, you can use Hiero as a starting point for creating project hierarchy in avalon and ftrack database (episodes, sequences, shots, folders etc.), publishing plates, reference quicktimes, audio and various soft effects that will be evailable later on for compositors and 3D artist to use. - -There are two ways to `Publish` data and create shots in database from Hiero. Use either context menu on right clicking selected clips or go to top `menu > OpenPype > Publish`. - -
-
- -![Clips naming](assets/nukestudio_basic_clipNaming.png) - -
-
- -Keep in mind that the publishing currently works on selected shots - -Shot names for all the related plates that you want to publish (subsets) has to be the same to be correctly paired together (as it is shown in image). -Note the layer **review** which contains `plateMainReview`. -This media is just h264, 1920x1080 video for that will be used as preview of the actual `plateMain` subset and will be uploaded to Ftrack. We explain how to work with review tag in [**Reviewing**](#reviewing). - - -:::important -To to successfuly publish a shot from Hiero: -1. At least one clip of your shot must be tagged with `Hierarchy`, `subset` and `handleStart/End`. -2. Your source media must be pre-cut to correct length (including handles) -::: - -### Tagging - - -OpenPype's custom tags are used for defining shot parameters and to define which clips and how they are going to be published. - -If you want to add any properties to your clips you'll need to adjust values on the given tag and then drag it onto the clip. - - -
- -![Tags basic](assets/nukestudio_tagsToClips_basic.png) - -
- -1. double click on preferable tag and drag&drop it to selected clip(s) -2. Basic set of tags on clip (usually subset: plateMain) -3. Additionally select clip and edit its parameters -4. Edit parameters here but do not touch `family` - -
-
- -:::important -Only clips with `subset` will be directly processed for publishing. -::: - -### Custom Tags Details - -#### Asset related -| Icon | Description | Editable | Options | -| ------------------- | ---------------------------------------------------------------------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------- | -| ![Hierarchy][hi] | Define parent hierarchy of the shot. Usually combined with one of subset tags. | root, folder, sequence, episode, shot | example: {sequence} = name of Hiero sequence or overwrite by any text without `-` or `_` | -| ![Frame Start][fst] | Set start frame of the shot. Using `"source"` will keep original frame numbers. | number | int `number` or `"source"` | - - -#### Subsets - -| Icon | Description | Editable | Options | -| ------------------ | ------------------------------------------------------------------------------ | -------- | --------------------------------- | -| ![Review][rew] | Choose which track holds review quicktime for the given shot. | track | `"review"` or other track name | -| ![Plate Main][pmn] | Main plate subset identifier | subset | `"main"` or other | -| ![Plate FG][pfg] | Foreground plate subset identifier (comped over the main plate) | subset | `"Fg##"` or other | -| ![Plate BG][pbg] | Background plate subset identifier (comped under the main plate) | subset | `"Bg##"` or other | -| ![Plate Ref][ref] | Reference plate subset identifier | subset | `"Ref"` or other | - -#### Subset's attributes - -| Icon | Description | Editable | Options | -| ------------------ | --------------------------------------------------------------------------------- | ------------------- | ----------------------------- | -| ![Resolution][rsl] | Use source resolution instead of sequence settings. | none | | -| ![Retiming][rtm] | Publish retime metadata to shot if retime or time-warp found on clip | marginIn, marginOut | int `number` frame cushioning | -| ![Lens][lns] | Specify lens focal length metadata (work in progress) | focalLengthMm | int `number` | - -#### Handles - -| Icon | Description | Editable | Options | -| --------------------- | ---------------------------------------------------------------------------- | -------- | -------------------------- | -| ![Handles Start][ahs] | Handles at the start of the clip/shot | value | change to any int `number` | -| ![Handles End][ahe] | Handles at the end of a clip/shot | value | change to any int `number` | - -[hi]: assets/nks_icons/hierarchy.png - -[ahs]: assets/nks_icons/3_add_handles_start.png - -[ahe]: assets/nks_icons/1_add_handles_end.png - -[rsl]: assets/nks_icons/resolution.png - -[rtm]: assets/nks_icons/retiming.png - -[rew]: assets/nks_icons/review.png - -[pmn]: assets/nks_icons/z_layer_main.png - -[pfg]: assets/nks_icons/z_layer_fg.png - -[pbg]: assets/nks_icons/z_layer_bg.png - -[lns]: assets/nks_icons/lense1.png - -[fst]: assets/nks_icons/frame_start.png - -[ref]: assets/nks_icons/reference.png - -### Handles - -OpenPype requires handle information in shot metadata even if they are set to 0. -For this you need to add handles tags to the main clip (Should be the one with Hierarchy tag). -This way we are defining a shot property. In case you wish to have different -handles on other subsets (e.g. when plateBG is longer than plateFG) you can add handle tags with different value to this longer plate. 
- -If you wish to have different handles length (say 100) than one of the default tags, simply drag `start: add 10 frames` to your clip -and then go to clips tags, find the tag, then replace 10 for 100 in name and also change value to 100. -This is also explained following tutorial [`Extending premade handles tags`](#extending-premade-handles-tags) - -:::caution -Even if you don't need any handles you have to add `start: add 0 frames` and `end: add 0 frames` tags to the clip with Hierarchy tag. -::: - -### Retiming - -OpenPype is also able to publish retiming parameters into the database. -Any clip with **editorial**/**retime** or **TimeWarp** soft effect has to be tagged with `Retiming` tag, if you want this information preserved during publishing. - -Any animation on **TimeWarp** is also preserved and reapplied in _Nuke_. - -You can only combine **retime** and with a single **Timewarp**. - -### Reviewing - -There are two ways to publish reviewable **h264 mov** into OpenPype (and Ftrack). - - - - - - - -The first one uses the Review Tag pointing to the track that holds the reviewable quicktimes for plates. - -This tag metadata has `track` key inside that points to `review` track by default. If you drop this tag onto any publishable clip on the timeline you're telling OpenPype "you will find quicktime version of this plate on `review` track (clips must have the same name)" - -In the image on the right we dropped it to **plateMain** clip. Then we renamed the layer tha hold reviewable quicktime called `plateMainReview`. You can see that the clip names are the same. - - - -
- -![Reviewing](assets/nukestudio_reviewing.png) - -
- -1. `-review` suffix is added to publishing item label if any reviewable file is found -2. `plateMain` clip is holding the Review tag -3. layer name is `review` as it is used as default in _Review_ Tag in _track_ -4. name of clip is the same across all subsets - -
-
- - -
- - -Second way would be to add the **h264 mov 1920x1080** into the same folder -as image sequence. The name of the file has to be the same as image sequence. -Publisher will pick this file up and add it to the files list during collecting. -This will also add `"- review"` to instance label in **Publish**. - -Example: - -- img seq: `image_sequence_name.0001.exr` -- mov: `image_sequence_name.mov` - - -
- - --------------- - - -### LUT Workflow - - -
-
- -It is possible to publish Hiero soft effects for compositors to use later on. You can add the effect to a particular clip or to whole layer as shows on the picture. All clips -below the `Video 6` layer (green arrow) will be published with the **LUT** subset which combines all the colour corrections from he soft effects. Any clips above the `Video 6` layer will have no **LUT** published with them. - - -
-
- -![Reviewing](assets/nukestudio_softEffects.png) - -
-
- -Any external Lut files used in the soft effects will be copied over to `resources` of the published subset folder `lutPlateMain` (in our example). - -:::note - -
-
- -You cannot currently publish soft effects on their own because at the moment we only support soft effects as a part of other subset publishing. Image is demonstrating successful publishing. - -
-
- -![Reviewing](assets/nukestudio_lutSucess.png) - -
-
- -::: - -## Tutorials - - -### Basic publishing with soft effects - - - - -### Extending premade handles tags - - diff --git a/website/docs/assets/hiero_createUIFrames.png b/website/docs/assets/hiero_createUIFrames.png new file mode 100644 index 0000000000..798b3efb79 Binary files /dev/null and b/website/docs/assets/hiero_createUIFrames.png differ diff --git a/website/docs/assets/hiero_createUIRename.png b/website/docs/assets/hiero_createUIRename.png new file mode 100644 index 0000000000..3c02254559 Binary files /dev/null and b/website/docs/assets/hiero_createUIRename.png differ diff --git a/website/docs/assets/hiero_defaultTags.png b/website/docs/assets/hiero_defaultTags.png new file mode 100644 index 0000000000..225ec7d484 Binary files /dev/null and b/website/docs/assets/hiero_defaultTags.png differ diff --git a/website/docs/assets/hiero_instanceCreator.png b/website/docs/assets/hiero_instanceCreator.png new file mode 100644 index 0000000000..bcda6cdd18 Binary files /dev/null and b/website/docs/assets/hiero_instanceCreator.png differ diff --git a/website/docs/assets/hiero_menuColorspaceClip.png b/website/docs/assets/hiero_menuColorspaceClip.png new file mode 100644 index 0000000000..4014da2675 Binary files /dev/null and b/website/docs/assets/hiero_menuColorspaceClip.png differ diff --git a/website/docs/assets/hiero_menuColorspaceProject.png b/website/docs/assets/hiero_menuColorspaceProject.png new file mode 100644 index 0000000000..6b8e6e1b89 Binary files /dev/null and b/website/docs/assets/hiero_menuColorspaceProject.png differ diff --git a/website/docs/assets/hiero_menuCreate.png b/website/docs/assets/hiero_menuCreate.png new file mode 100644 index 0000000000..cbd816a203 Binary files /dev/null and b/website/docs/assets/hiero_menuCreate.png differ diff --git a/website/docs/assets/hiero_menuDefaultTags.png b/website/docs/assets/hiero_menuDefaultTags.png new file mode 100644 index 0000000000..ba542e1019 Binary files /dev/null and b/website/docs/assets/hiero_menuDefaultTags.png differ diff --git a/website/docs/assets/hiero_menuPublish.png b/website/docs/assets/hiero_menuPublish.png new file mode 100644 index 0000000000..4259dc44e4 Binary files /dev/null and b/website/docs/assets/hiero_menuPublish.png differ diff --git a/website/docs/assets/hiero_tagHandles.png b/website/docs/assets/hiero_tagHandles.png new file mode 100644 index 0000000000..84e7012088 Binary files /dev/null and b/website/docs/assets/hiero_tagHandles.png differ diff --git a/website/docs/assets/hiero_timelinePrep.png b/website/docs/assets/hiero_timelinePrep.png new file mode 100644 index 0000000000..253bc114ee Binary files /dev/null and b/website/docs/assets/hiero_timelinePrep.png differ diff --git a/website/docs/assets/nukestudio_basic_clipNaming.png b/website/docs/assets/nukestudio_basic_clipNaming.png deleted file mode 100644 index 71d623f706..0000000000 Binary files a/website/docs/assets/nukestudio_basic_clipNaming.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_defaultTags.png b/website/docs/assets/nukestudio_defaultTags.png deleted file mode 100644 index 3ba15ccc17..0000000000 Binary files a/website/docs/assets/nukestudio_defaultTags.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_lutSucess.png b/website/docs/assets/nukestudio_lutSucess.png deleted file mode 100644 index fa013b99b2..0000000000 Binary files a/website/docs/assets/nukestudio_lutSucess.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_publishing_basic.png 
b/website/docs/assets/nukestudio_publishing_basic.png deleted file mode 100644 index 6592ec423c..0000000000 Binary files a/website/docs/assets/nukestudio_publishing_basic.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_reviewing.png b/website/docs/assets/nukestudio_reviewing.png deleted file mode 100644 index 0d3b4170df..0000000000 Binary files a/website/docs/assets/nukestudio_reviewing.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_setContext.png b/website/docs/assets/nukestudio_setContext.png deleted file mode 100644 index 8c8746a264..0000000000 Binary files a/website/docs/assets/nukestudio_setContext.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_softEffects.png b/website/docs/assets/nukestudio_softEffects.png deleted file mode 100644 index 13b92801fd..0000000000 Binary files a/website/docs/assets/nukestudio_softEffects.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_tagsToClips_basic.png b/website/docs/assets/nukestudio_tagsToClips_basic.png deleted file mode 100644 index fadb85342b..0000000000 Binary files a/website/docs/assets/nukestudio_tagsToClips_basic.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_workfiles_openCorrect.png b/website/docs/assets/nukestudio_workfiles_openCorrect.png deleted file mode 100644 index e097e50d9e..0000000000 Binary files a/website/docs/assets/nukestudio_workfiles_openCorrect.png and /dev/null differ diff --git a/website/docs/assets/nukestudio_workfiles_openingLimit.png b/website/docs/assets/nukestudio_workfiles_openingLimit.png deleted file mode 100644 index d0e893f4e5..0000000000 Binary files a/website/docs/assets/nukestudio_workfiles_openingLimit.png and /dev/null differ diff --git a/website/sidebars.js b/website/sidebars.js index 82f063e252..842d7a0a49 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -16,7 +16,7 @@ module.exports = { collapsed: false, label: "Integrations", items: [ - "artist_hosts_nukestudio", + "artist_hosts_hiero", "artist_hosts_nuke", "artist_hosts_maya", "artist_hosts_blender",