diff --git a/client/ayon_core/scripts/otio_burnin.py b/client/ayon_core/scripts/otio_burnin.py index f12d298ac6..6b132b9a6a 100644 --- a/client/ayon_core/scripts/otio_burnin.py +++ b/client/ayon_core/scripts/otio_burnin.py @@ -14,9 +14,10 @@ from ayon_core.lib import ( convert_ffprobe_fps_value, ) +FFMPEG_EXE_COMMAND = subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg")) FFMPEG = ( '{}%(input_args)s -i "%(input)s" %(filters)s %(args)s%(output)s' -).format(subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))) +).format(FFMPEG_EXE_COMMAND) DRAWTEXT = ( "drawtext@'%(label)s'=fontfile='%(font)s':text=\\'%(text)s\\':" @@ -482,10 +483,19 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): ) print("Launching command: {}".format(command)) + use_shell = True + try: + test_proc = subprocess.Popen( + f"{FFMPEG_EXE_COMMAND} --help", shell=True + ) + test_proc.wait() + except BaseException: + use_shell = False + kwargs = { "stdout": subprocess.PIPE, "stderr": subprocess.PIPE, - "shell": True, + "shell": use_shell, } proc = subprocess.Popen(command, **kwargs) diff --git a/client/ayon_core/tools/common_models/users.py b/client/ayon_core/tools/common_models/users.py index f8beb31aa1..f7939e5cd3 100644 --- a/client/ayon_core/tools/common_models/users.py +++ b/client/ayon_core/tools/common_models/users.py @@ -1,6 +1,80 @@ -import ayon_api +import json +import collections -from ayon_core.lib import CacheItem +import ayon_api +from ayon_api.graphql import FIELD_VALUE, GraphQlQuery, fields_to_dict + +from ayon_core.lib import NestedCacheItem + + +# --- Implementation that should be in ayon-python-api --- +# The implementation is not available in all versions of ayon-python-api. +def users_graphql_query(fields): + query = GraphQlQuery("Users") + names_var = query.add_variable("userNames", "[String!]") + project_name_var = query.add_variable("projectName", "String!") + + users_field = query.add_field_with_edges("users") + users_field.set_filter("names", names_var) + users_field.set_filter("projectName", project_name_var) + + nested_fields = fields_to_dict(set(fields)) + + query_queue = collections.deque() + for key, value in nested_fields.items(): + query_queue.append((key, value, users_field)) + + while query_queue: + item = query_queue.popleft() + key, value, parent = item + field = parent.add_field(key) + if value is FIELD_VALUE: + continue + + for k, v in value.items(): + query_queue.append((k, v, field)) + return query + + +def get_users(project_name=None, usernames=None, fields=None): + """Get Users. + + Only administrators and managers can fetch all users. For other users + it is required to pass in 'project_name' filter. + + Args: + project_name (Optional[str]): Project name. + usernames (Optional[Iterable[str]]): Filter by usernames. + fields (Optional[Iterable[str]]): Fields to be queried + for users. + + Returns: + Generator[dict[str, Any]]: Queried users. 
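For context, a minimal usage sketch of the get_users() helper defined above. The project and user names are hypothetical, and it assumes the default user fields include "name" and "accessGroups":

    # Iterate users with access to one project; accessGroups is already
    # parsed from JSON by the helper above.
    for user in get_users(project_name="demo_project", usernames={"artist01"}):
        print(user["name"], user["accessGroups"])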
+ + """ + filters = {} + if usernames is not None: + usernames = set(usernames) + if not usernames: + return + filters["userNames"] = list(usernames) + + if project_name is not None: + filters["projectName"] = project_name + + con = ayon_api.get_server_api_connection() + if not fields: + fields = con.get_default_fields_for_type("user") + + query = users_graphql_query(set(fields)) + for attr, filter_value in filters.items(): + query.set_variable_value(attr, filter_value) + + for parsed_data in query.continuous_query(con): + for user in parsed_data["users"]: + user["accessGroups"] = json.loads(user["accessGroups"]) + yield user +# --- END of ayon-python-api implementation --- class UserItem: @@ -32,19 +106,19 @@ class UserItem: class UsersModel: def __init__(self, controller): self._controller = controller - self._users_cache = CacheItem(default_factory=list) + self._users_cache = NestedCacheItem(default_factory=list) - def get_user_items(self): + def get_user_items(self, project_name): """Get user items. Returns: List[UserItem]: List of user items. """ - self._invalidate_cache() - return self._users_cache.get_data() + self._invalidate_cache(project_name) + return self._users_cache[project_name].get_data() - def get_user_items_by_name(self): + def get_user_items_by_name(self, project_name): """Get user items by name. Implemented as most of cases using this model will need to find @@ -56,10 +130,10 @@ class UsersModel: """ return { user_item.username: user_item - for user_item in self.get_user_items() + for user_item in self.get_user_items(project_name) } - def get_user_item_by_username(self, username): + def get_user_item_by_username(self, project_name, username): """Get user item by username. Args: @@ -69,16 +143,22 @@ class UsersModel: Union[UserItem, None]: User item or None if not found. 
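The model keeps one cache entry per project. A condensed sketch of that pattern, assuming NestedCacheItem exposes a CacheItem-like entry per key (which is what the calls in this patch rely on), with a hypothetical project name:

    users_cache = NestedCacheItem(default_factory=list)
    project_cache = users_cache["demo_project"]       # one entry per project
    if not project_cache.is_valid:
        # normally filled from get_users(project_name), as in _invalidate_cache
        project_cache.update_data([])
    user_items = project_cache.get_data()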
""" - self._invalidate_cache() - for user_item in self.get_user_items(): + self._invalidate_cache(project_name) + for user_item in self.get_user_items(project_name): if user_item.username == username: return user_item return None - def _invalidate_cache(self): - if self._users_cache.is_valid: + def _invalidate_cache(self, project_name): + cache = self._users_cache[project_name] + if cache.is_valid: return - self._users_cache.update_data([ + + if project_name is None: + cache.update_data([]) + return + + self._users_cache[project_name].update_data([ UserItem.from_entity_data(user) - for user in ayon_api.get_users() + for user in get_users(project_name) ]) diff --git a/client/ayon_core/tools/context_dialog/__init__.py b/client/ayon_core/tools/context_dialog/__init__.py index 4fb912fb62..8a77a46109 100644 --- a/client/ayon_core/tools/context_dialog/__init__.py +++ b/client/ayon_core/tools/context_dialog/__init__.py @@ -1,7 +1,8 @@ -from .window import ContextDialog, main +from .window import ContextDialog, main, ask_for_context __all__ = ( "ContextDialog", "main", + "ask_for_context" ) diff --git a/client/ayon_core/tools/context_dialog/window.py b/client/ayon_core/tools/context_dialog/window.py index 828d771142..ea5fdfbaec 100644 --- a/client/ayon_core/tools/context_dialog/window.py +++ b/client/ayon_core/tools/context_dialog/window.py @@ -791,3 +791,12 @@ def main( window.show() app.exec_() controller.store_output() + + +def ask_for_context(strict=True): + controller = ContextDialogController() + controller.set_strict(strict) + window = ContextDialog(controller=controller) + window.exec_() + + return controller.get_selected_context() diff --git a/client/ayon_core/tools/loader/abstract.py b/client/ayon_core/tools/loader/abstract.py index a1c1e6a062..ba1dcb73b6 100644 --- a/client/ayon_core/tools/loader/abstract.py +++ b/client/ayon_core/tools/loader/abstract.py @@ -177,7 +177,7 @@ class VersionItem: other_version = abs(other.version) # Hero version is greater than non-hero if version == other_version: - return self.is_hero + return not self.is_hero return version > other_version def __lt__(self, other): @@ -188,7 +188,7 @@ class VersionItem: other_version = abs(other.version) # Non-hero version is lesser than hero if version == other_version: - return not self.is_hero + return self.is_hero return version < other_version def __ge__(self, other): diff --git a/client/ayon_core/tools/sceneinventory/models/containers.py b/client/ayon_core/tools/sceneinventory/models/containers.py index 5230827ef6..95c5322343 100644 --- a/client/ayon_core/tools/sceneinventory/models/containers.py +++ b/client/ayon_core/tools/sceneinventory/models/containers.py @@ -90,7 +90,6 @@ class ContainerItem: representation_id, loader_name, namespace, - name, object_name, item_id ): @@ -98,7 +97,6 @@ class ContainerItem: self.loader_name = loader_name self.object_name = object_name self.namespace = namespace - self.name = name self.item_id = item_id @classmethod @@ -107,7 +105,6 @@ class ContainerItem: representation_id=container["representation"], loader_name=container["loader"], namespace=container["namespace"], - name=container["name"], object_name=container["objectName"], item_id=uuid.uuid4().hex, ) @@ -204,7 +201,7 @@ class ContainersModel: def get_container_items(self): self._update_cache() return list(self._items_cache) - + def get_container_items_by_id(self, item_ids): return { item_id: self._container_items_by_id.get(item_id) @@ -329,15 +326,25 @@ class ContainersModel: containers = list(host.ls()) else: containers = 
[] + container_items = [] containers_by_id = {} container_items_by_id = {} for container in containers: - item = ContainerItem.from_container_data(container) + try: + item = ContainerItem.from_container_data(container) + except Exception as e: + # skip item if required data are missing + self._controller.log_error( + f"Failed to create item: {e}" + ) + continue + containers_by_id[item.item_id] = container container_items_by_id[item.item_id] = item container_items.append(item) + self._containers_by_id = containers_by_id self._container_items_by_id = container_items_by_id self._items_cache = container_items diff --git a/client/ayon_core/tools/workfiles/abstract.py b/client/ayon_core/tools/workfiles/abstract.py index f345e20dca..330b413300 100644 --- a/client/ayon_core/tools/workfiles/abstract.py +++ b/client/ayon_core/tools/workfiles/abstract.py @@ -834,12 +834,13 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon): pass @abstractmethod - def get_workarea_file_items(self, folder_id, task_id): + def get_workarea_file_items(self, folder_id, task_name, sender=None): """Get workarea file items. Args: folder_id (str): Folder id. - task_id (str): Task id. + task_name (str): Task name. + sender (Optional[str]): Who requested workarea file items. Returns: list[FileItem]: List of workarea file items. @@ -905,12 +906,12 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon): pass @abstractmethod - def get_workfile_info(self, folder_id, task_id, filepath): + def get_workfile_info(self, folder_id, task_name, filepath): """Workfile info from database. Args: folder_id (str): Folder id. - task_id (str): Task id. + task_name (str): Task id. filepath (str): Workfile path. Returns: @@ -921,7 +922,7 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon): pass @abstractmethod - def save_workfile_info(self, folder_id, task_id, filepath, note): + def save_workfile_info(self, folder_id, task_name, filepath, note): """Save workfile info to database. At this moment the only information which can be saved about @@ -932,7 +933,7 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon): Args: folder_id (str): Folder id. - task_id (str): Task id. + task_name (str): Task id. filepath (str): Workfile path. note (Union[str, None]): Note. 
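Callers of the workfiles frontend now pass the task name instead of the task id, and the controller resolves the id internally. A hedged sketch of the new call pattern (folder id, task name, path and note are placeholders):

    items = controller.get_workarea_file_items(folder_id, "compositing")
    info = controller.get_workfile_info(folder_id, "compositing", filepath)
    controller.save_workfile_info(folder_id, "compositing", filepath, "WIP note")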
""" diff --git a/client/ayon_core/tools/workfiles/control.py b/client/ayon_core/tools/workfiles/control.py index 8fa9135bc0..31bdb2bab6 100644 --- a/client/ayon_core/tools/workfiles/control.py +++ b/client/ayon_core/tools/workfiles/control.py @@ -278,7 +278,8 @@ class BaseWorkfileController( ) def get_user_items_by_name(self): - return self._users_model.get_user_items_by_name() + project_name = self.get_current_project_name() + return self._users_model.get_user_items_by_name(project_name) # Host information def get_workfile_extensions(self): @@ -410,9 +411,11 @@ class BaseWorkfileController( return self._workfiles_model.get_workarea_dir_by_context( folder_id, task_id) - def get_workarea_file_items(self, folder_id, task_id): + def get_workarea_file_items(self, folder_id, task_name, sender=None): + task_id = self._get_task_id(folder_id, task_name) return self._workfiles_model.get_workarea_file_items( - folder_id, task_id) + folder_id, task_id, task_name + ) def get_workarea_save_as_data(self, folder_id, task_id): return self._workfiles_model.get_workarea_save_as_data( @@ -447,12 +450,14 @@ class BaseWorkfileController( return self._workfiles_model.get_published_file_items( folder_id, task_name) - def get_workfile_info(self, folder_id, task_id, filepath): + def get_workfile_info(self, folder_id, task_name, filepath): + task_id = self._get_task_id(folder_id, task_name) return self._workfiles_model.get_workfile_info( folder_id, task_id, filepath ) - def save_workfile_info(self, folder_id, task_id, filepath, note): + def save_workfile_info(self, folder_id, task_name, filepath, note): + task_id = self._get_task_id(folder_id, task_name) self._workfiles_model.save_workfile_info( folder_id, task_id, filepath, note ) @@ -627,6 +632,17 @@ class BaseWorkfileController( def _emit_event(self, topic, data=None): self.emit_event(topic, data, "controller") + def _get_task_id(self, folder_id, task_name, sender=None): + task_item = self._hierarchy_model.get_task_item_by_name( + self.get_current_project_name(), + folder_id, + task_name, + sender + ) + if not task_item: + return None + return task_item.id + # Expected selection # - expected selection is used to restore selection after refresh # or when current context should be used @@ -722,7 +738,7 @@ class BaseWorkfileController( self._host_save_workfile(dst_filepath) # Make sure workfile info exists - self.save_workfile_info(folder_id, task_id, dst_filepath, None) + self.save_workfile_info(folder_id, task_name, dst_filepath, None) # Create extra folders create_workdir_extra_folders( diff --git a/client/ayon_core/tools/workfiles/models/workfiles.py b/client/ayon_core/tools/workfiles/models/workfiles.py index c93bbb6637..a268a9bd0e 100644 --- a/client/ayon_core/tools/workfiles/models/workfiles.py +++ b/client/ayon_core/tools/workfiles/models/workfiles.py @@ -1,6 +1,7 @@ import os import re import copy +import uuid import arrow import ayon_api @@ -173,7 +174,7 @@ class WorkareaModel: folder_mapping[task_id] = workdir return workdir - def get_file_items(self, folder_id, task_id): + def get_file_items(self, folder_id, task_id, task_name): items = [] if not folder_id or not task_id: return items @@ -192,7 +193,7 @@ class WorkareaModel: continue workfile_info = self._controller.get_workfile_info( - folder_id, task_id, filepath + folder_id, task_name, filepath ) modified = os.path.getmtime(filepath) items.append(FileItem( @@ -587,6 +588,7 @@ class WorkfileEntitiesModel: username = self._get_current_username() workfile_info = { + "id": uuid.uuid4().hex, "path": 
rootless_path, "taskId": task_id, "attrib": { @@ -770,19 +772,21 @@ class WorkfilesModel: return self._workarea_model.get_workarea_dir_by_context( folder_id, task_id) - def get_workarea_file_items(self, folder_id, task_id): + def get_workarea_file_items(self, folder_id, task_id, task_name): """Workfile items for passed context from workarea. Args: folder_id (Union[str, None]): Folder id. task_id (Union[str, None]): Task id. + task_name (Union[str, None]): Task name. Returns: list[FileItem]: List of file items matching workarea of passed context. """ - - return self._workarea_model.get_file_items(folder_id, task_id) + return self._workarea_model.get_file_items( + folder_id, task_id, task_name + ) def get_workarea_save_as_data(self, folder_id, task_id): return self._workarea_model.get_workarea_save_as_data( diff --git a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py index 5c102dcdd4..7f76b6a8ab 100644 --- a/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py +++ b/client/ayon_core/tools/workfiles/widgets/files_widget_workarea.py @@ -66,7 +66,7 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): self._empty_item_used = False self._published_mode = False self._selected_folder_id = None - self._selected_task_id = None + self._selected_task_name = None self._add_missing_context_item() @@ -153,7 +153,7 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): def _on_task_changed(self, event): self._selected_folder_id = event["folder_id"] - self._selected_task_id = event["task_id"] + self._selected_task_name = event["task_name"] if not self._published_mode: self._fill_items() @@ -179,13 +179,13 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel): def _fill_items_impl(self): folder_id = self._selected_folder_id - task_id = self._selected_task_id - if not folder_id or not task_id: + task_name = self._selected_task_name + if not folder_id or not task_name: self._add_missing_context_item() return file_items = self._controller.get_workarea_file_items( - folder_id, task_id + folder_id, task_name ) root_item = self.invisibleRootItem() if not file_items: diff --git a/client/ayon_core/tools/workfiles/widgets/side_panel.py b/client/ayon_core/tools/workfiles/widgets/side_panel.py index 53fdf0e0ac..7ba60b5544 100644 --- a/client/ayon_core/tools/workfiles/widgets/side_panel.py +++ b/client/ayon_core/tools/workfiles/widgets/side_panel.py @@ -75,7 +75,7 @@ class SidePanelWidget(QtWidgets.QWidget): self._btn_note_save = btn_note_save self._folder_id = None - self._task_id = None + self._task_name = None self._filepath = None self._orig_note = "" self._controller = controller @@ -93,10 +93,10 @@ class SidePanelWidget(QtWidgets.QWidget): def _on_selection_change(self, event): folder_id = event["folder_id"] - task_id = event["task_id"] + task_name = event["task_name"] filepath = event["path"] - self._set_context(folder_id, task_id, filepath) + self._set_context(folder_id, task_name, filepath) def _on_note_change(self): text = self._note_input.toPlainText() @@ -106,19 +106,19 @@ class SidePanelWidget(QtWidgets.QWidget): note = self._note_input.toPlainText() self._controller.save_workfile_info( self._folder_id, - self._task_id, + self._task_name, self._filepath, note ) self._orig_note = note self._btn_note_save.setEnabled(False) - def _set_context(self, folder_id, task_id, filepath): + def _set_context(self, folder_id, task_name, filepath): workfile_info = None # Check if folder, task and file are selected - if 
bool(folder_id) and bool(task_id) and bool(filepath): + if bool(folder_id) and bool(task_name) and bool(filepath): workfile_info = self._controller.get_workfile_info( - folder_id, task_id, filepath + folder_id, task_name, filepath ) enabled = workfile_info is not None @@ -127,7 +127,7 @@ class SidePanelWidget(QtWidgets.QWidget): self._btn_note_save.setEnabled(enabled) self._folder_id = folder_id - self._task_id = task_id + self._task_name = task_name self._filepath = filepath # Disable inputs and remove texts if any required arguments are diff --git a/server_addon/max/client/ayon_max/api/lib.py b/server_addon/max/client/ayon_max/api/lib.py index eb22dbafd2..7acc18196f 100644 --- a/server_addon/max/client/ayon_max/api/lib.py +++ b/server_addon/max/client/ayon_max/api/lib.py @@ -272,10 +272,8 @@ def reset_frame_range(fps: bool = True): scene frame rate in frames-per-second. """ if fps: - task_entity = get_current_task_entity() - task_attributes = task_entity["attrib"] - fps_number = float(task_attributes["fps"]) - rt.frameRate = fps_number + rt.frameRate = float(get_fps_for_current_context()) + frame_range = get_frame_range() set_timeline( @@ -284,6 +282,22 @@ def reset_frame_range(fps: bool = True): frame_range["frameStartHandle"], frame_range["frameEndHandle"]) +def get_fps_for_current_context(): + """Get fps that should be set for current context. + + Todos: + - Skip project value. + - Merge logic with 'get_frame_range' and 'reset_scene_resolution' -> + all the values in the functions can be collected at one place as + they have same requirements. + + Returns: + Union[int, float]: FPS value. + """ + task_entity = get_current_task_entity(fields={"attrib"}) + return task_entity["attrib"]["fps"] + + def reset_unit_scale(): """Apply the unit scale setting to 3dsMax """ @@ -358,7 +372,7 @@ def is_headless(): def set_timeline(frameStart, frameEnd): """Set frame range for timeline editor in Max """ - rt.animationRange = rt.interval(frameStart, frameEnd) + rt.animationRange = rt.interval(int(frameStart), int(frameEnd)) return rt.animationRange diff --git a/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py b/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py index d7def3d0ba..87ea5c75bc 100644 --- a/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py +++ b/server_addon/max/client/ayon_max/plugins/load/load_pointcache.py @@ -7,7 +7,7 @@ Because of limited api, alembics can be only loaded, but not easily updated. 
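The Alembic loader below calls reset_frame_range() before importing. Condensed, that amounts to roughly the following, using the ayon_max.api.lib helpers changed above (pymxs runtime assumed available):

    from pymxs import runtime as rt

    rt.frameRate = float(get_fps_for_current_context())   # task FPS
    frame_range = get_frame_range()
    set_timeline(frame_range["frameStartHandle"], frame_range["frameEndHandle"])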
import os from ayon_core.pipeline import load, get_representation_path from ayon_max.api import lib, maintained_selection -from ayon_max.api.lib import unique_namespace +from ayon_max.api.lib import unique_namespace, reset_frame_range from ayon_max.api.pipeline import ( containerise, get_previous_loaded_object, @@ -38,6 +38,9 @@ class AbcLoader(load.LoaderPlugin): } rt.AlembicImport.ImportToRoot = False + # TODO: it will be removed after the improvement + # on the post-system setup + reset_frame_range() rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport) abc_after = { diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py b/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py index ea4cdb57fe..d98b0dd5fa 100644 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py +++ b/server_addon/maya/client/ayon_maya/plugins/create/create_animation_pointcache.py @@ -8,7 +8,7 @@ from ayon_core.lib import ( ) -def _get_animation_attr_defs(cls): +def _get_animation_attr_defs(): """Get Animation generic definitions.""" defs = lib.collect_animation_defs() defs.extend( @@ -99,9 +99,7 @@ class CreateAnimation(plugin.MayaHiddenCreator): return node_data def get_instance_attr_defs(self): - defs = super(CreateAnimation, self).get_instance_attr_defs() - defs += _get_animation_attr_defs(self) - return defs + return _get_animation_attr_defs() class CreatePointCache(plugin.MayaCreator): @@ -123,9 +121,7 @@ class CreatePointCache(plugin.MayaCreator): return node_data def get_instance_attr_defs(self): - defs = super(CreatePointCache, self).get_instance_attr_defs() - defs += _get_animation_attr_defs(self) - return defs + return _get_animation_attr_defs() def create(self, product_name, instance_data, pre_create_data): instance = super(CreatePointCache, self).create( diff --git a/server_addon/maya/client/ayon_maya/plugins/create/create_look.py b/server_addon/maya/client/ayon_maya/plugins/create/create_look.py index 1f90d18607..3e1ec103ba 100644 --- a/server_addon/maya/client/ayon_maya/plugins/create/create_look.py +++ b/server_addon/maya/client/ayon_maya/plugins/create/create_look.py @@ -42,6 +42,6 @@ class CreateLook(plugin.MayaCreator): def get_pre_create_attr_defs(self): # Show same attributes on create but include use selection - defs = super(CreateLook, self).get_pre_create_attr_defs() + defs = list(super().get_pre_create_attr_defs()) defs.extend(self.get_instance_attr_defs()) return defs diff --git a/server_addon/maya/client/ayon_maya/version.py b/server_addon/maya/client/ayon_maya/version.py index 1655067287..fcad19941f 100644 --- a/server_addon/maya/client/ayon_maya/version.py +++ b/server_addon/maya/client/ayon_maya/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring AYON addon 'maya' version.""" -__version__ = "0.2.2" +__version__ = "0.2.3" diff --git a/server_addon/maya/package.py b/server_addon/maya/package.py index 627e824413..e0fc2ee5cf 100644 --- a/server_addon/maya/package.py +++ b/server_addon/maya/package.py @@ -1,6 +1,6 @@ name = "maya" title = "Maya" -version = "0.2.2" +version = "0.2.3" client_dir = "ayon_maya" ayon_required_addons = { diff --git a/server_addon/nuke/client/ayon_nuke/api/pipeline.py b/server_addon/nuke/client/ayon_nuke/api/pipeline.py index ad8e17b1f6..2ba430c272 100644 --- a/server_addon/nuke/client/ayon_nuke/api/pipeline.py +++ b/server_addon/nuke/client/ayon_nuke/api/pipeline.py @@ -37,8 +37,6 @@ from .lib import ( INSTANCE_DATA_KNOB, 
get_main_window, WorkfileSettings, - # TODO: remove this once workfile builder will be removed - process_workfile_builder, start_workfile_template_builder, launch_workfiles_app, check_inventory_versions, @@ -67,6 +65,7 @@ from .workio import ( current_file ) from .constants import ASSIST +from . import push_to_project log = Logger.get_logger(__name__) @@ -159,9 +158,6 @@ def add_nuke_callbacks(): # template builder callbacks nuke.addOnCreate(start_workfile_template_builder, nodeClass="Root") - # TODO: remove this callback once workfile builder will be removed - nuke.addOnCreate(process_workfile_builder, nodeClass="Root") - # fix ffmpeg settings on script nuke.addOnScriptLoad(on_script_load) @@ -332,6 +328,11 @@ def _install_menu(): lambda: update_placeholder() ) + menu.addCommand( + "Push to Project", + lambda: push_to_project.main() + ) + menu.addSeparator() menu.addCommand( "Experimental tools...", diff --git a/server_addon/nuke/client/ayon_nuke/api/push_to_project.py b/server_addon/nuke/client/ayon_nuke/api/push_to_project.py new file mode 100644 index 0000000000..852e5d0e31 --- /dev/null +++ b/server_addon/nuke/client/ayon_nuke/api/push_to_project.py @@ -0,0 +1,118 @@ +from collections import defaultdict +import shutil +import os + +from ayon_api import get_project, get_folder_by_id, get_task_by_id +from ayon_core.settings import get_project_settings +from ayon_core.pipeline import Anatomy, registered_host +from ayon_core.pipeline.template_data import get_template_data +from ayon_core.pipeline.workfile import get_workdir_with_workdir_data +from ayon_core.tools import context_dialog + +from .utils import bake_gizmos_recursively +from .lib import MENU_LABEL + +import nuke + + +def bake_container(container): + """Bake containers to read nodes.""" + + node = container["node"] + + # Fetch knobs to remove in order. + knobs_to_remove = [] + remove = False + for count in range(0, node.numKnobs()): + knob = node.knob(count) + + # All knobs from "AYON" tab knob onwards. + if knob.name() == MENU_LABEL: + remove = True + + if remove: + knobs_to_remove.append(knob) + + # Dont remove knobs from "containerId" onwards. + if knob.name() == "containerId": + remove = False + + # Knobs needs to be remove in reverse order, because child knobs needs to + # be remove first. + for knob in reversed(knobs_to_remove): + node.removeKnob(knob) + + node["tile_color"].setValue(0) + + +def main(): + context = context_dialog.ask_for_context() + + if context is None: + return + + # Get workfile path to save to. + project_name = context["project_name"] + project = get_project(project_name) + folder = get_folder_by_id(project_name, context["folder_id"]) + task = get_task_by_id(project_name, context["task_id"]) + host = registered_host() + project_settings = get_project_settings(project_name) + anatomy = Anatomy(project_name) + + workdir_data = get_template_data( + project, folder, task, host.name, project_settings + ) + + workdir = get_workdir_with_workdir_data( + workdir_data, + project_name, + anatomy, + project_settings=project_settings + ) + # Save current workfile. + current_file = host.current_file() + host.save_file(current_file) + + for container in host.ls(): + bake_container(container) + + # Bake gizmos. + bake_gizmos_recursively() + + # Copy all read node files to "resources" folder next to workfile and + # change file path. 
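The loop that follows evaluates every frame in the root frame range so that sequence paths and expression-driven file knobs resolve to the concrete files on disk, roughly:

    nuke.frame(1001)                                # illustrative frame number
    path = nuke.filename(read_node, nuke.REPLACE)   # e.g. ".../plate.1001.exr"

Here read_node stands for any node returned by nuke.allNodes(filter="Read").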
+ first_frame = int(nuke.root()["first_frame"].value()) + last_frame = int(nuke.root()["last_frame"].value()) + files_by_node_name = defaultdict(set) + nodes_by_name = {} + for count in range(first_frame, last_frame + 1): + nuke.frame(count) + for node in nuke.allNodes(filter="Read"): + files_by_node_name[node.name()].add( + nuke.filename(node, nuke.REPLACE) + ) + nodes_by_name[node.name()] = node + + resources_dir = os.path.join(workdir, "resources") + for name, files in files_by_node_name.items(): + dir = os.path.join(resources_dir, name) + if not os.path.exists(dir): + os.makedirs(dir) + + for f in files: + shutil.copy(f, os.path.join(dir, os.path.basename(f))) + + node = nodes_by_name[name] + path = node["file"].value().replace(os.path.dirname(f), dir) + node["file"].setValue(path.replace("\\", "/")) + + # Save current workfile to new context. + pushed_workfile = os.path.join( + workdir, os.path.basename(current_file)) + host.save_file(pushed_workfile) + + # Open current context workfile. + host.open_file(current_file) + + nuke.message(f"Pushed to project: \n{pushed_workfile}") diff --git a/server_addon/resolve/client/ayon_resolve/api/lib.py b/server_addon/resolve/client/ayon_resolve/api/lib.py index b9ad81c79d..829c72b80a 100644 --- a/server_addon/resolve/client/ayon_resolve/api/lib.py +++ b/server_addon/resolve/client/ayon_resolve/api/lib.py @@ -145,7 +145,9 @@ def get_new_timeline(timeline_name: str = None): return new_timeline -def create_bin(name: str, root: object = None) -> object: +def create_bin(name: str, + root: object = None, + set_as_current: bool = True) -> object: """ Create media pool's folder. @@ -156,6 +158,8 @@ def create_bin(name: str, root: object = None) -> object: Args: name (str): name of folder / bin, or hierarchycal name "parent/name" root (resolve.Folder)[optional]: root folder / bin object + set_as_current (resolve.Folder)[optional]: Whether to set the + resulting bin as current folder or not. Returns: object: resolve.Folder @@ -168,22 +172,24 @@ def create_bin(name: str, root: object = None) -> object: if "/" in name.replace("\\", "/"): child_bin = None for bname in name.split("/"): - child_bin = create_bin(bname, child_bin or root_bin) + child_bin = create_bin(bname, + root=child_bin or root_bin, + set_as_current=set_as_current) if child_bin: return child_bin else: - created_bin = None + # Find existing folder or create it for subfolder in root_bin.GetSubFolderList(): - if subfolder.GetName() in name: + if subfolder.GetName() == name: created_bin = subfolder - - if not created_bin: - new_folder = media_pool.AddSubFolder(root_bin, name) - media_pool.SetCurrentFolder(new_folder) + break else: + created_bin = media_pool.AddSubFolder(root_bin, name) + + if set_as_current: media_pool.SetCurrentFolder(created_bin) - return media_pool.GetCurrentFolder() + return created_bin def remove_media_pool_item(media_pool_item: object) -> bool: @@ -272,8 +278,7 @@ def create_timeline_item( # get all variables project = get_current_project() media_pool = project.GetMediaPool() - _clip_property = media_pool_item.GetClipProperty - clip_name = _clip_property("File Name") + clip_name = media_pool_item.GetClipProperty("File Name") timeline = timeline or get_current_timeline() # timing variables @@ -298,16 +303,22 @@ def create_timeline_item( if source_end: clip_data["endFrame"] = source_end if timecode_in: + # Note: specifying a recordFrame will fail to place the timeline + # item if there's already an existing clip at that time on the + # active track. 
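Relating back to the create_bin() changes earlier in this file: nested bins can now be created without switching the media pool's current folder. A usage sketch with a hypothetical bin path:

    chars_bin = create_bin("Loader/assets/chars", set_as_current=False)
    # each path level is created (or reused) in turn; the current folder is
    # only changed when set_as_current is True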
clip_data["recordFrame"] = timeline_in # add to timeline - media_pool.AppendToTimeline([clip_data]) + output_timeline_item = media_pool.AppendToTimeline([clip_data])[0] - output_timeline_item = get_timeline_item( - media_pool_item, timeline) + # Adding the item may fail whilst Resolve will still return a + # TimelineItem instance - however all `Get*` calls return None + # Hence, we check whether the result is valid + if output_timeline_item.GetDuration() is None: + output_timeline_item = None assert output_timeline_item, AssertionError(( - "Clip name '{}' was't created on the timeline: '{}' \n\n" + "Clip name '{}' wasn't created on the timeline: '{}' \n\n" "Please check if correct track position is activated, \n" "or if a clip is not already at the timeline in \n" "position: '{}' out: '{}'. \n\n" @@ -947,3 +958,13 @@ def get_reformated_path(path, padded=False, first=False): else: path = re.sub(num_pattern, "%d", path) return path + + +def iter_all_media_pool_clips(): + """Recursively iterate all media pool clips in current project""" + root = get_current_project().GetMediaPool().GetRootFolder() + queue = [root] + for folder in queue: + for clip in folder.GetClipList(): + yield clip + queue.extend(folder.GetSubFolderList()) diff --git a/server_addon/resolve/client/ayon_resolve/api/pipeline.py b/server_addon/resolve/client/ayon_resolve/api/pipeline.py index d6d6dc799e..05d2c9bcd1 100644 --- a/server_addon/resolve/client/ayon_resolve/api/pipeline.py +++ b/server_addon/resolve/client/ayon_resolve/api/pipeline.py @@ -2,6 +2,7 @@ Basic avalon integration """ import os +import json import contextlib from collections import OrderedDict @@ -12,6 +13,7 @@ from ayon_core.pipeline import ( schema, register_loader_plugin_path, register_creator_plugin_path, + register_inventory_action_path, AVALON_CONTAINER_ID, ) from ayon_core.host import ( @@ -38,6 +40,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") AVALON_CONTAINERS = ":AVALON_CONTAINERS" @@ -65,6 +68,7 @@ class ResolveHost(HostBase, IWorkfileHost, ILoadHost): register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", @@ -145,6 +149,26 @@ def ls(): and the Maya equivalent, which is in `avalon.maya.pipeline` """ + # Media Pool instances from Load Media loader + for clip in lib.iter_all_media_pool_clips(): + data = clip.GetMetadata(lib.pype_tag_name) + if not data: + continue + data = json.loads(data) + + # If not all required data, skip it + required = ['schema', 'id', 'loader', 'representation'] + if not all(key in data for key in required): + continue + + container = {key: data[key] for key in required} + container["objectName"] = clip.GetName() # Get path in folders + container["namespace"] = clip.GetName() + container["name"] = clip.GetUniqueId() + container["_item"] = clip + yield container + + # Timeline instances from Load Clip loader # get all track items from current timeline all_timeline_items = lib.get_current_timeline_items(filter=False) diff --git a/server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py b/server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py new file mode 100644 index 
0000000000..7ea55dc1ff --- /dev/null +++ b/server_addon/resolve/client/ayon_resolve/plugins/inventory/remove_unused_media_pool_items.py @@ -0,0 +1,31 @@ +from ayon_core.pipeline import ( + InventoryAction, +) +from ayon_core.pipeline.load.utils import remove_container + + +class RemoveUnusedMedia(InventoryAction): + + label = "Remove Unused Selected Media" + icon = "trash" + + @staticmethod + def is_compatible(container): + return ( + container.get("loader") == "LoadMedia" + ) + + def process(self, containers): + any_removed = False + for container in containers: + media_pool_item = container["_item"] + usage = int(media_pool_item.GetClipProperty("Usage")) + name = media_pool_item.GetName() + if usage == 0: + print(f"Removing {name}") + remove_container(container) + any_removed = True + else: + print(f"Keeping {name} with usage: {usage}") + + return any_removed diff --git a/server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py b/server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py new file mode 100644 index 0000000000..c1aaeca6bd --- /dev/null +++ b/server_addon/resolve/client/ayon_resolve/plugins/load/load_media.py @@ -0,0 +1,533 @@ +import json +import contextlib +from pathlib import Path +from collections import defaultdict +from typing import Union, List, Optional, TypedDict, Tuple + +from ayon_api import version_is_latest +from ayon_core.lib import StringTemplate +from ayon_core.pipeline.colorspace import get_remapped_colorspace_to_native +from ayon_core.pipeline import ( + Anatomy, + LoaderPlugin, + get_representation_path, + registered_host +) +from ayon_core.pipeline.load import get_representation_path_with_anatomy +from ayon_core.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) +from ayon_core.lib import BoolDef +from ayon_resolve.api import lib +from ayon_resolve.api.pipeline import AVALON_CONTAINER_ID + + +FRAME_SPLITTER = "__frame_splitter__" + + +class MetadataEntry(TypedDict): + """Metadata entry is dict with {"name": "key", "value: "value"}""" + name: str + value: str + + +@contextlib.contextmanager +def project_color_science_mode(project=None, mode="davinciYRGBColorManagedv2"): + """Set project color science mode during context. + + This is especially useful as context for setting the colorspace for media + pool items, because when Resolve is not set to `davinciYRGBColorManagedv2` + it fails to set its "Input Color Space" clip property even though it is + accessible and settable via the Resolve User Interface. + + Args + project (Project): The active Resolve Project. + mode (Optional[str]): The color science mode to apply during the + context. Defaults to 'davinciYRGBColorManagedv2' + + See Also: + https://forum.blackmagicdesign.com/viewtopic.php?f=21&t=197441 + """ + + if project is None: + project = lib.get_current_project() + + original_mode = project.GetSetting("colorScienceMode") + if original_mode != mode: + project.SetSetting("colorScienceMode", mode) + try: + yield + finally: + if project.GetSetting("colorScienceMode") != original_mode: + project.SetSetting("colorScienceMode", original_mode) + + +def set_colorspace(media_pool_item, + colorspace, + mode="davinciYRGBColorManagedv2"): + """Set MediaPoolItem colorspace. + + This implements a workaround that you cannot set the input colorspace + unless the Resolve project's color science mode is set to + `davinciYRGBColorManagedv2`. + + Args: + media_pool_item (MediaPoolItem): The media pool item. + colorspace (str): The colorspace to apply. 
+ mode (Optional[str]): The Resolve project color science mode to be in + while setting the colorspace. + Defaults to 'davinciYRGBColorManagedv2' + + Returns: + bool: Whether applying the colorspace succeeded. + """ + with project_color_science_mode(mode=mode): + return media_pool_item.SetClipProperty("Input Color Space", colorspace) + + +def find_clip_usage(media_pool_item, project=None): + """Return all Timeline Items in the project using the Media Pool Item. + + Each entry in the list is a tuple of Timeline and TimelineItem so that + it's easy to know which Timeline the TimelineItem belongs to. + + Arguments: + media_pool_item (MediaPoolItem): The Media Pool Item to search for. + project (Project): The resolve project the media pool item resides in. + + Returns: + List[Tuple[Timeline, TimelineItem]]: A 2-tuple of a timeline with + the timeline item. + + """ + usage = int(media_pool_item.GetClipProperty("Usage")) + if not usage: + return [] + + if project is None: + project = lib.get_current_project() + + matching_items = [] + unique_id = media_pool_item.GetUniqueId() + for timeline_idx in range(project.GetTimelineCount()): + timeline = project.GetTimelineByIndex(timeline_idx + 1) + + # Consider audio and video tracks + for track_type in ["video", "audio"]: + for track_idx in range(timeline.GetTrackCount(track_type)): + timeline_items = timeline.GetItemListInTrack(track_type, + track_idx + 1) + for timeline_item in timeline_items: + timeline_item_mpi = timeline_item.GetMediaPoolItem() + if not timeline_item_mpi: + continue + + if timeline_item_mpi.GetUniqueId() == unique_id: + matching_items.append((timeline, timeline_item)) + usage -= 1 + if usage <= 0: + # If there should be no usage left after this found + # entry we return early + return matching_items + + return matching_items + + +class LoadMedia(LoaderPlugin): + """Load product as media pool item.""" + + product_types = {"render2d", "source", "plate", "render", "review"} + + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) + + label = "Load media" + order = -20 + icon = "code-fork" + color = "orange" + + options = [ + BoolDef( + "load_to_timeline", + label="Load to timeline", + default=True, + tooltip="Whether on load to automatically add it to the current " + "timeline" + ), + BoolDef( + "load_once", + label="Re-use existing", + default=True, + tooltip="When enabled - if this particular version is already" + "loaded it will not be loaded again but will be re-used." + ) + ] + + # for loader multiselection + timeline = None + + # presets + clip_color_last = "Olive" + clip_color_old = "Orange" + + media_pool_bin_path = "Loader/{folder[path]}" + + metadata: List[MetadataEntry] = [] + + # cached on apply settings + _host_imageio_settings = None + + @classmethod + def apply_settings(cls, project_settings): + super(LoadMedia, cls).apply_settings(project_settings) + cls._host_imageio_settings = project_settings["resolve"]["imageio"] + + def load(self, context, name, namespace, options): + + # For loading multiselection, we store timeline before first load + # because the current timeline can change with the imported media. 
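As a quick aside on the colorspace helpers defined above: set_colorspace() wraps the clip property call in the project_color_science_mode() context manager so the property is actually settable. The colorspace name below is illustrative:

    applied = set_colorspace(media_pool_item, "ACES - ACEScg")
    if not applied:
        print("Resolve refused the Input Color Space value")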
+ if self.timeline is None: + self.timeline = lib.get_current_timeline() + + representation = context["representation"] + self._project_name = context["project"]["name"] + + project = lib.get_current_project() + media_pool = project.GetMediaPool() + + # Allow to use an existing media pool item and re-use it + item = None + if options.get("load_once", True): + host = registered_host() + repre_id = context["representation"]["id"] + for container in host.ls(): + if container["representation"] != repre_id: + continue + + if container["loader"] != self.__class__.__name__: + continue + + print(f"Re-using existing container: {container}") + item = container["_item"] + + if item is None: + item = self._import_media_to_bin(context, media_pool, representation) + # Always update clip color - even if re-using existing clip + color = self.get_item_color(context) + item.SetClipColor(color) + + if options.get("load_to_timeline", True): + timeline = options.get("timeline", self.timeline) + if timeline: + # Add media to active timeline + lib.create_timeline_item( + media_pool_item=item, + timeline=timeline + ) + + def _import_media_to_bin( + self, context, media_pool, representation + ): + """Import media to Resolve Media Pool. + + Also create a bin if `media_pool_bin_path` is set. + + Args: + context (dict): The context dictionary. + media_pool (resolve.MediaPool): The Resolve Media Pool. + representation (dict): The representation data. + + Returns: + resolve.MediaPoolItem: The imported media pool item. + """ + # Create or set the bin folder, we add it in there + # If bin path is not set we just add into the current active bin + if self.media_pool_bin_path: + media_pool_bin_path = StringTemplate( + self.media_pool_bin_path).format_strict(context) + + folder = lib.create_bin( + # double slashes will create unconnected folders + name=media_pool_bin_path.replace("//", "/"), + root=media_pool.GetRootFolder(), + set_as_current=False + ) + media_pool.SetCurrentFolder(folder) + + # Import media + # Resolve API: ImportMedia function requires a list of dictionaries + # with keys "FilePath", "StartIndex" and "EndIndex" for sequences + # but only string with absolute path for single files. 
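The two ImportMedia call shapes described in the comment above, with made-up paths and frame numbers:

    media_pool.ImportMedia(["/path/to/plate.mov"])      # single file: plain path
    media_pool.ImportMedia([{                           # sequence: dict with range
        "FilePath": "/path/to/plate.%04d.exr",
        "StartIndex": 1001,
        "EndIndex": 1100,
    }])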
+ is_sequence, file_info = self._get_file_info(context) + items = ( + media_pool.ImportMedia([file_info]) + if is_sequence + else media_pool.ImportMedia([file_info["FilePath"]]) + ) + assert len(items) == 1, "Must import only one media item" + + result = items[0] + + self._set_metadata(result, context) + self._set_colorspace_from_representation(result, representation) + + data = self._get_container_data(context) + + # Add containerise data only needed on first load + data.update({ + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "loader": str(self.__class__.__name__), + }) + + result.SetMetadata(lib.pype_tag_name, json.dumps(data)) + + return result + + def switch(self, container, context): + self.update(container, context) + + def update(self, container, context): + # Update MediaPoolItem filepath and metadata + item = container["_item"] + + # Get the existing metadata before we update because the + # metadata gets removed + data = json.loads(item.GetMetadata(lib.pype_tag_name)) + + # Get metadata to preserve after the clip replacement + # TODO: Maybe preserve more, like LUT, Alpha Mode, Input Sizing Preset + colorspace_before = item.GetClipProperty("Input Color Space") + + # Update path + path = get_representation_path(context["representation"]) + success = item.ReplaceClip(path) + if not success: + raise RuntimeError( + f"Failed to replace media pool item clip to filepath: {path}" + ) + + # Update the metadata + update_data = self._get_container_data(context) + data.update(update_data) + item.SetMetadata(lib.pype_tag_name, json.dumps(data)) + + self._set_metadata(media_pool_item=item, context=context) + self._set_colorspace_from_representation( + item, + representation=context["representation"] + ) + + # If no specific colorspace is set then we want to preserve the + # colorspace a user might have set before the clip replacement + if ( + item.GetClipProperty("Input Color Space") == "Project" + and colorspace_before != "Project" + ): + result = set_colorspace(item, colorspace_before) + if not result: + self.log.warning( + f"Failed to re-apply colorspace: {colorspace_before}." + ) + + # Update the clip color + color = self.get_item_color(context) + item.SetClipColor(color) + + def remove(self, container): + # Remove MediaPoolItem entry + project = lib.get_current_project() + media_pool = project.GetMediaPool() + item = container["_item"] + + # Delete any usages of the media pool item so there's no trail + # left in existing timelines. Currently only the media pool item + # gets removed which fits the Resolve workflow but is confusing + # artists + usage = find_clip_usage(media_pool_item=item, project=project) + if usage: + # Group all timeline items per timeline, so we can delete the clips + # in the timeline at once. 
The Resolve objects are not hashable, so + # we need to store them in the dict by id + usage_by_timeline = defaultdict(list) + timeline_by_id = {} + for timeline, timeline_item in usage: + timeline_id = timeline.GetUniqueId() + timeline_by_id[timeline_id] = timeline + usage_by_timeline[timeline.GetUniqueId()].append(timeline_item) + + for timeline_id, timeline_items in usage_by_timeline.items(): + timeline = timeline_by_id[timeline_id] + timeline.DeleteClips(timeline_items) + + # Delete the media pool item + media_pool.DeleteClips([item]) + + def _get_container_data(self, context: dict) -> dict: + """Return metadata related to the representation and version.""" + + # add additional metadata from the version to imprint AYON knob + version = context["version"] + data = {} + + # version.attrib + for key in [ + "frameStart", "frameEnd", + "handleStart", "handleEnd", + "source", "fps", "colorSpace" + ]: + data[key] = version["attrib"][key] + + # version.data + for key in ["author"]: + data[key] = version["data"][key] + + # add variables related to version context + data.update({ + "representation": context["representation"]["id"], + "version": version["name"], + }) + + return data + + @classmethod + def get_item_color(cls, context: dict) -> str: + """Return item color name. + + Coloring depends on whether representation is the latest version. + """ + # Compare version with last version + # set clip colour + if version_is_latest(project_name=context["project"]["name"], + version_id=context["version"]["id"]): + return cls.clip_color_last + else: + return cls.clip_color_old + + def _set_metadata(self, media_pool_item, context: dict): + """Set Media Pool Item Clip Properties""" + + # Set more clip metadata based on the loaded clip's context + for meta_item in self.metadata: + clip_property = meta_item["name"] + value = meta_item["value"] + value_formatted = StringTemplate(value).format_strict(context) + media_pool_item.SetClipProperty(clip_property, value_formatted) + + def _get_file_info(self, context: dict) -> Tuple[bool, Union[str, dict]]: + """Return file info for Resolve ImportMedia. + + Args: + context (dict): The context dictionary. + + Returns: + Tuple[bool, Union[str, dict]]: A tuple of whether the file is a + sequence and the file info dictionary. + """ + + representation = context["representation"] + anatomy = Anatomy(self._project_name) + + # Get path to representation with correct frame number + repre_path = get_representation_path_with_anatomy( + representation, anatomy) + + first_frame = representation["context"].get("frame") + + is_sequence = False + # is not sequence + if first_frame is None: + return ( + is_sequence, {"FilePath": repre_path} + ) + + # This is sequence + is_sequence = True + repre_files = [ + file["path"].format(root=anatomy.roots) + for file in representation["files"] + ] + + # Change frame in representation context to get path with frame + # splitter. 
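A worked example of the frame-splitter trick implemented below, with a hypothetical file name:

    prefix, suffix = "plate_main.__frame_splitter__.exr".split(FRAME_SPLITTER)
    # prefix == "plate_main.", suffix == ".exr"; with 4-digit frame padding:
    pattern = f"{prefix}%04d{suffix}"                   # "plate_main.%04d.exr"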
+ representation["context"]["frame"] = FRAME_SPLITTER + frame_repre_path = get_representation_path_with_anatomy( + representation, anatomy + ) + frame_repre_path = Path(frame_repre_path) + repre_dir, repre_filename = ( + frame_repre_path.parent, frame_repre_path.name) + # Get sequence prefix and suffix + file_prefix, file_suffix = repre_filename.split(FRAME_SPLITTER) + # Get frame number from path as string to get frame padding + frame_str = str(repre_path)[len(file_prefix):][:len(file_suffix)] + frame_padding = len(frame_str) + + file_name = f"{file_prefix}%0{frame_padding}d{file_suffix}" + + abs_filepath = Path(repre_dir, file_name) + + start_index = int(first_frame) + end_index = int(int(first_frame) + len(repre_files) - 1) + + # See Resolve API, to import for example clip "file_[001-100].dpx": + # ImportMedia([{"FilePath":"file_%03d.dpx", + # "StartIndex":1, + # "EndIndex":100}]) + return ( + is_sequence, + { + "FilePath": abs_filepath.as_posix(), + "StartIndex": start_index, + "EndIndex": end_index, + } + ) + + def _get_colorspace(self, representation: dict) -> Optional[str]: + """Return Resolve native colorspace from OCIO colorspace data. + + Returns: + Optional[str]: The Resolve native colorspace name, if any mapped. + """ + + data = representation.get("data", {}).get("colorspaceData", {}) + if not data: + return + + ocio_colorspace = data["colorspace"] + if not ocio_colorspace: + return + + resolve_colorspace = get_remapped_colorspace_to_native( + ocio_colorspace_name=ocio_colorspace, + host_name="resolve", + imageio_host_settings=self._host_imageio_settings + ) + if resolve_colorspace: + return resolve_colorspace + else: + self.log.warning( + f"No mapping from OCIO colorspace '{ocio_colorspace}' " + "found to a Resolve colorspace. " + "Ignoring colorspace." + ) + + def _set_colorspace_from_representation( + self, media_pool_item, representation: dict): + """Set the colorspace for the media pool item. + + Args: + media_pool_item (MediaPoolItem): The media pool item. + representation (dict): The representation data. + """ + # Set the Resolve Input Color Space for the media. + colorspace = self._get_colorspace(representation) + if colorspace: + result = set_colorspace(media_pool_item, colorspace) + if not result: + self.log.warning( + f"Failed to apply colorspace: {colorspace}." 
+ ) diff --git a/server_addon/resolve/client/ayon_resolve/version.py b/server_addon/resolve/client/ayon_resolve/version.py index 53e8882ed7..585f44b5a5 100644 --- a/server_addon/resolve/client/ayon_resolve/version.py +++ b/server_addon/resolve/client/ayon_resolve/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring AYON addon 'resolve' version.""" -__version__ = "0.2.1" +__version__ = "0.2.2" diff --git a/server_addon/resolve/package.py b/server_addon/resolve/package.py index 47b6c9a8b6..643e497253 100644 --- a/server_addon/resolve/package.py +++ b/server_addon/resolve/package.py @@ -1,6 +1,6 @@ name = "resolve" title = "DaVinci Resolve" -version = "0.2.1" +version = "0.2.2" client_dir = "ayon_resolve" diff --git a/server_addon/resolve/server/settings.py b/server_addon/resolve/server/settings.py index d9cbb98340..4d363b1a8f 100644 --- a/server_addon/resolve/server/settings.py +++ b/server_addon/resolve/server/settings.py @@ -1,4 +1,9 @@ -from ayon_server.settings import BaseSettingsModel, SettingsField +from pydantic import validator +from ayon_server.settings import ( + BaseSettingsModel, + SettingsField, + ensure_unique_names, +) from .imageio import ResolveImageIOModel @@ -56,7 +61,7 @@ class CreateShotClipModels(BaseSettingsModel): workfileFrameStart: int = SettingsField( 1001, - title="Workfiles Start Frame", + title="Workfile Start Frame", section="Shot Attributes" ) handleStart: int = SettingsField( @@ -76,6 +81,62 @@ class CreatorPluginsModel(BaseSettingsModel): ) +class MetadataMappingModel(BaseSettingsModel): + """Metadata mapping + + Representation document context data are used for formatting of + anatomy tokens. Following are supported: + - version + - task + - asset + + """ + name: str = SettingsField( + "", + title="Metadata property name" + ) + value: str = SettingsField( + "", + title="Metadata value template" + ) + + +class LoadMediaModel(BaseSettingsModel): + clip_color_last: str = SettingsField( + "Olive", + title="Clip color for last version" + ) + clip_color_old: str = SettingsField( + "Orange", + title="Clip color for old version" + ) + media_pool_bin_path: str = SettingsField( + "Loader/{folder[path]}", + title="Media Pool bin path template" + ) + metadata: list[MetadataMappingModel] = SettingsField( + default_factory=list, + title="Metadata mapping", + description=( + "Set these media pool item metadata values on load and update. 
The" + " keys must match the exact Resolve metadata names like" + " 'Clip Name' or 'Shot'" + ) + ) + + @validator("metadata") + def validate_unique_outputs(cls, value): + ensure_unique_names(value) + return value + + +class LoaderPluginsModel(BaseSettingsModel): + LoadMedia: LoadMediaModel = SettingsField( + default_factory=LoadMediaModel, + title="Load Media" + ) + + class ResolveSettings(BaseSettingsModel): launch_openpype_menu_on_start: bool = SettingsField( False, title="Launch OpenPype menu on start of Resolve" @@ -88,6 +149,10 @@ class ResolveSettings(BaseSettingsModel): default_factory=CreatorPluginsModel, title="Creator plugins", ) + load: LoaderPluginsModel = SettingsField( + default_factory=LoaderPluginsModel, + title="Loader plugins", + ) DEFAULT_VALUES = { @@ -109,5 +174,35 @@ DEFAULT_VALUES = { "handleStart": 10, "handleEnd": 10 } + }, + "load": { + "LoadMedia": { + "clip_color_last": "Olive", + "clip_color_old": "Orange", + "media_pool_bin_path": ( + "Loader/{folder[path]}" + ), + "metadata": [ + { + "name": "Comments", + "value": "{version[attrib][comment]}" + }, + { + "name": "Shot", + "value": "{folder[path]}" + }, + { + "name": "Take", + "value": "{product[name]} {version[name]}" + }, + { + "name": "Clip Name", + "value": ( + "{folder[path]} {product[name]} " + "{version[name]} ({representation[name]})" + ) + } + ] + } } }
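To illustrate how the metadata mapping defaults above end up on a clip: the LoadMedia plugin's _set_metadata() formats each value template with the loader context and writes it as a clip property. A condensed sketch, where the entry and context are examples:

    from ayon_core.lib import StringTemplate

    entry = {"name": "Shot", "value": "{folder[path]}"}             # one mapping row
    value = StringTemplate(entry["value"]).format_strict(context)   # loader context data
    media_pool_item.SetClipProperty(entry["name"], value)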