Merge branch 'develop' into bugfix/fix_collect_explicit_colorspace_enabled
|
|
@@ -3,6 +3,7 @@ import warnings
|
|||
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.settings import get_studio_settings, get_project_settings
|
||||
from ayon_core.pipeline.plugin_discover import (
|
||||
discover,
|
||||
register_plugin,
|
||||
|
|
@@ -40,7 +41,8 @@ class LauncherActionSelection:
|
|||
task_name=None,
|
||||
project_entity=None,
|
||||
folder_entity=None,
|
||||
task_entity=None
|
||||
task_entity=None,
|
||||
project_settings=None,
|
||||
):
|
||||
self._project_name = project_name
|
||||
self._folder_id = folder_id
|
||||
|
|
@@ -53,6 +55,8 @@ class LauncherActionSelection:
|
|||
self._folder_entity = folder_entity
|
||||
self._task_entity = task_entity
|
||||
|
||||
self._project_settings = project_settings
|
||||
|
||||
def __getitem__(self, key):
|
||||
warnings.warn(
|
||||
(
|
||||
|
|
@@ -255,6 +259,22 @@ class LauncherActionSelection:
|
|||
)
|
||||
return self._task_entity
|
||||
|
||||
def get_project_settings(self):
|
||||
"""Project settings for the selection.
|
||||
|
||||
Returns:
|
||||
dict[str, Any]: Project settings or studio settings if
|
||||
project is not selected.
|
||||
|
||||
"""
|
||||
if self._project_settings is None:
|
||||
if self._project_name is None:
|
||||
settings = get_studio_settings()
|
||||
else:
|
||||
settings = get_project_settings(self._project_name)
|
||||
self._project_settings = settings
|
||||
return self._project_settings
|
||||
|
||||
@property
|
||||
def is_project_selected(self):
|
||||
"""Return whether a project is selected.
@@ -14,9 +14,10 @@ from ayon_core.lib import (
|
|||
convert_ffprobe_fps_value,
|
||||
)
|
||||
|
||||
FFMPEG_EXE_COMMAND = subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
|
||||
FFMPEG = (
|
||||
'{}%(input_args)s -i "%(input)s" %(filters)s %(args)s%(output)s'
|
||||
).format(subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg")))
|
||||
).format(FFMPEG_EXE_COMMAND)
|
||||
|
||||
DRAWTEXT = (
|
||||
"drawtext@'%(label)s'=fontfile='%(font)s':text=\\'%(text)s\\':"
|
||||
|
|
@@ -482,10 +483,19 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
|
|||
)
|
||||
print("Launching command: {}".format(command))
|
||||
|
||||
use_shell = True
|
||||
try:
|
||||
test_proc = subprocess.Popen(
|
||||
f"{FFMPEG_EXE_COMMAND} --help", shell=True
|
||||
)
|
||||
test_proc.wait()
|
||||
except BaseException:
|
||||
use_shell = False
|
||||
|
||||
kwargs = {
|
||||
"stdout": subprocess.PIPE,
|
||||
"stderr": subprocess.PIPE,
|
||||
"shell": True,
|
||||
"shell": use_shell,
|
||||
}
|
||||
proc = subprocess.Popen(command, **kwargs)
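For readability, the shell probe introduced above can be read as a standalone helper. This is only a sketch (the helper name is illustrative; FFMPEG_EXE_COMMAND is the module-level constant added earlier in this diff):

import subprocess

def _shell_launch_supported(ffmpeg_command):
    # Probe whether ffmpeg can be started through a shell; on any failure
    # the caller should fall back to launching with shell=False.
    try:
        proc = subprocess.Popen(f"{ffmpeg_command} --help", shell=True)
        proc.wait()
    except BaseException:
        return False
    return True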
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,80 @@
|
|||
import ayon_api
|
||||
import json
|
||||
import collections
|
||||
|
||||
from ayon_core.lib import CacheItem
|
||||
import ayon_api
|
||||
from ayon_api.graphql import FIELD_VALUE, GraphQlQuery, fields_to_dict
|
||||
|
||||
from ayon_core.lib import NestedCacheItem
|
||||
|
||||
|
||||
# --- Implementation that should be in ayon-python-api ---
|
||||
# The implementation is not available in all versions of ayon-python-api.
|
||||
def users_graphql_query(fields):
|
||||
query = GraphQlQuery("Users")
|
||||
names_var = query.add_variable("userNames", "[String!]")
|
||||
project_name_var = query.add_variable("projectName", "String!")
|
||||
|
||||
users_field = query.add_field_with_edges("users")
|
||||
users_field.set_filter("names", names_var)
|
||||
users_field.set_filter("projectName", project_name_var)
|
||||
|
||||
nested_fields = fields_to_dict(set(fields))
|
||||
|
||||
query_queue = collections.deque()
|
||||
for key, value in nested_fields.items():
|
||||
query_queue.append((key, value, users_field))
|
||||
|
||||
while query_queue:
|
||||
item = query_queue.popleft()
|
||||
key, value, parent = item
|
||||
field = parent.add_field(key)
|
||||
if value is FIELD_VALUE:
|
||||
continue
|
||||
|
||||
for k, v in value.items():
|
||||
query_queue.append((k, v, field))
|
||||
return query
|
||||
|
||||
|
||||
def get_users(project_name=None, usernames=None, fields=None):
|
||||
"""Get Users.
|
||||
|
||||
Only administrators and managers can fetch all users. For other users
|
||||
it is required to pass in 'project_name' filter.
|
||||
|
||||
Args:
|
||||
project_name (Optional[str]): Project name.
|
||||
usernames (Optional[Iterable[str]]): Filter by usernames.
|
||||
fields (Optional[Iterable[str]]): Fields to be queried
|
||||
for users.
|
||||
|
||||
Returns:
|
||||
Generator[dict[str, Any]]: Queried users.
|
||||
|
||||
"""
|
||||
filters = {}
|
||||
if usernames is not None:
|
||||
usernames = set(usernames)
|
||||
if not usernames:
|
||||
return
|
||||
filters["userNames"] = list(usernames)
|
||||
|
||||
if project_name is not None:
|
||||
filters["projectName"] = project_name
|
||||
|
||||
con = ayon_api.get_server_api_connection()
|
||||
if not fields:
|
||||
fields = con.get_default_fields_for_type("user")
|
||||
|
||||
query = users_graphql_query(set(fields))
|
||||
for attr, filter_value in filters.items():
|
||||
query.set_variable_value(attr, filter_value)
|
||||
|
||||
for parsed_data in query.continuous_query(con):
|
||||
for user in parsed_data["users"]:
|
||||
user["accessGroups"] = json.loads(user["accessGroups"])
|
||||
yield user
|
||||
# --- END of ayon-python-api implementation ---
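A minimal usage sketch of the helper above (assuming a configured ayon_api connection; the project name is illustrative):

# Iterate users visible to the current API user for one project.
for user in get_users(project_name="MyProject"):
    print(user)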
|
||||
|
||||
|
||||
class UserItem:
|
||||
|
|
@@ -32,19 +106,19 @@ class UserItem:
|
|||
class UsersModel:
|
||||
def __init__(self, controller):
|
||||
self._controller = controller
|
||||
self._users_cache = CacheItem(default_factory=list)
|
||||
self._users_cache = NestedCacheItem(default_factory=list)
|
||||
|
||||
def get_user_items(self):
|
||||
def get_user_items(self, project_name):
|
||||
"""Get user items.
|
||||
|
||||
Returns:
|
||||
List[UserItem]: List of user items.
|
||||
|
||||
"""
|
||||
self._invalidate_cache()
|
||||
return self._users_cache.get_data()
|
||||
self._invalidate_cache(project_name)
|
||||
return self._users_cache[project_name].get_data()
|
||||
|
||||
def get_user_items_by_name(self):
|
||||
def get_user_items_by_name(self, project_name):
|
||||
"""Get user items by name.
|
||||
|
||||
Implemented as most of cases using this model will need to find
|
||||
|
|
@@ -56,10 +130,10 @@ class UsersModel:
|
|||
"""
|
||||
return {
|
||||
user_item.username: user_item
|
||||
for user_item in self.get_user_items()
|
||||
for user_item in self.get_user_items(project_name)
|
||||
}
|
||||
|
||||
def get_user_item_by_username(self, username):
|
||||
def get_user_item_by_username(self, project_name, username):
|
||||
"""Get user item by username.
|
||||
|
||||
Args:
|
||||
|
|
@@ -69,16 +143,22 @@ class UsersModel:
|
|||
Union[UserItem, None]: User item or None if not found.
|
||||
|
||||
"""
|
||||
self._invalidate_cache()
|
||||
for user_item in self.get_user_items():
|
||||
self._invalidate_cache(project_name)
|
||||
for user_item in self.get_user_items(project_name):
|
||||
if user_item.username == username:
|
||||
return user_item
|
||||
return None
|
||||
|
||||
def _invalidate_cache(self):
|
||||
if self._users_cache.is_valid:
|
||||
def _invalidate_cache(self, project_name):
|
||||
cache = self._users_cache[project_name]
|
||||
if cache.is_valid:
|
||||
return
|
||||
self._users_cache.update_data([
|
||||
|
||||
if project_name is None:
|
||||
cache.update_data([])
|
||||
return
|
||||
|
||||
self._users_cache[project_name].update_data([
|
||||
UserItem.from_entity_data(user)
|
||||
for user in ayon_api.get_users()
|
||||
for user in get_users(project_name)
|
||||
])
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,8 @@
|
|||
from .window import ContextDialog, main
|
||||
from .window import ContextDialog, main, ask_for_context
|
||||
|
||||
|
||||
__all__ = (
|
||||
"ContextDialog",
|
||||
"main",
|
||||
"ask_for_context"
|
||||
)
|
||||
|
|
|
|||
|
|
@@ -791,3 +791,12 @@ def main(
|
|||
window.show()
|
||||
app.exec_()
|
||||
controller.store_output()
|
||||
|
||||
|
||||
def ask_for_context(strict=True):
|
||||
controller = ContextDialogController()
|
||||
controller.set_strict(strict)
|
||||
window = ContextDialog(controller=controller)
|
||||
window.exec_()
|
||||
|
||||
return controller.get_selected_context()
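A hedged usage sketch of the new helper (import path abbreviated; the exact structure of the returned context depends on the controller):

# from <context dialog package> import ask_for_context
context = ask_for_context(strict=True)
print(context)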
|
||||
|
|
|
|||
|
|
@@ -10,6 +10,108 @@ from ayon_core.pipeline.actions import (
|
|||
)
|
||||
from ayon_core.pipeline.workfile import should_use_last_workfile_on_launch
|
||||
|
||||
try:
|
||||
# Available since applications addon 0.2.4
|
||||
from ayon_applications.action import ApplicationAction
|
||||
except ImportError:
|
||||
# Backwards compatibility from 0.3.3 (24/06/10)
|
||||
# TODO: Remove in future releases
|
||||
class ApplicationAction(LauncherAction):
|
||||
"""Action to launch an application.
|
||||
|
||||
Application action based on 'ApplicationManager' system.
|
||||
|
||||
Handling of applications in launcher is not ideal and should be completely
|
||||
redone from scratch. This is just a temporary solution to keep backwards
|
||||
compatibility with AYON launcher.
|
||||
|
||||
Todos:
|
||||
Move handling of errors to frontend.
|
||||
"""
|
||||
|
||||
# Application object
|
||||
application = None
|
||||
# Action attributes
|
||||
name = None
|
||||
label = None
|
||||
label_variant = None
|
||||
group = None
|
||||
icon = None
|
||||
color = None
|
||||
order = 0
|
||||
data = {}
|
||||
project_settings = {}
|
||||
project_entities = {}
|
||||
|
||||
_log = None
|
||||
|
||||
@property
|
||||
def log(self):
|
||||
if self._log is None:
|
||||
self._log = Logger.get_logger(self.__class__.__name__)
|
||||
return self._log
|
||||
|
||||
def is_compatible(self, selection):
|
||||
if not selection.is_task_selected:
|
||||
return False
|
||||
|
||||
project_entity = self.project_entities[selection.project_name]
|
||||
apps = project_entity["attrib"].get("applications")
|
||||
if not apps or self.application.full_name not in apps:
|
||||
return False
|
||||
|
||||
project_settings = self.project_settings[selection.project_name]
|
||||
only_available = project_settings["applications"]["only_available"]
|
||||
if only_available and not self.application.find_executable():
|
||||
return False
|
||||
return True
|
||||
|
||||
def _show_message_box(self, title, message, details=None):
|
||||
from qtpy import QtWidgets, QtGui
|
||||
from ayon_core import style
|
||||
|
||||
dialog = QtWidgets.QMessageBox()
|
||||
icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
|
||||
dialog.setWindowIcon(icon)
|
||||
dialog.setStyleSheet(style.load_stylesheet())
|
||||
dialog.setWindowTitle(title)
|
||||
dialog.setText(message)
|
||||
if details:
|
||||
dialog.setDetailedText(details)
|
||||
dialog.exec_()
|
||||
|
||||
def process(self, selection, **kwargs):
|
||||
"""Process the full Application action"""
|
||||
|
||||
from ayon_applications import (
|
||||
ApplicationExecutableNotFound,
|
||||
ApplicationLaunchFailed,
|
||||
)
|
||||
|
||||
try:
|
||||
self.application.launch(
|
||||
project_name=selection.project_name,
|
||||
folder_path=selection.folder_path,
|
||||
task_name=selection.task_name,
|
||||
**self.data
|
||||
)
|
||||
|
||||
except ApplicationExecutableNotFound as exc:
|
||||
details = exc.details
|
||||
msg = exc.msg
|
||||
log_msg = str(msg)
|
||||
if details:
|
||||
log_msg += "\n" + details
|
||||
self.log.warning(log_msg)
|
||||
self._show_message_box(
|
||||
"Application executable not found", msg, details
|
||||
)
|
||||
|
||||
except ApplicationLaunchFailed as exc:
|
||||
msg = str(exc)
|
||||
self.log.warning(msg, exc_info=True)
|
||||
self._show_message_box("Application launch failed", msg)
|
||||
|
||||
|
||||
# class Action:
|
||||
# def __init__(self, label, icon=None, identifier=None):
|
||||
|
|
@@ -43,103 +145,6 @@ from ayon_core.pipeline.workfile import should_use_last_workfile_on_launch
|
|||
# self._actions.append(action)
|
||||
|
||||
|
||||
class ApplicationAction(LauncherAction):
|
||||
"""Action to launch an application.
|
||||
|
||||
Application action based on 'ApplicationManager' system.
|
||||
|
||||
Handling of applications in launcher is not ideal and should be completely
|
||||
redone from scratch. This is just a temporary solution to keep backwards
|
||||
compatibility with AYON launcher.
|
||||
|
||||
Todos:
|
||||
Move handling of errors to frontend.
|
||||
"""
|
||||
|
||||
# Application object
|
||||
application = None
|
||||
# Action attributes
|
||||
name = None
|
||||
label = None
|
||||
label_variant = None
|
||||
group = None
|
||||
icon = None
|
||||
color = None
|
||||
order = 0
|
||||
data = {}
|
||||
project_settings = {}
|
||||
project_entities = {}
|
||||
|
||||
_log = None
|
||||
|
||||
@property
|
||||
def log(self):
|
||||
if self._log is None:
|
||||
self._log = Logger.get_logger(self.__class__.__name__)
|
||||
return self._log
|
||||
|
||||
def is_compatible(self, selection):
|
||||
if not selection.is_task_selected:
|
||||
return False
|
||||
|
||||
project_entity = self.project_entities[selection.project_name]
|
||||
apps = project_entity["attrib"].get("applications")
|
||||
if not apps or self.application.full_name not in apps:
|
||||
return False
|
||||
|
||||
project_settings = self.project_settings[selection.project_name]
|
||||
only_available = project_settings["applications"]["only_available"]
|
||||
if only_available and not self.application.find_executable():
|
||||
return False
|
||||
return True
|
||||
|
||||
def _show_message_box(self, title, message, details=None):
|
||||
from qtpy import QtWidgets, QtGui
|
||||
from ayon_core import style
|
||||
|
||||
dialog = QtWidgets.QMessageBox()
|
||||
icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
|
||||
dialog.setWindowIcon(icon)
|
||||
dialog.setStyleSheet(style.load_stylesheet())
|
||||
dialog.setWindowTitle(title)
|
||||
dialog.setText(message)
|
||||
if details:
|
||||
dialog.setDetailedText(details)
|
||||
dialog.exec_()
|
||||
|
||||
def process(self, selection, **kwargs):
|
||||
"""Process the full Application action"""
|
||||
|
||||
from ayon_applications import (
|
||||
ApplicationExecutableNotFound,
|
||||
ApplicationLaunchFailed,
|
||||
)
|
||||
|
||||
try:
|
||||
self.application.launch(
|
||||
project_name=selection.project_name,
|
||||
folder_path=selection.folder_path,
|
||||
task_name=selection.task_name,
|
||||
**self.data
|
||||
)
|
||||
|
||||
except ApplicationExecutableNotFound as exc:
|
||||
details = exc.details
|
||||
msg = exc.msg
|
||||
log_msg = str(msg)
|
||||
if details:
|
||||
log_msg += "\n" + details
|
||||
self.log.warning(log_msg)
|
||||
self._show_message_box(
|
||||
"Application executable not found", msg, details
|
||||
)
|
||||
|
||||
except ApplicationLaunchFailed as exc:
|
||||
msg = str(exc)
|
||||
self.log.warning(msg, exc_info=True)
|
||||
self._show_message_box("Application launch failed", msg)
|
||||
|
||||
|
||||
class ActionItem:
|
||||
"""Item representing single action to trigger.
|
||||
|
||||
|
|
@@ -440,7 +445,17 @@ class ActionsModel:
|
|||
)
|
||||
|
||||
def _prepare_selection(self, project_name, folder_id, task_id):
|
||||
return LauncherActionSelection(project_name, folder_id, task_id)
|
||||
project_entity = None
|
||||
if project_name:
|
||||
project_entity = self._controller.get_project_entity(project_name)
|
||||
project_settings = self._controller.get_project_settings(project_name)
|
||||
return LauncherActionSelection(
|
||||
project_name,
|
||||
folder_id,
|
||||
task_id,
|
||||
project_entity=project_entity,
|
||||
project_settings=project_settings,
|
||||
)
|
||||
|
||||
def _get_discovered_action_classes(self):
|
||||
if self._discovered_actions is None:
|
||||
|
|
@@ -475,7 +490,9 @@ class ActionsModel:
|
|||
action_items = {}
|
||||
for identifier, action in self._get_action_objects().items():
|
||||
is_application = isinstance(action, ApplicationAction)
|
||||
if is_application:
|
||||
# Backwards compatibility from 0.3.3 (24/06/10)
|
||||
# TODO: Remove in future releases
|
||||
if is_application and hasattr(action, "project_settings"):
|
||||
action.project_entities[project_name] = project_entity
|
||||
action.project_settings[project_name] = project_settings
|
||||
|
||||
|
|
@@ -497,10 +514,14 @@ class ActionsModel:
|
|||
return action_items
|
||||
|
||||
def _get_applications_action_classes(self):
|
||||
actions = []
|
||||
|
||||
addons_manager = self._get_addons_manager()
|
||||
applications_addon = addons_manager.get_enabled_addon("applications")
|
||||
if hasattr(applications_addon, "get_applications_action_classes"):
|
||||
return applications_addon.get_applications_action_classes()
|
||||
|
||||
# Backwards compatibility from 0.3.3 (24/06/10)
|
||||
# TODO: Remove in future releases
|
||||
actions = []
|
||||
if applications_addon is None:
|
||||
return actions
|
||||
|
||||
|
|
|
|||
|
|
@@ -177,7 +177,7 @@ class VersionItem:
|
|||
other_version = abs(other.version)
|
||||
# Hero version is greater than non-hero
|
||||
if version == other_version:
|
||||
return self.is_hero
|
||||
return not self.is_hero
|
||||
return version > other_version
|
||||
|
||||
def __lt__(self, other):
|
||||
|
|
@@ -188,7 +188,7 @@ class VersionItem:
|
|||
other_version = abs(other.version)
|
||||
# Non-hero version is lesser than hero
|
||||
if version == other_version:
|
||||
return not self.is_hero
|
||||
return self.is_hero
|
||||
return version < other_version
|
||||
|
||||
def __ge__(self, other):
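The hero-version ordering implemented by the comparisons above can be illustrated in isolation (a sketch only; hero versions are stored as negative integers, as in the model code):

def _version_sort_key(version):
    # Compare by absolute version; on a tie the hero (negative) version wins.
    return (abs(version), version < 0)

print(sorted([1, 2, -2, 3], key=_version_sort_key))
# -> [1, 2, -2, 3]  (hero v2 sorts as greater than the regular v2)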
|
||||
|
|
|
|||
|
|
@@ -90,7 +90,6 @@ class ContainerItem:
|
|||
representation_id,
|
||||
loader_name,
|
||||
namespace,
|
||||
name,
|
||||
object_name,
|
||||
item_id
|
||||
):
|
||||
|
|
@@ -98,7 +97,6 @@ class ContainerItem:
|
|||
self.loader_name = loader_name
|
||||
self.object_name = object_name
|
||||
self.namespace = namespace
|
||||
self.name = name
|
||||
self.item_id = item_id
|
||||
|
||||
@classmethod
|
||||
|
|
@@ -107,7 +105,6 @@ class ContainerItem:
|
|||
representation_id=container["representation"],
|
||||
loader_name=container["loader"],
|
||||
namespace=container["namespace"],
|
||||
name=container["name"],
|
||||
object_name=container["objectName"],
|
||||
item_id=uuid.uuid4().hex,
|
||||
)
|
||||
|
|
@@ -204,7 +201,7 @@ class ContainersModel:
|
|||
def get_container_items(self):
|
||||
self._update_cache()
|
||||
return list(self._items_cache)
|
||||
|
||||
|
||||
def get_container_items_by_id(self, item_ids):
|
||||
return {
|
||||
item_id: self._container_items_by_id.get(item_id)
|
||||
|
|
@@ -329,15 +326,25 @@ class ContainersModel:
|
|||
containers = list(host.ls())
|
||||
else:
|
||||
containers = []
|
||||
|
||||
container_items = []
|
||||
containers_by_id = {}
|
||||
container_items_by_id = {}
|
||||
for container in containers:
|
||||
item = ContainerItem.from_container_data(container)
|
||||
try:
|
||||
item = ContainerItem.from_container_data(container)
|
||||
except Exception as e:
|
||||
# skip item if required data are missing
|
||||
self._controller.log_error(
|
||||
f"Failed to create item: {e}"
|
||||
)
|
||||
continue
|
||||
|
||||
containers_by_id[item.item_id] = container
|
||||
container_items_by_id[item.item_id] = item
|
||||
container_items.append(item)
|
||||
|
||||
|
||||
self._containers_by_id = containers_by_id
|
||||
self._container_items_by_id = container_items_by_id
|
||||
self._items_cache = container_items
|
||||
|
|
|
|||
|
|
@@ -67,8 +67,12 @@ class SelectVersionComboBox(QtWidgets.QComboBox):
|
|||
self._combo_view = combo_view
|
||||
self._status_delegate = status_delegate
|
||||
self._items_by_id = {}
|
||||
self._status_visible = True
|
||||
|
||||
def paintEvent(self, event):
|
||||
if not self._status_visible:
|
||||
return super().paintEvent(event)
|
||||
|
||||
painter = QtWidgets.QStylePainter(self)
|
||||
option = QtWidgets.QStyleOptionComboBox()
|
||||
self.initStyleOption(option)
|
||||
|
|
@@ -120,6 +124,12 @@ class SelectVersionComboBox(QtWidgets.QComboBox):
|
|||
|
||||
self.setCurrentIndex(index)
|
||||
|
||||
def set_status_visible(self, visible):
|
||||
header = self._combo_view.header()
|
||||
header.setSectionHidden(1, not visible)
|
||||
self._status_visible = visible
|
||||
self.update()
|
||||
|
||||
def get_item_by_id(self, item_id):
|
||||
return self._items_by_id[item_id]
|
||||
|
||||
|
|
@@ -195,10 +205,16 @@ class SelectVersionDialog(QtWidgets.QDialog):
|
|||
def select_index(self, index):
|
||||
self._versions_combobox.set_current_index(index)
|
||||
|
||||
def set_status_visible(self, visible):
|
||||
self._versions_combobox.set_status_visible(visible)
|
||||
|
||||
@classmethod
|
||||
def ask_for_version(cls, version_options, index=None, parent=None):
|
||||
def ask_for_version(
|
||||
cls, version_options, index=None, show_statuses=True, parent=None
|
||||
):
|
||||
dialog = cls(parent)
|
||||
dialog.set_versions(version_options)
|
||||
dialog.set_status_visible(show_statuses)
|
||||
if index is not None:
|
||||
dialog.select_index(index)
|
||||
dialog.exec_()
|
||||
|
|
|
|||
|
|
@@ -683,37 +683,51 @@ class SceneInventoryView(QtWidgets.QTreeView):
|
|||
repre_ids
|
||||
)
|
||||
|
||||
product_ids = {
|
||||
repre_info.product_id
|
||||
for repre_info in repre_info_by_id.values()
|
||||
}
|
||||
active_repre_info = repre_info_by_id[active_repre_id]
|
||||
active_product_id = active_repre_info.product_id
|
||||
active_version_id = active_repre_info.version_id
|
||||
filtered_repre_info_by_id = {
|
||||
repre_id: repre_info
|
||||
for repre_id, repre_info in repre_info_by_id.items()
|
||||
if repre_info.product_id == active_product_id
|
||||
}
|
||||
filtered_container_item_ids = {
|
||||
item_id
|
||||
for item_id, container_item in container_items_by_id.items()
|
||||
if container_item.representation_id in filtered_repre_info_by_id
|
||||
}
|
||||
version_items_by_id = self._controller.get_version_items(
|
||||
{active_product_id}
|
||||
)[active_product_id]
|
||||
active_product_id = active_repre_info.product_id
|
||||
version_items_by_product_id = self._controller.get_version_items(
|
||||
product_ids
|
||||
)
|
||||
version_items = list(
|
||||
version_items_by_product_id[active_product_id].values()
|
||||
)
|
||||
versions = {version_item.version for version_item in version_items}
|
||||
product_ids_by_version = collections.defaultdict(set)
|
||||
for version_items_by_id in version_items_by_product_id.values():
|
||||
for version_item in version_items_by_id.values():
|
||||
version = version_item.version
|
||||
_prod_version = version
|
||||
if _prod_version < 0:
|
||||
_prod_version = -1
|
||||
product_ids_by_version[_prod_version].add(
|
||||
version_item.product_id
|
||||
)
|
||||
if version in versions:
|
||||
continue
|
||||
versions.add(version)
|
||||
version_items.append(version_item)
|
||||
|
||||
def version_sorter(item):
|
||||
hero_value = 0
|
||||
version = item.version
|
||||
if version < 0:
|
||||
i_version = item.version
|
||||
if i_version < 0:
|
||||
hero_value = 1
|
||||
version = abs(version)
|
||||
return version, hero_value
|
||||
i_version = abs(i_version)
|
||||
return i_version, hero_value
|
||||
|
||||
version_items = list(version_items_by_id.values())
|
||||
version_items.sort(key=version_sorter, reverse=True)
|
||||
status_items_by_name = {
|
||||
status_item.name: status_item
|
||||
for status_item in self._controller.get_project_status_items()
|
||||
}
|
||||
show_statuses = len(product_ids) == 1
|
||||
status_items_by_name = {}
|
||||
if show_statuses:
|
||||
status_items_by_name = {
|
||||
status_item.name: status_item
|
||||
for status_item in self._controller.get_project_status_items()
|
||||
}
|
||||
|
||||
version_options = []
|
||||
active_version_idx = 0
|
||||
|
|
@@ -743,17 +757,28 @@ class SceneInventoryView(QtWidgets.QTreeView):
|
|||
version_option = SelectVersionDialog.ask_for_version(
|
||||
version_options,
|
||||
active_version_idx,
|
||||
show_statuses=show_statuses,
|
||||
parent=self
|
||||
)
|
||||
if version_option is None:
|
||||
return
|
||||
|
||||
version = version_option.version
|
||||
product_version = version = version_option.version
|
||||
if version < 0:
|
||||
product_version = -1
|
||||
version = HeroVersionType(version)
|
||||
|
||||
product_ids = product_ids_by_version[product_version]
|
||||
|
||||
filtered_item_ids = set()
|
||||
for container_item in container_items_by_id.values():
|
||||
repre_id = container_item.representation_id
|
||||
repre_info = repre_info_by_id[repre_id]
|
||||
if repre_info.product_id in product_ids:
|
||||
filtered_item_ids.add(container_item.item_id)
|
||||
|
||||
self._update_containers_to_version(
|
||||
filtered_container_item_ids, version
|
||||
filtered_item_ids, version
|
||||
)
|
||||
|
||||
def _show_switch_dialog(self, item_ids):
|
||||
|
|
|
|||
|
|
@@ -182,7 +182,27 @@ class TrayManager:
|
|||
}:
|
||||
envs.pop(key, None)
|
||||
|
||||
# Remove any existing addon path from 'PYTHONPATH'
|
||||
addons_dir = os.environ.get("AYON_ADDONS_DIR", "")
|
||||
if addons_dir:
|
||||
addons_dir = os.path.normpath(addons_dir)
|
||||
addons_dir = addons_dir.lower()
|
||||
|
||||
pythonpath = envs.get("PYTHONPATH") or ""
|
||||
new_python_paths = []
|
||||
for path in pythonpath.split(os.pathsep):
|
||||
if not path:
|
||||
continue
|
||||
path = os.path.normpath(path)
|
||||
if path.lower().startswith(addons_dir):
|
||||
continue
|
||||
new_python_paths.append(path)
|
||||
|
||||
envs["PYTHONPATH"] = os.pathsep.join(new_python_paths)
|
||||
|
||||
# Start new process
|
||||
run_detached_process(args, env=envs)
|
||||
# Exit current tray process
|
||||
self.exit()
|
||||
|
||||
def exit(self):
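The PYTHONPATH cleanup added above can be summarized as a standalone helper (a sketch; the function name is illustrative):

import os

def _strip_addon_paths(pythonpath, addons_dir):
    # Drop every PYTHONPATH entry that points inside the addons directory.
    if not addons_dir:
        return pythonpath or ""
    addons_dir = os.path.normpath(addons_dir).lower()
    kept = []
    for path in (pythonpath or "").split(os.pathsep):
        if not path:
            continue
        path = os.path.normpath(path)
        if path.lower().startswith(addons_dir):
            continue
        kept.append(path)
    return os.pathsep.join(kept)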
|
||||
|
|
|
|||
|
|
@@ -834,12 +834,13 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
|
|||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_workarea_file_items(self, folder_id, task_id):
|
||||
def get_workarea_file_items(self, folder_id, task_name, sender=None):
|
||||
"""Get workarea file items.
|
||||
|
||||
Args:
|
||||
folder_id (str): Folder id.
|
||||
task_id (str): Task id.
|
||||
task_name (str): Task name.
|
||||
sender (Optional[str]): Who requested workarea file items.
|
||||
|
||||
Returns:
|
||||
list[FileItem]: List of workarea file items.
|
||||
|
|
@@ -905,12 +906,12 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
|
|||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_workfile_info(self, folder_id, task_id, filepath):
|
||||
def get_workfile_info(self, folder_id, task_name, filepath):
|
||||
"""Workfile info from database.
|
||||
|
||||
Args:
|
||||
folder_id (str): Folder id.
|
||||
task_id (str): Task id.
|
||||
task_name (str): Task name.
|
||||
filepath (str): Workfile path.
|
||||
|
||||
Returns:
|
||||
|
|
@@ -921,7 +922,7 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
|
|||
pass
|
||||
|
||||
@abstractmethod
|
||||
def save_workfile_info(self, folder_id, task_id, filepath, note):
|
||||
def save_workfile_info(self, folder_id, task_name, filepath, note):
|
||||
"""Save workfile info to database.
|
||||
|
||||
At this moment the only information which can be saved about
|
||||
|
|
@@ -932,7 +933,7 @@ class AbstractWorkfilesFrontend(AbstractWorkfilesCommon):
|
|||
|
||||
Args:
|
||||
folder_id (str): Folder id.
|
||||
task_id (str): Task id.
|
||||
task_name (str): Task name.
|
||||
filepath (str): Workfile path.
|
||||
note (Union[str, None]): Note.
|
||||
"""
|
||||
|
|
|
|||
|
|
@@ -278,7 +278,8 @@ class BaseWorkfileController(
|
|||
)
|
||||
|
||||
def get_user_items_by_name(self):
|
||||
return self._users_model.get_user_items_by_name()
|
||||
project_name = self.get_current_project_name()
|
||||
return self._users_model.get_user_items_by_name(project_name)
|
||||
|
||||
# Host information
|
||||
def get_workfile_extensions(self):
|
||||
|
|
@@ -410,9 +411,11 @@ class BaseWorkfileController(
|
|||
return self._workfiles_model.get_workarea_dir_by_context(
|
||||
folder_id, task_id)
|
||||
|
||||
def get_workarea_file_items(self, folder_id, task_id):
|
||||
def get_workarea_file_items(self, folder_id, task_name, sender=None):
|
||||
task_id = self._get_task_id(folder_id, task_name)
|
||||
return self._workfiles_model.get_workarea_file_items(
|
||||
folder_id, task_id)
|
||||
folder_id, task_id, task_name
|
||||
)
|
||||
|
||||
def get_workarea_save_as_data(self, folder_id, task_id):
|
||||
return self._workfiles_model.get_workarea_save_as_data(
|
||||
|
|
@@ -447,12 +450,14 @@ class BaseWorkfileController(
|
|||
return self._workfiles_model.get_published_file_items(
|
||||
folder_id, task_name)
|
||||
|
||||
def get_workfile_info(self, folder_id, task_id, filepath):
|
||||
def get_workfile_info(self, folder_id, task_name, filepath):
|
||||
task_id = self._get_task_id(folder_id, task_name)
|
||||
return self._workfiles_model.get_workfile_info(
|
||||
folder_id, task_id, filepath
|
||||
)
|
||||
|
||||
def save_workfile_info(self, folder_id, task_id, filepath, note):
|
||||
def save_workfile_info(self, folder_id, task_name, filepath, note):
|
||||
task_id = self._get_task_id(folder_id, task_name)
|
||||
self._workfiles_model.save_workfile_info(
|
||||
folder_id, task_id, filepath, note
|
||||
)
|
||||
|
|
@@ -627,6 +632,17 @@ class BaseWorkfileController(
|
|||
def _emit_event(self, topic, data=None):
|
||||
self.emit_event(topic, data, "controller")
|
||||
|
||||
def _get_task_id(self, folder_id, task_name, sender=None):
|
||||
task_item = self._hierarchy_model.get_task_item_by_name(
|
||||
self.get_current_project_name(),
|
||||
folder_id,
|
||||
task_name,
|
||||
sender
|
||||
)
|
||||
if not task_item:
|
||||
return None
|
||||
return task_item.id
|
||||
|
||||
# Expected selection
|
||||
# - expected selection is used to restore selection after refresh
|
||||
# or when current context should be used
|
||||
|
|
@@ -722,7 +738,7 @@ class BaseWorkfileController(
|
|||
self._host_save_workfile(dst_filepath)
|
||||
|
||||
# Make sure workfile info exists
|
||||
self.save_workfile_info(folder_id, task_id, dst_filepath, None)
|
||||
self.save_workfile_info(folder_id, task_name, dst_filepath, None)
|
||||
|
||||
# Create extra folders
|
||||
create_workdir_extra_folders(
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,7 @@
|
|||
import os
|
||||
import re
|
||||
import copy
|
||||
import uuid
|
||||
|
||||
import arrow
|
||||
import ayon_api
|
||||
|
|
@@ -173,7 +174,7 @@ class WorkareaModel:
|
|||
folder_mapping[task_id] = workdir
|
||||
return workdir
|
||||
|
||||
def get_file_items(self, folder_id, task_id):
|
||||
def get_file_items(self, folder_id, task_id, task_name):
|
||||
items = []
|
||||
if not folder_id or not task_id:
|
||||
return items
|
||||
|
|
@@ -192,7 +193,7 @@ class WorkareaModel:
|
|||
continue
|
||||
|
||||
workfile_info = self._controller.get_workfile_info(
|
||||
folder_id, task_id, filepath
|
||||
folder_id, task_name, filepath
|
||||
)
|
||||
modified = os.path.getmtime(filepath)
|
||||
items.append(FileItem(
|
||||
|
|
@@ -587,6 +588,7 @@ class WorkfileEntitiesModel:
|
|||
|
||||
username = self._get_current_username()
|
||||
workfile_info = {
|
||||
"id": uuid.uuid4().hex,
|
||||
"path": rootless_path,
|
||||
"taskId": task_id,
|
||||
"attrib": {
|
||||
|
|
@@ -770,19 +772,21 @@ class WorkfilesModel:
|
|||
return self._workarea_model.get_workarea_dir_by_context(
|
||||
folder_id, task_id)
|
||||
|
||||
def get_workarea_file_items(self, folder_id, task_id):
|
||||
def get_workarea_file_items(self, folder_id, task_id, task_name):
|
||||
"""Workfile items for passed context from workarea.
|
||||
|
||||
Args:
|
||||
folder_id (Union[str, None]): Folder id.
|
||||
task_id (Union[str, None]): Task id.
|
||||
task_name (Union[str, None]): Task name.
|
||||
|
||||
Returns:
|
||||
list[FileItem]: List of file items matching workarea of passed
|
||||
context.
|
||||
"""
|
||||
|
||||
return self._workarea_model.get_file_items(folder_id, task_id)
|
||||
return self._workarea_model.get_file_items(
|
||||
folder_id, task_id, task_name
|
||||
)
|
||||
|
||||
def get_workarea_save_as_data(self, folder_id, task_id):
|
||||
return self._workarea_model.get_workarea_save_as_data(
|
||||
|
|
|
|||
|
|
@@ -66,7 +66,7 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
|
|||
self._empty_item_used = False
|
||||
self._published_mode = False
|
||||
self._selected_folder_id = None
|
||||
self._selected_task_id = None
|
||||
self._selected_task_name = None
|
||||
|
||||
self._add_missing_context_item()
|
||||
|
||||
|
|
@@ -153,7 +153,7 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
|
|||
|
||||
def _on_task_changed(self, event):
|
||||
self._selected_folder_id = event["folder_id"]
|
||||
self._selected_task_id = event["task_id"]
|
||||
self._selected_task_name = event["task_name"]
|
||||
if not self._published_mode:
|
||||
self._fill_items()
|
||||
|
||||
|
|
@@ -179,13 +179,13 @@ class WorkAreaFilesModel(QtGui.QStandardItemModel):
|
|||
|
||||
def _fill_items_impl(self):
|
||||
folder_id = self._selected_folder_id
|
||||
task_id = self._selected_task_id
|
||||
if not folder_id or not task_id:
|
||||
task_name = self._selected_task_name
|
||||
if not folder_id or not task_name:
|
||||
self._add_missing_context_item()
|
||||
return
|
||||
|
||||
file_items = self._controller.get_workarea_file_items(
|
||||
folder_id, task_id
|
||||
folder_id, task_name
|
||||
)
|
||||
root_item = self.invisibleRootItem()
|
||||
if not file_items:
|
||||
|
|
|
|||
|
|
@@ -75,7 +75,7 @@ class SidePanelWidget(QtWidgets.QWidget):
|
|||
self._btn_note_save = btn_note_save
|
||||
|
||||
self._folder_id = None
|
||||
self._task_id = None
|
||||
self._task_name = None
|
||||
self._filepath = None
|
||||
self._orig_note = ""
|
||||
self._controller = controller
|
||||
|
|
@@ -93,10 +93,10 @@ class SidePanelWidget(QtWidgets.QWidget):
|
|||
|
||||
def _on_selection_change(self, event):
|
||||
folder_id = event["folder_id"]
|
||||
task_id = event["task_id"]
|
||||
task_name = event["task_name"]
|
||||
filepath = event["path"]
|
||||
|
||||
self._set_context(folder_id, task_id, filepath)
|
||||
self._set_context(folder_id, task_name, filepath)
|
||||
|
||||
def _on_note_change(self):
|
||||
text = self._note_input.toPlainText()
|
||||
|
|
@@ -106,19 +106,19 @@ class SidePanelWidget(QtWidgets.QWidget):
|
|||
note = self._note_input.toPlainText()
|
||||
self._controller.save_workfile_info(
|
||||
self._folder_id,
|
||||
self._task_id,
|
||||
self._task_name,
|
||||
self._filepath,
|
||||
note
|
||||
)
|
||||
self._orig_note = note
|
||||
self._btn_note_save.setEnabled(False)
|
||||
|
||||
def _set_context(self, folder_id, task_id, filepath):
|
||||
def _set_context(self, folder_id, task_name, filepath):
|
||||
workfile_info = None
|
||||
# Check if folder, task and file are selected
|
||||
if bool(folder_id) and bool(task_id) and bool(filepath):
|
||||
if bool(folder_id) and bool(task_name) and bool(filepath):
|
||||
workfile_info = self._controller.get_workfile_info(
|
||||
folder_id, task_id, filepath
|
||||
folder_id, task_name, filepath
|
||||
)
|
||||
enabled = workfile_info is not None
|
||||
|
||||
|
|
@@ -127,7 +127,7 @@ class SidePanelWidget(QtWidgets.QWidget):
|
|||
self._btn_note_save.setEnabled(enabled)
|
||||
|
||||
self._folder_id = folder_id
|
||||
self._task_id = task_id
|
||||
self._task_name = task_name
|
||||
self._filepath = filepath
|
||||
|
||||
# Disable inputs and remove texts if any required arguments are
|
||||
|
|
|
|||
server_addon/applications/client/ayon_applications/action.py (new file, 147 lines)
|
|
@@ -0,0 +1,147 @@
|
|||
import copy
|
||||
|
||||
import ayon_api
|
||||
|
||||
from ayon_core import resources
|
||||
from ayon_core.lib import Logger, NestedCacheItem
|
||||
from ayon_core.settings import get_studio_settings, get_project_settings
|
||||
from ayon_core.pipeline.actions import LauncherAction
|
||||
|
||||
from .exceptions import (
|
||||
ApplicationExecutableNotFound,
|
||||
ApplicationLaunchFailed,
|
||||
)
|
||||
|
||||
|
||||
class ApplicationAction(LauncherAction):
|
||||
"""Action to launch an application.
|
||||
|
||||
Application action based on 'ApplicationManager' system.
|
||||
|
||||
Handling of applications in launcher is not ideal and should be completely
|
||||
redone from scratch. This is just a temporary solution to keep backwards
|
||||
compatibility with AYON launcher.
|
||||
|
||||
Todos:
|
||||
Move handling of errors to frontend.
|
||||
"""
|
||||
|
||||
# Application object
|
||||
application = None
|
||||
# Action attributes
|
||||
name = None
|
||||
label = None
|
||||
label_variant = None
|
||||
group = None
|
||||
icon = None
|
||||
color = None
|
||||
order = 0
|
||||
data = {}
|
||||
project_settings = {}
|
||||
project_entities = {}
|
||||
|
||||
_log = None
|
||||
|
||||
# --- For compatibility for combinations of new and old ayon-core ---
|
||||
project_settings_cache = NestedCacheItem(
|
||||
levels=1, default_factory=dict, lifetime=20
|
||||
)
|
||||
project_entities_cache = NestedCacheItem(
|
||||
levels=1, default_factory=dict, lifetime=20
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _app_get_project_settings(cls, selection):
|
||||
project_name = selection.project_name
|
||||
if project_name in ApplicationAction.project_settings:
|
||||
return ApplicationAction.project_settings[project_name]
|
||||
|
||||
if hasattr(selection, "get_project_settings"):
|
||||
return selection.get_project_settings()
|
||||
|
||||
cache = ApplicationAction.project_settings_cache[project_name]
|
||||
if not cache.is_valid:
|
||||
if project_name:
|
||||
settings = get_project_settings(project_name)
|
||||
else:
|
||||
settings = get_studio_settings()
|
||||
cache.update_data(settings)
|
||||
return copy.deepcopy(cache.get_data())
|
||||
|
||||
@classmethod
|
||||
def _app_get_project_entity(cls, selection):
|
||||
project_name = selection.project_name
|
||||
if project_name in ApplicationAction.project_entities:
|
||||
return ApplicationAction.project_entities[project_name]
|
||||
|
||||
if hasattr(selection, "get_project_settings"):
|
||||
return selection.get_project_entity()
|
||||
|
||||
cache = ApplicationAction.project_entities_cache[project_name]
|
||||
if not cache.is_valid:
|
||||
project_entity = None
|
||||
if project_name:
|
||||
project_entity = ayon_api.get_project(project_name)
|
||||
cache.update_data(project_entity)
|
||||
return copy.deepcopy(cache.get_data())
|
||||
|
||||
@property
|
||||
def log(self):
|
||||
if self._log is None:
|
||||
self._log = Logger.get_logger(self.__class__.__name__)
|
||||
return self._log
|
||||
|
||||
def is_compatible(self, selection):
|
||||
if not selection.is_task_selected:
|
||||
return False
|
||||
|
||||
project_entity = self._app_get_project_entity(selection)
|
||||
apps = project_entity["attrib"].get("applications")
|
||||
if not apps or self.application.full_name not in apps:
|
||||
return False
|
||||
|
||||
project_settings = self._app_get_project_settings(selection)
|
||||
only_available = project_settings["applications"]["only_available"]
|
||||
if only_available and not self.application.find_executable():
|
||||
return False
|
||||
return True
|
||||
|
||||
def _show_message_box(self, title, message, details=None):
|
||||
from qtpy import QtWidgets, QtGui
|
||||
from ayon_core import style
|
||||
|
||||
dialog = QtWidgets.QMessageBox()
|
||||
icon = QtGui.QIcon(resources.get_ayon_icon_filepath())
|
||||
dialog.setWindowIcon(icon)
|
||||
dialog.setStyleSheet(style.load_stylesheet())
|
||||
dialog.setWindowTitle(title)
|
||||
dialog.setText(message)
|
||||
if details:
|
||||
dialog.setDetailedText(details)
|
||||
dialog.exec_()
|
||||
|
||||
def process(self, selection, **kwargs):
|
||||
"""Process the full Application action"""
|
||||
try:
|
||||
self.application.launch(
|
||||
project_name=selection.project_name,
|
||||
folder_path=selection.folder_path,
|
||||
task_name=selection.task_name,
|
||||
**self.data
|
||||
)
|
||||
|
||||
except ApplicationExecutableNotFound as exc:
|
||||
details = exc.details
|
||||
msg = exc.msg
|
||||
log_msg = str(msg)
|
||||
if details:
|
||||
log_msg += "\n" + details
|
||||
self.log.warning(log_msg)
|
||||
self._show_message_box(
|
||||
"Application executable not found", msg, details
|
||||
)
|
||||
|
||||
except ApplicationLaunchFailed as exc:
|
||||
msg = str(exc)
|
||||
self.log.warning(msg, exc_info=True)
|
||||
self._show_message_box("Application launch failed", msg)
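Both compatibility caches declared near the top of this new module follow one pattern; in isolation it looks like this (a sketch; the cache parameters mirror the class attributes above, and the fetch callable is a placeholder):

from ayon_core.lib import NestedCacheItem

_cache = NestedCacheItem(levels=1, default_factory=dict, lifetime=20)

def _get_cached(project_name, fetch):
    # 'fetch' is any callable returning fresh data for the project.
    cache = _cache[project_name]
    if not cache.is_valid:
        cache.update_data(fetch(project_name))
    return cache.get_data()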
|
||||
|
|
@@ -1,6 +1,8 @@
|
|||
import os
|
||||
import json
|
||||
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.addon import AYONAddon, IPluginPaths, click_wrap
|
||||
|
||||
from .version import __version__
|
||||
|
|
@@ -112,6 +114,95 @@ class ApplicationsAddon(AYONAddon, IPluginPaths):
|
|||
]
|
||||
}
|
||||
|
||||
def get_app_icon_path(self, icon_filename):
|
||||
"""Get icon path.
|
||||
|
||||
Args:
|
||||
icon_filename (str): Icon filename.
|
||||
|
||||
Returns:
|
||||
Union[str, None]: Icon path or None if not found.
|
||||
|
||||
"""
|
||||
if not icon_filename:
|
||||
return None
|
||||
icon_name = os.path.basename(icon_filename)
|
||||
path = os.path.join(APPLICATIONS_ADDON_ROOT, "icons", icon_name)
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
return None
|
||||
|
||||
def get_app_icon_url(self, icon_filename, server=False):
|
||||
"""Get icon path.
|
||||
|
||||
Method does not validate if icon filename exists on server.
|
||||
|
||||
Args:
|
||||
icon_filename (str): Icon name.
|
||||
server (Optional[bool]): Return url to AYON server.
|
||||
|
||||
Returns:
|
||||
Union[str, None]: Icon path or None if server url is not
|
||||
available.
|
||||
|
||||
"""
|
||||
if not icon_filename:
|
||||
return None
|
||||
icon_name = os.path.basename(icon_filename)
|
||||
if server:
|
||||
base_url = ayon_api.get_base_url()
|
||||
return (
|
||||
f"{base_url}/addons/{self.name}/{self.version}"
|
||||
f"/public/icons/{icon_name}"
|
||||
)
|
||||
server_url = os.getenv("AYON_WEBSERVER_URL")
|
||||
if not server_url:
|
||||
return None
|
||||
return "/".join([
|
||||
server_url, "addons", self.name, self.version, "icons", icon_name
|
||||
])
|
||||
|
||||
def get_applications_action_classes(self):
|
||||
"""Get application action classes for launcher tool.
|
||||
|
||||
This method should be used only by launcher tool. Please do not use it
|
||||
in other places as its implementation is not optimal, and might
|
||||
change or be removed.
|
||||
|
||||
Returns:
|
||||
list[ApplicationAction]: List of application action classes.
|
||||
|
||||
"""
|
||||
from .action import ApplicationAction
|
||||
|
||||
actions = []
|
||||
|
||||
manager = self.get_applications_manager()
|
||||
for full_name, application in manager.applications.items():
|
||||
if not application.enabled:
|
||||
continue
|
||||
|
||||
icon = self.get_app_icon_path(application.icon)
|
||||
|
||||
action = type(
|
||||
"app_{}".format(full_name),
|
||||
(ApplicationAction,),
|
||||
{
|
||||
"identifier": "application.{}".format(full_name),
|
||||
"application": application,
|
||||
"name": application.name,
|
||||
"label": application.group.label,
|
||||
"label_variant": application.label,
|
||||
"group": None,
|
||||
"icon": icon,
|
||||
"color": getattr(application, "color", None),
|
||||
"order": getattr(application, "order", None) or 0,
|
||||
"data": {}
|
||||
}
|
||||
)
|
||||
actions.append(action)
|
||||
return actions
|
||||
|
||||
def launch_application(
|
||||
self, app_name, project_name, folder_path, task_name
|
||||
):
|
||||
|
|
@@ -132,6 +223,18 @@ class ApplicationsAddon(AYONAddon, IPluginPaths):
|
|||
task_name=task_name,
|
||||
)
|
||||
|
||||
def webserver_initialization(self, manager):
|
||||
"""Initialize webserver.
|
||||
|
||||
Args:
|
||||
manager (WebServerManager): Webserver manager.
|
||||
|
||||
"""
|
||||
static_prefix = f"/addons/{self.name}/{self.version}/icons"
|
||||
manager.add_static(
|
||||
static_prefix, os.path.join(APPLICATIONS_ADDON_ROOT, "icons")
|
||||
)
|
||||
|
||||
# --- CLI ---
|
||||
def cli(self, addon_click_group):
|
||||
main_group = click_wrap.group(
|
||||
|
|
|
|||
|
26 binary icon images added under server_addon/applications/client/ayon_applications/icons/ (rendered only as image-size rows in the diff, 1 KiB to 280 KiB; includes ue4.png)
|
|
@@ -156,7 +156,7 @@ class ApplicationManager:
|
|||
|
||||
Args:
|
||||
app_name (str): Name of application that should be launched.
|
||||
**data (dict): Any additional data. Data may be used during
|
||||
**data (Any): Any additional data. Data may be used during
|
||||
preparation to store objects usable in multiple places.
|
||||
|
||||
Raises:
|
||||
|
|
|
|||
|
|
@@ -11,7 +11,6 @@ from ayon_core import AYON_CORE_ROOT
|
|||
from ayon_core.settings import get_project_settings
|
||||
from ayon_core.lib import Logger, get_ayon_username
|
||||
from ayon_core.addon import AddonsManager
|
||||
from ayon_core.pipeline import HOST_WORKFILE_EXTENSIONS
|
||||
from ayon_core.pipeline.template_data import get_template_data
|
||||
from ayon_core.pipeline.workfile import (
|
||||
get_workfile_template_key,
|
||||
|
|
@@ -573,10 +572,9 @@ def _prepare_last_workfile(data, workdir, addons_manager):
|
|||
last_workfile_path = data.get("last_workfile_path") or ""
|
||||
if not last_workfile_path:
|
||||
host_addon = addons_manager.get_host_addon(app.host_name)
|
||||
extensions = None
|
||||
if host_addon:
|
||||
extensions = host_addon.get_workfile_extensions()
|
||||
else:
|
||||
extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name)
|
||||
|
||||
if extensions:
|
||||
anatomy = data["anatomy"]
|
||||
|
|
|
|||
Binary icon images added under server_addon/applications/public/icons/ (1 KiB to 280 KiB each): 3de4.png, 3dsmax.png, aftereffects.png, blender.png, celaction.png, flame.png, fusion.png, harmony.png, hiero.png, houdini.png, maya.png, nuke.png, nukestudio.png, nukex.png, openrv.png, photoshop.png, premiere.png, python.png, resolve.png, shotgrid.png, storyboardpro.png, substancepainter.png, tvpaint.png, ue4.png, wrap.png, zbrush.png
|
|
@@ -105,6 +105,8 @@ def find_files_in_subdir(
|
|||
List[Tuple[str, str]]: List of tuples with path to file and parent
|
||||
directories relative to 'src_path'.
|
||||
"""
|
||||
if not os.path.exists(src_path):
|
||||
return []
|
||||
|
||||
if ignore_file_patterns is None:
|
||||
ignore_file_patterns = IGNORE_FILE_PATTERNS
|
||||
|
|
@@ -210,6 +212,7 @@ def _get_server_mapping(
|
|||
addon_dir: Path, addon_version: str
|
||||
) -> List[Tuple[str, str]]:
|
||||
server_dir = addon_dir / "server"
|
||||
public_dir = addon_dir / "public"
|
||||
src_package_py = addon_dir / "package.py"
|
||||
pyproject_toml = addon_dir / "client" / "pyproject.toml"
|
||||
|
||||
|
|
@@ -217,6 +220,10 @@ def _get_server_mapping(
|
|||
(src_path, f"server/{sub_path}")
|
||||
for src_path, sub_path in find_files_in_subdir(str(server_dir))
|
||||
]
|
||||
mapping.extend([
|
||||
(src_path, f"public/{sub_path}")
|
||||
for src_path, sub_path in find_files_in_subdir(str(public_dir))
|
||||
])
|
||||
mapping.append((src_package_py.as_posix(), "package.py"))
|
||||
if pyproject_toml.exists():
|
||||
mapping.append((pyproject_toml.as_posix(), "private/pyproject.toml"))
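For orientation, the server mapping now covers four kinds of entries; the example paths below are purely illustrative:

# ("<addon>/server/__init__.py",    "server/__init__.py")
# ("<addon>/public/icons/maya.png", "public/icons/maya.png")
# ("<addon>/package.py",            "package.py")
# ("<addon>/client/pyproject.toml", "private/pyproject.toml")  # only when it exists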
|
||||
|
|
|
|||
|
|
@@ -2,7 +2,10 @@
|
|||
"""Creator plugin for creating publishable Houdini Digital Assets."""
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.pipeline import CreatorError
|
||||
from ayon_core.pipeline import (
|
||||
CreatorError,
|
||||
get_current_project_name
|
||||
)
|
||||
from ayon_houdini.api import plugin
|
||||
import hou
|
||||
|
||||
|
|
@@ -56,8 +59,18 @@ class CreateHDA(plugin.HoudiniCreator):
|
|||
raise CreatorError(
|
||||
"cannot create hda from node {}".format(to_hda))
|
||||
|
||||
# Pick a unique type name for HDA product per folder path per project.
|
||||
type_name = (
|
||||
"{project_name}{folder_path}_{node_name}".format(
|
||||
project_name=get_current_project_name(),
|
||||
folder_path=folder_path.replace("/","_"),
|
||||
node_name=node_name
|
||||
)
|
||||
)
|
||||
|
||||
hda_node = to_hda.createDigitalAsset(
|
||||
name=node_name,
|
||||
name=type_name,
|
||||
description=node_name,
|
||||
hda_file_name="$HIP/{}.hda".format(node_name)
|
||||
)
|
||||
hda_node.layoutChildren()
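A worked example of the unique type name built above (project name, folder path, and node name are hypothetical):

# project "MyProject", folder path "/shots/sh010", node name "mynode"
# -> type_name == "MyProject_shots_sh010_mynode"
# while the HDA description and file name keep using "mynode"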
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,7 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
from ayon_core.pipeline import get_representation_path
|
||||
from ayon_core.pipeline.load import LoadError
|
||||
from ayon_houdini.api import (
|
||||
pipeline,
|
||||
plugin
|
||||
|
|
@@ -28,14 +29,18 @@ class HdaLoader(plugin.HoudiniLoader):
|
|||
# Get the root node
|
||||
obj = hou.node("/obj")
|
||||
|
||||
# Create a unique name
|
||||
counter = 1
|
||||
namespace = namespace or context["folder"]["name"]
|
||||
formatted = "{}_{}".format(namespace, name) if namespace else name
|
||||
node_name = "{0}_{1:03d}".format(formatted, counter)
|
||||
node_name = "{}_{}".format(namespace, name) if namespace else name
|
||||
|
||||
hou.hda.installFile(file_path)
|
||||
hda_node = obj.createNode(name, node_name)
|
||||
|
||||
# Get the type name from the HDA definition.
|
||||
hda_defs = hou.hda.definitionsInFile(file_path)
|
||||
if not hda_defs:
|
||||
raise LoadError(f"No HDA definitions found in file: {file_path}")
|
||||
|
||||
type_name = hda_defs[0].nodeTypeName()
|
||||
hda_node = obj.createNode(type_name, node_name)
|
||||
|
||||
self[:] = [hda_node]
|
||||
|
||||
|
|
|
|||
|
|
@@ -272,10 +272,8 @@ def reset_frame_range(fps: bool = True):
|
|||
scene frame rate in frames-per-second.
|
||||
"""
|
||||
if fps:
|
||||
task_entity = get_current_task_entity()
|
||||
task_attributes = task_entity["attrib"]
|
||||
fps_number = float(task_attributes["fps"])
|
||||
rt.frameRate = fps_number
|
||||
rt.frameRate = float(get_fps_for_current_context())
|
||||
|
||||
frame_range = get_frame_range()
|
||||
|
||||
set_timeline(
|
||||
|
|
@@ -284,6 +282,22 @@ def reset_frame_range(fps: bool = True):
|
|||
frame_range["frameStartHandle"], frame_range["frameEndHandle"])
|
||||
|
||||
|
||||
def get_fps_for_current_context():
|
||||
"""Get fps that should be set for current context.
|
||||
|
||||
Todos:
|
||||
- Skip project value.
|
||||
- Merge logic with 'get_frame_range' and 'reset_scene_resolution' ->
|
||||
all the values in the functions can be collected at one place as
|
||||
they have same requirements.
|
||||
|
||||
Returns:
|
||||
Union[int, float]: FPS value.
|
||||
"""
|
||||
task_entity = get_current_task_entity(fields={"attrib"})
|
||||
return task_entity["attrib"]["fps"]
|
||||
|
||||
|
||||
def reset_unit_scale():
|
||||
"""Apply the unit scale setting to 3dsMax
|
||||
"""
|
||||
|
|
@@ -358,7 +372,7 @@ def is_headless():
|
|||
def set_timeline(frameStart, frameEnd):
|
||||
"""Set frame range for timeline editor in Max
|
||||
"""
|
||||
rt.animationRange = rt.interval(frameStart, frameEnd)
|
||||
rt.animationRange = rt.interval(int(frameStart), int(frameEnd))
|
||||
return rt.animationRange
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@@ -7,7 +7,7 @@ Because of limited api, alembics can be only loaded, but not easily updated.
|
|||
import os
|
||||
from ayon_core.pipeline import load, get_representation_path
|
||||
from ayon_max.api import lib, maintained_selection
|
||||
from ayon_max.api.lib import unique_namespace
|
||||
from ayon_max.api.lib import unique_namespace, reset_frame_range
|
||||
from ayon_max.api.pipeline import (
|
||||
containerise,
|
||||
get_previous_loaded_object,
|
||||
|
|
@@ -38,6 +38,9 @@ class AbcLoader(load.LoaderPlugin):
|
|||
}
|
||||
|
||||
rt.AlembicImport.ImportToRoot = False
|
||||
# TODO: it will be removed after the improvement
|
||||
# on the post-system setup
|
||||
reset_frame_range()
|
||||
rt.importFile(file_path, rt.name("noPrompt"), using=rt.AlembicImport)
|
||||
|
||||
abc_after = {
|
||||
|
|
|
|||
|
|
@@ -8,7 +8,7 @@ from ayon_core.lib import (
|
|||
)
|
||||
|
||||
|
||||
def _get_animation_attr_defs(cls):
|
||||
def _get_animation_attr_defs():
|
||||
"""Get Animation generic definitions."""
|
||||
defs = lib.collect_animation_defs()
|
||||
defs.extend(
|
||||
|
|
@@ -99,9 +99,7 @@ class CreateAnimation(plugin.MayaHiddenCreator):
|
|||
return node_data
|
||||
|
||||
def get_instance_attr_defs(self):
|
||||
defs = super(CreateAnimation, self).get_instance_attr_defs()
|
||||
defs += _get_animation_attr_defs(self)
|
||||
return defs
|
||||
return _get_animation_attr_defs()
|
||||
|
||||
|
||||
class CreatePointCache(plugin.MayaCreator):
|
||||
|
|
@@ -123,9 +121,7 @@ class CreatePointCache(plugin.MayaCreator):
|
|||
return node_data
|
||||
|
||||
def get_instance_attr_defs(self):
|
||||
defs = super(CreatePointCache, self).get_instance_attr_defs()
|
||||
defs += _get_animation_attr_defs(self)
|
||||
return defs
|
||||
return _get_animation_attr_defs()
|
||||
|
||||
def create(self, product_name, instance_data, pre_create_data):
|
||||
instance = super(CreatePointCache, self).create(
|
||||
|
|
|
|||
|
|
@@ -42,6 +42,6 @@ class CreateLook(plugin.MayaCreator):
|
|||
|
||||
def get_pre_create_attr_defs(self):
|
||||
# Show same attributes on create but include use selection
|
||||
defs = super(CreateLook, self).get_pre_create_attr_defs()
|
||||
defs = list(super().get_pre_create_attr_defs())
|
||||
defs.extend(self.get_instance_attr_defs())
|
||||
return defs
|
||||
|
|
|
|||
|
|
@@ -1,3 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Package declaring AYON addon 'maya' version."""
|
||||
__version__ = "0.2.2"
|
||||
__version__ = "0.2.3"
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,6 @@
|
|||
name = "maya"
|
||||
title = "Maya"
|
||||
version = "0.2.2"
|
||||
version = "0.2.3"
|
||||
client_dir = "ayon_maya"
|
||||
|
||||
ayon_required_addons = {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
import os
|
||||
from pprint import pformat
|
||||
import re
|
||||
import json
|
||||
import six
|
||||
|
|
@ -37,6 +36,7 @@ from ayon_core.pipeline import (
|
|||
get_current_host_name,
|
||||
get_current_project_name,
|
||||
get_current_folder_path,
|
||||
get_current_task_name,
|
||||
AYON_INSTANCE_ID,
|
||||
AVALON_INSTANCE_ID,
|
||||
)
|
||||
|
|
@ -154,15 +154,9 @@ def set_node_data(node, knobname, data):
|
|||
"""
|
||||
# if exists then update data
|
||||
if knobname in node.knobs():
|
||||
log.debug("Updating knobname `{}` on node `{}`".format(
|
||||
knobname, node.name()
|
||||
))
|
||||
update_node_data(node, knobname, data)
|
||||
return
|
||||
|
||||
log.debug("Creating knobname `{}` on node `{}`".format(
|
||||
knobname, node.name()
|
||||
))
|
||||
# else create new
|
||||
knob_value = JSON_PREFIX + json.dumps(data)
|
||||
knob = nuke.String_Knob(knobname)
|
||||
|
|
@ -513,11 +507,9 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True):
|
|||
# check if the node is avalon tracked
|
||||
try:
|
||||
# check if data available on the node
|
||||
test = node[DATA_GROUP_KEY].value()
|
||||
log.debug("Only testing if data available: `{}`".format(test))
|
||||
except NameError as e:
|
||||
_ = node[DATA_GROUP_KEY].value()
|
||||
except NameError:
|
||||
# if it doesn't then create it
|
||||
log.debug("Creating avalon knob: `{}`".format(e))
|
||||
if create:
|
||||
node = set_avalon_knob_data(node)
|
||||
return get_avalon_knob_data(node)
|
||||
|
|
@ -678,8 +670,6 @@ def get_imageio_node_setting(node_class, plugin_name, product_name):
|
|||
imageio_node = node
|
||||
break
|
||||
|
||||
log.debug("__ imageio_node: {}".format(imageio_node))
|
||||
|
||||
if not imageio_node:
|
||||
return
|
||||
|
||||
|
|
@ -690,8 +680,6 @@ def get_imageio_node_setting(node_class, plugin_name, product_name):
|
|||
product_name,
|
||||
imageio_node["knobs"]
|
||||
)
|
||||
|
||||
log.info("ImageIO node: {}".format(imageio_node))
|
||||
return imageio_node
|
||||
|
||||
|
||||
|
|
@ -706,8 +694,6 @@ def get_imageio_node_override_setting(
|
|||
# find matching override node
|
||||
override_imageio_node = None
|
||||
for onode in override_nodes:
|
||||
log.debug("__ onode: {}".format(onode))
|
||||
log.debug("__ productName: {}".format(product_name))
|
||||
if node_class not in onode["nuke_node_class"]:
|
||||
continue
|
||||
|
||||
|
|
@ -727,7 +713,6 @@ def get_imageio_node_override_setting(
|
|||
override_imageio_node = onode
|
||||
break
|
||||
|
||||
log.debug("__ override_imageio_node: {}".format(override_imageio_node))
|
||||
# add overrides to imageio_node
|
||||
if override_imageio_node:
|
||||
# get all knob names in imageio_node
|
||||
|
|
@ -740,7 +725,6 @@ def get_imageio_node_override_setting(
|
|||
for knob in knobs_settings:
|
||||
# add missing knobs into imageio_node
|
||||
if oknob_name not in knob_names:
|
||||
log.debug("_ adding knob: `{}`".format(oknob))
|
||||
knobs_settings.append(oknob)
|
||||
knob_names.append(oknob_name)
|
||||
continue
|
||||
|
|
@ -750,9 +734,6 @@ def get_imageio_node_override_setting(
|
|||
|
||||
knob_type = knob["type"]
|
||||
# override matching knob name
|
||||
log.debug(
|
||||
"_ overriding knob: `{}` > `{}`".format(knob, oknob)
|
||||
)
|
||||
if not oknob_value:
|
||||
# remove original knob if no value found in oknob
|
||||
knobs_settings.remove(knob)
|
||||
|
|
@ -923,7 +904,6 @@ def writes_version_sync():
|
|||
new_version = "v" + str("{" + ":0>{}".format(padding) + "}").format(
|
||||
int(rootVersion)
|
||||
)
|
||||
log.debug("new_version: {}".format(new_version))
|
||||
except Exception:
|
||||
return
|
||||
|
||||
|
|
@ -936,13 +916,11 @@ def writes_version_sync():
|
|||
|
||||
try:
|
||||
if avalon_knob_data["families"] not in ["render"]:
|
||||
log.debug(avalon_knob_data["families"])
|
||||
continue
|
||||
|
||||
node_file = each["file"].value()
|
||||
|
||||
node_version = "v" + get_version_from_path(node_file)
|
||||
log.debug("node_version: {}".format(node_version))
|
||||
|
||||
node_new_file = node_file.replace(node_version, new_version)
|
||||
each["file"].setValue(node_new_file)
|
||||
|
|
@ -1332,7 +1310,6 @@ def set_node_knobs_from_settings(node, knob_settings, **kwargs):
|
|||
kwargs (dict)[optional]: keys for formattable knob settings
|
||||
"""
|
||||
for knob in knob_settings:
|
||||
log.debug("__ knob: {}".format(pformat(knob)))
|
||||
knob_name = knob["name"]
|
||||
if knob_name not in node.knobs():
|
||||
continue
|
||||
|
|
@ -1486,13 +1463,17 @@ class WorkfileSettings(object):
|
|||
Context._project_entity = project_entity
|
||||
self._project_name = project_name
|
||||
self._folder_path = get_current_folder_path()
|
||||
self._task_name = get_current_task_name()
|
||||
self._folder_entity = ayon_api.get_folder_by_path(
|
||||
project_name, self._folder_path
|
||||
)
|
||||
self._root_node = root_node or nuke.root()
|
||||
self._nodes = self.get_nodes(nodes=nodes)
|
||||
|
||||
self.data = kwargs
|
||||
context_data = get_template_data_with_names(
|
||||
project_name, self._folder_path, self._task_name, "nuke"
|
||||
)
|
||||
self.formatting_data = context_data
|
||||
|
||||
def get_nodes(self, nodes=None, nodes_filter=None):
|
||||
|
||||
|
|
@ -1509,36 +1490,23 @@ class WorkfileSettings(object):
|
|||
for filter in nodes_filter:
|
||||
return [n for n in self._nodes if filter in n.Class()]
|
||||
|
||||
def set_viewers_colorspace(self, viewer_dict):
|
||||
def set_viewers_colorspace(self, imageio_nuke):
|
||||
''' Adds correct colorspace to viewer
|
||||
|
||||
Arguments:
|
||||
viewer_dict (dict): adjustments from presets
|
||||
imageio_nuke (dict): nuke colorspace configurations
|
||||
|
||||
'''
|
||||
if not isinstance(viewer_dict, dict):
|
||||
msg = "set_viewers_colorspace(): argument should be dictionary"
|
||||
log.error(msg)
|
||||
nuke.message(msg)
|
||||
return
|
||||
|
||||
filter_knobs = [
|
||||
"viewerProcess",
|
||||
"wipe_position",
|
||||
"monitorOutOutputTransform"
|
||||
]
|
||||
|
||||
display, viewer = get_viewer_config_from_string(
|
||||
viewer_dict["viewerProcess"]
|
||||
viewer_process = self._display_and_view_formatted(
|
||||
imageio_nuke["viewer"]
|
||||
)
|
||||
viewer_process = create_viewer_profile_string(
|
||||
viewer, display, path_like=False
|
||||
)
|
||||
display, viewer = get_viewer_config_from_string(
|
||||
viewer_dict["output_transform"]
|
||||
)
|
||||
output_transform = create_viewer_profile_string(
|
||||
viewer, display, path_like=False
|
||||
output_transform = self._display_and_view_formatted(
|
||||
imageio_nuke["monitor"]
|
||||
)
|
||||
erased_viewers = []
|
||||
for v in nuke.allNodes(filter="Viewer"):
|
||||
|
|
@ -1547,8 +1515,10 @@ class WorkfileSettings(object):
|
|||
|
||||
if viewer_process not in v["viewerProcess"].value():
|
||||
copy_inputs = v.dependencies()
|
||||
copy_knobs = {k: v[k].value() for k in v.knobs()
|
||||
if k not in filter_knobs}
|
||||
copy_knobs = {
|
||||
k: v[k].value() for k in v.knobs()
|
||||
if k not in filter_knobs
|
||||
}
|
||||
|
||||
# delete viewer with wrong settings
|
||||
erased_viewers.append(v["name"].value())
|
||||
|
|
@ -1574,6 +1544,21 @@ class WorkfileSettings(object):
|
|||
"Attention! Viewer nodes {} were erased."
|
||||
"It had wrong color profile".format(erased_viewers))
|
||||
|
||||
def _display_and_view_formatted(self, view_profile):
|
||||
""" Format display and view profile string
|
||||
|
||||
Args:
|
||||
view_profile (dict): view and display profile
|
||||
|
||||
Returns:
|
||||
str: formatted display and view profile string
|
||||
"""
|
||||
display_view = create_viewer_profile_string(
|
||||
view_profile["view"], view_profile["display"], path_like=False
|
||||
)
|
||||
# format any template tokens used in the string
|
||||
return StringTemplate(display_view).format_strict(self.formatting_data)
|
||||
|
||||
def set_root_colorspace(self, imageio_host):
|
||||
''' Adds correct colorspace to root
|
||||
|
||||
|
|
@ -1590,12 +1575,12 @@ class WorkfileSettings(object):
|
|||
if not config_data:
|
||||
# no ocio config found and no custom path used
|
||||
if self._root_node["colorManagement"].value() \
|
||||
not in color_management:
|
||||
not in color_management:
|
||||
self._root_node["colorManagement"].setValue(color_management)
|
||||
|
||||
# second set ocio version
|
||||
if self._root_node["OCIO_config"].value() \
|
||||
not in native_ocio_config:
|
||||
not in native_ocio_config:
|
||||
self._root_node["OCIO_config"].setValue(native_ocio_config)
|
||||
|
||||
else:
|
||||
|
|
@ -1623,21 +1608,25 @@ class WorkfileSettings(object):
|
|||
if correct_settings:
|
||||
self._set_ocio_config_path_to_workfile(config_data)
|
||||
|
||||
workfile_settings_output = {}
|
||||
# get monitor lut from settings respecting Nuke version differences
|
||||
monitor_lut_data = self._get_monitor_settings(
|
||||
workfile_settings["monitor_out_lut"],
|
||||
workfile_settings["monitor_lut"]
|
||||
)
|
||||
monitor_lut_data.update({
|
||||
"workingSpaceLUT": workfile_settings["working_space"],
|
||||
"int8Lut": workfile_settings["int_8_lut"],
|
||||
"int16Lut": workfile_settings["int_16_lut"],
|
||||
"logLut": workfile_settings["log_lut"],
|
||||
"floatLut": workfile_settings["float_lut"]
|
||||
})
|
||||
workfile_settings_output.update(monitor_lut_data)
|
||||
workfile_settings_output.update(
|
||||
{
|
||||
"workingSpaceLUT": workfile_settings["working_space"],
|
||||
"int8Lut": workfile_settings["int_8_lut"],
|
||||
"int16Lut": workfile_settings["int_16_lut"],
|
||||
"logLut": workfile_settings["log_lut"],
|
||||
"floatLut": workfile_settings["float_lut"],
|
||||
}
|
||||
)
|
||||
|
||||
# then set the rest
|
||||
for knob, value_ in monitor_lut_data.items():
|
||||
for knob, value_ in workfile_settings_output.items():
|
||||
# skip unfilled ocio config path
|
||||
# it will be dict in value
|
||||
if isinstance(value_, dict):
|
||||
|
|
@ -1646,7 +1635,6 @@ class WorkfileSettings(object):
|
|||
if not value_:
|
||||
continue
|
||||
self._root_node[knob].setValue(str(value_))
|
||||
log.debug("nuke.root()['{}'] changed to: {}".format(knob, value_))
|
||||
|
||||
def _get_monitor_settings(self, viewer_lut, monitor_lut):
|
||||
""" Get monitor settings from viewer and monitor lut
|
||||
|
|
@ -1889,8 +1877,6 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
|
|||
elif node_data:
|
||||
nuke_imageio_writes = get_write_node_template_attr(node)
|
||||
|
||||
log.debug("nuke_imageio_writes: `{}`".format(nuke_imageio_writes))
|
||||
|
||||
if not nuke_imageio_writes:
|
||||
return
|
||||
|
||||
|
|
@ -1938,7 +1924,6 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
|
|||
"to": future
|
||||
}
|
||||
|
||||
log.debug(changes)
|
||||
if changes:
|
||||
msg = "Read nodes are not set to correct colorspace:\n\n"
|
||||
for nname, knobs in changes.items():
|
||||
|
|
@ -1972,7 +1957,7 @@ Reopening Nuke should synchronize these paths and resolve any discrepancies.
|
|||
|
||||
log.info("Setting colorspace to viewers...")
|
||||
try:
|
||||
self.set_viewers_colorspace(nuke_colorspace["viewer"])
|
||||
self.set_viewers_colorspace(nuke_colorspace)
|
||||
except AttributeError as _error:
|
||||
msg = "Set Colorspace to viewer error: {}".format(_error)
|
||||
nuke.message(msg)
|
||||
|
|
@ -2653,8 +2638,6 @@ class NukeDirmap(HostDirmap):
|
|||
def dirmap_routine(self, source_path, destination_path):
|
||||
source_path = source_path.lower().replace(os.sep, '/')
|
||||
destination_path = destination_path.lower().replace(os.sep, '/')
|
||||
log.debug("Map: {} with: {}->{}".format(self.file_name,
|
||||
source_path, destination_path))
|
||||
if platform.system().lower() == "windows":
|
||||
self.file_name = self.file_name.lower().replace(
|
||||
source_path, destination_path)
|
||||
|
|
|
|||
|
|
@ -37,8 +37,6 @@ from .lib import (
|
|||
INSTANCE_DATA_KNOB,
|
||||
get_main_window,
|
||||
WorkfileSettings,
|
||||
# TODO: remove this once workfile builder will be removed
|
||||
process_workfile_builder,
|
||||
start_workfile_template_builder,
|
||||
launch_workfiles_app,
|
||||
check_inventory_versions,
|
||||
|
|
@ -67,6 +65,7 @@ from .workio import (
|
|||
current_file
|
||||
)
|
||||
from .constants import ASSIST
|
||||
from . import push_to_project
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
|
@ -159,9 +158,6 @@ def add_nuke_callbacks():
|
|||
# template builder callbacks
|
||||
nuke.addOnCreate(start_workfile_template_builder, nodeClass="Root")
|
||||
|
||||
# TODO: remove this callback once workfile builder will be removed
|
||||
nuke.addOnCreate(process_workfile_builder, nodeClass="Root")
|
||||
|
||||
# fix ffmpeg settings on script
|
||||
nuke.addOnScriptLoad(on_script_load)
|
||||
|
||||
|
|
@ -332,6 +328,11 @@ def _install_menu():
|
|||
lambda: update_placeholder()
|
||||
)
|
||||
|
||||
menu.addCommand(
|
||||
"Push to Project",
|
||||
lambda: push_to_project.main()
|
||||
)
|
||||
|
||||
menu.addSeparator()
|
||||
menu.addCommand(
|
||||
"Experimental tools...",
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ from ayon_core.lib import (
|
|||
BoolDef,
|
||||
EnumDef
|
||||
)
|
||||
from ayon_core.lib import StringTemplate
|
||||
from ayon_core.pipeline import (
|
||||
LoaderPlugin,
|
||||
CreatorError,
|
||||
|
|
@ -38,7 +39,6 @@ from .lib import (
|
|||
set_node_data,
|
||||
get_node_data,
|
||||
get_view_process_node,
|
||||
get_viewer_config_from_string,
|
||||
get_filenames_without_hash,
|
||||
link_knobs
|
||||
)
|
||||
|
|
@ -638,12 +638,15 @@ class ExporterReview(object):
|
|||
from . import lib as opnlib
|
||||
nuke_imageio = opnlib.get_nuke_imageio_settings()
|
||||
|
||||
# TODO: this is only securing backward compatibility lets remove
|
||||
# this once all projects's anatomy are updated to newer config
|
||||
if "baking" in nuke_imageio.keys():
|
||||
return nuke_imageio["baking"]["viewerProcess"]
|
||||
if nuke_imageio["baking_target"]["enabled"]:
|
||||
return nuke_imageio["baking_target"]
|
||||
else:
|
||||
return nuke_imageio["viewer"]["viewerProcess"]
|
||||
# viewer is having display and view keys only and it is
|
||||
# display_view type
|
||||
return {
|
||||
"type": "display_view",
|
||||
"display_view": nuke_imageio["viewer"],
|
||||
}
|
||||
|
||||
|
||||
class ExporterReviewLut(ExporterReview):
|
||||
|
|
@ -790,6 +793,7 @@ class ExporterReviewMov(ExporterReview):
|
|||
self.viewer_lut_raw = klass.viewer_lut_raw
|
||||
self.write_colorspace = instance.data["colorspace"]
|
||||
self.color_channels = instance.data["color_channels"]
|
||||
self.formatting_data = instance.data["anatomyData"]
|
||||
|
||||
self.name = name or "baked"
|
||||
self.ext = ext or "mov"
|
||||
|
|
@ -837,7 +841,7 @@ class ExporterReviewMov(ExporterReview):
|
|||
with maintained_selection():
|
||||
self.log.info("Saving nodes as file... ")
|
||||
# create nk path
|
||||
path = os.path.splitext(self.path)[0] + ".nk"
|
||||
path = f"{os.path.splitext(self.path)[0]}.nk"
|
||||
# save file to the path
|
||||
if not os.path.exists(os.path.dirname(path)):
|
||||
os.makedirs(os.path.dirname(path))
|
||||
|
|
@ -861,21 +865,20 @@ class ExporterReviewMov(ExporterReview):
|
|||
bake_viewer_process = kwargs["bake_viewer_process"]
|
||||
bake_viewer_input_process_node = kwargs[
|
||||
"bake_viewer_input_process"]
|
||||
viewer_process_override = kwargs[
|
||||
"viewer_process_override"]
|
||||
|
||||
baking_view_profile = (
|
||||
viewer_process_override or self.get_imageio_baking_profile())
|
||||
baking_colorspace = self.get_imageio_baking_profile()
|
||||
|
||||
colorspace_override = kwargs["colorspace_override"]
|
||||
if colorspace_override["enabled"]:
|
||||
baking_colorspace = colorspace_override
|
||||
|
||||
fps = self.instance.context.data["fps"]
|
||||
|
||||
self.log.debug(">> baking_view_profile `{}`".format(
|
||||
baking_view_profile))
|
||||
self.log.debug(f">> baking_view_profile `{baking_colorspace}`")
|
||||
|
||||
add_custom_tags = kwargs.get("add_custom_tags", [])
|
||||
|
||||
self.log.info(
|
||||
"__ add_custom_tags: `{0}`".format(add_custom_tags))
|
||||
self.log.info(f"__ add_custom_tags: `{add_custom_tags}`")
|
||||
|
||||
product_name = self.instance.data["productName"]
|
||||
self._temp_nodes[product_name] = []
|
||||
|
|
@ -932,32 +935,64 @@ class ExporterReviewMov(ExporterReview):
|
|||
|
||||
if not self.viewer_lut_raw:
|
||||
# OCIODisplay
|
||||
dag_node = nuke.createNode("OCIODisplay")
|
||||
if baking_colorspace["type"] == "display_view":
|
||||
display_view = baking_colorspace["display_view"]
|
||||
|
||||
# assign display
|
||||
display, viewer = get_viewer_config_from_string(
|
||||
str(baking_view_profile)
|
||||
)
|
||||
if display:
|
||||
dag_node["display"].setValue(display)
|
||||
message = "OCIODisplay... '{}'"
|
||||
node = nuke.createNode("OCIODisplay")
|
||||
|
||||
# assign viewer
|
||||
dag_node["view"].setValue(viewer)
|
||||
# assign display and view
|
||||
display = display_view["display"]
|
||||
view = display_view["view"]
|
||||
|
||||
if config_data:
|
||||
# convert display and view to colorspace
|
||||
colorspace = get_display_view_colorspace_name(
|
||||
config_path=config_data["path"],
|
||||
display=display,
|
||||
view=viewer
|
||||
# display could not be set in nuke_default config
|
||||
if display:
|
||||
# format display string with anatomy data
|
||||
display = StringTemplate(display).format_strict(
|
||||
self.formatting_data
|
||||
)
|
||||
node["display"].setValue(display)
|
||||
|
||||
# format view string with anatomy data
|
||||
view = StringTemplate(view).format_strict(
|
||||
self.formatting_data)
|
||||
# assign viewer
|
||||
node["view"].setValue(view)
|
||||
|
||||
if config_data:
|
||||
# convert display and view to colorspace
|
||||
colorspace = get_display_view_colorspace_name(
|
||||
config_path=config_data["path"],
|
||||
display=display, view=view
|
||||
)
|
||||
|
||||
# OCIOColorSpace
|
||||
elif baking_colorspace["type"] == "colorspace":
|
||||
baking_colorspace = baking_colorspace["colorspace"]
|
||||
# format colorspace string with anatomy data
|
||||
baking_colorspace = StringTemplate(
|
||||
baking_colorspace).format_strict(self.formatting_data)
|
||||
node = nuke.createNode("OCIOColorSpace")
|
||||
message = "OCIOColorSpace... '{}'"
|
||||
# no need to set input colorspace since it is driven by
|
||||
# working colorspace
|
||||
node["out_colorspace"].setValue(baking_colorspace)
|
||||
colorspace = baking_colorspace
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
"Invalid baking color space type: "
|
||||
f"{baking_colorspace['type']}"
|
||||
)
|
||||
|
||||
self._connect_to_above_nodes(
|
||||
dag_node, product_name, "OCIODisplay... `{}`"
|
||||
node, product_name, message
|
||||
)
|
||||
|
||||
# Write node
|
||||
write_node = nuke.createNode("Write")
|
||||
self.log.debug("Path: {}".format(self.path))
|
||||
self.log.debug(f"Path: {self.path}")
|
||||
|
||||
write_node["file"].setValue(str(self.path))
|
||||
write_node["file_type"].setValue(str(self.ext))
|
||||
write_node["channels"].setValue(str(self.color_channels))
|
||||
|
|
@ -981,12 +1016,11 @@ class ExporterReviewMov(ExporterReview):
|
|||
self.log.info("`mov64_write_timecode` knob was not found")
|
||||
|
||||
write_node["raw"].setValue(1)
|
||||
|
||||
# connect
|
||||
write_node.setInput(0, self.previous_node)
|
||||
self._temp_nodes[product_name].append(write_node)
|
||||
self.log.debug("Write... `{}`".format(
|
||||
self._temp_nodes[product_name])
|
||||
)
|
||||
self.log.debug(f"Write... `{self._temp_nodes[product_name]}`")
|
||||
# ---------- end nodes creation
|
||||
|
||||
# ---------- render or save to nk
|
||||
|
|
@ -1014,7 +1048,7 @@ class ExporterReviewMov(ExporterReview):
|
|||
colorspace=colorspace,
|
||||
)
|
||||
|
||||
self.log.debug("Representation... `{}`".format(self.data))
|
||||
self.log.debug(f"Representation... `{self.data}`")
|
||||
|
||||
self.clean_nodes(product_name)
|
||||
nuke.scriptSave()
|
||||
|
|
|
|||
118
server_addon/nuke/client/ayon_nuke/api/push_to_project.py
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
from collections import defaultdict
|
||||
import shutil
|
||||
import os
|
||||
|
||||
from ayon_api import get_project, get_folder_by_id, get_task_by_id
|
||||
from ayon_core.settings import get_project_settings
|
||||
from ayon_core.pipeline import Anatomy, registered_host
|
||||
from ayon_core.pipeline.template_data import get_template_data
|
||||
from ayon_core.pipeline.workfile import get_workdir_with_workdir_data
|
||||
from ayon_core.tools import context_dialog
|
||||
|
||||
from .utils import bake_gizmos_recursively
|
||||
from .lib import MENU_LABEL
|
||||
|
||||
import nuke
|
||||
|
||||
|
||||
def bake_container(container):
|
||||
"""Bake containers to read nodes."""
|
||||
|
||||
node = container["node"]
|
||||
|
||||
# Fetch knobs to remove in order.
|
||||
knobs_to_remove = []
|
||||
remove = False
|
||||
for count in range(0, node.numKnobs()):
|
||||
knob = node.knob(count)
|
||||
|
||||
# All knobs from "AYON" tab knob onwards.
|
||||
if knob.name() == MENU_LABEL:
|
||||
remove = True
|
||||
|
||||
if remove:
|
||||
knobs_to_remove.append(knob)
|
||||
|
||||
# Dont remove knobs from "containerId" onwards.
|
||||
if knob.name() == "containerId":
|
||||
remove = False
|
||||
|
||||
# Knobs needs to be remove in reverse order, because child knobs needs to
|
||||
# be remove first.
|
||||
for knob in reversed(knobs_to_remove):
|
||||
node.removeKnob(knob)
|
||||
|
||||
node["tile_color"].setValue(0)
|
||||
|
||||
|
||||
def main():
|
||||
context = context_dialog.ask_for_context()
|
||||
|
||||
if context is None:
|
||||
return
|
||||
|
||||
# Get workfile path to save to.
|
||||
project_name = context["project_name"]
|
||||
project = get_project(project_name)
|
||||
folder = get_folder_by_id(project_name, context["folder_id"])
|
||||
task = get_task_by_id(project_name, context["task_id"])
|
||||
host = registered_host()
|
||||
project_settings = get_project_settings(project_name)
|
||||
anatomy = Anatomy(project_name)
|
||||
|
||||
workdir_data = get_template_data(
|
||||
project, folder, task, host.name, project_settings
|
||||
)
|
||||
|
||||
workdir = get_workdir_with_workdir_data(
|
||||
workdir_data,
|
||||
project_name,
|
||||
anatomy,
|
||||
project_settings=project_settings
|
||||
)
|
||||
# Save current workfile.
|
||||
current_file = host.current_file()
|
||||
host.save_file(current_file)
|
||||
|
||||
for container in host.ls():
|
||||
bake_container(container)
|
||||
|
||||
# Bake gizmos.
|
||||
bake_gizmos_recursively()
|
||||
|
||||
# Copy all read node files to "resources" folder next to workfile and
|
||||
# change file path.
|
||||
first_frame = int(nuke.root()["first_frame"].value())
|
||||
last_frame = int(nuke.root()["last_frame"].value())
|
||||
files_by_node_name = defaultdict(set)
|
||||
nodes_by_name = {}
|
||||
for count in range(first_frame, last_frame + 1):
|
||||
nuke.frame(count)
|
||||
for node in nuke.allNodes(filter="Read"):
|
||||
files_by_node_name[node.name()].add(
|
||||
nuke.filename(node, nuke.REPLACE)
|
||||
)
|
||||
nodes_by_name[node.name()] = node
|
||||
|
||||
resources_dir = os.path.join(workdir, "resources")
|
||||
for name, files in files_by_node_name.items():
|
||||
dir = os.path.join(resources_dir, name)
|
||||
if not os.path.exists(dir):
|
||||
os.makedirs(dir)
|
||||
|
||||
for f in files:
|
||||
shutil.copy(f, os.path.join(dir, os.path.basename(f)))
|
||||
|
||||
node = nodes_by_name[name]
|
||||
path = node["file"].value().replace(os.path.dirname(f), dir)
|
||||
node["file"].setValue(path.replace("\\", "/"))
|
||||
|
||||
# Save current workfile to new context.
|
||||
pushed_workfile = os.path.join(
|
||||
workdir, os.path.basename(current_file))
|
||||
host.save_file(pushed_workfile)
|
||||
|
||||
# Open current context workfile.
|
||||
host.open_file(current_file)
|
||||
|
||||
nuke.message(f"Pushed to project: \n{pushed_workfile}")
|
||||
|
|
@ -28,29 +28,6 @@ class ExtractReviewIntermediates(publish.Extractor):
|
|||
viewer_lut_raw = None
|
||||
outputs = {}
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, project_settings):
|
||||
"""Apply the settings from the deprecated
|
||||
ExtractReviewDataMov plugin for backwards compatibility
|
||||
"""
|
||||
nuke_publish = project_settings["nuke"]["publish"]
|
||||
deprecated_setting = nuke_publish["ExtractReviewDataMov"]
|
||||
current_setting = nuke_publish.get("ExtractReviewIntermediates")
|
||||
if not deprecated_setting["enabled"] and (
|
||||
not current_setting["enabled"]
|
||||
):
|
||||
cls.enabled = False
|
||||
|
||||
if deprecated_setting["enabled"]:
|
||||
# Use deprecated settings if they are still enabled
|
||||
cls.viewer_lut_raw = deprecated_setting["viewer_lut_raw"]
|
||||
cls.outputs = deprecated_setting["outputs"]
|
||||
elif current_setting is None:
|
||||
pass
|
||||
elif current_setting["enabled"]:
|
||||
cls.viewer_lut_raw = current_setting["viewer_lut_raw"]
|
||||
cls.outputs = current_setting["outputs"]
|
||||
|
||||
def process(self, instance):
|
||||
# TODO 'families' should not be included for filtering of outputs
|
||||
families = set(instance.data["families"])
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Package declaring AYON addon 'nuke' version."""
|
||||
__version__ = "0.2.2"
|
||||
__version__ = "0.2.3"
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
name = "nuke"
|
||||
title = "Nuke"
|
||||
version = "0.2.2"
|
||||
version = "0.2.3"
|
||||
|
||||
client_dir = "ayon_nuke"
|
||||
|
||||
|
|
|
|||
|
|
@ -1,8 +1,12 @@
|
|||
from typing import Type
|
||||
from typing import Type, Any
|
||||
|
||||
from ayon_server.addons import BaseServerAddon
|
||||
|
||||
from .settings import NukeSettings, DEFAULT_VALUES
|
||||
from .settings import (
|
||||
NukeSettings,
|
||||
DEFAULT_VALUES,
|
||||
convert_settings_overrides
|
||||
)
|
||||
|
||||
|
||||
class NukeAddon(BaseServerAddon):
|
||||
|
|
@ -11,3 +15,13 @@ class NukeAddon(BaseServerAddon):
|
|||
async def get_default_settings(self):
|
||||
settings_model_cls = self.get_settings_model()
|
||||
return settings_model_cls(**DEFAULT_VALUES)
|
||||
|
||||
async def convert_settings_overrides(
|
||||
self,
|
||||
source_version: str,
|
||||
overrides: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
convert_settings_overrides(source_version, overrides)
|
||||
# Use super conversion
|
||||
return await super().convert_settings_overrides(
|
||||
source_version, overrides)
|
||||
|
|
|
|||
|
|
@ -2,9 +2,12 @@ from .main import (
|
|||
NukeSettings,
|
||||
DEFAULT_VALUES,
|
||||
)
|
||||
from .conversion import convert_settings_overrides
|
||||
|
||||
|
||||
__all__ = (
|
||||
"NukeSettings",
|
||||
"DEFAULT_VALUES",
|
||||
|
||||
"convert_settings_overrides",
|
||||
)
|
||||
|
|
|
|||
|
|
@ -133,3 +133,63 @@ class KnobModel(BaseSettingsModel):
|
|||
"",
|
||||
title="Expression"
|
||||
)
|
||||
|
||||
|
||||
colorspace_types_enum = [
|
||||
{"value": "colorspace", "label": "Use Colorspace"},
|
||||
{"value": "display_view", "label": "Use Display & View"},
|
||||
]
|
||||
|
||||
|
||||
class DisplayAndViewProfileModel(BaseSettingsModel):
|
||||
_layout = "expanded"
|
||||
|
||||
display: str = SettingsField(
|
||||
"",
|
||||
title="Display",
|
||||
description="What display to use",
|
||||
)
|
||||
|
||||
view: str = SettingsField(
|
||||
"",
|
||||
title="View",
|
||||
description=(
|
||||
"What view to use. Anatomy context tokens can "
|
||||
"be used to dynamically set the value."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class ColorspaceConfigurationModel(BaseSettingsModel):
|
||||
_isGroup: bool = True
|
||||
|
||||
enabled: bool = SettingsField(
|
||||
False,
|
||||
title="Enabled",
|
||||
description=(
|
||||
"Enable baking target (colorspace or display/view)."
|
||||
),
|
||||
)
|
||||
|
||||
type: str = SettingsField(
|
||||
"colorspace",
|
||||
title="Target baking type",
|
||||
description="Switch between different knob types",
|
||||
enum_resolver=lambda: colorspace_types_enum,
|
||||
conditionalEnum=True,
|
||||
)
|
||||
|
||||
colorspace: str = SettingsField(
|
||||
"",
|
||||
title="Colorspace",
|
||||
description=(
|
||||
"What colorspace name to use. Anatomy context tokens can "
|
||||
"be used to dynamically set the value."
|
||||
),
|
||||
)
|
||||
|
||||
display_view: DisplayAndViewProfileModel = SettingsField(
|
||||
title="Display & View",
|
||||
description="What display & view to use",
|
||||
default_factory=DisplayAndViewProfileModel,
|
||||
)
|
||||
|
|
|
|||
143
server_addon/nuke/server/settings/conversion.py
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
import re
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _get_viewer_config_from_string(input_string):
|
||||
"""Convert string to display and viewer string
|
||||
|
||||
Args:
|
||||
input_string (str): string with viewer
|
||||
|
||||
Raises:
|
||||
IndexError: if more then one slash in input string
|
||||
IndexError: if missing closing bracket
|
||||
|
||||
Returns:
|
||||
tuple[str]: display, viewer
|
||||
"""
|
||||
display = None
|
||||
viewer = input_string
|
||||
# check if () or / or \ in name
|
||||
if "/" in viewer:
|
||||
split = viewer.split("/")
|
||||
|
||||
# rise if more then one column
|
||||
if len(split) > 2:
|
||||
raise IndexError(
|
||||
"Viewer Input string is not correct. "
|
||||
f"More then two `/` slashes! {input_string}"
|
||||
)
|
||||
|
||||
viewer = split[1]
|
||||
display = split[0]
|
||||
elif "(" in viewer:
|
||||
pattern = r"([\w\d\s\.\-]+).*[(](.*)[)]"
|
||||
result_ = re.findall(pattern, viewer)
|
||||
try:
|
||||
result_ = result_.pop()
|
||||
display = str(result_[1]).rstrip()
|
||||
viewer = str(result_[0]).rstrip()
|
||||
except IndexError as e:
|
||||
raise IndexError(
|
||||
"Viewer Input string is not correct. "
|
||||
f"Missing bracket! {input_string}"
|
||||
) from e
|
||||
|
||||
return (display, viewer)
|
||||
|
||||
|
||||
def _convert_imageio_baking_0_2_3(overrides):
|
||||
if "baking" not in overrides:
|
||||
return
|
||||
|
||||
baking_view_process = overrides["baking"].get("viewerProcess")
|
||||
|
||||
if baking_view_process is None:
|
||||
return
|
||||
|
||||
display, view = _get_viewer_config_from_string(baking_view_process)
|
||||
|
||||
overrides["baking_target"] = {
|
||||
"enabled": True,
|
||||
"type": "display_view",
|
||||
"display_view": {
|
||||
"display": display,
|
||||
"view": view,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _convert_viewers_0_2_3(overrides):
|
||||
if "viewer" not in overrides:
|
||||
return
|
||||
|
||||
viewer = overrides["viewer"]
|
||||
|
||||
if "viewerProcess" in viewer:
|
||||
viewer_process = viewer["viewerProcess"]
|
||||
display, view = _get_viewer_config_from_string(viewer_process)
|
||||
viewer.update({
|
||||
"display": display,
|
||||
"view": view,
|
||||
})
|
||||
if "output_transform" in viewer:
|
||||
output_transform = viewer["output_transform"]
|
||||
display, view = _get_viewer_config_from_string(output_transform)
|
||||
overrides["monitor"] = {
|
||||
"display": display,
|
||||
"view": view,
|
||||
}
|
||||
|
||||
|
||||
def _convert_imageio_configs_0_2_3(overrides):
|
||||
"""Image IO settings had changed.
|
||||
|
||||
0.2.2. is the latest version using the old way.
|
||||
"""
|
||||
if "imageio" not in overrides:
|
||||
return
|
||||
|
||||
imageio_overrides = overrides["imageio"]
|
||||
|
||||
_convert_imageio_baking_0_2_3(imageio_overrides)
|
||||
_convert_viewers_0_2_3(imageio_overrides)
|
||||
|
||||
|
||||
def _convert_extract_intermediate_files_0_2_3(publish_overrides):
|
||||
"""Extract intermediate files settings had changed.
|
||||
|
||||
0.2.2. is the latest version using the old way.
|
||||
"""
|
||||
# override can be either `display/view` or `view (display)`
|
||||
if "ExtractReviewIntermediates" in publish_overrides:
|
||||
extract_review_intermediates = publish_overrides[
|
||||
"ExtractReviewIntermediates"]
|
||||
|
||||
for output in extract_review_intermediates.get("outputs", []):
|
||||
if viewer_process_override := output.get("viewer_process_override"):
|
||||
display, view = _get_viewer_config_from_string(
|
||||
viewer_process_override)
|
||||
|
||||
output["colorspace_override"] = {
|
||||
"enabled": True,
|
||||
"type": "display_view",
|
||||
"display_view": {
|
||||
"display": display,
|
||||
"view": view,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _convert_publish_plugins(overrides):
|
||||
if "publish" not in overrides:
|
||||
return
|
||||
_convert_extract_intermediate_files_0_2_3(overrides["publish"])
|
||||
|
||||
|
||||
def convert_settings_overrides(
|
||||
source_version: str,
|
||||
overrides: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
_convert_imageio_configs_0_2_3(overrides)
|
||||
_convert_publish_plugins(overrides)
|
||||
return overrides
|
||||
|
|
@ -6,7 +6,10 @@ from ayon_server.settings import (
|
|||
ensure_unique_names,
|
||||
)
|
||||
|
||||
from .common import KnobModel
|
||||
from .common import (
|
||||
KnobModel,
|
||||
ColorspaceConfigurationModel,
|
||||
)
|
||||
|
||||
|
||||
class NodesModel(BaseSettingsModel):
|
||||
|
|
@ -52,6 +55,8 @@ class OverrideNodesModel(NodesModel):
|
|||
|
||||
|
||||
class NodesSetting(BaseSettingsModel):
|
||||
_isGroup: bool = True
|
||||
|
||||
required_nodes: list[RequiredNodesModel] = SettingsField(
|
||||
title="Plugin required",
|
||||
default_factory=list
|
||||
|
|
@ -83,6 +88,8 @@ def ocio_configs_switcher_enum():
|
|||
class WorkfileColorspaceSettings(BaseSettingsModel):
|
||||
"""Nuke workfile colorspace preset. """
|
||||
|
||||
_isGroup: bool = True
|
||||
|
||||
color_management: Literal["Nuke", "OCIO"] = SettingsField(
|
||||
title="Color Management Workflow"
|
||||
)
|
||||
|
|
@ -125,6 +132,8 @@ class ReadColorspaceRulesItems(BaseSettingsModel):
|
|||
|
||||
|
||||
class RegexInputsModel(BaseSettingsModel):
|
||||
_isGroup: bool = True
|
||||
|
||||
inputs: list[ReadColorspaceRulesItems] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Inputs"
|
||||
|
|
@ -132,15 +141,44 @@ class RegexInputsModel(BaseSettingsModel):
|
|||
|
||||
|
||||
class ViewProcessModel(BaseSettingsModel):
|
||||
viewerProcess: str = SettingsField(
|
||||
title="Viewer Process Name"
|
||||
_isGroup: bool = True
|
||||
|
||||
display: str = SettingsField(
|
||||
"",
|
||||
title="Display",
|
||||
description="What display to use",
|
||||
)
|
||||
output_transform: str = SettingsField(
|
||||
title="Output Transform"
|
||||
view: str = SettingsField(
|
||||
"",
|
||||
title="View",
|
||||
description=(
|
||||
"What view to use. Anatomy context tokens can "
|
||||
"be used to dynamically set the value."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class MonitorProcessModel(BaseSettingsModel):
|
||||
_isGroup: bool = True
|
||||
|
||||
display: str = SettingsField(
|
||||
"",
|
||||
title="Display",
|
||||
description="What display to use",
|
||||
)
|
||||
view: str = SettingsField(
|
||||
"",
|
||||
title="View",
|
||||
description=(
|
||||
"What view to use. Anatomy context tokens can "
|
||||
"be used to dynamically set the value."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class ImageIOConfigModel(BaseSettingsModel):
|
||||
_isGroup: bool = True
|
||||
|
||||
override_global_config: bool = SettingsField(
|
||||
False,
|
||||
title="Override global OCIO config"
|
||||
|
|
@ -159,6 +197,8 @@ class ImageIOFileRuleModel(BaseSettingsModel):
|
|||
|
||||
|
||||
class ImageIOFileRulesModel(BaseSettingsModel):
|
||||
_isGroup: bool = True
|
||||
|
||||
activate_host_rules: bool = SettingsField(False)
|
||||
rules: list[ImageIOFileRuleModel] = SettingsField(
|
||||
default_factory=list,
|
||||
|
|
@ -173,14 +213,7 @@ class ImageIOFileRulesModel(BaseSettingsModel):
|
|||
|
||||
class ImageIOSettings(BaseSettingsModel):
|
||||
"""Nuke color management project settings. """
|
||||
_isGroup: bool = True
|
||||
|
||||
"""# TODO: enhance settings with host api:
|
||||
to restructure settings for simplification.
|
||||
|
||||
now: nuke/imageio/viewer/viewerProcess
|
||||
future: nuke/imageio/viewer
|
||||
"""
|
||||
activate_host_color_management: bool = SettingsField(
|
||||
True, title="Enable Color Management")
|
||||
ocio_config: ImageIOConfigModel = SettingsField(
|
||||
|
|
@ -197,18 +230,13 @@ class ImageIOSettings(BaseSettingsModel):
|
|||
description="""Viewer profile is used during
|
||||
Creation of new viewer node at knob viewerProcess"""
|
||||
)
|
||||
|
||||
"""# TODO: enhance settings with host api:
|
||||
to restructure settings for simplification.
|
||||
|
||||
now: nuke/imageio/baking/viewerProcess
|
||||
future: nuke/imageio/baking
|
||||
"""
|
||||
baking: ViewProcessModel = SettingsField(
|
||||
default_factory=ViewProcessModel,
|
||||
title="Baking",
|
||||
description="""Baking profile is used during
|
||||
publishing baked colorspace data at knob viewerProcess"""
|
||||
monitor: MonitorProcessModel = SettingsField(
|
||||
default_factory=MonitorProcessModel,
|
||||
title="Monitor OUT"
|
||||
)
|
||||
baking_target: ColorspaceConfigurationModel = SettingsField(
|
||||
default_factory=ColorspaceConfigurationModel,
|
||||
title="Baking Target Colorspace"
|
||||
)
|
||||
|
||||
workfile: WorkfileColorspaceSettings = SettingsField(
|
||||
|
|
@ -231,13 +259,12 @@ class ImageIOSettings(BaseSettingsModel):
|
|||
|
||||
|
||||
DEFAULT_IMAGEIO_SETTINGS = {
|
||||
"viewer": {
|
||||
"viewerProcess": "ACES/sRGB",
|
||||
"output_transform": "ACES/sRGB"
|
||||
},
|
||||
"baking": {
|
||||
"viewerProcess": "ACES/Rec.709",
|
||||
"output_transform": "ACES/Rec.709"
|
||||
"viewer": {"display": "ACES", "view": "sRGB"},
|
||||
"monitor": {"display": "ACES", "view": "Rec.709"},
|
||||
"baking_target": {
|
||||
"enabled": True,
|
||||
"type": "colorspace",
|
||||
"colorspace": "Output - Rec.709",
|
||||
},
|
||||
"workfile": {
|
||||
"color_management": "OCIO",
|
||||
|
|
@ -248,170 +275,67 @@ DEFAULT_IMAGEIO_SETTINGS = {
|
|||
"int_8_lut": "role_matte_paint",
|
||||
"int_16_lut": "role_texture_paint",
|
||||
"log_lut": "role_compositing_log",
|
||||
"float_lut": "role_scene_linear"
|
||||
"float_lut": "role_scene_linear",
|
||||
},
|
||||
"nodes": {
|
||||
"required_nodes": [
|
||||
{
|
||||
"plugins": [
|
||||
"CreateWriteRender"
|
||||
],
|
||||
"plugins": ["CreateWriteRender"],
|
||||
"nuke_node_class": "Write",
|
||||
"knobs": [
|
||||
{
|
||||
"type": "text",
|
||||
"name": "file_type",
|
||||
"text": "exr"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "datatype",
|
||||
"text": "16 bit half"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "compression",
|
||||
"text": "Zip (1 scanline)"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "autocrop",
|
||||
"boolean": True
|
||||
},
|
||||
{"type": "text", "name": "file_type", "text": "exr"},
|
||||
{"type": "text", "name": "datatype", "text": "16 bit half"},
|
||||
{"type": "text", "name": "compression", "text": "Zip (1 scanline)"},
|
||||
{"type": "boolean", "name": "autocrop", "boolean": True},
|
||||
{
|
||||
"type": "color_gui",
|
||||
"name": "tile_color",
|
||||
"color_gui": [
|
||||
186,
|
||||
35,
|
||||
35
|
||||
]
|
||||
"color_gui": [186, 35, 35],
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "channels",
|
||||
"text": "rgb"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "colorspace",
|
||||
"text": "scene_linear"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "create_directories",
|
||||
"boolean": True
|
||||
}
|
||||
]
|
||||
{"type": "text", "name": "channels", "text": "rgb"},
|
||||
{"type": "text", "name": "colorspace", "text": "scene_linear"},
|
||||
{"type": "boolean", "name": "create_directories", "boolean": True},
|
||||
],
|
||||
},
|
||||
{
|
||||
"plugins": [
|
||||
"CreateWritePrerender"
|
||||
],
|
||||
"plugins": ["CreateWritePrerender"],
|
||||
"nuke_node_class": "Write",
|
||||
"knobs": [
|
||||
{
|
||||
"type": "text",
|
||||
"name": "file_type",
|
||||
"text": "exr"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "datatype",
|
||||
"text": "16 bit half"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "compression",
|
||||
"text": "Zip (1 scanline)"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "autocrop",
|
||||
"boolean": True
|
||||
},
|
||||
{"type": "text", "name": "file_type", "text": "exr"},
|
||||
{"type": "text", "name": "datatype", "text": "16 bit half"},
|
||||
{"type": "text", "name": "compression", "text": "Zip (1 scanline)"},
|
||||
{"type": "boolean", "name": "autocrop", "boolean": True},
|
||||
{
|
||||
"type": "color_gui",
|
||||
"name": "tile_color",
|
||||
"color_gui": [
|
||||
171,
|
||||
171,
|
||||
10
|
||||
]
|
||||
"color_gui": [171, 171, 10],
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "channels",
|
||||
"text": "rgb"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "colorspace",
|
||||
"text": "scene_linear"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "create_directories",
|
||||
"boolean": True
|
||||
}
|
||||
]
|
||||
{"type": "text", "name": "channels", "text": "rgb"},
|
||||
{"type": "text", "name": "colorspace", "text": "scene_linear"},
|
||||
{"type": "boolean", "name": "create_directories", "boolean": True},
|
||||
],
|
||||
},
|
||||
{
|
||||
"plugins": [
|
||||
"CreateWriteImage"
|
||||
],
|
||||
"plugins": ["CreateWriteImage"],
|
||||
"nuke_node_class": "Write",
|
||||
"knobs": [
|
||||
{
|
||||
"type": "text",
|
||||
"name": "file_type",
|
||||
"text": "tiff"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "datatype",
|
||||
"text": "16 bit"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "compression",
|
||||
"text": "Deflate"
|
||||
},
|
||||
{"type": "text", "name": "file_type", "text": "tiff"},
|
||||
{"type": "text", "name": "datatype", "text": "16 bit"},
|
||||
{"type": "text", "name": "compression", "text": "Deflate"},
|
||||
{
|
||||
"type": "color_gui",
|
||||
"name": "tile_color",
|
||||
"color_gui": [
|
||||
56,
|
||||
162,
|
||||
7
|
||||
]
|
||||
"color_gui": [56, 162, 7],
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "channels",
|
||||
"text": "rgb"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "colorspace",
|
||||
"text": "texture_paint"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "create_directories",
|
||||
"boolean": True
|
||||
}
|
||||
]
|
||||
}
|
||||
{"type": "text", "name": "channels", "text": "rgb"},
|
||||
{"type": "text", "name": "colorspace", "text": "texture_paint"},
|
||||
{"type": "boolean", "name": "create_directories", "boolean": True},
|
||||
],
|
||||
},
|
||||
],
|
||||
"override_nodes": []
|
||||
"override_nodes": [],
|
||||
},
|
||||
"regex_inputs": {
|
||||
"inputs": [
|
||||
{
|
||||
"regex": "(beauty).*(?=.exr)",
|
||||
"colorspace": "linear"
|
||||
}
|
||||
]
|
||||
}
|
||||
"inputs": [{"regex": "(beauty).*(?=.exr)", "colorspace": "linear"}]
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,7 +5,11 @@ from ayon_server.settings import (
|
|||
ensure_unique_names,
|
||||
task_types_enum
|
||||
)
|
||||
from .common import KnobModel, validate_json_dict
|
||||
from .common import (
|
||||
KnobModel,
|
||||
ColorspaceConfigurationModel,
|
||||
validate_json_dict,
|
||||
)
|
||||
|
||||
|
||||
def nuke_render_publish_types_enum():
|
||||
|
|
@ -130,19 +134,22 @@ class IntermediateOutputModel(BaseSettingsModel):
|
|||
title="Filter", default_factory=BakingStreamFilterModel)
|
||||
read_raw: bool = SettingsField(
|
||||
False,
|
||||
title="Read raw switch"
|
||||
)
|
||||
viewer_process_override: str = SettingsField(
|
||||
"",
|
||||
title="Viewer process override"
|
||||
title="Input read node RAW switch"
|
||||
)
|
||||
bake_viewer_process: bool = SettingsField(
|
||||
True,
|
||||
title="Bake viewer process"
|
||||
title="Bake viewer process",
|
||||
section="Baking target",
|
||||
)
|
||||
colorspace_override: ColorspaceConfigurationModel = SettingsField(
|
||||
title="Target baking colorspace override",
|
||||
description="Override Baking target with colorspace or display/view",
|
||||
default_factory=ColorspaceConfigurationModel
|
||||
)
|
||||
bake_viewer_input_process: bool = SettingsField(
|
||||
True,
|
||||
title="Bake viewer input process node (LUT)"
|
||||
title="Bake viewer input process node (LUT)",
|
||||
section="Baking additional",
|
||||
)
|
||||
reformat_nodes_config: ReformatNodesConfigModel = SettingsField(
|
||||
default_factory=ReformatNodesConfigModel,
|
||||
|
|
@ -155,18 +162,6 @@ class IntermediateOutputModel(BaseSettingsModel):
|
|||
title="Custom tags", default_factory=list)
|
||||
|
||||
|
||||
class ExtractReviewDataMovModel(BaseSettingsModel):
|
||||
"""[deprecated] use Extract Review Data Baking
|
||||
Streams instead.
|
||||
"""
|
||||
enabled: bool = SettingsField(title="Enabled")
|
||||
viewer_lut_raw: bool = SettingsField(title="Viewer lut raw")
|
||||
outputs: list[IntermediateOutputModel] = SettingsField(
|
||||
default_factory=list,
|
||||
title="Baking streams"
|
||||
)
|
||||
|
||||
|
||||
class ExtractReviewIntermediatesModel(BaseSettingsModel):
|
||||
enabled: bool = SettingsField(title="Enabled")
|
||||
viewer_lut_raw: bool = SettingsField(title="Viewer lut raw")
|
||||
|
|
@ -259,10 +254,6 @@ class PublishPluginsModel(BaseSettingsModel):
|
|||
title="Extract Review Data Lut",
|
||||
default_factory=ExtractReviewDataLutModel
|
||||
)
|
||||
ExtractReviewDataMov: ExtractReviewDataMovModel = SettingsField(
|
||||
title="Extract Review Data Mov",
|
||||
default_factory=ExtractReviewDataMovModel
|
||||
)
|
||||
ExtractReviewIntermediates: ExtractReviewIntermediatesModel = (
|
||||
SettingsField(
|
||||
title="Extract Review Intermediates",
|
||||
|
|
@ -332,62 +323,6 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
|
|||
"ExtractReviewDataLut": {
|
||||
"enabled": False
|
||||
},
|
||||
"ExtractReviewDataMov": {
|
||||
"enabled": False,
|
||||
"viewer_lut_raw": False,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "baking",
|
||||
"publish": False,
|
||||
"filter": {
|
||||
"task_types": [],
|
||||
"product_types": [],
|
||||
"product_names": []
|
||||
},
|
||||
"read_raw": False,
|
||||
"viewer_process_override": "",
|
||||
"bake_viewer_process": True,
|
||||
"bake_viewer_input_process": True,
|
||||
"reformat_nodes_config": {
|
||||
"enabled": False,
|
||||
"reposition_nodes": [
|
||||
{
|
||||
"node_class": "Reformat",
|
||||
"knobs": [
|
||||
{
|
||||
"type": "text",
|
||||
"name": "type",
|
||||
"text": "to format"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "format",
|
||||
"text": "HD_1080"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "filter",
|
||||
"text": "Lanczos6"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "black_outside",
|
||||
"boolean": True
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"name": "pbb",
|
||||
"boolean": False
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"extension": "mov",
|
||||
"add_custom_tags": []
|
||||
}
|
||||
]
|
||||
},
|
||||
"ExtractReviewIntermediates": {
|
||||
"enabled": True,
|
||||
"viewer_lut_raw": False,
|
||||
|
|
@ -401,7 +336,15 @@ DEFAULT_PUBLISH_PLUGIN_SETTINGS = {
|
|||
"product_names": []
|
||||
},
|
||||
"read_raw": False,
|
||||
"viewer_process_override": "",
|
||||
"colorspace_override": {
|
||||
"enabled": False,
|
||||
"type": "colorspace",
|
||||
"colorspace": "",
|
||||
"display_view": {
|
||||
"display": "",
|
||||
"view": ""
|
||||
}
|
||||
},
|
||||
"bake_viewer_process": True,
|
||||
"bake_viewer_input_process": True,
|
||||
"reformat_nodes_config": {
|
||||
|
|
|
|||
|
|
@ -145,7 +145,9 @@ def get_new_timeline(timeline_name: str = None):
|
|||
return new_timeline
|
||||
|
||||
|
||||
def create_bin(name: str, root: object = None) -> object:
|
||||
def create_bin(name: str,
|
||||
root: object = None,
|
||||
set_as_current: bool = True) -> object:
|
||||
"""
|
||||
Create media pool's folder.
|
||||
|
||||
|
|
@ -156,6 +158,8 @@ def create_bin(name: str, root: object = None) -> object:
|
|||
Args:
|
||||
name (str): name of folder / bin, or hierarchycal name "parent/name"
|
||||
root (resolve.Folder)[optional]: root folder / bin object
|
||||
set_as_current (resolve.Folder)[optional]: Whether to set the
|
||||
resulting bin as current folder or not.
|
||||
|
||||
Returns:
|
||||
object: resolve.Folder
|
||||
|
|
@ -168,22 +172,24 @@ def create_bin(name: str, root: object = None) -> object:
|
|||
if "/" in name.replace("\\", "/"):
|
||||
child_bin = None
|
||||
for bname in name.split("/"):
|
||||
child_bin = create_bin(bname, child_bin or root_bin)
|
||||
child_bin = create_bin(bname,
|
||||
root=child_bin or root_bin,
|
||||
set_as_current=set_as_current)
|
||||
if child_bin:
|
||||
return child_bin
|
||||
else:
|
||||
created_bin = None
|
||||
# Find existing folder or create it
|
||||
for subfolder in root_bin.GetSubFolderList():
|
||||
if subfolder.GetName() in name:
|
||||
if subfolder.GetName() == name:
|
||||
created_bin = subfolder
|
||||
|
||||
if not created_bin:
|
||||
new_folder = media_pool.AddSubFolder(root_bin, name)
|
||||
media_pool.SetCurrentFolder(new_folder)
|
||||
break
|
||||
else:
|
||||
created_bin = media_pool.AddSubFolder(root_bin, name)
|
||||
|
||||
if set_as_current:
|
||||
media_pool.SetCurrentFolder(created_bin)
|
||||
|
||||
return media_pool.GetCurrentFolder()
|
||||
return created_bin
|
||||
|
||||
|
||||
def remove_media_pool_item(media_pool_item: object) -> bool:
|
||||
|
|
@ -272,8 +278,7 @@ def create_timeline_item(
|
|||
# get all variables
|
||||
project = get_current_project()
|
||||
media_pool = project.GetMediaPool()
|
||||
_clip_property = media_pool_item.GetClipProperty
|
||||
clip_name = _clip_property("File Name")
|
||||
clip_name = media_pool_item.GetClipProperty("File Name")
|
||||
timeline = timeline or get_current_timeline()
|
||||
|
||||
# timing variables
|
||||
|
|
@ -298,16 +303,22 @@ def create_timeline_item(
|
|||
if source_end:
|
||||
clip_data["endFrame"] = source_end
|
||||
if timecode_in:
|
||||
# Note: specifying a recordFrame will fail to place the timeline
|
||||
# item if there's already an existing clip at that time on the
|
||||
# active track.
|
||||
clip_data["recordFrame"] = timeline_in
|
||||
|
||||
# add to timeline
|
||||
media_pool.AppendToTimeline([clip_data])
|
||||
output_timeline_item = media_pool.AppendToTimeline([clip_data])[0]
|
||||
|
||||
output_timeline_item = get_timeline_item(
|
||||
media_pool_item, timeline)
|
||||
# Adding the item may fail whilst Resolve will still return a
|
||||
# TimelineItem instance - however all `Get*` calls return None
|
||||
# Hence, we check whether the result is valid
|
||||
if output_timeline_item.GetDuration() is None:
|
||||
output_timeline_item = None
|
||||
|
||||
assert output_timeline_item, AssertionError((
|
||||
"Clip name '{}' was't created on the timeline: '{}' \n\n"
|
||||
"Clip name '{}' wasn't created on the timeline: '{}' \n\n"
|
||||
"Please check if correct track position is activated, \n"
|
||||
"or if a clip is not already at the timeline in \n"
|
||||
"position: '{}' out: '{}'. \n\n"
|
||||
|
|
@ -947,3 +958,13 @@ def get_reformated_path(path, padded=False, first=False):
|
|||
else:
|
||||
path = re.sub(num_pattern, "%d", path)
|
||||
return path
|
||||
|
||||
|
||||
def iter_all_media_pool_clips():
|
||||
"""Recursively iterate all media pool clips in current project"""
|
||||
root = get_current_project().GetMediaPool().GetRootFolder()
|
||||
queue = [root]
|
||||
for folder in queue:
|
||||
for clip in folder.GetClipList():
|
||||
yield clip
|
||||
queue.extend(folder.GetSubFolderList())
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
Basic avalon integration
|
||||
"""
|
||||
import os
|
||||
import json
|
||||
import contextlib
|
||||
from collections import OrderedDict
|
||||
|
||||
|
|
@ -12,6 +13,7 @@ from ayon_core.pipeline import (
|
|||
schema,
|
||||
register_loader_plugin_path,
|
||||
register_creator_plugin_path,
|
||||
register_inventory_action_path,
|
||||
AVALON_CONTAINER_ID,
|
||||
)
|
||||
from ayon_core.host import (
|
||||
|
|
@ -38,6 +40,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
|
|||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
|
||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
|
||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
|
||||
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
|
||||
|
||||
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
|
||||
|
||||
|
|
@ -65,6 +68,7 @@ class ResolveHost(HostBase, IWorkfileHost, ILoadHost):
|
|||
|
||||
register_loader_plugin_path(LOAD_PATH)
|
||||
register_creator_plugin_path(CREATE_PATH)
|
||||
register_inventory_action_path(INVENTORY_PATH)
|
||||
|
||||
# register callback for switching publishable
|
||||
pyblish.register_callback("instanceToggled",
|
||||
|
|
@ -145,6 +149,26 @@ def ls():
|
|||
and the Maya equivalent, which is in `avalon.maya.pipeline`
|
||||
"""
|
||||
|
||||
# Media Pool instances from Load Media loader
|
||||
for clip in lib.iter_all_media_pool_clips():
|
||||
data = clip.GetMetadata(lib.pype_tag_name)
|
||||
if not data:
|
||||
continue
|
||||
data = json.loads(data)
|
||||
|
||||
# If not all required data, skip it
|
||||
required = ['schema', 'id', 'loader', 'representation']
|
||||
if not all(key in data for key in required):
|
||||
continue
|
||||
|
||||
container = {key: data[key] for key in required}
|
||||
container["objectName"] = clip.GetName() # Get path in folders
|
||||
container["namespace"] = clip.GetName()
|
||||
container["name"] = clip.GetUniqueId()
|
||||
container["_item"] = clip
|
||||
yield container
|
||||
|
||||
# Timeline instances from Load Clip loader
|
||||
# get all track items from current timeline
|
||||
all_timeline_items = lib.get_current_timeline_items(filter=False)
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,31 @@
|
|||
from ayon_core.pipeline import (
|
||||
InventoryAction,
|
||||
)
|
||||
from ayon_core.pipeline.load.utils import remove_container
|
||||
|
||||
|
||||
class RemoveUnusedMedia(InventoryAction):
|
||||
|
||||
label = "Remove Unused Selected Media"
|
||||
icon = "trash"
|
||||
|
||||
@staticmethod
|
||||
def is_compatible(container):
|
||||
return (
|
||||
container.get("loader") == "LoadMedia"
|
||||
)
|
||||
|
||||
def process(self, containers):
|
||||
any_removed = False
|
||||
for container in containers:
|
||||
media_pool_item = container["_item"]
|
||||
usage = int(media_pool_item.GetClipProperty("Usage"))
|
||||
name = media_pool_item.GetName()
|
||||
if usage == 0:
|
||||
print(f"Removing {name}")
|
||||
remove_container(container)
|
||||
any_removed = True
|
||||
else:
|
||||
print(f"Keeping {name} with usage: {usage}")
|
||||
|
||||
return any_removed
|
||||
|
|
@ -0,0 +1,533 @@
|
|||
import json
|
||||
import contextlib
|
||||
from pathlib import Path
|
||||
from collections import defaultdict
|
||||
from typing import Union, List, Optional, TypedDict, Tuple
|
||||
|
||||
from ayon_api import version_is_latest
|
||||
from ayon_core.lib import StringTemplate
|
||||
from ayon_core.pipeline.colorspace import get_remapped_colorspace_to_native
|
||||
from ayon_core.pipeline import (
|
||||
Anatomy,
|
||||
LoaderPlugin,
|
||||
get_representation_path,
|
||||
registered_host
|
||||
)
|
||||
from ayon_core.pipeline.load import get_representation_path_with_anatomy
|
||||
from ayon_core.lib.transcoding import (
|
||||
VIDEO_EXTENSIONS,
|
||||
IMAGE_EXTENSIONS
|
||||
)
|
||||
from ayon_core.lib import BoolDef
|
||||
from ayon_resolve.api import lib
|
||||
from ayon_resolve.api.pipeline import AVALON_CONTAINER_ID
|
||||
|
||||
|
||||
FRAME_SPLITTER = "__frame_splitter__"
|
||||
|
||||
|
||||
class MetadataEntry(TypedDict):
|
||||
"""Metadata entry is dict with {"name": "key", "value: "value"}"""
|
||||
name: str
|
||||
value: str
|
||||
|
||||
|

@contextlib.contextmanager
def project_color_science_mode(project=None, mode="davinciYRGBColorManagedv2"):
    """Set project color science mode during context.

    This is especially useful as context for setting the colorspace for media
    pool items, because when Resolve is not set to `davinciYRGBColorManagedv2`
    it fails to set its "Input Color Space" clip property even though it is
    accessible and settable via the Resolve User Interface.

    Args:
        project (Project): The active Resolve Project.
        mode (Optional[str]): The color science mode to apply during the
            context. Defaults to 'davinciYRGBColorManagedv2'.

    See Also:
        https://forum.blackmagicdesign.com/viewtopic.php?f=21&t=197441
    """

    if project is None:
        project = lib.get_current_project()

    original_mode = project.GetSetting("colorScienceMode")
    if original_mode != mode:
        project.SetSetting("colorScienceMode", mode)
    try:
        yield
    finally:
        if project.GetSetting("colorScienceMode") != original_mode:
            project.SetSetting("colorScienceMode", original_mode)


def set_colorspace(media_pool_item,
                   colorspace,
                   mode="davinciYRGBColorManagedv2"):
    """Set MediaPoolItem colorspace.

    This implements a workaround: the input colorspace cannot be set unless
    the Resolve project's color science mode is set to
    `davinciYRGBColorManagedv2`.

    Args:
        media_pool_item (MediaPoolItem): The media pool item.
        colorspace (str): The colorspace to apply.
        mode (Optional[str]): The Resolve project color science mode to be in
            while setting the colorspace.
            Defaults to 'davinciYRGBColorManagedv2'.

    Returns:
        bool: Whether applying the colorspace succeeded.
    """
    with project_color_science_mode(mode=mode):
        return media_pool_item.SetClipProperty("Input Color Space", colorspace)

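# Usage sketch (illustrative, not part of the loader): applying a colorspace
# to the first clip in the media pool. The colorspace name is an example and
# assumes an ACES-managed project.
#
#     project = lib.get_current_project()
#     root_folder = project.GetMediaPool().GetRootFolder()
#     clip = root_folder.GetClipList()[0]
#     if not set_colorspace(clip, "ACES - ACEScg"):
#         print("Failed to set Input Color Space")
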
def find_clip_usage(media_pool_item, project=None):
    """Return all Timeline Items in the project using the Media Pool Item.

    Each entry in the list is a tuple of Timeline and TimelineItem so that
    it's easy to know which Timeline the TimelineItem belongs to.

    Arguments:
        media_pool_item (MediaPoolItem): The Media Pool Item to search for.
        project (Project): The Resolve project the media pool item resides in.

    Returns:
        List[Tuple[Timeline, TimelineItem]]: A list of 2-tuples of a timeline
            with the timeline item.

    """
    usage = int(media_pool_item.GetClipProperty("Usage"))
    if not usage:
        return []

    if project is None:
        project = lib.get_current_project()

    matching_items = []
    unique_id = media_pool_item.GetUniqueId()
    for timeline_idx in range(project.GetTimelineCount()):
        timeline = project.GetTimelineByIndex(timeline_idx + 1)

        # Consider audio and video tracks
        for track_type in ["video", "audio"]:
            for track_idx in range(timeline.GetTrackCount(track_type)):
                timeline_items = timeline.GetItemListInTrack(track_type,
                                                             track_idx + 1)
                for timeline_item in timeline_items:
                    timeline_item_mpi = timeline_item.GetMediaPoolItem()
                    if not timeline_item_mpi:
                        continue

                    if timeline_item_mpi.GetUniqueId() == unique_id:
                        matching_items.append((timeline, timeline_item))
                        usage -= 1
                        if usage <= 0:
                            # If there is no usage left after this found
                            # entry we return early
                            return matching_items

    return matching_items

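# Illustrative only: listing where a media pool item is used, based on the
# (timeline, timeline_item) tuples returned above. Continues the sketch from
# the previous example and assumes `clip` and `project` are available.
#
#     for timeline, timeline_item in find_clip_usage(clip, project=project):
#         print(f"{timeline.GetName()}: {timeline_item.GetName()}")
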
class LoadMedia(LoaderPlugin):
    """Load product as media pool item."""

    product_types = {"render2d", "source", "plate", "render", "review"}

    representations = ["*"]
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load media"
    order = -20
    icon = "code-fork"
    color = "orange"

    options = [
        BoolDef(
            "load_to_timeline",
            label="Load to timeline",
            default=True,
            tooltip="Whether on load to automatically add it to the current "
                    "timeline"
        ),
        BoolDef(
            "load_once",
            label="Re-use existing",
            default=True,
            tooltip="When enabled - if this particular version is already "
                    "loaded it will not be loaded again but will be re-used."
        )
    ]

    # for loader multiselection
    timeline = None

    # presets
    clip_color_last = "Olive"
    clip_color_old = "Orange"

    media_pool_bin_path = "Loader/{folder[path]}"

    metadata: List[MetadataEntry] = []

    # cached on apply settings
    _host_imageio_settings = None

    @classmethod
    def apply_settings(cls, project_settings):
        super(LoadMedia, cls).apply_settings(project_settings)
        cls._host_imageio_settings = project_settings["resolve"]["imageio"]

    def load(self, context, name, namespace, options):

        # For loading multiselection, we store timeline before first load
        # because the current timeline can change with the imported media.
        if self.timeline is None:
            self.timeline = lib.get_current_timeline()

        representation = context["representation"]
        self._project_name = context["project"]["name"]

        project = lib.get_current_project()
        media_pool = project.GetMediaPool()

        # Allow to use an existing media pool item and re-use it
        item = None
        if options.get("load_once", True):
            host = registered_host()
            repre_id = context["representation"]["id"]
            for container in host.ls():
                if container["representation"] != repre_id:
                    continue

                if container["loader"] != self.__class__.__name__:
                    continue

                print(f"Re-using existing container: {container}")
                item = container["_item"]

        if item is None:
            item = self._import_media_to_bin(
                context, media_pool, representation
            )
        # Always update clip color - even if re-using existing clip
        color = self.get_item_color(context)
        item.SetClipColor(color)

        if options.get("load_to_timeline", True):
            timeline = options.get("timeline", self.timeline)
            if timeline:
                # Add media to active timeline
                lib.create_timeline_item(
                    media_pool_item=item,
                    timeline=timeline
                )

    def _import_media_to_bin(
        self, context, media_pool, representation
    ):
        """Import media to Resolve Media Pool.

        Also create a bin if `media_pool_bin_path` is set.

        Args:
            context (dict): The context dictionary.
            media_pool (resolve.MediaPool): The Resolve Media Pool.
            representation (dict): The representation data.

        Returns:
            resolve.MediaPoolItem: The imported media pool item.
        """
        # Create or set the bin folder, we add it in there
        # If bin path is not set we just add into the current active bin
        if self.media_pool_bin_path:
            media_pool_bin_path = StringTemplate(
                self.media_pool_bin_path).format_strict(context)

            folder = lib.create_bin(
                # double slashes will create unconnected folders
                name=media_pool_bin_path.replace("//", "/"),
                root=media_pool.GetRootFolder(),
                set_as_current=False
            )
            media_pool.SetCurrentFolder(folder)

        # Import media
        # Resolve API: the ImportMedia function requires a list of
        # dictionaries with keys "FilePath", "StartIndex" and "EndIndex"
        # for sequences, but only a string with an absolute path for
        # single files.
        is_sequence, file_info = self._get_file_info(context)
        items = (
            media_pool.ImportMedia([file_info])
            if is_sequence
            else media_pool.ImportMedia([file_info["FilePath"]])
        )
        assert len(items) == 1, "Must import only one media item"

        result = items[0]

        self._set_metadata(result, context)
        self._set_colorspace_from_representation(result, representation)

        data = self._get_container_data(context)

        # Add containerise data only needed on first load
        data.update({
            "schema": "openpype:container-2.0",
            "id": AVALON_CONTAINER_ID,
            "loader": str(self.__class__.__name__),
        })

        result.SetMetadata(lib.pype_tag_name, json.dumps(data))

        return result

    def switch(self, container, context):
        self.update(container, context)

    def update(self, container, context):
        # Update MediaPoolItem filepath and metadata
        item = container["_item"]

        # Get the existing metadata before we update because the
        # metadata gets removed
        data = json.loads(item.GetMetadata(lib.pype_tag_name))

        # Get metadata to preserve after the clip replacement
        # TODO: Maybe preserve more, like LUT, Alpha Mode, Input Sizing Preset
        colorspace_before = item.GetClipProperty("Input Color Space")

        # Update path
        path = get_representation_path(context["representation"])
        success = item.ReplaceClip(path)
        if not success:
            raise RuntimeError(
                f"Failed to replace media pool item clip to filepath: {path}"
            )

        # Update the metadata
        update_data = self._get_container_data(context)
        data.update(update_data)
        item.SetMetadata(lib.pype_tag_name, json.dumps(data))

        self._set_metadata(media_pool_item=item, context=context)
        self._set_colorspace_from_representation(
            item,
            representation=context["representation"]
        )

        # If no specific colorspace is set then we want to preserve the
        # colorspace a user might have set before the clip replacement
        if (
            item.GetClipProperty("Input Color Space") == "Project"
            and colorspace_before != "Project"
        ):
            result = set_colorspace(item, colorspace_before)
            if not result:
                self.log.warning(
                    f"Failed to re-apply colorspace: {colorspace_before}."
                )

        # Update the clip color
        color = self.get_item_color(context)
        item.SetClipColor(color)

    def remove(self, container):
        # Remove MediaPoolItem entry
        project = lib.get_current_project()
        media_pool = project.GetMediaPool()
        item = container["_item"]

        # Delete any usages of the media pool item so there's no trail
        # left in existing timelines. Currently only the media pool item
        # gets removed which fits the Resolve workflow but is confusing
        # to artists.
        usage = find_clip_usage(media_pool_item=item, project=project)
        if usage:
            # Group all timeline items per timeline, so we can delete the
            # clips in the timeline at once. The Resolve objects are not
            # hashable, so we need to store them in the dict by id
            usage_by_timeline = defaultdict(list)
            timeline_by_id = {}
            for timeline, timeline_item in usage:
                timeline_id = timeline.GetUniqueId()
                timeline_by_id[timeline_id] = timeline
                usage_by_timeline[timeline_id].append(timeline_item)

            for timeline_id, timeline_items in usage_by_timeline.items():
                timeline = timeline_by_id[timeline_id]
                timeline.DeleteClips(timeline_items)

        # Delete the media pool item
        media_pool.DeleteClips([item])

    def _get_container_data(self, context: dict) -> dict:
        """Return metadata related to the representation and version."""

        # add additional metadata from the version to imprint AYON knob
        version = context["version"]
        data = {}

        # version.attrib
        for key in [
            "frameStart", "frameEnd",
            "handleStart", "handleEnd",
            "source", "fps", "colorSpace"
        ]:
            data[key] = version["attrib"][key]

        # version.data
        for key in ["author"]:
            data[key] = version["data"][key]

        # add variables related to version context
        data.update({
            "representation": context["representation"]["id"],
            "version": version["name"],
        })

        return data

    @classmethod
    def get_item_color(cls, context: dict) -> str:
        """Return item color name.

        Coloring depends on whether the representation is the latest version.
        """
        # Compare version with the latest version to set the clip colour
        if version_is_latest(project_name=context["project"]["name"],
                             version_id=context["version"]["id"]):
            return cls.clip_color_last
        else:
            return cls.clip_color_old

    def _set_metadata(self, media_pool_item, context: dict):
        """Set Media Pool Item clip properties."""

        # Set more clip metadata based on the loaded clip's context
        for meta_item in self.metadata:
            clip_property = meta_item["name"]
            value = meta_item["value"]
            value_formatted = StringTemplate(value).format_strict(context)
            media_pool_item.SetClipProperty(clip_property, value_formatted)

    def _get_file_info(self, context: dict) -> Tuple[bool, Union[str, dict]]:
        """Return file info for Resolve ImportMedia.

        Args:
            context (dict): The context dictionary.

        Returns:
            Tuple[bool, Union[str, dict]]: A tuple of whether the file is a
                sequence and the file info dictionary.
        """

        representation = context["representation"]
        anatomy = Anatomy(self._project_name)

        # Get path to representation with correct frame number
        repre_path = get_representation_path_with_anatomy(
            representation, anatomy)

        first_frame = representation["context"].get("frame")

        is_sequence = False
        # This is not a sequence
        if first_frame is None:
            return (
                is_sequence, {"FilePath": repre_path}
            )

        # This is a sequence
        is_sequence = True
        repre_files = [
            file["path"].format(root=anatomy.roots)
            for file in representation["files"]
        ]

        # Change frame in representation context to get path with frame
        # splitter.
        representation["context"]["frame"] = FRAME_SPLITTER
        frame_repre_path = get_representation_path_with_anatomy(
            representation, anatomy
        )
        frame_repre_path = Path(frame_repre_path)
        repre_dir, repre_filename = (
            frame_repre_path.parent, frame_repre_path.name)
        # Get sequence prefix and suffix
        file_prefix, file_suffix = repre_filename.split(FRAME_SPLITTER)
        # Get frame number from path as string to get frame padding
        frame_str = str(repre_path)[len(file_prefix):][:len(file_suffix)]
        frame_padding = len(frame_str)

        file_name = f"{file_prefix}%0{frame_padding}d{file_suffix}"

        abs_filepath = Path(repre_dir, file_name)

        start_index = int(first_frame)
        end_index = int(int(first_frame) + len(repre_files) - 1)

        # See Resolve API, to import for example clip "file_[001-100].dpx":
        # ImportMedia([{"FilePath":"file_%03d.dpx",
        #               "StartIndex":1,
        #               "EndIndex":100}])
        return (
            is_sequence,
            {
                "FilePath": abs_filepath.as_posix(),
                "StartIndex": start_index,
                "EndIndex": end_index,
            }
        )

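    # For reference, the two return shapes of _get_file_info (and what ends
    # up being passed to media_pool.ImportMedia) look roughly like this; the
    # paths and frame range are made up for illustration:
    #
    #     (False, {"FilePath": "/path/to/plate.mov"})
    #     (True, {"FilePath": "/path/to/plate.%04d.exr",
    #             "StartIndex": 1001,
    #             "EndIndex": 1100})
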
    def _get_colorspace(self, representation: dict) -> Optional[str]:
        """Return Resolve native colorspace from OCIO colorspace data.

        Returns:
            Optional[str]: The Resolve native colorspace name, if any mapped.
        """

        data = representation.get("data", {}).get("colorspaceData", {})
        if not data:
            return

        ocio_colorspace = data["colorspace"]
        if not ocio_colorspace:
            return

        resolve_colorspace = get_remapped_colorspace_to_native(
            ocio_colorspace_name=ocio_colorspace,
            host_name="resolve",
            imageio_host_settings=self._host_imageio_settings
        )
        if resolve_colorspace:
            return resolve_colorspace
        else:
            self.log.warning(
                f"No mapping from OCIO colorspace '{ocio_colorspace}' "
                "found to a Resolve colorspace. "
                "Ignoring colorspace."
            )

    def _set_colorspace_from_representation(
        self, media_pool_item, representation: dict):
        """Set the colorspace for the media pool item.

        Args:
            media_pool_item (MediaPoolItem): The media pool item.
            representation (dict): The representation data.
        """
        # Set the Resolve Input Color Space for the media.
        colorspace = self._get_colorspace(representation)
        if colorspace:
            result = set_colorspace(media_pool_item, colorspace)
            if not result:
                self.log.warning(
                    f"Failed to apply colorspace: {colorspace}."
                )

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'resolve' version."""
__version__ = "0.2.1"
__version__ = "0.2.2"