diff --git a/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py b/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py
index d1ae5f64fd..6bf0f55081 100644
--- a/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py
+++ b/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py
@@ -1,7 +1,7 @@
-import os
-import platform
+from openpype.lib import PreLaunchHook
 
-from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
+from openpype.pipeline.colorspace import get_imageio_config
+from openpype.pipeline.template_data import get_template_data_with_names
 
 
 class FusionPreLaunchOCIO(PreLaunchHook):
@@ -11,24 +11,22 @@ class FusionPreLaunchOCIO(PreLaunchHook):
     def execute(self):
         """Hook entry method."""
-        # get image io
-        project_settings = self.data["project_settings"]
+        template_data = get_template_data_with_names(
+            project_name=self.data["project_name"],
+            asset_name=self.data["asset_name"],
+            task_name=self.data["task_name"],
+            host_name=self.host_name,
+            system_settings=self.data["system_settings"]
+        )
 
-        # make sure anatomy settings are having flame key
-        imageio_fusion = project_settings["fusion"]["imageio"]
-
-        ocio = imageio_fusion.get("ocio")
-        enabled = ocio.get("enabled", False)
-        if not enabled:
-            return
-
-        platform_key = platform.system().lower()
-        ocio_path = ocio["configFilePath"][platform_key]
-        if not ocio_path:
-            raise ApplicationLaunchFailed(
-                "Fusion OCIO is enabled in project settings but no OCIO config"
-                f"path is set for your current platform: {platform_key}"
-            )
+        config_data = get_imageio_config(
+            project_name=self.data["project_name"],
+            host_name=self.host_name,
+            project_settings=self.data["project_settings"],
+            anatomy_data=template_data,
+            anatomy=self.data["anatomy"]
+        )
+        ocio_path = config_data["path"]
 
         self.log.info(f"Setting OCIO config path: {ocio_path}")
 
-        self.launch_context.env["OCIO"] = os.pathsep.join(ocio_path)
+        self.launch_context.env["OCIO"] = ocio_path
diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py
index 542c3c1a51..38fd41c8b2 100644
--- a/openpype/hosts/fusion/plugins/load/load_sequence.py
+++ b/openpype/hosts/fusion/plugins/load/load_sequence.py
@@ -1,11 +1,9 @@
-import os
 import contextlib
 
-from openpype.client import get_version_by_id
-from openpype.pipeline import (
-    load,
-    legacy_io,
-    get_representation_path,
+import openpype.pipeline.load as load
+from openpype.pipeline.load import (
+    get_representation_context,
+    get_representation_path_from_context
 )
 from openpype.hosts.fusion.api import (
     imprint_container,
@@ -148,7 +146,7 @@ class FusionLoadSequence(load.LoaderPlugin):
         namespace = context['asset']['name']
 
         # Use the first file for now
-        path = self._get_first_image(os.path.dirname(self.fname))
+        path = get_representation_path_from_context(context)
 
         # Create the Loader with the filename path set
         comp = get_current_comp()
@@ -217,13 +215,11 @@ class FusionLoadSequence(load.LoaderPlugin):
         assert tool.ID == "Loader", "Must be Loader"
         comp = tool.Comp()
 
-        root = os.path.dirname(get_representation_path(representation))
-        path = self._get_first_image(root)
+        context = get_representation_context(representation)
+        path = get_representation_path_from_context(context)
 
         # Get start frame from version data
-        project_name = legacy_io.active_project()
-        version = get_version_by_id(project_name, representation["parent"])
-        start = self._get_start(version, tool)
+        start = self._get_start(context["version"], tool)
 
         with comp_lock_and_undo_chunk(comp, "Update Loader"):
 
@@ -256,11 +252,6 @@ class FusionLoadSequence(load.LoaderPlugin):
         with comp_lock_and_undo_chunk(comp, "Remove Loader"):
             tool.Delete()
 
-    def _get_first_image(self, root):
-        """Get first file in representation root"""
-        files = sorted(os.listdir(root))
-        return os.path.join(root, files[0])
-
     def _get_start(self, version_doc, tool):
         """Return real start frame of published files (incl. handles)"""
         data = version_doc["data"]
diff --git a/openpype/hosts/fusion/plugins/publish/collect_inputs.py b/openpype/hosts/fusion/plugins/publish/collect_inputs.py
index e06649da99..1bb3cd1220 100644
--- a/openpype/hosts/fusion/plugins/publish/collect_inputs.py
+++ b/openpype/hosts/fusion/plugins/publish/collect_inputs.py
@@ -1,5 +1,3 @@
-from bson.objectid import ObjectId
-
 import pyblish.api
 
 from openpype.pipeline import registered_host
@@ -113,7 +111,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
 
         # Collect containers for the given set of nodes
         containers = collect_input_containers(nodes)
 
-        inputs = [ObjectId(c["representation"]) for c in containers]
+        inputs = [c["representation"] for c in containers]
         instance.data["inputRepresentations"] = inputs
-        self.log.info("Collected inputs: %s" % inputs)
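Note on the loader change above: path resolution now goes through the representation context rather than listing files on disk. A minimal sketch of that lookup with the same helpers imported in the diff (the standalone wrapper function is illustrative only, not part of the PR):

from openpype.pipeline.load import (
    get_representation_context,
    get_representation_path_from_context,
)


def resolve_loader_path(representation_doc):
    # Build the full project/asset/subset/version/representation context
    # from the representation document, then let the pipeline resolve the
    # published file path instead of os.listdir() on the representation folder.
    context = get_representation_context(representation_doc)
    return get_representation_path_from_context(context)
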
diff --git a/openpype/hosts/fusion/plugins/publish/render_local.py b/openpype/hosts/fusion/plugins/publish/render_local.py
index 30943edd4b..7d5f1a40c7 100644
--- a/openpype/hosts/fusion/plugins/publish/render_local.py
+++ b/openpype/hosts/fusion/plugins/publish/render_local.py
@@ -1,9 +1,11 @@
 import os
 
 import pyblish.api
 
+from openpype.pipeline import publish
 from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
 
 
-class Fusionlocal(pyblish.api.InstancePlugin):
+class Fusionlocal(pyblish.api.InstancePlugin,
+                  publish.ColormanagedPyblishPluginMixin):
     """Render the current Fusion composition locally.
 
     Extract the result of savers by starting a comp render
@@ -17,18 +19,20 @@ class Fusionlocal(pyblish.api.InstancePlugin):
     families = ["render.local"]
 
     def process(self, instance):
-
-        # This plug-in runs only once and thus assumes all instances
-        # currently will render the same frame range
         context = instance.context
-        key = f"__hasRun{self.__class__.__name__}"
-        if context.data.get(key, False):
-            return
-
-        context.data[key] = True
 
+        # Start render
         self.render_once(context)
 
+        # Log render status
+        self.log.info(
+            "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
+                nm=instance.data["name"],
+                ast=instance.data["asset"],
+                tsk=instance.data["task"],
+            )
+        )
+
         frame_start = context.data["frameStartHandle"]
         frame_end = context.data["frameEndHandle"]
         path = instance.data["path"]
@@ -41,13 +45,18 @@ class Fusionlocal(pyblish.api.InstancePlugin):
             for frame in range(frame_start, frame_end + 1)
         ]
         repre = {
-            'name': ext[1:],
-            'ext': ext[1:],
-            'frameStart': f"%0{len(str(frame_end))}d" % frame_start,
-            'files': files,
+            "name": ext[1:],
+            "ext": ext[1:],
+            "frameStart": f"%0{len(str(frame_end))}d" % frame_start,
+            "files": files,
             "stagingDir": output_dir,
         }
 
+        self.set_representation_colorspace(
+            representation=repre,
+            context=context,
+        )
+
         if "representations" not in instance.data:
             instance.data["representations"] = []
         instance.data["representations"].append(repre)
@@ -59,20 +68,33 @@ class Fusionlocal(pyblish.api.InstancePlugin):
 
     def render_once(self, context):
         """Render context comp only once, even with more render instances"""
-        current_comp = context.data["currentComp"]
-        frame_start = context.data["frameStartHandle"]
-        frame_end = context.data["frameEndHandle"]
+        # This plug-in assumes all render nodes get rendered at the same time
+        # to speed up the rendering. The check below makes sure that we only
+        # execute the rendering once and not for each instance.
+        key = f"__hasRun{self.__class__.__name__}"
+        if key not in context.data:
+            # We initialize as false to indicate it wasn't successful yet
+            # so we can keep track of whether Fusion succeeded
+            context.data[key] = False
 
-        self.log.info("Starting render")
-        self.log.info(f"Start frame: {frame_start}")
-        self.log.info(f"End frame: {frame_end}")
+            current_comp = context.data["currentComp"]
+            frame_start = context.data["frameStartHandle"]
+            frame_end = context.data["frameEndHandle"]
 
-        with comp_lock_and_undo_chunk(current_comp):
-            result = current_comp.Render({
-                "Start": frame_start,
-                "End": frame_end,
-                "Wait": True
-            })
+            self.log.info("Starting Fusion render")
+            self.log.info(f"Start frame: {frame_start}")
+            self.log.info(f"End frame: {frame_end}")
 
-        if not result:
+            with comp_lock_and_undo_chunk(current_comp):
+                result = current_comp.Render(
+                    {
+                        "Start": frame_start,
+                        "End": frame_end,
+                        "Wait": True,
+                    }
+                )
+
+            context.data[key] = bool(result)
+
+        if context.data[key] is False:
             raise RuntimeError("Comp render failed")
diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
index 0b54b244bb..6411376ea3 100644
--- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py
+++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py
@@ -1,5 +1,3 @@
-from bson.objectid import ObjectId
-
 import pyblish.api
 
 from openpype.pipeline import registered_host
@@ -117,7 +115,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
 
         # Collect containers for the given set of nodes
         containers = collect_input_containers(nodes)
 
-        inputs = [ObjectId(c["representation"]) for c in containers]
+        inputs = [c["representation"] for c in containers]
         instance.data["inputRepresentations"] = inputs
-        self.log.info("Collected inputs: %s" % inputs)
diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py
index 2f550e787a..90ab6e21e0 100644
--- a/openpype/hosts/maya/api/workfile_template_builder.py
+++ b/openpype/hosts/maya/api/workfile_template_builder.py
@@ -22,6 +22,8 @@ PLACEHOLDER_SET = "PLACEHOLDERS_SET"
 class MayaTemplateBuilder(AbstractTemplateBuilder):
     """Concrete implementation of AbstractTemplateBuilder for maya"""
 
+    use_legacy_creators = True
+
     def import_template(self, path):
         """Import template into current scene.
         Block if a template is already loaded.
diff --git a/openpype/hosts/maya/plugins/publish/collect_inputs.py b/openpype/hosts/maya/plugins/publish/collect_inputs.py
index 470fceffc9..9c3f0f5efa 100644
--- a/openpype/hosts/maya/plugins/publish/collect_inputs.py
+++ b/openpype/hosts/maya/plugins/publish/collect_inputs.py
@@ -1,5 +1,4 @@
 import copy
-from bson.objectid import ObjectId
 
 from maya import cmds
 import maya.api.OpenMaya as om
@@ -165,9 +164,8 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
 
         containers = collect_input_containers(scene_containers,
                                               nodes)
 
-        inputs = [ObjectId(c["representation"]) for c in containers]
+        inputs = [c["representation"] for c in containers]
         instance.data["inputRepresentations"] = inputs
-        self.log.info("Collected inputs: %s" % inputs)
 
     def _collect_renderlayer_inputs(self, scene_containers, instance):
diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py
index 6c2d4b84be..aec87be5ab 100644
--- a/openpype/hosts/nuke/api/plugin.py
+++ b/openpype/hosts/nuke/api/plugin.py
@@ -239,7 +239,11 @@ class NukeCreator(NewCreator):
 
     def get_pre_create_attr_defs(self):
         return [
-            BoolDef("use_selection", label="Use selection")
+            BoolDef(
+                "use_selection",
+                default=not self.create_context.headless,
+                label="Use selection"
+            )
         ]
 
     def get_creator_settings(self, project_settings, settings_key=None):
diff --git a/openpype/hosts/nuke/plugins/create/create_write_image.py b/openpype/hosts/nuke/plugins/create/create_write_image.py
index 1e23b3ad7f..d38253ab2f 100644
--- a/openpype/hosts/nuke/plugins/create/create_write_image.py
+++ b/openpype/hosts/nuke/plugins/create/create_write_image.py
@@ -35,7 +35,7 @@ class CreateWriteImage(napi.NukeWriteCreator):
         attr_defs = [
             BoolDef(
                 "use_selection",
-                default=True,
+                default=not self.create_context.headless,
                 label="Use selection"
             ),
             self._get_render_target_enum(),
diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py
index 1603bf17e3..8103cb7c4d 100644
--- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py
+++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py
@@ -34,7 +34,7 @@ class CreateWritePrerender(napi.NukeWriteCreator):
         attr_defs = [
             BoolDef(
                 "use_selection",
-                default=True,
+                default=not self.create_context.headless,
                 label="Use selection"
             ),
             self._get_render_target_enum()
diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py
index 72fcb4f232..23efa62e36 100644
--- a/openpype/hosts/nuke/plugins/create/create_write_render.py
+++ b/openpype/hosts/nuke/plugins/create/create_write_render.py
@@ -31,7 +31,7 @@ class CreateWriteRender(napi.NukeWriteCreator):
         attr_defs = [
             BoolDef(
                 "use_selection",
-                default=True,
+                default=not self.create_context.headless,
                 label="Use selection"
            ),
             self._get_render_target_enum()
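All four Nuke creators above now derive the "use_selection" default from the create context instead of hard-coding True, so headless runs (for example the workfile template builder) no longer assume a node selection. A sketch of the same pattern for any new-style creator (ExampleCreator is a stand-in, not part of the PR):

from openpype.lib import BoolDef
from openpype.pipeline.create import Creator


class ExampleCreator(Creator):  # stand-in for a concrete creator
    def get_pre_create_attr_defs(self):
        return [
            BoolDef(
                "use_selection",
                # Respect the artist's selection in an interactive session;
                # a headless CreateContext has nothing selected.
                default=not self.create_context.headless,
                label="Use selection",
            )
        ]
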
diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py
index a4377a9972..89ba6ad4e6 100644
--- a/openpype/hosts/photoshop/api/launch_logic.py
+++ b/openpype/hosts/photoshop/api/launch_logic.py
@@ -10,10 +10,20 @@ from wsrpc_aiohttp import (
 
 from qtpy import QtCore
 
-from openpype.lib import Logger
-from openpype.pipeline import legacy_io
+from openpype.lib import Logger, StringTemplate
+from openpype.pipeline import (
+    registered_host,
+    Anatomy,
+)
+from openpype.pipeline.workfile import (
+    get_workfile_template_key_from_context,
+    get_last_workfile,
+)
+from openpype.pipeline.template_data import get_template_data_with_names
 from openpype.tools.utils import host_tools
 
 from openpype.tools.adobe_webserver.app import WebServerTool
+from openpype.pipeline.context_tools import change_current_context
+from openpype.client import get_asset_by_name
 
 from .ws_stub import PhotoshopServerStub
@@ -310,23 +320,28 @@ class PhotoshopRoute(WebSocketRoute):
     # client functions
     async def set_context(self, project, asset, task):
         """
-        Sets 'project' and 'asset' to envs, eg. setting context
+        Sets 'project' and 'asset' to envs, e.g. setting context.
 
-        Args:
-            project (str)
-            asset (str)
+        Opens last workfile from that context if it exists.
+
+        Args:
+            project (str)
+            asset (str)
+            task (str)
         """
         log.info("Setting context change")
-        log.info("project {} asset {} ".format(project, asset))
-        if project:
-            legacy_io.Session["AVALON_PROJECT"] = project
-            os.environ["AVALON_PROJECT"] = project
-        if asset:
-            legacy_io.Session["AVALON_ASSET"] = asset
-            os.environ["AVALON_ASSET"] = asset
-        if task:
-            legacy_io.Session["AVALON_TASK"] = task
-            os.environ["AVALON_TASK"] = task
+        log.info(f"project {project} asset {asset} task {task}")
+
+        asset_doc = get_asset_by_name(project, asset)
+        change_current_context(asset_doc, task)
+
+        last_workfile_path = self._get_last_workfile_path(project,
+                                                          asset,
+                                                          task)
+        if last_workfile_path and os.path.exists(last_workfile_path):
+            ProcessLauncher.execute_in_main_thread(
+                lambda: stub().open(last_workfile_path))
+
     async def read(self):
         log.debug("photoshop.read client calls server server calls "
@@ -356,3 +371,35 @@ class PhotoshopRoute(WebSocketRoute):
 
         # Required return statement.
         return "nothing"
+
+    def _get_last_workfile_path(self, project_name, asset_name, task_name):
+        """Returns last workfile path if it exists"""
+        host = registered_host()
+        host_name = "photoshop"
+        template_key = get_workfile_template_key_from_context(
+            asset_name,
+            task_name,
+            host_name,
+            project_name=project_name
+        )
+        anatomy = Anatomy(project_name)
+
+        data = get_template_data_with_names(
+            project_name, asset_name, task_name, host_name
+        )
+        data["root"] = anatomy.roots
+
+        file_template = anatomy.templates[template_key]["file"]
+
+        # Define saving file extension
+        extensions = host.get_workfile_extensions()
+
+        folder_template = anatomy.templates[template_key]["folder"]
+        work_root = StringTemplate.format_strict_template(
+            folder_template, data
+        )
+        last_workfile_path = get_last_workfile(
+            work_root, file_template, data, extensions, True
+        )
+
+        return last_workfile_path
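The new _get_last_workfile_path() resolves the workfile template key, fills the work folder template and asks for the newest matching workfile. Roughly the same lookup as a standalone function, reusing only the helpers imported above (the wrapper itself is illustrative):

from openpype.lib import StringTemplate
from openpype.pipeline import Anatomy, registered_host
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.pipeline.workfile import (
    get_workfile_template_key_from_context,
    get_last_workfile,
)


def find_last_workfile(project_name, asset_name, task_name, host_name):
    # Which workfile anatomy template applies to this context.
    template_key = get_workfile_template_key_from_context(
        asset_name, task_name, host_name, project_name=project_name
    )
    anatomy = Anatomy(project_name)
    data = get_template_data_with_names(
        project_name, asset_name, task_name, host_name
    )
    data["root"] = anatomy.roots

    # Fill the work folder template, then pick the newest file matching
    # the file template and the host's workfile extensions.
    work_root = StringTemplate.format_strict_template(
        anatomy.templates[template_key]["folder"], data
    )
    return get_last_workfile(
        work_root,
        anatomy.templates[template_key]["file"],
        data,
        registered_host().get_workfile_extensions(),
        True,  # return the full path, not just the file name
    )
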
diff --git a/openpype/pipeline/workfile/workfile_template_builder.py b/openpype/pipeline/workfile/workfile_template_builder.py
index 119e4aaeb7..27214af79f 100644
--- a/openpype/pipeline/workfile/workfile_template_builder.py
+++ b/openpype/pipeline/workfile/workfile_template_builder.py
@@ -43,7 +43,8 @@ from openpype.pipeline.load import (
     load_with_repre_context,
 )
 from openpype.pipeline.create import (
-    discover_legacy_creator_plugins
+    discover_legacy_creator_plugins,
+    CreateContext,
 )
 
 
@@ -91,6 +92,7 @@ class AbstractTemplateBuilder(object):
     """
 
     _log = None
+    use_legacy_creators = False
 
     def __init__(self, host):
         # Get host name
@@ -110,6 +112,7 @@
         self._placeholder_plugins = None
         self._loaders_by_name = None
         self._creators_by_name = None
+        self._create_context = None
 
         self._system_settings = None
         self._project_settings = None
@@ -171,6 +174,16 @@
             .get("type")
         )
 
+    @property
+    def create_context(self):
+        if self._create_context is None:
+            self._create_context = CreateContext(
+                self.host,
+                discover_publish_plugins=False,
+                headless=True
+            )
+        return self._create_context
+
     def get_placeholder_plugin_classes(self):
         """Get placeholder plugin classes that can be used to build template.
 
@@ -235,18 +248,29 @@ class AbstractTemplateBuilder(object):
             self._loaders_by_name = get_loaders_by_name()
         return self._loaders_by_name
 
+    def _collect_legacy_creators(self):
+        creators_by_name = {}
+        for creator in discover_legacy_creator_plugins():
+            if not creator.enabled:
+                continue
+            creator_name = creator.__name__
+            if creator_name in creators_by_name:
+                raise KeyError(
+                    "Duplicated creator name {} !".format(creator_name)
+                )
+            creators_by_name[creator_name] = creator
+        self._creators_by_name = creators_by_name
+
+    def _collect_creators(self):
+        self._creators_by_name = dict(self.create_context.creators)
+
     def get_creators_by_name(self):
         if self._creators_by_name is None:
-            self._creators_by_name = {}
-            for creator in discover_legacy_creator_plugins():
-                if not creator.enabled:
-                    continue
-                creator_name = creator.__name__
-                if creator_name in self._creators_by_name:
-                    raise KeyError(
-                        "Duplicated creator name {} !".format(creator_name)
-                    )
-                self._creators_by_name[creator_name] = creator
+            if self.use_legacy_creators:
+                self._collect_legacy_creators()
+            else:
+                self._collect_creators()
+
         return self._creators_by_name
 
     def get_shared_data(self, key):
@@ -1579,6 +1603,8 @@ class PlaceholderCreateMixin(object):
             placeholder (PlaceholderItem): Placeholder item with information
                 about requested publishable instance.
         """
+
+        legacy_create = self.builder.use_legacy_creators
 
         creator_name = placeholder.data["creator"]
         create_variant = placeholder.data["create_variant"]
@@ -1589,17 +1615,28 @@
         task_name = legacy_io.Session["AVALON_TASK"]
         asset_name = legacy_io.Session["AVALON_ASSET"]
 
-        # get asset id
-        asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
-        assert asset_doc, "No current asset found in Session"
-        asset_id = asset_doc['_id']
+        if legacy_create:
+            asset_doc = get_asset_by_name(
+                project_name, asset_name, fields=["_id"]
+            )
+            assert asset_doc, "No current asset found in Session"
+            subset_name = creator_plugin.get_subset_name(
+                create_variant,
+                task_name,
+                asset_doc["_id"],
+                project_name
+            )
 
-        subset_name = creator_plugin.get_subset_name(
-            create_variant,
-            task_name,
-            asset_id,
-            project_name
-        )
+        else:
+            asset_doc = get_asset_by_name(project_name, asset_name)
+            assert asset_doc, "No current asset found in Session"
+            subset_name = creator_plugin.get_subset_name(
+                create_variant,
+                task_name,
+                asset_doc,
+                project_name,
+                self.builder.host_name
+            )
 
         creator_data = {
             "creator_name": creator_name,
@@ -1612,12 +1649,20 @@
 
         # compile subset name from variant
         try:
-            creator_instance = creator_plugin(
-                subset_name,
-                asset_name
-            ).process()
+            if legacy_create:
+                creator_instance = creator_plugin(
+                    subset_name,
+                    asset_name
+                ).process()
+            else:
+                creator_instance = self.builder.create_context.create(
+                    creator_plugin.identifier,
+                    create_variant,
+                    asset_doc,
+                    task_name=task_name
+                )
 
-        except Exception:
+        except:  # noqa: E722
             failed = True
             self.create_failed(placeholder, creator_data)
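With use_legacy_creators defaulting to False, placeholder population now goes through the builder's shared, headless CreateContext instead of instantiating legacy creator classes directly. A rough usage sketch of that path (the host object, creator identifier and names below are placeholders, not part of the PR):

from openpype.client import get_asset_by_name
from openpype.pipeline.create import CreateContext


def create_placeholder_instance(host, project_name, asset_name, task_name):
    # Shared, headless CreateContext, the same object the builder caches.
    context = CreateContext(
        host, discover_publish_plugins=False, headless=True
    )
    asset_doc = get_asset_by_name(project_name, asset_name)
    return context.create(
        "my.creator.identifier",  # placeholder for creator_plugin.identifier
        "Main",                   # create variant
        asset_doc,
        task_name=task_name,
    )
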
diff --git a/openpype/plugins/publish/collect_input_representations_to_versions.py b/openpype/plugins/publish/collect_input_representations_to_versions.py
index 18a19bce80..54a3214647 100644
--- a/openpype/plugins/publish/collect_input_representations_to_versions.py
+++ b/openpype/plugins/publish/collect_input_representations_to_versions.py
@@ -23,7 +23,8 @@ class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
         representations = set()
         for instance in context:
             inst_repre = instance.data.get("inputRepresentations", [])
-            representations.update(inst_repre)
+            if inst_repre:
+                representations.update(inst_repre)
 
         representations_docs = get_representations(
             project_name=context.data["projectEntity"]["name"],
@@ -31,7 +32,8 @@
             fields=["_id", "parent"])
 
         representation_id_to_version_id = {
-            repre["_id"]: repre["parent"] for repre in representations_docs
+            str(repre["_id"]): repre["parent"]
+            for repre in representations_docs
        }
 
         for instance in context:
@@ -39,9 +41,8 @@
             if not inst_repre:
                 continue
 
-            input_versions = instance.data.get("inputVersions", [])
+            input_versions = instance.data.setdefault("inputVersions", [])
             for repre_id in inst_repre:
-                repre_id = ObjectId(repre_id)
-                version_id = representation_id_to_version_id[repre_id]
-                input_versions.append(version_id)
-            instance.data["inputVersions"] = input_versions
+                version_id = representation_id_to_version_id.get(repre_id)
+                if version_id:
+                    input_versions.append(version_id)
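Net effect of the string-id change in this plug-in, condensed into a helper-style sketch (the function wrapper is illustrative; the instance and the already-queried representation documents are passed in):

def map_input_versions(instance, representations_docs):
    # Representation ids are stored as plain strings now, so the lookup
    # keys are built with str() and no ObjectId() cast is needed;
    # ids without a matching document are simply skipped.
    repre_to_version = {
        str(repre["_id"]): repre["parent"]
        for repre in representations_docs
    }
    input_versions = instance.data.setdefault("inputVersions", [])
    for repre_id in instance.data.get("inputRepresentations", []):
        version_id = repre_to_version.get(repre_id)
        if version_id:
            input_versions.append(version_id)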