[Automated] Merged develop into main

ynbot 2023-03-08 04:29:44 +01:00 committed by GitHub
commit 21022890c7
54 changed files with 1218 additions and 535 deletions

View file

@ -1,16 +1,9 @@
## Brief description
First sentence is a brief description.
## Description
The next paragraph is more elaborate text with more info. This will be displayed, for example, in collapsed form under the first sentence in a changelog.
## Changelog Description
Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.
## Additional info
The rest will be ignored in the changelog and should contain any additional
technical information.
## Documentation (add _"type: documentation"_ label)
[feature_documentation](future_url_after_it_will_be_merged)
Paragraphs of text giving context of additional technical information or code examples.
## Testing notes:
1. start with this step
2. follow this step
3. follow this step

View file

@ -1,20 +1,11 @@
from .pipeline import (
install,
uninstall,
FusionHost,
ls,
imprint_container,
parse_container
)
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root
parse_container,
list_instances,
remove_instance
)
from .lib import (
@ -30,21 +21,11 @@ from .menu import launch_openpype_menu
__all__ = [
# pipeline
"install",
"uninstall",
"ls",
"imprint_container",
"parse_container",
# workio
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root",
# lib
"maintained_selection",
"update_frame_range",

View file

@ -0,0 +1,54 @@
import pyblish.api
from openpype.hosts.fusion.api.lib import get_current_comp
from openpype.pipeline.publish import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid nodes in Maya when plug-in failed.
To retrieve the invalid nodes this assumes a static `get_invalid()`
method is available on the plugin.
"""
label = "Select invalid"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning("Plug-in returned to be invalid, "
"but has no selectable nodes.")
if not invalid:
# Assume relevant comp is current comp and clear selection
self.log.info("No invalid tools found.")
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
return
# Assume a single comp
first_tool = invalid[0]
comp = first_tool.Comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
names = set()
for tool in invalid:
flow.Select(tool, True)
names.add(tool.Name)
self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names)))

View file

@ -7,11 +7,11 @@ from openpype.style import load_stylesheet
from openpype.lib import register_event_callback
from openpype.hosts.fusion.scripts import (
set_rendermode,
duplicate_with_inputs
duplicate_with_inputs,
)
from openpype.hosts.fusion.api.lib import (
set_asset_framerange,
set_asset_resolution
set_asset_resolution,
)
from openpype.pipeline import legacy_io
from openpype.resources import get_openpype_icon_filepath
@ -45,17 +45,19 @@ class OpenPypeMenu(QtWidgets.QWidget):
self.setWindowTitle("OpenPype")
asset_label = QtWidgets.QLabel("Context", self)
asset_label.setStyleSheet("""QLabel {
asset_label.setStyleSheet(
"""QLabel {
font-size: 14px;
font-weight: 600;
color: #5f9fb8;
}""")
}"""
)
asset_label.setAlignment(QtCore.Qt.AlignHCenter)
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
manager_btn = QtWidgets.QPushButton("Manage...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)
@ -108,7 +110,8 @@ class OpenPypeMenu(QtWidgets.QWidget):
libload_btn.clicked.connect(self.on_libload_clicked)
rendermode_btn.clicked.connect(self.on_rendermode_clicked)
duplicate_with_inputs_btn.clicked.connect(
self.on_duplicate_with_inputs_clicked)
self.on_duplicate_with_inputs_clicked
)
set_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
set_framerange_btn.clicked.connect(self.on_set_framerange_clicked)
@ -130,7 +133,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
self.asset_label.setText(label)
def register_callback(self, name, fn):
# Create a wrapper callback that we only store
# for as long as we want it to persist as callback
def _callback(*args):
@ -146,10 +148,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
host_tools.show_workfiles()
def on_create_clicked(self):
host_tools.show_creator()
host_tools.show_publisher(tab="create")
def on_publish_clicked(self):
host_tools.show_publish()
host_tools.show_publisher(tab="publish")
def on_load_clicked(self):
host_tools.show_loader(use_context=True)

View file

@ -4,6 +4,7 @@ Basic avalon integration
import os
import sys
import logging
import contextlib
import pyblish.api
from qtpy import QtCore
@ -17,15 +18,14 @@ from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_inventory_action_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
deregister_inventory_action_path,
AVALON_CONTAINER_ID,
)
from openpype.pipeline.load import any_outdated_containers
from openpype.hosts.fusion import FUSION_HOST_DIR
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from openpype.tools.utils import host_tools
from .lib import (
get_current_comp,
comp_lock_and_undo_chunk,
@ -66,94 +66,98 @@ class FusionLogHandler(logging.Handler):
self.print(entry)
def install():
"""Install fusion-specific functionality of OpenPype.
class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "fusion"
This is where you install menus and register families, data
and loaders into fusion.
def install(self):
"""Install fusion-specific functionality of OpenPype.
It is called automatically when installing via
`openpype.pipeline.install_host(openpype.hosts.fusion.api)`
This is where you install menus and register families, data
and loaders into fusion.
See the Maya equivalent for inspiration on how to implement this.
It is called automatically when installing via
`openpype.pipeline.install_host(openpype.hosts.fusion.api)`
"""
# Remove all handlers associated with the root logger object, because
# that one always logs as "warnings" incorrectly.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
See the Maya equivalent for inspiration on how to implement this.
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = FusionLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
"""
# Remove all handlers associated with the root logger object, because
# that one always logs as "warnings" incorrectly.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
pyblish.api.register_host("fusion")
pyblish.api.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = FusionLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
pyblish.api.register_host("fusion")
pyblish.api.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
pyblish.api.register_callback(
"instanceToggled", on_pyblish_instance_toggled
)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
# Register events
register_event_callback("open", on_after_open)
register_event_callback("save", on_save)
register_event_callback("new", on_new)
# Register events
register_event_callback("open", on_after_open)
register_event_callback("save", on_save)
register_event_callback("new", on_new)
# region workfile io api
def has_unsaved_changes(self):
comp = get_current_comp()
return comp.GetAttrs()["COMPB_Modified"]
def uninstall():
"""Uninstall all that was installed
def get_workfile_extensions(self):
return [".comp"]
This is where you undo everything that was done in `install()`.
That means, removing menus, deregistering families and data
and everything. It should be as though `install()` was never run,
because odds are the user calling this function intends to
re-install shortly afterwards, for example after modifying
the menu or registered families.
def save_workfile(self, dst_path=None):
comp = get_current_comp()
comp.Save(dst_path)
"""
pyblish.api.deregister_host("fusion")
pyblish.api.deregister_plugin_path(PUBLISH_PATH)
log.info("Deregistering Fusion plug-ins..")
def open_workfile(self, filepath):
# Hack to get fusion, see
# openpype.hosts.fusion.api.pipeline.get_current_comp()
fusion = getattr(sys.modules["__main__"], "fusion", None)
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
deregister_inventory_action_path(INVENTORY_PATH)
return fusion.LoadComp(filepath)
pyblish.api.deregister_callback(
"instanceToggled", on_pyblish_instance_toggled
)
def get_current_workfile(self):
comp = get_current_comp()
current_filepath = comp.GetAttrs()["COMPS_FileName"]
if not current_filepath:
return None
return current_filepath
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle saver tool passthrough states on instance toggles."""
comp = instance.context.data.get("currentComp")
if not comp:
return
def work_root(self, session):
work_dir = session["AVALON_WORKDIR"]
scene_dir = session.get("AVALON_SCENEDIR")
if scene_dir:
return os.path.join(work_dir, scene_dir)
else:
return work_dir
# endregion
savers = [tool for tool in instance if
getattr(tool, "ID", None) == "Saver"]
if not savers:
return
@contextlib.contextmanager
def maintained_selection(self):
from .lib import maintained_selection
return maintained_selection()
# Whether instances should be passthrough based on new value
passthrough = not new_value
with comp_lock_and_undo_chunk(comp,
undo_queue_name="Change instance "
"active state"):
for tool in savers:
attrs = tool.GetAttrs()
current = attrs["TOOLB_PassThrough"]
if current != passthrough:
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
def get_containers(self):
return ls()
def update_context_data(self, data, changes):
comp = get_current_comp()
comp.SetData("openpype", data)
def get_context_data(self):
comp = get_current_comp()
return comp.GetData("openpype") or {}
def on_new(event):
@ -283,9 +287,51 @@ def parse_container(tool):
return container
# TODO: Function below is currently an unused prototype
def list_instances(creator_id=None):
"""Return created instances in current workfile which will be published.
Returns:
(list) of dictionaries matching instances format
"""
comp = get_current_comp()
tools = comp.GetToolList(False).values()
instance_signature = {
"id": "pyblish.avalon.instance",
"identifier": creator_id
}
instances = []
for tool in tools:
data = tool.GetData('openpype')
if not isinstance(data, dict):
continue
if data.get("id") != instance_signature["id"]:
continue
if creator_id and data.get("identifier") != creator_id:
continue
instances.append(tool)
return instances
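A hypothetical usage sketch of the prototype above, run inside a Fusion session with a current comp:

```python
# List every publishable instance in the current workfile and show
# the data imprinted on each tool.
for tool in list_instances():
    print(tool.Name, tool.GetData("openpype"))
```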
# TODO: Function below is currently an unused prototype
def remove_instance(instance):
"""Remove instance from current workfile.
Args:
instance (dict): instance representation from subsetmanager model
"""
# Assume instance is a Fusion tool directly
instance["tool"].Delete()
class FusionEventThread(QtCore.QThread):
"""QThread which will periodically ping Fusion app for any events.
The fusion.UIManager must be set up to be notified of events before they'll
be reported by this thread, for example:
fusion.UIManager.AddNotify("Comp_Save", None)

View file

@ -1,45 +0,0 @@
"""Host API required Work Files tool"""
import sys
import os
from .lib import get_current_comp
def file_extensions():
return [".comp"]
def has_unsaved_changes():
comp = get_current_comp()
return comp.GetAttrs()["COMPB_Modified"]
def save_file(filepath):
comp = get_current_comp()
comp.Save(filepath)
def open_file(filepath):
# Hack to get fusion, see
# openpype.hosts.fusion.api.pipeline.get_current_comp()
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.LoadComp(filepath)
def current_file():
comp = get_current_comp()
current_filepath = comp.GetAttrs()["COMPS_FileName"]
if not current_filepath:
return None
return current_filepath
def work_root(session):
work_dir = session["AVALON_WORKDIR"]
scene_dir = session.get("AVALON_SCENEDIR")
if scene_dir:
return os.path.join(work_dir, scene_dir)
else:
return work_dir

View file

@ -13,11 +13,11 @@ def main(env):
# However the contents of that folder can conflict with Qt library dlls
# so we make sure to move out of it to avoid DLL Load Failed errors.
os.chdir("..")
from openpype.hosts.fusion import api
from openpype.hosts.fusion.api import FusionHost
from openpype.hosts.fusion.api import menu
# activate resolve from pype
install_host(api)
install_host(FusionHost())
log = Logger.get_logger(__name__)
log.info(f"Registered host: {registered_host()}")

View file

@ -1,7 +1,7 @@
import os
import platform
from openpype.lib import PreLaunchHook
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.pipeline.colorspace import get_imageio_config
from openpype.pipeline.template_data import get_template_data_with_names
class FusionPreLaunchOCIO(PreLaunchHook):
@ -11,24 +11,22 @@ class FusionPreLaunchOCIO(PreLaunchHook):
def execute(self):
"""Hook entry method."""
# get image io
project_settings = self.data["project_settings"]
template_data = get_template_data_with_names(
project_name=self.data["project_name"],
asset_name=self.data["asset_name"],
task_name=self.data["task_name"],
host_name=self.host_name,
system_settings=self.data["system_settings"]
)
# make sure anatomy settings have the flame key
imageio_fusion = project_settings["fusion"]["imageio"]
ocio = imageio_fusion.get("ocio")
enabled = ocio.get("enabled", False)
if not enabled:
return
platform_key = platform.system().lower()
ocio_path = ocio["configFilePath"][platform_key]
if not ocio_path:
raise ApplicationLaunchFailed(
"Fusion OCIO is enabled in project settings but no OCIO config"
f"path is set for your current platform: {platform_key}"
)
config_data = get_imageio_config(
project_name=self.data["project_name"],
host_name=self.host_name,
project_settings=self.data["project_settings"],
anatomy_data=template_data,
anatomy=self.data["anatomy"]
)
ocio_path = config_data["path"]
self.log.info(f"Setting OCIO config path: {ocio_path}")
self.launch_context.env["OCIO"] = os.pathsep.join(ocio_path)
self.launch_context.env["OCIO"] = ocio_path

View file

@ -1,49 +0,0 @@
import os
from openpype.pipeline import (
LegacyCreator,
legacy_io
)
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
class CreateOpenEXRSaver(LegacyCreator):
name = "openexrDefault"
label = "Create OpenEXR Saver"
hosts = ["fusion"]
family = "render"
defaults = ["Main"]
def process(self):
file_format = "OpenEXRFormat"
comp = get_current_comp()
workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
filename = "{}..exr".format(self.name)
filepath = os.path.join(workdir, "render", filename)
with comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
saver.SetAttrs({"TOOLS_Name": self.name})
# Setting input attributes is different from basic attributes
# Not confused with "MainInputAttributes" which
saver["Clip"] = filepath
saver["OutputFormat"] = file_format
# Check file format settings are available
if saver[file_format] is None:
raise RuntimeError("File format is not set to {}, "
"this is a bug".format(file_format))
# Set file format attributes
saver[file_format]["Depth"] = 1 # int8 | int16 | float32 | other
saver[file_format]["SaveAlpha"] = 0

View file

@ -0,0 +1,215 @@
import os
import qtawesome
from openpype.hosts.fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
from openpype.lib import BoolDef
from openpype.pipeline import (
legacy_io,
Creator,
CreatedInstance
)
from openpype.client import get_asset_by_name
class CreateSaver(Creator):
identifier = "io.openpype.creators.fusion.saver"
name = "saver"
label = "Saver"
family = "render"
default_variants = ["Main"]
description = "Fusion Saver to generate image sequence"
def create(self, subset_name, instance_data, pre_create_data):
# TODO: Add pre_create attributes to choose file format?
file_format = "OpenEXRFormat"
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
instance_data["subset"] = subset_name
self._update_tool_with_data(saver, data=instance_data)
saver["OutputFormat"] = file_format
# Check file format settings are available
if saver[file_format] is None:
raise RuntimeError(
f"File format is not set to {file_format}, this is a bug"
)
# Set file format attributes
saver[file_format]["Depth"] = 0 # Auto | float16 | float32
# TODO Is this needed?
saver[file_format]["SaveAlpha"] = 1
self._imprint(saver, instance_data)
# Register the CreatedInstance
instance = CreatedInstance(
family=self.family,
subset_name=subset_name,
data=instance_data,
creator=self)
# Insert the transient data
instance.transient_data["tool"] = saver
self._add_instance_to_context(instance)
return instance
def collect_instances(self):
comp = get_current_comp()
tools = comp.GetToolList(False, "Saver").values()
for tool in tools:
data = self.get_managed_tool_data(tool)
if not data:
data = self._collect_unmanaged_saver(tool)
# Add instance
created_instance = CreatedInstance.from_existing(data, self)
# Collect transient data
created_instance.transient_data["tool"] = tool
self._add_instance_to_context(created_instance)
def get_icon(self):
return qtawesome.icon("fa.eye", color="white")
def update_instances(self, update_list):
for created_inst, _changes in update_list:
new_data = created_inst.data_to_store()
tool = created_inst.transient_data["tool"]
self._update_tool_with_data(tool, new_data)
self._imprint(tool, new_data)
def remove_instances(self, instances):
for instance in instances:
# Remove the tool from the scene
tool = instance.transient_data["tool"]
if tool:
tool.Delete()
# Remove the collected CreatedInstance to remove from UI directly
self._remove_instance_from_context(instance)
def _imprint(self, tool, data):
# Save all data in a "openpype.{key}" = value data
active = data.pop("active", None)
if active is not None:
# Use active value to set the passthrough state
tool.SetAttrs({"TOOLB_PassThrough": not active})
for key, value in data.items():
tool.SetData(f"openpype.{key}", value)
def _update_tool_with_data(self, tool, data):
"""Update tool node name and output path based on subset data"""
if "subset" not in data:
return
original_subset = tool.GetData("openpype.subset")
subset = data["subset"]
if original_subset != subset:
# Subset change detected
# Update output filepath
workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
filename = f"{subset}..exr"
filepath = os.path.join(workdir, "render", subset, filename)
tool["Clip"] = filepath
# Rename tool
if tool.Name != subset:
print(f"Renaming {tool.Name} -> {subset}")
tool.SetAttrs({"TOOLS_Name": subset})
def _collect_unmanaged_saver(self, tool):
# TODO: this should not be done this way - this should actually
# get the data as stored on the tool explicitly (however, that
# would disallow any 'regular saver' from being collected unless
# the instance data is stored on it to begin with)
print("Collecting unmanaged saver..")
comp = tool.Comp()
# Allow regular non-managed savers to also be picked up
project = legacy_io.Session["AVALON_PROJECT"]
asset = legacy_io.Session["AVALON_ASSET"]
task = legacy_io.Session["AVALON_TASK"]
asset_doc = get_asset_by_name(project_name=project,
asset_name=asset)
path = tool["Clip"][comp.TIME_UNDEFINED]
fname = os.path.basename(path)
fname, _ext = os.path.splitext(fname)
variant = fname.rstrip(".")
subset = self.get_subset_name(
variant=variant,
task_name=task,
asset_doc=asset_doc,
project_name=project,
)
attrs = tool.GetAttrs()
passthrough = attrs["TOOLB_PassThrough"]
return {
# Required data
"project": project,
"asset": asset,
"subset": subset,
"task": task,
"variant": variant,
"active": not passthrough,
"family": self.family,
# Unique identifier for instance and this creator
"id": "pyblish.avalon.instance",
"creator_identifier": self.identifier
}
def get_managed_tool_data(self, tool):
"""Return data of the tool if it matches creator identifier"""
data = tool.GetData('openpype')
if not isinstance(data, dict):
return
required = {
"id": "pyblish.avalon.instance",
"creator_identifier": self.identifier
}
for key, value in required.items():
if key not in data or data[key] != value:
return
# Get active state from the actual tool state
attrs = tool.GetAttrs()
passthrough = attrs["TOOLB_PassThrough"]
data["active"] = not passthrough
return data
def get_instance_attr_defs(self):
return [
BoolDef(
"review",
default=True,
label="Review"
)
]
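The `_imprint`/`get_managed_tool_data` pair assumes Fusion groups dotted `SetData` keys under their parent key, so per-key writes of `openpype.<key>` read back as a single dict via `GetData("openpype")`. A self-contained stub (hypothetical, runs outside Fusion) illustrating that assumed round-trip:

```python
class _StubTool:
    """Stand-in mimicking only the two Fusion calls used above."""

    def __init__(self):
        self._data = {}

    def SetData(self, key, value):
        # "openpype.subset" is stored as self._data["openpype"]["subset"]
        section, _, subkey = key.partition(".")
        self._data.setdefault(section, {})[subkey] = value

    def GetData(self, key):
        return self._data.get(key)


tool = _StubTool()
for key, value in {
    "id": "pyblish.avalon.instance",
    "creator_identifier": "io.openpype.creators.fusion.saver",
    "subset": "renderMain",
}.items():
    tool.SetData(f"openpype.{key}", value)

print(tool.GetData("openpype"))
# {'id': 'pyblish.avalon.instance',
#  'creator_identifier': 'io.openpype.creators.fusion.saver',
#  'subset': 'renderMain'}
```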

View file

@ -0,0 +1,109 @@
import qtawesome
from openpype.hosts.fusion.api import (
get_current_comp
)
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
CreatedInstance,
legacy_io,
)
class FusionWorkfileCreator(AutoCreator):
identifier = "workfile"
family = "workfile"
label = "Workfile"
default_variant = "Main"
create_allow_context_change = False
data_key = "openpype_workfile"
def collect_instances(self):
comp = get_current_comp()
data = comp.GetData(self.data_key)
if not data:
return
instance = CreatedInstance(
family=self.family,
subset_name=data["subset"],
data=data,
creator=self
)
instance.transient_data["comp"] = comp
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
comp = created_inst.transient_data["comp"]
if not hasattr(comp, "SetData"):
# Comp is not alive anymore, likely closed by the user
self.log.error("Workfile comp not found for existing instance."
" Comp might have been closed in the meantime.")
continue
# Imprint data into the comp
data = created_inst.data_to_store()
comp.SetData(self.data_key, data)
def create(self, options=None):
comp = get_current_comp()
if not comp:
self.log.error("Unable to find current comp")
return
existing_instance = None
for instance in self.create_context.instances:
if instance.family == self.family:
existing_instance = instance
break
project_name = legacy_io.Session["AVALON_PROJECT"]
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]
host_name = legacy_io.Session["AVALON_APP"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant, task_name, asset_doc,
project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
))
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
new_instance.transient_data["comp"] = comp
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
def get_icon(self):
return qtawesome.icon("fa.file-o", color="white")

View file

@ -1,11 +1,9 @@
import os
import contextlib
from openpype.client import get_version_by_id
from openpype.pipeline import (
load,
legacy_io,
get_representation_path,
import openpype.pipeline.load as load
from openpype.pipeline.load import (
get_representation_context,
get_representation_path_from_context
)
from openpype.hosts.fusion.api import (
imprint_container,
@ -148,7 +146,7 @@ class FusionLoadSequence(load.LoaderPlugin):
namespace = context['asset']['name']
# Use the first file for now
path = self._get_first_image(os.path.dirname(self.fname))
path = get_representation_path_from_context(context)
# Create the Loader with the filename path set
comp = get_current_comp()
@ -217,13 +215,11 @@ class FusionLoadSequence(load.LoaderPlugin):
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
root = os.path.dirname(get_representation_path(representation))
path = self._get_first_image(root)
context = get_representation_context(representation)
path = get_representation_path_from_context(context)
# Get start frame from version data
project_name = legacy_io.active_project()
version = get_version_by_id(project_name, representation["parent"])
start = self._get_start(version, tool)
start = self._get_start(context["version"], tool)
with comp_lock_and_undo_chunk(comp, "Update Loader"):
@ -256,11 +252,6 @@ class FusionLoadSequence(load.LoaderPlugin):
with comp_lock_and_undo_chunk(comp, "Remove Loader"):
tool.Delete()
def _get_first_image(self, root):
"""Get first file in representation root"""
files = sorted(os.listdir(root))
return os.path.join(root, files[0])
def _get_start(self, version_doc, tool):
"""Return real start frame of published files (incl. handles)"""
data = version_doc["data"]

View file

@ -1,5 +1,3 @@
import os
import pyblish.api
from openpype.hosts.fusion.api import get_current_comp

View file

@ -0,0 +1,41 @@
import pyblish.api
def get_comp_render_range(comp):
"""Return comp's start-end render range and global start-end range."""
comp_attrs = comp.GetAttrs()
start = comp_attrs["COMPN_RenderStart"]
end = comp_attrs["COMPN_RenderEnd"]
global_start = comp_attrs["COMPN_GlobalStart"]
global_end = comp_attrs["COMPN_GlobalEnd"]
# Whenever render ranges are undefined fall back
# to the comp's global start and end
if start == -1000000000:
start = global_start
if end == -1000000000:
end = global_end
return start, end, global_start, global_end
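A quick self-contained check of the fallback behaviour, using a stub comp that mimics only the `GetAttrs()` call the function needs (`-1000000000` being the sentinel for an undefined render range, per the comment above):

```python
class _StubComp:
    def GetAttrs(self):
        return {
            "COMPN_RenderStart": -1000000000,  # undefined render range
            "COMPN_RenderEnd": -1000000000,
            "COMPN_GlobalStart": 1001,
            "COMPN_GlobalEnd": 1100,
        }


print(get_comp_render_range(_StubComp()))
# (1001, 1100, 1001, 1100) - falls back to the global range
```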
class CollectFusionCompFrameRanges(pyblish.api.ContextPlugin):
"""Collect current comp"""
# We run this after CollectorOrder - 0.1 otherwise it gets
# overridden by global plug-in `CollectContextEntities`
order = pyblish.api.CollectorOrder - 0.05
label = "Collect Comp Frame Ranges"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
comp = context.data["currentComp"]
# Store comp render ranges
start, end, global_start, global_end = get_comp_render_range(comp)
context.data["frameStart"] = int(start)
context.data["frameEnd"] = int(end)
context.data["frameStartHandle"] = int(global_start)
context.data["frameEndHandle"] = int(global_end)

View file

@ -1,5 +1,3 @@
from bson.objectid import ObjectId
import pyblish.api
from openpype.pipeline import registered_host
@ -97,10 +95,15 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
label = "Collect Inputs"
order = pyblish.api.CollectorOrder + 0.2
hosts = ["fusion"]
families = ["render"]
def process(self, instance):
# Get all upstream and include itself
if not any(instance[:]):
self.log.debug("No tool found in instance, skipping..")
return
tool = instance[0]
nodes = list(iter_upstream(tool))
nodes.append(tool)
@ -108,7 +111,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
# Collect containers for the given set of nodes
containers = collect_input_containers(nodes)
inputs = [ObjectId(c["representation"]) for c in containers]
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs)

View file

@ -3,25 +3,7 @@ import os
import pyblish.api
def get_comp_render_range(comp):
"""Return comp's start-end render range and global start-end range."""
comp_attrs = comp.GetAttrs()
start = comp_attrs["COMPN_RenderStart"]
end = comp_attrs["COMPN_RenderEnd"]
global_start = comp_attrs["COMPN_GlobalStart"]
global_end = comp_attrs["COMPN_GlobalEnd"]
# Whenever render ranges are undefined fall back
# to the comp's global start and end
if start == -1000000000:
start = global_start
if end == -1000000000:
end = global_end
return start, end, global_start, global_end
class CollectInstances(pyblish.api.ContextPlugin):
class CollectInstanceData(pyblish.api.InstancePlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
@ -30,77 +12,66 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""
order = pyblish.api.CollectorOrder
label = "Collect Instances"
label = "Collect Instances Data"
hosts = ["fusion"]
def process(self, context):
def process(self, instance):
"""Collect all image sequence tools"""
from openpype.hosts.fusion.api.lib import get_frame_path
context = instance.context
comp = context.data["currentComp"]
# Include creator attributes directly as instance data
creator_attributes = instance.data["creator_attributes"]
instance.data.update(creator_attributes)
# Get all savers in the comp
tools = comp.GetToolList(False).values()
savers = [tool for tool in tools if tool.ID == "Saver"]
# Include start and end render frame in label
subset = instance.data["subset"]
start = context.data["frameStart"]
end = context.data["frameEnd"]
label = "{subset} ({start}-{end})".format(subset=subset,
start=int(start),
end=int(end))
instance.data.update({
"label": label,
start, end, global_start, global_end = get_comp_render_range(comp)
context.data["frameStart"] = int(start)
context.data["frameEnd"] = int(end)
context.data["frameStartHandle"] = int(global_start)
context.data["frameEndHandle"] = int(global_end)
# todo: Allow custom frame range per instance
"frameStart": context.data["frameStart"],
"frameEnd": context.data["frameEnd"],
"frameStartHandle": context.data["frameStartHandle"],
"frameEndHandle": context.data["frameStartHandle"],
"fps": context.data["fps"],
})
for tool in savers:
# Add review family if the instance is marked as 'review'
# This could be done through a 'review' Creator attribute.
if instance.data.get("review", False):
self.log.info("Adding review family..")
instance.data["families"].append("review")
if instance.data["family"] == "render":
# TODO: This should probably move into a collector of
# its own for the "render" family
from openpype.hosts.fusion.api.lib import get_frame_path
comp = context.data["currentComp"]
# This is only the case for savers currently but not
# for workfile instances. So we assume saver here.
tool = instance.data["transientData"]["tool"]
path = tool["Clip"][comp.TIME_UNDEFINED]
tool_attrs = tool.GetAttrs()
active = not tool_attrs["TOOLB_PassThrough"]
if not path:
self.log.warning("Skipping saver because it "
"has no path set: {}".format(tool.Name))
continue
filename = os.path.basename(path)
head, padding, tail = get_frame_path(filename)
ext = os.path.splitext(path)[1]
assert tail == ext, ("Tail does not match %s" % ext)
subset = head.rstrip("_. ") # subset is head of the filename
# Include start and end render frame in label
label = "{subset} ({start}-{end})".format(subset=subset,
start=int(start),
end=int(end))
instance = context.create_instance(subset)
instance.data.update({
"asset": os.environ["AVALON_ASSET"], # todo: not a constant
"subset": subset,
"path": path,
"outputDir": os.path.dirname(path),
"ext": ext, # todo: should be redundant
"label": label,
"task": context.data["task"],
"frameStart": context.data["frameStart"],
"frameEnd": context.data["frameEnd"],
"frameStartHandle": context.data["frameStartHandle"],
"frameEndHandle": context.data["frameStartHandle"],
"fps": context.data["fps"],
"families": ["render", "review"],
"family": "render",
"active": active,
"publish": active # backwards compatibility
"ext": ext, # todo: should be redundant?
# Backwards compatibility: embed tool in instance.data
"tool": tool
})
# Add tool itself as member
instance.append(tool)
self.log.info("Found: \"%s\" " % path)
# Sort/grouped by family (preserving local index)
context[:] = sorted(context, key=self.sort_by_family)
return context
def sort_by_family(self, instance):
"""Sort by family"""
return instance.data.get("families", instance.data.get("family"))

View file

@ -0,0 +1,26 @@
import os
import pyblish.api
class CollectFusionWorkfile(pyblish.api.InstancePlugin):
"""Collect Fusion workfile representation."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
hosts = ["fusion"]
families = ["workfile"]
def process(self, instance):
current_file = instance.context.data["currentFile"]
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
instance.data['representations'] = [{
'name': ext.lstrip("."),
'ext': ext.lstrip("."),
'files': file,
"stagingDir": folder,
}]

View file

@ -11,7 +11,7 @@ class FusionIncrementCurrentFile(pyblish.api.ContextPlugin):
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["fusion"]
families = ["render.farm"]
families = ["workfile"]
optional = True
def process(self, context):

View file

@ -1,9 +1,11 @@
import os
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
class Fusionlocal(pyblish.api.InstancePlugin):
class Fusionlocal(pyblish.api.InstancePlugin,
publish.ColormanagedPyblishPluginMixin):
"""Render the current Fusion composition locally.
Extract the result of savers by starting a comp render
@ -17,18 +19,20 @@ class Fusionlocal(pyblish.api.InstancePlugin):
families = ["render.local"]
def process(self, instance):
# This plug-in runs only once and thus assumes all instances
# currently will render the same frame range
context = instance.context
key = f"__hasRun{self.__class__.__name__}"
if context.data.get(key, False):
return
context.data[key] = True
# Start render
self.render_once(context)
# Log render status
self.log.info(
"Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
nm=instance.data["name"],
ast=instance.data["asset"],
tsk=instance.data["task"],
)
)
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
path = instance.data["path"]
@ -41,40 +45,56 @@ class Fusionlocal(pyblish.api.InstancePlugin):
for frame in range(frame_start, frame_end + 1)
]
repre = {
'name': ext[1:],
'ext': ext[1:],
'frameStart': f"%0{len(str(frame_end))}d" % frame_start,
'files': files,
"name": ext[1:],
"ext": ext[1:],
"frameStart": f"%0{len(str(frame_end))}d" % frame_start,
"files": files,
"stagingDir": output_dir,
}
self.set_representation_colorspace(
representation=repre,
context=context,
)
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(repre)
# review representation
repre_preview = repre.copy()
repre_preview["name"] = repre_preview["ext"] = "mp4"
repre_preview["tags"] = ["review", "ftrackreview", "delete"]
instance.data["representations"].append(repre_preview)
if instance.data.get("review", False):
repre["tags"] = ["review", "ftrackreview"]
def render_once(self, context):
"""Render context comp only once, even with more render instances"""
current_comp = context.data["currentComp"]
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
# This plug-in assumes all render nodes get rendered at the same time
# to speed up the rendering. The check below makes sure that we only
# execute the rendering once and not for each instance.
key = f"__hasRun{self.__class__.__name__}"
if key not in context.data:
# We initialize as false to indicate it wasn't successful yet
# so we can keep track of whether Fusion succeeded
context.data[key] = False
self.log.info("Starting render")
self.log.info(f"Start frame: {frame_start}")
self.log.info(f"End frame: {frame_end}")
current_comp = context.data["currentComp"]
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
with comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render({
"Start": frame_start,
"End": frame_end,
"Wait": True
})
self.log.info("Starting Fusion render")
self.log.info(f"Start frame: {frame_start}")
self.log.info(f"End frame: {frame_end}")
if not result:
with comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render(
{
"Start": frame_start,
"End": frame_end,
"Wait": True,
}
)
context.data[key] = bool(result)
if context.data[key] is False:
raise RuntimeError("Comp render failed")

View file

@ -7,7 +7,7 @@ class FusionSaveComp(pyblish.api.ContextPlugin):
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["fusion"]
families = ["render"]
families = ["render", "workfile"]
def process(self, context):

View file

@ -1,6 +1,9 @@
import pyblish.api
from openpype.pipeline.publish import RepairAction
from openpype.pipeline import PublishValidationError
from openpype.hosts.fusion.api.action import SelectInvalidAction
class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
@ -8,11 +11,12 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
label = "Validate Background Depth 32 bit"
actions = [RepairAction]
hosts = ["fusion"]
families = ["render"]
optional = True
actions = [SelectInvalidAction, RepairAction]
@classmethod
def get_invalid(cls, instance):
@ -29,8 +33,10 @@ class ValidateBackgroundDepth(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found %i nodes which are not set to float32"
% len(invalid))
raise PublishValidationError(
"Found {} Backgrounds tools which"
" are not set to float32".format(len(invalid)),
title=self.label)
@classmethod
def repair(cls, instance):

View file

@ -1,6 +1,7 @@
import os
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
@ -19,10 +20,12 @@ class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
filename = attrs["COMPS_FileName"]
if not filename:
raise RuntimeError("Comp is not saved.")
raise PublishValidationError("Comp is not saved.",
title=self.label)
if not os.path.exists(filename):
raise RuntimeError("Comp file does not exist: %s" % filename)
raise PublishValidationError(
"Comp file does not exist: %s" % filename, title=self.label)
if attrs["COMPB_Modified"]:
self.log.warning("Comp is modified. Save your comp to ensure your "

View file

@ -1,6 +1,9 @@
import pyblish.api
from openpype.pipeline.publish import RepairAction
from openpype.pipeline import PublishValidationError
from openpype.hosts.fusion.api.action import SelectInvalidAction
class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
@ -15,6 +18,7 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
label = "Validate Create Folder Checked"
families = ["render"]
hosts = ["fusion"]
actions = [SelectInvalidAction]
@classmethod
def get_invalid(cls, instance):
@ -31,8 +35,9 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found Saver with Create Folder During "
"Render checked off")
raise PublishValidationError(
"Found Saver with Create Folder During Render checked off",
title=self.label)
@classmethod
def repair(cls, instance):

View file

@ -1,6 +1,9 @@
import os
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.fusion.api.action import SelectInvalidAction
class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
@ -16,11 +19,13 @@ class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
label = "Validate Filename Has Extension"
families = ["render"]
hosts = ["fusion"]
actions = [SelectInvalidAction]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Found Saver without an extension")
raise PublishValidationError("Found Saver without an extension",
title=self.label)
@classmethod
def get_invalid(cls, instance):

View file

@ -1,4 +1,7 @@
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.fusion.api.action import SelectInvalidAction
class ValidateSaverHasInput(pyblish.api.InstancePlugin):
@ -12,6 +15,7 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
label = "Validate Saver Has Input"
families = ["render"]
hosts = ["fusion"]
actions = [SelectInvalidAction]
@classmethod
def get_invalid(cls, instance):
@ -25,5 +29,8 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Saver has no incoming connection: "
"{} ({})".format(instance, invalid[0].Name))
saver_name = invalid[0].Name
raise PublishValidationError(
"Saver has no incoming connection: {} ({})".format(instance,
saver_name),
title=self.label)

View file

@ -1,4 +1,7 @@
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.fusion.api.action import SelectInvalidAction
class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
@ -8,6 +11,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
label = "Validate Saver Passthrough"
families = ["render"]
hosts = ["fusion"]
actions = [SelectInvalidAction]
def process(self, context):
@ -27,8 +31,9 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
if invalid_instances:
self.log.info("Reset pyblish to collect your current scene state, "
"that should fix error.")
raise RuntimeError("Invalid instances: "
"{0}".format(invalid_instances))
raise PublishValidationError(
"Invalid instances: {0}".format(invalid_instances),
title=self.label)
def is_invalid(self, instance):
@ -36,7 +41,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
attr = saver.GetAttrs()
active = not attr["TOOLB_PassThrough"]
if active != instance.data["publish"]:
if active != instance.data.get("publish", True):
self.log.info("Saver has different passthrough state than "
"Pyblish: {} ({})".format(instance, saver.Name))
return [saver]

View file

@ -0,0 +1,55 @@
from collections import defaultdict
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.fusion.api.action import SelectInvalidAction
class ValidateUniqueSubsets(pyblish.api.ContextPlugin):
"""Ensure all instances have a unique subset name"""
order = pyblish.api.ValidatorOrder
label = "Validate Unique Subsets"
families = ["render"]
hosts = ["fusion"]
actions = [SelectInvalidAction]
@classmethod
def get_invalid(cls, context):
# Collect instances per subset per asset
instances_per_subset_asset = defaultdict(lambda: defaultdict(list))
for instance in context:
asset = instance.data.get("asset", context.data.get("asset"))
subset = instance.data.get("subset", context.data.get("subset"))
instances_per_subset_asset[asset][subset].append(instance)
# Find which asset + subset combination has more than one instance
# Those are considered invalid because they'd integrate to the same
# destination.
invalid = []
for asset, instances_per_subset in instances_per_subset_asset.items():
for subset, instances in instances_per_subset.items():
if len(instances) > 1:
cls.log.warning(
"{asset} > {subset} used by more than "
"one instance: {instances}".format(
asset=asset,
subset=subset,
instances=instances
)
)
invalid.extend(instances)
# Return tools for the invalid instances so they can be selected
invalid = [instance.data["tool"] for instance in invalid]
return invalid
def process(self, context):
invalid = self.get_invalid(context)
if invalid:
raise PublishValidationError("Multiple instances are set to "
"the same asset > subset.",
title=self.label)

View file

@ -1,5 +1,3 @@
from bson.objectid import ObjectId
import pyblish.api
from openpype.pipeline import registered_host
@ -117,7 +115,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
# Collect containers for the given set of nodes
containers = collect_input_containers(nodes)
inputs = [ObjectId(c["representation"]) for c in containers]
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs)

View file

@ -22,6 +22,8 @@ PLACEHOLDER_SET = "PLACEHOLDERS_SET"
class MayaTemplateBuilder(AbstractTemplateBuilder):
"""Concrete implementation of AbstractTemplateBuilder for maya"""
use_legacy_creators = True
def import_template(self, path):
"""Import template into current scene.
Block if a template is already loaded.

View file

@ -1,5 +1,4 @@
import copy
from bson.objectid import ObjectId
from maya import cmds
import maya.api.OpenMaya as om
@ -165,9 +164,8 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
containers = collect_input_containers(scene_containers,
nodes)
inputs = [ObjectId(c["representation"]) for c in containers]
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
self.log.info("Collected inputs: %s" % inputs)
def _collect_renderlayer_inputs(self, scene_containers, instance):

View file

@ -48,7 +48,6 @@ from openpype.pipeline.colorspace import (
get_imageio_config
)
from openpype.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST
@ -2678,6 +2677,18 @@ def process_workfile_builder():
open_file(last_workfile_path)
def start_workfile_template_builder():
from .workfile_template_builder import (
build_workfile_template
)
# to avoid looping of the callback, remove it!
log.info("Starting workfile template builder...")
build_workfile_template(workfile_creation_enabled=True)
# remove callback since it would be duplicating the workfile
nuke.removeOnCreate(start_workfile_template_builder, nodeClass="Root")
@deprecated
def recreate_instance(origin_node, avalon_data=None):
"""Recreate input instance to different data

View file

@ -33,6 +33,7 @@ from .lib import (
add_publish_knob,
WorkfileSettings,
process_workfile_builder,
start_workfile_template_builder,
launch_workfiles_app,
check_inventory_versions,
set_avalon_knob_data,
@ -48,7 +49,6 @@ from .workfile_template_builder import (
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin,
build_workfile_template,
update_workfile_template,
create_placeholder,
update_placeholder,
)
@ -156,6 +156,7 @@ def add_nuke_callbacks():
nuke.addOnCreate(
workfile_settings.set_context_settings, nodeClass="Root")
nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
nuke.addOnCreate(start_workfile_template_builder, nodeClass="Root")
nuke.addOnCreate(process_workfile_builder, nodeClass="Root")
# fix ffmpeg settings on script

View file

@ -239,7 +239,11 @@ class NukeCreator(NewCreator):
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection", label="Use selection")
BoolDef(
"use_selection",
default=not self.create_context.headless,
label="Use selection"
)
]
def get_creator_settings(self, project_settings, settings_key=None):

View file

@ -1,7 +1,5 @@
import collections
import nuke
from openpype.pipeline import registered_host
from openpype.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
@ -14,7 +12,6 @@ from openpype.pipeline.workfile.workfile_template_builder import (
from openpype.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from .lib import (
find_free_space_to_paste_nodes,
get_extreme_positions,
@ -45,7 +42,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder):
get_template_preset implementation)
Returns:
bool: Wether the template was succesfully imported or not
bool: Whether the template was successfully imported or not
"""
# TODO check if the template is already imported
@ -55,7 +52,6 @@ class NukeTemplateBuilder(AbstractTemplateBuilder):
return True
class NukePlaceholderPlugin(PlaceholderPlugin):
node_color = 4278190335
@ -947,9 +943,9 @@ class NukePlaceholderCreatePlugin(
siblings_input.setInput(0, copy_output)
def build_workfile_template(*args):
def build_workfile_template(*args, **kwargs):
builder = NukeTemplateBuilder(registered_host())
builder.build_template()
builder.build_template(*args, **kwargs)
def update_workfile_template(*args):

View file

@ -13,7 +13,7 @@ def has_unsaved_changes():
def save_file(filepath):
path = filepath.replace("\\", "/")
nuke.scriptSaveAs(path)
nuke.scriptSaveAs(path, overwrite=1)
nuke.Root()["name"].setValue(path)
nuke.Root()["project_directory"].setValue(os.path.dirname(path))
nuke.Root().setModified(False)

View file

@ -35,7 +35,7 @@ class CreateWriteImage(napi.NukeWriteCreator):
attr_defs = [
BoolDef(
"use_selection",
default=True,
default=not self.create_context.headless,
label="Use selection"
),
self._get_render_target_enum(),

View file

@ -34,7 +34,7 @@ class CreateWritePrerender(napi.NukeWriteCreator):
attr_defs = [
BoolDef(
"use_selection",
default=True,
default=not self.create_context.headless,
label="Use selection"
),
self._get_render_target_enum()

View file

@ -31,7 +31,7 @@ class CreateWriteRender(napi.NukeWriteCreator):
attr_defs = [
BoolDef(
"use_selection",
default=True,
default=not self.create_context.headless,
label="Use selection"
),
self._get_render_target_enum()

View file

@ -3,12 +3,14 @@ from pprint import pformat
import nuke
import pyblish.api
from openpype.hosts.nuke import api as napi
from openpype.pipeline import publish
class CollectNukeWrites(pyblish.api.InstancePlugin):
class CollectNukeWrites(pyblish.api.InstancePlugin,
publish.ColormanagedPyblishPluginMixin):
"""Collect all write nodes."""
order = pyblish.api.CollectorOrder - 0.48
order = pyblish.api.CollectorOrder + 0.0021
label = "Collect Writes"
hosts = ["nuke", "nukeassist"]
families = ["render", "prerender", "image"]
@ -66,6 +68,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
write_file_path = nuke.filename(write_node)
output_dir = os.path.dirname(write_file_path)
# get colorspace and add to version data
colorspace = napi.get_colorspace_from_node(write_node)
self.log.debug('output dir: {}'.format(output_dir))
if render_target == "frames":
@ -128,6 +133,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
else:
representation['files'] = collected_frames
# inject colorspace data
self.set_representation_colorspace(
representation, instance.context,
colorspace=colorspace
)
instance.data["representations"].append(representation)
self.log.info("Publishing rendered frames ...")
@ -145,8 +156,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
instance.data["farm"] = True
self.log.info("Farm rendering ON ...")
# get colorspace and add to version data
colorspace = napi.get_colorspace_from_node(write_node)
# TODO: remove this when we have proper colorspace support
version_data = {
"colorspace": colorspace
}

View file

@ -4,12 +4,13 @@ import shutil
import pyblish.api
import clique
import nuke
from openpype.hosts.nuke import api as napi
from openpype.pipeline import publish
from openpype.lib import collect_frames
class NukeRenderLocal(publish.ExtractorColormanaged):
class NukeRenderLocal(publish.Extractor,
publish.ColormanagedPyblishPluginMixin):
"""Render the current Nuke composition locally.
Extract the result of savers by starting a comp render
@ -85,7 +86,7 @@ class NukeRenderLocal(publish.ExtractorColormanaged):
)
ext = node["file_type"].value()
colorspace = node["colorspace"].value()
colorspace = napi.get_colorspace_from_node(node)
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -10,10 +10,20 @@ from wsrpc_aiohttp import (
from qtpy import QtCore
from openpype.lib import Logger
from openpype.pipeline import legacy_io
from openpype.lib import Logger, StringTemplate
from openpype.pipeline import (
registered_host,
Anatomy,
)
from openpype.pipeline.workfile import (
get_workfile_template_key_from_context,
get_last_workfile,
)
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.tools.utils import host_tools
from openpype.tools.adobe_webserver.app import WebServerTool
from openpype.pipeline.context_tools import change_current_context
from openpype.client import get_asset_by_name
from .ws_stub import PhotoshopServerStub
@ -310,23 +320,28 @@ class PhotoshopRoute(WebSocketRoute):
# client functions
async def set_context(self, project, asset, task):
"""
Sets 'project' and 'asset' to envs, eg. setting context
Sets 'project' and 'asset' to envs, e.g. setting context.
Args:
project (str)
asset (str)
Opens last workfile from that context if it exists.
Args:
project (str)
asset (str)
task (str)
"""
log.info("Setting context change")
log.info("project {} asset {} ".format(project, asset))
if project:
legacy_io.Session["AVALON_PROJECT"] = project
os.environ["AVALON_PROJECT"] = project
if asset:
legacy_io.Session["AVALON_ASSET"] = asset
os.environ["AVALON_ASSET"] = asset
if task:
legacy_io.Session["AVALON_TASK"] = task
os.environ["AVALON_TASK"] = task
log.info(f"project {project} asset {asset} task {task}")
asset_doc = get_asset_by_name(project, asset)
change_current_context(asset_doc, task)
last_workfile_path = self._get_last_workfile_path(project,
asset,
task)
if last_workfile_path and os.path.exists(last_workfile_path):
ProcessLauncher.execute_in_main_thread(
lambda: stub().open(last_workfile_path))
async def read(self):
log.debug("photoshop.read client calls server server calls "
@ -356,3 +371,35 @@ class PhotoshopRoute(WebSocketRoute):
# Required return statement.
return "nothing"
def _get_last_workfile_path(self, project_name, asset_name, task_name):
"""Returns last workfile path if exists"""
host = registered_host()
host_name = "photoshop"
template_key = get_workfile_template_key_from_context(
asset_name,
task_name,
host_name,
project_name=project_name
)
anatomy = Anatomy(project_name)
data = get_template_data_with_names(
project_name, asset_name, task_name, host_name
)
data["root"] = anatomy.roots
file_template = anatomy.templates[template_key]["file"]
# Define saving file extension
extensions = host.get_workfile_extensions()
folder_template = anatomy.templates[template_key]["folder"]
work_root = StringTemplate.format_strict_template(
folder_template, data
)
last_workfile_path = get_last_workfile(
work_root, file_template, data, extensions, True
)
return last_workfile_path

View file

@ -95,7 +95,8 @@ def update_op_assets(
op_asset = create_op_asset(item)
insert_result = dbcon.insert_one(op_asset)
item_doc = get_asset_by_id(
project_name, insert_result.inserted_id)
project_name, insert_result.inserted_id
)
# Update asset
item_data = deepcopy(item_doc["data"])
@ -133,39 +134,47 @@ def update_op_assets(
try:
fps = float(item_data.get("fps"))
except (TypeError, ValueError):
fps = float(gazu_project.get(
"fps", project_doc["data"].get("fps", 25)))
fps = float(
gazu_project.get("fps", project_doc["data"].get("fps", 25))
)
item_data["fps"] = fps
# Resolution, fall back to project default
match_res = re.match(
r"(\d+)x(\d+)",
item_data.get("resolution", gazu_project.get("resolution"))
item_data.get("resolution", gazu_project.get("resolution")),
)
if match_res:
item_data["resolutionWidth"] = int(match_res.group(1))
item_data["resolutionHeight"] = int(match_res.group(2))
else:
item_data["resolutionWidth"] = project_doc["data"].get(
"resolutionWidth")
"resolutionWidth"
)
item_data["resolutionHeight"] = project_doc["data"].get(
"resolutionHeight")
"resolutionHeight"
)
# Properties that don't fully exist in Kitsu.
# Guessing those property names below:
# Pixel Aspect Ratio
item_data["pixelAspect"] = item_data.get(
"pixel_aspect", project_doc["data"].get("pixelAspect"))
"pixel_aspect", project_doc["data"].get("pixelAspect")
)
# Handle Start
item_data["handleStart"] = item_data.get(
"handle_start", project_doc["data"].get("handleStart"))
"handle_start", project_doc["data"].get("handleStart")
)
# Handle End
item_data["handleEnd"] = item_data.get(
"handle_end", project_doc["data"].get("handleEnd"))
"handle_end", project_doc["data"].get("handleEnd")
)
# Clip In
item_data["clipIn"] = item_data.get(
"clip_in", project_doc["data"].get("clipIn"))
"clip_in", project_doc["data"].get("clipIn")
)
# Clip Out
item_data["clipOut"] = item_data.get(
"clip_out", project_doc["data"].get("clipOut"))
"clip_out", project_doc["data"].get("clipOut")
)
# Tasks
tasks_list = []
@@ -175,11 +184,9 @@ def update_op_assets(
elif item_type == "Shot":
tasks_list = gazu.task.all_tasks_for_shot(item)
item_data["tasks"] = {
item_data["tasks"] = {
t["task_type_name"]: {
"type": t["task_type_name"],
"zou": gazu.task.get_task(t["id"]),
}
t["task_type_name"]: {
"type": t["task_type_name"],
"zou": gazu.task.get_task(t["id"]),
}
for t in tasks_list
}
@@ -218,7 +225,9 @@ def update_op_assets(
if parent_zou_id_dict is not None:
visual_parent_doc_id = (
parent_zou_id_dict.get("_id")
                if parent_zou_id_dict
                else None
            )
if visual_parent_doc_id is None:
# Find root folder doc ("Assets" or "Shots")
@@ -345,7 +354,8 @@ def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne:
def sync_all_projects(
    login: str, password: str, ignore_projects: list = None
):
"""Update all OP projects in DB with Zou data.
Args:
@@ -390,7 +400,7 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
if not project:
project = gazu.project.get_project_by_name(project["name"])
log.info("Synchronizing {}...".format(project['name']))
log.info("Synchronizing {}...".format(project["name"]))
# Get all assets from zou
all_assets = gazu.asset.all_assets_for_project(project)
@@ -473,8 +483,11 @@ def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict):
[
UpdateOne({"_id": id}, update)
for id, update in update_op_assets(
                dbcon,
                project,
                project_dict,
                all_entities,
                zou_ids_and_asset_docs,
)
]
)
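
The resolution fallback above assumes Kitsu stores resolution as a "WIDTHxHEIGHT" string. A self-contained sketch of that parse with the project fallback (sample values invented):

import re

def parse_resolution(value, project_data):
    """Split a Kitsu 'WIDTHxHEIGHT' string, else use project defaults."""
    match = re.match(r"(\d+)x(\d+)", value or "")
    if match:
        return int(match.group(1)), int(match.group(2))
    return (
        project_data["resolutionWidth"],
        project_data["resolutionHeight"],
    )

defaults = {"resolutionWidth": 1920, "resolutionHeight": 1080}
print(parse_resolution("3840x2160", defaults))  # (3840, 2160)
print(parse_resolution(None, defaults))         # (1920, 1080)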

View file

@@ -335,9 +335,10 @@ def get_imageio_config(
get_template_data_from_session)
anatomy_data = get_template_data_from_session()
formatting_data = deepcopy(anatomy_data)
    # add project roots and platform to formatting data
formatting_data["root"] = anatomy.roots
formatting_data["platform"] = platform.system().lower()
# get colorspace settings
imageio_global, imageio_host = _get_imageio_settings(
@@ -347,7 +348,7 @@
if config_host.get("enabled"):
config_data = _get_config_data(
config_host["filepath"], anatomy_data
config_host["filepath"], formatting_data
)
else:
config_data = None
@@ -356,7 +357,7 @@
# get config path from either global or host_name
config_global = imageio_global["ocio_config"]
config_data = _get_config_data(
config_global["filepath"], anatomy_data
config_global["filepath"], formatting_data
)
if not config_data:
@@ -372,12 +373,12 @@ def _get_config_data(path_list, anatomy_data):
"""Return first existing path in path list.
If template is used in path inputs,
    then it is formatted by anatomy data
    and environment variables.
Args:
path_list (list[str]): list of abs paths
        anatomy_data (dict): formatting data
Returns:
dict: config data
@@ -389,30 +390,30 @@ def _get_config_data(path_list, anatomy_data):
# first try host config paths
for path_ in path_list:
        formatted_path = _format_path(path_, formatting_data)
        if not os.path.exists(formatted_path):
continue
return {
"path": os.path.normpath(formated_path),
"path": os.path.normpath(formatted_path),
"template": path_
}
def _format_path(template_path, formatting_data):
    """Single template path formatting.
Args:
        template_path (str): template string
formatting_data (dict): data to be used for
            template formatting
Returns:
        str: absolute formatted path
"""
# format path for anatomy keys
    formatted_path = StringTemplate(template_path).format(
formatting_data)
return os.path.abspath(formatted_path)
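
For reference, the formatting step fills anatomy keys such as "root" and "platform" into each candidate path before testing existence. A rough equivalent with plain str.format (the real code goes through StringTemplate, which is more forgiving about missing keys; the sample data is made up):

import os

formatting_data = {
    "root": {"work": "/mnt/projects"},  # normally from Anatomy roots
    "platform": "linux",
}
template = "{root[work]}/configs/{platform}/config.ocio"
candidate = os.path.abspath(template.format(**formatting_data))
print(candidate)  # /mnt/projects/configs/linux/config.ocio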

View file

@@ -19,7 +19,7 @@ from .publish_plugins import (
RepairContextAction,
Extractor,
    ColormanagedPyblishPluginMixin
)
from .lib import (
@@ -64,7 +64,7 @@ __all__ = (
"RepairContextAction",
"Extractor",
"ExtractorColormanaged",
"ColormanagedPyblishPluginMixin",
"get_publish_template_name",

View file

@@ -3,7 +3,7 @@ from abc import ABCMeta
from pprint import pformat
import pyblish.api
from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin
from openpype.lib.transcoding import VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
from openpype.lib import BoolDef
from .lib import (
@@ -288,28 +288,29 @@ class Extractor(pyblish.api.InstancePlugin):
return get_instance_staging_dir(instance)
class ColormanagedPyblishPluginMixin(object):
"""Mixin for colormanaged plugins.
This class is used to set colorspace data to a publishing
representation. It contains a static method,
get_colorspace_settings, which returns config and
file rules data for the host context.
It also contains a method, set_representation_colorspace,
which sets colorspace data to the representation.
The allowed file extensions are listed in the allowed_ext variable.
The method first checks if the file extension is in
the list of allowed extensions. If it is, it then gets the
colorspace settings from the host context and gets a
matching colorspace from rules. Finally, it infuses this
data into the representation.
"""
allowed_ext = set(
ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
)
@staticmethod
def get_colorspace_settings(context):
"""Retuns solved settings for the host context.
"""Returns solved settings for the host context.
Args:
context (publish.Context): publishing context
@@ -375,7 +376,10 @@ class ExtractorColormanaged(Extractor):
ext = representation["ext"]
# check extension
self.log.debug("__ ext: `{}`".format(ext))
        # check if ext in lower case is in self.allowed_ext
        if ext.lstrip(".").lower() not in self.allowed_ext:
self.log.debug("Extension is not in allowed extensions.")
return
if colorspace_settings is None:
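
Downstream plugins combine the mixin with Extractor, as the ExtractColorspaceData change later in this diff shows. A hedged sketch of such a plugin (the representation contents are illustrative; set_representation_colorspace is the mixin method described above):

import pyblish.api
from openpype.pipeline import publish

class ExtractReviewWithColorspace(publish.Extractor,
                                  publish.ColormanagedPyblishPluginMixin):
    """Example extractor tagging its output with colorspace data."""
    order = pyblish.api.ExtractorOrder

    def process(self, instance):
        representation = {
            "name": "mov",
            "ext": "mov",
            "files": "review.mov",
            "stagingDir": self.staging_dir(instance),
        }
        # inject colorspace info resolved from OCIO config and file rules
        self.set_representation_colorspace(
            representation, instance.context
        )
        instance.data.setdefault("representations", []).append(representation)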

View file

@@ -28,6 +28,7 @@ from openpype.settings import (
get_project_settings,
get_system_settings,
)
from openpype.host import IWorkfileHost
from openpype.host import HostBase
from openpype.lib import (
Logger,
@@ -43,7 +44,8 @@ from openpype.pipeline.load import (
load_with_repre_context,
)
from openpype.pipeline.create import (
    discover_legacy_creator_plugins,
    CreateContext,
)
@@ -91,6 +93,7 @@ class AbstractTemplateBuilder(object):
"""
_log = None
use_legacy_creators = False
def __init__(self, host):
# Get host name
@@ -110,6 +113,7 @@
self._placeholder_plugins = None
self._loaders_by_name = None
self._creators_by_name = None
self._create_context = None
self._system_settings = None
self._project_settings = None
@@ -171,6 +175,16 @@
.get("type")
)
@property
def create_context(self):
if self._create_context is None:
self._create_context = CreateContext(
self.host,
discover_publish_plugins=False,
headless=True
)
return self._create_context
def get_placeholder_plugin_classes(self):
"""Get placeholder plugin classes that can be used to build template.
@@ -235,18 +249,29 @@
self._loaders_by_name = get_loaders_by_name()
return self._loaders_by_name
def _collect_legacy_creators(self):
creators_by_name = {}
for creator in discover_legacy_creator_plugins():
if not creator.enabled:
continue
creator_name = creator.__name__
if creator_name in creators_by_name:
raise KeyError(
"Duplicated creator name {} !".format(creator_name)
)
creators_by_name[creator_name] = creator
self._creators_by_name = creators_by_name
def _collect_creators(self):
self._creators_by_name = dict(self.create_context.creators)
def get_creators_by_name(self):
if self._creators_by_name is None:
if self.use_legacy_creators:
self._collect_legacy_creators()
else:
self._collect_creators()
return self._creators_by_name
def get_shared_data(self, key):
@@ -416,7 +441,9 @@
self,
template_path=None,
level_limit=None,
        keep_placeholders=None,
        create_first_version=None,
        workfile_creation_enabled=False
):
"""Main callback for building workfile from template path.
@@ -433,6 +460,11 @@
keep_placeholders (bool): Add flag to placeholder data for
hosts to decide if they want to remove
placeholder after it is used.
            create_first_version (bool): Create the first version of a
                workfile if none exists yet.
            workfile_creation_enabled (bool): If True, the build runs only
                when the first workfile version was just created and is
                skipped when a workfile already exists.
"""
template_preset = self.get_template_preset()
@@ -441,6 +473,30 @@
if keep_placeholders is None:
keep_placeholders = template_preset["keep_placeholder"]
if create_first_version is None:
create_first_version = template_preset["create_first_version"]
        # create the first workfile version unless one already exists
created_version_workfile = self.create_first_workfile_version()
# if first version is created, import template
# and populate placeholders
if (
create_first_version
and workfile_creation_enabled
and created_version_workfile
):
self.import_template(template_path)
self.populate_scene_placeholders(
level_limit, keep_placeholders)
# save workfile after template is populated
self.save_workfile(created_version_workfile)
        # when workfile creation is enabled the template was either just
        # populated above or a workfile already exists, so stop here
if workfile_creation_enabled:
return
self.import_template(template_path)
self.populate_scene_placeholders(
@@ -492,6 +548,39 @@
pass
def create_first_workfile_version(self):
"""
Create first version of workfile.
Should load the content of template into scene so
'populate_scene_placeholders' can be started.
Args:
template_path (str): Fullpath for current task and
host's template file.
"""
last_workfile_path = os.environ.get("AVALON_LAST_WORKFILE")
self.log.info("__ last_workfile_path: {}".format(last_workfile_path))
        if last_workfile_path and os.path.exists(last_workfile_path):
            # do not overwrite an existing workfile
self.log.info("Workfile already exists, skipping creation.")
return False
# Create first version
self.log.info("Creating first version of workfile.")
self.save_workfile(last_workfile_path)
# Confirm creation of first version
return last_workfile_path
def save_workfile(self, workfile_path):
"""Save workfile in current host."""
# Save current scene, continue to open file
if isinstance(self.host, IWorkfileHost):
self.host.save_workfile(workfile_path)
else:
self.host.save_file(workfile_path)
def _prepare_placeholders(self, placeholders):
"""Run preparation part for placeholders on plugins.
@@ -675,6 +764,8 @@ class AbstractTemplateBuilder(object):
# switch to remove placeholders after they are used
keep_placeholder = profile.get("keep_placeholder")
create_first_version = profile.get("create_first_version")
# backward compatibility, since default is True
if keep_placeholder is None:
keep_placeholder = True
@@ -708,7 +799,8 @@
self.log.info("Found template at: '{}'".format(path))
return {
"path": path,
"keep_placeholder": keep_placeholder
"keep_placeholder": keep_placeholder,
"create_first_version": create_first_version
}
solved_path = None
@@ -737,7 +829,8 @@
return {
"path": solved_path,
"keep_placeholder": keep_placeholder
"keep_placeholder": keep_placeholder,
"create_first_version": create_first_version
}
@@ -1579,6 +1672,8 @@ class PlaceholderCreateMixin(object):
placeholder (PlaceholderItem): Placeholder item with information
about requested publishable instance.
"""
legacy_create = self.builder.use_legacy_creators
creator_name = placeholder.data["creator"]
create_variant = placeholder.data["create_variant"]
@@ -1589,17 +1684,28 @@
task_name = legacy_io.Session["AVALON_TASK"]
asset_name = legacy_io.Session["AVALON_ASSET"]
if legacy_create:
asset_doc = get_asset_by_name(
project_name, asset_name, fields=["_id"]
)
assert asset_doc, "No current asset found in Session"
subset_name = creator_plugin.get_subset_name(
create_variant,
task_name,
asset_doc["_id"],
project_name
)
else:
asset_doc = get_asset_by_name(project_name, asset_name)
assert asset_doc, "No current asset found in Session"
subset_name = creator_plugin.get_subset_name(
create_variant,
task_name,
asset_doc,
project_name,
self.builder.host_name
)
creator_data = {
"creator_name": creator_name,
@@ -1612,12 +1718,20 @@
# compile subset name from variant
try:
if legacy_create:
creator_instance = creator_plugin(
subset_name,
asset_name
).process()
else:
creator_instance = self.builder.create_context.create(
creator_plugin.identifier,
create_variant,
asset_doc,
task_name=task_name
)
except: # noqa: E722
failed = True
self.create_failed(placeholder, creator_data)
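
The first-version flow above reduces to: create the workfile if missing, and only then import and populate the template. A distilled standalone sketch of that control flow (all callables are stand-ins, not the builder's API):

import os

def build_first_version(workfile_path, import_template, populate, save):
    """Stand-in for build_template(workfile_creation_enabled=True)."""
    if os.path.exists(workfile_path):
        # a version already exists, skip the whole build
        return False
    save(workfile_path)   # create_first_workfile_version()
    import_template()     # load template content into the scene
    populate()            # resolve placeholders
    save(workfile_path)   # save the populated scene
    return True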

View file

@@ -23,7 +23,8 @@ class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
representations = set()
for instance in context:
inst_repre = instance.data.get("inputRepresentations", [])
            if inst_repre:
                representations.update(inst_repre)
representations_docs = get_representations(
project_name=context.data["projectEntity"]["name"],
@@ -31,7 +32,8 @@ class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
fields=["_id", "parent"])
representation_id_to_version_id = {
repre["_id"]: repre["parent"] for repre in representations_docs
str(repre["_id"]): repre["parent"]
for repre in representations_docs
}
for instance in context:
@@ -39,9 +41,8 @@ class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin):
if not inst_repre:
continue
            input_versions = instance.data.setdefault("inputVersions", [])
for repre_id in inst_repre:
version_id = representation_id_to_version_id.get(repre_id)
if version_id:
input_versions.append(version_id)
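
The crux of the fix is that representation ids collected from instance data are plain strings while Mongo returns ObjectId keys, so the mapping is normalized to strings and misses are skipped. A tiny illustration with a fabricated id:

from bson.objectid import ObjectId

docs = [{"_id": ObjectId("63f5b0c2a4d3e2a1b0c9d8e7"), "parent": "version-1"}]
repre_id_to_version = {str(doc["_id"]): doc["parent"] for doc in docs}

# instance data stores ids as strings, so lookups need no ObjectId cast
print(repre_id_to_version.get("63f5b0c2a4d3e2a1b0c9d8e7"))  # version-1
print(repre_id_to_version.get("unknown-id"))                # None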

View file

@@ -2,7 +2,8 @@ import pyblish.api
from openpype.pipeline import publish
class ExtractColorspaceData(publish.Extractor,
                            publish.ColormanagedPyblishPluginMixin):
""" Inject Colorspace data to available representations.
Input data:

View file

@@ -135,6 +135,38 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
)
return project_doc
def _prepare_new_tasks(self, asset_doc, entity_data):
new_tasks = entity_data.get("tasks") or {}
if not asset_doc:
return new_tasks
old_tasks = asset_doc.get("data", {}).get("tasks")
# Just use new tasks if old are not available
if not old_tasks:
return new_tasks
output = deepcopy(old_tasks)
# Create mapping of lowered task names from old tasks
cur_task_low_mapping = {
task_name.lower(): task_name
for task_name in old_tasks
}
# Add/update tasks from new entity data
for task_name, task_info in new_tasks.items():
task_info = deepcopy(task_info)
task_name_low = task_name.lower()
# Add new task
if task_name_low not in cur_task_low_mapping:
output[task_name] = task_info
continue
# Update existing task with new info
mapped_task_name = cur_task_low_mapping.pop(task_name_low)
src_task_info = output.pop(mapped_task_name)
src_task_info.update(task_info)
output[task_name] = src_task_info
return output
def sync_asset(
self,
asset_name,
@@ -170,11 +202,12 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
data["parents"] = parents
asset_doc = asset_docs_by_name.get(asset_name)
# Tasks
data["tasks"] = self._prepare_new_tasks(asset_doc, entity_data)
# --- Create/Unarchive asset and end ---
if not asset_doc:
archived_asset_doc = None
for archived_entity in archived_asset_docs_by_name[asset_name]:
archived_parents = (
@@ -201,19 +234,6 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
if "data" not in asset_doc:
asset_doc["data"] = {}
cur_entity_data = asset_doc["data"]
changes = {}
for key, value in data.items():
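
The new _prepare_new_tasks matches existing task names case-insensitively, so a re-published "compositing" updates an existing "Compositing" instead of duplicating it. A condensed sketch of that merge (sample data invented):

from copy import deepcopy

def merge_tasks(old_tasks, new_tasks):
    """Case-insensitive task merge mirroring _prepare_new_tasks()."""
    output = deepcopy(old_tasks)
    low_map = {name.lower(): name for name in old_tasks}
    for name, info in new_tasks.items():
        if name.lower() not in low_map:
            output[name] = deepcopy(info)
            continue
        merged = output.pop(low_map.pop(name.lower()))
        merged.update(info)
        output[name] = merged
    return output

old = {"Compositing": {"type": "Compositing"}}
new = {"compositing": {"type": "Compositing", "zou": {"id": "t1"}}}
print(merge_tasks(old, new))
# {'compositing': {'type': 'Compositing', 'zou': {'id': 't1'}}}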

View file

@@ -565,7 +565,17 @@
]
},
"templated_workfile_build": {
"profiles": []
"profiles": [
{
"task_types": [
"Compositing"
],
"task_names": [],
"path": "{project[name]}/templates/comp.nk",
"keep_placeholder": true,
"create_first_version": true
}
]
},
"filters": {}
}

View file

@@ -34,6 +34,12 @@
"label": "Keep placeholders",
"type": "boolean",
"default": true
},
{
"key": "create_first_version",
"label": "Create first version",
"type": "boolean",
"default": true
}
]
}

View file

@@ -186,7 +186,7 @@ class AttributeDefinitionsWidget(QtWidgets.QWidget):
class _BaseAttrDefWidget(QtWidgets.QWidget):
# Type 'object' may not work with older PySide versions
    value_changed = QtCore.Signal(object, str)
def __init__(self, attr_def, parent):
super(_BaseAttrDefWidget, self).__init__(parent)
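
Emitting the attribute definition id as str avoids relying on Qt's meta-object system to marshal uuid.UUID, which not every binding handles. The emit side then only needs a cast (a sketch, assuming qtpy-style imports):

import uuid
from qtpy import QtCore

class ExampleEmitter(QtCore.QObject):
    value_changed = QtCore.Signal(object, str)

    def __init__(self, attr_def_id=None):
        super(ExampleEmitter, self).__init__()
        self._id = attr_def_id or uuid.uuid4()

    def on_value_change(self, value):
        # cast the UUID so the signal signature stays binding-friendly
        self.value_changed.emit(value, str(self._id))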

View file

@@ -98,15 +98,22 @@ class Popup(QtWidgets.QDialog):
height = window.height()
height = max(height, window.sizeHint().height())
try:
screen = window.screen()
desktop_geometry = screen.availableGeometry()
except AttributeError:
# Backwards compatibility for older Qt versions
# PySide6 removed QDesktopWidget
desktop_geometry = QtWidgets.QDesktopWidget().availableGeometry()
window_geometry = window.geometry()
screen_width = window_geometry.width()
screen_height = window_geometry.height()
# Calculate width and height of system tray
systray_width = window_geometry.width() - desktop_geometry.width()
systray_height = window_geometry.height() - desktop_geometry.height()
padding = 10
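
The same try/except pattern generalizes to any spot that needs the available screen area across Qt bindings. A standalone helper in that spirit (a sketch, not the project's API):

from qtpy import QtWidgets

def available_geometry(widget):
    """Available screen rect for the widget's screen, Qt5/Qt6 safe."""
    try:
        # QWidget.screen() exists in Qt 5.14+ and Qt 6
        return widget.screen().availableGeometry()
    except AttributeError:
        # PySide6 removed QDesktopWidget; older Qt 5 still has it
        return QtWidgets.QDesktopWidget().availableGeometry()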