Mirror of https://github.com/ynput/ayon-core.git (synced 2026-01-01 08:24:53 +01:00)

Merge remote-tracking branch 'origin/develop' into enhancement/render-product-names-templated

Commit b37b0eccc5: 274 changed files with 9127 additions and 4245 deletions
@@ -1,7 +1,7 @@
 from ayon_applications import PreLaunchHook
 
-from ayon_core.pipeline.colorspace import get_imageio_config
-from ayon_core.pipeline.template_data import get_template_data_with_names
+from ayon_core.pipeline.colorspace import get_imageio_config_preset
+from ayon_core.pipeline.template_data import get_template_data
 
 
 class OCIOEnvHook(PreLaunchHook):
@@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook):
     def execute(self):
         """Hook entry method."""
 
-        template_data = get_template_data_with_names(
-            project_name=self.data["project_name"],
-            folder_path=self.data["folder_path"],
-            task_name=self.data["task_name"],
+        folder_entity = self.data["folder_entity"]
+
+        template_data = get_template_data(
+            self.data["project_entity"],
+            folder_entity=folder_entity,
+            task_entity=self.data["task_entity"],
             host_name=self.host_name,
-            settings=self.data["project_settings"]
+            settings=self.data["project_settings"],
         )
 
-        config_data = get_imageio_config(
-            project_name=self.data["project_name"],
-            host_name=self.host_name,
-            project_settings=self.data["project_settings"],
-            anatomy_data=template_data,
+        config_data = get_imageio_config_preset(
+            self.data["project_name"],
+            self.data["folder_path"],
+            self.data["task_name"],
+            self.host_name,
             anatomy=self.data["anatomy"],
+            project_settings=self.data["project_settings"],
+            template_data=template_data,
             env=self.launch_context.env,
+            folder_id=folder_entity["id"],
         )
 
-        if config_data:
-            ocio_path = config_data["path"]
-
-            if self.host_name in ["nuke", "hiero"]:
-                ocio_path = ocio_path.replace("\\", "/")
-
-            self.log.info(
-                f"Setting OCIO environment to config path: {ocio_path}")
-
-            self.launch_context.env["OCIO"] = ocio_path
-        else:
+        if not config_data:
             self.log.debug("OCIO not set or enabled")
+            return
+
+        ocio_path = config_data["path"]
+
+        if self.host_name in ["nuke", "hiero"]:
+            ocio_path = ocio_path.replace("\\", "/")
+
+        self.log.info(
+            f"Setting OCIO environment to config path: {ocio_path}")
+
+        self.launch_context.env["OCIO"] = ocio_path
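Note on the hunk above: besides switching to `get_imageio_config_preset`, the refactor flips the `if config_data:` block into a guard clause so the success path is no longer nested. A minimal sketch of that shape, using a hypothetical stand-in function rather than the real hook class:

    def apply_ocio(env, config_data, host_name, log):
        # Guard clause: bail out early when color management is disabled.
        if not config_data:
            log.debug("OCIO not set or enabled")
            return env

        ocio_path = config_data["path"]
        # Nuke-family hosts expect forward slashes, even on Windows.
        if host_name in ("nuke", "hiero"):
            ocio_path = ocio_path.replace("\\", "/")
        env["OCIO"] = ocio_path
        return env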
@@ -60,7 +60,7 @@ def main(*subprocess_args):
             )
         )
 
-    elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
+    elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
         save = False
         if os.getenv("WORKFILES_SAVE_AS"):
             save = True
@@ -8,14 +8,11 @@ from ayon_core.lib import Logger, register_event_callback
 from ayon_core.pipeline import (
     register_loader_plugin_path,
     register_creator_plugin_path,
+    register_workfile_build_plugin_path,
     AVALON_CONTAINER_ID,
     AVALON_INSTANCE_ID,
     AYON_INSTANCE_ID,
 )
-from ayon_core.hosts.aftereffects.api.workfile_template_builder import (
-    AEPlaceholderLoadPlugin,
-    AEPlaceholderCreatePlugin
-)
 from ayon_core.pipeline.load import any_outdated_containers
 import ayon_core.hosts.aftereffects
@ -40,6 +37,7 @@ PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
|
||||||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
|
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
|
||||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
|
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
|
||||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
|
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
|
||||||
|
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
|
||||||
|
|
||||||
|
|
||||||
class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
|
class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
|
||||||
|
|
@@ -76,6 +74,7 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
 
         register_loader_plugin_path(LOAD_PATH)
         register_creator_plugin_path(CREATE_PATH)
+        register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
 
         register_event_callback("application.launched", application_launch)
@@ -118,12 +117,6 @@ class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
         item["id"] = "publish_context"
         self.stub.imprint(item["id"], item)
 
-    def get_workfile_build_placeholder_plugins(self):
-        return [
-            AEPlaceholderLoadPlugin,
-            AEPlaceholderCreatePlugin
-        ]
-
     # created instances section
     def list_instances(self):
         """List all created instances from current workfile which
@@ -1,6 +1,7 @@
 import os.path
 import uuid
 import shutil
+from abc import abstractmethod
 
 from ayon_core.pipeline import registered_host
 from ayon_core.tools.workfile_template_build import (
@@ -9,13 +10,9 @@ from ayon_core.tools.workfile_template_build import (
 from ayon_core.pipeline.workfile.workfile_template_builder import (
     AbstractTemplateBuilder,
     PlaceholderPlugin,
-    LoadPlaceholderItem,
-    CreatePlaceholderItem,
-    PlaceholderLoadMixin,
-    PlaceholderCreateMixin
+    PlaceholderItem
 )
 from ayon_core.hosts.aftereffects.api import get_stub
-from ayon_core.hosts.aftereffects.api.lib import set_settings
 
 PLACEHOLDER_SET = "PLACEHOLDERS_SET"
 PLACEHOLDER_ID = "openpype.placeholder"
@@ -51,6 +48,10 @@ class AETemplateBuilder(AbstractTemplateBuilder):
 class AEPlaceholderPlugin(PlaceholderPlugin):
     """Contains generic methods for all PlaceholderPlugins."""
 
+    @abstractmethod
+    def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem:
+        pass
+
     def collect_placeholders(self):
         """Collect info from file metadata about created placeholders.
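The `isinstance` dispatch removed in the next hunk is replaced by this abstract factory hook: each subclass decides which placeholder item type to build, while the base class keeps the shared collection loop. A stripped-down sketch of the pattern, with stand-in classes instead of the real plugin types:

    from abc import ABC, abstractmethod

    class BasePlaceholderPlugin(ABC):
        @abstractmethod
        def _create_placeholder_item(self, item_data: dict):
            ...

        def collect(self, items):
            # Shared loop; only item construction is subclass-specific.
            return [self._create_placeholder_item(item) for item in items]

    class LoadPlugin(BasePlaceholderPlugin):
        def _create_placeholder_item(self, item_data: dict):
            # Stand-in for LoadPlaceholderItem(...) in the real plugin.
            return ("load", item_data["uuid"], item_data["data"])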
@@ -63,17 +64,7 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
             if item.get("plugin_identifier") != self.identifier:
                 continue
 
-            if isinstance(self, AEPlaceholderLoadPlugin):
-                item = LoadPlaceholderItem(item["uuid"],
-                                           item["data"],
-                                           self)
-            elif isinstance(self, AEPlaceholderCreatePlugin):
-                item = CreatePlaceholderItem(item["uuid"],
-                                             item["data"],
-                                             self)
-            else:
-                raise NotImplementedError(f"Not implemented for {type(self)}")
-
+            item = self._create_placeholder_item(item)
             output.append(item)
 
         return output
@@ -135,87 +126,6 @@ class AEPlaceholderPlugin(PlaceholderPlugin):
         stub.imprint(item_id, container_data)
 
 
-class AEPlaceholderCreatePlugin(AEPlaceholderPlugin, PlaceholderCreateMixin):
-    """Adds Create placeholder.
-
-    This adds composition and runs Create
-    """
-    identifier = "aftereffects.create"
-    label = "AfterEffects create"
-
-    def create_placeholder(self, placeholder_data):
-        stub = get_stub()
-        name = "CREATEPLACEHOLDER"
-        item_id = stub.add_item(name, "COMP")
-
-        self._imprint_item(item_id, name, placeholder_data, stub)
-
-    def populate_placeholder(self, placeholder):
-        """Replace 'placeholder' with publishable instance.
-
-        Renames prepared composition name, creates publishable instance, sets
-        frame/duration settings according to DB.
-        """
-        pre_create_data = {"use_selection": True}
-        item_id, item = self._get_item(placeholder)
-        get_stub().select_items([item_id])
-        self.populate_create_placeholder(placeholder, pre_create_data)
-
-        # apply settings for populated composition
-        item_id, metadata_item = self._get_item(placeholder)
-        set_settings(True, True, [item_id])
-
-    def get_placeholder_options(self, options=None):
-        return self.get_create_plugin_options(options)
-
-
-class AEPlaceholderLoadPlugin(AEPlaceholderPlugin, PlaceholderLoadMixin):
-    identifier = "aftereffects.load"
-    label = "AfterEffects load"
-
-    def create_placeholder(self, placeholder_data):
-        """Creates AE's Placeholder item in Project items list.
-
-        Sets dummy resolution/duration/fps settings, will be replaced when
-        populated.
-        """
-        stub = get_stub()
-        name = "LOADERPLACEHOLDER"
-        item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
-
-        self._imprint_item(item_id, name, placeholder_data, stub)
-
-    def populate_placeholder(self, placeholder):
-        """Use Openpype Loader from `placeholder` to create new FootageItems
-
-        New FootageItems are created, files are imported.
-        """
-        self.populate_load_placeholder(placeholder)
-        errors = placeholder.get_errors()
-        stub = get_stub()
-        if errors:
-            stub.print_msg("\n".join(errors))
-        else:
-            if not placeholder.data["keep_placeholder"]:
-                metadata = stub.get_metadata()
-                for item in metadata:
-                    if not item.get("is_placeholder"):
-                        continue
-                    scene_identifier = item.get("uuid")
-                    if (scene_identifier and
-                            scene_identifier == placeholder.scene_identifier):
-                        stub.delete_item(item["members"][0])
-                stub.remove_instance(placeholder.scene_identifier, metadata)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_load_plugin_options(options)
-
-    def load_succeed(self, placeholder, container):
-        placeholder_item_id, _ = self._get_item(placeholder)
-        item_id = container.id
-        get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
-
-
 def build_workfile_template(*args, **kwargs):
     builder = AETemplateBuilder(registered_host())
     builder.build_template(*args, **kwargs)
@@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance):
 
 class CollectAERender(publish.AbstractCollectRender):
 
-    order = pyblish.api.CollectorOrder + 0.405
+    order = pyblish.api.CollectorOrder + 0.100
     label = "Collect After Effects Render Layers"
     hosts = ["aftereffects"]
@@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender):
             if "review" in instance.families:
                 # to skip ExtractReview locally
                 instance.families.remove("review")
+            instance.deadline = inst.data.get("deadline")
 
             instances.append(instance)
@@ -0,0 +1,49 @@
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+    CreatePlaceholderItem,
+    PlaceholderCreateMixin
+)
+from ayon_core.hosts.aftereffects.api import get_stub
+from ayon_core.hosts.aftereffects.api.lib import set_settings
+import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
+
+
+class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin,
+                                PlaceholderCreateMixin):
+    """Adds Create placeholder.
+
+    This adds composition and runs Create
+    """
+    identifier = "aftereffects.create"
+    label = "AfterEffects create"
+
+    def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem:
+        return CreatePlaceholderItem(
+            scene_identifier=item_data["uuid"],
+            data=item_data["data"],
+            plugin=self
+        )
+
+    def create_placeholder(self, placeholder_data):
+        stub = get_stub()
+        name = "CREATEPLACEHOLDER"
+        item_id = stub.add_item(name, "COMP")
+
+        self._imprint_item(item_id, name, placeholder_data, stub)
+
+    def populate_placeholder(self, placeholder):
+        """Replace 'placeholder' with publishable instance.
+
+        Renames prepared composition name, creates publishable instance, sets
+        frame/duration settings according to DB.
+        """
+        pre_create_data = {"use_selection": True}
+        item_id, item = self._get_item(placeholder)
+        get_stub().select_items([item_id])
+        self.populate_create_placeholder(placeholder, pre_create_data)
+
+        # apply settings for populated composition
+        item_id, metadata_item = self._get_item(placeholder)
+        set_settings(True, True, [item_id])
+
+    def get_placeholder_options(self, options=None):
+        return self.get_create_plugin_options(options)
@@ -0,0 +1,60 @@
+from ayon_core.pipeline.workfile.workfile_template_builder import (
+    LoadPlaceholderItem,
+    PlaceholderLoadMixin
+)
+from ayon_core.hosts.aftereffects.api import get_stub
+import ayon_core.hosts.aftereffects.api.workfile_template_builder as wtb
+
+
+class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin):
+    identifier = "aftereffects.load"
+    label = "AfterEffects load"
+
+    def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem:
+        return LoadPlaceholderItem(
+            scene_identifier=item_data["uuid"],
+            data=item_data["data"],
+            plugin=self
+        )
+
+    def create_placeholder(self, placeholder_data):
+        """Creates AE's Placeholder item in Project items list.
+
+        Sets dummy resolution/duration/fps settings, will be replaced when
+        populated.
+        """
+        stub = get_stub()
+        name = "LOADERPLACEHOLDER"
+        item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
+
+        self._imprint_item(item_id, name, placeholder_data, stub)
+
+    def populate_placeholder(self, placeholder):
+        """Use Openpype Loader from `placeholder` to create new FootageItems
+
+        New FootageItems are created, files are imported.
+        """
+        self.populate_load_placeholder(placeholder)
+        errors = placeholder.get_errors()
+        stub = get_stub()
+        if errors:
+            stub.print_msg("\n".join(errors))
+        else:
+            if not placeholder.data["keep_placeholder"]:
+                metadata = stub.get_metadata()
+                for item in metadata:
+                    if not item.get("is_placeholder"):
+                        continue
+                    scene_identifier = item.get("uuid")
+                    if (scene_identifier and
+                            scene_identifier == placeholder.scene_identifier):
+                        stub.delete_item(item["members"][0])
+                stub.remove_instance(placeholder.scene_identifier, metadata)
+
+    def get_placeholder_options(self, options=None):
+        return self.get_load_plugin_options(options)
+
+    def load_succeed(self, placeholder, container):
+        placeholder_item_id, _ = self._get_item(placeholder)
+        item_id = container.id
+        get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)
@@ -33,7 +33,7 @@ def load_scripts(paths):
         if register:
             try:
                 register()
-            except:
+            except:  # noqa E722
                 traceback.print_exc()
         else:
             print("\nWarning! '%s' has no register function, "
@@ -45,7 +45,7 @@ def load_scripts(paths):
         if unregister:
             try:
                 unregister()
-            except:
+            except:  # noqa E722
                 traceback.print_exc()
 
     def test_reload(mod):
@@ -57,7 +57,7 @@ def load_scripts(paths):
 
         try:
             return importlib.reload(mod)
-        except:
+        except:  # noqa E722
            traceback.print_exc()
 
     def test_register(mod):
@@ -143,13 +143,19 @@ def deselect_all():
         if obj.mode != 'OBJECT':
             modes.append((obj, obj.mode))
             bpy.context.view_layer.objects.active = obj
-            bpy.ops.object.mode_set(mode='OBJECT')
+            context_override = create_blender_context(active=obj)
+            with bpy.context.temp_override(**context_override):
+                bpy.ops.object.mode_set(mode='OBJECT')
 
-    bpy.ops.object.select_all(action='DESELECT')
+    context_override = create_blender_context()
+    with bpy.context.temp_override(**context_override):
+        bpy.ops.object.select_all(action='DESELECT')
 
     for p in modes:
         bpy.context.view_layer.objects.active = p[0]
-        bpy.ops.object.mode_set(mode=p[1])
+        context_override = create_blender_context(active=p[0])
+        with bpy.context.temp_override(**context_override):
+            bpy.ops.object.mode_set(mode=p[1])
 
     bpy.context.view_layer.objects.active = active
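Blender 3.2 replaced the positional context argument to operators with `Context.temp_override`, which is what the hunk above migrates to. A self-contained sketch of the same pattern; the override keys here are illustrative and not necessarily the ones `create_blender_context` fills:

    import bpy

    def set_object_mode(obj, mode):
        # Any member of bpy.types.Context can be overridden for the
        # duration of the with-block (Blender 3.2+ API).
        override = {
            "active_object": obj,
            "selected_objects": [obj],
        }
        with bpy.context.temp_override(**override):
            bpy.ops.object.mode_set(mode=mode)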
@@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader):
     def _process(self, libpath, asset_group, group_name):
         plugin.deselect_all()
 
-        bpy.ops.wm.alembic_import(filepath=libpath)
+        # Force the creation of the transform cache even if the camera
+        # doesn't have an animation. We use the cache to update the camera.
+        bpy.ops.wm.alembic_import(
+            filepath=libpath, always_add_cache_reader=True)
 
         objects = lib.get_selection()
@@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
             self.log.info("Library already loaded, not updating...")
             return
 
-        mat = asset_group.matrix_basis.copy()
+        for obj in asset_group.children:
+            found = False
+            for constraint in obj.constraints:
+                if constraint.type == "TRANSFORM_CACHE":
+                    constraint.cache_file.filepath = libpath.as_posix()
+                    found = True
+                    break
+            if not found:
+                # This is to keep compatibility with cameras loaded with
+                # the old loader
+                # Create a new constraint for the cache file
+                constraint = obj.constraints.new("TRANSFORM_CACHE")
+                bpy.ops.cachefile.open(filepath=libpath.as_posix())
+                constraint.cache_file = bpy.data.cache_files[-1]
+                constraint.cache_file.scale = 1.0
 
-        self._remove(asset_group)
-        self._process(str(libpath), asset_group, object_name)
+                # This is a workaround to set the object path. Blender doesn't
+                # load the list of object paths until the object is evaluated.
+                # This is a hack to force the object to be evaluated.
+                # The modifier doesn't need to be removed because camera
+                # objects don't have modifiers.
+                obj.modifiers.new(
+                    name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
+                bpy.context.evaluated_depsgraph_get()
 
-        asset_group.matrix_basis = mat
+                constraint.object_path = (
+                    constraint.cache_file.object_paths[0].path)
 
         metadata["libpath"] = str(libpath)
         metadata["representation"] = repre_entity["id"]
@@ -58,3 +58,55 @@ class SelectInvalidAction(pyblish.api.Action):
         self.log.info(
             "Selecting invalid tools: %s" % ", ".join(sorted(names))
         )
+
+
+class SelectToolAction(pyblish.api.Action):
+    """Select invalid output tool in Fusion when plug-in failed.
+
+    """
+
+    label = "Select saver"
+    on = "failed"  # This action is only available on a failed plug-in
+    icon = "search"  # Icon from Awesome Icon
+
+    def process(self, context, plugin):
+        errored_instances = get_errored_instances_from_context(
+            context,
+            plugin=plugin,
+        )
+
+        # Get the invalid nodes for the plug-ins
+        self.log.info("Finding invalid nodes..")
+        tools = []
+        for instance in errored_instances:
+
+            tool = instance.data.get("tool")
+            if tool is not None:
+                tools.append(tool)
+            else:
+                self.log.warning(
+                    "Plug-in returned to be invalid, "
+                    f"but has no saver for instance {instance.name}."
+                )
+
+        if not tools:
+            # Assume relevant comp is current comp and clear selection
+            self.log.info("No invalid tools found.")
+            comp = get_current_comp()
+            flow = comp.CurrentFrame.FlowView
+            flow.Select()  # No args equals clearing selection
+            return
+
+        # Assume a single comp
+        first_tool = tools[0]
+        comp = first_tool.Comp()
+        flow = comp.CurrentFrame.FlowView
+        flow.Select()  # No args equals clearing selection
+        names = set()
+        for tool in tools:
+            flow.Select(tool, True)
+            comp.SetActiveTool(tool)
+            names.add(tool.Name)
+        self.log.info(
+            "Selecting invalid tools: %s" % ", ".join(sorted(names))
+        )
@@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
     def _on_repair():
         attributes = dict()
         for key, comp_key, _label in validations:
-            value = folder_value[key]
+            value = folder_attributes[key]
            comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
            attributes[comp_key_full] = value
        comp.SetPrefs(attributes)
@@ -52,7 +52,7 @@ class CollectFusionRender(
             if product_type not in ["render", "image"]:
                 continue
 
-            task_name = context.data["task"]
+            task_name = inst.data["task"]
             tool = inst.data["transientData"]["tool"]
 
             instance_families = inst.data.get("families", [])
@@ -115,6 +115,7 @@ class CollectFusionRender(
             if "review" in instance.families:
                 # to skip ExtractReview locally
                 instance.families.remove("review")
+            instance.deadline = inst.data.get("deadline")
 
             instances.append(instance)
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+"""Validate if instance context is the same as publish context."""
+
+import pyblish.api
+from ayon_core.hosts.fusion.api.action import SelectToolAction
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    OptionalPyblishPluginMixin
+)
+
+
+class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin,
+                                      OptionalPyblishPluginMixin):
+    """Validator to check if instance context matches context of publish.
+
+    When working in per-shot style you always publish data in context of
+    current asset (shot). This validator checks if this is so. It is optional
+    so it can be disabled when needed.
+    """
+    # Similar to maya and houdini-equivalent `ValidateInstanceInContext`
+
+    order = ValidateContentsOrder
+    label = "Instance in same Context"
+    optional = True
+    hosts = ["fusion"]
+    actions = [SelectToolAction, RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
+        instance_context = self.get_context(instance.data)
+        context = self.get_context(instance.context.data)
+        if instance_context != context:
+            context_label = "{} > {}".format(*context)
+            instance_label = "{} > {}".format(*instance_context)
+
+            raise PublishValidationError(
+                message=(
+                    "Instance '{}' publishes to different asset than current "
+                    "context: {}. Current context: {}".format(
+                        instance.name, instance_label, context_label
+                    )
+                ),
+                description=(
+                    "## Publishing to a different asset\n"
+                    "There are publish instances present which are publishing "
+                    "into a different asset than your current context.\n\n"
+                    "Usually this is not what you want but there can be cases "
+                    "where you might want to publish into another asset or "
+                    "shot. If that's the case you can disable the validation "
+                    "on the instance to ignore it."
+                )
+            )
+
+    @classmethod
+    def repair(cls, instance):
+
+        create_context = instance.context.data["create_context"]
+        instance_id = instance.data.get("instance_id")
+        created_instance = create_context.get_instance_by_id(
+            instance_id
+        )
+        if created_instance is None:
+            raise RuntimeError(
+                f"No CreatedInstances found with id '{instance_id} "
+                f"in {create_context.instances_by_id}"
+            )
+
+        context_asset, context_task = cls.get_context(instance.context.data)
+        created_instance["folderPath"] = context_asset
+        created_instance["task"] = context_task
+        create_context.save_changes()
+
+    @staticmethod
+    def get_context(data):
+        """Return asset, task from publishing context data"""
+        return data["folderPath"], data["task"]
@@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender):
                 outputFormat=info[1],
                 outputStartFrame=info[3],
                 leadingZeros=info[2],
-                ignoreFrameHandleCheck=True
+                ignoreFrameHandleCheck=True,
+                # todo: inst is not available, must be determined, fix when
+                # reworking to Publisher
+                # deadline=inst.data.get("deadline")
             )
             render_instance.context = context
@@ -8,6 +8,7 @@ from .lib import (
     sync_avalon_data_to_workfile,
     launch_workfiles_app,
     before_project_save,
+    apply_colorspace_project
 )
 from .tags import add_tags_to_workfile
 from .menu import update_menu_task_label
@@ -44,6 +45,8 @@ def afterNewProjectCreated(event):
     # reset workfiles startup not to open any more in session
     os.environ["WORKFILES_STARTUP"] = "0"
 
+    apply_colorspace_project()
+
 
 def beforeProjectLoad(event):
     log.info("before project load event...")
@@ -122,6 +125,7 @@ def register_hiero_events():
     except RuntimeError:
         pass
 
+
 def register_events():
     """
     Adding all callbacks.
@@ -11,15 +11,6 @@ import warnings
 import json
 import ast
 import secrets
-import shutil
 import hiero
 
 from qtpy import QtWidgets, QtCore
@@ -36,9 +35,6 @@ from .constants import (
     DEFAULT_SEQUENCE_NAME,
     DEFAULT_BIN_NAME
 )
-from ayon_core.pipeline.colorspace import (
-    get_imageio_config
-)
 
 
 class _CTX:
@@ -105,9 +101,9 @@ def flatten(list_):
 
 
 def get_current_project(remove_untitled=False):
-    projects = flatten(hiero.core.projects())
+    projects = hiero.core.projects()
     if not remove_untitled:
-        return next(iter(projects))
+        return projects[0]
 
     # if remove_untitled
     for proj in projects:
@@ -1050,30 +1046,84 @@ def _set_hrox_project_knobs(doc, **knobs):
 
 
 def apply_colorspace_project():
-    project_name = get_current_project_name()
-    # get path the the active projects
-    project = get_current_project(remove_untitled=True)
-    current_file = project.path()
-
-    # close the active project
-    project.close()
+    """Apply colorspaces from settings.
+
+    Due to not being able to set the project settings through the Python API,
+    we need to do use some dubious code to find the widgets and set them. It is
+    possible to set the project settings without traversing through the widgets
+    but it involves reading the hrox files from disk with XML, so no in-memory
+    support. See https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa
+    for more details.
+    """
 
     # get presets for hiero
+    project_name = get_current_project_name()
     imageio = get_project_settings(project_name)["hiero"]["imageio"]
     presets = imageio.get("workfile")
 
+    # Open Project Settings UI.
+    for act in hiero.ui.registeredActions():
+        if act.objectName() == "foundry.project.settings":
+            act.trigger()
+
+    # Find widgets from their sibling label.
+    labels = {
+        "Working Space:": "workingSpace",
+        "Viewer:": "viewerLut",
+        "Thumbnails:": "thumbnailLut",
+        "Monitor Out:": "monitorOutLut",
+        "8 Bit Files:": "eightBitLut",
+        "16 Bit Files:": "sixteenBitLut",
+        "Log Files:": "logLut",
+        "Floating Point Files:": "floatLut"
+    }
+    widgets = {x: None for x in labels.values()}
+
+    def _recursive_children(widget, labels, widgets):
+        children = widget.children()
+        for count, child in enumerate(children):
+            if isinstance(child, QtWidgets.QLabel):
+                if child.text() in labels.keys():
+                    widgets[labels[child.text()]] = children[count + 1]
+            _recursive_children(child, labels, widgets)
+
+    app = QtWidgets.QApplication.instance()
+    title = "Project Settings"
+    for widget in app.topLevelWidgets():
+        if isinstance(widget, QtWidgets.QMainWindow):
+            if widget.windowTitle() != title:
+                continue
+            _recursive_children(widget, labels, widgets)
+            widget.close()
+
+    msg = "Setting value \"{}\" is not a valid option for \"{}\""
+    for key, widget in widgets.items():
+        options = [widget.itemText(i) for i in range(widget.count())]
+        setting_value = presets[key]
+        assert setting_value in options, msg.format(setting_value, key)
+        widget.setCurrentText(presets[key])
+
+    # This code block is for setting up project colorspaces for files on disk.
+    # Due to not having Python API access to set the project settings, the
+    # Foundry recommended way is to modify the hrox files on disk with XML. See
+    # this forum thread for more details;
+    # https://community.foundry.com/discuss/topic/137771/change-a-project-s-default-color-transform-with-python # noqa
+    '''
     # backward compatibility layer
     # TODO: remove this after some time
-    config_data = get_imageio_config(
-        project_name=get_current_project_name(),
-        host_name="hiero"
-    )
+    config_data = get_current_context_imageio_config_preset()
 
     if config_data:
         presets.update({
             "ocioConfigName": "custom"
         })
 
+    # get path the the active projects
+    project = get_current_project()
+    current_file = project.path()
+
+    msg = "The project needs to be saved to disk to apply colorspace settings."
+    assert current_file, msg
+
     # save the workfile as subversion "comment:_colorspaceChange"
     split_current_file = os.path.splitext(current_file)
     copy_current_file = current_file
@@ -1116,6 +1166,7 @@ def apply_colorspace_project():
 
     # open the file as current project
     hiero.core.openProject(copy_current_file)
+    '''
 
 
 def apply_colorspace_clips():
@@ -1125,10 +1176,8 @@ def apply_colorspace_clips():
 
     # get presets for hiero
     imageio = get_project_settings(project_name)["hiero"]["imageio"]
-    from pprint import pprint
 
     presets = imageio.get("regexInputs", {}).get("inputs", {})
-    pprint(presets)
     for clip in clips:
         clip_media_source_path = clip.mediaSource().firstpath()
         clip_name = clip.name()
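The widget lookup added to `apply_colorspace_project` above relies on Qt laying the settings dialog out as QLabel/editor sibling pairs. A standalone sketch of that label-to-widget search, assuming only qtpy; the label text and return handling are illustrative:

    from qtpy import QtWidgets

    def find_widget_after_label(root, label_text):
        # The widget created right after a matching QLabel in children()
        # is assumed to be its editor (e.g. a QComboBox).
        children = root.children()
        for count, child in enumerate(children):
            if isinstance(child, QtWidgets.QLabel):
                if child.text() == label_text and count + 1 < len(children):
                    return children[count + 1]
            found = find_widget_after_label(child, label_text)
            if found is not None:
                return found
        return None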
@@ -144,7 +144,7 @@ def add_tags_to_workfile():
     # Get project task types.
     project_name = get_current_project_name()
     project_entity = ayon_api.get_project(project_name)
-    task_types = project_entity["taskType"]
+    task_types = project_entity["taskTypes"]
     nks_pres_tags["[Tasks]"] = {}
     log.debug("__ tasks: {}".format(task_types))
     for task_type in task_types:
@@ -51,13 +51,12 @@ def open_file(filepath):
 
     project = hiero.core.projects()[-1]
 
-    # open project file
-    hiero.core.openProject(filepath.replace(os.path.sep, "/"))
-    # close previous project
-    project.close()
+    # Close previous project if its different to the current project.
+    filepath = filepath.replace(os.path.sep, "/")
+    if project.path().replace(os.path.sep, "/") != filepath:
+        # open project file
+        hiero.core.openProject(filepath)
+        project.close()
 
     return True
@@ -811,6 +811,43 @@ def get_current_context_template_data_with_folder_attrs():
     return template_data
 
 
+def set_review_color_space(opengl_node, review_color_space="", log=None):
+    """Set ociocolorspace parameter for the given OpenGL node.
+
+    Set `ociocolorspace` parameter of the given OpenGl node
+    to to the given review_color_space value.
+    If review_color_space is empty, a default colorspace corresponding to
+    the display & view of the current Houdini session will be used.
+
+    Args:
+        opengl_node (hou.Node): ROP node to set its ociocolorspace parm.
+        review_color_space (str): Colorspace value for ociocolorspace parm.
+        log (logging.Logger): Logger to log to.
+    """
+
+    if log is None:
+        log = self.log
+
+    # Set Color Correction parameter to OpenColorIO
+    colorcorrect_parm = opengl_node.parm("colorcorrect")
+    if colorcorrect_parm.eval() != 2:
+        colorcorrect_parm.set(2)
+        log.debug(
+            "'Color Correction' parm on '{}' has been set to"
+            " 'OpenColorIO'".format(opengl_node.path())
+        )
+
+    opengl_node.setParms(
+        {"ociocolorspace": review_color_space}
+    )
+
+    log.debug(
+        "'OCIO Colorspace' parm on '{}' has been set to "
+        "the view color space '{}'"
+        .format(opengl_node, review_color_space)
+    )
+
+
 def get_context_var_changes():
     """get context var changes."""
@@ -0,0 +1,64 @@
+from ayon_applications import PreLaunchHook, LaunchTypes
+
+
+class SetDefaultDisplayView(PreLaunchHook):
+    """Set default view and default display for houdini via OpenColorIO.
+
+    Houdini's defaultDisplay and defaultView are set by
+    setting 'OCIO_ACTIVE_DISPLAYS' and 'OCIO_ACTIVE_VIEWS'
+    environment variables respectively.
+
+    More info: https://www.sidefx.com/docs/houdini/io/ocio.html#set-up
+    """
+
+    app_groups = {"houdini"}
+    launch_types = {LaunchTypes.local}
+
+    def execute(self):
+
+        OCIO = self.launch_context.env.get("OCIO")
+
+        # This is a cheap way to skip this hook if either global color
+        # management or houdini color management was disabled because the
+        # OCIO var would be set by the global OCIOEnvHook
+        if not OCIO:
+            return
+
+        # workfile settings added in '0.2.13'
+        houdini_color_settings = \
+            self.data["project_settings"]["houdini"]["imageio"].get("workfile")
+
+        if not houdini_color_settings:
+            self.log.info("Hook 'SetDefaultDisplayView' requires Houdini "
+                          "addon version >= '0.2.13'")
+            return
+
+        if not houdini_color_settings["enabled"]:
+            self.log.info(
+                "Houdini workfile color management is disabled."
+            )
+            return
+
+        # 'OCIO_ACTIVE_DISPLAYS', 'OCIO_ACTIVE_VIEWS' are checked
+        # as Admins can add them in Ayon env vars or Ayon tools.
+
+        default_display = houdini_color_settings["default_display"]
+        if default_display:
+            # get 'OCIO_ACTIVE_DISPLAYS' value if exists.
+            self._set_context_env("OCIO_ACTIVE_DISPLAYS", default_display)
+
+        default_view = houdini_color_settings["default_view"]
+        if default_view:
+            # get 'OCIO_ACTIVE_VIEWS' value if exists.
+            self._set_context_env("OCIO_ACTIVE_VIEWS", default_view)
+
+    def _set_context_env(self, env_var, default_value):
+        env_value = self.launch_context.env.get(env_var, "")
+        new_value = ":".join(
+            key for key in [default_value, env_value] if key
+        )
+        self.log.info(
+            "Setting {} environment to: {}"
+            .format(env_var, new_value)
+        )
+        self.launch_context.env[env_var] = new_value
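`_set_context_env` above prepends the configured default to whatever value an admin may already have set, joined with ":", which is how OCIO prioritizes active displays and views. A tiny self-contained sketch of that merge (example values are illustrative):

    def prepend_env(env, var, default_value):
        current = env.get(var, "")
        env[var] = ":".join(v for v in [default_value, current] if v)
        return env[var]

    env = {"OCIO_ACTIVE_VIEWS": "sRGB"}
    assert prepend_env(env, "OCIO_ACTIVE_VIEWS", "ACES") == "ACES:sRGB"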
@@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator):
     # Default extension
     ext = "exr"
 
-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"
 
     def create(self, product_name, instance_data, pre_create_data):
         import hou
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
 
         # Remove the active, we are checking the bypass flag of the nodes
         instance_data.pop("active", None)
@@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator):
 
         # Add chunk size attribute
         instance_data["chunkSize"] = 1
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")
 
         instance = super(CreateArnoldRop, self).create(
             product_name,
@@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             "ar_exr_half_precision": 1  # half precision
         }
 
-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             ass_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.ass".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
@@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)
 
-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target),
+        ]
 
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]
 
-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
-                    label="Image Format Options")
+                    label="Image Format Options"),
         ]
+        return attrs + self.get_instance_attr_defs()
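The same pre-create-to-instance handover recurs in the Karma, Mantra and Redshift creators below: values picked in the pre-create dialog are copied into `creator_attributes`, where the `BoolDef`/`EnumDef` definitions from `get_instance_attr_defs` keep them editable per instance in the publisher. A minimal sketch of that handover on plain dicts:

    def transfer_pre_create_settings(instance_data, pre_create_data):
        # Copy only the keys the publisher should keep exposing.
        creator_attributes = instance_data.setdefault(
            "creator_attributes", dict())
        for key in ["render_target", "review"]:
            if key in pre_create_data:
                creator_attributes[key] = pre_create_data[key]
        return instance_data

    data = transfer_pre_create_settings(
        {}, {"render_target": "farm_split", "review": True})
    assert data["creator_attributes"]["render_target"] == "farm_split"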
@@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator):
     product_type = "karma_rop"
     icon = "magic"
 
+    # Default render target
+    render_target = "farm"
+
     def create(self, product_name, instance_data, pre_create_data):
         import hou  # noqa
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
+
         instance_data.pop("active", None)
         instance_data.update({"node_type": "karma"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")
 
         instance = super(CreateKarmaROP, self).create(
             product_name,
@@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)
 
-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
 
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]
 
-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
+        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",
@@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator):
                     decimals=0),
             BoolDef("cam_res",
                     label="Camera Resolution",
-                    default=False)
+                    default=False),
         ]
+        return attrs + self.get_instance_attr_defs()
@@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator):
     product_type = "mantra_rop"
     icon = "magic"
 
-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"
 
     def create(self, product_name, instance_data, pre_create_data):
         import hou  # noqa
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
+
         instance_data.pop("active", None)
         instance_data.update({"node_type": "ifd"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")
 
         instance = super(CreateMantraROP, self).create(
             product_name,
@@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
             "vm_picture": filepath,
         }
 
-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             ifd_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.ifd".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
@@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)
 
-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
 
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]
 
-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",
@@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator):
                     label="Override Camera Resolution",
                     tooltip="Override the current camera "
                             "resolution, recommended for IPR.",
-                    default=False)
+                    default=False),
         ]
+        return attrs + self.get_instance_attr_defs()
@@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
     ext = "exr"
     multi_layered_mode = "No Multi-Layered EXR File"
 
-    # Default to split export and render jobs
-    split_render = True
+    # Default render target
+    render_target = "farm_split"
 
     def create(self, product_name, instance_data, pre_create_data):
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
+
         instance_data.pop("active", None)
         instance_data.update({"node_type": "Redshift_ROP"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")
 
         instance = super(CreateRedshiftROP, self).create(
             product_name,
@@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
         rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs"
         parms["RS_archive_file"] = rs_filepath
 
-        if pre_create_data.get("split_render", self.split_render):
+        if pre_create_data.get("render_target") == "farm_split":
             parms["RS_archive_enable"] = 1
 
         instance_node.setParms(parms)
@@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
 
         return super(CreateRedshiftROP, self).remove_instances(instances)
 
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
     def get_pre_create_attr_defs(self):
-        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
         image_format_enum = [
             "exr", "tif", "jpg", "png",
         ]
 
         multi_layered_mode = [
             "No Multi-Layered EXR File",
             "Full Multi-Layered EXR File"
         ]
 
-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("split_render",
-                    label="Split export and render jobs",
-                    default=self.split_render),
+        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
@@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
             EnumDef("multi_layered_mode",
                     multi_layered_mode,
                     default=self.multi_layered_mode,
-                    label="Multi-Layered EXR")
+                    label="Multi-Layered EXR"),
         ]
+        return attrs + self.get_instance_attr_defs()
 
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """Creator plugin for creating openGL reviews."""
-from ayon_core.hosts.houdini.api import plugin
+from ayon_core.hosts.houdini.api import lib, plugin
 from ayon_core.lib import EnumDef, BoolDef, NumberDef
 
 import os
 
@@ -14,6 +14,16 @@ class CreateReview(plugin.HoudiniCreator):
     label = "Review"
     product_type = "review"
     icon = "video-camera"
+    review_color_space = ""
+
+    def apply_settings(self, project_settings):
+        super(CreateReview, self).apply_settings(project_settings)
+        # workfile settings added in '0.2.13'
+        color_settings = project_settings["houdini"]["imageio"].get(
+            "workfile", {}
+        )
+        if color_settings.get("enabled"):
+            self.review_color_space = color_settings.get("review_color_space")
 
     def create(self, product_name, instance_data, pre_create_data):
 
@@ -85,10 +95,20 @@ class CreateReview(plugin.HoudiniCreator):
 
         instance_node.setParms(parms)
 
-        # Set OCIO Colorspace to the default output colorspace
+        # Set OCIO Colorspace to the default colorspace
         # if there's OCIO
         if os.getenv("OCIO"):
-            self.set_colorcorrect_to_default_view_space(instance_node)
+            # Fall to the default value if cls.review_color_space is empty.
+            if not self.review_color_space:
+                # cls.review_color_space is an empty string
+                # when the imageio/workfile setting is disabled or
+                # when the Review colorspace setting is empty.
+                from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
+                self.review_color_space = get_default_display_view_colorspace()
+
+            lib.set_review_color_space(instance_node,
+                                       self.review_color_space,
+                                       self.log)
 
         to_lock = ["id", "productType"]
 
@@ -131,23 +151,3 @@ class CreateReview(plugin.HoudiniCreator):
                       minimum=0.0001,
                       decimals=3)
         ]
-
-    def set_colorcorrect_to_default_view_space(self,
-                                               instance_node):
-        """Set ociocolorspace to the default output space."""
-        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
-
-        # set Color Correction parameter to OpenColorIO
-        instance_node.setParms({"colorcorrect": 2})
-
-        # Get default view space for ociocolorspace parm.
-        default_view_space = get_default_display_view_colorspace()
-        instance_node.setParms(
-            {"ociocolorspace": default_view_space}
-        )
-
-        self.log.debug(
-            "'OCIO Colorspace' parm on '{}' has been set to "
-            "the default view color space '{}'"
-            .format(instance_node, default_view_space)
-        )
 
@@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator):
     icon = "magic"
     ext = "exr"
 
-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"
 
     def create(self, product_name, instance_data, pre_create_data):
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
+
         instance_data.pop("active", None)
         instance_data.update({"node_type": "vray_renderer"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")
 
         instance = super(CreateVrayROP, self).create(
             product_name,
@@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
             "SettingsEXR_bits_per_channel": "16"  # half precision
         }
 
-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             scene_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
@@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator):
 
         return super(CreateVrayROP, self).remove_instances(instances)
 
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
     def get_pre_create_attr_defs(self):
-        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]
 
-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
@@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator):
                             "if enabled",
                     default=False)
         ]
+        return attrs + self.get_instance_attr_defs()
 
@@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
         # write workfile information to context container.
         op_ctx = hou.node(CONTEXT_CONTAINER)
         if not op_ctx:
-            op_ctx = self.create_context_node()
+            op_ctx = self.host.create_context_node()
 
         workfile_data = {"workfile": current_instance.data_to_store()}
         imprint(op_ctx, workfile_data)
 
@@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "ar_picture")
         render_products = []
 
-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("ar_ass_export_enable").eval())
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "ar_ass_file", pad_character="0"
             )
@@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             "": self.generate_expected_files(instance, beauty_product)
         }
 
+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         num_aovs = rop.evalParm("ar_aovs")
+        # TODO: Check the following logic.
+        # as it always assumes that all AOV are not merged.
         for index in range(1, num_aovs + 1):
             # Skip disabled AOVs
             if not rop.evalParm("ar_enable_aov{}".format(index)):
@@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             files_by_aov[label] = self.generate_expected_files(instance,
                                                                aov_product)
 
+            # Set to False as soon as we have a separated aov.
+            multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: {}".format(product))
 
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api import lib
 class CollectDataforCache(pyblish.api.InstancePlugin):
     """Collect data for caching to Deadline."""
 
-    order = pyblish.api.CollectorOrder + 0.04
+    # Run after Collect Frames
+    order = pyblish.api.CollectorOrder + 0.11
     families = ["ass", "pointcache",
                 "mantraifd", "redshiftproxy",
                 "vdbcache"]
 
@@ -0,0 +1,35 @@
+import pyblish.api
+
+
+class CollectFarmInstances(pyblish.api.InstancePlugin):
+    """Collect instances for farm render."""
+
+    order = pyblish.api.CollectorOrder
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    hosts = ["houdini"]
+    targets = ["local", "remote"]
+    label = "Collect farm instances"
+
+    def process(self, instance):
+
+        creator_attribute = instance.data["creator_attributes"]
+
+        # Collect Render Target
+        if creator_attribute.get("render_target") not in {
+            "farm_split", "farm"
+        }:
+            instance.data["farm"] = False
+            instance.data["splitRender"] = False
+            self.log.debug("Render on farm is disabled. "
+                           "Skipping farm collecting.")
+            return
+
+        instance.data["farm"] = True
+        instance.data["splitRender"] = (
+            creator_attribute.get("render_target") == "farm_split"
+        )
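The CollectFarmInstances plugin above reduces the creator's render_target choice to two booleans on the instance. A minimal sketch of that mapping (illustrative only; a plain dict stands in for a real pyblish instance):

    # Sketch, not part of the commit; `instance_data` is a hypothetical stub.
    instance_data = {"creator_attributes": {"render_target": "farm_split"}}

    target = instance_data["creator_attributes"].get("render_target")
    instance_data["farm"] = target in {"farm", "farm_split"}    # -> True
    instance_data["splitRender"] = (target == "farm_split")     # -> True
    # "local" and "local_no_render" both yield farm=False, splitRender=False.
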
@@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin):
     label = "Collect Frames"
     families = ["vdbcache", "imagesequence", "ass",
                 "mantraifd", "redshiftproxy", "review",
-                "bgeo"]
+                "pointcache"]
 
     def process(self, instance):
 
@@ -57,6 +57,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
                                                  beauty_product)
         }
 
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        # By default karma render is a multipart Exr.
+        instance.data["multipartExr"] = True
+
         filenames = list(render_products)
         instance.data["files"] = filenames
         instance.data["renderProducts"] = colorspace.ARenderProduct()
 
@@ -0,0 +1,137 @@
+import os
+import pyblish.api
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline.farm.patterning import match_aov_pattern
+from ayon_core.pipeline.publish import (
+    get_plugin_settings,
+    apply_plugin_settings_automatically
+)
+
+
+class CollectLocalRenderInstances(pyblish.api.InstancePlugin):
+    """Collect instances for local render.
+
+    Agnostic Local Render Collector.
+    """
+
+    # this plugin runs after Collect Render Products
+    order = pyblish.api.CollectorOrder + 0.12
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    hosts = ["houdini"]
+    label = "Collect local render instances"
+
+    use_deadline_aov_filter = False
+    aov_filter = {"host_name": "houdini",
+                  "value": [".*([Bb]eauty).*"]}
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        # Preserve automatic settings applying logic
+        settings = get_plugin_settings(plugin=cls,
+                                       project_settings=project_settings,
+                                       log=cls.log,
+                                       category="houdini")
+        apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+        if not cls.use_deadline_aov_filter:
+            # get aov_filter from collector settings
+            # and restructure it as match_aov_pattern requires.
+            cls.aov_filter = {
+                cls.aov_filter["host_name"]: cls.aov_filter["value"]
+            }
+        else:
+            # get aov_filter from deadline settings
+            cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"]
+            cls.aov_filter = {
+                item["name"]: item["value"]
+                for item in cls.aov_filter
+            }
+
+    def process(self, instance):
+
+        if instance.data["farm"]:
+            self.log.debug("Render on farm is enabled. "
+                           "Skipping local render collecting.")
+            return
+
+        # Create Instance for each AOV.
+        context = instance.context
+        expectedFiles = next(iter(instance.data["expectedFiles"]), {})
+
+        product_type = "render"  # is always render
+        product_group = get_product_name(
+            context.data["projectName"],
+            context.data["taskEntity"]["name"],
+            context.data["taskEntity"]["taskType"],
+            context.data["hostName"],
+            product_type,
+            instance.data["productName"]
+        )
+
+        for aov_name, aov_filepaths in expectedFiles.items():
+            product_name = product_group
+
+            if aov_name:
+                product_name = "{}_{}".format(product_name, aov_name)
+
+            # Create instance for each AOV
+            aov_instance = context.create_instance(product_name)
+
+            # Prepare Representation for each AOV
+            aov_filenames = [os.path.basename(path) for path in aov_filepaths]
+            staging_dir = os.path.dirname(aov_filepaths[0])
+            ext = aov_filepaths[0].split(".")[-1]
+
+            # Decide if instance is reviewable
+            preview = False
+            if instance.data.get("multipartExr", False):
+                # Add preview tag because its multipartExr.
+                preview = True
+            else:
+                # Add Preview tag if the AOV matches the filter.
+                preview = match_aov_pattern(
+                    "houdini", self.aov_filter, aov_filenames[0]
+                )
+
+            preview = preview and instance.data.get("review", False)
+
+            # Support Single frame.
+            # The integrator wants single files to be a single
+            # filename instead of a list.
+            # More info: https://github.com/ynput/ayon-core/issues/238
+            if len(aov_filenames) == 1:
+                aov_filenames = aov_filenames[0]
+
+            aov_instance.data.update({
+                # 'label': label,
+                "task": instance.data["task"],
+                "folderPath": instance.data["folderPath"],
+                "frameStart": instance.data["frameStartHandle"],
+                "frameEnd": instance.data["frameEndHandle"],
+                "productType": product_type,
+                "family": product_type,
+                "productName": product_name,
+                "productGroup": product_group,
+                "families": ["render.local.hou", "review"],
+                "instance_node": instance.data["instance_node"],
+                "representations": [
+                    {
+                        "stagingDir": staging_dir,
+                        "ext": ext,
+                        "name": ext,
+                        "tags": ["review"] if preview else [],
+                        "files": aov_filenames,
+                        "frameStart": instance.data["frameStartHandle"],
+                        "frameEnd": instance.data["frameEndHandle"]
+                    }
+                ]
+            })
+
+        # Remove original render instance
+        # I can't remove it here as I still need it to trigger the render.
+        # context.remove(instance)
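For reference, the aov_filter restructuring in apply_settings() above converts the collector-settings shape into the mapping that match_aov_pattern expects. A minimal sketch using the plugin's own default values:

    # Sketch, not part of the commit; values are the plugin defaults above.
    aov_filter = {"host_name": "houdini",
                  "value": [".*([Bb]eauty).*"]}

    # Restructured to {host_name: [patterns]} for match_aov_pattern:
    aov_filter = {aov_filter["host_name"]: aov_filter["value"]}
    assert aov_filter == {"houdini": [".*([Bb]eauty).*"]}
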
@@ -46,12 +46,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "vm_picture")
         render_products = []
 
-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("soho_outputmode").eval())
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "soho_diskfile", pad_character="0"
             )
@@ -76,6 +73,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
                                                  beauty_product)
         }
 
+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
+        # TODO: This logic doesn't take into considerations
+        # cryptomatte defined in 'Images > Cryptomatte'
         aov_numbers = rop.evalParm("vm_numaux")
         if aov_numbers > 0:
             # get the filenames of the AOVs
@@ -95,6 +97,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
 
                 files_by_aov[var] = self.generate_expected_files(instance, aov_product)  # noqa
 
+                # Set to False as soon as we have a separated aov.
+                multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)
 
@@ -53,11 +53,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
 
         default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
         beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("RS_archive_enable").eval())
-        instance.data["splitRender"] = split_render
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "RS_archive_file", pad_character="0"
             )
@@ -77,6 +75,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
         # As this is what the rop does.
         beauty_suffix = ""
 
+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         # Default beauty/main layer AOV
         beauty_product = self.get_render_product_name(
             prefix=default_prefix, suffix=beauty_suffix
@@ -116,6 +117,14 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
             files_by_aov[aov_suffix] = self.generate_expected_files(instance,
                                                                     aov_product)  # noqa
 
+            # Set to False as soon as we have a separated aov.
+            multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)
 
@@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
     label = "Collect Review Data"
     # This specific order value is used so that
     # this plugin runs after CollectRopFrameRange
-    order = pyblish.api.CollectorOrder + 0.1
+    # Also after CollectLocalRenderInstances
+    order = pyblish.api.CollectorOrder + 0.13
     hosts = ["houdini"]
     families = ["review"]
 
@@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
         ropnode_path = instance.data["instance_node"]
         ropnode = hou.node(ropnode_path)
 
-        camera_path = ropnode.parm("camera").eval()
+        # Get camera based on the instance_node type.
+        camera_path = self._get_camera_path(ropnode)
         camera_node = hou.node(camera_path)
         if not camera_node:
             self.log.warning("No valid camera node found on review node: "
@@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
         # Store focal length in `burninDataMembers`
         burnin_members = instance.data.setdefault("burninDataMembers", {})
         burnin_members["focalLength"] = focal_length
+
+    def _get_camera_path(self, ropnode):
+        """Get the camera path associated with the given rop node.
+
+        This function evaluates the camera parameter according to the
+        type of the given rop node.
+
+        Returns:
+            Union[str, None]: Camera path or None.
+
+        This function can return empty string if the camera
+        path is empty i.e. no camera path.
+        """
+
+        if ropnode.type().name() in {
+            "opengl", "karma", "ifd", "arnold"
+        }:
+            return ropnode.parm("camera").eval()
+
+        elif ropnode.type().name() == "Redshift_ROP":
+            return ropnode.parm("RS_renderCamera").eval()
+
+        elif ropnode.type().name() == "vray_renderer":
+            return ropnode.parm("render_camera").eval()
+
+        return None
 
@@ -0,0 +1,22 @@
+import pyblish.api
+
+
+class CollectReviewableInstances(pyblish.api.InstancePlugin):
+    """Collect Reviewable Instances.
+
+    Basically, all instances of the specified families
+    with creator_attribure["review"]
+    """
+
+    order = pyblish.api.CollectorOrder
+    label = "Collect Reviewable Instances"
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+
+        instance.data["review"] = creator_attribute.get("review", False)
 
@@ -47,12 +47,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         render_products = []
         # TODO: add render elements if render element
 
-        # Store whether we are splitting the render job in an export + render
-        split_render = rop.parm("render_export_mode").eval() == "2"
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "render_export_filepath", pad_character="0"
             )
@@ -72,6 +69,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
             "": self.generate_expected_files(instance,
                                              beauty_product)}
 
+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         if instance.data.get("RenderElement", True):
             render_element = self.get_render_element_name(rop, default_prefix)
             if render_element:
@@ -79,7 +79,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                 render_products.append(renderpass)
                 files_by_aov[aov] = self.generate_expected_files(
                     instance, renderpass)
+                # Set to False as soon as we have a separated aov.
+                multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
 
         for product in render_products:
             self.log.debug("Found render product: %s" % product)
 
@@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor):
         staging_dir = os.path.dirname(output)
         instance.data["stagingDir"] = staging_dir
 
-        file_name = os.path.basename(output)
+        if instance.data.get("frames"):
+            # list of files
+            files = instance.data["frames"]
+        else:
+            # single file
+            files = os.path.basename(output)
 
         # We run the render
-        self.log.info("Writing alembic '%s' to '%s'" % (file_name,
+        self.log.info("Writing alembic '%s' to '%s'" % (files,
                                                         staging_dir))
 
         render_rop(ropnode)
@@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor):
         representation = {
             'name': 'abc',
             'ext': 'abc',
-            'files': file_name,
+            'files': files,
             "stagingDir": staging_dir,
         }
         instance.data["representations"].append(representation)
 
@@ -7,7 +7,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop, splitext
 import hou
 
 
-class ExtractComposite(publish.Extractor):
+class ExtractComposite(publish.Extractor,
+                       publish.ColormanagedPyblishPluginMixin):
 
     order = pyblish.api.ExtractorOrder
     label = "Extract Composite (Image Sequence)"
@@ -45,8 +46,14 @@ class ExtractComposite(publish.Extractor):
             "frameEnd": instance.data["frameEndHandle"],
         }
 
-        from pprint import pformat
-        self.log.info(pformat(representation))
+        if ext.lower() == "exr":
+            # Inject colorspace with 'scene_linear' as that's the
+            # default Houdini working colorspace and all extracted
+            # OpenEXR images should be in that colorspace.
+            # https://www.sidefx.com/docs/houdini/render/linear.html#image-formats
+            self.set_representation_colorspace(
+                representation, instance.context,
+                colorspace="scene_linear"
+            )
 
         instance.data["representations"].append(representation)
 
@@ -8,7 +8,8 @@ from ayon_core.hosts.houdini.api.lib import render_rop
 import hou
 
 
-class ExtractOpenGL(publish.Extractor):
+class ExtractOpenGL(publish.Extractor,
+                    publish.ColormanagedPyblishPluginMixin):
 
     order = pyblish.api.ExtractorOrder - 0.01
     label = "Extract OpenGL"
@@ -18,6 +19,16 @@ class ExtractOpenGL(publish.Extractor):
     def process(self, instance):
         ropnode = hou.node(instance.data.get("instance_node"))
 
+        # This plugin is triggered when marking render as reviewable.
+        # Therefore, this plugin will run on over wrong instances.
+        # TODO: Don't run this plugin on wrong instances.
+        # This plugin should run only on review product type
+        # with instance node of opengl type.
+        if ropnode.type().name() != "opengl":
+            self.log.debug("Skipping OpenGl extraction. Rop node {} "
+                           "is not an OpenGl node.".format(ropnode.path()))
+            return
+
         output = ropnode.evalParm("picture")
         staging_dir = os.path.normpath(os.path.dirname(output))
         instance.data["stagingDir"] = staging_dir
@@ -46,6 +57,14 @@ class ExtractOpenGL(publish.Extractor):
             "camera_name": instance.data.get("review_camera")
         }
 
+        if ropnode.evalParm("colorcorrect") == 2:  # OpenColorIO enabled
+            colorspace = ropnode.evalParm("ociocolorspace")
+            # inject colorspace data
+            self.set_representation_colorspace(
+                representation, instance.context,
+                colorspace=colorspace
+            )
+
         if "representations" not in instance.data:
             instance.data["representations"] = []
         instance.data["representations"].append(representation)
 
@@ -0,0 +1,74 @@
+import pyblish.api
+
+from ayon_core.pipeline import publish
+from ayon_core.hosts.houdini.api.lib import render_rop
+import hou
+import os
+
+
+class ExtractRender(publish.Extractor):
+
+    order = pyblish.api.ExtractorOrder
+    label = "Extract Render"
+    hosts = ["houdini"]
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    def process(self, instance):
+        creator_attribute = instance.data["creator_attributes"]
+        product_type = instance.data["productType"]
+        rop_node = hou.node(instance.data.get("instance_node"))
+
+        # Align split parameter value on rop node to the render target.
+        if instance.data["splitRender"]:
+            if product_type == "arnold_rop":
+                rop_node.setParms({"ar_ass_export_enable": 1})
+            elif product_type == "mantra_rop":
+                rop_node.setParms({"soho_outputmode": 1})
+            elif product_type == "redshift_rop":
+                rop_node.setParms({"RS_archive_enable": 1})
+            elif product_type == "vray_rop":
+                rop_node.setParms({"render_export_mode": "2"})
+        else:
+            if product_type == "arnold_rop":
+                rop_node.setParms({"ar_ass_export_enable": 0})
+            elif product_type == "mantra_rop":
+                rop_node.setParms({"soho_outputmode": 0})
+            elif product_type == "redshift_rop":
+                rop_node.setParms({"RS_archive_enable": 0})
+            elif product_type == "vray_rop":
+                rop_node.setParms({"render_export_mode": "1"})
+
+        if instance.data.get("farm"):
+            self.log.debug("Render should be processed on farm, skipping local render.")
+            return
+
+        if creator_attribute.get("render_target") == "local":
+            ropnode = hou.node(instance.data.get("instance_node"))
+            render_rop(ropnode)
+
+        # `ExpectedFiles` is a list that includes one dict.
+        expected_files = instance.data["expectedFiles"][0]
+        # Each key in that dict is a list of files.
+        # Combine lists of files into one big list.
+        all_frames = []
+        for value in expected_files.values():
+            if isinstance(value, str):
+                all_frames.append(value)
+            elif isinstance(value, list):
+                all_frames.extend(value)
+        # Check missing frames.
+        # Frames won't exist if user cancels the render.
+        missing_frames = [
+            frame
+            for frame in all_frames
+            if not os.path.exists(frame)
+        ]
+        if missing_frames:
+            # TODO: Use user friendly error reporting.
+            raise RuntimeError("Failed to complete render extraction. "
+                               "Missing output files: {}".format(
+                                   missing_frames))
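The comments in ExtractRender.process() above describe the shape of instance.data["expectedFiles"] that the missing-frame check walks. An illustrative sketch of that structure (AOV names and paths are made up):

    # Illustrative only; real keys and paths come from the render-product
    # collectors above.
    expected_files = [
        {
            "beauty": ["/renders/shot010.beauty.0001.exr",
                       "/renders/shot010.beauty.0002.exr"],
            "diffuse": ["/renders/shot010.diffuse.0001.exr",
                        "/renders/shot010.diffuse.0002.exr"],
        }
    ]
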
@@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
     order = pyblish.api.IntegratorOrder + 9.0
     hosts = ["houdini"]
     families = ["workfile",
-                "redshift_rop",
-                "arnold_rop",
+                "usdrender",
                 "mantra_rop",
                 "karma_rop",
-                "usdrender",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop",
+                "render.local.hou",
                 "publish.hou"]
     optional = True
 
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
-import sys
+import hou
 import pyblish.api
-import six
 
 from ayon_core.pipeline import PublishValidationError
 
 
@@ -26,28 +25,21 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
         invalid = self.get_invalid(instance)
         if invalid:
             raise PublishValidationError(
-                ("Output node(s) `{}` are incorrect. "
-                 "See plug-in log for details.").format(invalid),
-                title=self.label
+                "Output node '{}' is incorrect. "
+                "See plug-in log for details.".format(invalid),
+                title=self.label,
+                description=(
+                    "### Invalid COP output node\n\n"
+                    "The output node path for the instance must be set to a "
+                    "valid COP node path.\n\nSee the log for more details."
+                )
             )
 
     @classmethod
     def get_invalid(cls, instance):
+        output_node = instance.data.get("output_node")
 
-        import hou
-
-        try:
-            output_node = instance.data["output_node"]
-        except KeyError:
-            six.reraise(
-                PublishValidationError,
-                PublishValidationError(
-                    "Can't determine COP output node.",
-                    title=cls.__name__),
-                sys.exc_info()[2]
-            )
-
-        if output_node is None:
+        if not output_node:
             node = hou.node(instance.data.get("instance_node"))
             cls.log.error(
                 "COP Output node in '%s' does not exist. "
@@ -61,8 +53,8 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
             cls.log.error(
                 "Output node %s is not a COP node. "
                 "COP Path must point to a COP node, "
-                "instead found category type: %s"
-                % (output_node.path(), output_node.type().category().name())
+                "instead found category type: %s",
+                output_node.path(), output_node.type().category().name()
             )
             return [output_node.path()]
 
@@ -70,9 +62,7 @@ class ValidateCopOutputNode(pyblish.api.InstancePlugin):
         # is Cop2 to avoid potential edge case scenarios even though
         # the isinstance check above should be stricter than this category
         if output_node.type().category().name() != "Cop2":
-            raise PublishValidationError(
-                (
-                    "Output node {} is not of category Cop2."
-                    " This is a bug..."
-                ).format(output_node.path()),
-                title=cls.label)
+            cls.log.error(
+                "Output node %s is not of category Cop2.", output_node.path()
+            )
+            return [output_node.path()]
 
@@ -4,15 +4,19 @@ from ayon_core.pipeline import (
     PublishValidationError,
     OptionalPyblishPluginMixin
 )
-from ayon_core.pipeline.publish import RepairAction
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    get_plugin_settings,
+    apply_plugin_settings_automatically
+)
 from ayon_core.hosts.houdini.api.action import SelectROPAction
 
 import os
 import hou
 
 
-class SetDefaultViewSpaceAction(RepairAction):
-    label = "Set default view colorspace"
+class ResetViewSpaceAction(RepairAction):
+    label = "Reset OCIO colorspace parm"
     icon = "mdi.monitor"
 
 
@@ -27,12 +31,43 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
     families = ["review"]
     hosts = ["houdini"]
     label = "Validate Review Colorspace"
-    actions = [SetDefaultViewSpaceAction, SelectROPAction]
+    actions = [ResetViewSpaceAction, SelectROPAction]
 
     optional = True
+    review_color_space = ""
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        # Preserve automatic settings applying logic
+        settings = get_plugin_settings(plugin=cls,
+                                       project_settings=project_settings,
+                                       log=cls.log,
+                                       category="houdini")
+        apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+        # workfile settings added in '0.2.13'
+        color_settings = project_settings["houdini"]["imageio"].get(
+            "workfile", {}
+        )
+        # Add review color settings
+        if color_settings.get("enabled"):
+            cls.review_color_space = color_settings.get("review_color_space")
+
 
     def process(self, instance):
 
+        rop_node = hou.node(instance.data["instance_node"])
+
+        # This plugin is triggered when marking render as reviewable.
+        # Therefore, this plugin will run on over wrong instances.
+        # TODO: Don't run this plugin on wrong instances.
+        # This plugin should run only on review product type
+        # with instance node of opengl type.
+        if rop_node.type().name() != "opengl":
+            self.log.debug("Skipping Validation. Rop node {} "
+                           "is not an OpenGl node.".format(rop_node.path()))
+            return
+
         if not self.is_active(instance.data):
             return
 
@@ -43,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
             )
             return
 
-        rop_node = hou.node(instance.data["instance_node"])
         if rop_node.evalParm("colorcorrect") != 2:
             # any colorspace settings other than default requires
             # 'Color Correct' parm to be set to 'OpenColorIO'
@@ -52,39 +86,54 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
                 " 'OpenColorIO'".format(rop_node.path())
             )
 
-        if rop_node.evalParm("ociocolorspace") not in \
-                hou.Color.ocio_spaces():
+        current_color_space = rop_node.evalParm("ociocolorspace")
+        if current_color_space not in hou.Color.ocio_spaces():
 
             raise PublishValidationError(
                 "Invalid value: Colorspace name doesn't exist.\n"
                 "Check 'OCIO Colorspace' parameter on '{}' ROP"
                 .format(rop_node.path())
             )
 
-    @classmethod
-    def repair(cls, instance):
-        """Set Default View Space Action.
-
-        It is a helper action more than a repair action,
-        used to set colorspace on opengl node to the default view.
-        """
-        from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
-
-        rop_node = hou.node(instance.data["instance_node"])
-
-        if rop_node.evalParm("colorcorrect") != 2:
-            rop_node.setParms({"colorcorrect": 2})
-            cls.log.debug(
-                "'Color Correction' parm on '{}' has been set to"
-                " 'OpenColorIO'".format(rop_node.path())
-            )
-
-        # Get default view colorspace name
-        default_view_space = get_default_display_view_colorspace()
-
-        rop_node.setParms({"ociocolorspace": default_view_space})
-        cls.log.info(
-            "'OCIO Colorspace' parm on '{}' has been set to "
-            "the default view color space '{}'"
-            .format(rop_node, default_view_space)
-        )
+        # if houdini/imageio/workfile is enabled and
+        # Review colorspace setting is empty then this check should
+        # actually check if the current_color_space setting equals
+        # the default colorspace value.
+        # However, it will make the black cmd screen show up more often
+        # which is very annoying.
+        if self.review_color_space and \
+                self.review_color_space != current_color_space:
+
+            raise PublishValidationError(
+                "Invalid value: Colorspace name doesn't match"
+                "the Colorspace specified in settings."
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        """Reset view colorspace.
+
+        It is used to set colorspace on opengl node.
+
+        It uses the colorspace value specified in the Houdini addon settings.
+        If the value in the Houdini addon settings is empty,
+        it will fall to the default colorspace.
+
+        Note:
+            This repair action assumes that OCIO is enabled.
+            As if OCIO is disabled the whole validation is skipped
+            and this repair action won't show up.
+        """
+        from ayon_core.hosts.houdini.api.lib import set_review_color_space
+
+        # Fall to the default value if cls.review_color_space is empty.
+        if not cls.review_color_space:
+            # cls.review_color_space is an empty string
+            # when the imageio/workfile setting is disabled or
+            # when the Review colorspace setting is empty.
+            from ayon_core.hosts.houdini.api.colorspace import get_default_display_view_colorspace  # noqa
+            cls.review_color_space = get_default_display_view_colorspace()
+
+        rop_node = hou.node(instance.data["instance_node"])
+        set_review_color_space(rop_node,
+                               cls.review_color_space,
+                               cls.log)
 
@@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
         report = []
         instance_node = hou.node(instance.data.get("instance_node"))
 
+        # This plugin is triggered when marking render as reviewable.
+        # Therefore, this plugin will run on over wrong instances.
+        # TODO: Don't run this plugin on wrong instances.
+        # This plugin should run only on review product type
+        # with instance node of opengl type.
+        if instance_node.type().name() != "opengl":
+            self.log.debug("Skipping Validation. Rop node {} "
+                           "is not an OpenGl node.".format(instance_node.path()))
+            return
+
         invalid = self.get_invalid_scene_path(instance_node)
         if invalid:
             report.append(invalid)
 
client/ayon_core/hosts/houdini/startup/OPmenu.xml (new file, 29 lines)
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- OPMenu Stencil.
+    It's used to extend the OPMenu.
+-->
+
+<menuDocument>
+  <menu>
+    <!-- Operator type and asset options. -->
+    <subMenu id="opmenu.vhda_options_create">
+      <insertBefore>opmenu.unsynchronize</insertBefore>
+      <scriptItem id="opmenu.vhda_create_ayon">
+        <insertAfter>opmenu.vhda_create</insertAfter>
+        <label>Create New (AYON)...</label>
+        <context>
+        </context>
+        <scriptCode>
+          <![CDATA[
+from ayon_core.hosts.houdini.api.creator_node_shelves import create_interactive
+
+node = kwargs["node"]
+if node not in hou.selectedNodes():
+    node.setSelected(True)
+create_interactive("io.openpype.creators.houdini.hda", **kwargs)
+          ]]>
+        </scriptCode>
+      </scriptItem>
+    </subMenu>
+  </menu>
+</menuDocument>
 
@ -6,12 +6,9 @@ import json
|
||||||
from typing import Any, Dict, Union
|
from typing import Any, Dict, Union
|
||||||
|
|
||||||
import six
|
import six
|
||||||
import ayon_api
|
|
||||||
|
|
||||||
from ayon_core.pipeline import (
|
from ayon_core.pipeline import (
|
||||||
get_current_project_name,
|
get_current_project_name,
|
||||||
get_current_folder_path,
|
|
||||||
get_current_task_name,
|
|
||||||
colorspace
|
colorspace
|
||||||
)
|
)
|
||||||
from ayon_core.settings import get_project_settings
|
from ayon_core.settings import get_project_settings
|
||||||
|
|
@@ -372,12 +369,8 @@ def reset_colorspace():
     """
     if int(get_max_version()) < 2024:
         return
-    project_name = get_current_project_name()
-    colorspace_mgr = rt.ColorPipelineMgr
-    project_settings = get_project_settings(project_name)
-
-    max_config_data = colorspace.get_imageio_config(
-        project_name, "max", project_settings)
+    max_config_data = colorspace.get_current_context_imageio_config_preset()
     if max_config_data:
         ocio_config_path = max_config_data["path"]
         colorspace_mgr = rt.ColorPipelineMgr

@@ -392,10 +385,7 @@ def check_colorspace():
                       "because Max main window can't be found.")
     if int(get_max_version()) >= 2024:
         color_mgr = rt.ColorPipelineMgr
-        project_name = get_current_project_name()
-        project_settings = get_project_settings(project_name)
-        max_config_data = colorspace.get_imageio_config(
-            project_name, "max", project_settings)
+        max_config_data = colorspace.get_current_context_imageio_config_preset()
         if max_config_data and color_mgr.Mode != rt.Name("OCIO_Custom"):
             if not is_headless():
                 from ayon_core.tools.utils import SimplePopup

@@ -496,9 +486,9 @@ def object_transform_set(container_children):
     """
     transform_set = {}
     for node in container_children:
-        name = f"{node.name}.transform"
+        name = f"{node}.transform"
         transform_set[name] = node.pos
-        name = f"{node.name}.scale"
+        name = f"{node}.scale"
         transform_set[name] = node.scale
     return transform_set

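A minimal sketch of how this cache is meant to be read back, assuming a live 3ds Max session with pymxs and that `object_transform_set` is imported from the patched module (the node name is hypothetical):

    from pymxs import runtime as rt

    node = rt.getNodeByName("Box001")  # hypothetical scene node
    transform_set = object_transform_set([node])

    # Keys are built with f"{node}.transform" / f"{node}.scale", so lookups
    # must use the same str(node) form rather than node.name.
    cached_pos = transform_set[f"{node}.transform"]
    cached_scale = transform_set[f"{node}.scale"]
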
@@ -519,6 +509,36 @@ def get_plugins() -> list:
     return plugin_info_list


+def update_modifier_node_names(event, node):
+    """Update the name of the nodes after renaming
+
+    Args:
+        event (pymxs.MXSWrapperBase): Event Name (
+            Mandatory argument for rt.NodeEventCallback)
+        node (list): Event Number (
+            Mandatory argument for rt.NodeEventCallback)
+
+    """
+    containers = [
+        obj
+        for obj in rt.Objects
+        if (
+            rt.ClassOf(obj) == rt.Container
+            and rt.getUserProp(obj, "id") == "pyblish.avalon.instance"
+            and rt.getUserProp(obj, "productType") not in {
+                "workfile", "tyflow"
+            }
+        )
+    ]
+    if not containers:
+        return
+    for container in containers:
+        ayon_data = container.modifiers[0].openPypeData
+        updated_node_names = [str(node.node) for node
+                              in ayon_data.all_handles]
+        rt.setProperty(ayon_data, "sel_list", updated_node_names)
+
+
 @contextlib.contextmanager
 def render_resolution(width, height):
     """Set render resolution option during context

@@ -52,17 +52,15 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):

         self._has_been_setup = True

-        def context_setting():
-            return lib.set_context_setting()
-
-        rt.callbacks.addScript(rt.Name('systemPostNew'),
-                               context_setting)
+        rt.callbacks.addScript(rt.Name('systemPostNew'), on_new)

         rt.callbacks.addScript(rt.Name('filePostOpen'),
                                lib.check_colorspace)

         rt.callbacks.addScript(rt.Name('postWorkspaceChange'),
                                self._deferred_menu_creation)
+        rt.NodeEventCallback(
+            nameChanged=lib.update_modifier_node_names)

     def workfile_has_unsaved_changes(self):
         return rt.getSaveRequired()

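The `rt.NodeEventCallback(nameChanged=...)` registration above uses pymxs' node event system. A standalone sketch of the same mechanism, assuming it runs inside 3ds Max; note that, as with MaxScript's NodeEventCallback, the callback object should be kept referenced (for example in a module-level variable), since an unreferenced callback can be garbage collected and stop firing:

    from pymxs import runtime as rt

    def on_name_changed(event, nodes):
        # `nodes` holds node event handles for the renamed nodes.
        print("Renamed nodes:", nodes)

    # Keep a reference so the subscription stays alive.
    _rename_callback = rt.NodeEventCallback(nameChanged=on_name_changed)
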
@@ -161,6 +159,14 @@ def ls() -> list:
         yield lib.read(container)


+def on_new():
+    lib.set_context_setting()
+    if rt.checkForSave():
+        rt.resetMaxFile(rt.Name("noPrompt"))
+    rt.clearUndoBuffer()
+    rt.redrawViews()
+
+
 def containerise(name: str, nodes: list, context,
                  namespace=None, loader=None, suffix="_CON"):
     data = {

@@ -117,7 +117,7 @@ class MaxSceneLoader(load.LoaderPlugin):
         )
         for max_obj, obj_name in zip(max_objects, max_object_names):
             max_obj.name = f"{namespace}:{obj_name}"
-            max_container.append(rt.getNodeByName(max_obj.name))
+            max_container.append(max_obj)
         return containerise(
             name, max_container, context,
             namespace, loader=self.__class__.__name__)

@@ -158,11 +158,11 @@ class MaxSceneLoader(load.LoaderPlugin):
                                           current_max_object_names):
             max_obj.name = f"{namespace}:{obj_name}"
             max_objects.append(max_obj)
-            max_transform = f"{max_obj.name}.transform"
+            max_transform = f"{max_obj}.transform"
             if max_transform in transform_data.keys():
                 max_obj.pos = transform_data[max_transform] or 0
                 max_obj.scale = transform_data[
-                    f"{max_obj.name}.scale"] or 0
+                    f"{max_obj}.scale"] or 0

         update_custom_attribute_data(node, max_objects)
         lib.imprint(container["instance_node"], {

@@ -76,11 +76,11 @@ class FbxModelLoader(load.LoaderPlugin):
         for fbx_object in current_fbx_objects:
             fbx_object.name = f"{namespace}:{fbx_object.name}"
             fbx_objects.append(fbx_object)
-            fbx_transform = f"{fbx_object.name}.transform"
+            fbx_transform = f"{fbx_object}.transform"
             if fbx_transform in transform_data.keys():
                 fbx_object.pos = transform_data[fbx_transform] or 0
                 fbx_object.scale = transform_data[
-                    f"{fbx_object.name}.scale"] or 0
+                    f"{fbx_object}.scale"] or 0

         with maintained_selection():
             rt.Select(node)

@@ -67,11 +67,11 @@ class ObjLoader(load.LoaderPlugin):
         selections = rt.GetCurrentSelection()
         for selection in selections:
             selection.name = f"{namespace}:{selection.name}"
-            selection_transform = f"{selection.name}.transform"
+            selection_transform = f"{selection}.transform"
             if selection_transform in transform_data.keys():
                 selection.pos = transform_data[selection_transform] or 0
                 selection.scale = transform_data[
-                    f"{selection.name}.scale"] or 0
+                    f"{selection}.scale"] or 0
         update_custom_attribute_data(node, selections)
         with maintained_selection():
             rt.Select(node)

@@ -95,11 +95,11 @@ class ModelUSDLoader(load.LoaderPlugin):
         for children in asset.Children:
             children.name = f"{namespace}:{children.name}"
             usd_objects.append(children)
-            children_transform = f"{children.name}.transform"
+            children_transform = f"{children}.transform"
             if children_transform in transform_data.keys():
                 children.pos = transform_data[children_transform] or 0
                 children.scale = transform_data[
-                    f"{children.name}.scale"] or 0
+                    f"{children}.scale"] or 0

         asset.name = f"{namespace}:{asset.name}"
         usd_objects.append(asset)

@@ -92,10 +92,10 @@ class OxAbcLoader(load.LoaderPlugin):
             abc.Parent = container
             abc.name = f"{namespace}:{abc.name}"
             ox_abc_objects.append(abc)
-            ox_transform = f"{abc.name}.transform"
+            ox_transform = f"{abc}.transform"
             if ox_transform in transform_data.keys():
                 abc.pos = transform_data[ox_transform] or 0
-                abc.scale = transform_data[f"{abc.name}.scale"] or 0
+                abc.scale = transform_data[f"{abc}.scale"] or 0
         update_custom_attribute_data(node, ox_abc_objects)
         lib.imprint(
             container["instance_node"],

@@ -53,6 +53,7 @@ class ExtractAlembic(publish.Extractor,
     hosts = ["max"]
     families = ["pointcache"]
     optional = True
+    active = True

     def process(self, instance):
         if not self.is_active(instance.data):

@@ -102,24 +103,27 @@ class ExtractAlembic(publish.Extractor,

     @classmethod
     def get_attribute_defs(cls):
-        return [
+        defs = super(ExtractAlembic, cls).get_attribute_defs()
+        defs.extend([
             BoolDef("custom_attrs",
                     label="Custom Attributes",
                     default=False),
-        ]
+        ])
+        return defs


 class ExtractCameraAlembic(ExtractAlembic):
     """Extract Camera with AlembicExport."""

     label = "Extract Alembic Camera"
     families = ["camera"]
+    optional = True


-class ExtractModel(ExtractAlembic):
+class ExtractModelAlembic(ExtractAlembic):
     """Extract Geometry in Alembic Format"""
     label = "Extract Geometry (Alembic)"
     families = ["model"]
+    optional = True

     def _set_abc_attributes(self, instance):
         attr_values = self.get_attr_values_from_data(instance.data)

@@ -12,4 +12,4 @@
 max create mode

 python.ExecuteFile startup
 )

client/ayon_core/hosts/maya/api/alembic.py (new file, 350 lines)
@@ -0,0 +1,350 @@
+import json
+import logging
+import os
+
+from maya import cmds  # noqa
+
+from ayon_core.hosts.maya.api.lib import evaluation
+
+log = logging.getLogger(__name__)
+
+# The maya alembic export types
+ALEMBIC_ARGS = {
+    "attr": (list, tuple),
+    "attrPrefix": (list, tuple),
+    "autoSubd": bool,
+    "dataFormat": str,
+    "endFrame": float,
+    "eulerFilter": bool,
+    "frameRange": str,  # "start end"; overrides startFrame & endFrame
+    "frameRelativeSample": float,
+    "melPerFrameCallback": str,
+    "melPostJobCallback": str,
+    "noNormals": bool,
+    "preRoll": bool,
+    "pythonPerFrameCallback": str,
+    "pythonPostJobCallback": str,
+    "renderableOnly": bool,
+    "root": (list, tuple),
+    "selection": bool,
+    "startFrame": float,
+    "step": float,
+    "stripNamespaces": bool,
+    "userAttr": (list, tuple),
+    "userAttrPrefix": (list, tuple),
+    "uvWrite": bool,
+    "uvsOnly": bool,
+    "verbose": bool,
+    "wholeFrameGeo": bool,
+    "worldSpace": bool,
+    "writeColorSets": bool,
+    "writeCreases": bool,  # Maya 2015 Ext1+
+    "writeFaceSets": bool,
+    "writeUVSets": bool,  # Maya 2017+
+    "writeVisibility": bool,
+}
+
+
+def extract_alembic(
+    file,
+    attr=None,
+    attrPrefix=None,
+    dataFormat="ogawa",
+    endFrame=None,
+    eulerFilter=True,
+    frameRange="",
+    melPerFrameCallback=None,
+    melPostJobCallback=None,
+    noNormals=False,
+    preRoll=False,
+    preRollStartFrame=0,
+    pythonPerFrameCallback=None,
+    pythonPostJobCallback=None,
+    renderableOnly=False,
+    root=None,
+    selection=True,
+    startFrame=None,
+    step=1.0,
+    stripNamespaces=True,
+    userAttr=None,
+    userAttrPrefix=None,
+    uvsOnly=False,
+    uvWrite=True,
+    verbose=False,
+    wholeFrameGeo=False,
+    worldSpace=False,
+    writeColorSets=False,
+    writeCreases=False,
+    writeFaceSets=False,
+    writeUVSets=False,
+    writeVisibility=False
+):
+    """Extract a single Alembic Cache.
+
+    This extracts an Alembic cache using the `-selection` flag to minimize
+    the extracted content to solely what was Collected into the instance.
+
+    Arguments:
+        file (str): The filepath to write the alembic file to.
+
+        attr (list of str, optional): A specific geometric attribute to
+            write out. Defaults to [].
+
+        attrPrefix (list of str, optional): Prefix filter for determining
+            which geometric attributes to write out. Defaults to ["ABC_"].
+
+        dataFormat (str): The data format to use for the cache,
+            defaults to "ogawa"
+
+        endFrame (float): End frame of output. Ignored if `frameRange`
+            provided.
+
+        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered
+            with an Euler filter. Euler filtering helps resolve
+            irregularities in rotations especially if X, Y, and Z rotations
+            exceed 360 degrees. Defaults to True.
+
+        frameRange (tuple or str): Two-tuple with start and end frame or a
+            string formatted as: "startFrame endFrame". This argument
+            overrides `startFrame` and `endFrame` arguments.
+
+        melPerFrameCallback (Optional[str]): MEL callback run per frame.
+
+        melPostJobCallback (Optional[str]): MEL callback after last frame
+            is written.
+
+        noNormals (bool): When on, normal data from the original polygon
+            objects is not included in the exported Alembic cache file.
+
+        preRoll (bool): This frame range will not be sampled.
+            Defaults to False.
+
+        preRollStartFrame (float): The frame to start scene
+            evaluation at. This is used to set the starting frame for time
+            dependent translations and can be used to evaluate run-up that
+            isn't actually translated. Defaults to 0.
+
+        pythonPerFrameCallback (Optional[str]): Python callback run per
+            frame.
+
+        pythonPostJobCallback (Optional[str]): Python callback after last
+            frame is written.
+
+        renderableOnly (bool): When on, any non-renderable nodes or
+            hierarchy, such as hidden objects, are not included in the
+            Alembic file. Defaults to False.
+
+        root (list of str): Maya dag path which will be parented to
+            the root of the Alembic file. Defaults to [], which means the
+            entire scene will be written out.
+
+        selection (bool): Write out all selected nodes from the
+            active selection list that are descendents of the roots
+            specified with -root. Defaults to True.
+
+        startFrame (float): Start frame of output. Ignored if `frameRange`
+            provided.
+
+        step (float): The time interval (expressed in frames) at
+            which the frame range is sampled. Additional samples around
+            each frame can be specified with -frs. Defaults to 1.0.
+
+        stripNamespaces (bool): When on, any namespaces associated with the
+            exported objects are removed from the Alembic file. For
+            example, an object with the namespace taco:foo:bar appears as
+            bar in the Alembic file.
+
+        userAttr (list of str, optional): A specific user defined attribute
+            to write out. Defaults to [].
+
+        userAttrPrefix (list of str, optional): Prefix filter for
+            determining which user defined attributes to write out.
+            Defaults to [].
+
+        uvsOnly (bool): When on, only uv data for PolyMesh and SubD shapes
+            will be written to the Alembic file.
+
+        uvWrite (bool): When on, UV data from polygon meshes and
+            subdivision objects are written to the Alembic file. Only the
+            current UV map is included.
+
+        verbose (bool): When on, outputs frame number information to the
+            Script Editor or output window during extraction.
+
+        wholeFrameGeo (bool): Data for geometry will only be written
+            out on whole frames. Defaults to False.
+
+        worldSpace (bool): When on, the top node in the node hierarchy is
+            stored as world space. By default, these nodes are stored as
+            local space. Defaults to False.
+
+        writeColorSets (bool): Write all color sets on MFnMeshes as
+            color 3 or color 4 indexed geometry parameters with face
+            varying scope. Defaults to False.
+
+        writeCreases (bool): If the mesh has crease edges or crease
+            vertices, the mesh (OPolyMesh) would now be written out as an
+            OSubD and crease info will be stored in the Alembic file.
+            Otherwise, creases info won't be preserved in Alembic file
+            unless a custom Boolean attribute SubDivisionMesh has been
+            added to mesh node and its value is true. Defaults to False.
+
+        writeFaceSets (bool): Write all Face sets on MFnMeshes.
+            Defaults to False.
+
+        writeUVSets (bool): Write all uv sets on MFnMeshes as vector
+            2 indexed geometry parameters with face varying scope.
+            Defaults to False.
+
+        writeVisibility (bool): Visibility state will be stored in
+            the Alembic file. Otherwise everything written out is treated
+            as visible. Defaults to False.
+    """
+
+    # Ensure alembic exporter is loaded
+    cmds.loadPlugin('AbcExport', quiet=True)
+
+    # Alembic Exporter requires forward slashes
+    file = file.replace('\\', '/')
+
+    # Ensure list arguments are valid.
+    attr = attr or []
+    attrPrefix = attrPrefix or []
+    userAttr = userAttr or []
+    userAttrPrefix = userAttrPrefix or []
+    root = root or []
+
+    # Pass the start and end frame on as `frameRange` so that it
+    # never conflicts with that argument
+    if not frameRange:
+        # Fallback to maya timeline if no start or end frame provided.
+        if startFrame is None:
+            startFrame = cmds.playbackOptions(query=True,
+                                              animationStartTime=True)
+        if endFrame is None:
+            endFrame = cmds.playbackOptions(query=True,
+                                            animationEndTime=True)
+
+        # Ensure valid types are converted to frame range
+        assert isinstance(startFrame, ALEMBIC_ARGS["startFrame"])
+        assert isinstance(endFrame, ALEMBIC_ARGS["endFrame"])
+        frameRange = "{0} {1}".format(startFrame, endFrame)
+    else:
+        # Allow conversion from tuple for `frameRange`
+        if isinstance(frameRange, (list, tuple)):
+            assert len(frameRange) == 2
+            frameRange = "{0} {1}".format(frameRange[0], frameRange[1])
+
+    # Assemble options
+    options = {
+        "selection": selection,
+        "frameRange": frameRange,
+        "eulerFilter": eulerFilter,
+        "noNormals": noNormals,
+        "preRoll": preRoll,
+        "root": root,
+        "renderableOnly": renderableOnly,
+        "uvWrite": uvWrite,
+        "uvsOnly": uvsOnly,
+        "writeColorSets": writeColorSets,
+        "writeFaceSets": writeFaceSets,
+        "wholeFrameGeo": wholeFrameGeo,
+        "worldSpace": worldSpace,
+        "writeVisibility": writeVisibility,
+        "writeUVSets": writeUVSets,
+        "writeCreases": writeCreases,
+        "dataFormat": dataFormat,
+        "step": step,
+        "attr": attr,
+        "attrPrefix": attrPrefix,
+        "userAttr": userAttr,
+        "userAttrPrefix": userAttrPrefix,
+        "stripNamespaces": stripNamespaces,
+        "verbose": verbose
+    }
+
+    # Validate options
+    for key, value in options.copy().items():
+
+        # Discard unknown options
+        if key not in ALEMBIC_ARGS:
+            log.warning("extract_alembic() does not support option '%s'. "
+                        "Flag will be ignored..", key)
+            options.pop(key)
+            continue
+
+        # Validate value type
+        valid_types = ALEMBIC_ARGS[key]
+        if not isinstance(value, valid_types):
+            raise TypeError("Alembic option unsupported type: "
+                            "{0} (expected {1})".format(value, valid_types))
+
+        # Ignore empty values, like an empty string, since they mess up how
+        # job arguments are built
+        if isinstance(value, (list, tuple)):
+            value = [x for x in value if x.strip()]
+
+            # Ignore option completely if no values remaining
+            if not value:
+                options.pop(key)
+                continue
+
+            options[key] = value
+
+    # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
+    maya_version = int(cmds.about(version=True))
+    if maya_version >= 2018:
+        options['autoSubd'] = options.pop('writeCreases', False)
+
+    # Only add callbacks if they are set so that we're not passing `None`
+    callbacks = {
+        "melPerFrameCallback": melPerFrameCallback,
+        "melPostJobCallback": melPostJobCallback,
+        "pythonPerFrameCallback": pythonPerFrameCallback,
+        "pythonPostJobCallback": pythonPostJobCallback,
+    }
+    for key, callback in callbacks.items():
+        if callback:
+            options[key] = str(callback)
+
+    # Format the job string from options
+    job_args = list()
+    for key, value in options.items():
+        if isinstance(value, (list, tuple)):
+            for entry in value:
+                job_args.append("-{} {}".format(key, entry))
+        elif isinstance(value, bool):
+            # Add only when state is set to True
+            if value:
+                job_args.append("-{0}".format(key))
+        else:
+            job_args.append("-{0} {1}".format(key, value))
+
+    job_str = " ".join(job_args)
+    job_str += ' -file "%s"' % file
+
+    # Ensure output directory exists
+    parent_dir = os.path.dirname(file)
+    if not os.path.exists(parent_dir):
+        os.makedirs(parent_dir)
+
+    if verbose:
+        log.debug("Preparing Alembic export with options: %s",
+                  json.dumps(options, indent=4))
+        log.debug("Extracting Alembic with job arguments: %s", job_str)
+
+    # Perform extraction
+    print("Alembic Job Arguments : {}".format(job_str))
+
+    # Disable the parallel evaluation temporarily to ensure no buggy
+    # exports are made. (PLN-31)
+    # TODO: Make sure this actually fixes the issues
+    with evaluation("off"):
+        cmds.AbcExport(
+            j=job_str,
+            verbose=verbose,
+            preRollStartFrame=preRollStartFrame
+        )
+
+    if verbose:
+        log.debug("Extracted Alembic to: %s", file)
+
+    return file

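A short usage sketch for the new `extract_alembic` helper (the node name and output path are hypothetical; run inside a Maya session):

    from maya import cmds
    from ayon_core.hosts.maya.api.alembic import extract_alembic

    cmds.select("|char_GRP", replace=True)  # hypothetical example hierarchy
    extract_alembic(
        file="/tmp/char_GRP.abc",   # hypothetical output path
        root=["|char_GRP"],
        frameRange=(1001, 1050),    # tuple is converted to "1001 1050"
        worldSpace=True,
        writeCreases=True,          # mapped to -autoSubd on Maya 2018+
        verbose=True,
    )
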
@@ -47,7 +47,7 @@ class FBXExtractor:
         "smoothMesh": bool,
         "instances": bool,
         # "referencedContainersContent": bool, # deprecated in Maya 2016+
-        "bakeComplexAnimation": int,
+        "bakeComplexAnimation": bool,
         "bakeComplexStart": int,
         "bakeComplexEnd": int,
         "bakeComplexStep": int,

@@ -59,6 +59,7 @@ class FBXExtractor:
         "constraints": bool,
         "lights": bool,
         "embeddedTextures": bool,
+        "includeChildren": bool,
         "inputConnections": bool,
         "upAxis": str,  # x, y or z,
         "triangulate": bool,

@@ -102,6 +103,7 @@ class FBXExtractor:
         "constraints": False,
         "lights": True,
         "embeddedTextures": False,
+        "includeChildren": True,
         "inputConnections": True,
         "upAxis": "y",
         "triangulate": False,

@@ -70,37 +70,6 @@ DEFAULT_MATRIX = [1.0, 0.0, 0.0, 0.0,
                   0.0, 0.0, 1.0, 0.0,
                   0.0, 0.0, 0.0, 1.0]

-# The maya alembic export types
-_alembic_options = {
-    "startFrame": float,
-    "endFrame": float,
-    "frameRange": str,  # "start end"; overrides startFrame & endFrame
-    "eulerFilter": bool,
-    "frameRelativeSample": float,
-    "noNormals": bool,
-    "renderableOnly": bool,
-    "step": float,
-    "stripNamespaces": bool,
-    "uvWrite": bool,
-    "wholeFrameGeo": bool,
-    "worldSpace": bool,
-    "writeVisibility": bool,
-    "writeColorSets": bool,
-    "writeFaceSets": bool,
-    "writeCreases": bool,  # Maya 2015 Ext1+
-    "writeUVSets": bool,  # Maya 2017+
-    "dataFormat": str,
-    "root": (list, tuple),
-    "attr": (list, tuple),
-    "attrPrefix": (list, tuple),
-    "userAttr": (list, tuple),
-    "melPerFrameCallback": str,
-    "melPostJobCallback": str,
-    "pythonPerFrameCallback": str,
-    "pythonPostJobCallback": str,
-    "selection": bool
-}
-
 INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
 FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}

@@ -1330,7 +1299,7 @@ def is_visible(node,
         override_enabled = cmds.getAttr('{}.overrideEnabled'.format(node))
         override_visibility = cmds.getAttr('{}.overrideVisibility'.format(
             node))
-        if override_enabled and override_visibility:
+        if override_enabled and not override_visibility:
             return False

     if parentHidden:

@@ -1346,178 +1315,6 @@ def is_visible(node,

     return True


-def extract_alembic(file,
-                    startFrame=None,
-                    endFrame=None,
-                    selection=True,
-                    uvWrite=True,
-                    eulerFilter=True,
-                    dataFormat="ogawa",
-                    verbose=False,
-                    **kwargs):
-    """Extract a single Alembic Cache.
-
-    This extracts an Alembic cache using the `-selection` flag to minimize
-    the extracted content to solely what was Collected into the instance.
-
-    Arguments:
-
-        startFrame (float): Start frame of output. Ignored if `frameRange`
-            provided.
-
-        endFrame (float): End frame of output. Ignored if `frameRange`
-            provided.
-
-        frameRange (tuple or str): Two-tuple with start and end frame or a
-            string formatted as: "startFrame endFrame". This argument
-            overrides `startFrame` and `endFrame` arguments.
-
-        dataFormat (str): The data format to use for the cache,
-            defaults to "ogawa"
-
-        verbose (bool): When on, outputs frame number information to the
-            Script Editor or output window during extraction.
-
-        noNormals (bool): When on, normal data from the original polygon
-            objects is not included in the exported Alembic cache file.
-
-        renderableOnly (bool): When on, any non-renderable nodes or
-            hierarchy, such as hidden objects, are not included in the
-            Alembic file. Defaults to False.
-
-        stripNamespaces (bool): When on, any namespaces associated with the
-            exported objects are removed from the Alembic file. For
-            example, an object with the namespace taco:foo:bar appears as
-            bar in the Alembic file.
-
-        uvWrite (bool): When on, UV data from polygon meshes and
-            subdivision objects are written to the Alembic file. Only the
-            current UV map is included.
-
-        worldSpace (bool): When on, the top node in the node hierarchy is
-            stored as world space. By default, these nodes are stored as
-            local space. Defaults to False.
-
-        eulerFilter (bool): When on, X, Y, and Z rotation data is filtered
-            with an Euler filter. Euler filtering helps resolve
-            irregularities in rotations especially if X, Y, and Z rotations
-            exceed 360 degrees. Defaults to True.
-
-    """
-
-    # Ensure alembic exporter is loaded
-    cmds.loadPlugin('AbcExport', quiet=True)
-
-    # Alembic Exporter requires forward slashes
-    file = file.replace('\\', '/')
-
-    # Pass the start and end frame on as `frameRange` so that it
-    # never conflicts with that argument
-    if "frameRange" not in kwargs:
-        # Fallback to maya timeline if no start or end frame provided.
-        if startFrame is None:
-            startFrame = cmds.playbackOptions(query=True,
-                                              animationStartTime=True)
-        if endFrame is None:
-            endFrame = cmds.playbackOptions(query=True,
-                                            animationEndTime=True)
-
-        # Ensure valid types are converted to frame range
-        assert isinstance(startFrame, _alembic_options["startFrame"])
-        assert isinstance(endFrame, _alembic_options["endFrame"])
-        kwargs["frameRange"] = "{0} {1}".format(startFrame, endFrame)
-    else:
-        # Allow conversion from tuple for `frameRange`
-        frame_range = kwargs["frameRange"]
-        if isinstance(frame_range, (list, tuple)):
-            assert len(frame_range) == 2
-            kwargs["frameRange"] = "{0} {1}".format(frame_range[0],
-                                                    frame_range[1])
-
-    # Assemble options
-    options = {
-        "selection": selection,
-        "uvWrite": uvWrite,
-        "eulerFilter": eulerFilter,
-        "dataFormat": dataFormat
-    }
-    options.update(kwargs)
-
-    # Validate options
-    for key, value in options.copy().items():
-
-        # Discard unknown options
-        if key not in _alembic_options:
-            log.warning("extract_alembic() does not support option '%s'. "
-                        "Flag will be ignored..", key)
-            options.pop(key)
-            continue
-
-        # Validate value type
-        valid_types = _alembic_options[key]
-        if not isinstance(value, valid_types):
-            raise TypeError("Alembic option unsupported type: "
-                            "{0} (expected {1})".format(value, valid_types))
-
-        # Ignore empty values, like an empty string, since they mess up how
-        # job arguments are built
-        if isinstance(value, (list, tuple)):
-            value = [x for x in value if x.strip()]
-
-            # Ignore option completely if no values remaining
-            if not value:
-                options.pop(key)
-                continue
-
-            options[key] = value
-
-    # The `writeCreases` argument was changed to `autoSubd` in Maya 2018+
-    maya_version = int(cmds.about(version=True))
-    if maya_version >= 2018:
-        options['autoSubd'] = options.pop('writeCreases', False)
-
-    # Format the job string from options
-    job_args = list()
-    for key, value in options.items():
-        if isinstance(value, (list, tuple)):
-            for entry in value:
-                job_args.append("-{} {}".format(key, entry))
-        elif isinstance(value, bool):
-            # Add only when state is set to True
-            if value:
-                job_args.append("-{0}".format(key))
-        else:
-            job_args.append("-{0} {1}".format(key, value))
-
-    job_str = " ".join(job_args)
-    job_str += ' -file "%s"' % file
-
-    # Ensure output directory exists
-    parent_dir = os.path.dirname(file)
-    if not os.path.exists(parent_dir):
-        os.makedirs(parent_dir)
-
-    if verbose:
-        log.debug("Preparing Alembic export with options: %s",
-                  json.dumps(options, indent=4))
-        log.debug("Extracting Alembic with job arguments: %s", job_str)
-
-    # Perform extraction
-    print("Alembic Job Arguments : {}".format(job_str))
-
-    # Disable the parallel evaluation temporarily to ensure no buggy
-    # exports are made. (PLN-31)
-    # TODO: Make sure this actually fixes the issues
-    with evaluation("off"):
-        cmds.AbcExport(j=job_str, verbose=verbose)
-
-    if verbose:
-        log.debug("Extracted Alembic to: %s", file)
-
-    return file
-
-
 # region ID
 def get_id_required_nodes(referenced_nodes=False,
                           nodes=None,

@@ -2520,7 +2317,16 @@ def set_scene_fps(fps, update=True):
     """

     fps_mapping = {
+        '2': '2fps',
+        '3': '3fps',
+        '4': '4fps',
+        '5': '5fps',
+        '6': '6fps',
+        '8': '8fps',
+        '10': '10fps',
+        '12': '12fps',
         '15': 'game',
+        '16': '16fps',
         '24': 'film',
         '25': 'pal',
         '30': 'ntsc',

@@ -2612,21 +2418,24 @@ def get_fps_for_current_context():
     Returns:
         Union[int, float]: FPS value.
     """
-    project_name = get_current_project_name()
-    folder_path = get_current_folder_path()
-    folder_entity = ayon_api.get_folder_by_path(
-        project_name, folder_path, fields={"attrib.fps"}
-    ) or {}
-    fps = folder_entity.get("attrib", {}).get("fps")
-    if not fps:
-        project_entity = ayon_api.get_project(
-            project_name, fields=["attrib.fps"]
-        ) or {}
-        fps = project_entity.get("attrib", {}).get("fps")
-        if not fps:
-            fps = 25
+    task_entity = get_current_task_entity(fields={"attrib"})
+    fps = task_entity.get("attrib", {}).get("fps")
+    if not fps:
+        project_name = get_current_project_name()
+        folder_path = get_current_folder_path()
+        folder_entity = ayon_api.get_folder_by_path(
+            project_name, folder_path, fields={"attrib.fps"}
+        ) or {}
+        fps = folder_entity.get("attrib", {}).get("fps")
+        if not fps:
+            project_entity = ayon_api.get_project(
+                project_name, fields=["attrib.fps"]
+            ) or {}
+            fps = project_entity.get("attrib", {}).get("fps")
+
+            if not fps:
+                fps = 25

     return convert_to_maya_fps(fps)

@@ -4403,3 +4212,23 @@ def create_rig_animation_instance(
         variant=namespace,
         pre_create_data={"use_selection": True}
     )
+
+
+def get_node_index_under_parent(node: str) -> int:
+    """Return the index of a DAG node under its parent.
+
+    Arguments:
+        node (str): A DAG Node path.
+
+    Returns:
+        int: The DAG node's index under its parents or world
+
+    """
+    node = cmds.ls(node, long=True)[0]  # enforce long names
+    parent = node.rsplit("|", 1)[0]
+    if not parent:
+        return cmds.ls(assemblies=True, long=True).index(node)
+    else:
+        return cmds.listRelatives(parent,
+                                  children=True,
+                                  fullPath=True).index(node)

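A small usage sketch for `get_node_index_under_parent`, assuming it is imported from this lib module (node names are hypothetical):

    from maya import cmds
    from ayon_core.hosts.maya.api.lib import get_node_index_under_parent

    group = cmds.group(empty=True, name="grp")
    cmds.parent(cmds.spaceLocator(name="locA")[0], group)
    cmds.parent(cmds.spaceLocator(name="locB")[0], group)

    # locB is the second child under its parent group, so this prints 1.
    print(get_node_index_under_parent("|grp|locB"))
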
@@ -720,7 +720,8 @@ class RenderProductsArnold(ARenderProducts):

         # AOVs > Legacy > Maya Render View > Mode
         aovs_enabled = bool(
-            self._get_attr("defaultArnoldRenderOptions.aovMode")
+            self._get_attr(
+                "defaultArnoldRenderOptions.aovMode", as_string=False)
         )
         if not aovs_enabled:
             return beauty_products

@@ -30,9 +30,11 @@ from ayon_core.pipeline import (
     register_loader_plugin_path,
     register_inventory_action_path,
     register_creator_plugin_path,
+    register_workfile_build_plugin_path,
     deregister_loader_plugin_path,
     deregister_inventory_action_path,
     deregister_creator_plugin_path,
+    deregister_workfile_build_plugin_path,
     AYON_CONTAINER_ID,
     AVALON_CONTAINER_ID,
 )

@@ -47,7 +49,6 @@ from ayon_core.hosts.maya import MAYA_ROOT_DIR
 from ayon_core.hosts.maya.lib import create_workspace_mel

 from . import menu, lib
-from .workfile_template_builder import MayaPlaceholderLoadPlugin
 from .workio import (
     open_file,
     save_file,

@@ -64,6 +65,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
 LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
 CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
 INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
+WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")

 AVALON_CONTAINERS = ":AVALON_CONTAINERS"

@@ -93,7 +95,7 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
         register_loader_plugin_path(LOAD_PATH)
         register_creator_plugin_path(CREATE_PATH)
         register_inventory_action_path(INVENTORY_PATH)
-        self.log.info(PUBLISH_PATH)
+        register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)

         self.log.info("Installing callbacks ... ")
         register_event_callback("init", on_init)

@@ -148,11 +150,6 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
     def get_containers(self):
         return ls()

-    def get_workfile_build_placeholder_plugins(self):
-        return [
-            MayaPlaceholderLoadPlugin
-        ]
-
     @contextlib.contextmanager
     def maintained_selection(self):
         with lib.maintained_selection():

@@ -338,6 +335,7 @@ def uninstall():
     deregister_loader_plugin_path(LOAD_PATH)
     deregister_creator_plugin_path(CREATE_PATH)
     deregister_inventory_action_path(INVENTORY_PATH)
+    deregister_workfile_build_plugin_path(WORKFILE_BUILD_PATH)

     menu.uninstall()

@@ -12,14 +12,13 @@ from ayon_core.pipeline.workfile.workfile_template_builder import (
     TemplateAlreadyImported,
     AbstractTemplateBuilder,
     PlaceholderPlugin,
-    LoadPlaceholderItem,
-    PlaceholderLoadMixin,
+    PlaceholderItem,
 )
 from ayon_core.tools.workfile_template_build import (
     WorkfileBuildPlaceholderDialog,
 )

-from .lib import read, imprint, get_reference_node, get_main_window
+from .lib import read, imprint, get_main_window

 PLACEHOLDER_SET = "PLACEHOLDERS_SET"

@@ -91,170 +90,102 @@ class MayaTemplateBuilder(AbstractTemplateBuilder):
         return True


-class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
-    identifier = "maya.load"
-    label = "Maya load"
-
-    def _collect_scene_placeholders(self):
-        # Cache placeholder data to shared data
-        placeholder_nodes = self.builder.get_shared_populate_data(
-            "placeholder_nodes"
-        )
-        if placeholder_nodes is None:
-            attributes = cmds.ls("*.plugin_identifier", long=True)
-            placeholder_nodes = {}
-            for attribute in attributes:
-                node_name = attribute.rpartition(".")[0]
-                placeholder_nodes[node_name] = (
-                    self._parse_placeholder_node_data(node_name)
-                )
-
-            self.builder.set_shared_populate_data(
-                "placeholder_nodes", placeholder_nodes
-            )
-        return placeholder_nodes
-
-    def _parse_placeholder_node_data(self, node_name):
-        placeholder_data = read(node_name)
-        parent_name = (
-            cmds.getAttr(node_name + ".parent", asString=True)
-            or node_name.rpartition("|")[0]
-            or ""
-        )
-        if parent_name:
-            siblings = cmds.listRelatives(parent_name, children=True)
-        else:
-            siblings = cmds.ls(assemblies=True)
-        node_shortname = node_name.rpartition("|")[2]
-        current_index = cmds.getAttr(node_name + ".index", asString=True)
-        if current_index < 0:
-            current_index = siblings.index(node_shortname)
-
-        placeholder_data.update({
-            "parent": parent_name,
-            "index": current_index
-        })
-        return placeholder_data
+class MayaPlaceholderPlugin(PlaceholderPlugin):
+    """Base Placeholder Plugin for Maya with one unified cache.
+
+    Creates a locator as placeholder node, which during populate provides
+    all of its attributes defined on the locator's transform in
+    `placeholder.data` and where `placeholder.scene_identifier` is the
+    full path to the node.
+
+    Inherited classes must still implement `populate_placeholder`
+    """
+
+    use_selection_as_parent = True
+    item_class = PlaceholderItem

     def _create_placeholder_name(self, placeholder_data):
-        placeholder_name_parts = placeholder_data["builder_type"].split("_")
-
-        pos = 1
-        placeholder_product_type = placeholder_data.get("product_type")
-        if placeholder_product_type is None:
-            placeholder_product_type = placeholder_data.get("family")
-
-        if placeholder_product_type:
-            placeholder_name_parts.insert(pos, placeholder_product_type)
-            pos += 1
-
-        # add loader arguments if any
-        loader_args = placeholder_data["loader_args"]
-        if loader_args:
-            loader_args = json.loads(loader_args.replace('\'', '\"'))
-            values = [v for v in loader_args.values()]
-            for value in values:
-                placeholder_name_parts.insert(pos, value)
-                pos += 1
-
-        placeholder_name = "_".join(placeholder_name_parts)
-
-        return placeholder_name.capitalize()
-
-    def _get_loaded_repre_ids(self):
-        loaded_representation_ids = self.builder.get_shared_populate_data(
-            "loaded_representation_ids"
-        )
-        if loaded_representation_ids is None:
-            try:
-                containers = cmds.sets("AVALON_CONTAINERS", q=True)
-            except ValueError:
-                containers = []
-
-            loaded_representation_ids = {
-                cmds.getAttr(container + ".representation")
-                for container in containers
-            }
+        return self.identifier.replace(".", "_")
+
+    def _collect_scene_placeholders(self):
+        nodes_by_identifier = self.builder.get_shared_populate_data(
+            "placeholder_nodes"
+        )
+        if nodes_by_identifier is None:
+            # Cache placeholder data to shared data
+            nodes = cmds.ls("*.plugin_identifier", long=True,
+                            objectsOnly=True)
+
+            nodes_by_identifier = {}
+            for node in nodes:
+                identifier = cmds.getAttr("{}.plugin_identifier".format(node))
+                nodes_by_identifier.setdefault(identifier, []).append(node)
+
+            # Set the cache
             self.builder.set_shared_populate_data(
-                "loaded_representation_ids", loaded_representation_ids
+                "placeholder_nodes", nodes_by_identifier
             )
-        return loaded_representation_ids
+
+        return nodes_by_identifier

     def create_placeholder(self, placeholder_data):
-        selection = cmds.ls(selection=True)
-        if len(selection) > 1:
-            raise ValueError("More then one item are selected")
-
-        parent = selection[0] if selection else None
+        parent = None
+        if self.use_selection_as_parent:
+            selection = cmds.ls(selection=True)
+            if len(selection) > 1:
+                raise ValueError(
+                    "More than one node is selected. "
+                    "Please select only one to define the parent."
+                )
+            parent = selection[0] if selection else None

         placeholder_data["plugin_identifier"] = self.identifier

         placeholder_name = self._create_placeholder_name(placeholder_data)

         placeholder = cmds.spaceLocator(name=placeholder_name)[0]
         if parent:
             placeholder = cmds.parent(placeholder, selection[0])[0]

-        imprint(placeholder, placeholder_data)
-
-        # Add helper attributes to keep placeholder info
-        cmds.addAttr(
-            placeholder,
-            longName="parent",
-            hidden=True,
-            dataType="string"
-        )
-        cmds.addAttr(
-            placeholder,
-            longName="index",
-            hidden=True,
-            attributeType="short",
-            defaultValue=-1
-        )
-
-        cmds.setAttr(placeholder + ".parent", "", type="string")
+        self.imprint(placeholder, placeholder_data)

     def update_placeholder(self, placeholder_item, placeholder_data):
         node_name = placeholder_item.scene_identifier
-        new_values = {}
+
+        changed_values = {}
         for key, value in placeholder_data.items():
-            placeholder_value = placeholder_item.data.get(key)
-            if value != placeholder_value:
-                new_values[key] = value
-                placeholder_item.data[key] = value
-
-        for key in new_values.keys():
-            cmds.deleteAttr(node_name + "." + key)
-
-        imprint(node_name, new_values)
+            if value != placeholder_item.data.get(key):
+                changed_values[key] = value
+
+        # Delete attributes to ensure we imprint new data with correct type
+        for key in changed_values.keys():
+            placeholder_item.data[key] = value
+            if cmds.attributeQuery(key, node=node_name, exists=True):
+                attribute = "{}.{}".format(node_name, key)
+                cmds.deleteAttr(attribute)
+
+        self.imprint(node_name, changed_values)

     def collect_placeholders(self):
-        output = []
-        scene_placeholders = self._collect_scene_placeholders()
-        for node_name, placeholder_data in scene_placeholders.items():
-            if placeholder_data.get("plugin_identifier") != self.identifier:
-                continue
-
+        placeholders = []
+        nodes_by_identifier = self._collect_scene_placeholders()
+        for node in nodes_by_identifier.get(self.identifier, []):
             # TODO do data validations and maybe upgrades if they are invalid
-            output.append(
-                LoadPlaceholderItem(node_name, placeholder_data, self)
+            placeholder_data = self.read(node)
+            placeholders.append(
+                self.item_class(scene_identifier=node,
+                                data=placeholder_data,
+                                plugin=self)
             )

-        return output
-
-    def populate_placeholder(self, placeholder):
-        self.populate_load_placeholder(placeholder)
-
-    def repopulate_placeholder(self, placeholder):
-        repre_ids = self._get_loaded_repre_ids()
-        self.populate_load_placeholder(placeholder, repre_ids)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_load_plugin_options(options)
+        return placeholders

     def post_placeholder_process(self, placeholder, failed):
         """Cleanup placeholder after load of its corresponding representations.

+        Hide placeholder, add them to placeholder set.
+        Used only by PlaceholderCreateMixin and PlaceholderLoadMixin
+
         Args:
             placeholder (PlaceholderItem): Item which was just used to load
                 representation.

@ -263,81 +194,56 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
|
||||||
# Hide placeholder and add them to placeholder set
|
# Hide placeholder and add them to placeholder set
|
||||||
node = placeholder.scene_identifier
|
node = placeholder.scene_identifier
|
||||||
|
|
||||||
|
# If we just populate the placeholders from current scene, the
|
||||||
|
# placeholder set will not be created so account for that.
|
||||||
|
if not cmds.objExists(PLACEHOLDER_SET):
|
||||||
|
cmds.sets(name=PLACEHOLDER_SET, empty=True)
|
||||||
|
|
||||||
cmds.sets(node, addElement=PLACEHOLDER_SET)
|
cmds.sets(node, addElement=PLACEHOLDER_SET)
|
||||||
cmds.hide(node)
|
cmds.hide(node)
|
||||||
cmds.setAttr(node + ".hiddenInOutliner", True)
|
cmds.setAttr("{}.hiddenInOutliner".format(node), True)
|
||||||
|
|
||||||
def delete_placeholder(self, placeholder):
|
def delete_placeholder(self, placeholder):
|
||||||
"""Remove placeholder if building was successful"""
|
"""Remove placeholder if building was successful
|
||||||
cmds.delete(placeholder.scene_identifier)
|
|
||||||
|
|
||||||
def load_succeed(self, placeholder, container):
|
Used only by PlaceholderCreateMixin and PlaceholderLoadMixin.
|
||||||
self._parent_in_hierarchy(placeholder, container)
|
|
||||||
|
|
||||||
def _parent_in_hierarchy(self, placeholder, container):
|
|
||||||
"""Parent loaded container to placeholder's parent.
|
|
||||||
|
|
||||||
ie : Set loaded content as placeholder's sibling
|
|
||||||
|
|
||||||
Args:
|
|
||||||
container (str): Placeholder loaded containers
|
|
||||||
"""
|
"""
|
||||||
|
node = placeholder.scene_identifier
|
||||||
|
|
||||||
if not container:
|
# To avoid that deleting a placeholder node will have Maya delete
|
||||||
return
|
# any objectSets the node was a member of we will first remove it
|
||||||
|
# from any sets it was a member of. This way the `PLACEHOLDERS_SET`
|
||||||
|
# will survive long enough
|
||||||
|
sets = cmds.listSets(o=node) or []
|
||||||
|
for object_set in sets:
|
||||||
|
cmds.sets(node, remove=object_set)
|
||||||
|
|
||||||
roots = cmds.sets(container, q=True) or []
|
cmds.delete(node)
|
||||||
ref_node = None
|
|
||||||
try:
|
|
||||||
ref_node = get_reference_node(roots)
|
|
||||||
except AssertionError as e:
|
|
||||||
self.log.info(e.args[0])
|
|
||||||
|
|
||||||
nodes_to_parent = []
|
def imprint(self, node, data):
|
||||||
for root in roots:
|
"""Imprint call for placeholder node"""
|
||||||
if ref_node:
|
|
||||||
ref_root = cmds.referenceQuery(root, nodes=True)[0]
|
|
||||||
ref_root = (
|
|
||||||
cmds.listRelatives(ref_root, parent=True, path=True) or
|
|
||||||
[ref_root]
|
|
||||||
)
|
|
||||||
nodes_to_parent.extend(ref_root)
|
|
||||||
continue
|
|
||||||
if root.endswith("_RN"):
|
|
||||||
# Backwards compatibility for hardcoded reference names.
|
|
||||||
refRoot = cmds.referenceQuery(root, n=True)[0]
|
|
||||||
refRoot = cmds.listRelatives(refRoot, parent=True) or [refRoot]
|
|
||||||
nodes_to_parent.extend(refRoot)
|
|
||||||
elif root not in cmds.listSets(allSets=True):
|
|
||||||
nodes_to_parent.append(root)
|
|
||||||
|
|
||||||
elif not cmds.sets(root, q=True):
|
# Complicated data that can't be represented as flat maya attributes
|
||||||
return
|
# we write to json strings, e.g. multiselection EnumDef
|
||||||
|
for key, value in data.items():
|
||||||
|
if isinstance(value, (list, tuple, dict)):
|
||||||
|
data[key] = "JSON::{}".format(json.dumps(value))
|
||||||
|
|
||||||
# Move loaded nodes to correct index in outliner hierarchy
|
imprint(node, data)
|
||||||
placeholder_form = cmds.xform(
|
|
||||||
placeholder.scene_identifier,
|
|
||||||
q=True,
|
|
||||||
matrix=True,
|
|
||||||
worldSpace=True
|
|
||||||
)
|
|
||||||
scene_parent = cmds.listRelatives(
|
|
||||||
placeholder.scene_identifier, parent=True, fullPath=True
|
|
||||||
)
|
|
||||||
for node in set(nodes_to_parent):
|
|
||||||
cmds.reorder(node, front=True)
|
|
||||||
cmds.reorder(node, relative=placeholder.data["index"])
|
|
||||||
cmds.xform(node, matrix=placeholder_form, ws=True)
|
|
||||||
if scene_parent:
|
|
||||||
cmds.parent(node, scene_parent)
|
|
||||||
else:
|
|
||||||
cmds.parent(node, world=True)
|
|
||||||
|
|
||||||
holding_sets = cmds.listSets(object=placeholder.scene_identifier)
|
def read(self, node):
|
||||||
if not holding_sets:
|
"""Read call for placeholder node"""
|
||||||
return
|
|
||||||
for holding_set in holding_sets:
|
data = read(node)
|
||||||
cmds.sets(roots, forceElement=holding_set)
|
|
||||||
|
# Complicated data that can't be represented as flat maya attributes
|
||||||
|
# we read from json strings, e.g. multiselection EnumDef
|
||||||
|
for key, value in data.items():
|
||||||
|
if isinstance(value, str) and value.startswith("JSON::"):
|
||||||
|
value = value[len("JSON::"):] # strip of JSON:: prefix
|
||||||
|
data[key] = json.loads(value)
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
def build_workfile_template(*args):
|
def build_workfile_template(*args):
|
||||||
|
|
|
||||||
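The imprint/read pair above round-trips values that cannot live in flat Maya attributes (lists, tuples, dicts) through string attributes tagged with a "JSON::" prefix. A minimal standalone sketch of that round-trip, using a hypothetical data dict (the encode/decode itself needs no Maya):

    import json

    def encode_for_imprint(data):
        # Non-flat values become "JSON::<payload>" strings, as in imprint().
        return {
            key: ("JSON::{}".format(json.dumps(value))
                  if isinstance(value, (list, tuple, dict)) else value)
            for key, value in data.items()
        }

    def decode_after_read(data):
        # Tagged strings are decoded back to Python values, as in read().
        return {
            key: (json.loads(value[len("JSON::"):])
                  if isinstance(value, str) and value.startswith("JSON::")
                  else value)
            for key, value in data.items()
        }

    original = {"families": ["model", "rig"], "index": 3}  # hypothetical data
    assert decode_after_read(encode_for_imprint(original)) == original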
client/ayon_core/hosts/maya/api/yeti.py (new file, 101 lines)
@@ -0,0 +1,101 @@
+from typing import List
+
+from maya import cmds
+
+
+def get_yeti_user_variables(yeti_shape_node: str) -> List[str]:
+    """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+    Arguments:
+        yeti_shape_node (str): The `pgYetiMaya` shape node.
+
+    Returns:
+        list: Attribute names (for a vector attribute it only lists the top
+            parent attribute, not the attribute per axis)
+    """
+
+    attrs = cmds.listAttr(yeti_shape_node,
+                          userDefined=True,
+                          string=("yetiVariableV_*",
+                                  "yetiVariableF_*")) or []
+    valid_attrs = []
+    for attr in attrs:
+        attr_type = cmds.attributeQuery(attr, node=yeti_shape_node,
+                                        attributeType=True)
+        if attr.startswith("yetiVariableV_") and attr_type == "double3":
+            # vector
+            valid_attrs.append(attr)
+        elif attr.startswith("yetiVariableF_") and attr_type == "double":
+            valid_attrs.append(attr)
+
+    return valid_attrs
+
+
+def create_yeti_variable(yeti_shape_node: str,
+                         attr_name: str,
+                         value=None,
+                         force_value: bool = False) -> bool:
+    """Get user defined yeti user variables for a `pgYetiMaya` shape node.
+
+    Arguments:
+        yeti_shape_node (str): The `pgYetiMaya` shape node.
+        attr_name (str): The fully qualified yeti variable name, e.g.
+            "yetiVariableF_myfloat" or "yetiVariableV_myvector"
+        value (object): The value to set (must match the type of the attribute)
+            When value is None it will ignored and not be set.
+        force_value (bool): Whether to set the value if the attribute already
+            exists or not.
+
+    Returns:
+        bool: Whether the attribute value was set or not.
+
+    """
+    exists = cmds.attributeQuery(attr_name, node=yeti_shape_node, exists=True)
+    if not exists:
+        if attr_name.startswith("yetiVariableV_"):
+            _create_vector_yeti_user_variable(yeti_shape_node, attr_name)
+        if attr_name.startswith("yetiVariableF_"):
+            _create_float_yeti_user_variable(yeti_shape_node, attr_name)
+
+    if value is not None and (not exists or force_value):
+        plug = "{}.{}".format(yeti_shape_node, attr_name)
+        if (
+                isinstance(value, (list, tuple))
+                and attr_name.startswith("yetiVariableV_")
+        ):
+            cmds.setAttr(plug, *value, type="double3")
+        else:
+            cmds.setAttr(plug, value)
+
+        return True
+    return False
+
+
+def _create_vector_yeti_user_variable(yeti_shape_node: str, attr_name: str):
+    if not attr_name.startswith("yetiVariableV_"):
+        raise ValueError("Must start with yetiVariableV_")
+    cmds.addAttr(yeti_shape_node,
+                 longName=attr_name,
+                 attributeType="double3",
+                 cachedInternally=True,
+                 keyable=True)
+    for axis in "XYZ":
+        cmds.addAttr(yeti_shape_node,
+                     longName="{}{}".format(attr_name, axis),
+                     attributeType="double",
+                     parent=attr_name,
+                     cachedInternally=True,
+                     keyable=True)
+
+
+def _create_float_yeti_user_variable(yeti_node: str, attr_name: str):
+    if not attr_name.startswith("yetiVariableF_"):
+        raise ValueError("Must start with yetiVariableF_")
+
+    cmds.addAttr(yeti_node,
+                 longName=attr_name,
+                 attributeType="double",
+                 cachedInternally=True,
+                 softMinValue=0,
+                 softMaxValue=100,
+                 keyable=True)
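A minimal usage sketch of the two public helpers in the new module; the shape name is hypothetical and a live Maya session with a Yeti node is assumed:

    from ayon_core.hosts.maya.api.yeti import (
        create_yeti_variable,
        get_yeti_user_variables,
    )

    shape = "pgYetiMayaShape1"  # hypothetical pgYetiMaya shape node

    # Creates the attributes only when missing; existing values are kept
    # because force_value defaults to False.
    create_yeti_variable(shape, "yetiVariableF_density", value=2.5)
    create_yeti_variable(shape, "yetiVariableV_tint", value=(1.0, 0.5, 0.5))

    print(get_yeti_user_variables(shape))
    # e.g. ['yetiVariableF_density', 'yetiVariableV_tint']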
@@ -1,89 +0,0 @@
-from ayon_core.hosts.maya.api import (
-    lib,
-    plugin
-)
-from ayon_core.lib import (
-    BoolDef,
-    TextDef
-)
-
-
-class CreateAnimation(plugin.MayaHiddenCreator):
-    """Animation output for character rigs
-
-    We hide the animation creator from the UI since the creation of it is
-    automated upon loading a rig. There's an inventory action to recreate it
-    for loaded rigs if by chance someone deleted the animation instance.
-    """
-    identifier = "io.openpype.creators.maya.animation"
-    name = "animationDefault"
-    label = "Animation"
-    product_type = "animation"
-    icon = "male"
-
-    write_color_sets = False
-    write_face_sets = False
-    include_parent_hierarchy = False
-    include_user_defined_attributes = False
-
-    def get_instance_attr_defs(self):
-
-        defs = lib.collect_animation_defs()
-
-        defs.extend([
-            BoolDef("writeColorSets",
-                    label="Write vertex colors",
-                    tooltip="Write vertex colors with the geometry",
-                    default=self.write_color_sets),
-            BoolDef("writeFaceSets",
-                    label="Write face sets",
-                    tooltip="Write face sets with the geometry",
-                    default=self.write_face_sets),
-            BoolDef("writeNormals",
-                    label="Write normals",
-                    tooltip="Write normals with the deforming geometry",
-                    default=True),
-            BoolDef("renderableOnly",
-                    label="Renderable Only",
-                    tooltip="Only export renderable visible shapes",
-                    default=False),
-            BoolDef("visibleOnly",
-                    label="Visible Only",
-                    tooltip="Only export dag objects visible during "
-                            "frame range",
-                    default=False),
-            BoolDef("includeParentHierarchy",
-                    label="Include Parent Hierarchy",
-                    tooltip="Whether to include parent hierarchy of nodes in "
-                            "the publish instance",
-                    default=self.include_parent_hierarchy),
-            BoolDef("worldSpace",
-                    label="World-Space Export",
-                    default=True),
-            BoolDef("includeUserDefinedAttributes",
-                    label="Include User Defined Attributes",
-                    default=self.include_user_defined_attributes),
-            TextDef("attr",
-                    label="Custom Attributes",
-                    default="",
-                    placeholder="attr1, attr2"),
-            TextDef("attrPrefix",
-                    label="Custom Attributes Prefix",
-                    placeholder="prefix1, prefix2")
-        ])
-
-        # TODO: Implement these on a Deadline plug-in instead?
-        """
-        # Default to not send to farm.
-        self.data["farm"] = False
-        self.data["priority"] = 50
-        """
-
-        return defs
-
-    def apply_settings(self, project_settings):
-        super(CreateAnimation, self).apply_settings(project_settings)
-        # Hardcoding creator to be enabled due to existing settings would
-        # disable the creator causing the creator plugin to not be
-        # discoverable.
-        self.enabled = True
@@ -0,0 +1,138 @@
+from maya import cmds
+
+from ayon_core.hosts.maya.api import lib, plugin
+
+from ayon_core.lib import (
+    BoolDef,
+    NumberDef,
+)
+
+
+def _get_animation_attr_defs(cls):
+    """Get Animation generic definitions."""
+    defs = lib.collect_animation_defs()
+    defs.extend(
+        [
+            BoolDef("farm", label="Submit to Farm"),
+            NumberDef("priority", label="Farm job Priority", default=50),
+            BoolDef("refresh", label="Refresh viewport during export"),
+            BoolDef(
+                "includeParentHierarchy",
+                label="Include Parent Hierarchy",
+                tooltip=(
+                    "Whether to include parent hierarchy of nodes in the "
+                    "publish instance."
+                )
+            ),
+            BoolDef(
+                "includeUserDefinedAttributes",
+                label="Include User Defined Attributes",
+                tooltip=(
+                    "Whether to include all custom maya attributes found "
+                    "on nodes as attributes in the Alembic data."
+                )
+            ),
+        ]
+    )
+
+    return defs
+
+
+def convert_legacy_alembic_creator_attributes(node_data, class_name):
+    """This is a legacy transfer of creator attributes to publish attributes
+    for ExtractAlembic/ExtractAnimation plugin.
+    """
+    publish_attributes = node_data["publish_attributes"]
+
+    if class_name in publish_attributes:
+        return node_data
+
+    attributes = [
+        "attr",
+        "attrPrefix",
+        "visibleOnly",
+        "writeColorSets",
+        "writeFaceSets",
+        "writeNormals",
+        "renderableOnly",
+        "visibleOnly",
+        "worldSpace",
+        "renderableOnly"
+    ]
+    plugin_attributes = {}
+    for attr in attributes:
+        if attr not in node_data["creator_attributes"]:
+            continue
+        value = node_data["creator_attributes"].pop(attr)
+
+        plugin_attributes[attr] = value
+
+    publish_attributes[class_name] = plugin_attributes
+
+    return node_data
+
+
+class CreateAnimation(plugin.MayaHiddenCreator):
+    """Animation output for character rigs
+
+    We hide the animation creator from the UI since the creation of it is
+    automated upon loading a rig. There's an inventory action to recreate it
+    for loaded rigs if by chance someone deleted the animation instance.
+    """
+
+    identifier = "io.openpype.creators.maya.animation"
+    name = "animationDefault"
+    label = "Animation"
+    product_type = "animation"
+    icon = "male"
+
+    write_color_sets = False
+    write_face_sets = False
+    include_parent_hierarchy = False
+    include_user_defined_attributes = False
+
+    def read_instance_node(self, node):
+        node_data = super(CreateAnimation, self).read_instance_node(node)
+        node_data = convert_legacy_alembic_creator_attributes(
+            node_data, "ExtractAnimation"
+        )
+        return node_data
+
+    def get_instance_attr_defs(self):
+        defs = super(CreateAnimation, self).get_instance_attr_defs()
+        defs += _get_animation_attr_defs(self)
+        return defs
+
+
+class CreatePointCache(plugin.MayaCreator):
+    """Alembic pointcache for animated data"""
+
+    identifier = "io.openpype.creators.maya.pointcache"
+    label = "Pointcache"
+    product_type = "pointcache"
+    icon = "gears"
+    write_color_sets = False
+    write_face_sets = False
+    include_user_defined_attributes = False
+
+    def read_instance_node(self, node):
+        node_data = super(CreatePointCache, self).read_instance_node(node)
+        node_data = convert_legacy_alembic_creator_attributes(
+            node_data, "ExtractAlembic"
+        )
+        return node_data
+
+    def get_instance_attr_defs(self):
+        defs = super(CreatePointCache, self).get_instance_attr_defs()
+        defs += _get_animation_attr_defs(self)
+        return defs
+
+    def create(self, product_name, instance_data, pre_create_data):
+        instance = super(CreatePointCache, self).create(
+            product_name, instance_data, pre_create_data
+        )
+        instance_node = instance.get("instance_node")
+
+        # For Arnold standin proxy
+        proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
+        cmds.sets(proxy_set, forceElement=instance_node)
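For illustration, a sketch of what convert_legacy_alembic_creator_attributes does to instance node data; the dict is hypothetical and the function is assumed to be imported from the creator module above:

    node_data = {
        "creator_attributes": {"worldSpace": True, "step": 1.0},
        "publish_attributes": {},
    }
    node_data = convert_legacy_alembic_creator_attributes(
        node_data, "ExtractAlembic"
    )
    # Legacy keys move from creator attributes to the plugin's publish
    # attributes; unrelated keys such as "step" stay where they were.
    assert node_data["publish_attributes"]["ExtractAlembic"] == {
        "worldSpace": True
    }
    assert node_data["creator_attributes"] == {"step": 1.0}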
@@ -1,3 +1,5 @@
+from maya import cmds
+
 from ayon_core.hosts.maya.api import (
     lib,
     plugin
@@ -87,16 +89,24 @@ class CreateArnoldSceneSource(plugin.MayaCreator):

         return defs


+class CreateArnoldSceneSourceProxy(CreateArnoldSceneSource):
+    """Arnold Scene Source Proxy
+
+    This product type facilitates working with proxy geometry in the viewport.
+    """
+
+    identifier = "io.openpype.creators.maya.assproxy"
+    label = "Arnold Scene Source Proxy"
+    product_type = "assProxy"
+    icon = "cube"
+
     def create(self, product_name, instance_data, pre_create_data):
-        from maya import cmds
-
         instance = super(CreateArnoldSceneSource, self).create(
             product_name, instance_data, pre_create_data
         )

         instance_node = instance.get("instance_node")

-        content = cmds.sets(name=instance_node + "_content_SET", empty=True)
         proxy = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
-        cmds.sets([content, proxy], forceElement=instance_node)
+        cmds.sets([proxy], forceElement=instance_node)
@@ -1,88 +0,0 @@
-from maya import cmds
-
-from ayon_core.hosts.maya.api import (
-    lib,
-    plugin
-)
-from ayon_core.lib import (
-    BoolDef,
-    TextDef
-)
-
-
-class CreatePointCache(plugin.MayaCreator):
-    """Alembic pointcache for animated data"""
-
-    identifier = "io.openpype.creators.maya.pointcache"
-    label = "Pointcache"
-    product_type = "pointcache"
-    icon = "gears"
-    write_color_sets = False
-    write_face_sets = False
-    include_user_defined_attributes = False
-
-    def get_instance_attr_defs(self):
-
-        defs = lib.collect_animation_defs()
-
-        defs.extend([
-            BoolDef("writeColorSets",
-                    label="Write vertex colors",
-                    tooltip="Write vertex colors with the geometry",
-                    default=False),
-            BoolDef("writeFaceSets",
-                    label="Write face sets",
-                    tooltip="Write face sets with the geometry",
-                    default=False),
-            BoolDef("renderableOnly",
-                    label="Renderable Only",
-                    tooltip="Only export renderable visible shapes",
-                    default=False),
-            BoolDef("visibleOnly",
-                    label="Visible Only",
-                    tooltip="Only export dag objects visible during "
-                            "frame range",
-                    default=False),
-            BoolDef("includeParentHierarchy",
-                    label="Include Parent Hierarchy",
-                    tooltip="Whether to include parent hierarchy of nodes in "
-                            "the publish instance",
-                    default=False),
-            BoolDef("worldSpace",
-                    label="World-Space Export",
-                    default=True),
-            BoolDef("refresh",
-                    label="Refresh viewport during export",
-                    default=False),
-            BoolDef("includeUserDefinedAttributes",
-                    label="Include User Defined Attributes",
-                    default=self.include_user_defined_attributes),
-            TextDef("attr",
-                    label="Custom Attributes",
-                    default="",
-                    placeholder="attr1, attr2"),
-            TextDef("attrPrefix",
-                    label="Custom Attributes Prefix",
-                    default="",
-                    placeholder="prefix1, prefix2")
-        ])
-
-        # TODO: Implement these on a Deadline plug-in instead?
-        """
-        # Default to not send to farm.
-        self.data["farm"] = False
-        self.data["priority"] = 50
-        """
-
-        return defs
-
-    def create(self, product_name, instance_data, pre_create_data):
-
-        instance = super(CreatePointCache, self).create(
-            product_name, instance_data, pre_create_data
-        )
-        instance_node = instance.get("instance_node")
-
-        # For Arnold standin proxy
-        proxy_set = cmds.sets(name=instance_node + "_proxy_SET", empty=True)
-        cmds.sets(proxy_set, forceElement=instance_node)
@@ -12,6 +12,7 @@ from ayon_core.hosts.maya.api.lib import (
     unique_namespace,
     get_attribute_input,
     maintained_selection,
+    get_fps_for_current_context
 )
 from ayon_core.hosts.maya.api.pipeline import containerise
 from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@@ -29,7 +30,13 @@ class ArnoldStandinLoader(load.LoaderPlugin):
     """Load as Arnold standin"""

     product_types = {
-        "ass", "animation", "model", "proxyAbc", "pointcache", "usd"
+        "ass",
+        "assProxy",
+        "animation",
+        "model",
+        "proxyAbc",
+        "pointcache",
+        "usd"
     }
     representations = {"ass", "abc", "usda", "usdc", "usd"}
@@ -95,8 +102,10 @@ class ArnoldStandinLoader(load.LoaderPlugin):
         sequence = is_sequence(os.listdir(os.path.dirname(repre_path)))
         cmds.setAttr(standin_shape + ".useFrameExtension", sequence)

-        fps = float(version_attributes.get("fps")) or 25
-        cmds.setAttr(standin_shape + ".abcFPS", fps)
+        fps = (
+            version_attributes.get("fps") or get_fps_for_current_context()
+        )
+        cmds.setAttr(standin_shape + ".abcFPS", float(fps))

         nodes = [root, standin, standin_shape]
         if operator is not None:
@@ -128,6 +137,18 @@ class ArnoldStandinLoader(load.LoaderPlugin):
         proxy_path = "/".join([os.path.dirname(path), proxy_basename])
         return proxy_basename, proxy_path

+    def _update_operators(self, string_replace_operator, proxy_basename, path):
+        cmds.setAttr(
+            string_replace_operator + ".match",
+            proxy_basename.split(".")[0],
+            type="string"
+        )
+        cmds.setAttr(
+            string_replace_operator + ".replace",
+            os.path.basename(path).split(".")[0],
+            type="string"
+        )
+
     def _setup_proxy(self, shape, path, namespace):
         proxy_basename, proxy_path = self._get_proxy_path(path)
@@ -150,16 +171,7 @@ class ArnoldStandinLoader(load.LoaderPlugin):
             "*.(@node=='{}')".format(node_type),
             type="string"
         )
-        cmds.setAttr(
-            string_replace_operator + ".match",
-            proxy_basename,
-            type="string"
-        )
-        cmds.setAttr(
-            string_replace_operator + ".replace",
-            os.path.basename(path),
-            type="string"
-        )
+        self._update_operators(string_replace_operator, proxy_basename, path)

         cmds.connectAttr(
             string_replace_operator + ".out",
@@ -194,18 +206,9 @@ class ArnoldStandinLoader(load.LoaderPlugin):
         path = get_representation_path(repre_entity)
         proxy_basename, proxy_path = self._get_proxy_path(path)

-        # Whether there is proxy or so, we still update the string operator.
+        # Whether there is proxy or not, we still update the string operator.
         # If no proxy exists, the string operator won't replace anything.
-        cmds.setAttr(
-            string_replace_operator + ".match",
-            proxy_basename,
-            type="string"
-        )
-        cmds.setAttr(
-            string_replace_operator + ".replace",
-            os.path.basename(path),
-            type="string"
-        )
+        self._update_operators(string_replace_operator, proxy_basename, path)

         dso_path = path
         if os.path.exists(proxy_path):
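Note that the extracted _update_operators helper also trims both names to their stem with split(".")[0], so the aiStringReplace operator now matches and substitutes file stems instead of full filenames. A small sketch of the trimming, with hypothetical paths:

    import os

    proxy_basename = "char_proxy.0001.ass"  # hypothetical
    path = "/publish/char/char.0001.ass"    # hypothetical

    match = proxy_basename.split(".")[0]            # "char_proxy"
    replace = os.path.basename(path).split(".")[0]  # "char"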
client/ayon_core/hosts/maya/plugins/load/load_as_template.py (new file, 39 lines)
@@ -0,0 +1,39 @@
+from ayon_core.lib import (
+    BoolDef
+)
+from ayon_core.pipeline import (
+    load,
+    registered_host
+)
+from ayon_core.hosts.maya.api.workfile_template_builder import (
+    MayaTemplateBuilder
+)
+
+
+class LoadAsTemplate(load.LoaderPlugin):
+    """Load workfile as a template """
+
+    product_types = {"workfile", "mayaScene"}
+    label = "Load as template"
+    representations = ["ma", "mb"]
+    icon = "wrench"
+    color = "#775555"
+    order = 10
+
+    options = [
+        BoolDef("keep_placeholders",
+                label="Keep Placeholders",
+                default=False),
+        BoolDef("create_first_version",
+                label="Create First Version",
+                default=False),
+    ]
+
+    def load(self, context, name, namespace, data):
+        keep_placeholders = data.get("keep_placeholders", False)
+        create_first_version = data.get("create_first_version", False)
+        path = self.filepath_from_context(context)
+        builder = MayaTemplateBuilder(registered_host())
+        builder.build_template(template_path=path,
+                               keep_placeholders=keep_placeholders,
+                               create_first_version=create_first_version)
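The loader is a thin wrapper around the template builder; the same build can be triggered from script, assuming a registered host and a template file (the path below is hypothetical):

    from ayon_core.pipeline import registered_host
    from ayon_core.hosts.maya.api.workfile_template_builder import (
        MayaTemplateBuilder
    )

    builder = MayaTemplateBuilder(registered_host())
    builder.build_template(
        template_path="/templates/shot_build.ma",  # hypothetical path
        keep_placeholders=False,
        create_first_version=False,
    )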
@@ -8,7 +8,7 @@ from ayon_core.pipeline import (
 from ayon_core.pipeline.load.utils import get_representation_path_from_context
 from ayon_core.pipeline.colorspace import (
     get_imageio_file_rules_colorspace_from_filepath,
-    get_imageio_config,
+    get_current_context_imageio_config_preset,
     get_imageio_file_rules
 )
 from ayon_core.settings import get_project_settings
@@ -270,8 +270,7 @@ class FileNodeLoader(load.LoaderPlugin):
         host_name = get_current_host_name()
         project_settings = get_project_settings(project_name)

-        config_data = get_imageio_config(
-            project_name, host_name,
+        config_data = get_current_context_imageio_config_preset(
             project_settings=project_settings
         )
@@ -12,6 +12,7 @@ from ayon_core.pipeline import (
     get_representation_path
 )
 from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import create_yeti_variable
 from ayon_core.hosts.maya.api.pipeline import containerise
 from ayon_core.hosts.maya.api.plugin import get_load_color_for_product_type
@@ -23,8 +24,19 @@ SKIP_UPDATE_ATTRS = {
     "viewportDensity",
     "viewportWidth",
     "viewportLength",
+    "renderDensity",
+    "renderWidth",
+    "renderLength",
+    "increaseRenderBounds"
 }

+SKIP_ATTR_MESSAGE = (
+    "Skipping updating %s.%s to %s because it "
+    "is considered a local overridable attribute. "
+    "Either set manually or the load the cache "
+    "anew."
+)
+

 def set_attribute(node, attr, value):
     """Wrapper of set attribute which ignores None values"""
@@ -209,9 +221,31 @@ class YetiCacheLoader(load.LoaderPlugin):

         for attr, value in node_settings["attrs"].items():
             if attr in SKIP_UPDATE_ATTRS:
+                self.log.info(
+                    SKIP_ATTR_MESSAGE, yeti_node, attr, value
+                )
                 continue
             set_attribute(attr, value, yeti_node)

+        # Set up user defined attributes
+        user_variables = node_settings.get("user_variables", {})
+        for attr, value in user_variables.items():
+            was_value_set = create_yeti_variable(
+                yeti_shape_node=yeti_node,
+                attr_name=attr,
+                value=value,
+                # We do not want to update the
+                # value if it already exists so
+                # that any local overrides that
+                # may have been applied still
+                # persist
+                force_value=False
+            )
+            if not was_value_set:
+                self.log.info(
+                    SKIP_ATTR_MESSAGE, yeti_node, attr, value
+                )
+
         cmds.setAttr("{}.representation".format(container_node),
                      repre_entity["id"],
                      typ="string")
@@ -332,6 +366,13 @@ class YetiCacheLoader(load.LoaderPlugin):
         for attr, value in attributes.items():
             set_attribute(attr, value, yeti_node)

+        # Set up user defined attributes
+        user_variables = node_settings.get("user_variables", {})
+        for attr, value in user_variables.items():
+            create_yeti_variable(yeti_shape_node=yeti_node,
+                                 attr_name=attr,
+                                 value=value)
+
         # Connect to the time node
         cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
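Because the update path passes force_value=False, an update never clobbers a variable that already exists in the scene; only missing variables get created. A sketch of the resulting return values, with a hypothetical shape and values:

    shape = "pgYetiMayaShape1"  # hypothetical pgYetiMaya shape node

    # First load: attribute is missing, so it is created and set.
    create_yeti_variable(shape, "yetiVariableF_clumping", value=0.8)  # True

    # Update: attribute exists, value stays untouched, False comes back
    # and the SKIP_ATTR_MESSAGE above is logged.
    create_yeti_variable(shape, "yetiVariableF_clumping", value=0.5)  # False

    # An explicit override is still possible:
    create_yeti_variable(
        shape, "yetiVariableF_clumping", value=0.5, force_value=True)  # True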
@@ -58,4 +58,3 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):

         if instance.data.get("farm"):
             instance.data["families"].append("publish.farm")
-
@@ -10,21 +10,23 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
     # Offset to be after renderable camera collection.
     order = pyblish.api.CollectorOrder + 0.2
     label = "Collect Arnold Scene Source"
-    families = ["ass"]
+    families = ["ass", "assProxy"]

     def process(self, instance):
-        objsets = instance.data["setMembers"]
+        instance.data["members"] = []
+        for set_member in instance.data["setMembers"]:
+            if cmds.nodeType(set_member) != "objectSet":
+                instance.data["members"].extend(self.get_hierarchy(set_member))
+                continue

-        for objset in objsets:
-            objset = str(objset)
-            members = cmds.sets(objset, query=True)
+            members = cmds.sets(set_member, query=True)
             members = cmds.ls(members, long=True)
             if members is None:
-                self.log.warning("Skipped empty instance: \"%s\" " % objset)
+                self.log.warning(
+                    "Skipped empty instance: \"%s\" " % set_member
+                )
                 continue
-            if objset.endswith("content_SET"):
-                instance.data["contentMembers"] = self.get_hierarchy(members)
-            if objset.endswith("proxy_SET"):
+            if set_member.endswith("proxy_SET"):
                 instance.data["proxy"] = self.get_hierarchy(members)

         # Use camera in object set if present else default to render globals
@@ -33,7 +35,7 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
         renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
         if renderable:
             camera = renderable[0]
-            for node in instance.data["contentMembers"]:
+            for node in instance.data["members"]:
                 camera_shapes = cmds.listRelatives(
                     node, shapes=True, type="camera"
                 )
@@ -46,18 +48,11 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
         self.log.debug("data: {}".format(instance.data))

     def get_hierarchy(self, nodes):
-        """Return nodes with all their children.
-
-        Arguments:
-            nodes (List[str]): List of nodes to collect children hierarchy for
-
-        Returns:
-            list: Input nodes with their children hierarchy
-
-        """
+        """Return nodes with all their children"""
         nodes = cmds.ls(nodes, long=True)
         if not nodes:
             return []
-        children = get_all_children(nodes, ignore_intermediate_objects=True)
-        return list(children.union(nodes))
+        children = get_all_children(nodes)
+        # Make sure nodes merged with children only
+        # contains unique entries
+        return list(set(nodes + list(children)))
@@ -14,7 +14,9 @@ class CollectUserDefinedAttributes(pyblish.api.InstancePlugin):
     def process(self, instance):

         # Collect user defined attributes.
-        if not instance.data.get("includeUserDefinedAttributes", False):
+        if not instance.data["creator_attributes"].get(
+            "includeUserDefinedAttributes"
+        ):
             return

         if "out_hierarchy" in instance.data:
@@ -3,6 +3,7 @@ from maya import cmds
 import pyblish.api

 from ayon_core.hosts.maya.api import lib
+from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables


 SETTINGS = {
@@ -34,7 +35,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
         - "increaseRenderBounds"
         - "imageSearchPath"

-    Other information is the name of the transform and it's Colorbleed ID
+    Other information is the name of the transform and its `cbId`
     """

     order = pyblish.api.CollectorOrder + 0.45
@@ -54,6 +55,16 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
             # Get specific node attributes
             attr_data = {}
             for attr in SETTINGS:
+                # Ignore non-existing attributes with a warning, e.g. cbId
+                # if they have not been generated yet
+                if not cmds.attributeQuery(attr, node=shape, exists=True):
+                    self.log.warning(
+                        "Attribute '{}' not found on Yeti node: {}".format(
+                            attr, shape
+                        )
+                    )
+                    continue
+
                 current = cmds.getAttr("%s.%s" % (shape, attr))
                 # change None to empty string as Maya doesn't support
                 # NoneType in attributes
@@ -61,6 +72,12 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
                     current = ""
                 attr_data[attr] = current

+            # Get user variable attributes
+            user_variable_attrs = {
+                attr: lib.get_attribute("{}.{}".format(shape, attr))
+                for attr in get_yeti_user_variables(shape)
+            }
+
             # Get transform data
             parent = cmds.listRelatives(shape, parent=True)[0]
             transform_data = {"name": parent, "cbId": lib.get_id(parent)}
@@ -70,6 +87,7 @@ class CollectYetiCache(pyblish.api.InstancePlugin):
                 "name": shape,
                 "cbId": lib.get_id(shape),
                 "attrs": attr_data,
+                "user_variables": user_variable_attrs
             }

             settings["nodes"].append(shape_data)
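The user-variable collection step in isolation looks roughly like this (hypothetical shape name; a Maya session with the Yeti node is assumed):

    from ayon_core.hosts.maya.api import lib
    from ayon_core.hosts.maya.api.yeti import get_yeti_user_variables

    shape = "pgYetiMayaShape1"  # hypothetical
    user_variable_attrs = {
        attr: lib.get_attribute("{}.{}".format(shape, attr))
        for attr in get_yeti_user_variables(shape)
    }
    # e.g. {"yetiVariableF_clumping": 0.8}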
@@ -17,8 +17,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
     families = ["ass"]
     asciiAss = False

-    def process(self, instance):
-        staging_dir = self.staging_dir(instance)
+    def _pre_process(self, instance, staging_dir):
         file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))

         # Mask
@@ -70,24 +69,38 @@ class ExtractArnoldSceneSource(publish.Extractor):
             "mask": mask
         }

-        filenames, nodes_by_id = self._extract(
-            instance.data["contentMembers"], attribute_data, kwargs
-        )
-
         if "representations" not in instance.data:
             instance.data["representations"] = []

+        return attribute_data, kwargs
+
+    def process(self, instance):
+        staging_dir = self.staging_dir(instance)
+        attribute_data, kwargs = self._pre_process(instance, staging_dir)
+
+        filenames = self._extract(
+            instance.data["members"], attribute_data, kwargs
+        )
+
+        self._post_process(
+            instance, filenames, staging_dir, kwargs["startFrame"]
+        )
+
+    def _post_process(self, instance, filenames, staging_dir, frame_start):
+        nodes_by_id = self._nodes_by_id(instance[:])
         representation = {
             "name": "ass",
             "ext": "ass",
             "files": filenames if len(filenames) > 1 else filenames[0],
             "stagingDir": staging_dir,
-            "frameStart": kwargs["startFrame"]
+            "frameStart": frame_start
         }

         instance.data["representations"].append(representation)

-        json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
+        json_path = os.path.join(
+            staging_dir, "{}.json".format(instance.name)
+        )
         with open(json_path, "w") as f:
             json.dump(nodes_by_id, f)
@@ -104,13 +117,68 @@ class ExtractArnoldSceneSource(publish.Extractor):
             "Extracted instance {} to: {}".format(instance.name, staging_dir)
         )

-        # Extract proxy.
-        if not instance.data.get("proxy", []):
-            return
-
-        kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
-
-        filenames, _ = self._extract(
+    def _nodes_by_id(self, nodes):
+        nodes_by_id = defaultdict(list)
+
+        for node in nodes:
+            id = lib.get_id(node)
+
+            if id is None:
+                continue
+
+            # Converting Maya hierarchy separator "|" to Arnold separator "/".
+            nodes_by_id[id].append(node.replace("|", "/"))
+
+        return nodes_by_id
+
+    def _extract(self, nodes, attribute_data, kwargs):
+        filenames = []
+        with lib.attribute_values(attribute_data):
+            with lib.maintained_selection():
+                self.log.debug(
+                    "Writing: {}".format(nodes)
+                )
+                cmds.select(nodes, noExpand=True)
+
+                self.log.debug(
+                    "Extracting ass sequence with: {}".format(kwargs)
+                )
+
+                exported_files = cmds.arnoldExportAss(**kwargs)
+
+                for file in exported_files:
+                    filenames.append(os.path.split(file)[1])
+
+                self.log.debug("Exported: {}".format(filenames))
+
+        return filenames
+
+
+class ExtractArnoldSceneSourceProxy(ExtractArnoldSceneSource):
+    """Extract the content of the instance to an Arnold Scene Source file."""
+
+    label = "Extract Arnold Scene Source Proxy"
+    hosts = ["maya"]
+    families = ["assProxy"]
+    asciiAss = True
+
+    def process(self, instance):
+        staging_dir = self.staging_dir(instance)
+        attribute_data, kwargs = self._pre_process(instance, staging_dir)
+
+        filenames, _ = self._duplicate_extract(
+            instance.data["members"], attribute_data, kwargs
+        )
+
+        self._post_process(
+            instance, filenames, staging_dir, kwargs["startFrame"]
+        )
+
+        kwargs["filename"] = os.path.join(
+            staging_dir, "{}_proxy.ass".format(instance.name)
+        )
+
+        filenames, _ = self._duplicate_extract(
             instance.data["proxy"], attribute_data, kwargs
         )
@@ -125,12 +193,11 @@ class ExtractArnoldSceneSource(publish.Extractor):

         instance.data["representations"].append(representation)

-    def _extract(self, nodes, attribute_data, kwargs):
+    def _duplicate_extract(self, nodes, attribute_data, kwargs):
         self.log.debug(
             "Writing {} with:\n{}".format(kwargs["filename"], kwargs)
         )
         filenames = []
-        nodes_by_id = defaultdict(list)
         # Duplicating nodes so they are direct children of the world. This
         # makes the hierarchy of any exported ass file the same.
         with lib.delete_after() as delete_bin:
@@ -147,7 +214,9 @@ class ExtractArnoldSceneSource(publish.Extractor):
                 if not shapes:
                     continue

-                duplicate_transform = cmds.duplicate(node)[0]
+                basename = cmds.duplicate(node)[0]
+                parents = cmds.ls(node, long=True)[0].split("|")[:-1]
+                duplicate_transform = "|".join(parents + [basename])

                 if cmds.listRelatives(duplicate_transform, parent=True):
                     duplicate_transform = cmds.parent(
@@ -172,28 +241,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
                 duplicate_nodes.extend(shapes)
                 delete_bin.append(duplicate_transform)

-            # Copy cbId to mtoa_constant.
-            for node in duplicate_nodes:
-                # Converting Maya hierarchy separator "|" to Arnold
-                # separator "/".
-                nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
-
-        with lib.attribute_values(attribute_data):
-            with lib.maintained_selection():
-                self.log.debug(
-                    "Writing: {}".format(duplicate_nodes)
-                )
-                cmds.select(duplicate_nodes, noExpand=True)
-
-                self.log.debug(
-                    "Extracting ass sequence with: {}".format(kwargs)
-                )
-
-                exported_files = cmds.arnoldExportAss(**kwargs)
-
-                for file in exported_files:
-                    filenames.append(os.path.split(file)[1])
-
-                self.log.debug("Exported: {}".format(filenames))
+            nodes_by_id = self._nodes_by_id(duplicate_nodes)
+            filenames = self._extract(duplicate_nodes, attribute_data, kwargs)

         return filenames, nodes_by_id
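The _nodes_by_id helper factored out above groups nodes by their cbId and rewrites Maya's "|" hierarchy separator to Arnold's "/". A self-contained sketch with an injected id lookup in place of lib.get_id (node path and id are hypothetical):

    from collections import defaultdict

    def nodes_by_id_sketch(nodes, get_id):
        # Mirrors ExtractArnoldSceneSource._nodes_by_id without Maya.
        nodes_by_id = defaultdict(list)
        for node in nodes:
            node_id = get_id(node)
            if node_id is None:
                continue
            # Maya "|" separators become Arnold "/" separators.
            nodes_by_id[node_id].append(node.replace("|", "/"))
        return nodes_by_id

    ids = {"|char_GRP|body_GEO": "abc123"}  # hypothetical cbId mapping
    print(dict(nodes_by_id_sketch(["|char_GRP|body_GEO"], ids.get)))
    # {'abc123': ['/char_GRP/body_GEO']}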
@@ -2,7 +2,7 @@ import os
 import json

 from ayon_core.pipeline import publish
-from ayon_core.hosts.maya.api.lib import extract_alembic
+from ayon_core.hosts.maya.api.alembic import extract_alembic

 from maya import cmds
@@ -35,7 +35,8 @@ class ExtractFBXAnimation(publish.Extractor):
         fbx_exporter = fbx.FBXExtractor(log=self.log)
         out_members = instance.data.get("animated_skeleton", [])
         # Export
-        instance.data["constraints"] = True
+        # TODO: need to set up the options for users to set up
+        # the flags they intended to export
         instance.data["skeletonDefinitions"] = True
         instance.data["referencedAssetsContent"] = True
         fbx_exporter.set_options_from_instance(instance)
@@ -1,17 +1,29 @@
 import os
+from collections import OrderedDict

 from maya import cmds

 from ayon_core.pipeline import publish
+from ayon_core.hosts.maya.api.alembic import extract_alembic
 from ayon_core.hosts.maya.api.lib import (
-    extract_alembic,
+    get_all_children,
     suspended_refresh,
     maintained_selection,
     iter_visible_nodes_in_range
 )
+from ayon_core.lib import (
+    BoolDef,
+    TextDef,
+    NumberDef,
+    EnumDef,
+    UISeparatorDef,
+    UILabelDef,
+)
+from ayon_core.pipeline.publish import AYONPyblishPluginMixin
+from ayon_core.pipeline import KnownPublishError


-class ExtractAlembic(publish.Extractor):
+class ExtractAlembic(publish.Extractor, AYONPyblishPluginMixin):
     """Produce an alembic of just point positions and normals.

     Positions and normals, uvs, creases are preserved, but nothing more,
@@ -27,8 +39,35 @@ class ExtractAlembic(publish.Extractor):
     targets = ["local", "remote"]

     # From settings
+    attr = []
+    attrPrefix = []
     bake_attributes = []
     bake_attribute_prefixes = []
+    dataFormat = "ogawa"
+    eulerFilter = False
+    melPerFrameCallback = ""
+    melPostJobCallback = ""
+    overrides = []
+    preRoll = False
+    preRollStartFrame = 0
+    pythonPerFrameCallback = ""
+    pythonPostJobCallback = ""
+    renderableOnly = False
+    stripNamespaces = True
+    uvsOnly = False
+    uvWrite = False
+    userAttr = ""
+    userAttrPrefix = ""
+    verbose = False
+    visibleOnly = False
+    wholeFrameGeo = False
+    worldSpace = True
+    writeColorSets = False
+    writeCreases = False
+    writeFaceSets = False
+    writeNormals = True
+    writeUVSets = False
+    writeVisibility = False

     def process(self, instance):
         if instance.data.get("farm"):
@@ -41,16 +80,38 @@ class ExtractAlembic(publish.Extractor):
         start = float(instance.data.get("frameStartHandle", 1))
         end = float(instance.data.get("frameEndHandle", 1))

-        attrs = instance.data.get("attr", "").split(";")
-        attrs = [value for value in attrs if value.strip()]
+        attribute_values = self.get_attr_values_from_data(
+            instance.data
+        )
+
+        attrs = [
+            attr.strip()
+            for attr in attribute_values.get("attr", "").split(";")
+            if attr.strip()
+        ]
         attrs += instance.data.get("userDefinedAttributes", [])
         attrs += self.bake_attributes
         attrs += ["cbId"]

-        attr_prefixes = instance.data.get("attrPrefix", "").split(";")
-        attr_prefixes = [value for value in attr_prefixes if value.strip()]
+        attr_prefixes = [
+            attr.strip()
+            for attr in attribute_values.get("attrPrefix", "").split(";")
+            if attr.strip()
+        ]
         attr_prefixes += self.bake_attribute_prefixes

+        user_attrs = [
+            attr.strip()
+            for attr in attribute_values.get("userAttr", "").split(";")
+            if attr.strip()
+        ]
+
+        user_attr_prefixes = [
+            attr.strip()
+            for attr in attribute_values.get("userAttrPrefix", "").split(";")
+            if attr.strip()
+        ]
+
         self.log.debug("Extracting pointcache..")
         dirname = self.staging_dir(instance)
@@ -58,28 +119,82 @@ class ExtractAlembic(publish.Extractor):
         filename = "{name}.abc".format(**instance.data)
         path = os.path.join(parent_dir, filename)

-        options = {
-            "step": instance.data.get("step", 1.0),
-            "attr": attrs,
-            "attrPrefix": attr_prefixes,
-            "writeVisibility": True,
-            "writeCreases": True,
-            "writeColorSets": instance.data.get("writeColorSets", False),
-            "writeFaceSets": instance.data.get("writeFaceSets", False),
-            "uvWrite": True,
-            "selection": True,
-            "worldSpace": instance.data.get("worldSpace", True)
-        }
+        root = None

         if not instance.data.get("includeParentHierarchy", True):
             # Set the root nodes if we don't want to include parents
             # The roots are to be considered the ones that are the actual
             # direct members of the set
-            options["root"] = roots
+            root = roots

-        if int(cmds.about(version=True)) >= 2017:
-            # Since Maya 2017 alembic supports multiple uv sets - write them.
-            options["writeUVSets"] = True
+        kwargs = {
+            "file": path,
+            "attr": attrs,
+            "attrPrefix": attr_prefixes,
+            "userAttr": user_attrs,
+            "userAttrPrefix": user_attr_prefixes,
+            "dataFormat": attribute_values.get("dataFormat", self.dataFormat),
+            "endFrame": end,
+            "eulerFilter": attribute_values.get(
+                "eulerFilter", self.eulerFilter
+            ),
+            "preRoll": attribute_values.get("preRoll", self.preRoll),
+            "preRollStartFrame": attribute_values.get(
+                "preRollStartFrame", self.preRollStartFrame
+            ),
+            "renderableOnly": attribute_values.get(
+                "renderableOnly", self.renderableOnly
+            ),
+            "root": root,
+            "selection": True,
+            "startFrame": start,
+            "step": instance.data.get(
+                "creator_attributes", {}
+            ).get("step", 1.0),
+            "stripNamespaces": attribute_values.get(
+                "stripNamespaces", self.stripNamespaces
+            ),
+            "uvWrite": attribute_values.get("uvWrite", self.uvWrite),
+            "verbose": attribute_values.get("verbose", self.verbose),
+            "wholeFrameGeo": attribute_values.get(
+                "wholeFrameGeo", self.wholeFrameGeo
+            ),
+            "worldSpace": attribute_values.get("worldSpace", self.worldSpace),
+            "writeColorSets": attribute_values.get(
+                "writeColorSets", self.writeColorSets
+            ),
+            "writeCreases": attribute_values.get(
+                "writeCreases", self.writeCreases
+            ),
+            "writeFaceSets": attribute_values.get(
+                "writeFaceSets", self.writeFaceSets
+            ),
+            "writeUVSets": attribute_values.get(
+                "writeUVSets", self.writeUVSets
+            ),
+            "writeVisibility": attribute_values.get(
+                "writeVisibility", self.writeVisibility
+            ),
+            "uvsOnly": attribute_values.get(
+                "uvsOnly", self.uvsOnly
+            ),
+            "melPerFrameCallback": attribute_values.get(
+                "melPerFrameCallback", self.melPerFrameCallback
+            ),
+            "melPostJobCallback": attribute_values.get(
+                "melPostJobCallback", self.melPostJobCallback
+            ),
+            "pythonPerFrameCallback": attribute_values.get(
+                "pythonPerFrameCallback", self.pythonPostJobCallback
+            ),
+            "pythonPostJobCallback": attribute_values.get(
+                "pythonPostJobCallback", self.pythonPostJobCallback
+            ),
+            # Note that this converts `writeNormals` to `noNormals` for the
+            # `AbcExport` equivalent in `extract_alembic`
+            "noNormals": not attribute_values.get(
+                "writeNormals", self.writeNormals
+            ),
+        }

         if instance.data.get("visibleOnly", False):
             # If we only want to include nodes that are visible in the frame
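One subtlety in the kwargs built above: the settings expose writeNormals, but the AbcExport-style argument consumed by extract_alembic is the inverted noNormals flag, so the value is negated once when the kwargs are assembled. A minimal sketch of the inversion (values hypothetical):

    attribute_values = {"writeNormals": True}  # hypothetical collected value
    write_normals_default = True

    # writeNormals=True must become noNormals=False for the exporter.
    no_normals = not attribute_values.get(
        "writeNormals", write_normals_default)
    assert no_normals is False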
@@ -87,20 +202,19 @@ class ExtractAlembic(publish.Extractor):
             # flag does not filter out those that are only hidden on some
             # frames as it counts "animated" or "connected" visibilities as
             # if it's always visible.
-            nodes = list(iter_visible_nodes_in_range(nodes,
-                                                     start=start,
-                                                     end=end))
+            nodes = list(
+                iter_visible_nodes_in_range(nodes, start=start, end=end)
+            )

         suspend = not instance.data.get("refresh", False)
         with suspended_refresh(suspend=suspend):
             with maintained_selection():
                 cmds.select(nodes, noExpand=True)
-                extract_alembic(
-                    file=path,
-                    startFrame=start,
-                    endFrame=end,
-                    **options
+                self.log.debug(
+                    "Running `extract_alembic` with the keyword arguments: "
+                    "{}".format(kwargs)
                 )
+                extract_alembic(**kwargs)

         if "representations" not in instance.data:
             instance.data["representations"] = []
@@ -124,22 +238,17 @@ class ExtractAlembic(publish.Extractor):
            return

        path = path.replace(".abc", "_proxy.abc")
        kwargs["file"] = path
        if not instance.data.get("includeParentHierarchy", True):
            # Set the root nodes if we don't want to include parents
            # The roots are to be considered the ones that are the actual
            # direct members of the set
            options["root"] = instance.data["proxyRoots"]
            kwargs["root"] = instance.data["proxyRoots"]

        with suspended_refresh(suspend=suspend):
            with maintained_selection():
                cmds.select(instance.data["proxy"])
                extract_alembic(
                    file=path,
                    startFrame=start,
                    endFrame=end,
                    **options
                )
                extract_alembic(**kwargs)

        representation = {
            "name": "proxy",
            "ext": "abc",
@@ -152,24 +261,265 @@ class ExtractAlembic(publish.Extractor):
    def get_members_and_roots(self, instance):
        return instance[:], instance.data.get("setMembers")

    @classmethod
    def get_attribute_defs(cls):
        if not cls.overrides:
            return []

        override_defs = OrderedDict({
            "eulerFilter": BoolDef(
                "eulerFilter",
                label="Euler Filter",
                default=cls.eulerFilter,
                tooltip="Apply Euler filter while sampling rotations."
            ),
            "renderableOnly": BoolDef(
                "renderableOnly",
                label="Renderable Only",
                default=cls.renderableOnly,
                tooltip="Only export renderable visible shapes."
            ),
            "stripNamespaces": BoolDef(
                "stripNamespaces",
                label="Strip Namespaces",
                default=cls.stripNamespaces,
                tooltip=(
                    "Namespaces will be stripped off of the node before being "
                    "written to Alembic."
                )
            ),
            "uvsOnly": BoolDef(
                "uvsOnly",
                label="UVs Only",
                default=cls.uvsOnly,
                tooltip=(
                    "If this flag is present, only uv data for PolyMesh and "
                    "SubD shapes will be written to the Alembic file."
                )
            ),
            "uvWrite": BoolDef(
                "uvWrite",
                label="UV Write",
                default=cls.uvWrite,
                tooltip=(
                    "Uv data for PolyMesh and SubD shapes will be written to "
                    "the Alembic file."
                )
            ),
            "verbose": BoolDef(
                "verbose",
                label="Verbose",
                default=cls.verbose,
                tooltip="Prints the current frame that is being evaluated."
            ),
            "visibleOnly": BoolDef(
                "visibleOnly",
                label="Visible Only",
                default=cls.visibleOnly,
                tooltip="Only export dag objects visible during frame range."
            ),
            "wholeFrameGeo": BoolDef(
                "wholeFrameGeo",
                label="Whole Frame Geo",
                default=cls.wholeFrameGeo,
                tooltip=(
                    "Data for geometry will only be written out on whole "
                    "frames."
                )
            ),
            "worldSpace": BoolDef(
                "worldSpace",
                label="World Space",
                default=cls.worldSpace,
                tooltip="Any root nodes will be stored in world space."
            ),
            "writeColorSets": BoolDef(
                "writeColorSets",
                label="Write Color Sets",
                default=cls.writeColorSets,
                tooltip="Write vertex colors with the geometry."
            ),
            "writeCreases": BoolDef(
                "writeCreases",
                label="Write Creases",
                default=cls.writeCreases,
                tooltip="Write the geometry's edge and vertex crease "
                        "information."
            ),
            "writeFaceSets": BoolDef(
                "writeFaceSets",
                label="Write Face Sets",
                default=cls.writeFaceSets,
                tooltip="Write face sets with the geometry."
            ),
            "writeNormals": BoolDef(
                "writeNormals",
                label="Write Normals",
                default=cls.writeNormals,
                tooltip="Write normals with the deforming geometry."
            ),
            "writeUVSets": BoolDef(
                "writeUVSets",
                label="Write UV Sets",
                default=cls.writeUVSets,
                tooltip=(
                    "Write all uv sets on MFnMeshes as vector 2 indexed "
                    "geometry parameters with face varying scope."
                )
            ),
            "writeVisibility": BoolDef(
                "writeVisibility",
                label="Write Visibility",
                default=cls.writeVisibility,
                tooltip=(
                    "Visibility state will be stored in the Alembic file. "
                    "Otherwise everything written out is treated as visible."
                )
            ),
            "preRoll": BoolDef(
                "preRoll",
                label="Pre Roll",
                default=cls.preRoll,
                tooltip="This frame range will not be sampled."
            ),
            "preRollStartFrame": NumberDef(
                "preRollStartFrame",
                label="Pre Roll Start Frame",
                tooltip=(
                    "The frame to start scene evaluation at. This is used"
                    " to set the starting frame for time dependent "
                    "translations and can be used to evaluate run-up that"
                    " isn't actually translated."
                ),
                default=cls.preRollStartFrame
            ),
            "dataFormat": EnumDef(
                "dataFormat",
                label="Data Format",
                items=["ogawa", "HDF"],
                default=cls.dataFormat,
                tooltip="The data format to use to write the file."
            ),
            "attr": TextDef(
                "attr",
                label="Custom Attributes",
                placeholder="attr1; attr2; ...",
                default=cls.attr,
                tooltip=(
                    "Attributes matching by name will be included in the "
                    "Alembic export. Attributes should be separated by "
                    "semi-colon `;`"
                )
            ),
            "attrPrefix": TextDef(
                "attrPrefix",
                label="Custom Attributes Prefix",
                placeholder="prefix1; prefix2; ...",
                default=cls.attrPrefix,
                tooltip=(
                    "Attributes starting with these prefixes will be included "
                    "in the Alembic export. Attributes should be separated by "
                    "semi-colon `;`"
                )
            ),
            "userAttr": TextDef(
                "userAttr",
                label="User Attr",
                placeholder="attr1; attr2; ...",
                default=cls.userAttr,
                tooltip=(
                    "Attributes matching by name will be included in the "
                    "Alembic export. Attributes should be separated by "
                    "semi-colon `;`"
                )
            ),
            "userAttrPrefix": TextDef(
                "userAttrPrefix",
                label="User Attr Prefix",
                placeholder="prefix1; prefix2; ...",
                default=cls.userAttrPrefix,
                tooltip=(
                    "Attributes starting with these prefixes will be included "
                    "in the Alembic export. Attributes should be separated by "
                    "semi-colon `;`"
                )
            ),
            "melPerFrameCallback": TextDef(
                "melPerFrameCallback",
                label="Mel Per Frame Callback",
                default=cls.melPerFrameCallback,
                tooltip=(
                    "When each frame (and the static frame) is evaluated the "
                    "string specified is evaluated as a Mel command."
                )
            ),
            "melPostJobCallback": TextDef(
                "melPostJobCallback",
                label="Mel Post Job Callback",
                default=cls.melPostJobCallback,
                tooltip=(
                    "When the translation has finished the string specified "
                    "is evaluated as a Mel command."
                )
            ),
            "pythonPerFrameCallback": TextDef(
                "pythonPerFrameCallback",
                label="Python Per Frame Callback",
                default=cls.pythonPerFrameCallback,
                tooltip=(
                    "When each frame (and the static frame) is evaluated the "
                    "string specified is evaluated as a python command."
                )
            ),
            "pythonPostJobCallback": TextDef(
                "pythonPostJobCallback",
                label="Python Post Job Callback",
                default=cls.pythonPostJobCallback,
                tooltip=(
                    "When the translation has finished the string specified "
                    "is evaluated as a python command."
                )
            )
        })

        defs = super(ExtractAlembic, cls).get_attribute_defs()

        defs.extend([
            UISeparatorDef("sep_alembic_options"),
            UILabelDef("Alembic Options"),
        ])

        # The Arguments that can be modified by the Publisher
        overrides = set(cls.overrides)
        for key, value in override_defs.items():
            if key not in overrides:
                continue

            defs.append(value)

        defs.append(
            UISeparatorDef("sep_alembic_options_end")
        )

        return defs


class ExtractAnimation(ExtractAlembic):
    label = "Extract Animation"
    label = "Extract Animation (Alembic)"
    families = ["animation"]

    def get_members_and_roots(self, instance):

        # Collect the out set nodes
        out_sets = [node for node in instance if node.endswith("out_SET")]
        if len(out_sets) != 1:
            raise RuntimeError("Couldn't find exactly one out_SET: "
                               "{0}".format(out_sets))
            raise KnownPublishError(
                "Couldn't find exactly one out_SET: {0}".format(out_sets)
            )
        out_set = out_sets[0]
        roots = cmds.sets(out_set, query=True)
        roots = cmds.sets(out_set, query=True) or []

        # Include all descendants
        nodes = roots + cmds.listRelatives(roots,
                                           allDescendents=True,
                                           fullPath=True) or []
        nodes = roots.copy()
        nodes.extend(get_all_children(roots, ignore_intermediate_objects=True))

        return nodes, roots
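Note: `get_attribute_defs` only exposes the subset of definitions named in `cls.overrides`; everything else keeps its settings-driven default and never appears in the publisher UI. A minimal sketch of that filtering, with hypothetical values:

    override_defs = {"worldSpace": "<BoolDef>", "writeUVSets": "<BoolDef>"}
    overrides = {"worldSpace"}  # hypothetical project configuration
    # only keys listed in `overrides` make it into the UI definitions
    exposed = [value for key, value in override_defs.items() if key in overrides]
    assert exposed == ["<BoolDef>"]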
@@ -3,8 +3,8 @@ import os
from maya import cmds

from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
    extract_alembic,
    suspended_refresh,
    maintained_selection,
    iter_visible_nodes_in_range
@@ -5,8 +5,8 @@ import os
from maya import cmds  # noqa

from ayon_core.pipeline import publish
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.hosts.maya.api.lib import (
    extract_alembic,
    suspended_refresh,
    maintained_selection
)
@@ -5,7 +5,7 @@ import copy
from maya import cmds

import pyblish.api
from ayon_core.hosts.maya.api.lib import extract_alembic
from ayon_core.hosts.maya.api.alembic import extract_alembic
from ayon_core.pipeline import publish
@@ -0,0 +1,130 @@
import inspect
import pyblish.api

from ayon_core.pipeline import OptionalPyblishPluginMixin
from ayon_core.pipeline.publish import RepairAction, PublishValidationError


class ValidateAlembicDefaultsPointcache(
    pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
    """Validate the attributes on the instance are defaults.

    The defaults are defined in the project settings.
    """

    order = pyblish.api.ValidatorOrder
    families = ["pointcache"]
    hosts = ["maya"]
    label = "Validate Alembic Options Defaults"
    actions = [RepairAction]
    optional = True

    plugin_name = "ExtractAlembic"

    @classmethod
    def _get_settings(cls, context):
        maya_settings = context.data["project_settings"]["maya"]
        settings = maya_settings["publish"]["ExtractAlembic"]
        return settings

    @classmethod
    def _get_publish_attributes(cls, instance):
        return instance.data["publish_attributes"][cls.plugin_name]

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        settings = self._get_settings(instance.context)
        attributes = self._get_publish_attributes(instance)

        invalid = {}
        for key, value in attributes.items():
            if key not in settings:
                # This may occur if attributes have changed over time and an
                # existing instance has older legacy attributes that do not
                # match the current settings definition.
                self.log.warning(
                    "Publish attribute %s not found in Alembic Export "
                    "default settings. Ignoring validation for attribute.",
                    key
                )
                continue

            default_value = settings[key]

            # Lists are best compared sorted since we can't rely on the order
            # of the items.
            if isinstance(value, list):
                value = sorted(value)
                default_value = sorted(default_value)

            if value != default_value:
                invalid[key] = value, default_value

        if invalid:
            non_defaults = "\n".join(
                f"- {key}: {value} \t(default: {default_value})"
                for key, (value, default_value) in invalid.items()
            )

            raise PublishValidationError(
                "Alembic extract options differ from default values:\n"
                f"{non_defaults}",
                description=self.get_description()
            )

    @staticmethod
    def get_description():
        return inspect.cleandoc(
            """### Alembic Extract settings differ from defaults

            The alembic export options differ from the project default values.

            If this is intentional you can disable this validation by
            disabling **Validate Alembic Options Defaults**.

            If not you may use the "Repair" action to revert all the options
            to their default values.
            """
        )

    @classmethod
    def repair(cls, instance):
        # Find create instance twin.
        create_context = instance.context.data["create_context"]
        create_instance = create_context.get_instance_by_id(
            instance.data["instance_id"]
        )

        # Set the settings values on the create context then save to workfile.
        settings = cls._get_settings(instance.context)
        attributes = cls._get_publish_attributes(create_instance)
        for key in attributes:
            if key not in settings:
                # This may occur if attributes have changed over time and an
                # existing instance has older legacy attributes that do not
                # match the current settings definition.
                cls.log.warning(
                    "Publish attribute %s not found in Alembic Export "
                    "default settings. Ignoring repair for attribute.",
                    key
                )
                continue
            attributes[key] = settings[key]

        create_context.save_changes()


class ValidateAlembicDefaultsAnimation(
    ValidateAlembicDefaultsPointcache
):
    """Validate the attributes on the instance are defaults.

    The defaults are defined in the project settings.
    """
    label = "Validate Alembic Options Defaults"
    families = ["animation"]
    plugin_name = "ExtractAnimation"
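Note: the sorted() comparison in the validator matters for list-valued options such as `attr` or `userAttr`: two attribute lists with the same members in a different order should not fail validation. Illustrative values:

    value = ["attrB", "attrA"]          # instance value (hypothetical)
    default_value = ["attrA", "attrB"]  # project default (hypothetical)
    assert value != default_value                  # naive compare: false positive
    assert sorted(value) == sorted(default_value)  # order-insensitive: matches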
@@ -1,71 +0,0 @@
import pyblish.api

import ayon_core.hosts.maya.api.action
from ayon_core.pipeline.publish import (
    PublishValidationError,
    ValidateContentsOrder,
    OptionalPyblishPluginMixin
)
from maya import cmds


class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin,
                                   OptionalPyblishPluginMixin):
    """Validate all nodes in skeletonAnim_SET are referenced"""

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["animation.fbx"]
    label = "Animated Reference Rig"
    accepted_controllers = ["transform", "locator"]
    actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction]
    optional = False

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        animated_sets = instance.data.get("animated_skeleton", [])
        if not animated_sets:
            self.log.debug(
                "No nodes found in skeletonAnim_SET. "
                "Skipping validation of animated reference rig..."
            )
            return

        for animated_reference in animated_sets:
            is_referenced = cmds.referenceQuery(
                animated_reference, isNodeReferenced=True)
            if not bool(is_referenced):
                raise PublishValidationError(
                    "All the content in skeletonAnim_SET"
                    " should be referenced nodes"
                )
        invalid_controls = self.validate_controls(animated_sets)
        if invalid_controls:
            raise PublishValidationError(
                "All the content in skeletonAnim_SET"
                " should be transforms"
            )

    @classmethod
    def validate_controls(cls, set_members):
        """Check if the controller set contains only accepted node types.

        Checks if all its set members are within the hierarchy of the root.
        Checks if the node types of the set members are valid.

        Args:
            set_members: list of nodes of the skeleton_anim_set
            hierarchy: list of nodes which reside under the root node

        Returns:
            errors (list)
        """

        # Validate control types
        invalid = []
        set_members = cmds.ls(set_members, long=True)
        for node in set_members:
            if cmds.nodeType(node) not in cls.accepted_controllers:
                invalid.append(node)

        return invalid
@@ -1,30 +1,56 @@
from maya import cmds

import pyblish.api

from ayon_core.pipeline.publish import (
    ValidateContentsOrder, PublishValidationError
)
from ayon_core.hosts.maya.api.lib import is_visible


class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
    """Validate Arnold Scene Source.

    We require at least 1 root node/parent for the meshes. This is to ensure we
    can duplicate the nodes and preserve the names.
    If using proxies we need the nodes to share the same names and not be
    Ensure no nodes are hidden.
    """

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["ass", "assProxy"]
    label = "Validate Arnold Scene Source"

    def process(self, instance):
        # Validate against having nodes hidden, which will result in the
        # extraction to ignore the node.
        nodes = instance.data["members"] + instance.data.get("proxy", [])
        nodes = [x for x in nodes if cmds.objectType(x, isAType='dagNode')]
        hidden_nodes = [
            x for x in nodes if not is_visible(x, intermediateObject=False)
        ]
        if hidden_nodes:
            raise PublishValidationError(
                "Found hidden nodes:\n\n{}\n\nPlease unhide for"
                " publishing.".format("\n".join(hidden_nodes))
            )


class ValidateArnoldSceneSourceProxy(pyblish.api.InstancePlugin):
    """Validate Arnold Scene Source Proxy.

    When using proxies we need the nodes to share the same names and not be
    parent to the world. This ends up needing at least two groups with content
    nodes and proxy nodes in another.
    """

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["ass"]
    families = ["assProxy"]
    label = "Validate Arnold Scene Source"
    label = "Validate Arnold Scene Source Proxy"

    def _get_nodes_by_name(self, nodes):
        ungrouped_nodes = []
        nodes_by_name = {}
        parents = []
        same_named_nodes = {}
        for node in nodes:
            node_split = node.split("|")
            if len(node_split) == 2:
@@ -35,33 +61,16 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
                parents.append(parent)

            node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]

            # Check for same named nodes, which can happen in different
            # hierarchies.
            if node_name in nodes_by_name:
                try:
                    same_named_nodes[node_name].append(node)
                except KeyError:
                    same_named_nodes[node_name] = [
                        nodes_by_name[node_name], node
                    ]

            nodes_by_name[node_name] = node

        if same_named_nodes:
            message = "Found nodes with the same name:"
            for name, nodes in same_named_nodes.items():
                message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes))

            raise PublishValidationError(message)

        return ungrouped_nodes, nodes_by_name, parents

    def process(self, instance):
        # Validate against nodes directly parented to world.
        ungrouped_nodes = []

        nodes, content_nodes_by_name, content_parents = (
            self._get_nodes_by_name(instance.data["contentMembers"])
            self._get_nodes_by_name(instance.data["members"])
        )
        ungrouped_nodes.extend(nodes)
@@ -70,24 +79,21 @@ class ValidateArnoldSceneSource(pyblish.api.InstancePlugin):
        )
        ungrouped_nodes.extend(nodes)

        # Validate against nodes directly parented to world.
        if ungrouped_nodes:
            raise PublishValidationError(
                "Found nodes parented to the world: {}\n"
                "All nodes need to be grouped.".format(ungrouped_nodes)
            )

        # Proxy validation.
        if not instance.data.get("proxy", []):
            return

        # Validate for content and proxy nodes amount being the same.
        if len(instance.data["contentMembers"]) != len(instance.data["proxy"]):
        if len(instance.data["members"]) != len(instance.data["proxy"]):
            raise PublishValidationError(
                "Amount of content nodes ({}) and proxy nodes ({}) needs to "
                "be the same.".format(
                "be the same.\nContent nodes: {}\nProxy nodes: {}".format(
                    len(instance.data["contentMembers"]),
                    len(instance.data["members"]),
                    len(instance.data["proxy"])
                    len(instance.data["proxy"]),
                    instance.data["members"],
                    instance.data["proxy"]
                )
            )
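Note: both Arnold validators pair content and proxy nodes by short name (hierarchy and namespace stripped), which is why duplicates and world-parented nodes are rejected first. A minimal sketch of the name key, with illustrative node paths:

    def short_name(node):
        # "|content_GRP|ns:model" -> "model"
        return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]

    content = {short_name(n): n for n in ["|content_GRP|model"]}
    proxy = {short_name(n): n for n in ["|proxy_GRP|model"]}
    assert content.keys() == proxy.keys()  # names must line up one-to-one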
@@ -17,7 +17,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["ass"]
    families = ["assProxy"]
    label = "Validate Arnold Scene Source CBID"
    actions = [RepairAction]
    optional = False
@@ -40,15 +40,11 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,

    @classmethod
    def get_invalid_couples(cls, instance):
        content_nodes_by_name = cls._get_nodes_by_name(
            instance.data["contentMembers"]
        )
        proxy_nodes_by_name = cls._get_nodes_by_name(
            instance.data.get("proxy", [])
        )
        nodes_by_name = cls._get_nodes_by_name(instance.data["members"])
        proxy_nodes_by_name = cls._get_nodes_by_name(instance.data["proxy"])

        invalid_couples = []
        for content_name, content_node in content_nodes_by_name.items():
        for content_name, content_node in nodes_by_name.items():
            proxy_node = proxy_nodes_by_name.get(content_name, None)

            if not proxy_node:
@@ -70,7 +66,7 @@ class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin,
        if not self.is_active(instance.data):
            return
        # Proxy validation.
        if not instance.data.get("proxy", []):
        if not instance.data["proxy"]:
            return

        # Validate for proxy nodes sharing the same cbId as content nodes.
@@ -10,6 +10,7 @@ from ayon_core.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishValidationError,
    OptionalPyblishPluginMixin
)
from ayon_core.hosts.maya.api import lib
from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings
@@ -37,7 +38,8 @@ def get_redshift_image_format_labels():
    return mel.eval("{0}={0}".format(var))


class ValidateRenderSettings(pyblish.api.InstancePlugin):
class ValidateRenderSettings(pyblish.api.InstancePlugin,
                             OptionalPyblishPluginMixin):
    """Validates the global render settings

    * File Name Prefix must start with: `<Scene>`
|
||||||
* Frame Padding must be:
|
* Frame Padding must be:
|
||||||
* default: 4
|
* default: 4
|
||||||
|
|
||||||
* Animation must be toggle on, in Render Settings - Common tab:
|
* Animation must be toggled on, in Render Settings - Common tab:
|
||||||
* vray: Animation on standard of specific
|
* vray: Animation on standard of specific
|
||||||
* arnold: Frame / Animation ext: Any choice without "(Single Frame)"
|
* arnold: Frame / Animation ext: Any choice without "(Single Frame)"
|
||||||
* redshift: Animation toggled on
|
* redshift: Animation toggled on
|
||||||
|
|
@@ -67,10 +69,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    """

    order = ValidateContentsOrder
    label = "Render Settings"
    label = "Validate Render Settings"
    hosts = ["maya"]
    families = ["renderlayer"]
    actions = [RepairAction]
    optional = True

    ImagePrefixes = {
        'mentalray': 'defaultRenderGlobals.imageFilePrefix',
@@ -112,6 +115,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        invalid = self.get_invalid(instance)
        if invalid:
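Note: the early `is_active` return is the standard gate for optional plugins; when the artist toggles the validator off in the publisher, `process` becomes a no-op. A simplified sketch (the real mixin reads the instance's publish attributes):

    class OptionalValidatorSketch:
        optional = True
        enabled_by_artist = True  # hypothetical stand-in for publish attributes

        def is_active(self, data):
            return self.enabled_by_artist

        def process(self, instance):
            if not self.is_active(instance.data):
                return  # skipped when toggled off in the publisher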
@@ -0,0 +1,132 @@
from maya import cmds

from ayon_core.pipeline.workfile.workfile_template_builder import (
    PlaceholderLoadMixin,
    LoadPlaceholderItem
)
from ayon_core.hosts.maya.api.lib import (
    get_container_transforms,
    get_node_parent,
    get_node_index_under_parent
)
from ayon_core.hosts.maya.api.workfile_template_builder import (
    MayaPlaceholderPlugin,
)


class MayaPlaceholderLoadPlugin(MayaPlaceholderPlugin, PlaceholderLoadMixin):
    identifier = "maya.load"
    label = "Maya load"

    item_class = LoadPlaceholderItem

    def _create_placeholder_name(self, placeholder_data):

        # Split builder type: context_assets, linked_assets, all_assets
        prefix, suffix = placeholder_data["builder_type"].split("_", 1)
        parts = [prefix]

        # add family if any
        placeholder_product_type = placeholder_data.get("product_type")
        if placeholder_product_type is None:
            placeholder_product_type = placeholder_data.get("family")

        if placeholder_product_type:
            parts.append(placeholder_product_type)

        # add loader arguments if any
        loader_args = placeholder_data["loader_args"]
        if loader_args:
            loader_args = eval(loader_args)
            for value in loader_args.values():
                parts.append(str(value))

        parts.append(suffix)
        placeholder_name = "_".join(parts)

        return placeholder_name.capitalize()

    def _get_loaded_repre_ids(self):
        loaded_representation_ids = self.builder.get_shared_populate_data(
            "loaded_representation_ids"
        )
        if loaded_representation_ids is None:
            try:
                containers = cmds.sets("AVALON_CONTAINERS", q=True)
            except ValueError:
                containers = []

            loaded_representation_ids = {
                cmds.getAttr(container + ".representation")
                for container in containers
            }
            self.builder.set_shared_populate_data(
                "loaded_representation_ids", loaded_representation_ids
            )
        return loaded_representation_ids

    def populate_placeholder(self, placeholder):
        self.populate_load_placeholder(placeholder)

    def repopulate_placeholder(self, placeholder):
        repre_ids = self._get_loaded_repre_ids()
        self.populate_load_placeholder(placeholder, repre_ids)

    def get_placeholder_options(self, options=None):
        return self.get_load_plugin_options(options)

    def load_succeed(self, placeholder, container):
        self._parent_in_hierarchy(placeholder, container)

    def _parent_in_hierarchy(self, placeholder, container):
        """Parent loaded container to placeholder's parent.

        ie : Set loaded content as placeholder's sibling

        Args:
            container (str): Placeholder loaded containers
        """

        if not container:
            return

        # TODO: This currently returns only a single root but a loaded scene
        #   could technically load more than a single root
        container_root = get_container_transforms(container, root=True)

        # Bugfix: The get_container_transforms does not recognize the load
        # reference group currently
        # TODO: Remove this when it does
        parent = get_node_parent(container_root)
        if parent:
            container_root = parent
        roots = [container_root]

        # Add the loaded roots to the holding sets if they exist
        holding_sets = cmds.listSets(object=placeholder.scene_identifier) or []
        for holding_set in holding_sets:
            cmds.sets(roots, forceElement=holding_set)

        # Parent the roots to the place of the placeholder locator and match
        # its matrix
        placeholder_form = cmds.xform(
            placeholder.scene_identifier,
            query=True,
            matrix=True,
            worldSpace=True
        )
        scene_parent = get_node_parent(placeholder.scene_identifier)
        for node in set(roots):
            cmds.xform(node, matrix=placeholder_form, worldSpace=True)

            if scene_parent != get_node_parent(node):
                if scene_parent:
                    node = cmds.parent(node, scene_parent)[0]
                else:
                    node = cmds.parent(node, world=True)[0]

            # Move loaded nodes in index order next to their placeholder node
            cmds.reorder(node, back=True)
            index = get_node_index_under_parent(placeholder.scene_identifier)
            cmds.reorder(node, front=True)
            cmds.reorder(node, relative=index + 1)
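Note: the three reorder calls above implement "place the loaded root right after its placeholder" among the parent's children. A pure-Python analogue of the index math, with illustrative node names:

    children = ["A", "placeholder", "B", "new"]   # sibling order after parenting
    node = children.pop(children.index("new"))    # cmds.reorder(node, front=True)
    index = children.index("placeholder")         # get_node_index_under_parent()
    children.insert(index + 1, node)              # cmds.reorder(node, relative=index + 1)
    assert children == ["A", "placeholder", "new", "B"]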
@@ -0,0 +1,201 @@
from maya import cmds

from ayon_core.hosts.maya.api.workfile_template_builder import (
    MayaPlaceholderPlugin
)
from ayon_core.lib import NumberDef, TextDef, EnumDef
from ayon_core.lib.events import weakref_partial


EXAMPLE_SCRIPT = """
# Access maya commands
from maya import cmds

# Access the placeholder node
placeholder_node = placeholder.scene_identifier

# Access the event callback
if event is None:
    print(f"Populating {placeholder}")
else:
    if event.topic == "template.depth_processed":
        print(f"Processed depth: {event.get('depth')}")
    elif event.topic == "template.finished":
        print("Build finished.")
""".strip()


class MayaPlaceholderScriptPlugin(MayaPlaceholderPlugin):
    """Execute a script at the given `order` during workfile build.

    This is a very low-level placeholder to run Python scripts at a given
    point in time during the workfile template build.

    It can create either a locator or an objectSet as placeholder node.
    It defaults to an objectSet, since allowing to run on e.g. other
    placeholder node members can be useful, e.g. using:

    >>> members = cmds.sets(placeholder.scene_identifier, query=True)

    """

    identifier = "maya.runscript"
    label = "Run Python Script"

    use_selection_as_parent = False

    def get_placeholder_options(self, options=None):
        options = options or {}
        return [
            NumberDef(
                "order",
                label="Order",
                default=options.get("order") or 0,
                decimals=0,
                minimum=0,
                maximum=999,
                tooltip=(
                    "Order"
                    "\nOrder defines asset loading priority (0 to 999)"
                    "\nPriority rule is : \"lowest is first to load\"."
                )
            ),
            TextDef(
                "prepare_script",
                label="Run at\nprepare",
                tooltip="Run before populate at prepare order",
                multiline=True,
                default=options.get("prepare_script", "")
            ),
            TextDef(
                "populate_script",
                label="Run at\npopulate",
                tooltip="Run script at populate node order<br>"
                        "This is the <b>default</b> behavior",
                multiline=True,
                default=options.get("populate_script", EXAMPLE_SCRIPT)
            ),
            TextDef(
                "depth_processed_script",
                label="Run after\ndepth\niteration",
                tooltip="Run script after every build depth iteration",
                multiline=True,
                default=options.get("depth_processed_script", "")
            ),
            TextDef(
                "finished_script",
                label="Run after\nbuild",
                tooltip=(
                    "Run script at build finished.<br>"
                    "<b>Note</b>: this even runs if other placeholders had "
                    "errors during the build"
                ),
                multiline=True,
                default=options.get("finished_script", "")
            ),
            EnumDef(
                "create_nodetype",
                label="Nodetype",
                items={
                    "spaceLocator": "Locator",
                    "objectSet": "ObjectSet"
                },
                tooltip=(
                    "The placeholder's node type to be created.<br>"
                    "<b>Note</b> this only works on create, not on update"
                ),
                default=options.get("create_nodetype", "objectSet")
            ),
        ]

    def create_placeholder(self, placeholder_data):
        nodetype = placeholder_data.get("create_nodetype", "objectSet")

        if nodetype == "spaceLocator":
            super(MayaPlaceholderScriptPlugin, self).create_placeholder(
                placeholder_data
            )
        elif nodetype == "objectSet":
            placeholder_data["plugin_identifier"] = self.identifier

            # Create maya objectSet on selection
            selection = cmds.ls(selection=True, long=True)
            name = self._create_placeholder_name(placeholder_data)
            node = cmds.sets(selection, name=name)

            self.imprint(node, placeholder_data)

    def prepare_placeholders(self, placeholders):
        super(MayaPlaceholderScriptPlugin, self).prepare_placeholders(
            placeholders
        )
        for placeholder in placeholders:
            prepare_script = placeholder.data.get("prepare_script")
            if not prepare_script:
                continue

            self.run_script(placeholder, prepare_script)

    def populate_placeholder(self, placeholder):

        populate_script = placeholder.data.get("populate_script")
        depth_script = placeholder.data.get("depth_processed_script")
        finished_script = placeholder.data.get("finished_script")

        # Run now
        if populate_script:
            self.run_script(placeholder, populate_script)

        if not any([depth_script, finished_script]):
            # No callback scripts to run
            if not placeholder.data.get("keep_placeholder", True):
                self.delete_placeholder(placeholder)
            return

        # Run at each depth processed
        if depth_script:
            callback = weakref_partial(
                self.run_script, placeholder, depth_script)
            self.builder.add_on_depth_processed_callback(
                callback, order=placeholder.order)

        # Run at build finish
        if finished_script:
            callback = weakref_partial(
                self.run_script, placeholder, finished_script)
            self.builder.add_on_finished_callback(
                callback, order=placeholder.order)

        # If placeholder should be deleted, delete it after finish so
        # the scripts have access to it up to the last run
        if not placeholder.data.get("keep_placeholder", True):
            delete_callback = weakref_partial(
                self.delete_placeholder, placeholder)
            self.builder.add_on_finished_callback(
                delete_callback, order=placeholder.order + 1)

    def run_script(self, placeholder, script, event=None):
        """Run script

        Even though `placeholder` is an unused argument, exposing it as an
        input argument makes it available through globals()/locals() in the
        `exec` call, giving the script access to the placeholder.

        For example:
        >>> node = placeholder.scene_identifier

        In the case the script is running at a callback level (not during
        populate) then it has access to the `event` as well, otherwise the
        value is None if it runs during `populate_placeholder` directly.

        For example adding this as the callback script:
        >>> if event is not None:
        >>>     if event.topic == "on_depth_processed":
        >>>         print(f"Processed depth: {event.get('depth')}")
        >>>     elif event.topic == "on_finished":
        >>>         print("Build finished.")

        """
        self.log.debug(f"Running script at event: {event}")
        exec(script, locals())
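Note: `exec(script, locals())` is what puts `placeholder` and `event` into the executed script's namespace; the method's arguments double as the script's API. A minimal stand-alone illustration of the same mechanism:

    def run(script, placeholder, event=None):
        # the argument names become readable inside the executed script
        exec(script, {"placeholder": placeholder, "event": event})

    run("print(placeholder, event)", placeholder="Placeholder_SET")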
@@ -7,7 +7,7 @@ from maya import cmds
import ayon_api

from ayon_core.pipeline import get_current_project_name
import ayon_core.hosts.maya.lib as maya_lib
import ayon_core.hosts.maya.api.lib as maya_lib
from . import lib
from .alembic import get_alembic_ids_cache
@@ -43,7 +43,9 @@ from ayon_core.pipeline import (
from ayon_core.pipeline.context_tools import (
    get_current_context_custom_workfile_template
)
from ayon_core.pipeline.colorspace import get_imageio_config
from ayon_core.pipeline.colorspace import (
    get_current_context_imageio_config_preset
)
from ayon_core.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST
@@ -1495,18 +1497,28 @@ class WorkfileSettings(object):

        filter_knobs = [
            "viewerProcess",
            "wipe_position"
            "wipe_position",
            "monitorOutOutputTransform"
        ]

        display, viewer = get_viewer_config_from_string(
            viewer_dict["viewerProcess"]
        )
        viewer_process = create_viewer_profile_string(
            viewer, display, path_like=False
        )
        display, viewer = get_viewer_config_from_string(
            viewer_dict["output_transform"]
        )
        output_transform = create_viewer_profile_string(
            viewer, display, path_like=False
        )
        erased_viewers = []
        for v in nuke.allNodes(filter="Viewer"):
            # set viewProcess to preset from settings
            v["viewerProcess"].setValue(
                str(viewer_dict["viewerProcess"])
            )
            v["viewerProcess"].setValue(viewer_process)

            if str(viewer_dict["viewerProcess"]) \
                    not in v["viewerProcess"].value():
            if viewer_process not in v["viewerProcess"].value():
                copy_inputs = v.dependencies()
                copy_knobs = {k: v[k].value() for k in v.knobs()
                              if k not in filter_knobs}
@@ -1524,11 +1536,11 @@ class WorkfileSettings(object):

                # set copied knobs
                for k, v in copy_knobs.items():
                    print(k, v)
                    nv[k].setValue(v)

                # set viewerProcess
                nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"]))
                nv["viewerProcess"].setValue(viewer_process)
                nv["monitorOutOutputTransform"].setValue(output_transform)

        if erased_viewers:
            log.warning(
@@ -1542,12 +1554,8 @@ class WorkfileSettings(object):
            imageio_host (dict): host colorspace configurations

        '''
        config_data = get_imageio_config(
            project_name=get_current_project_name(),
            host_name="nuke"
        )
        config_data = get_current_context_imageio_config_preset()

        viewer_process_settings = imageio_host["viewer"]["viewerProcess"]
        workfile_settings = imageio_host["workfile"]
        color_management = workfile_settings["color_management"]
        native_ocio_config = workfile_settings["native_ocio_config"]
@@ -1574,29 +1582,6 @@ class WorkfileSettings(object):
                residual_path
            ))

        # get monitor lut from settings respecting Nuke version differences
        monitor_lut = workfile_settings["thumbnail_space"]
        monitor_lut_data = self._get_monitor_settings(
            viewer_process_settings, monitor_lut
        )
        monitor_lut_data["workingSpaceLUT"] = (
            workfile_settings["working_space"]
        )

        # then set the rest
        for knob, value_ in monitor_lut_data.items():
            # skip unfilled ocio config path
            # it will be dict in value
            if isinstance(value_, dict):
                continue
            # skip empty values
            if not value_:
                continue
            if self._root_node[knob].value() not in value_:
                self._root_node[knob].setValue(str(value_))
                log.debug("nuke.root()['{}'] changed to: {}".format(
                    knob, value_))

        # set ocio config path
        if config_data:
            config_path = config_data["path"].replace("\\", "/")
@@ -1611,6 +1596,31 @@ class WorkfileSettings(object):
        if correct_settings:
            self._set_ocio_config_path_to_workfile(config_data)

        # get monitor lut from settings respecting Nuke version differences
        monitor_lut_data = self._get_monitor_settings(
            workfile_settings["monitor_out_lut"],
            workfile_settings["monitor_lut"]
        )
        monitor_lut_data.update({
            "workingSpaceLUT": workfile_settings["working_space"],
            "int8Lut": workfile_settings["int_8_lut"],
            "int16Lut": workfile_settings["int_16_lut"],
            "logLut": workfile_settings["log_lut"],
            "floatLut": workfile_settings["float_lut"]
        })

        # then set the rest
        for knob, value_ in monitor_lut_data.items():
            # skip unfilled ocio config path
            # it will be dict in value
            if isinstance(value_, dict):
                continue
            # skip empty values
            if not value_:
                continue
            self._root_node[knob].setValue(str(value_))
            log.debug("nuke.root()['{}'] changed to: {}".format(knob, value_))

    def _get_monitor_settings(self, viewer_lut, monitor_lut):
        """ Get monitor settings from viewer and monitor lut
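Note: the knob loop above deliberately skips two value shapes: dicts (an unresolved OCIO config placeholder left in the settings) and empty strings (options left blank). A small illustration of those skip rules, with hypothetical values:

    monitor_lut_data = {
        "workingSpaceLUT": "scene_linear",   # hypothetical value: applied
        "int8Lut": "",                       # blank setting: skipped
        "floatLut": {"unresolved": True},    # unfilled config: skipped
    }
    to_set = {
        knob: value
        for knob, value in monitor_lut_data.items()
        if value and not isinstance(value, dict)
    }
    assert to_set == {"workingSpaceLUT": "scene_linear"}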
@@ -18,6 +18,7 @@ from ayon_core.pipeline import (
    register_loader_plugin_path,
    register_creator_plugin_path,
    register_inventory_action_path,
    register_workfile_build_plugin_path,
    AYON_INSTANCE_ID,
    AVALON_INSTANCE_ID,
    AVALON_CONTAINER_ID,
@@ -52,8 +53,6 @@ from .lib import (
    MENU_LABEL,
)
from .workfile_template_builder import (
    NukePlaceholderLoadPlugin,
    NukePlaceholderCreatePlugin,
    build_workfile_template,
    create_placeholder,
    update_placeholder,
@@ -76,6 +75,7 @@ PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")

# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
@@ -105,18 +105,11 @@ class NukeHost(
    def get_workfile_extensions(self):
        return file_extensions()

    def get_workfile_build_placeholder_plugins(self):
        return [
            NukePlaceholderLoadPlugin,
            NukePlaceholderCreatePlugin
        ]

    def get_containers(self):
        return ls()

    def install(self):
        ''' Installing all requarements for Nuke host
        '''
        """Installing all requirements for Nuke host"""

        pyblish.api.register_host("nuke")
@@ -125,6 +118,7 @@ class NukeHost(
    register_loader_plugin_path(LOAD_PATH)
    register_creator_plugin_path(CREATE_PATH)
    register_inventory_action_path(INVENTORY_PATH)
    register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)

    # Register AYON event for workfiles loading.
    register_event_callback("workio.open_file", check_inventory_versions)
@@ -178,7 +172,6 @@ def add_nuke_callbacks():
    # set apply all workfile settings on script load and save
    nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)

    if nuke_settings["dirmap"]["enabled"]:
        log.info("Added Nuke's dir-mapping callback ...")
        # Add dirmap for file paths.
||||||
|
|
@ -778,6 +778,7 @@ class ExporterReviewMov(ExporterReview):
|
||||||
# deal with now lut defined in viewer lut
|
# deal with now lut defined in viewer lut
|
||||||
self.viewer_lut_raw = klass.viewer_lut_raw
|
self.viewer_lut_raw = klass.viewer_lut_raw
|
||||||
self.write_colorspace = instance.data["colorspace"]
|
self.write_colorspace = instance.data["colorspace"]
|
||||||
|
self.color_channels = instance.data["color_channels"]
|
||||||
|
|
||||||
self.name = name or "baked"
|
self.name = name or "baked"
|
||||||
self.ext = ext or "mov"
|
self.ext = ext or "mov"
|
||||||
|
|
@@ -834,7 +835,7 @@ class ExporterReviewMov(ExporterReview):
        self.log.info("Nodes exported...")
        return path

    def generate_mov(self, farm=False, **kwargs):
    def generate_mov(self, farm=False, delete=True, **kwargs):
        # colorspace data
        colorspace = None
        # get colorspace settings
@@ -947,6 +948,8 @@ class ExporterReviewMov(ExporterReview):
         self.log.debug("Path: {}".format(self.path))
         write_node["file"].setValue(str(self.path))
         write_node["file_type"].setValue(str(self.ext))
+        write_node["channels"].setValue(str(self.color_channels))
+
         # Knobs `meta_codec` and `mov64_codec` are not available on centos.
         # TODO shouldn't this come from settings on outputs?
         try:
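
The `color_channels` value cached in `__init__` above ends up on the baked Write node here. A small sketch of the flow in isolation — `write_node` stands for any Nuke Write node and the dict is a stand-in for the publish instance, not the repo's code:

    def apply_color_channels(write_node, instance_data):
        # Mirrors the lines added in this diff: the publish instance
        # carries "color_channels" (e.g. "rgb", "rgba", "all") and the
        # exporter forwards it to the Write node's "channels" knob.
        write_node["channels"].setValue(str(instance_data["color_channels"]))
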
@@ -987,8 +990,13 @@ class ExporterReviewMov(ExporterReview):
         self.render(write_node.name())
 
         # ---------- generate representation data
+        tags = ["review", "need_thumbnail"]
+
+        if delete:
+            tags.append("delete")
+
         self.get_representation_data(
-            tags=["review", "need_thumbnail", "delete"] + add_tags,
+            tags=tags + add_tags,
             custom_tags=add_custom_tags,
             range=True,
             colorspace=colorspace
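
The new `delete` flag keeps the old default (intermediate movs tagged for cleanup) while letting callers keep the baked file. The tag logic in isolation, plain Python:

    def build_review_tags(delete=True, add_tags=None):
        # Same behaviour as the hunk above: "delete" marks the baked mov
        # as a temporary product; delete=True preserves the old default.
        tags = ["review", "need_thumbnail"]
        if delete:
            tags.append("delete")
        return tags + (add_tags or [])

    assert build_review_tags() == ["review", "need_thumbnail", "delete"]
    assert build_review_tags(delete=False) == ["review", "need_thumbnail"]
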
@@ -1151,7 +1159,6 @@ def _remove_old_knobs(node):
         "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority",
         "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline"
     ]
-    print(node.name())
 
     # remove all old knobs
     for knob in node.allKnobs():
@@ -1,30 +1,17 @@
 import collections
 import nuke
 
 from ayon_core.pipeline import registered_host
 from ayon_core.pipeline.workfile.workfile_template_builder import (
     AbstractTemplateBuilder,
     PlaceholderPlugin,
-    LoadPlaceholderItem,
-    CreatePlaceholderItem,
-    PlaceholderLoadMixin,
-    PlaceholderCreateMixin,
 )
 from ayon_core.tools.workfile_template_build import (
     WorkfileBuildPlaceholderDialog,
 )
 from .lib import (
-    find_free_space_to_paste_nodes,
-    get_extreme_positions,
-    get_group_io_nodes,
     imprint,
-    refresh_node,
-    refresh_nodes,
     reset_selection,
-    get_names_from_nodes,
-    get_nodes_by_names,
-    select_nodes,
-    duplicate_node,
-    node_tempfile,
     get_main_window,
     WorkfileSettings,
 )
@@ -54,6 +41,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder):
 
         return True
 
+
 class NukePlaceholderPlugin(PlaceholderPlugin):
     node_color = 4278190335
@@ -120,843 +108,6 @@ class NukePlaceholderPlugin(PlaceholderPlugin):
         nuke.delete(placeholder_node)
 
 
-class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
-    identifier = "nuke.load"
-    label = "Nuke load"
-
-    def _parse_placeholder_node_data(self, node):
-        placeholder_data = super(
-            NukePlaceholderLoadPlugin, self
-        )._parse_placeholder_node_data(node)
-
-        node_knobs = node.knobs()
-        nb_children = 0
-        if "nb_children" in node_knobs:
-            nb_children = int(node_knobs["nb_children"].getValue())
-        placeholder_data["nb_children"] = nb_children
-
-        siblings = []
-        if "siblings" in node_knobs:
-            siblings = node_knobs["siblings"].values()
-        placeholder_data["siblings"] = siblings
-
-        node_full_name = node.fullName()
-        placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
-        placeholder_data["last_loaded"] = []
-        placeholder_data["delete"] = False
-        return placeholder_data
-
-    def _get_loaded_repre_ids(self):
-        loaded_representation_ids = self.builder.get_shared_populate_data(
-            "loaded_representation_ids"
-        )
-        if loaded_representation_ids is None:
-            loaded_representation_ids = set()
-            for node in nuke.allNodes():
-                if "repre_id" in node.knobs():
-                    loaded_representation_ids.add(
-                        node.knob("repre_id").getValue()
-                    )
-
-            self.builder.set_shared_populate_data(
-                "loaded_representation_ids", loaded_representation_ids
-            )
-        return loaded_representation_ids
-
-    def _before_placeholder_load(self, placeholder):
-        placeholder.data["nodes_init"] = nuke.allNodes()
-
-    def _before_repre_load(self, placeholder, representation):
-        placeholder.data["last_repre_id"] = representation["id"]
-
-    def collect_placeholders(self):
-        output = []
-        scene_placeholders = self._collect_scene_placeholders()
-        for node_name, node in scene_placeholders.items():
-            plugin_identifier_knob = node.knob("plugin_identifier")
-            if (
-                plugin_identifier_knob is None
-                or plugin_identifier_knob.getValue() != self.identifier
-            ):
-                continue
-
-            placeholder_data = self._parse_placeholder_node_data(node)
-            # TODO do data validations and maybe updgrades if are invalid
-            output.append(
-                LoadPlaceholderItem(node_name, placeholder_data, self)
-            )
-
-        return output
-
-    def populate_placeholder(self, placeholder):
-        self.populate_load_placeholder(placeholder)
-
-    def repopulate_placeholder(self, placeholder):
-        repre_ids = self._get_loaded_repre_ids()
-        self.populate_load_placeholder(placeholder, repre_ids)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_load_plugin_options(options)
-
-    def post_placeholder_process(self, placeholder, failed):
-        """Cleanup placeholder after load of its corresponding representations.
-
-        Args:
-            placeholder (PlaceholderItem): Item which was just used to load
-                representation.
-            failed (bool): Loading of representation failed.
-        """
-        # deselect all selected nodes
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
-        # getting the latest nodes added
-        # TODO get from shared populate data!
-        nodes_init = placeholder.data["nodes_init"]
-        nodes_loaded = list(set(nuke.allNodes()) - set(nodes_init))
-        self.log.debug("Loaded nodes: {}".format(nodes_loaded))
-        if not nodes_loaded:
-            return
-
-        placeholder.data["delete"] = True
-
-        nodes_loaded = self._move_to_placeholder_group(
-            placeholder, nodes_loaded
-        )
-        placeholder.data["last_loaded"] = nodes_loaded
-        refresh_nodes(nodes_loaded)
-
-        # positioning of the loaded nodes
-        min_x, min_y, _, _ = get_extreme_positions(nodes_loaded)
-        for node in nodes_loaded:
-            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
-            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
-            node.setXYpos(xpos, ypos)
-        refresh_nodes(nodes_loaded)
-
-        # fix the problem of z_order for backdrops
-        self._fix_z_order(placeholder)
-
-        if placeholder.data.get("keep_placeholder"):
-            self._imprint_siblings(placeholder)
-
-        if placeholder.data["nb_children"] == 0:
-            # save initial nodes positions and dimensions, update them
-            # and set inputs and outputs of loaded nodes
-            if placeholder.data.get("keep_placeholder"):
-                self._imprint_inits()
-            self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded)
-
-            self._set_loaded_connections(placeholder)
-
-        elif placeholder.data["siblings"]:
-            # create copies of placeholder siblings for the new loaded nodes,
-            # set their inputs and outputs and update all nodes positions and
-            # dimensions and siblings names
-
-            siblings = get_nodes_by_names(placeholder.data["siblings"])
-            refresh_nodes(siblings)
-            copies = self._create_sib_copies(placeholder)
-            new_nodes = list(copies.values())  # copies nodes
-            self._update_nodes(new_nodes, nodes_loaded)
-            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
-            new_nodes_name = get_names_from_nodes(new_nodes)
-            imprint(placeholder_node, {"siblings": new_nodes_name})
-            self._set_copies_connections(placeholder, copies)
-
-            self._update_nodes(
-                nuke.allNodes(),
-                new_nodes + nodes_loaded,
-                20
-            )
-
-            new_siblings = get_names_from_nodes(new_nodes)
-            placeholder.data["siblings"] = new_siblings
-
-        else:
-            # if the placeholder doesn't have siblings, the loaded
-            # nodes will be placed in a free space
-
-            xpointer, ypointer = find_free_space_to_paste_nodes(
-                nodes_loaded, direction="bottom", offset=200
-            )
-            node = nuke.createNode("NoOp")
-            reset_selection()
-            nuke.delete(node)
-            for node in nodes_loaded:
-                xpos = (node.xpos() - min_x) + xpointer
-                ypos = (node.ypos() - min_y) + ypointer
-                node.setXYpos(xpos, ypos)
-
-        placeholder.data["nb_children"] += 1
-        reset_selection()
-
-        # go back to root group
-        nuke.root().begin()
-
-    def _move_to_placeholder_group(self, placeholder, nodes_loaded):
-        """
-        opening the placeholder's group and copying loaded nodes in it.
-
-        Returns :
-            nodes_loaded (list): the new list of pasted nodes
-        """
-
-        groups_name = placeholder.data["group_name"]
-        reset_selection()
-        select_nodes(nodes_loaded)
-        if groups_name:
-            with node_tempfile() as filepath:
-                nuke.nodeCopy(filepath)
-                for node in nuke.selectedNodes():
-                    nuke.delete(node)
-                group = nuke.toNode(groups_name)
-                group.begin()
-                nuke.nodePaste(filepath)
-                nodes_loaded = nuke.selectedNodes()
-        return nodes_loaded
-
-    def _fix_z_order(self, placeholder):
-        """Fix the problem of z_order when a backdrop is loaded."""
-
-        nodes_loaded = placeholder.data["last_loaded"]
-        loaded_backdrops = []
-        bd_orders = set()
-        for node in nodes_loaded:
-            if isinstance(node, nuke.BackdropNode):
-                loaded_backdrops.append(node)
-                bd_orders.add(node.knob("z_order").getValue())
-
-        if not bd_orders:
-            return
-
-        sib_orders = set()
-        for node_name in placeholder.data["siblings"]:
-            node = nuke.toNode(node_name)
-            if isinstance(node, nuke.BackdropNode):
-                sib_orders.add(node.knob("z_order").getValue())
-
-        if not sib_orders:
-            return
-
-        min_order = min(bd_orders)
-        max_order = max(sib_orders)
-        for backdrop_node in loaded_backdrops:
-            z_order = backdrop_node.knob("z_order").getValue()
-            backdrop_node.knob("z_order").setValue(
-                z_order + max_order - min_order + 1)
-
-    def _imprint_siblings(self, placeholder):
-        """
-        - add siblings names to placeholder attributes (nodes loaded with it)
-        - add Id to the attributes of all the other nodes
-        """
-
-        loaded_nodes = placeholder.data["last_loaded"]
-        loaded_nodes_set = set(loaded_nodes)
-        data = {"repre_id": str(placeholder.data["last_repre_id"])}
-
-        for node in loaded_nodes:
-            node_knobs = node.knobs()
-            if "builder_type" not in node_knobs:
-                # save the id of representation for all imported nodes
-                imprint(node, data)
-                node.knob("repre_id").setVisible(False)
-                refresh_node(node)
-                continue
-
-            if (
-                "is_placeholder" not in node_knobs
-                or (
-                    "is_placeholder" in node_knobs
-                    and node.knob("is_placeholder").value()
-                )
-            ):
-                siblings = list(loaded_nodes_set - {node})
-                siblings_name = get_names_from_nodes(siblings)
-                siblings = {"siblings": siblings_name}
-                imprint(node, siblings)
-
-    def _imprint_inits(self):
-        """Add initial positions and dimensions to the attributes"""
-
-        for node in nuke.allNodes():
-            refresh_node(node)
-            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
-            node.knob("x_init").setVisible(False)
-            node.knob("y_init").setVisible(False)
-            width = node.screenWidth()
-            height = node.screenHeight()
-            if "bdwidth" in node.knobs():
-                imprint(node, {"w_init": width, "h_init": height})
-                node.knob("w_init").setVisible(False)
-                node.knob("h_init").setVisible(False)
-            refresh_node(node)
-
-    def _update_nodes(
-        self, placeholder, nodes, considered_nodes, offset_y=None
-    ):
-        """Adjust backdrop nodes dimensions and positions.
-
-        Considering some nodes sizes.
-
-        Args:
-            nodes (list): list of nodes to update
-            considered_nodes (list): list of nodes to consider while updating
-                positions and dimensions
-            offset (int): distance between copies
-        """
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
-        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
-
-        diff_x = diff_y = 0
-        contained_nodes = []  # for backdrops
-
-        if offset_y is None:
-            width_ph = placeholder_node.screenWidth()
-            height_ph = placeholder_node.screenHeight()
-            diff_y = max_y - min_y - height_ph
-            diff_x = max_x - min_x - width_ph
-            contained_nodes = [placeholder_node]
-            min_x = placeholder_node.xpos()
-            min_y = placeholder_node.ypos()
-        else:
-            siblings = get_nodes_by_names(placeholder.data["siblings"])
-            minX, _, maxX, _ = get_extreme_positions(siblings)
-            diff_y = max_y - min_y + 20
-            diff_x = abs(max_x - min_x - maxX + minX)
-            contained_nodes = considered_nodes
-
-        if diff_y <= 0 and diff_x <= 0:
-            return
-
-        for node in nodes:
-            refresh_node(node)
-
-            if (
-                node == placeholder_node
-                or node in considered_nodes
-            ):
-                continue
-
-            if (
-                not isinstance(node, nuke.BackdropNode)
-                or (
-                    isinstance(node, nuke.BackdropNode)
-                    and not set(contained_nodes) <= set(node.getNodes())
-                )
-            ):
-                if offset_y is None and node.xpos() >= min_x:
-                    node.setXpos(node.xpos() + diff_x)
-
-                if node.ypos() >= min_y:
-                    node.setYpos(node.ypos() + diff_y)
-
-            else:
-                width = node.screenWidth()
-                height = node.screenHeight()
-                node.knob("bdwidth").setValue(width + diff_x)
-                node.knob("bdheight").setValue(height + diff_y)
-
-            refresh_node(node)
-
-    def _set_loaded_connections(self, placeholder):
-        """
-        set inputs and outputs of loaded nodes"""
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-        input_node, output_node = get_group_io_nodes(
-            placeholder.data["last_loaded"]
-        )
-        for node in placeholder_node.dependent():
-            for idx in range(node.inputs()):
-                if node.input(idx) == placeholder_node and output_node:
-                    node.setInput(idx, output_node)
-
-        for node in placeholder_node.dependencies():
-            for idx in range(placeholder_node.inputs()):
-                if placeholder_node.input(idx) == node and input_node:
-                    input_node.setInput(0, node)
-
-    def _create_sib_copies(self, placeholder):
-        """ creating copies of the palce_holder siblings (the ones who were
-        loaded with it) for the new nodes added
-
-        Returns :
-            copies (dict) : with copied nodes names and their copies
-        """
-
-        copies = {}
-        siblings = get_nodes_by_names(placeholder.data["siblings"])
-        for node in siblings:
-            new_node = duplicate_node(node)
-
-            x_init = int(new_node.knob("x_init").getValue())
-            y_init = int(new_node.knob("y_init").getValue())
-            new_node.setXYpos(x_init, y_init)
-            if isinstance(new_node, nuke.BackdropNode):
-                w_init = new_node.knob("w_init").getValue()
-                h_init = new_node.knob("h_init").getValue()
-                new_node.knob("bdwidth").setValue(w_init)
-                new_node.knob("bdheight").setValue(h_init)
-                refresh_node(node)
-
-            if "repre_id" in node.knobs().keys():
-                node.removeKnob(node.knob("repre_id"))
-            copies[node.name()] = new_node
-        return copies
-
-    def _set_copies_connections(self, placeholder, copies):
-        """Set inputs and outputs of the copies.
-
-        Args:
-            copies (dict): Copied nodes by their names.
-        """
-
-        last_input, last_output = get_group_io_nodes(
-            placeholder.data["last_loaded"]
-        )
-        siblings = get_nodes_by_names(placeholder.data["siblings"])
-        siblings_input, siblings_output = get_group_io_nodes(siblings)
-        copy_input = copies[siblings_input.name()]
-        copy_output = copies[siblings_output.name()]
-
-        for node_init in siblings:
-            if node_init == siblings_output:
-                continue
-
-            node_copy = copies[node_init.name()]
-            for node in node_init.dependent():
-                for idx in range(node.inputs()):
-                    if node.input(idx) != node_init:
-                        continue
-
-                    if node in siblings:
-                        copies[node.name()].setInput(idx, node_copy)
-                    else:
-                        last_input.setInput(0, node_copy)
-
-            for node in node_init.dependencies():
-                for idx in range(node_init.inputs()):
-                    if node_init.input(idx) != node:
-                        continue
-
-                    if node_init == siblings_input:
-                        copy_input.setInput(idx, node)
-                    elif node in siblings:
-                        node_copy.setInput(idx, copies[node.name()])
-                    else:
-                        node_copy.setInput(idx, last_output)
-
-        siblings_input.setInput(0, copy_output)
-
-
-class NukePlaceholderCreatePlugin(
-    NukePlaceholderPlugin, PlaceholderCreateMixin
-):
-    identifier = "nuke.create"
-    label = "Nuke create"
-
-    def _parse_placeholder_node_data(self, node):
-        placeholder_data = super(
-            NukePlaceholderCreatePlugin, self
-        )._parse_placeholder_node_data(node)
-
-        node_knobs = node.knobs()
-        nb_children = 0
-        if "nb_children" in node_knobs:
-            nb_children = int(node_knobs["nb_children"].getValue())
-        placeholder_data["nb_children"] = nb_children
-
-        siblings = []
-        if "siblings" in node_knobs:
-            siblings = node_knobs["siblings"].values()
-        placeholder_data["siblings"] = siblings
-
-        node_full_name = node.fullName()
-        placeholder_data["group_name"] = node_full_name.rpartition(".")[0]
-        placeholder_data["last_loaded"] = []
-        placeholder_data["delete"] = False
-        return placeholder_data
-
-    def _before_instance_create(self, placeholder):
-        placeholder.data["nodes_init"] = nuke.allNodes()
-
-    def collect_placeholders(self):
-        output = []
-        scene_placeholders = self._collect_scene_placeholders()
-        for node_name, node in scene_placeholders.items():
-            plugin_identifier_knob = node.knob("plugin_identifier")
-            if (
-                plugin_identifier_knob is None
-                or plugin_identifier_knob.getValue() != self.identifier
-            ):
-                continue
-
-            placeholder_data = self._parse_placeholder_node_data(node)
-
-            output.append(
-                CreatePlaceholderItem(node_name, placeholder_data, self)
-            )
-
-        return output
-
-    def populate_placeholder(self, placeholder):
-        self.populate_create_placeholder(placeholder)
-
-    def repopulate_placeholder(self, placeholder):
-        self.populate_create_placeholder(placeholder)
-
-    def get_placeholder_options(self, options=None):
-        return self.get_create_plugin_options(options)
-
-    def post_placeholder_process(self, placeholder, failed):
-        """Cleanup placeholder after load of its corresponding representations.
-
-        Args:
-            placeholder (PlaceholderItem): Item which was just used to load
-                representation.
-            failed (bool): Loading of representation failed.
-        """
-        # deselect all selected nodes
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
-        # getting the latest nodes added
-        nodes_init = placeholder.data["nodes_init"]
-        nodes_created = list(set(nuke.allNodes()) - set(nodes_init))
-        self.log.debug("Created nodes: {}".format(nodes_created))
-        if not nodes_created:
-            return
-
-        placeholder.data["delete"] = True
-
-        nodes_created = self._move_to_placeholder_group(
-            placeholder, nodes_created
-        )
-        placeholder.data["last_created"] = nodes_created
-        refresh_nodes(nodes_created)
-
-        # positioning of the created nodes
-        min_x, min_y, _, _ = get_extreme_positions(nodes_created)
-        for node in nodes_created:
-            xpos = (node.xpos() - min_x) + placeholder_node.xpos()
-            ypos = (node.ypos() - min_y) + placeholder_node.ypos()
-            node.setXYpos(xpos, ypos)
-        refresh_nodes(nodes_created)
-
-        # fix the problem of z_order for backdrops
-        self._fix_z_order(placeholder)
-
-        if placeholder.data.get("keep_placeholder"):
-            self._imprint_siblings(placeholder)
-
-        if placeholder.data["nb_children"] == 0:
-            # save initial nodes positions and dimensions, update them
-            # and set inputs and outputs of created nodes
-
-            if placeholder.data.get("keep_placeholder"):
-                self._imprint_inits()
-            self._update_nodes(placeholder, nuke.allNodes(), nodes_created)
-
-            self._set_created_connections(placeholder)
-
-        elif placeholder.data["siblings"]:
-            # create copies of placeholder siblings for the new created nodes,
-            # set their inputs and outputs and update all nodes positions and
-            # dimensions and siblings names
-
-            siblings = get_nodes_by_names(placeholder.data["siblings"])
-            refresh_nodes(siblings)
-            copies = self._create_sib_copies(placeholder)
-            new_nodes = list(copies.values())  # copies nodes
-            self._update_nodes(new_nodes, nodes_created)
-            placeholder_node.removeKnob(placeholder_node.knob("siblings"))
-            new_nodes_name = get_names_from_nodes(new_nodes)
-            imprint(placeholder_node, {"siblings": new_nodes_name})
-            self._set_copies_connections(placeholder, copies)
-
-            self._update_nodes(
-                nuke.allNodes(),
-                new_nodes + nodes_created,
-                20
-            )
-
-            new_siblings = get_names_from_nodes(new_nodes)
-            placeholder.data["siblings"] = new_siblings
-
-        else:
-            # if the placeholder doesn't have siblings, the created
-            # nodes will be placed in a free space
-
-            xpointer, ypointer = find_free_space_to_paste_nodes(
-                nodes_created, direction="bottom", offset=200
-            )
-            node = nuke.createNode("NoOp")
-            reset_selection()
-            nuke.delete(node)
-            for node in nodes_created:
-                xpos = (node.xpos() - min_x) + xpointer
-                ypos = (node.ypos() - min_y) + ypointer
-                node.setXYpos(xpos, ypos)
-
-        placeholder.data["nb_children"] += 1
-        reset_selection()
-
-        # go back to root group
-        nuke.root().begin()
-
-    def _move_to_placeholder_group(self, placeholder, nodes_created):
-        """
-        opening the placeholder's group and copying created nodes in it.
-
-        Returns :
-            nodes_created (list): the new list of pasted nodes
-        """
-        groups_name = placeholder.data["group_name"]
-        reset_selection()
-        select_nodes(nodes_created)
-        if groups_name:
-            with node_tempfile() as filepath:
-                nuke.nodeCopy(filepath)
-                for node in nuke.selectedNodes():
-                    nuke.delete(node)
-                group = nuke.toNode(groups_name)
-                group.begin()
-                nuke.nodePaste(filepath)
-                nodes_created = nuke.selectedNodes()
-        return nodes_created
-
-    def _fix_z_order(self, placeholder):
-        """Fix the problem of z_order when a backdrop is create."""
-
-        nodes_created = placeholder.data["last_created"]
-        created_backdrops = []
-        bd_orders = set()
-        for node in nodes_created:
-            if isinstance(node, nuke.BackdropNode):
-                created_backdrops.append(node)
-                bd_orders.add(node.knob("z_order").getValue())
-
-        if not bd_orders:
-            return
-
-        sib_orders = set()
-        for node_name in placeholder.data["siblings"]:
-            node = nuke.toNode(node_name)
-            if isinstance(node, nuke.BackdropNode):
-                sib_orders.add(node.knob("z_order").getValue())
-
-        if not sib_orders:
-            return
-
-        min_order = min(bd_orders)
-        max_order = max(sib_orders)
-        for backdrop_node in created_backdrops:
-            z_order = backdrop_node.knob("z_order").getValue()
-            backdrop_node.knob("z_order").setValue(
-                z_order + max_order - min_order + 1)
-
-    def _imprint_siblings(self, placeholder):
-        """
-        - add siblings names to placeholder attributes (nodes created with it)
-        - add Id to the attributes of all the other nodes
-        """
-
-        created_nodes = placeholder.data["last_created"]
-        created_nodes_set = set(created_nodes)
-
-        for node in created_nodes:
-            node_knobs = node.knobs()
-
-            if (
-                "is_placeholder" not in node_knobs
-                or (
-                    "is_placeholder" in node_knobs
-                    and node.knob("is_placeholder").value()
-                )
-            ):
-                siblings = list(created_nodes_set - {node})
-                siblings_name = get_names_from_nodes(siblings)
-                siblings = {"siblings": siblings_name}
-                imprint(node, siblings)
-
-    def _imprint_inits(self):
-        """Add initial positions and dimensions to the attributes"""
-
-        for node in nuke.allNodes():
-            refresh_node(node)
-            imprint(node, {"x_init": node.xpos(), "y_init": node.ypos()})
-            node.knob("x_init").setVisible(False)
-            node.knob("y_init").setVisible(False)
-            width = node.screenWidth()
-            height = node.screenHeight()
-            if "bdwidth" in node.knobs():
-                imprint(node, {"w_init": width, "h_init": height})
-                node.knob("w_init").setVisible(False)
-                node.knob("h_init").setVisible(False)
-            refresh_node(node)
-
-    def _update_nodes(
-        self, placeholder, nodes, considered_nodes, offset_y=None
-    ):
-        """Adjust backdrop nodes dimensions and positions.
-
-        Considering some nodes sizes.
-
-        Args:
-            nodes (list): list of nodes to update
-            considered_nodes (list): list of nodes to consider while updating
-                positions and dimensions
-            offset (int): distance between copies
-        """
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-
-        min_x, min_y, max_x, max_y = get_extreme_positions(considered_nodes)
-
-        diff_x = diff_y = 0
-        contained_nodes = []  # for backdrops
-
-        if offset_y is None:
-            width_ph = placeholder_node.screenWidth()
-            height_ph = placeholder_node.screenHeight()
-            diff_y = max_y - min_y - height_ph
-            diff_x = max_x - min_x - width_ph
-            contained_nodes = [placeholder_node]
-            min_x = placeholder_node.xpos()
-            min_y = placeholder_node.ypos()
-        else:
-            siblings = get_nodes_by_names(placeholder.data["siblings"])
-            minX, _, maxX, _ = get_extreme_positions(siblings)
-            diff_y = max_y - min_y + 20
-            diff_x = abs(max_x - min_x - maxX + minX)
-            contained_nodes = considered_nodes
-
-        if diff_y <= 0 and diff_x <= 0:
-            return
-
-        for node in nodes:
-            refresh_node(node)
-
-            if (
-                node == placeholder_node
-                or node in considered_nodes
-            ):
-                continue
-
-            if (
-                not isinstance(node, nuke.BackdropNode)
-                or (
-                    isinstance(node, nuke.BackdropNode)
-                    and not set(contained_nodes) <= set(node.getNodes())
-                )
-            ):
-                if offset_y is None and node.xpos() >= min_x:
-                    node.setXpos(node.xpos() + diff_x)
-
-                if node.ypos() >= min_y:
-                    node.setYpos(node.ypos() + diff_y)
-
-            else:
-                width = node.screenWidth()
-                height = node.screenHeight()
-                node.knob("bdwidth").setValue(width + diff_x)
-                node.knob("bdheight").setValue(height + diff_y)
-
-            refresh_node(node)
-
-    def _set_created_connections(self, placeholder):
-        """
-        set inputs and outputs of created nodes"""
-
-        placeholder_node = nuke.toNode(placeholder.scene_identifier)
-        input_node, output_node = get_group_io_nodes(
-            placeholder.data["last_created"]
-        )
-        for node in placeholder_node.dependent():
-            for idx in range(node.inputs()):
-                if node.input(idx) == placeholder_node and output_node:
-                    node.setInput(idx, output_node)
-
-        for node in placeholder_node.dependencies():
-            for idx in range(placeholder_node.inputs()):
-                if placeholder_node.input(idx) == node and input_node:
-                    input_node.setInput(0, node)
-
-    def _create_sib_copies(self, placeholder):
-        """ creating copies of the palce_holder siblings (the ones who were
-        created with it) for the new nodes added
-
-        Returns :
-            copies (dict) : with copied nodes names and their copies
-        """
-
-        copies = {}
-        siblings = get_nodes_by_names(placeholder.data["siblings"])
-        for node in siblings:
-            new_node = duplicate_node(node)
-
-            x_init = int(new_node.knob("x_init").getValue())
-            y_init = int(new_node.knob("y_init").getValue())
-            new_node.setXYpos(x_init, y_init)
-            if isinstance(new_node, nuke.BackdropNode):
-                w_init = new_node.knob("w_init").getValue()
-                h_init = new_node.knob("h_init").getValue()
-                new_node.knob("bdwidth").setValue(w_init)
-                new_node.knob("bdheight").setValue(h_init)
-                refresh_node(node)
-
-            if "repre_id" in node.knobs().keys():
-                node.removeKnob(node.knob("repre_id"))
-            copies[node.name()] = new_node
-        return copies
-
-    def _set_copies_connections(self, placeholder, copies):
-        """Set inputs and outputs of the copies.
-
-        Args:
-            copies (dict): Copied nodes by their names.
-        """
-
-        last_input, last_output = get_group_io_nodes(
-            placeholder.data["last_created"]
-        )
-        siblings = get_nodes_by_names(placeholder.data["siblings"])
-        siblings_input, siblings_output = get_group_io_nodes(siblings)
-        copy_input = copies[siblings_input.name()]
-        copy_output = copies[siblings_output.name()]
-
-        for node_init in siblings:
-            if node_init == siblings_output:
-                continue
-
-            node_copy = copies[node_init.name()]
-            for node in node_init.dependent():
-                for idx in range(node.inputs()):
-                    if node.input(idx) != node_init:
-                        continue
-
-                    if node in siblings:
-                        copies[node.name()].setInput(idx, node_copy)
-                    else:
-                        last_input.setInput(0, node_copy)
-
-            for node in node_init.dependencies():
-                for idx in range(node_init.inputs()):
-                    if node_init.input(idx) != node:
-                        continue
-
-                    if node_init == siblings_input:
-                        copy_input.setInput(idx, node)
-                    elif node in siblings:
-                        node_copy.setInput(idx, copies[node.name()])
-                    else:
-                        node_copy.setInput(idx, last_output)
-
-        siblings_input.setInput(0, copy_output)
-
-
 def build_workfile_template(*args, **kwargs):
     builder = NukeTemplateBuilder(registered_host())
     builder.build_template(*args, **kwargs)
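
The two placeholder plugin classes removed above presumably move into the `workfile_build` plugins directory registered in `install()`. A guessed skeleton of the relocated load plugin — every name below comes from the removed code, but the file location and the trimmed body are assumptions, not confirmed by the visible diff:

    # Hypothetical plugins/workfile_build/load_placeholder.py skeleton.
    from ayon_core.pipeline.workfile.workfile_template_builder import (
        LoadPlaceholderItem,
        PlaceholderLoadMixin,
    )
    from ayon_core.hosts.nuke.api.workfile_template_builder import (  # assumed path
        NukePlaceholderPlugin,
    )


    class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin):
        identifier = "nuke.load"
        label = "Nuke load"

        def populate_placeholder(self, placeholder):
            # Same entry point as the removed class: defer to the mixin.
            self.populate_load_placeholder(placeholder)

        def get_placeholder_options(self, options=None):
            return self.get_load_plugin_options(options)
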
@@ -62,7 +62,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
         }
 
         # add attributes from the version to imprint to metadata knob
-        for k in ["source", "author", "fps"]:
+        for k in ["source", "fps"]:
             data_imprint[k] = version_attributes[k]
 
         # getting file path
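
`author` disappears from the imprinted keys in this and the following loader hunks, so the version attribute set is assumed to no longer carry it. A defensive variant of the loop (a sketch, not the repo's code) would skip absent keys rather than raise:

    def imprint_version_attributes(data_imprint, version_attributes,
                                   keys=("source", "fps")):
        # Copy only the attribute keys that actually exist on the
        # version entity, mirroring the reduced key list in these hunks.
        for key in keys:
            if key in version_attributes:
                data_imprint[key] = version_attributes[key]
        return data_imprint
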
@@ -206,7 +206,7 @@ class LoadBackdropNodes(load.LoaderPlugin):
             "colorspaceInput": colorspace,
         }
 
-        for k in ["source", "author", "fps"]:
+        for k in ["source", "fps"]:
             data_imprint[k] = version_attributes[k]
 
         # adding nodes to node graph
@@ -48,7 +48,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
             "frameEnd": last,
             "version": version_entity["version"],
         }
-        for k in ["source", "author", "fps"]:
+        for k in ["source", "fps"]:
             data_imprint[k] = version_attributes[k]
 
         # getting file path
@@ -123,7 +123,7 @@ class AlembicCameraLoader(load.LoaderPlugin):
         }
 
         # add attributes from the version to imprint to metadata knob
-        for k in ["source", "author", "fps"]:
+        for k in ["source", "fps"]:
             data_imprint[k] = version_attributes[k]
 
         # getting file path
@@ -9,7 +9,8 @@ from ayon_core.pipeline import (
     get_representation_path,
 )
 from ayon_core.pipeline.colorspace import (
-    get_imageio_file_rules_colorspace_from_filepath
+    get_imageio_file_rules_colorspace_from_filepath,
+    get_current_context_imageio_config_preset,
 )
 from ayon_core.hosts.nuke.api.lib import (
     get_imageio_input_colorspace,
@@ -197,7 +198,6 @@ class LoadClip(plugin.NukeLoader):
         "frameStart",
         "frameEnd",
         "source",
-        "author",
         "fps",
         "handleStart",
         "handleEnd",
@@ -347,8 +347,7 @@ class LoadClip(plugin.NukeLoader):
             "source": version_attributes.get("source"),
             "handleStart": str(self.handle_start),
             "handleEnd": str(self.handle_end),
-            "fps": str(version_attributes.get("fps")),
-            "author": version_attributes.get("author")
+            "fps": str(version_attributes.get("fps"))
         }
 
         last_version_entity = ayon_api.get_last_version_by_product_id(
@@ -547,9 +546,10 @@ class LoadClip(plugin.NukeLoader):
             f"Colorspace from representation colorspaceData: {colorspace}"
         )
 
+        config_data = get_current_context_imageio_config_preset()
         # check if any filerules are not applicable
         new_parsed_colorspace = get_imageio_file_rules_colorspace_from_filepath(  # noqa
-            filepath, "nuke", project_name
+            filepath, "nuke", project_name, config_data=config_data
        )
         self.log.debug(f"Colorspace new filerules: {new_parsed_colorspace}")
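
Resolving the imageio config preset once and passing it down avoids re-resolving the OCIO config on every file-rules lookup. The pattern in isolation — both function names appear in this diff; the filepath and project name are stand-ins:

    from ayon_core.pipeline.colorspace import (
        get_current_context_imageio_config_preset,
        get_imageio_file_rules_colorspace_from_filepath,
    )

    # Resolve the current context's config preset once...
    config_data = get_current_context_imageio_config_preset()

    # ...then reuse it for each filepath that needs a colorspace guess.
    colorspace = get_imageio_file_rules_colorspace_from_filepath(
        "/path/to/plate.1001.exr",  # stand-in path
        "nuke",
        "my_project",               # stand-in project name
        config_data=config_data,
    )
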
@@ -69,7 +69,6 @@ class LoadEffects(load.LoaderPlugin):
             "handleStart",
             "handleEnd",
             "source",
-            "author",
             "fps"
         ]:
             data_imprint[k] = version_attributes[k]
@@ -189,7 +188,6 @@ class LoadEffects(load.LoaderPlugin):
             "handleStart",
             "handleEnd",
             "source",
-            "author",
             "fps",
         ]:
             data_imprint[k] = version_attributes[k]
@@ -69,7 +69,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
             "handleStart",
             "handleEnd",
             "source",
-            "author",
             "fps"
         ]:
             data_imprint[k] = version_attributes[k]
@@ -192,7 +191,6 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
             "handleStart",
             "handleEnd",
             "source",
-            "author",
             "fps"
         ]:
             data_imprint[k] = version_attributes[k]
Some files were not shown because too many files have changed in this diff.