Merge branch 'develop' into enhancement/OP-5244_3dsmax-setting-resolution

This commit is contained in:
Kayla Man 2023-03-20 12:46:56 +08:00
commit 0ac8212beb
49 changed files with 1693 additions and 336 deletions

View file

@ -1,10 +1,14 @@
from .addon import (
get_fusion_version,
FusionAddon,
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
)
__all__ = (
"get_fusion_version",
"FusionAddon",
"FUSION_HOST_DIR",
"FUSION_VERSIONS_DICT",
)

View file

@ -1,8 +1,52 @@
import os
import re
from openpype.modules import OpenPypeModule, IHostAddon
from openpype.lib import Logger
FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
# FUSION_VERSIONS_DICT is used by the pre-launch hooks
# The keys correspond to all currently supported Fusion versions
# Each value is a list of corresponding Python home variables and a profile
# number, which is used by the profile hook to set Fusion profile variables.
FUSION_VERSIONS_DICT = {
9: ("FUSION_PYTHON36_HOME", 9),
16: ("FUSION16_PYTHON36_HOME", 16),
17: ("FUSION16_PYTHON36_HOME", 16),
18: ("FUSION_PYTHON3_HOME", 16),
}
def get_fusion_version(app_name):
"""
The function is triggered by the prelaunch hooks to get the fusion version.
`app_name` is obtained by prelaunch hooks from the
`launch_context.env.get("AVALON_APP_NAME")`.
To get a correct Fusion version, a version number should be present
in the `applications/fusion/variants` key
of the Blackmagic Fusion Application Settings.
"""
log = Logger.get_logger(__name__)
if not app_name:
return
app_version_candidates = re.findall(r"\d+", app_name)
if not app_version_candidates:
return
for app_version in app_version_candidates:
if int(app_version) in FUSION_VERSIONS_DICT:
return int(app_version)
else:
log.info(
"Unsupported Fusion version: {app_version}".format(
app_version=app_version
)
)
class FusionAddon(OpenPypeModule, IHostAddon):
name = "fusion"
@ -14,15 +58,11 @@ class FusionAddon(OpenPypeModule, IHostAddon):
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(FUSION_HOST_DIR, "hooks")
]
return [os.path.join(FUSION_HOST_DIR, "hooks")]
def add_implementation_envs(self, env, _app):
# Set default values if are not already set via settings
defaults = {
"OPENPYPE_LOG_NO_COLORS": "Yes"
}
defaults = {"OPENPYPE_LOG_NO_COLORS": "Yes"}
for key, value in defaults.items():
if not env.get(key):
env[key] = value

View file

@ -303,10 +303,18 @@ def get_frame_path(path):
return filename, padding, ext
def get_current_comp():
"""Hack to get current comp in this session"""
def get_fusion_module():
"""Get current Fusion instance"""
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.CurrentComp if fusion else None
return fusion
def get_current_comp():
"""Get current comp in this session"""
fusion = get_fusion_module()
if fusion is not None:
comp = fusion.CurrentComp
return comp
@contextlib.contextmanager

View file

@ -1,19 +1,19 @@
{
Locked = true,
Global = {
Paths = {
Map = {
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
["Reactor:"] = "$(REACTOR)",
["Config:"] = "UserPaths:Config;OpenPype:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
["UserPaths:"] = "UserData:;AllData:;Fusion:;Reactor:Deploy"
},
},
Script = {
PythonVersion = 3,
Python3Forced = true
},
Paths = {
Map = {
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
["Config:"] = "UserPaths:Config;OpenPype:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
},
}
},
Script = {
PythonVersion = 3,
Python3Forced = true
},
UserInterface = {
Language = "en_US"
},
},
}

View file

@ -0,0 +1,161 @@
import os
import shutil
import platform
from pathlib import Path
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
class FusionCopyPrefsPrelaunch(PreLaunchHook):
"""
Prepares local Fusion profile directory, copies existing Fusion profile.
This also sets FUSION MasterPrefs variable, which is used
to apply Master.prefs file to override some Fusion profile settings to:
- enable the OpenPype menu
- force Python 3 over Python 2
- force English interface
Master.prefs is defined in openpype/hosts/fusion/deploy/fusion_shared.prefs
"""
app_groups = ["fusion"]
order = 2
def get_fusion_profile_name(self, profile_version) -> str:
# Returns 'Default', unless FUSION16_PROFILE is set
return os.getenv(f"FUSION{profile_version}_PROFILE", "Default")
def get_fusion_profile_dir(self, profile_version) -> Path:
# Get FUSION_PROFILE_DIR variable
fusion_profile = self.get_fusion_profile_name(profile_version)
fusion_var_prefs_dir = os.getenv(
f"FUSION{profile_version}_PROFILE_DIR"
)
# Check if FUSION_PROFILE_DIR exists
if fusion_var_prefs_dir and Path(fusion_var_prefs_dir).is_dir():
fu_prefs_dir = Path(fusion_var_prefs_dir, fusion_profile)
self.log.info(f"{fusion_var_prefs_dir} is set to {fu_prefs_dir}")
return fu_prefs_dir
def get_profile_source(self, profile_version) -> Path:
"""Get Fusion preferences profile location.
See Per-User_Preferences_and_Paths on VFXpedia for reference.
"""
fusion_profile = self.get_fusion_profile_name(profile_version)
profile_source = self.get_fusion_profile_dir(profile_version)
if profile_source:
return profile_source
# otherwise get default location of the profile folder
fu_prefs_dir = f"Blackmagic Design/Fusion/Profiles/{fusion_profile}"
if platform.system() == "Windows":
profile_source = Path(os.getenv("AppData"), fu_prefs_dir)
elif platform.system() == "Darwin":
profile_source = Path(
"~/Library/Application Support/", fu_prefs_dir
).expanduser()
elif platform.system() == "Linux":
profile_source = Path("~/.fusion", fu_prefs_dir).expanduser()
self.log.info(
f"Locating source Fusion prefs directory: {profile_source}"
)
return profile_source
def get_copy_fusion_prefs_settings(self):
# Get copy preferences options from the global application settings
copy_fusion_settings = self.data["project_settings"]["fusion"].get(
"copy_fusion_settings", {}
)
if not copy_fusion_settings:
self.log.error("Copy prefs settings not found")
copy_status = copy_fusion_settings.get("copy_status", False)
force_sync = copy_fusion_settings.get("force_sync", False)
copy_path = copy_fusion_settings.get("copy_path") or None
if copy_path:
copy_path = Path(copy_path).expanduser()
return copy_status, copy_path, force_sync
def copy_fusion_profile(
self, copy_from: Path, copy_to: Path, force_sync: bool
) -> None:
"""On the first Fusion launch copy the contents of Fusion profile
directory to the working predefined location. If the Openpype profile
folder exists, skip copying, unless re-sync is checked.
If the prefs were not copied on the first launch,
clean Fusion profile will be created in fu_profile_dir.
"""
if copy_to.exists() and not force_sync:
self.log.info(
"Destination Fusion preferences folder already exists: "
f"{copy_to} "
)
return
self.log.info("Starting copying Fusion preferences")
self.log.debug(f"force_sync option is set to {force_sync}")
try:
copy_to.mkdir(exist_ok=True, parents=True)
except PermissionError:
self.log.warning(f"Creating the folder not permitted at {copy_to}")
return
if not copy_from.exists():
self.log.warning(f"Fusion preferences not found in {copy_from}")
return
for file in copy_from.iterdir():
if file.suffix in (
".prefs",
".def",
".blocklist",
".fu",
".toolbars",
):
# convert Path to str to be compatible with Python 3.6+
shutil.copy(str(file), str(copy_to))
self.log.info(
f"Successfully copied preferences: {copy_from} to {copy_to}"
)
def execute(self):
(
copy_status,
fu_profile_dir,
force_sync,
) = self.get_copy_fusion_prefs_settings()
# Get launched application context and return correct app version
app_name = self.launch_context.env.get("AVALON_APP_NAME")
app_version = get_fusion_version(app_name)
if app_version is None:
version_names = ", ".join(str(x) for x in FUSION_VERSIONS_DICT)
raise ApplicationLaunchFailed(
"Unable to detect valid Fusion version number from app "
f"name: {app_name}.\nMake sure to include at least a digit "
"to indicate the Fusion version like '18'.\n"
f"Detectable Fusion versions are: {version_names}"
)
_, profile_version = FUSION_VERSIONS_DICT[app_version]
fu_profile = self.get_fusion_profile_name(profile_version)
# do a copy of Fusion profile if copy_status toggle is enabled
if copy_status and fu_profile_dir is not None:
profile_source = self.get_profile_source(profile_version)
dest_folder = Path(fu_profile_dir, fu_profile)
self.copy_fusion_profile(profile_source, dest_folder, force_sync)
# Add temporary profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
fu_profile_dir_variable = f"FUSION{profile_version}_PROFILE_DIR"
self.log.info(f"Setting {fu_profile_dir_variable}: {fu_profile_dir}")
self.launch_context.env[fu_profile_dir_variable] = str(fu_profile_dir)
# Add custom Fusion Master Prefs and the temporary
# profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
self.launch_context.env[master_prefs_variable] = str(master_prefs)

View file

@ -1,32 +1,43 @@
import os
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
from openpype.hosts.fusion import FUSION_HOST_DIR
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
class FusionPrelaunch(PreLaunchHook):
"""Prepares OpenPype Fusion environment
Requires FUSION_PYTHON3_HOME to be defined in the environment for Fusion
to point at a valid Python 3 build for Fusion. That is Python 3.3-3.10
for Fusion 18 and Fusion 3.6 for Fusion 16 and 17.
This also sets FUSION16_MasterPrefs to apply the fusion master prefs
as set in openpype/hosts/fusion/deploy/fusion_shared.prefs to enable
the OpenPype menu and force Python 3 over Python 2.
"""
Prepares OpenPype Fusion environment.
Requires correct Python home variable to be defined in the environment
settings for Fusion to point at a valid Python 3 build for Fusion.
Python3 versions that are supported by Fusion:
Fusion 9, 16, 17 : Python 3.6
Fusion 18 : Python 3.6 - 3.10
"""
app_groups = ["fusion"]
order = 1
def execute(self):
# making sure python 3 is installed at provided path
# Py 3.3-3.10 for Fusion 18+ or Py 3.6 for Fu 16-17
py3_var = "FUSION_PYTHON3_HOME"
app_data = self.launch_context.env.get("AVALON_APP_NAME")
app_version = get_fusion_version(app_data)
if not app_version:
raise ApplicationLaunchFailed(
"Fusion version information not found in System settings.\n"
"The key field in the 'applications/fusion/variants' should "
"consist a number, corresponding to major Fusion version."
)
py3_var, _ = FUSION_VERSIONS_DICT[app_version]
fusion_python3_home = self.launch_context.env.get(py3_var, "")
self.log.info(f"Looking for Python 3 in: {fusion_python3_home}")
for path in fusion_python3_home.split(os.pathsep):
# Allow defining multiple paths to allow "fallback" to other
# path. But make to set only a single path as final variable.
# Allow defining multiple paths, separated by os.pathsep,
# to allow "fallback" to other path.
# But make to set only a single path as final variable.
py3_dir = os.path.normpath(path)
if os.path.isdir(py3_dir):
break
@ -43,19 +54,10 @@ class FusionPrelaunch(PreLaunchHook):
self.launch_context.env[py3_var] = py3_dir
# Fusion 18+ requires FUSION_PYTHON3_HOME to also be on PATH
self.launch_context.env["PATH"] += ";" + py3_dir
if app_version >= 18:
self.launch_context.env["PATH"] += os.pathsep + py3_dir
# Fusion 16 and 17 use FUSION16_PYTHON36_HOME instead of
# FUSION_PYTHON3_HOME and will only work with a Python 3.6 version
# TODO: Detect Fusion version to only set for specific Fusion build
self.launch_context.env["FUSION16_PYTHON36_HOME"] = py3_dir
self.launch_context.env[py3_var] = py3_dir
# Add our Fusion Master Prefs which is the only way to customize
# Fusion to define where it can read custom scripts and tools from
self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}")
self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR
pref_var = "FUSION16_MasterPrefs" # used by Fusion 16, 17 and 18
prefs = os.path.join(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
self.log.info(f"Setting {pref_var}: {prefs}")
self.launch_context.env[pref_var] = prefs

View file

@ -8,6 +8,7 @@ from openpype.hosts.max.api.lib import (
get_current_renderer,
get_default_render_folder
)
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
@ -34,14 +35,20 @@ class RenderProducts(object):
filename,
container)
context = get_current_project_asset()
startFrame = context["data"].get("frameStart")
endFrame = context["data"].get("frameEnd") + 1
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
full_render_list = []
beauty = self.beauty_render_product(output_file, img_fmt)
full_render_list.append(beauty)
full_render_list = self.beauty_render_product(output_file,
startFrame,
endFrame,
img_fmt)
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer == "VUE_File_Renderer":
return full_render_list
@ -54,6 +61,8 @@ class RenderProducts(object):
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = self.render_elements_product(output_file,
startFrame,
endFrame,
img_fmt)
if render_elem_list:
full_render_list.extend(iter(render_elem_list))
@ -61,18 +70,24 @@ class RenderProducts(object):
if renderer == "Arnold":
aov_list = self.arnold_render_product(output_file,
startFrame,
endFrame,
img_fmt)
if aov_list:
full_render_list.extend(iter(aov_list))
return full_render_list
def beauty_render_product(self, folder, fmt):
beauty_output = f"{folder}.####.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
return beauty_output
def beauty_render_product(self, folder, startFrame, endFrame, fmt):
beauty_frame_range = []
for f in range(startFrame, endFrame):
beauty_output = f"{folder}.{f}.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
beauty_frame_range.append(beauty_output)
return beauty_frame_range
# TODO: Get the arnold render product
def arnold_render_product(self, folder, fmt):
def arnold_render_product(self, folder, startFrame, endFrame, fmt):
"""Get all the Arnold AOVs"""
aovs = []
@ -85,15 +100,17 @@ class RenderProducts(object):
for i in range(aov_group_num):
# get the specific AOV group
for aov in aov_mgr.drivers[i].aov_list:
render_element = f"{folder}_{aov.name}.####.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
for f in range(startFrame, endFrame):
render_element = f"{folder}_{aov.name}.{f}.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
# close the AOVs manager window
amw.close()
return aovs
def render_elements_product(self, folder, fmt):
def render_elements_product(self, folder, startFrame, endFrame, fmt):
"""Get all the render element output files. """
render_dirname = []
@ -104,9 +121,10 @@ class RenderProducts(object):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
render_element = f"{folder}_{renderpass}.####.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
for f in range(startFrame, endFrame):
render_element = f"{folder}_{renderpass}.{f}.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
return render_dirname

View file

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating raw max scene."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreateMaxScene(plugin.MaxCreator):
identifier = "io.openpype.creators.max.maxScene"
label = "Max Scene"
family = "maxScene"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreateMaxScene, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating point cloud."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreatePointCloud(plugin.MaxCreator):
identifier = "io.openpype.creators.max.pointcloud"
label = "Point Cloud"
family = "pointcloud"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreatePointCloud, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container = rt.getNodeByName(instance.data.get("instance_node"))
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

View file

@ -9,7 +9,8 @@ from openpype.hosts.max.api import lib
class MaxSceneLoader(load.LoaderPlugin):
"""Max Scene Loader"""
families = ["camera"]
families = ["camera",
"maxScene"]
representations = ["max"]
order = -8
icon = "code-fork"
@ -46,8 +47,7 @@ class MaxSceneLoader(load.LoaderPlugin):
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
max_objects = self.get_container_children(node)
max_objects = node.Children
for max_object in max_objects:
max_object.source = path

View file

@ -0,0 +1,51 @@
import os
from openpype.pipeline import (
load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class PointCloudLoader(load.LoaderPlugin):
"""Point Cloud Loader"""
families = ["pointcloud"]
representations = ["prt"]
order = -8
icon = "code-fork"
color = "green"
def load(self, context, name=None, namespace=None, data=None):
"""load point cloud by tyCache"""
from pymxs import runtime as rt
filepath = os.path.normpath(self.fname)
obj = rt.tyCache()
obj.filename = filepath
prt_container = rt.getNodeByName(f"{obj.name}")
return containerise(
name, [prt_container], context, loader=self.__class__.__name__)
def update(self, container, representation):
"""update the container"""
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
prt_objects = self.get_container_children(node)
for prt_object in prt_objects:
prt_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def remove(self, container):
"""remove the container"""
from pymxs import runtime as rt
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

View file

@ -20,7 +20,8 @@ class ExtractMaxSceneRaw(publish.Extractor,
order = pyblish.api.ExtractorOrder - 0.2
label = "Extract Max Scene (Raw)"
hosts = ["max"]
families = ["camera"]
families = ["camera",
"maxScene"]
optional = True
def process(self, instance):

View file

@ -0,0 +1,207 @@
import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import (
maintained_selection
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
def get_setting(project_setting=None):
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
return (project_setting["max"]["PointCloud"])
class ExtractPointCloud(publish.Extractor):
"""
Extract PRT format with tyFlow operators
Notes:
Currently only works for the default partition setting
Args:
export_particle(): sets up all job arguments for attributes
to be exported in MAXscript
get_operators(): get the export_particle operator
get_custom_attr(): get all custom channel attributes from Openpype
setting and sets it as job arguments before exporting
get_files(): get the files with tyFlow naming convention
before publishing
partition_output_name(): get the naming with partition settings.
get_partition(): get partition value
"""
order = pyblish.api.ExtractorOrder - 0.2
label = "Extract Point Cloud"
hosts = ["max"]
families = ["pointcloud"]
def process(self, instance):
start = int(instance.context.data.get("frameStart"))
end = int(instance.context.data.get("frameEnd"))
container = instance.data["instance_node"]
self.log.info("Extracting PRT...")
stagingdir = self.staging_dir(instance)
filename = "{name}.prt".format(**instance.data)
path = os.path.join(stagingdir, filename)
with maintained_selection():
job_args = self.export_particle(container,
start,
end,
path)
for job in job_args:
rt.execute(job)
self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []
self.log.info("Writing PRT with TyFlow Plugin...")
filenames = self.get_files(container, path, start, end)
self.log.debug("filenames: {0}".format(filenames))
partition = self.partition_output_name(container)
representation = {
'name': 'prt',
'ext': 'prt',
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": stagingdir,
"outputName": partition # partition value
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
path))
def export_particle(self,
container,
start,
end,
filepath):
job_args = []
opt_list = self.get_operators(container)
for operator in opt_list:
start_frame = "{0}.frameStart={1}".format(operator,
start)
job_args.append(start_frame)
end_frame = "{0}.frameEnd={1}".format(operator,
end)
job_args.append(end_frame)
filepath = filepath.replace("\\", "/")
prt_filename = '{0}.PRTFilename="{1}"'.format(operator,
filepath)
job_args.append(prt_filename)
# Partition
mode = "{0}.PRTPartitionsMode=2".format(operator)
job_args.append(mode)
additional_args = self.get_custom_attr(operator)
for args in additional_args:
job_args.append(args)
prt_export = "{0}.exportPRT()".format(operator)
job_args.append(prt_export)
return job_args
def get_operators(self, container):
"""Get Export Particles Operator"""
opt_list = []
node = rt.getNodebyName(container)
selection_list = list(node.Children)
for sel in selection_list:
obj = sel.baseobject
# TODO: to see if it can be used maxscript instead
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
sub_anim = rt.getsubanim(obj, anim_name)
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
opt_list.append(opt)
return opt_list
def get_custom_attr(self, operator):
"""Get Custom Attributes"""
custom_attr_list = []
attr_settings = get_setting()["attribute"]
for key, value in attr_settings.items():
custom_attr = "{0}.PRTChannels_{1}=True".format(operator,
value)
self.log.debug(
"{0} will be added as custom attribute".format(key)
)
custom_attr_list.append(custom_attr)
return custom_attr_list
def get_files(self,
container,
path,
start_frame,
end_frame):
"""
Note:
Set the filenames accordingly to the tyFlow file
naming extension for the publishing purpose
Actual File Output from tyFlow:
<SceneFile>__part<PartitionStart>of<PartitionCount>.<frame>.prt
e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt
"""
filenames = []
filename = os.path.basename(path)
orig_name, ext = os.path.splitext(filename)
partition_count, partition_start = self.get_partition(container)
for frame in range(int(start_frame), int(end_frame) + 1):
actual_name = "{}__part{:03}of{}_{:05}".format(orig_name,
partition_start,
partition_count,
frame)
actual_filename = path.replace(orig_name, actual_name)
filenames.append(os.path.basename(actual_filename))
return filenames
def partition_output_name(self, container):
"""
Notes:
Partition output name set for mapping
the published file output
todo:
Customizes the setting for the output
"""
partition_count, partition_start = self.get_partition(container)
partition = "_part{:03}of{}".format(partition_start,
partition_count)
return partition
def get_partition(self, container):
"""
Get Partition Value
"""
opt_list = self.get_operators(container)
for operator in opt_list:
count = rt.execute(f'{operator}.PRTPartitionsCount')
start = rt.execute(f'{operator}.PRTPartitionsFrom')
return count, start

View file

@ -0,0 +1,23 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
class ValidateMaxContents(pyblish.api.InstancePlugin):
"""Validates Max contents.
Check if MaxScene container includes any contents underneath.
"""
order = pyblish.api.ValidatorOrder
families = ["camera",
"maxScene",
"maxrender"]
hosts = ["max"]
label = "Max Scene Contents"
def process(self, instance):
container = rt.getNodeByName(instance.data["instance_node"])
if not list(container.Children):
raise PublishValidationError("No content found in the container")

View file

@ -0,0 +1,191 @@
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
def get_setting(project_setting=None):
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
return (project_setting["max"]["PointCloud"])
class ValidatePointCloud(pyblish.api.InstancePlugin):
"""Validate that workfile was saved."""
order = pyblish.api.ValidatorOrder
families = ["pointcloud"]
hosts = ["max"]
label = "Validate Point Cloud"
def process(self, instance):
"""
Notes:
1. Validate the container only include tyFlow objects
2. Validate if tyFlow operator Export Particle exists
3. Validate if the export mode of Export Particle is at PRT format
4. Validate the partition count and range set as default value
Partition Count : 100
Partition Range : 1 to 1
5. Validate if the custom attribute(s) exist as parameter(s)
of export_particle operator
"""
invalid = self.get_tyFlow_object(instance)
if invalid:
raise PublishValidationError("Non tyFlow object "
"found: {}".format(invalid))
invalid = self.get_tyFlow_operator(instance)
if invalid:
raise PublishValidationError("tyFlow ExportParticle operator "
"not found: {}".format(invalid))
invalid = self.validate_export_mode(instance)
if invalid:
raise PublishValidationError("The export mode is not at PRT")
invalid = self.validate_partition_value(instance)
if invalid:
raise PublishValidationError("tyFlow Partition setting is "
"not at the default value")
invalid = self.validate_custom_attribute(instance)
if invalid:
raise PublishValidationError("Custom Attribute not found "
":{}".format(invalid))
def get_tyFlow_object(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow container "
"for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
sel_tmp = str(sel)
if rt.classOf(sel) in [rt.tyFlow,
rt.Editable_Mesh]:
if "tyFlow" not in sel_tmp:
invalid.append(sel)
else:
invalid.append(sel)
return invalid
def get_tyFlow_operator(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow object "
"for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
bool_list = []
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
bool_list.append(str(boolean))
# if the export_particles property is not there
# it means there is not a "Export Particle" operator
if "True" not in bool_list:
self.log.error("Operator 'Export Particles' not found!")
invalid.append(sel)
return invalid
def validate_custom_attribute(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow custom "
"attributes for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
attributes = get_setting()["attribute"]
for key, value in attributes.items():
custom_attr = "{0}.PRTChannels_{1}".format(opt,
value)
try:
rt.execute(custom_attr)
except RuntimeError:
invalid.add(key)
return invalid
def validate_partition_value(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow partition "
"value for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
count = rt.execute(f'{opt}.PRTPartitionsCount')
if count != 100:
invalid.append(count)
start = rt.execute(f'{opt}.PRTPartitionsFrom')
if start != 1:
invalid.append(start)
end = rt.execute(f'{opt}.PRTPartitionsTo')
if end != 1:
invalid.append(end)
return invalid
def validate_export_mode(self, instance):
invalid = []
container = instance.data["instance_node"]
self.log.info("Validating tyFlow export "
"mode for {}".format(container))
con = rt.getNodeByName(container)
selection_list = list(con.Children)
for sel in selection_list:
obj = sel.baseobject
anim_names = rt.getsubanimnames(obj)
for anim_name in anim_names:
# get all the names of the related tyFlow nodes
sub_anim = rt.getsubanim(obj, anim_name)
# check if there is export particle operator
boolean = rt.isProperty(sub_anim, "Export_Particles")
event_name = sub_anim.name
if boolean:
opt = "${0}.{1}.export_particles".format(sel.name,
event_name)
export_mode = rt.execute(f'{opt}.exportMode')
if export_mode != 1:
invalid.append(export_mode)
return invalid

View file

@ -13,6 +13,7 @@ class CreateAnimation(plugin.Creator):
icon = "male"
write_color_sets = False
write_face_sets = False
include_parent_hierarchy = False
include_user_defined_attributes = False
def __init__(self, *args, **kwargs):
@ -37,7 +38,7 @@ class CreateAnimation(plugin.Creator):
self.data["visibleOnly"] = False
# Include the groups above the out_SET content
self.data["includeParentHierarchy"] = False # Include parent groups
self.data["includeParentHierarchy"] = self.include_parent_hierarchy
# Default to exporting world-space
self.data["worldSpace"] = True

View file

@ -0,0 +1,178 @@
import os
import json
from collections import defaultdict
from maya import cmds
from openpype.pipeline import (
InventoryAction, get_representation_context, get_representation_path
)
from openpype.hosts.maya.api.lib import get_container_members, get_id
class ConnectYetiRig(InventoryAction):
"""Connect Yeti Rig with an animation or pointcache."""
label = "Connect Yeti Rig"
icon = "link"
color = "white"
def process(self, containers):
# Validate selection is more than 1.
message = (
"Only 1 container selected. 2+ containers needed for this action."
)
if len(containers) == 1:
self.display_warning(message)
return
# Categorize containers by family.
containers_by_family = defaultdict(list)
for container in containers:
family = get_representation_context(
container["representation"]
)["subset"]["data"]["family"]
containers_by_family[family].append(container)
# Validate to only 1 source container.
source_containers = containers_by_family.get("animation", [])
source_containers += containers_by_family.get("pointcache", [])
source_container_namespaces = [
x["namespace"] for x in source_containers
]
message = (
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
"\"animation\" or \"pointcache\".".format(
len(source_containers), source_container_namespaces
)
)
if len(source_containers) != 1:
self.display_warning(message)
return
source_container = source_containers[0]
source_ids = self.nodes_by_id(source_container)
# Target containers.
target_ids = {}
inputs = []
yeti_rig_containers = containers_by_family.get("yetiRig")
if not yeti_rig_containers:
self.display_warning(
"Select at least one yetiRig container"
)
return
for container in yeti_rig_containers:
target_ids.update(self.nodes_by_id(container))
maya_file = get_representation_path(
get_representation_context(
container["representation"]
)["representation"]
)
_, ext = os.path.splitext(maya_file)
settings_file = maya_file.replace(ext, ".rigsettings")
if not os.path.exists(settings_file):
continue
with open(settings_file) as f:
inputs.extend(json.load(f)["inputs"])
# Compare loaded connections to scene.
for input in inputs:
source_node = source_ids.get(input["sourceID"])
target_node = target_ids.get(input["destinationID"])
if not source_node or not target_node:
self.log.debug(
"Could not find nodes for input:\n" +
json.dumps(input, indent=4, sort_keys=True)
)
continue
source_attr, target_attr = input["connections"]
if not cmds.attributeQuery(
source_attr, node=source_node, exists=True
):
self.log.debug(
"Could not find attribute {} on node {} for "
"input:\n{}".format(
source_attr,
source_node,
json.dumps(input, indent=4, sort_keys=True)
)
)
continue
if not cmds.attributeQuery(
target_attr, node=target_node, exists=True
):
self.log.debug(
"Could not find attribute {} on node {} for "
"input:\n{}".format(
target_attr,
target_node,
json.dumps(input, indent=4, sort_keys=True)
)
)
continue
source_plug = "{}.{}".format(
source_node, source_attr
)
target_plug = "{}.{}".format(
target_node, target_attr
)
if cmds.isConnected(
source_plug, target_plug, ignoreUnitConversion=True
):
self.log.debug(
"Connection already exists: {} -> {}".format(
source_plug, target_plug
)
)
continue
cmds.connectAttr(source_plug, target_plug, force=True)
self.log.debug(
"Connected attributes: {} -> {}".format(
source_plug, target_plug
)
)
def nodes_by_id(self, container):
ids = {}
for member in get_container_members(container):
id = get_id(member)
if not id:
continue
ids[id] = member
return ids
def display_warning(self, message, show_cancel=False):
"""Show feedback to user.
Returns:
bool
"""
from qtpy import QtWidgets
accept = QtWidgets.QMessageBox.Ok
if show_cancel:
buttons = accept | QtWidgets.QMessageBox.Cancel
else:
buttons = accept
state = QtWidgets.QMessageBox.warning(
None,
"",
message,
buttons=buttons,
defaultButton=accept
)
return state == accept

View file

@ -0,0 +1,332 @@
import os
import copy
from openpype.lib import EnumDef
from openpype.pipeline import (
load,
get_representation_context
)
from openpype.pipeline.load.utils import get_representation_path_from_context
from openpype.pipeline.colorspace import (
get_imageio_colorspace_from_filepath,
get_imageio_config,
get_imageio_file_rules
)
from openpype.settings import get_project_settings
from openpype.hosts.maya.api.pipeline import containerise
from openpype.hosts.maya.api.lib import (
unique_namespace,
namespaced
)
from maya import cmds
def create_texture():
"""Create place2dTexture with file node with uv connections
Mimics Maya "file [Texture]" creation.
"""
place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d")
file = cmds.shadingNode("file", asTexture=True, name="file")
connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV",
"mirrorU", "mirrorV", "stagger", "wrapV", "wrapU",
"repeatUV", "offset", "noiseUV", "vertexUvThree",
"vertexUvTwo", "vertexUvOne", "vertexCameraOne"]
for attr in connections:
src = "{}.{}".format(place, attr)
dest = "{}.{}".format(file, attr)
cmds.connectAttr(src, dest)
cmds.connectAttr(place + '.outUV', file + '.uvCoord')
cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize')
return file, place
def create_projection():
"""Create texture with place3dTexture and projection
Mimics Maya "file [Projection]" creation.
"""
file, place = create_texture()
projection = cmds.shadingNode("projection", asTexture=True,
name="projection")
place3d = cmds.shadingNode("place3dTexture", asUtility=True,
name="place3d")
cmds.connectAttr(place3d + '.worldInverseMatrix[0]',
projection + ".placementMatrix")
cmds.connectAttr(file + '.outColor', projection + ".image")
return file, place, projection, place3d
def create_stencil():
"""Create texture with extra place2dTexture offset and stencil
Mimics Maya "file [Stencil]" creation.
"""
file, place = create_texture()
place_stencil = cmds.shadingNode("place2dTexture", asUtility=True,
name="place2d_stencil")
stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil")
for src_attr, dest_attr in [
("outUV", "uvCoord"),
("outUvFilterSize", "uvFilterSize")
]:
src_plug = "{}.{}".format(place_stencil, src_attr)
cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr))
cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr))
return file, place, stencil, place_stencil
class FileNodeLoader(load.LoaderPlugin):
"""File node loader."""
families = ["image", "plate", "render"]
label = "Load file node"
representations = ["exr", "tif", "png", "jpg"]
icon = "image"
color = "orange"
order = 2
options = [
EnumDef(
"mode",
items={
"texture": "Texture",
"projection": "Projection",
"stencil": "Stencil"
},
default="texture",
label="Texture Mode"
)
]
def load(self, context, name, namespace, data):
asset = context['asset']['name']
namespace = namespace or unique_namespace(
asset + "_",
prefix="_" if asset[0].isdigit() else "",
suffix="_",
)
with namespaced(namespace, new=True) as namespace:
# Create the nodes within the namespace
nodes = {
"texture": create_texture,
"projection": create_projection,
"stencil": create_stencil
}[data.get("mode", "texture")]()
file_node = cmds.ls(nodes, type="file")[0]
self._apply_representation_context(context, file_node)
# For ease of access for the user select all the nodes and select
# the file node last so that UI shows its attributes by default
cmds.select(list(nodes) + [file_node], replace=True)
return containerise(
name=name,
namespace=namespace,
nodes=nodes,
context=context,
loader=self.__class__.__name__
)
def update(self, container, representation):
members = cmds.sets(container['objectName'], query=True)
file_node = cmds.ls(members, type="file")[0]
context = get_representation_context(representation)
self._apply_representation_context(context, file_node)
# Update representation
cmds.setAttr(
container["objectName"] + ".representation",
str(representation["_id"]),
type="string"
)
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
members = cmds.sets(container['objectName'], query=True)
cmds.lockNode(members, lock=False)
cmds.delete([container['objectName']] + members)
# Clean up the namespace
try:
cmds.namespace(removeNamespace=container['namespace'],
deleteNamespaceContent=True)
except RuntimeError:
pass
def _apply_representation_context(self, context, file_node):
"""Update the file node to match the context.
This sets the file node's attributes for:
- file path
- udim tiling mode (if it is an udim tile)
- use frame extension (if it is a sequence)
- colorspace
"""
repre_context = context["representation"]["context"]
has_frames = repre_context.get("frame") is not None
has_udim = repre_context.get("udim") is not None
# Set UV tiling mode if UDIM tiles
if has_udim:
cmds.setAttr(file_node + ".uvTilingMode", 3) # UDIM-tiles
else:
cmds.setAttr(file_node + ".uvTilingMode", 0) # off
# Enable sequence if publish has `startFrame` and `endFrame` and
# `startFrame != endFrame`
if has_frames and self._is_sequence(context):
# When enabling useFrameExtension maya automatically
# connects an expression to <file>.frameExtension to set
# the current frame. However, this expression is generated
# with some delay and thus it'll show a warning if frame 0
# doesn't exist because we're explicitly setting the <f>
# token.
cmds.setAttr(file_node + ".useFrameExtension", True)
else:
cmds.setAttr(file_node + ".useFrameExtension", False)
# Set the file node path attribute
path = self._format_path(context)
cmds.setAttr(file_node + ".fileTextureName", path, type="string")
# Set colorspace
colorspace = self._get_colorspace(context)
if colorspace:
cmds.setAttr(file_node + ".colorSpace", colorspace, type="string")
else:
self.log.debug("Unknown colorspace - setting colorspace skipped.")
def _is_sequence(self, context):
"""Check whether frameStart and frameEnd are not the same."""
version = context.get("version", {})
representation = context.get("representation", {})
for doc in [representation, version]:
# Frame range can be set on version or representation.
# When set on representation it overrides version data.
data = doc.get("data", {})
start = data.get("frameStartHandle", data.get("frameStart", None))
end = data.get("frameEndHandle", data.get("frameEnd", None))
if start is None or end is None:
continue
if start != end:
return True
else:
return False
return False
def _get_colorspace(self, context):
"""Return colorspace of the file to load.
Retrieves the explicit colorspace from the publish. If no colorspace
data is stored with published content then project imageio settings
are used to make an assumption of the colorspace based on the file
rules. If no file rules match then None is returned.
Returns:
str or None: The colorspace of the file or None if not detected.
"""
# We can't apply color spaces if management is not enabled
if not cmds.colorManagementPrefs(query=True, cmEnabled=True):
return
representation = context["representation"]
colorspace_data = representation.get("data", {}).get("colorspaceData")
if colorspace_data:
return colorspace_data["colorspace"]
# Assume colorspace from filepath based on project settings
project_name = context["project"]["name"]
host_name = os.environ.get("AVALON_APP")
project_settings = get_project_settings(project_name)
config_data = get_imageio_config(
project_name, host_name,
project_settings=project_settings
)
file_rules = get_imageio_file_rules(
project_name, host_name,
project_settings=project_settings
)
path = get_representation_path_from_context(context)
colorspace = get_imageio_colorspace_from_filepath(
path=path,
host_name=host_name,
project_name=project_name,
config_data=config_data,
file_rules=file_rules,
project_settings=project_settings
)
return colorspace
def _format_path(self, context):
"""Format the path with correct tokens for frames and udim tiles."""
context = copy.deepcopy(context)
representation = context["representation"]
template = representation.get("data", {}).get("template")
if not template:
# No template to find token locations for
return get_representation_path_from_context(context)
def _placeholder(key):
# Substitute with a long placeholder value so that potential
# custom formatting with padding doesn't find its way into
# our formatting, so that <f> wouldn't be padded as 0<f>
return "___{}___".format(key)
# We format UDIM and Frame numbers with their specific tokens. To do so
# we in-place change the representation context data to format the path
# with our own data
tokens = {
"frame": "<f>",
"udim": "<UDIM>"
}
has_tokens = False
repre_context = representation["context"]
for key, _token in tokens.items():
if key in repre_context:
repre_context[key] = _placeholder(key)
has_tokens = True
# Replace with our custom template that has the tokens set
representation["data"]["template"] = template
path = get_representation_path_from_context(context)
if has_tokens:
for key, token in tokens.items():
if key in repre_context:
path = path.replace(_placeholder(key), token)
return path

View file

@ -1,17 +1,12 @@
import os
from collections import defaultdict
import maya.cmds as cmds
from openpype.settings import get_project_settings
from openpype.settings import get_current_project_settings
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib
class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
"""
This loader will load Yeti rig. You can select something in scene and if it
has same ID as mesh published with rig, their shapes will be linked
together.
"""
"""This loader will load Yeti rig."""
families = ["yetiRig"]
representations = ["ma"]
@ -22,72 +17,31 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
color = "orange"
def process_reference(
self, context, name=None, namespace=None, options=None):
self, context, name=None, namespace=None, options=None
):
import maya.cmds as cmds
# get roots of selected hierarchies
selected_roots = []
for sel in cmds.ls(sl=True, long=True):
selected_roots.append(sel.split("|")[1])
# get all objects under those roots
selected_hierarchy = []
for root in selected_roots:
selected_hierarchy.append(cmds.listRelatives(
root,
allDescendents=True) or [])
# flatten the list and filter only shapes
shapes_flat = []
for root in selected_hierarchy:
shapes = cmds.ls(root, long=True, type="mesh") or []
for shape in shapes:
shapes_flat.append(shape)
# create dictionary of cbId and shape nodes
scene_lookup = defaultdict(list)
for node in shapes_flat:
cb_id = lib.get_id(node)
scene_lookup[cb_id] = node
# load rig
group_name = "{}:{}".format(namespace, name)
with lib.maintained_selection():
file_url = self.prepare_root_value(self.fname,
context["project"]["name"])
nodes = cmds.file(file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName="{}:{}".format(namespace, name))
file_url = self.prepare_root_value(
self.fname, context["project"]["name"]
)
nodes = cmds.file(
file_url,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=group_name
)
# for every shape node we've just loaded find matching shape by its
# cbId in selection. If found outMesh of scene shape will connect to
# inMesh of loaded shape.
for destination_node in nodes:
source_node = scene_lookup[lib.get_id(destination_node)]
if source_node:
self.log.info("found: {}".format(source_node))
self.log.info(
"creating connection to {}".format(destination_node))
cmds.connectAttr("{}.outMesh".format(source_node),
"{}.inMesh".format(destination_node),
force=True)
groupName = "{}:{}".format(namespace, name)
settings = get_project_settings(os.environ['AVALON_PROJECT'])
colors = settings['maya']['load']['colors']
c = colors.get('yetiRig')
settings = get_current_project_settings()
colors = settings["maya"]["load"]["colors"]
c = colors.get("yetiRig")
if c is not None:
cmds.setAttr(groupName + ".useOutlinerColor", 1)
cmds.setAttr(groupName + ".outlinerColor",
(float(c[0])/255),
(float(c[1])/255),
(float(c[2])/255)
cmds.setAttr(group_name + ".useOutlinerColor", 1)
cmds.setAttr(
group_name + ".outlinerColor",
(float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
)
self[:] = nodes

View file

@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
"""Maya look extractor."""
import os
import sys
import json
import tempfile
import platform
import contextlib
import subprocess
from collections import OrderedDict
from maya import cmds # noqa
@ -23,6 +21,15 @@ COPY = 1
HARDLINK = 2
def _has_arnold():
"""Return whether the arnold package is available and can be imported."""
try:
import arnold # noqa: F401
return True
except (ImportError, ModuleNotFoundError):
return False
def escape_space(path):
"""Ensure path is enclosed by quotes to allow paths with spaces"""
return '"{}"'.format(path) if " " in path else path
@ -548,7 +555,7 @@ class ExtractLook(publish.Extractor):
color_space = cmds.getAttr(color_space_attr)
except ValueError:
# node doesn't have color space attribute
if cmds.loadPlugin("mtoa", quiet=True):
if _has_arnold():
img_info = image_info(filepath)
color_space = guess_colorspace(img_info)
else:
@ -560,7 +567,7 @@ class ExtractLook(publish.Extractor):
render_colorspace])
else:
if cmds.loadPlugin("mtoa", quiet=True):
if _has_arnold():
img_info = image_info(filepath)
color_space = guess_colorspace(img_info)
if color_space == "sRGB":

View file

@ -13,6 +13,22 @@ from openpype.pipeline.publish import (
from openpype.hosts.maya.api import lib
def convert_to_int_or_float(string_value):
# Order of types are important here since float can convert string
# representation of integer.
types = [int, float]
for t in types:
try:
result = t(string_value)
except ValueError:
continue
else:
return result
# Neither integer or float.
return string_value
def get_redshift_image_format_labels():
"""Return nice labels for Redshift image formats."""
var = "$g_redshiftImageFormatLabels"
@ -242,10 +258,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))
# load validation definitions from settings
validation_settings = (
instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get( # noqa: E501
"{}_render_attributes".format(renderer)) or []
)
settings_lights_flag = instance.context.data["project_settings"].get(
"maya", {}).get(
"RenderSettings", {}).get(
@ -253,17 +265,67 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
instance_lights_flag = instance.data.get("renderSetupIncludeLights")
if settings_lights_flag != instance_lights_flag:
cls.log.warning('Instance flag for "Render Setup Include Lights" is set to {0} and Settings flag is set to {1}'.format(instance_lights_flag, settings_lights_flag)) # noqa
cls.log.warning(
"Instance flag for \"Render Setup Include Lights\" is set to "
"{} and Settings flag is set to {}".format(
instance_lights_flag, settings_lights_flag
)
)
# go through definitions and test if such node.attribute exists.
# if so, compare its value from the one required.
for attr, value in OrderedDict(validation_settings).items():
cls.log.debug("{}: {}".format(attr, value))
if "." not in attr:
cls.log.warning("Skipping invalid attribute defined in "
"validation settings: '{}'".format(attr))
for attribute, data in cls.get_nodes(instance, renderer).items():
# Validate the settings has values.
if not data["values"]:
cls.log.error(
"Settings for {}.{} is missing values.".format(
node, attribute
)
)
continue
for node in data["nodes"]:
try:
render_value = cmds.getAttr(
"{}.{}".format(node, attribute)
)
except RuntimeError:
invalid = True
cls.log.error(
"Cannot get value of {}.{}".format(node, attribute)
)
else:
if render_value not in data["values"]:
invalid = True
cls.log.error(
"Invalid value {} set on {}.{}. Expecting "
"{}".format(
render_value, node, attribute, data["values"]
)
)
return invalid
@classmethod
def get_nodes(cls, instance, renderer):
maya_settings = instance.context.data["project_settings"]["maya"]
validation_settings = (
maya_settings["publish"]["ValidateRenderSettings"].get(
"{}_render_attributes".format(renderer)
) or []
)
result = {}
for attr, values in OrderedDict(validation_settings).items():
cls.log.debug("{}: {}".format(attr, values))
if "." not in attr:
cls.log.warning(
"Skipping invalid attribute defined in validation "
"settings: \"{}\"".format(attr)
)
continue
values = [convert_to_int_or_float(v) for v in values]
node_type, attribute_name = attr.split(".", 1)
# first get node of that type
@ -271,28 +333,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
if not nodes:
cls.log.warning(
"No nodes of type '{}' found.".format(node_type))
"No nodes of type \"{}\" found.".format(node_type)
)
continue
for node in nodes:
try:
render_value = cmds.getAttr(
"{}.{}".format(node, attribute_name))
except RuntimeError:
invalid = True
cls.log.error(
"Cannot get value of {}.{}".format(
node, attribute_name))
else:
if str(value) != str(render_value):
invalid = True
cls.log.error(
("Invalid value {} set on {}.{}. "
"Expecting {}").format(
render_value, node, attribute_name, value)
)
result[attribute_name] = {"nodes": nodes, "values": values}
return invalid
return result
@classmethod
def repair(cls, instance):
@ -305,6 +352,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"{aov_separator}", instance.data.get("aovSeparator", "_")
)
for attribute, data in cls.get_nodes(instance, renderer).items():
if not data["values"]:
continue
for node in data["nodes"]:
lib.set_attribute(attribute, data["values"][0], node)
with lib.renderlayer(layer_node):
default = lib.RENDER_ATTRS['default']
render_attrs = lib.RENDER_ATTRS.get(renderer, default)

View file

@ -48,6 +48,18 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):
yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True)
if not yeti_loaded and not cmds.ls(type="pgYetiMaya"):
# The yeti plug-in is available and loaded so at
# this point we don't really care whether the scene
# has any yeti callback set or not since if the callback
# is there it wouldn't error and if it weren't then
# nothing happens because there are no yeti nodes.
cls.log.info(
"Yeti is loaded but no yeti nodes were found. "
"Callback validation skipped.."
)
return False
renderer = instance.data["renderer"]
if renderer == "redshift":
cls.log.info("Redshift ignores any pre and post render callbacks")

View file

@ -422,6 +422,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
assembly_job_info.Priority = instance.data.get(
"tile_priority", self.tile_priority
)
assembly_job_info.TileJob = False
pool = instance.context.data["project_settings"]["deadline"]
pool = pool["publish"]["ProcessSubmittedJobOnFarm"]["deadline_pool"]
@ -450,15 +451,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
frame_assembly_job_info.ExtraInfo[0] = file_hash
frame_assembly_job_info.ExtraInfo[1] = file
frame_assembly_job_info.JobDependencies = tile_job_id
frame_assembly_job_info.Frames = frame
# write assembly job config files
now = datetime.now()
config_file = os.path.join(
output_dir,
"{}_config_{}.txt".format(
os.path.splitext(file)[0],
now.strftime("%Y_%m_%d_%H_%M_%S")
datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
)
)
try:
@ -469,6 +469,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
self.log.warning("Path is unreachable: "
"`{}`".format(output_dir))
assembly_plugin_info["ConfigFile"] = config_file
with open(config_file, "w") as cf:
print("TileCount={}".format(tiles_count), file=cf)
print("ImageFileName={}".format(file), file=cf)
@ -477,25 +479,30 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
print("ImageHeight={}".format(
instance.data.get("resolutionHeight")), file=cf)
with open(config_file, "a") as cf:
# Need to reverse the order of the y tiles, because image
# coordinates are calculated from bottom left corner.
tiles = _format_tiles(
file, 0,
instance.data.get("tilesX"),
instance.data.get("tilesY"),
instance.data.get("resolutionWidth"),
instance.data.get("resolutionHeight"),
payload_plugin_info["OutputFilePrefix"]
payload_plugin_info["OutputFilePrefix"],
reversed_y=True
)[1]
for k, v in sorted(tiles.items()):
print("{}={}".format(k, v), file=cf)
payload = self.assemble_payload(
job_info=frame_assembly_job_info,
plugin_info=assembly_plugin_info.copy(),
# todo: aux file transfers don't work with deadline webservice
# add config file as job auxFile
# aux_files=[config_file]
assembly_payloads.append(
self.assemble_payload(
job_info=frame_assembly_job_info,
plugin_info=assembly_plugin_info.copy(),
# This would fail if the client machine and webserice are
# using different storage paths.
aux_files=[config_file]
)
)
assembly_payloads.append(payload)
# Submit assembly jobs
assembly_job_ids = []
@ -505,6 +512,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"submitting assembly job {} of {}".format(i + 1,
num_assemblies)
)
self.log.info(payload)
assembly_job_id = self.submit(payload)
assembly_job_ids.append(assembly_job_id)
@ -764,8 +772,15 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
def _format_tiles(
filename, index, tiles_x, tiles_y,
width, height, prefix):
filename,
index,
tiles_x,
tiles_y,
width,
height,
prefix,
reversed_y=False
):
"""Generate tile entries for Deadline tile job.
Returns two dictionaries - one that can be directly used in Deadline
@ -802,6 +817,7 @@ def _format_tiles(
width (int): Width resolution of final image.
height (int): Height resolution of final image.
prefix (str): Image prefix.
reversed_y (bool): Reverses the order of the y tiles.
Returns:
(dict, dict): Tuple of two dictionaries - first can be used to
@ -824,12 +840,16 @@ def _format_tiles(
cfg["TilesCropped"] = "False"
tile = 0
range_y = range(1, tiles_y + 1)
reversed_y_range = list(reversed(range_y))
for tile_x in range(1, tiles_x + 1):
for tile_y in reversed(range(1, tiles_y + 1)):
for i, tile_y in enumerate(range_y):
tile_y_index = tile_y
if reversed_y:
tile_y_index = reversed_y_range[i]
tile_prefix = "_tile_{}x{}_{}x{}_".format(
tile_x, tile_y,
tiles_x,
tiles_y
tile_x, tile_y_index, tiles_x, tiles_y
)
new_filename = "{}/{}{}".format(
@ -844,11 +864,14 @@ def _format_tiles(
right = (tile_x * w_space) - 1
# Job info
out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa: E501
key = "OutputFilename{}".format(index)
out["JobInfo"][key] = new_filename
# Plugin Info
out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \
"/{}".format(tile_prefix).join(prefix.rsplit("/", 1))
key = "RegionPrefix{}".format(str(tile))
out["PluginInfo"][key] = "/{}".format(
tile_prefix
).join(prefix.rsplit("/", 1))
out["PluginInfo"]["RegionTop{}".format(tile)] = top
out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom
out["PluginInfo"]["RegionLeft{}".format(tile)] = left

View file

@ -68,8 +68,15 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
# files to be in the folder that we might not want to use.
missing = expected_files - existing_files
if missing:
raise RuntimeError("Missing expected files: {}".format(
sorted(missing)))
raise RuntimeError(
"Missing expected files: {}\n"
"Expected files: {}\n"
"Existing files: {}".format(
sorted(missing),
sorted(expected_files),
sorted(existing_files)
)
)
def _get_frame_list(self, original_job_id):
"""Returns list of frame ranges from all render job.

View file

@ -16,6 +16,10 @@ from Deadline.Scripting import (
FileUtils, RepositoryUtils, SystemUtils)
version_major = 1
version_minor = 0
version_patch = 0
version_string = "{}.{}.{}".format(version_major, version_minor, version_patch)
STRING_TAGS = {
"format"
}
@ -264,6 +268,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
def initialize_process(self):
"""Initialization."""
self.LogInfo("Plugin version: {}".format(version_string))
self.SingleFramesOnly = True
self.StdoutHandling = True
self.renderer = self.GetPluginInfoEntryWithDefault(
@ -320,12 +325,7 @@ class OpenPypeTileAssembler(DeadlinePlugin):
output_file = data["ImageFileName"]
output_file = RepositoryUtils.CheckPathMapping(output_file)
output_file = self.process_path(output_file)
"""
_, ext = os.path.splitext(output_file)
if "exr" not in ext:
self.FailRender(
"[{}] Only EXR format is supported for now.".format(ext))
"""
tile_info = []
for tile in range(int(data["TileCount"])):
tile_info.append({
@ -336,11 +336,6 @@ class OpenPypeTileAssembler(DeadlinePlugin):
"width": int(data["Tile{}Width".format(tile)])
})
# FFMpeg doesn't support tile coordinates at the moment.
# arguments = self.tile_completer_ffmpeg_args(
# int(data["ImageWidth"]), int(data["ImageHeight"]),
# tile_info, output_file)
arguments = self.tile_oiio_args(
int(data["ImageWidth"]), int(data["ImageHeight"]),
tile_info, output_file)
@ -362,20 +357,20 @@ class OpenPypeTileAssembler(DeadlinePlugin):
def pre_render_tasks(self):
"""Load config file and do remapping."""
self.LogInfo("OpenPype Tile Assembler starting...")
scene_filename = self.GetDataFilename()
config_file = self.GetPluginInfoEntry("ConfigFile")
temp_scene_directory = self.CreateTempDirectory(
"thread" + str(self.GetThreadNumber()))
temp_scene_filename = Path.GetFileName(scene_filename)
temp_scene_filename = Path.GetFileName(config_file)
self.config_file = Path.Combine(
temp_scene_directory, temp_scene_filename)
if SystemUtils.IsRunningOnWindows():
RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
scene_filename, self.config_file, "/", "\\")
config_file, self.config_file, "/", "\\")
else:
RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
scene_filename, self.config_file, "\\", "/")
config_file, self.config_file, "\\", "/")
os.chmod(self.config_file, os.stat(self.config_file).st_mode)
def post_render_tasks(self):
@ -459,75 +454,3 @@ class OpenPypeTileAssembler(DeadlinePlugin):
args.append(output_path)
return args
def tile_completer_ffmpeg_args(
self, output_width, output_height, tiles_info, output_path):
"""Generate ffmpeg arguments for tile assembly.
Expected inputs are tiled images.
Args:
output_width (int): Width of output image.
output_height (int): Height of output image.
tiles_info (list): List of tile items, each item must be
dictionary with `filepath`, `pos_x` and `pos_y` keys
representing path to file and x, y coordinates on output
image where top-left point of tile item should start.
output_path (str): Path to file where should be output stored.
Returns:
(list): ffmpeg arguments.
"""
previous_name = "base"
ffmpeg_args = []
filter_complex_strs = []
filter_complex_strs.append("nullsrc=size={}x{}[{}]".format(
output_width, output_height, previous_name
))
new_tiles_info = {}
for idx, tile_info in enumerate(tiles_info):
# Add input and store input index
filepath = tile_info["filepath"]
ffmpeg_args.append("-i \"{}\"".format(filepath.replace("\\", "/")))
# Prepare initial filter complex arguments
index_name = "input{}".format(idx)
filter_complex_strs.append(
"[{}]setpts=PTS-STARTPTS[{}]".format(idx, index_name)
)
tile_info["index"] = idx
new_tiles_info[index_name] = tile_info
# Set frames to 1
ffmpeg_args.append("-frames 1")
# Concatenation filter complex arguments
global_index = 1
total_index = len(new_tiles_info)
for index_name, tile_info in new_tiles_info.items():
item_str = (
"[{previous_name}][{index_name}]overlay={pos_x}:{pos_y}"
).format(
previous_name=previous_name,
index_name=index_name,
pos_x=tile_info["pos_x"],
pos_y=tile_info["pos_y"]
)
new_previous = "tmp{}".format(global_index)
if global_index != total_index:
item_str += "[{}]".format(new_previous)
filter_complex_strs.append(item_str)
previous_name = new_previous
global_index += 1
joined_parts = ";".join(filter_complex_strs)
filter_complex_str = "-filter_complex \"{}\"".format(joined_parts)
ffmpeg_args.append(filter_complex_str)
ffmpeg_args.append("-y")
ffmpeg_args.append("\"{}\"".format(output_path))
return ffmpeg_args

View file

@ -29,7 +29,7 @@ from openpype.pipeline.publish import OpenPypePyblishPluginMixin
class CollectInstanceCommentDef(
pyblish.api.ContextPlugin,
pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin
):
label = "Comment per instance"

View file

@ -80,10 +80,12 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder
families = ["workfile",
"pointcache",
"pointcloud",
"proxyAbc",
"camera",
"animation",
"model",
"maxScene",
"mayaAscii",
"mayaScene",
"setdress",

View file

@ -76,10 +76,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder + 0.00001
families = ["workfile",
"pointcache",
"pointcloud",
"proxyAbc",
"camera",
"animation",
"model",
"maxScene",
"mayaAscii",
"mayaScene",
"setdress",

View file

@ -23,7 +23,7 @@
"enabled": true,
"optional": false,
"active": true,
"tile_assembler_plugin": "OpenPypeTileAssembler",
"tile_assembler_plugin": "DraftTileAssembler",
"use_published": true,
"import_reference": false,
"asset_dependencies": true,

View file

@ -16,5 +16,10 @@
"linux": []
}
}
},
"copy_fusion_settings": {
"copy_path": "~/.openpype/hosts/fusion/profiles",
"copy_status": false,
"force_sync": false
}
}
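A minimal sketch of how a prelaunch hook might act on these settings; the hook wiring and `source_dir` are assumptions, only the three keys come from the block above:

```python
import os
import shutil

def sync_fusion_profile(source_dir, settings):
    """Copy the bundled Fusion profile to the artist's local directory."""
    if not settings["copy_status"]:
        return  # profile copying is disabled
    target = os.path.expanduser(settings["copy_path"])
    if os.path.isdir(target) and not settings["force_sync"]:
        return  # already copied on first launch, no resync requested
    # dirs_exist_ok requires Python 3.8+
    shutil.copytree(source_dir, target, dirs_exist_ok=True)
```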

View file

@ -4,5 +4,20 @@
"aov_separator": "underscore",
"image_format": "exr",
"multipass": true
},
"PointCloud":{
"attribute":{
"Age": "age",
"Radius": "radius",
"Position": "position",
"Rotation": "rotation",
"Scale": "scale",
"Velocity": "velocity",
"Color": "color",
"TextureCoordinate": "texcoord",
"MaterialID": "matid",
"custFloats": "custFloats",
"custVecs": "custVecs"
}
}
}

View file

@ -147,6 +147,7 @@
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"include_parent_hierarchy": false,
"include_user_defined_attributes": false,
"defaults": [
"Main"

View file

@ -121,7 +121,7 @@
"DraftTileAssembler": "Draft Tile Assembler"
},
{
"OpenPypeTileAssembler": "Open Image IO"
"OpenPypeTileAssembler": "OpenPype Tile Assembler"
}
]
},

View file

@ -45,6 +45,29 @@
]
}
]
},
{
"type": "dict",
"key": "copy_fusion_settings",
"collapsible": true,
"label": "Local Fusion profile settings",
"children": [
{
"key": "copy_path",
"type": "path",
"label": "Local Fusion profile directory"
},
{
"type": "boolean",
"key": "copy_status",
"label": "Copy profile on first launch"
},
{
"key":"force_sync",
"type": "boolean",
"label": "Resync profile on each launch"
}
]
}
]
}

View file

@ -51,6 +51,28 @@
"label": "multipass"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "PointCloud",
"label": "Point Cloud",
"children": [
{
"type": "label",
"label": "Define the channel attribute names before exporting as PRT"
},
{
"type": "dict-modifiable",
"collapsible": true,
"key": "attribute",
"label": "Channel Attribute",
"use_label_wrap": true,
"object_type": {
"type": "text"
}
}
]
}
]
}
}

View file

@ -132,6 +132,11 @@
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "boolean",
"key": "include_parent_hierarchy",
"label": "Include Parent Hierarchy"
},
{
"type": "boolean",
"key": "include_user_defined_attributes",

View file

@ -369,7 +369,8 @@
"label": "Arnold Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
},
{
@ -379,7 +380,8 @@
"label": "Vray Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
},
{
@ -389,7 +391,8 @@
"label": "Redshift Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
},
{
@ -399,7 +402,8 @@
"label": "Renderman Render Attributes",
"use_label_wrap": true,
"object_type": {
"type": "text"
"type": "list",
"object_type": "text"
}
}
]

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.15.3-nightly.1"
__version__ = "3.15.3-nightly.2"

View file

@ -6,13 +6,13 @@ sidebar_label: Maya
## Publish Plugins
### Render Settings Validator
`ValidateRenderSettings`
Render Settings Validator is here to make sure artists will submit renders
with the correct settings. Some of these settings are needed by OpenPype but some
can be defined by the admin using [OpenPype Settings UI](admin_settings.md).
OpenPype enforced settings include:
@ -36,10 +36,9 @@ For **Renderman**:
For **Arnold**:
- there shouldn't be a `<renderpass>` token when the merge AOVs option is turned on
Additional checks can be added via Settings - **Project Settings > Maya > Publish plugin > ValidateRenderSettings**.
You can add as many options as you want for every supported renderer. In the first field put the node type and attribute
and in the second the required value. You can add multiple values for an attribute, but when repairing, the first value in the list is used.
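For example, with the list schema above an attribute can map to several accepted values (node and attribute names here are made up):

```python
# First list entry is the value the repair action applies.
arnold_render_attributes = {
    "aiOptions.AASamples": ["6", "8"],
}
```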
![Settings example](assets/maya-admin_render_settings_validator.png)
@ -51,7 +50,11 @@ just one instance of this node type but if that is not so, validator will go thr
instances and check the value there. Node type for **VRay** settings is `VRaySettingsNode`, for **Renderman**
it is `rmanGlobals`, for **Redshift** it is `RedshiftOptions`.
:::info getting attribute values
If you do not know what an attribute's value is supposed to be, for example for a dropdown menu (enum), try changing the attribute and look in the Script Editor, which should log what the attribute was set to.
:::
### Model Name Validator
`ValidateModelName`
@ -95,7 +98,7 @@ You can set various aspects of scene submission to farm with per-project setting
- **Optional** will mark the submission plugin as optional
- **Active** will enable/disable the plugin
- **Tile Assembler Plugin** will set what should be used to assemble tiles on Deadline. Either **OpenPype Tile Assembler** will be used
or Deadline's **Draft Tile Assembler**.
- **Use Published scene** enables rendering from the published scene instead of the scene in the work area. Rendering from published files is much safer.
- **Use Asset dependencies** will mark the job as pending on the farm until asset dependencies are fulfilled; for example, Deadline will wait for the scene file to be synced to the cloud, etc.
@ -169,5 +172,3 @@ Fill in the necessary fields (the optional fields are regex filters)
- Build your workfile
![maya build template](assets/maya-build_workfile_from_template.png)

View file

@ -516,6 +516,22 @@ In the scene from where you want to publish your model create *Render subset*. P
model subset (Maya set node) under the corresponding `LAYER_` set under the *Render instance*. During publish, it will submit this render to the farm and
after it is rendered, it will be attached to your model subset.
### Tile Rendering
:::note Deadline
This feature is only supported when using Deadline. See [here](module_deadline#openpypetileassembler-plugin) for setup.
:::
On the render instance object set you'll find:
* `Tile Rendering` - for enabling tile rendering.
* `Tile X` - number of tiles in the X axis.
* `Tile Y` - number of tiles in the Y axis.
When submitting to Deadline, you'll get (see the sketch below):
- for each frame, a tile rendering job that renders the tiles from Maya.
- for each frame, a tile assembly job that assembles the rendered tiles.
- a job to publish the assembled frames.
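A rough, illustrative tally of the jobs such a submission produces (the frame count is made up):

```python
frames = 24
jobs = frames      # one tile-rendering job per frame
jobs += frames     # one tile-assembly job per frame
jobs += 1          # one publish job for the assembled sequence
print(jobs)        # 49
```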
## Render Setups
### Publishing Render Setups

View file

@ -9,7 +9,9 @@ sidebar_label: Yeti
OpenPype can work with [Yeti](https://peregrinelabs.com/yeti/) in two data modes.
It can handle Yeti caches and Yeti rigs.
### Creating and publishing Yeti caches
## Yeti Caches
### Creating and publishing
Let's start by creating a simple Yeti setup: just one object and a Yeti node. Open a new
empty scene in Maya and create a sphere. Then select the sphere and go to **Yeti → Create Yeti Node on Mesh**
@ -44,7 +46,15 @@ You can now publish Yeti cache as any other types. **OpenPype → Publish**. It
create a sequence of `.fur` files and a `.fursettings` metadata file with the Yeti node
settings.
### Loading Yeti caches
:::note Collect Yeti Cache failure
If you encounter a **Collect Yeti Cache** failure during the collecting phase, and the error looks like
```fix
No object matches name: pgYetiMaya1Shape.cbId
```
then it is probably caused by the scene not being saved before publishing.
:::
### Loading
You can load a Yeti cache via **OpenPype → Load ...**. Select your cache, right+click on
it and select **Load Yeti cache**. This will create a Yeti node in the scene and set its
@ -52,26 +62,39 @@ cache path to point to your published cache files. Note that this Yeti node will
be named with the same name as the one you used to publish the cache. Also notice that
when you open the graph on this Yeti node, all nodes are as they were in the publishing node.
### Creating and publishing Yeti Rig
## Yeti Rigs
Yeti Rigs work in a similar way to caches, but are more complex and deal with
other data used by Yeti, like geometry and textures.
### Creating and publishing
Let's start by [loading](artist_hosts_maya.md#loading-model) into new scene some model.
I've loaded my Buddha model.
Yeti Rigs are designed to connect to published models or animation rigs. The workflow gives the Yeti Rig full control over that geometry to do additional things on top of whatever input comes in, e.g. deleting faces, pushing faces in/out, subdividing, etc.
Create select model mesh, create Yeti node - **Yeti → Create Yeti Node on Mesh** and
setup similar Yeti graph as in cache example above.
Let's start with a [model](artist_hosts_maya.md#loading-model) or [rig](artist_hosts_maya.md#loading-rigs) loaded into the scene. Here we are using a simple rig.
Then select this Yeti node (mine is called with default name `pgYetiMaya1`) and
create *Yeti Rig instance* - **OpenPype → Create...** and select **Yeti Cache**.
![Maya - Yeti Simple Rig](assets/maya-yeti_simple_rig.png)
We'll need to prepare the scene a bit. We want some Yeti hair on the ball geometry, so we duplicate the geometry, add the Yeti hair and group it all together.
![Maya - Yeti Hair Setup](assets/maya-yeti_hair_setup.png)
:::note yeti nodes and types
You can use any number of Yeti nodes and types, but they have to have unique names.
:::
Now we need to connect the Yeti Rig with the animation rig. Yeti Rigs work by publishing the attribute connections from their input nodes and reconnecting them later in the pipeline. This means we can only use attribute connections from outside of the Yeti Rig hierarchy; internal to the Yeti Rig hierarchy, we can use node connections of any complexity. We'll connect the Yeti Rig geometry to the animation rig via the transform and mesh attributes, as shown below.
![Maya - Yeti Rig Setup](assets/maya-yeti_rig_setup.png)
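For illustration only, such a connection could be made from the Script Editor like this (node names are hypothetical):

```python
from maya import cmds

# Drive the Yeti Rig's input shape and transform from the animation rig.
cmds.connectAttr("anim_ball_GEOShape.outMesh", "yeti_ball_GEOShape.inMesh")
cmds.connectAttr("anim_ball_GEO.translate", "yeti_ball_GEO.translate")
```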
Now we are ready for publishing. Select the Yeti Rig group (`rig_GRP`) and
create *Yeti Rig instance* - **OpenPype → Create...** and select **Yeti Rig**.
Leave `Use selection` checked.
Last step is to add our model geometry to rig instance, so middle+drag its
geometry to `input_SET` under `yetiRigDefault` set representing rig instance.
The last step is to add our geometry to the rig instance: middle+drag its
geometry to `input_SET` under the `yetiRigMain` set representing the rig instance.
Note that its name can differ, as it is based on your subset name.
![Maya - Yeti Rig Setup](assets/maya-yeti_rig.jpg)
![Maya - Yeti Publish Setup](assets/maya-yeti_publish_setup.png)
You can have any number of nodes in the Yeti Rig, but only nodes with incoming attribute connections from outside of the Yeti Rig hierarchy are needed in the `input_SET`.
Save your scene and we are ready to publish our new simple Yeti Rig!
@ -81,28 +104,14 @@ the beginning of your timeline. It will also collect all textures used in Yeti
node, copy them to the publish folder's `resource` directory and set the *Image search path*
of the published node to this location.
:::note Collect Yeti Cache failure
If you encounter **Collect Yeti Cache** failure during collecting phase, and the error is like
```fix
No object matches name: pgYetiMaya1Shape.cbId
```
then it is probably caused by scene not being saved before publishing.
:::
### Loading
### Loading Yeti Rig
You can load published Yeti Rigs as any other thing in OpenPype - **OpenPype → Load ...**,
You can load published Yeti Rigs in OpenPype with **OpenPype → Load ...**,
select your Yeti rig and right+click on it. In the context menu you should see
**Load Yeti Cache** and **Load Yeti Rig** items (among others). First one will
load that one frame cache. The other one will load whole rig.
**Load Yeti Rig** item (among others).
Notice that although we put only geometry into `input_SET`, whole hierarchy was
pulled inside also. This allows you to store complex scene element along Yeti
node.
To connect the Yeti Rig with published animation, we'll load in the animation and use the Inventory to establish the connections.
:::tip auto-connecting rig mesh to existing one
If you select some objects before loading rig it will try to find shapes
under selected hierarchies and match them with shapes loaded with rig (published
under `input_SET`). This mechanism uses *cbId* attribute on those shapes.
If match is found shapes are connected using their `outMesh` and `outMesh`. Thus you can easily connect existing animation to loaded rig.
:::
![Maya - Yeti Publish Setup](assets/maya-yeti_load_connections.png)
The Yeti Rig should now be following the animation. :tada:

Binary file not shown (image updated: 11 KiB → 12 KiB)
Binary file not shown (image added: 136 KiB)
Binary file not shown (image added: 123 KiB)
Binary file not shown (image added: 131 KiB)
Binary file not shown (image removed: 58 KiB)
Binary file not shown (image added: 109 KiB)
Binary file not shown (image added: 94 KiB)
View file

@ -45,6 +45,10 @@ executable. It is recommended to use the `openpype_console` executable as it pro
![Configure plugin](assets/deadline_configure_plugin.png)
### OpenPypeTileAssembler Plugin
To set up tile rendering, copy the `OpenPypeTileAssembler` plugin into the Deadline repository:
`[OpenPype]\openpype\modules\deadline\repository\custom\plugins\OpenPypeTileAssembler` > `[DeadlineRepository]\custom\plugins\OpenPypeTileAssembler`
### Pools
The main pools can be configured at `project_settings/deadline/publish/CollectDeadlinePools/primary_pool`, which is applied to the rendering jobs.