Merge branch 'develop' into enhancement/maya_review

# Conflicts:
#	openpype/hosts/maya/plugins/publish/extract_thumbnail.py
Toke Stuart Jepsen 2022-12-15 09:03:18 +00:00
commit 114cf02dc3
186 changed files with 4506 additions and 1502 deletions

@@ -1,5 +1,27 @@
# Changelog
## [3.14.8](https://github.com/pypeclub/OpenPype/tree/3.14.8)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.7...3.14.8)
**🚀 Enhancements**
- General: Refactored extract hierarchy plugin [\#4139](https://github.com/pypeclub/OpenPype/pull/4139)
- General: Find executable enhancement [\#4137](https://github.com/pypeclub/OpenPype/pull/4137)
- Ftrack: Reset session before instance processing [\#4129](https://github.com/pypeclub/OpenPype/pull/4129)
- Ftrack: Editorial asset sync issue [\#4126](https://github.com/pypeclub/OpenPype/pull/4126)
- Deadline: Build version resolving [\#4115](https://github.com/pypeclub/OpenPype/pull/4115)
- Houdini: New Publisher [\#3046](https://github.com/pypeclub/OpenPype/pull/3046)
- Fix: Standalone Publish Directories [\#4148](https://github.com/pypeclub/OpenPype/pull/4148)
**🐛 Bug fixes**
- Ftrack: Fix occasional double parents issue [\#4153](https://github.com/pypeclub/OpenPype/pull/4153)
- General: Maketx executable issue [\#4136](https://github.com/pypeclub/OpenPype/pull/4136)
- Maya: Looks - add all connections [\#4135](https://github.com/pypeclub/OpenPype/pull/4135)
- General: Fix variable check in collect anatomy instance data [\#4117](https://github.com/pypeclub/OpenPype/pull/4117)
## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7)

@@ -1,5 +1,25 @@
# Changelog
## [3.14.8](https://github.com/pypeclub/OpenPype/tree/3.14.8)
[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.7...3.14.8)
**🚀 Enhancements**
- General: Refactored extract hierarchy plugin [\#4139](https://github.com/pypeclub/OpenPype/pull/4139)
- General: Find executable enhancement [\#4137](https://github.com/pypeclub/OpenPype/pull/4137)
- Ftrack: Reset session before instance processing [\#4129](https://github.com/pypeclub/OpenPype/pull/4129)
- Ftrack: Editorial asset sync issue [\#4126](https://github.com/pypeclub/OpenPype/pull/4126)
- Deadline: Build version resolving [\#4115](https://github.com/pypeclub/OpenPype/pull/4115)
- Houdini: New Publisher [\#3046](https://github.com/pypeclub/OpenPype/pull/3046)
- Fix: Standalone Publish Directories [\#4148](https://github.com/pypeclub/OpenPype/pull/4148)
**🐛 Bug fixes**
- Ftrack: Fix occasional double parents issue [\#4153](https://github.com/pypeclub/OpenPype/pull/4153)
- General: Maketx executable issue [\#4136](https://github.com/pypeclub/OpenPype/pull/4136)
- Maya: Looks - add all connections [\#4135](https://github.com/pypeclub/OpenPype/pull/4135)
- General: Fix variable check in collect anatomy instance data [\#4117](https://github.com/pypeclub/OpenPype/pull/4117)
## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7)

@@ -29,8 +29,14 @@ def main(ctx):
It wraps different commands together.
"""
if ctx.invoked_subcommand is None:
ctx.invoke(tray)
# Print help if headless mode is used
if os.environ.get("OPENPYPE_HEADLESS_MODE") == "1":
print(ctx.get_help())
sys.exit(0)
else:
ctx.invoke(tray)
@main.command()

@@ -32,11 +32,6 @@ class CreateCamera(plugin.Creator):
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
instances.objects.link(camera_obj)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
@@ -53,6 +48,11 @@ class CreateCamera(plugin.Creator):
bpy.ops.object.parent_set(keep_transform=True)
else:
plugin.deselect_all()
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
instances.objects.link(camera_obj)
camera_obj.select_set(True)
asset_group.select_set(True)
bpy.context.view_layer.objects.active = asset_group

@@ -0,0 +1,72 @@
import os
import bpy
from openpype.pipeline import publish
from openpype.hosts.blender.api import plugin
class ExtractAnimationABC(publish.Extractor):
"""Extract as ABC."""
label = "Extract Animation ABC"
hosts = ["blender"]
families = ["animation"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
filepath = os.path.join(stagingdir, filename)
context = bpy.context
# Perform extraction
self.log.info("Performing extraction..")
plugin.deselect_all()
selected = []
asset_group = None
objects = []
for obj in instance:
if isinstance(obj, bpy.types.Collection):
for child in obj.all_objects:
objects.append(child)
for obj in objects:
children = [o for o in bpy.data.objects if o.parent == obj]
for child in children:
objects.append(child)
for obj in objects:
obj.select_set(True)
selected.append(obj)
context = plugin.create_blender_context(
active=asset_group, selected=selected)
# We export the abc
bpy.ops.wm.alembic_export(
context,
filepath=filepath,
selected=True,
flatten=False
)
plugin.deselect_all()
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)

@@ -0,0 +1,10 @@
from .addon import (
CELACTION_ROOT_DIR,
CelactionAddon,
)
__all__ = (
"CELACTION_ROOT_DIR",
"CelactionAddon",
)

@@ -0,0 +1,31 @@
import os
from openpype.modules import OpenPypeModule, IHostAddon
CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class CelactionAddon(OpenPypeModule, IHostAddon):
name = "celaction"
host_name = "celaction"
def initialize(self, module_settings):
self.enabled = True
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(CELACTION_ROOT_DIR, "hooks")
]
def add_implementation_envs(self, env, _app):
# Set default values if they are not already set via settings
defaults = {
"LOGLEVEL": "DEBUG"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_workfile_extensions(self):
return [".scn"]

@@ -1 +0,0 @@
kwargs = None

@@ -1,87 +0,0 @@
import os
import sys
import copy
import argparse
import pyblish.api
import pyblish.util
import openpype.hosts.celaction
from openpype.lib import Logger
from openpype.hosts.celaction import api as celaction
from openpype.tools.utils import host_tools
from openpype.pipeline import install_openpype_plugins
log = Logger.get_logger("Celaction_cli_publisher")
publish_host = "celaction"
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
def cli():
parser = argparse.ArgumentParser(prog="celaction_publish")
parser.add_argument("--currentFile",
help="Pass file to Context as `currentFile`")
parser.add_argument("--chunk",
help=("Render chanks on farm"))
parser.add_argument("--frameStart",
help=("Start of frame range"))
parser.add_argument("--frameEnd",
help=("End of frame range"))
parser.add_argument("--resolutionWidth",
help=("Width of resolution"))
parser.add_argument("--resolutionHeight",
help=("Height of resolution"))
celaction.kwargs = parser.parse_args(sys.argv[1:]).__dict__
def _prepare_publish_environments():
"""Prepares environments based on request data."""
env = copy.deepcopy(os.environ)
project_name = os.getenv("AVALON_PROJECT")
asset_name = os.getenv("AVALON_ASSET")
env["AVALON_PROJECT"] = project_name
env["AVALON_ASSET"] = asset_name
env["AVALON_TASK"] = os.getenv("AVALON_TASK")
env["AVALON_WORKDIR"] = os.getenv("AVALON_WORKDIR")
env["AVALON_APP"] = f"hosts.{publish_host}"
env["AVALON_APP_NAME"] = "celaction/local"
env["PYBLISH_HOSTS"] = publish_host
os.environ.update(env)
def main():
# prepare all environments
_prepare_publish_environments()
# Registers pype's Global pyblish plugins
install_openpype_plugins()
if os.path.exists(PUBLISH_PATH):
log.info(f"Registering path: {PUBLISH_PATH}")
pyblish.api.register_plugin_path(PUBLISH_PATH)
pyblish.api.register_host(publish_host)
return host_tools.show_publish()
if __name__ == "__main__":
cli()
result = main()
sys.exit(not bool(result))

@@ -1,122 +0,0 @@
import os
import shutil
import winreg
from openpype.lib import PreLaunchHook
from openpype.hosts.celaction import api as celaction
class CelactionPrelaunchHook(PreLaunchHook):
"""
Bootstrap CelAction with OpenPype
"""
workfile_ext = "scn"
app_groups = ["celaction"]
platforms = ["windows"]
def execute(self):
# Add workfile path to launch arguments
workfile_path = self.workfile_path()
if workfile_path:
self.launch_context.launch_args.append(workfile_path)
project_name = self.data["project_name"]
asset_name = self.data["asset_name"]
task_name = self.data["task_name"]
# get publish version of celaction
app = "celaction_publish"
# setting output parameters
path = r"Software\CelAction\CelAction2D\User Settings"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
"Software\\CelAction\\CelAction2D\\User Settings", 0,
winreg.KEY_ALL_ACCESS)
# TODO: this will need to be checked more thoroughly
pype_exe = os.getenv("OPENPYPE_EXECUTABLE")
winreg.SetValueEx(hKey, "SubmitAppTitle", 0, winreg.REG_SZ, pype_exe)
parameters = [
"launch",
f"--app {app}",
f"--project {project_name}",
f"--asset {asset_name}",
f"--task {task_name}",
"--currentFile \\\"\"*SCENE*\"\\\"",
"--chunk 10",
"--frameStart *START*",
"--frameEnd *END*",
"--resolutionWidth *X*",
"--resolutionHeight *Y*",
# "--programDir \"'*PROGPATH*'\""
]
winreg.SetValueEx(hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
" ".join(parameters))
# setting resolution parameters
path = r"Software\CelAction\CelAction2D\User Settings\Dialogs"
path += r"\SubmitOutput"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, 1920)
winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, 1080)
# making sure message dialogs don't appear when overwriting
path = r"Software\CelAction\CelAction2D\User Settings\Messages"
path += r"\OverwriteScene"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
path = r"Software\CelAction\CelAction2D\User Settings\Messages"
path += r"\SceneSaved"
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path)
hKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
def workfile_path(self):
workfile_path = self.data["last_workfile_path"]
# copy workfile from template if none exists on the path
if not os.path.exists(workfile_path):
# TODO add ability to set different template workfile path via
# settings
pype_celaction_dir = os.path.dirname(os.path.dirname(
os.path.abspath(celaction.__file__)
))
template_path = os.path.join(
pype_celaction_dir,
"resources",
"celaction_template_scene.scn"
)
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file in {}".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(workfile_path)
)
self.log.info(f"Workfile to open: \"{workfile_path}\"")
return workfile_path

@@ -0,0 +1,137 @@
import os
import shutil
import winreg
import subprocess
from openpype.lib import PreLaunchHook, get_openpype_execute_args
from openpype.hosts.celaction import scripts
CELACTION_SCRIPTS_DIR = os.path.dirname(
os.path.abspath(scripts.__file__)
)
class CelactionPrelaunchHook(PreLaunchHook):
"""
Bootstrap CelAction with OpenPype
"""
app_groups = ["celaction"]
platforms = ["windows"]
def execute(self):
asset_doc = self.data["asset_doc"]
width = asset_doc["data"]["resolutionWidth"]
height = asset_doc["data"]["resolutionHeight"]
# Add workfile path to launch arguments
workfile_path = self.workfile_path()
if workfile_path:
self.launch_context.launch_args.append(workfile_path)
# setting output parameters
path_user_settings = "\\".join([
"Software", "CelAction", "CelAction2D", "User Settings"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_user_settings)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_user_settings, 0,
winreg.KEY_ALL_ACCESS
)
path_to_cli = os.path.join(CELACTION_SCRIPTS_DIR, "publish_cli.py")
subproces_args = get_openpype_execute_args("run", path_to_cli)
openpype_executable = subproces_args.pop(0)
winreg.SetValueEx(
hKey,
"SubmitAppTitle",
0,
winreg.REG_SZ,
openpype_executable
)
parameters = subproces_args + [
"--currentFile", "*SCENE*",
"--chunk", "*CHUNK*",
"--frameStart", "*START*",
"--frameEnd", "*END*",
"--resolutionWidth", "*X*",
"--resolutionHeight", "*Y*"
]
winreg.SetValueEx(
hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
subprocess.list2cmdline(parameters)
)
# setting resolution parameters
path_submit = "\\".join([
path_user_settings, "Dialogs", "SubmitOutput"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_submit)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_submit, 0,
winreg.KEY_ALL_ACCESS
)
winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, width)
winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, height)
# making sure message dialogs don't appear when overwriting
path_overwrite_scene = "\\".join([
path_user_settings, "Messages", "OverwriteScene"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_overwrite_scene)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_overwrite_scene, 0,
winreg.KEY_ALL_ACCESS
)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
# set scene as not saved
path_scene_saved = "\\".join([
path_user_settings, "Messages", "SceneSaved"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_scene_saved)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_scene_saved, 0,
winreg.KEY_ALL_ACCESS
)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
def workfile_path(self):
workfile_path = self.data["last_workfile_path"]
# copy workfile from template if none exists on the path
if not os.path.exists(workfile_path):
# TODO add ability to set different template workfile path via
# settings
openpype_celaction_dir = os.path.dirname(CELACTION_SCRIPTS_DIR)
template_path = os.path.join(
openpype_celaction_dir,
"resources",
"celaction_template_scene.scn"
)
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file in {}".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(workfile_path)
)
self.log.info(f"Workfile to open: \"{workfile_path}\"")
return workfile_path

@@ -1,5 +1,7 @@
import pyblish.api
from openpype.hosts.celaction import api as celaction
import argparse
import sys
from pprint import pformat
class CollectCelactionCliKwargs(pyblish.api.Collector):
@@ -9,15 +11,31 @@ class CollectCelactionCliKwargs(pyblish.api.Collector):
order = pyblish.api.Collector.order - 0.1
def process(self, context):
kwargs = celaction.kwargs.copy()
parser = argparse.ArgumentParser(prog="celaction")
parser.add_argument("--currentFile",
help="Pass file to Context as `currentFile`")
parser.add_argument("--chunk",
help=("Render chanks on farm"))
parser.add_argument("--frameStart",
help=("Start of frame range"))
parser.add_argument("--frameEnd",
help=("End of frame range"))
parser.add_argument("--resolutionWidth",
help=("Width of resolution"))
parser.add_argument("--resolutionHeight",
help=("Height of resolution"))
passing_kwargs = parser.parse_args(sys.argv[1:]).__dict__
self.log.info("Storing kwargs: %s" % kwargs)
context.set_data("kwargs", kwargs)
self.log.info("Storing kwargs ...")
self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))
# set kwargs to context data
context.set_data("passingKwargs", passing_kwargs)
# get kwargs onto context data as keys with values
for k, v in kwargs.items():
for k, v in passing_kwargs.items():
self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
if k in ["frameStart", "frameEnd"]:
context.data[k] = kwargs[k] = int(v)
context.data[k] = passing_kwargs[k] = int(v)
else:
context.data[k] = v
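As a side note, argparse returns every value as a string, which is why only frameStart/frameEnd are cast to int above. A minimal standalone sketch (argument values are hypothetical):

import argparse

parser = argparse.ArgumentParser(prog="celaction")
parser.add_argument("--frameStart")
parser.add_argument("--frameEnd")

# values arrive as strings and must be cast before numeric use
kwargs = parser.parse_args(["--frameStart", "1001",
                            "--frameEnd", "1100"]).__dict__
print(kwargs)  # {'frameStart': '1001', 'frameEnd': '1100'}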

@@ -36,7 +36,8 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
"version": version
}
celaction_kwargs = context.data.get("kwargs", {})
celaction_kwargs = context.data.get(
"passingKwargs", {})
if celaction_kwargs:
shared_instance_data.update(celaction_kwargs)
@@ -52,8 +53,8 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
"subset": subset,
"label": scene_file,
"family": family,
"families": [family, "ftrack"],
"representations": list()
"families": [],
"representations": []
})
# adding basic script data
@@ -72,7 +73,6 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
self.log.info('Publishing Celaction workfile')
# render instance
family = "render.farm"
subset = f"render{task}Main"
instance = context.create_instance(name=subset)
# getting instance state
@@ -81,8 +81,8 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
# add assetEntity data into instance
instance.data.update({
"label": "{} - farm".format(subset),
"family": family,
"families": [family],
"family": "render.farm",
"families": [],
"subset": subset
})

@@ -11,28 +11,31 @@ class CollectRenderPath(pyblish.api.InstancePlugin):
families = ["render.farm"]
# Presets
anatomy_render_key = None
publish_render_metadata = None
output_extension = "png"
anatomy_template_key_render_files = None
anatomy_template_key_metadata = None
def process(self, instance):
anatomy = instance.context.data["anatomy"]
anatomy_data = copy.deepcopy(instance.data["anatomyData"])
anatomy_data["family"] = "render"
padding = anatomy.templates.get("frame_padding", 4)
anatomy_data.update({
"frame": f"%0{padding}d",
"representation": "png"
"family": "render",
"representation": self.output_extension,
"ext": self.output_extension
})
anatomy_filled = anatomy.format(anatomy_data)
# get anatomy rendering keys
anatomy_render_key = self.anatomy_render_key or "render"
publish_render_metadata = self.publish_render_metadata or "render"
r_anatomy_key = self.anatomy_template_key_render_files
m_anatomy_key = self.anatomy_template_key_metadata
# get folder and path for rendering images from celaction
render_dir = anatomy_filled[anatomy_render_key]["folder"]
render_path = anatomy_filled[anatomy_render_key]["path"]
render_dir = anatomy_filled[r_anatomy_key]["folder"]
render_path = anatomy_filled[r_anatomy_key]["path"]
self.log.debug("__ render_path: `{}`".format(render_path))
# create dir if it doesn't exist
try:
@@ -46,9 +49,9 @@ class CollectRenderPath(pyblish.api.InstancePlugin):
instance.data["path"] = render_path
# get anatomy for published renders folder path
if anatomy_filled.get(publish_render_metadata):
if anatomy_filled.get(m_anatomy_key):
instance.data["publishRenderMetadataFolder"] = anatomy_filled[
publish_render_metadata]["folder"]
m_anatomy_key]["folder"]
self.log.info("Metadata render path: `{}`".format(
instance.data["publishRenderMetadataFolder"]
))

@@ -0,0 +1,37 @@
import os
import sys
import pyblish.api
import pyblish.util
import openpype.hosts.celaction
from openpype.lib import Logger
from openpype.tools.utils import host_tools
from openpype.pipeline import install_openpype_plugins
log = Logger.get_logger("celaction")
PUBLISH_HOST = "celaction"
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.celaction.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
def main():
# Registers pype's Global pyblish plugins
install_openpype_plugins()
if os.path.exists(PUBLISH_PATH):
log.info(f"Registering path: {PUBLISH_PATH}")
pyblish.api.register_plugin_path(PUBLISH_PATH)
pyblish.api.register_host(PUBLISH_HOST)
pyblish.api.register_target("local")
return host_tools.show_publish()
if __name__ == "__main__":
result = main()
sys.exit(not bool(result))

@@ -596,18 +596,28 @@ class PublishableClip:
if not hero_track and self.vertical_sync:
# driving layer is set as negative match
for (_in, _out), hero_data in self.vertical_clip_match.items():
hero_data.update({"heroTrack": False})
if _in == self.clip_in and _out == self.clip_out:
"""
Since only one hero clip instance is expected in
`self.vertical_clip_match`, this loops only until
a non-hero clip is matched with the hero clip.
`tag_hierarchy_data` is set only once for every
clip which is not a hero clip.
"""
_hero_data = deepcopy(hero_data)
_hero_data.update({"heroTrack": False})
if _in <= self.clip_in and _out >= self.clip_out:
data_subset = hero_data["subset"]
# add track index in case of duplicate names in hero data
if self.subset in data_subset:
hero_data["subset"] = self.subset + str(
_hero_data["subset"] = self.subset + str(
self.track_index)
# if track name and subset name are the same, use the plain subset name
if self.subset_name == self.track_name:
hero_data["subset"] = self.subset
_hero_data["subset"] = self.subset
# assign data to be returned as hierarchy data for the tag
tag_hierarchy_data = hero_data
tag_hierarchy_data = _hero_data
break
# add data to return data dict
self.marker_data.update(tag_hierarchy_data)

@@ -27,7 +27,12 @@ class HieroAddon(OpenPypeModule, IHostAddon):
new_hiero_paths.append(norm_path)
env["HIERO_PLUGIN_PATH"] = os.pathsep.join(new_hiero_paths)
# Remove auto screen scale factor for Qt
# - let Hiero decide its value
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
# Remove tkinter library paths if they are set
env.pop("TK_LIBRARY", None)
env.pop("TCL_LIBRARY", None)
# Add vendor to PYTHONPATH
python_path = env["PYTHONPATH"]

@@ -7,7 +7,7 @@ import contextlib
import hou # noqa
from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
import pyblish.api
@@ -40,7 +40,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher):
class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "houdini"
def __init__(self):

@@ -0,0 +1,10 @@
from .addon import (
MaxAddon,
MAX_HOST_DIR,
)
__all__ = (
"MaxAddon",
"MAX_HOST_DIR",
)

@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
import os
from openpype.modules import OpenPypeModule, IHostAddon
MAX_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
class MaxAddon(OpenPypeModule, IHostAddon):
name = "max"
host_name = "max"
def initialize(self, module_settings):
self.enabled = True
def get_workfile_extensions(self):
return [".max"]

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
"""Public API for 3dsmax"""
from .pipeline import (
MaxHost,
)
from .lib import (
maintained_selection,
lsattr,
get_all_children
)
__all__ = [
"MaxHost",
"maintained_selection",
"lsattr",
"get_all_children"
]

@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
"""Library of functions useful for 3dsmax pipeline."""
import json
import six
from pymxs import runtime as rt
from typing import Union
import contextlib
JSON_PREFIX = "JSON::"
def imprint(node_name: str, data: dict) -> bool:
node = rt.getNodeByName(node_name)
if not node:
return False
for k, v in data.items():
if isinstance(v, (dict, list)):
rt.setUserProp(node, k, f'{JSON_PREFIX}{json.dumps(v)}')
else:
rt.setUserProp(node, k, v)
return True
def lsattr(
attr: str,
value: Union[str, None] = None,
root: Union[str, None] = None) -> list:
"""List nodes having attribute with specified value.
Args:
attr (str): Attribute name to match.
value (str, Optional): Value to match. If omitted, all nodes
with the specified attribute are returned regardless of value.
root (str, Optional): Root node name. If omitted, scene root is used.
Returns:
list of nodes.
"""
root = rt.rootnode if root is None else rt.getNodeByName(root)
def output_node(node, nodes):
nodes.append(node)
for child in node.Children:
output_node(child, nodes)
nodes = []
output_node(root, nodes)
return [
n for n in nodes
if rt.getUserProp(n, attr) == value
] if value else [
n for n in nodes
if rt.getUserProp(n, attr)
]
def read(container) -> dict:
data = {}
props = rt.getUserPropBuffer(container)
# this shouldn't happen but let's guard against it anyway
if not props:
return data
for line in props.split("\r\n"):
try:
key, value = line.split("=")
except ValueError:
# if the line cannot be split we can't really parse it
continue
value = value.strip()
if isinstance(value, six.string_types) and \
value.startswith(JSON_PREFIX):
try:
value = json.loads(value[len(JSON_PREFIX):])
except json.JSONDecodeError:
# not a json
pass
data[key.strip()] = value
data["instance_node"] = container.name
return data
@contextlib.contextmanager
def maintained_selection():
previous_selection = rt.getCurrentSelection()
try:
yield
finally:
if previous_selection:
rt.select(previous_selection)
else:
rt.select()
def get_all_children(parent, node_type=None):
"""Handy function to get all the children of a given node
Args:
parent (3dsmax Node1): Node to get all children of.
node_type (None, runtime.class): class to filter children by,
e.g. rt.FFDBox/rt.GeometryClass etc.
Returns:
list: list of all children of the parent node
"""
def list_children(node):
children = []
for c in node.Children:
children.append(c)
children = children + list_children(c)
return children
child_list = list_children(parent)
return ([x for x in child_list if rt.superClassOf(x) == node_type]
if node_type else child_list)
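Below is a minimal round-trip sketch of the user-property helpers defined above, assuming a 3dsmax scene with a node named "Box001" (the node name is hypothetical):

from pymxs import runtime as rt

from openpype.hosts.max.api.lib import imprint, lsattr, read

# dict/list values are serialized with the "JSON::" prefix
imprint("Box001", {"id": "pyblish.avalon.instance",
                   "members": ["a", "b"]})

# read() decodes JSON-prefixed values back into Python objects
node = rt.getNodeByName("Box001")
print(read(node)["members"])  # -> ["a", "b"]

# lsattr() finds nodes by user property and optional value
print(lsattr("id", "pyblish.avalon.instance"))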

@@ -0,0 +1,130 @@
# -*- coding: utf-8 -*-
"""3dsmax menu definition of OpenPype."""
from Qt import QtWidgets, QtCore
from pymxs import runtime as rt
from openpype.tools.utils import host_tools
class OpenPypeMenu(object):
"""Object representing OpenPype menu.
This uses a "hack" to inject itself before the "Help" menu of 3dsmax.
For some reason the `postLoadingMenus` event doesn't fire, and the main
menu is probably re-initialized by menu templates, so we wait for at
least 1 Qt event loop before trying to insert.
"""
def __init__(self):
super().__init__()
self.main_widget = self.get_main_widget()
self.menu = None
timer = QtCore.QTimer()
# set number of event loops to wait.
timer.setInterval(1)
timer.timeout.connect(self._on_timer)
timer.start()
self._timer = timer
self._counter = 0
def _on_timer(self):
if self._counter < 1:
self._counter += 1
return
self._counter = 0
self._timer.stop()
self.build_openpype_menu()
@staticmethod
def get_main_widget():
"""Get 3dsmax main window."""
return QtWidgets.QWidget.find(rt.windows.getMAXHWND())
def get_main_menubar(self) -> QtWidgets.QMenuBar:
"""Get main Menubar by 3dsmax main window."""
return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0]
def get_or_create_openpype_menu(
self, name: str = "&OpenPype",
before: str = "&Help") -> QtWidgets.QAction:
"""Create OpenPype menu.
Args:
name (str, Optional): OpenPype menu name.
before (str, Optional): Name of the 3dsmax main menu item to
add OpenPype menu before.
Returns:
QtWidgets.QAction: OpenPype menu action.
"""
if self.menu is not None:
return self.menu
menu_bar = self.get_main_menubar()
menu_items = menu_bar.findChildren(
QtWidgets.QMenu, options=QtCore.Qt.FindDirectChildrenOnly)
help_action = None
for item in menu_items:
if name in item.title():
# we already have OpenPype menu
return item
if before in item.title():
help_action = item.menuAction()
op_menu = QtWidgets.QMenu("&OpenPype")
menu_bar.insertMenu(help_action, op_menu)
self.menu = op_menu
return op_menu
def build_openpype_menu(self) -> QtWidgets.QAction:
"""Build items in OpenPype menu."""
openpype_menu = self.get_or_create_openpype_menu()
load_action = QtWidgets.QAction("Load...", openpype_menu)
load_action.triggered.connect(self.load_callback)
openpype_menu.addAction(load_action)
publish_action = QtWidgets.QAction("Publish...", openpype_menu)
publish_action.triggered.connect(self.publish_callback)
openpype_menu.addAction(publish_action)
manage_action = QtWidgets.QAction("Manage...", openpype_menu)
manage_action.triggered.connect(self.manage_callback)
openpype_menu.addAction(manage_action)
library_action = QtWidgets.QAction("Library...", openpype_menu)
library_action.triggered.connect(self.library_callback)
openpype_menu.addAction(library_action)
openpype_menu.addSeparator()
workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu)
workfiles_action.triggered.connect(self.workfiles_callback)
openpype_menu.addAction(workfiles_action)
return openpype_menu
def load_callback(self):
"""Callback to show Loader tool."""
host_tools.show_loader(parent=self.main_widget)
def publish_callback(self):
"""Callback to show Publisher tool."""
host_tools.show_publisher(parent=self.main_widget)
def manage_callback(self):
"""Callback to show Scene Manager/Inventory tool."""
host_tools.show_subset_manager(parent=self.main_widget)
def library_callback(self):
"""Callback to show Library Loader tool."""
host_tools.show_library_loader(parent=self.main_widget)
def workfiles_callback(self):
"""Callback to show Workfiles tool."""
host_tools.show_workfiles(parent=self.main_widget)

@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
"""Pipeline tools for OpenPype Houdini integration."""
import os
import logging
import json
from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher
import pyblish.api
from openpype.pipeline import (
register_creator_plugin_path,
register_loader_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.max.api.menu import OpenPypeMenu
from openpype.hosts.max.api import lib
from openpype.hosts.max import MAX_HOST_DIR
from pymxs import runtime as rt # noqa
log = logging.getLogger("openpype.hosts.max")
PLUGINS_DIR = os.path.join(MAX_HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher):
name = "max"
menu = None
def __init__(self):
super(MaxHost, self).__init__()
self._op_events = {}
self._has_been_setup = False
def install(self):
pyblish.api.register_host("max")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
# self._register_callbacks()
self.menu = OpenPypeMenu()
self._has_been_setup = True
def has_unsaved_changes(self):
# TODO: how to get it from 3dsmax?
return True
def get_workfile_extensions(self):
return [".max"]
def save_workfile(self, dst_path=None):
rt.saveMaxFile(dst_path)
return dst_path
def open_workfile(self, filepath):
rt.checkForSave()
rt.loadMaxFile(filepath)
return filepath
def get_current_workfile(self):
return os.path.join(rt.maxFilePath, rt.maxFileName)
def get_containers(self):
return ls()
def _register_callbacks(self):
rt.callbacks.removeScripts(id=rt.name("OpenPypeCallbacks"))
rt.callbacks.addScript(
rt.Name("postLoadingMenus"),
self._deferred_menu_creation, id=rt.Name('OpenPypeCallbacks'))
def _deferred_menu_creation(self):
self.log.info("Building menu ...")
self.menu = OpenPypeMenu()
@staticmethod
def create_context_node():
"""Helper for creating context holding node."""
root_scene = rt.rootScene
create_attr_script = ("""
attributes "OpenPypeContext"
(
parameters main rollout:params
(
context type: #string
)
rollout params "OpenPype Parameters"
(
editText editTextContext "Context" type: #string
)
)
""")
attr = rt.execute(create_attr_script)
rt.custAttributes.add(root_scene, attr)
return root_scene.OpenPypeContext.context
def update_context_data(self, data, changes):
try:
_ = rt.rootScene.OpenPypeContext.context
except AttributeError:
# context node doesn't exist
self.create_context_node()
rt.rootScene.OpenPypeContext.context = json.dumps(data)
def get_context_data(self):
try:
context = rt.rootScene.OpenPypeContext.context
except AttributeError:
# context node doesn't exist
context = self.create_context_node()
if not context:
context = "{}"
return json.loads(context)
def save_file(self, dst_path=None):
# Force forwards slashes to avoid segfault
dst_path = dst_path.replace("\\", "/")
rt.saveMaxFile(dst_path)
def ls() -> list:
"""Get all OpenPype instances."""
objs = rt.objects
containers = [
obj for obj in objs
if rt.getUserProp(obj, "id") == AVALON_CONTAINER_ID
]
for container in sorted(containers, key=lambda c: c.name):
yield lib.read(container)
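A short usage sketch of the context-data round trip implemented above, run inside 3dsmax (the payload is a hypothetical example):

from openpype.hosts.max.api.pipeline import MaxHost

host = MaxHost()

# the first call creates the "OpenPypeContext" custom attribute on
# rootScene; subsequent calls overwrite the serialized JSON payload
host.update_context_data({"project": "demo"}, changes={})
print(host.get_context_data())  # -> {'project': 'demo'}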

@@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
"""3dsmax specific Avalon/Pyblish plugin definitions."""
from pymxs import runtime as rt
import six
from abc import ABCMeta
from openpype.pipeline import (
CreatorError,
Creator,
CreatedInstance
)
from openpype.lib import BoolDef
from .lib import imprint, read, lsattr
class OpenPypeCreatorError(CreatorError):
pass
class MaxCreatorBase(object):
@staticmethod
def cache_subsets(shared_data):
if shared_data.get("max_cached_subsets") is None:
shared_data["max_cached_subsets"] = {}
cached_instances = lsattr("id", "pyblish.avalon.instance")
for i in cached_instances:
creator_id = rt.getUserProp(i, "creator_identifier")
if creator_id not in shared_data["max_cached_subsets"]:
shared_data["max_cached_subsets"][creator_id] = [i.name]
else:
shared_data[
"max_cached_subsets"][creator_id].append(i.name) # noqa
return shared_data
@staticmethod
def create_instance_node(node_name: str, parent: str = ""):
parent_node = rt.getNodeByName(parent) if parent else rt.rootScene
if not parent_node:
raise OpenPypeCreatorError(f"Specified parent {parent} not found")
container = rt.container(name=node_name)
container.Parent = parent_node
return container
@six.add_metaclass(ABCMeta)
class MaxCreator(Creator, MaxCreatorBase):
selected_nodes = []
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
self.selected_nodes = rt.getCurrentSelection()
instance_node = self.create_instance_node(subset_name)
instance_data["instance_node"] = instance_node.name
instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self
)
for node in self.selected_nodes:
node.Parent = instance_node
self._add_instance_to_context(instance)
imprint(instance_node.name, instance.data_to_store())
return instance
def collect_instances(self):
self.cache_subsets(self.collection_shared_data)
for instance in self.collection_shared_data[
"max_cached_subsets"].get(self.identifier, []):
created_instance = CreatedInstance.from_existing(
read(rt.getNodeByName(instance)), self
)
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
instance_node = created_inst.get("instance_node")
new_values = {
key: new_value
for key, (_old_value, new_value) in _changes.items()
}
imprint(
instance_node,
new_values,
)
def remove_instances(self, instances):
"""Remove specified instance from the scene.
This only removes the `id` parameter so the instance is no longer
an instance, because it might contain valuable data for the artist.
"""
for instance in instances:
instance_node = rt.getNodeByName(
instance.data.get("instance_node"))
if instance_node:
rt.delete(instance_node)
self._remove_instance_from_context(instance)
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection", label="Use selection")
]

@@ -0,0 +1,17 @@
from openpype.lib import PreLaunchHook
class SetPath(PreLaunchHook):
"""Set current dir to workdir.
Hook `GlobalHostDataHook` must be executed before this hook.
"""
app_groups = ["max"]
def execute(self):
workdir = self.launch_context.env.get("AVALON_WORKDIR", "")
if not workdir:
self.log.warning("BUG: Workdir is not filled.")
return
self.launch_context.kwargs["cwd"] = workdir

@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
class CreatePointCache(plugin.MaxCreator):
identifier = "io.openpype.creators.max.pointcache"
label = "Point Cache"
family = "pointcache"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
# from pymxs import runtime as rt
_ = super(CreatePointCache, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))

@@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
"""Simple alembic loader for 3dsmax.
Because of the limited API, alembics can only be loaded, not easily updated.
"""
import os
from openpype.pipeline import (
load
)
class AbcLoader(load.LoaderPlugin):
"""Alembic loader."""
families = ["model", "animation", "pointcache"]
label = "Load Alembic"
representations = ["abc"]
order = -10
icon = "code-fork"
color = "orange"
def load(self, context, name=None, namespace=None, data=None):
from pymxs import runtime as rt
file_path = os.path.normpath(self.fname)
abc_before = {
c for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
abc_export_cmd = (f"""
AlembicImport.ImportToRoot = false
importFile @"{file_path}" #noPrompt
""")
self.log.debug(f"Executing command: {abc_export_cmd}")
rt.execute(abc_export_cmd)
abc_after = {
c for c in rt.rootNode.Children
if rt.classOf(c) == rt.AlembicContainer
}
# This should yield new AlembicContainer node
abc_containers = abc_after.difference(abc_before)
if len(abc_containers) != 1:
self.log.error("Something failed when loading.")
abc_container = abc_containers.pop()
container_name = f"{name}_CON"
container = rt.container(name=container_name)
abc_container.Parent = container
return container
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
rt.delete(node)

@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
"""Collect current work file."""
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import legacy_io
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.01
label = "Collect 3dsmax Workfile"
hosts = ['max']
def process(self, context):
"""Inject the current working file."""
folder = rt.maxFilePath
file = rt.maxFileName
if not folder or not file:
self.log.error("Scene is not saved.")
current_file = os.path.join(folder, file)
context.data['currentFile'] = current_file
filename, ext = os.path.splitext(file)
task = legacy_io.Session["AVALON_TASK"]
data = {}
# create instance
instance = context.create_instance(name=filename)
subset = 'workfile' + task.capitalize()
data.update({
"subset": subset,
"asset": os.getenv("AVALON_ASSET", None),
"label": subset,
"publish": True,
"family": 'workfile',
"families": ['workfile'],
"setMembers": [current_file],
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"handleStart": context.data['handleStart'],
"handleEnd": context.data['handleEnd']
})
data['representations'] = [{
'name': ext.lstrip("."),
'ext': ext.lstrip("."),
'files': file,
"stagingDir": folder,
}]
instance.data.update(data)
self.log.info('Collected instance: {}'.format(file))
self.log.info('Scene path: {}'.format(current_file))
self.log.info('staging Dir: {}'.format(folder))
self.log.info('subset: {}'.format(subset))

@@ -0,0 +1,100 @@
# -*- coding: utf-8 -*-
"""
Export alembic file.
Note:
Parameters on AlembicExport (AlembicExport.Parameter):
ParticleAsMesh (bool): Sets whether particle shapes are exported
as meshes.
AnimTimeRange (enum): How animation is saved:
#CurrentFrame: saves current frame
#TimeSlider: saves the active time segments on time slider (default)
#StartEnd: saves the range specified by StartFrame and EndFrame
StartFrame (int)
EndFrame (int)
ShapeSuffix (bool): When set to true, appends the string "Shape" to the
name of each exported mesh. This property is set to false by default.
SamplesPerFrame (int): Sets the number of animation samples per frame.
Hidden (bool): When true, export hidden geometry.
UVs (bool): When true, export the mesh UV map channel.
Normals (bool): When true, export the mesh normals.
VertexColors (bool): When true, export the mesh vertex color map 0 and the
current vertex color display data when it differs
ExtraChannels (bool): When true, export the mesh extra map channels
(map channels greater than channel 1)
Velocity (bool): When true, export the mesh vertex and particle velocity
data.
MaterialIDs (bool): When true, export the mesh material ID as
Alembic face sets.
Visibility (bool): When true, export the node visibility data.
LayerName (bool): When true, export the node layer name as an Alembic
object property.
MaterialName (bool): When true, export the geometry node material name as
an Alembic object property
ObjectID (bool): When true, export the geometry node g-buffer object ID as
an Alembic object property.
CustomAttributes (bool): When true, export the node and its modifiers
custom attributes into an Alembic object compound property.
"""
import os
import pyblish.api
from openpype.pipeline import publish
from pymxs import runtime as rt
from openpype.hosts.max.api import (
maintained_selection,
get_all_children
)
class ExtractAlembic(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Pointcache"
hosts = ["max"]
families = ["pointcache", "camera"]
def process(self, instance):
start = float(instance.data.get("frameStartHandle", 1))
end = float(instance.data.get("frameEndHandle", 1))
container = instance.data["instance_node"]
self.log.info("Extracting pointcache ...")
parent_dir = self.staging_dir(instance)
file_name = "{name}.abc".format(**instance.data)
path = os.path.join(parent_dir, file_name)
# Run the alembic export
self.log.info("Writing alembic '%s' to '%s'" % (file_name,
parent_dir))
abc_export_cmd = (
f"""
AlembicExport.ArchiveType = #ogawa
AlembicExport.CoordinateSystem = #maya
AlembicExport.StartFrame = {start}
AlembicExport.EndFrame = {end}
exportFile @"{path}" #noPrompt selectedOnly:on using:AlembicExport
""")
self.log.debug(f"Executing command: {abc_export_cmd}")
with maintained_selection():
# select and export
rt.select(get_all_children(rt.getNodeByName(container)))
rt.execute(abc_export_cmd)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': file_name,
"stagingDir": parent_dir,
}
instance.data["representations"].append(representation)
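As a standalone illustration of the AlembicExport properties documented in the note above, the same setup can be driven directly through pymxs (frame range and output path are made-up values):

from pymxs import runtime as rt

rt.AlembicExport.ArchiveType = rt.Name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.Name("maya")
rt.AlembicExport.StartFrame = 1001
rt.AlembicExport.EndFrame = 1050

# pymxs equivalent of `exportFile @"..." #noPrompt selectedOnly:on`
rt.exportFile(r"C:/temp/out.abc", rt.Name("noPrompt"),
              selectedOnly=True, using=rt.AlembicExport)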

@@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from pymxs import runtime as rt
class ValidateSceneSaved(pyblish.api.InstancePlugin):
"""Validate that workfile was saved."""
order = pyblish.api.ValidatorOrder
families = ["workfile"]
hosts = ["max"]
label = "Validate Workfile is saved"
def process(self, instance):
if not rt.maxFilePath or not rt.maxFileName:
raise PublishValidationError(
"Workfile is not saved", title=self.label)

@@ -0,0 +1,9 @@
-- OpenPype Init Script
(
local sysPath = dotNetClass "System.IO.Path"
local sysDir = dotNetClass "System.IO.Directory"
local localScript = getThisScriptFilename()
local startup = sysPath.Combine (sysPath.GetDirectoryName localScript) "startup.py"
python.ExecuteFile startup
)

@@ -0,0 +1,6 @@
# -*- coding: utf-8 -*-
from openpype.hosts.max.api import MaxHost
from openpype.pipeline import install_host
host = MaxHost()
install_host(host)

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""Tools to work with GLTF."""
import logging
from maya import cmds, mel # noqa
log = logging.getLogger(__name__)
_gltf_options = {
"of": str, # outputFolder
"cpr": str, # copyright
"sno": bool, # selectedNodeOnly
"sn": str, # sceneName
"glb": bool, # binary
"nbu": bool, # niceBufferURIs
"hbu": bool, # hashBufferURI
"ext": bool, # externalTextures
"ivt": int, # initialValuesTime
"acn": str, # animationClipName
"ast": int, # animationClipStartTime
"aet": int, # animationClipEndTime
"afr": float, # animationClipFrameRate
"dsa": int, # detectStepAnimations
"mpa": str, # meshPrimitiveAttributes
"bpa": str, # blendPrimitiveAttributes
"i32": bool, # force32bitIndices
"ssm": bool, # skipStandardMaterials
"eut": bool, # excludeUnusedTexcoord
"dm": bool, # defaultMaterial
"cm": bool, # colorizeMaterials
"dmy": str, # dumpMaya
"dgl": str, # dumpGLTF
"imd": str, # ignoreMeshDeformers
"ssc": bool, # skipSkinClusters
"sbs": bool, # skipBlendShapes
"rvp": bool, # redrawViewport
"vno": bool # visibleNodesOnly
}
def extract_gltf(parent_dir,
filename,
**kwargs):
"""Sets GLTF export options from data in the instance.
"""
cmds.loadPlugin('maya2glTF', quiet=True)
# load the UI to run mel command
mel.eval("maya2glTF_UI()")
parent_dir = parent_dir.replace('\\', '/')
options = {
"dsa": 1,
"glb": True
}
options.update(kwargs)
for key, value in options.copy().items():
if key not in _gltf_options:
log.warning("extract_gltf() does not support option '%s'. "
"Flag will be ignored..", key)
options.pop(key)
continue
job_args = list()
default_opt = "maya2glTF -of \"{0}\" -sn \"{1}\"".format(parent_dir, filename) # noqa
job_args.append(default_opt)
for key, value in options.items():
if isinstance(value, str):
job_args.append("-{0} \"{1}\"".format(key, value))
elif isinstance(value, bool):
if value:
job_args.append("-{0}".format(key))
else:
job_args.append("-{0} {1}".format(key, value))
job_str = " ".join(job_args)
log.info("{}".format(job_str))
mel.eval(job_str)
# close the glTF exporter window after the export finishes
gltf_UI = "maya2glTF_exporter_window"
if cmds.window(gltf_UI, q=True, exists=True):
cmds.deleteUI(gltf_UI)
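A hypothetical call of the extractor helper above, assuming the maya2glTF plugin is installed and the target nodes are selected (the module path, output folder and clip name are assumptions):

from openpype.hosts.maya.api.gltf import extract_gltf

# writes C:/temp/asset.glb; unsupported flags are dropped with a warning
extract_gltf("C:/temp", "asset", sno=True, acn="clip1")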

@@ -129,14 +129,19 @@ def get_main_window():
@contextlib.contextmanager
def suspended_refresh():
"""Suspend viewport refreshes"""
def suspended_refresh(suspend=True):
"""Suspend viewport refreshes
cmds.ogs(pause=True) is a toggle so we cant pass False.
"""
original_state = cmds.ogs(query=True, pause=True)
try:
cmds.refresh(suspend=True)
if suspend and not original_state:
cmds.ogs(pause=True)
yield
finally:
cmds.refresh(suspend=False)
if suspend and not original_state:
cmds.ogs(pause=True)
@contextlib.contextmanager
@@ -3438,3 +3443,8 @@ def iter_visible_nodes_in_range(nodes, start, end):
# If no more nodes to process break the frame iterations..
if not node_dependencies:
break
def get_attribute_input(attr):
connections = cmds.listConnections(attr, plugs=True, destination=False)
return connections[0] if connections else None
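A short usage sketch of the two helpers above (the imported file and node/attribute names are hypothetical):

from maya import cmds

from openpype.hosts.maya.api.lib import (
    get_attribute_input,
    suspended_refresh,
)

with suspended_refresh():
    # the viewport stays paused during heavy edits and is un-paused
    # afterwards only if this block was the one that paused it
    cmds.file("cache.abc", i=True)

# returns the source plug, e.g. "time1.outTime", or None
print(get_attribute_input("alembicNode1.time"))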

@@ -95,21 +95,25 @@ class RenderSettings(object):
if renderer == "redshift":
self._set_redshift_settings(width, height)
mel.eval("redshiftUpdateActiveAovList")
def _set_arnold_settings(self, width, height):
"""Sets settings for Arnold."""
from mtoa.core import createOptions # noqa
from mtoa.aovs import AOVInterface # noqa
createOptions()
arnold_render_presets = self._project_settings["maya"]["RenderSettings"]["arnold_renderer"] # noqa
render_settings = self._project_settings["maya"]["RenderSettings"]
arnold_render_presets = render_settings["arnold_renderer"] # noqa
# Force resetting settings and AOV list to avoid having to deal with
# AOV checking logic, for now.
# This is a workaround because the standard
# function to revert render settings does not reset the AOV list in MtoA.
# Fetch current AOVs in case there are any.
current_aovs = AOVInterface().getAOVs()
remove_aovs = render_settings["remove_aovs"]
if remove_aovs:
# Remove fetched AOVs
AOVInterface().removeAOVs(current_aovs)
AOVInterface().removeAOVs(current_aovs)
mel.eval("unifiedRenderGlobalsRevertToDefault")
img_ext = arnold_render_presets["image_format"]
img_prefix = arnold_render_presets["image_prefix"]
@@ -118,6 +122,8 @@ class RenderSettings(object):
multi_exr = arnold_render_presets["multilayer_exr"]
additional_options = arnold_render_presets["additional_options"]
for aov in aovs:
if aov in current_aovs and not remove_aovs:
continue
AOVInterface('defaultArnoldRenderOptions').addAOV(aov)
cmds.setAttr("defaultResolution.width", width)
@@ -141,12 +147,50 @@ class RenderSettings(object):
def _set_redshift_settings(self, width, height):
"""Sets settings for Redshift."""
redshift_render_presets = (
self._project_settings
["maya"]
["RenderSettings"]
["redshift_renderer"]
)
render_settings = self._project_settings["maya"]["RenderSettings"]
redshift_render_presets = render_settings["redshift_renderer"]
remove_aovs = render_settings["remove_aovs"]
all_rs_aovs = cmds.ls(type='RedshiftAOV')
if remove_aovs:
for aov in all_rs_aovs:
enabled = cmds.getAttr("{}.enabled".format(aov))
if enabled:
cmds.delete(aov)
redshift_aovs = redshift_render_presets["aov_list"]
# list all the aovs
all_rs_aovs = cmds.ls(type='RedshiftAOV')
for rs_aov in redshift_aovs:
rs_layername = rs_aov
if " " in rs_aov:
rs_renderlayer = rs_aov.replace(" ", "")
rs_layername = "rsAov_{}".format(rs_renderlayer)
else:
rs_layername = "rsAov_{}".format(rs_aov)
if rs_layername in all_rs_aovs:
continue
cmds.rsCreateAov(type=rs_aov)
# update the AOV list
mel.eval("redshiftUpdateActiveAovList")
rs_p_engine = redshift_render_presets["primary_gi_engine"]
rs_s_engine = redshift_render_presets["secondary_gi_engine"]
if int(rs_p_engine) != 0 or int(rs_s_engine) != 0:
cmds.setAttr("redshiftOptions.GIEnabled", 1)
if int(rs_p_engine) == 0:
# reset the primary GI Engine as default
cmds.setAttr("redshiftOptions.primaryGIEngine", 4)
if int(rs_s_engine) == 0:
# reset the secondary GI Engine as default
cmds.setAttr("redshiftOptions.secondaryGIEngine", 2)
else:
cmds.setAttr("redshiftOptions.GIEnabled", 0)
cmds.setAttr("redshiftOptions.primaryGIEngine", int(rs_p_engine))
cmds.setAttr("redshiftOptions.secondaryGIEngine", int(rs_s_engine))
additional_options = redshift_render_presets["additional_options"]
ext = redshift_render_presets["image_format"]
img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"]
@@ -163,12 +207,31 @@ class RenderSettings(object):
"""Sets important settings for Vray."""
settings = cmds.ls(type="VRaySettingsNode")
node = settings[0] if settings else cmds.createNode("VRaySettingsNode")
vray_render_presets = (
self._project_settings
["maya"]
["RenderSettings"]
["vray_renderer"]
)
render_settings = self._project_settings["maya"]["RenderSettings"]
vray_render_presets = render_settings["vray_renderer"]
# vrayRenderElement
remove_aovs = render_settings["remove_aovs"]
all_vray_aovs = cmds.ls(type='VRayRenderElement')
lightSelect_aovs = cmds.ls(type='VRayRenderElementSet')
if remove_aovs:
for aov in all_vray_aovs:
# remove all aovs except LightSelect
enabled = cmds.getAttr("{}.enabled".format(aov))
if enabled:
cmds.delete(aov)
# remove LightSelect
for light_aovs in lightSelect_aovs:
light_enabled = cmds.getAttr("{}.enabled".format(light_aovs))
if light_enabled:
cmds.delete(light_aovs)
vray_aovs = vray_render_presets["aov_list"]
for renderlayer in vray_aovs:
renderElement = "vrayAddRenderElement {}".format(renderlayer)
RE_name = mel.eval(renderElement)
# if there is more than one same render element
if RE_name.endswith("1"):
cmds.delete(RE_name)
# Set aov separator
# First we need to explicitly set the UI items in Render Settings
# because that is also what V-Ray updates to when that Render Settings

@@ -217,7 +217,7 @@ class ReferenceLoader(Loader):
# Need to save alembic settings and reapply, cause referencing resets
# them to incoming data.
alembic_attrs = ["speed", "offset", "cycleType"]
alembic_attrs = ["speed", "offset", "cycleType", "time"]
alembic_data = {}
if representation["name"] == "abc":
alembic_nodes = cmds.ls(
@@ -226,7 +226,12 @@
if alembic_nodes:
for attr in alembic_attrs:
node_attr = "{}.{}".format(alembic_nodes[0], attr)
alembic_data[attr] = cmds.getAttr(node_attr)
data = {
"input": lib.get_attribute_input(node_attr),
"value": cmds.getAttr(node_attr)
}
alembic_data[attr] = data
else:
self.log.debug("No alembic nodes found in {}".format(members))
@@ -263,8 +268,19 @@
"{}:*".format(namespace), type="AlembicNode"
)
if alembic_nodes:
for attr, value in alembic_data.items():
cmds.setAttr("{}.{}".format(alembic_nodes[0], attr), value)
alembic_node = alembic_nodes[0] # assume single AlembicNode
for attr, data in alembic_data.items():
node_attr = "{}.{}".format(alembic_node, attr)
input = lib.get_attribute_input(node_attr)
if data["input"]:
if data["input"] != input:
cmds.connectAttr(
data["input"], node_attr, force=True
)
else:
if input:
cmds.disconnectAttr(input, node_attr)
cmds.setAttr(node_attr, data["value"])
# Fix PLN-40 for older containers created with Avalon that had the
# `.verticesOnlySet` set to True.

@@ -1,5 +1,3 @@
from collections import OrderedDict
from openpype.hosts.maya.api import (
lib,
plugin
@@ -9,12 +7,26 @@ from maya import cmds
class CreateAss(plugin.Creator):
"""Arnold Archive"""
"""Arnold Scene Source"""
name = "ass"
label = "Ass StandIn"
label = "Arnold Scene Source"
family = "ass"
icon = "cube"
expandProcedurals = False
motionBlur = True
motionBlurKeys = 2
motionBlurLength = 0.5
maskOptions = False
maskCamera = False
maskLight = False
maskShape = False
maskShader = False
maskOverride = False
maskDriver = False
maskFilter = False
maskColor_manager = False
maskOperator = False
def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)
@@ -22,17 +34,27 @@ class CreateAss(plugin.Creator):
# Add animation data
self.data.update(lib.collect_animation_data())
# Vertex colors with the geometry
self.data["exportSequence"] = False
self.data["expandProcedurals"] = self.expandProcedurals
self.data["motionBlur"] = self.motionBlur
self.data["motionBlurKeys"] = self.motionBlurKeys
self.data["motionBlurLength"] = self.motionBlurLength
# Masks
self.data["maskOptions"] = self.maskOptions
self.data["maskCamera"] = self.maskCamera
self.data["maskLight"] = self.maskLight
self.data["maskShape"] = self.maskShape
self.data["maskShader"] = self.maskShader
self.data["maskOverride"] = self.maskOverride
self.data["maskDriver"] = self.maskDriver
self.data["maskFilter"] = self.maskFilter
self.data["maskColor_manager"] = self.maskColor_manager
self.data["maskOperator"] = self.maskOperator
def process(self):
instance = super(CreateAss, self).process()
# data = OrderedDict(**self.data)
nodes = list()
nodes = []
if (self.options or {}).get("useSelection"):
nodes = cmds.ls(selection=True)
@@ -42,7 +64,3 @@ class CreateAss(plugin.Creator):
assContent = cmds.sets(name="content_SET")
assProxy = cmds.sets(name="proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)
# self.log.info(data)
#
# self.data = data

@@ -28,6 +28,7 @@ class CreatePointCache(plugin.Creator):
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups
self.data["worldSpace"] = True # Default to exporting world-space
self.data["refresh"] = False # Default to suspend refresh.
# Add options for custom attributes
self.data["attr"] = ""

@@ -0,0 +1,35 @@
from openpype.hosts.maya.api import (
lib,
plugin
)
class CreateProxyAlembic(plugin.Creator):
"""Proxy Alembic for animated data"""
name = "proxyAbcMain"
label = "Proxy Alembic"
family = "proxyAbc"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateProxyAlembic, self).__init__(*args, **kwargs)
# Add animation data
self.data.update(lib.collect_animation_data())
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
# Vertex colors with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
# Default to exporting world-space
self.data["worldSpace"] = True
# name suffix for the bounding box
self.data["nameSuffix"] = "_BBox"
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""

@@ -48,3 +48,21 @@ class CreateUnrealSkeletalMesh(plugin.Creator):
cmds.sets(node, forceElement=joints_set)
else:
cmds.sets(node, forceElement=geometry_set)
# Add animation data
self.data.update(lib.collect_animation_data())
# Only renderable visible shapes
self.data["renderableOnly"] = False
# only nodes that are visible
self.data["visibleOnly"] = False
# Include parent groups
self.data["includeParentHierarchy"] = False
# Default to exporting world-space
self.data["worldSpace"] = True
# Default to suspend refresh.
self.data["refresh"] = False
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""

View file

@ -14,6 +14,7 @@ class SetFrameRangeLoader(load.LoaderPlugin):
families = ["animation",
"camera",
"proxyAbc",
"pointcache"]
representations = ["abc"]
@ -48,6 +49,7 @@ class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
families = ["animation",
"camera",
"proxyAbc",
"pointcache"]
representations = ["abc"]

View file

@ -11,7 +11,7 @@ from openpype.settings import get_project_settings
class AlembicStandinLoader(load.LoaderPlugin):
"""Load Alembic as Arnold Standin"""
families = ["animation", "model", "pointcache"]
families = ["animation", "model", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Alembic as Arnold Standin"

View file

@ -10,7 +10,7 @@ from openpype.settings import get_project_settings
class GpuCacheLoader(load.LoaderPlugin):
"""Load Alembic as gpuCache"""
families = ["model", "animation", "pointcache"]
families = ["model", "animation", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Gpu Cache"

View file

@ -16,6 +16,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
families = ["model",
"pointcache",
"proxyAbc",
"animation",
"mayaAscii",
"mayaScene",

View file

@ -1,4 +1,5 @@
from maya import cmds
from openpype.pipeline.publish import KnownPublishError
import pyblish.api
@ -6,6 +7,7 @@ import pyblish.api
class CollectAssData(pyblish.api.InstancePlugin):
"""Collect Ass data."""
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Ass'
families = ["ass"]
@ -23,8 +25,23 @@ class CollectAssData(pyblish.api.InstancePlugin):
instance.data['setMembers'] = members
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
assert len(members) == 1, "You have multiple proxy meshes, please only use one"
if len(members) != 1:
msg = "You have multiple proxy meshes, please only use one"
raise KnownPublishError(msg)
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
# Use a camera in the object set if present, else default to the first
# renderable camera in the scene.
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
camera = renderable[0]
for node in instance.data["setMembers"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)
if camera_shapes:
camera = node
instance.data["camera"] = camera
self.log.debug("data: {}".format(instance.data))

View file

@ -0,0 +1,17 @@
# -*- coding: utf-8 -*-
import pyblish.api
class CollectGLTF(pyblish.api.InstancePlugin):
"""Collect Assets for GLTF/GLB export."""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Asset for GLTF/GLB export"
families = ["model", "animation", "pointcache"]
def process(self, instance):
if not instance.data.get("families"):
instance.data["families"] = []
if "gltf" not in instance.data["families"]:
instance.data["families"].append("gltf")

View file

@ -1,77 +1,93 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection
from openpype.hosts.maya.api.lib import maintained_selection, attribute_values
class ExtractAssStandin(publish.Extractor):
"""Extract the content of the instance to a ass file
"""Extract the content of the instance to a ass file"""
Things to pay attention to:
- If animation is toggled, are the frames correct
-
"""
label = "Ass Standin (.ass)"
label = "Arnold Scene Source (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
sequence = instance.data.get("exportSequence", False)
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
filenames = list()
filenames = []
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
values = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
self.log.info("Writing: '%s'" % file_path)
with maintained_selection():
self.log.info("Writing: {}".format(instance.data["setMembers"]))
cmds.select(instance.data["setMembers"], noExpand=True)
with attribute_values(values):
with maintained_selection():
self.log.info(
"Writing: {}".format(instance.data["setMembers"])
)
cmds.select(instance.data["setMembers"], noExpand=True)
if sequence:
self.log.info("Extracting ass sequence")
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
# Collect the start and end including handles
start = instance.data.get("frameStartHandle", 1)
end = instance.data.get("frameEndHandle", 1)
step = instance.data.get("step", 0)
exported_files = cmds.arnoldExportAss(**kwargs)
exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=self.asciiAss,
shadowLinks=True,
lightLinks=True,
boundingBox=True,
startFrame=start,
endFrame=end,
frameStep=step
)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
else:
self.log.info("Extracting ass")
cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=False,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)
self.log.info("Extracted {}".format(filename))
filenames = filename
optionals = [
"frameStart", "frameEnd", "step", "handles",
"handleEnd", "handleStart"
]
for key in optionals:
instance.data.pop(key, None)
if "representations" not in instance.data:
instance.data["representations"] = []
@ -79,13 +95,11 @@ class ExtractAssStandin(publish.Extractor):
representation = {
'name': 'ass',
'ext': 'ass',
'files': filenames,
"stagingDir": staging_dir
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
'frameStart': kwargs["startFrame"]
}
if sequence:
representation['frameStart'] = start
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"

View file

@ -0,0 +1,65 @@
import os
from maya import cmds, mel
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.gltf import extract_gltf
class ExtractGLB(publish.Extractor):
order = pyblish.api.ExtractorOrder
hosts = ["maya"]
label = "Extract GLB"
families = ["gltf"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{0}.glb".format(instance.name)
path = os.path.join(staging_dir, filename)
self.log.info("Extracting GLB to: {}".format(path))
nodes = instance[:]
self.log.info("Instance: {0}".format(nodes))
start_frame = instance.data('frameStart') or \
int(cmds.playbackOptions(query=True,
animationStartTime=True))  # noqa
end_frame = instance.data('frameEnd') or \
int(cmds.playbackOptions(query=True,
animationEndTime=True)) # noqa
fps = mel.eval('currentTimeUnitToFPS()')
options = {
"sno": True, # selectedNodeOnly
"nbu": True, # .bin instead of .bin0
"ast": start_frame,
"aet": end_frame,
"afr": fps,
"dsa": 1,
"acn": instance.name,
"glb": True,
"vno": True # visibleNodeOnly
}
with lib.maintained_selection():
cmds.select(nodes, hi=True, noExpand=True)
extract_gltf(staging_dir,
instance.name,
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'glb',
'ext': 'glb',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract GLB successful to: {0}".format(path))

View file

@ -119,6 +119,10 @@ class ExtractPlayblast(publish.Extractor):
else:
preset["viewport_options"] = {"imagePlane": image_plane}
# Disable Pan/Zoom.
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
with lib.maintained_time():
filename = preset.get("filename", "%TEMP%")
@ -139,6 +143,8 @@ class ExtractPlayblast(publish.Extractor):
path = capture.capture(log=self.log, **preset)
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
self.log.debug("playblast path {}".format(path))
collected_files = os.listdir(stagingdir)
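The pan/zoom handling here is the manual save/set/restore pattern; the same could be written with the `attribute_values` context manager used by the Arnold extractor above, which would also restore the attribute if `capture` raises (the manual variant skips the restore on error). A sketch, assuming the surrounding plugin scope (`preset`, `capture`, `self.log`):

from openpype.hosts.maya.api.lib import attribute_values

attr = "{}.panZoomEnabled".format(preset["camera"])
with attribute_values({attr: False}):
    path = capture.capture(log=self.log, **preset)
# panZoomEnabled is restored here, even if capture() raised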

View file

@ -86,13 +86,16 @@ class ExtractAlembic(publish.Extractor):
start=start,
end=end))
with suspended_refresh():
suspend = not instance.data.get("refresh", False)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
extract_alembic(
file=path,
startFrame=start,
endFrame=end,
**options
)
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -0,0 +1,109 @@
import os
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
)
class ExtractProxyAlembic(publish.Extractor):
"""Produce an alembic for bounding box geometry
"""
label = "Extract Proxy (Alembic)"
hosts = ["maya"]
families = ["proxyAbc"]
def process(self, instance):
name_suffix = instance.data.get("nameSuffix")
# Collect the start and end including handles
start = float(instance.data.get("frameStartHandle", 1))
end = float(instance.data.get("frameEndHandle", 1))
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
attrs += ["cbId"]
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]
self.log.info("Extracting Proxy Alembic..")
dirname = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(dirname, filename)
proxy_root = self.create_proxy_geometry(instance,
name_suffix,
start,
end)
options = {
"step": instance.data.get("step", 1.0),
"attr": attrs,
"attrPrefix": attr_prefixes,
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": instance.data.get("writeColorSets", False),
"writeFaceSets": instance.data.get("writeFaceSets", False),
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True),
"root": proxy_root
}
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
with suspended_refresh():
with maintained_selection():
cmds.select(proxy_root, hi=True, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
instance.context.data["cleanupFullPaths"].append(path)
self.log.info("Extracted {} to {}".format(instance, dirname))
# remove the bounding box
bbox_master = cmds.ls("bbox_grp")
cmds.delete(bbox_master)
def create_proxy_geometry(self, instance, name_suffix, start, end):
nodes = instance[:]
nodes = list(iter_visible_nodes_in_range(nodes,
start=start,
end=end))
inst_selection = cmds.ls(nodes, long=True)
cmds.geomToBBox(inst_selection,
nameSuffix=name_suffix,
keepOriginal=True,
single=False,
bakeAnimation=True,
startTime=start,
endTime=end)
# create master group for bounding
# boxes as the main root
master_group = cmds.group(name="bbox_grp")
bbox_sel = cmds.ls(master_group, long=True)
self.log.debug("proxy_root: {}".format(bbox_sel))
return bbox_sel
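For reference, a hypothetical single-mesh call showing what the `geomToBBox` flags above do; the node name is made up:

from maya import cmds

# With nameSuffix="_BBox" and keepOriginal=True this creates an animated
# bounding-box transform "pSphere1_BBox" next to the untouched original.
cmds.geomToBBox("pSphere1",
                nameSuffix="_BBox",
                keepOriginal=True,
                single=False,
                bakeAnimation=True,
                startTime=1001.0,
                endTime=1050.0)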

View file

@ -109,6 +109,11 @@ class ExtractThumbnail(publish.Extractor):
display_lights = instance.data["displayLights"]
preset["viewport_options"]["displayLights"] = display_lights
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
if transparency != 0:
preset["viewport2_options"]["transparencyAlgorithm"] = transparency
# Isolate view is requested by having objects in the set besides a
# camera.
if preset.pop("isolate_view", False) and instance.data.get("isolate"):
@ -121,6 +126,10 @@ class ExtractThumbnail(publish.Extractor):
else:
preset["viewport_options"] = {"imagePlane": image_plane}
# Disable Pan/Zoom.
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
with lib.maintained_time():
# Force viewer to False in call to capture because we have our own
# viewer opening call to allow a signal to trigger between
@ -140,6 +149,7 @@ class ExtractThumbnail(publish.Extractor):
_, thumbnail = os.path.split(playblast)
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
self.log.info("file list {}".format(thumbnail))

View file

@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-
"""Create Unreal Skeletal Mesh data to be extracted as FBX."""
import os
from contextlib import contextmanager
from maya import cmds # noqa
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection
)
@contextmanager
def renamed(original_name, renamed_name):
# type: (str, str) -> None
try:
cmds.rename(original_name, renamed_name)
yield
finally:
cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMeshAbc(publish.Extractor):
"""Extract Unreal Skeletal Mesh as FBX from Maya. """
label = "Extract Unreal Skeletal Mesh - Alembic"
hosts = ["maya"]
families = ["skeletalMesh"]
optional = True
def process(self, instance):
self.log.info("Extracting pointcache..")
geo = cmds.listRelatives(
instance.data.get("geometry"), allDescendents=True, fullPath=True)
joints = cmds.listRelatives(
instance.data.get("joints"), allDescendents=True, fullPath=True)
nodes = geo + joints
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
attrs += ["cbId"]
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]
# Define output path
staging_dir = self.staging_dir(instance)
filename = "{0}.abc".format(instance.name)
path = os.path.join(staging_dir, filename)
# The export requires forward slashes because we need
# to format it into a string in a mel expression
path = path.replace('\\', '/')
self.log.info("Extracting ABC to: {0}".format(path))
self.log.info("Members: {0}".format(nodes))
self.log.info("Instance: {0}".format(instance[:]))
options = {
"step": instance.data.get("step", 1.0),
"attr": attrs,
"attrPrefix": attr_prefixes,
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": instance.data.get("writeColorSets", False),
"writeFaceSets": instance.data.get("writeFaceSets", False),
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
}
self.log.info("Options: {}".format(options))
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data.get("setMembers")
with suspended_refresh(suspend=instance.data.get("refresh", False)):
with maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
# startFrame=start,
# endFrame=end,
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract ABC successful to: {0}".format(path))

View file

@ -21,12 +21,13 @@ def renamed(original_name, renamed_name):
cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMesh(publish.Extractor):
class ExtractUnrealSkeletalMeshFbx(publish.Extractor):
"""Extract Unreal Skeletal Mesh as FBX from Maya. """
order = pyblish.api.ExtractorOrder - 0.1
label = "Extract Unreal Skeletal Mesh"
label = "Extract Unreal Skeletal Mesh - FBX"
families = ["skeletalMesh"]
optional = True
def process(self, instance):
fbx_exporter = fbx.FBXExtractor(log=self.log)

View file

@ -20,7 +20,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin):
"""
order = ValidateContentsOrder
families = ['animation', "pointcache"]
families = ['animation', "pointcache", "proxyAbc"]
hosts = ['maya']
label = 'Animation Out Set Related Node Ids'
actions = [

View file

@ -25,6 +25,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
families = ["animation",
"pointcache",
"camera",
"proxyAbc",
"renderlayer",
"review",
"yeticache"]

View file

@ -28,7 +28,9 @@ class ValidateSkeletalMeshHierarchy(pyblish.api.InstancePlugin):
parent.split("|")[1] for parent in (joints_parents + geo_parents)
}
if len(set(parents_set)) != 1:
self.log.info(parents_set)
if len(set(parents_set)) > 2:
raise PublishXmlValidationError(
self,
"Multiple roots on geometry or joints."

View file

@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.hosts.maya.api.action import (
SelectInvalidAction,
)
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
)
from maya import cmds
class ValidateSkeletalMeshTriangulated(pyblish.api.InstancePlugin):
"""Validates that the geometry has been triangulated."""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["skeletalMesh"]
label = "Skeletal Mesh Triangulated"
optional = True
actions = [
SelectInvalidAction,
RepairAction
]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
"The following objects needs to be triangulated: "
"{}".format(invalid))
@classmethod
def get_invalid(cls, instance):
geo = instance.data.get("geometry")
invalid = []
for obj in cmds.listRelatives(
cmds.ls(geo), allDescendents=True, fullPath=True):
n_triangles = cmds.polyEvaluate(obj, triangle=True)
n_faces = cmds.polyEvaluate(obj, face=True)
if not (isinstance(n_triangles, int) and isinstance(n_faces, int)):
continue
# We check if the number of triangles is equal to the number of
# faces for each transform node.
# If it is, the object is triangulated.
if cmds.objectType(obj, i="transform") and n_triangles != n_faces:
invalid.append(obj)
return invalid
@classmethod
def repair(cls, instance):
for node in cls.get_invalid(instance):
cmds.polyTriangulate(node)
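As a concrete case of the face/triangle comparison above: a default polyCube reports 6 faces but 12 triangles, so it is flagged as invalid; after `polyTriangulate` both counts are 12 and the check passes.

from maya import cmds

cube = cmds.polyCube()[0]
cmds.polyEvaluate(cube, face=True)      # 6
cmds.polyEvaluate(cube, triangle=True)  # 12 -> 12 != 6, flagged as invalid
cmds.polyTriangulate(cube)
cmds.polyEvaluate(cube, face=True)      # 12 -> counts now match, check passes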

View file

@ -27,7 +27,12 @@ class NukeAddon(OpenPypeModule, IHostAddon):
new_nuke_paths.append(norm_path)
env["NUKE_PATH"] = os.pathsep.join(new_nuke_paths)
# Remove auto screen scale factor for Qt
# - let Nuke decide its value
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
# Remove tkinter library paths if they are set
env.pop("TK_LIBRARY", None)
env.pop("TCL_LIBRARY", None)
# Add vendor to PYTHONPATH
python_path = env["PYTHONPATH"]

View file

@ -2961,7 +2961,7 @@ def get_viewer_config_from_string(input_string):
viewer = split[1]
display = split[0]
elif "(" in viewer:
pattern = r"([\w\d\s]+).*[(](.*)[)]"
pattern = r"([\w\d\s\.\-]+).*[(](.*)[)]"
result = re.findall(pattern, viewer)
try:
result = result.pop()
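The widened character class matters for viewer/display names containing dots or hyphens, e.g. "Rec.709": with the old pattern the first capture group stopped at the dot. A quick comparison:

import re

old = r"([\w\d\s]+).*[(](.*)[)]"
new = r"([\w\d\s\.\-]+).*[(](.*)[)]"

re.findall(old, "Rec.709 (ACES)")  # [('Rec', 'ACES')] -> viewer name truncated
re.findall(new, "Rec.709 (ACES)")  # [('Rec.709 ', 'ACES')] -> full name captured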

View file

@ -298,7 +298,7 @@ class ExtractSlateFrame(publish.Extractor):
def add_comment_slate_node(self, instance, node):
comment = instance.context.data.get("comment")
comment = instance.data["comment"]
intent = instance.context.data.get("intent")
if not isinstance(intent, dict):
intent = {

View file

@ -1,5 +1,7 @@
import os
import pyblish.api
from openpype.settings import get_project_settings
from openpype.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
@ -18,23 +20,38 @@ class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
families = ["texture_batch_workfile"]
optional = True
# from presets
main_workfile_extensions = ['mra']
def process(self, instance):
if instance.data["family"] == "workfile":
ext = instance.data["representations"][0]["ext"]
if ext not in self.main_workfile_extensions:
main_workfile_extensions = self.get_main_workfile_extensions()
if ext not in main_workfile_extensions:
self.log.warning("Only secondary workfile present!")
return
if not instance.data.get("resources"):
msg = "No secondary workfile present for workfile '{}'". \
format(instance.data["name"])
ext = self.main_workfile_extensions[0]
ext = main_workfile_extensions[0]
formatting_data = {"file_name": instance.data["name"],
"extension": ext}
raise PublishXmlValidationError(self, msg,
formatting_data=formatting_data
)
@staticmethod
def get_main_workfile_extensions():
project_settings = get_project_settings(os.environ["AVALON_PROJECT"])
try:
extensions = (project_settings["standalonepublisher"]
["publish"]
["CollectTextures"]
["main_workfile_extensions"])
except KeyError:
raise Exception("Setting 'Main workfile extensions' not found."
" The setting must be set for the"
" 'Collect Texture' publish plugin of the"
" 'Standalone Publish' tool.")
return extensions

View file

@ -2,6 +2,7 @@
import os
import logging
from typing import List
import semver
import pyblish.api
@ -21,6 +22,9 @@ import unreal # noqa
logger = logging.getLogger("openpype.hosts.unreal")
OPENPYPE_CONTAINERS = "OpenPypeContainers"
UNREAL_VERSION = semver.VersionInfo(
*os.getenv("OPENPYPE_UNREAL_VERSION").split(".")
)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
@ -111,7 +115,9 @@ def ls():
"""
ar = unreal.AssetRegistryHelpers.get_asset_registry()
openpype_containers = ar.get_assets_by_class("AssetContainer", True)
# UE 5.1 changed how class name is specified
class_name = ["/Script/OpenPype", "AssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AssetContainer" # noqa
openpype_containers = ar.get_assets_by_class(class_name, True)
# get_asset_by_class returns AssetData. To get all metadata we need to
# load asset. get_tag_values() work only on metadata registered in

View file

@ -150,6 +150,7 @@ class UnrealPrelaunchHook(PreLaunchHook):
engine_path=Path(engine_path)
)
self.launch_context.env["OPENPYPE_UNREAL_VERSION"] = engine_version
# Append project file to launch arguments
self.launch_context.launch_args.append(
f"\"{project_file.as_posix()}\"")

View file

@ -2,107 +2,150 @@
#include "OpenPypePublishInstance.h"
#include "AssetRegistryModule.h"
#include "NotificationManager.h"
#include "SNotificationList.h"
//Moves all the invalid pointers to the end to prepare them for the shrinking
#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \
VAR.Shrink();
UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer)
: UObject(ObjectInitializer)
: UPrimaryDataAsset(ObjectInitializer)
{
FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<FAssetRegistryModule>("AssetRegistry");
FString path = UOpenPypePublishInstance::GetPathName();
const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<
FAssetRegistryModule>("AssetRegistry");
const FPropertyEditorModule& PropertyEditorModule = FModuleManager::LoadModuleChecked<FPropertyEditorModule>(
"PropertyEditor");
FString Left, Right;
GetPathName().Split("/" + GetName(), &Left, &Right);
FARFilter Filter;
Filter.PackagePaths.Add(FName(*path));
Filter.PackagePaths.Emplace(FName(Left));
AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetAdded);
TArray<FAssetData> FoundAssets;
AssetRegistryModule.GetRegistry().GetAssets(Filter, FoundAssets);
for (const FAssetData& AssetData : FoundAssets)
OnAssetCreated(AssetData);
REMOVE_INVALID_ENTRIES(AssetDataInternal)
REMOVE_INVALID_ENTRIES(AssetDataExternal)
AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetCreated);
AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved);
AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UOpenPypePublishInstance::OnAssetRenamed);
AssetRegistryModule.Get().OnAssetUpdated().AddUObject(this, &UOpenPypePublishInstance::OnAssetUpdated);
}
void UOpenPypePublishInstance::OnAssetAdded(const FAssetData& AssetData)
void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData)
{
TArray<FString> split;
// get directory of current container
FString selfFullPath = UOpenPypePublishInstance::GetPathName();
FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
UObject* Asset = InAssetData.GetAsset();
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
// take interest only in paths starting with path of current container
if (assetDir.StartsWith(*selfDir))
if (!IsValid(Asset))
{
// exclude self
if (assetFName != "OpenPypePublishInstance")
UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! Skipping the addition."),
*InAssetData.ObjectPath.ToString());
return;
}
const bool result = IsUnderSameDir(Asset) && Cast<UOpenPypePublishInstance>(Asset) == nullptr;
if (result)
{
if (AssetDataInternal.Emplace(Asset).IsValidId())
{
assets.Add(assetPath);
UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir);
UE_LOG(LogTemp, Log, TEXT("Added an Asset to PublishInstance - Publish Instance: %s, Asset %s"),
*this->GetName(), *Asset->GetName());
}
}
}
void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& AssetData)
void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& InAssetData)
{
TArray<FString> split;
// get directory of current container
FString selfFullPath = UOpenPypePublishInstance::GetPathName();
FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
// take interest only in paths starting with path of current container
FString path = UOpenPypePublishInstance::GetPathName();
FString lpp = FPackageName::GetLongPackagePath(*path);
if (assetDir.StartsWith(*selfDir))
if (Cast<UOpenPypePublishInstance>(InAssetData.GetAsset()) == nullptr)
{
// exclude self
if (assetFName != "OpenPypePublishInstance")
if (AssetDataInternal.Contains(nullptr))
{
// UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp);
assets.Remove(assetPath);
AssetDataInternal.Remove(nullptr);
REMOVE_INVALID_ENTRIES(AssetDataInternal)
}
else
{
AssetDataExternal.Remove(nullptr);
REMOVE_INVALID_ENTRIES(AssetDataExternal)
}
}
}
void UOpenPypePublishInstance::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
void UOpenPypePublishInstance::OnAssetUpdated(const FAssetData& InAssetData)
{
TArray<FString> split;
REMOVE_INVALID_ENTRIES(AssetDataInternal);
REMOVE_INVALID_ENTRIES(AssetDataExternal);
}
// get directory of current container
FString selfFullPath = UOpenPypePublishInstance::GetPathName();
FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
bool UOpenPypePublishInstance::IsUnderSameDir(const UObject* InAsset) const
{
FString ThisLeft, ThisRight;
this->GetPathName().Split(this->GetName(), &ThisLeft, &ThisRight);
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
return InAsset->GetPathName().StartsWith(ThisLeft);
}
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
#ifdef WITH_EDITOR
FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
if (assetDir.StartsWith(*selfDir))
void UOpenPypePublishInstance::SendNotification(const FString& Text) const
{
FNotificationInfo Info{FText::FromString(Text)};
Info.bFireAndForget = true;
Info.bUseLargeFont = false;
Info.bUseThrobber = false;
Info.bUseSuccessFailIcons = false;
Info.ExpireDuration = 4.f;
Info.FadeOutDuration = 2.f;
FSlateNotificationManager::Get().AddNotification(Info);
UE_LOG(LogAssetData, Warning,
TEXT(
"Removed duplicated asset from the AssetsDataExternal in Container \"%s\", Asset is already included in the AssetDataInternal!"
), *GetName()
)
}
void UOpenPypePublishInstance::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent)
{
Super::PostEditChangeProperty(PropertyChangedEvent);
if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet &&
PropertyChangedEvent.Property->GetFName() == GET_MEMBER_NAME_CHECKED(
UOpenPypePublishInstance, AssetDataExternal))
{
// exclude self
if (assetFName != "AssetContainer")
// Check for duplicated assets
for (const auto& Asset : AssetDataInternal)
{
if (AssetDataExternal.Contains(Asset))
{
AssetDataExternal.Remove(Asset);
return SendNotification(
"You are not allowed to add assets into AssetDataExternal which are already included in AssetDataInternal!");
}
}
assets.Remove(str);
assets.Add(assetPath);
// UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str);
// Check if no UOpenPypePublishInstance type assets are included
for (const auto& Asset : AssetDataExternal)
{
if (Cast<UOpenPypePublishInstance>(Asset.Get()) != nullptr)
{
AssetDataExternal.Remove(Asset);
return SendNotification("You are not allowed to add publish instances!");
}
}
}
}
#endif

View file

@ -9,10 +9,10 @@ UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectIn
bEditorImport = true;
}
UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
{
UOpenPypePublishInstance* OpenPypePublishInstance = NewObject<UOpenPypePublishInstance>(InParent, Class, Name, Flags);
return OpenPypePublishInstance;
check(InClass->IsChildOf(UOpenPypePublishInstance::StaticClass()));
return NewObject<UOpenPypePublishInstance>(InParent, InClass, InName, Flags);
}
bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const {

View file

@ -5,17 +5,99 @@
UCLASS(Blueprintable)
class OPENPYPE_API UOpenPypePublishInstance : public UObject
class OPENPYPE_API UOpenPypePublishInstance : public UPrimaryDataAsset
{
GENERATED_BODY()
GENERATED_UCLASS_BODY()
public:
UOpenPypePublishInstance(const FObjectInitializer& ObjectInitalizer);
/**
/**
* Retrieves all the assets which are monitored by the Publish Instance (Monitors assets in the directory which is
* placed in)
*
* @return - Set of UObjects. Careful! They are returning raw pointers. Seems like an issue in UE5
*/
UFUNCTION(BlueprintCallable, BlueprintPure)
TSet<UObject*> GetInternalAssets() const
{
//For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed.
TSet<UObject*> ResultSet;
for (const auto& Asset : AssetDataInternal)
ResultSet.Add(Asset.LoadSynchronous());
return ResultSet;
}
/**
* Retrieves all the assets which have been added manually by the Publish Instance
*
* @return - TSet of assets (UObjects). Careful! They are returning raw pointers. Seems like an issue in UE5
*/
UFUNCTION(BlueprintCallable, BlueprintPure)
TSet<UObject*> GetExternalAssets() const
{
//For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed.
TSet<UObject*> ResultSet;
for (const auto& Asset : AssetDataExternal)
ResultSet.Add(Asset.LoadSynchronous());
return ResultSet;
}
/**
* Function for returning all the assets in the container combined.
*
* @return Returns all the internal and externally added assets into one set (TSet of UObjects). Careful! They are
* returning raw pointers. Seems like an issue in UE5
*
* @attention If the bAddExternalAssets variable is false, external assets won't be included!
*/
UFUNCTION(BlueprintCallable, BlueprintPure)
TSet<UObject*> GetAllAssets() const
{
const TSet<TSoftObjectPtr<UObject>>& IteratedSet = bAddExternalAssets ? AssetDataInternal.Union(AssetDataExternal) : AssetDataInternal;
//Create a new TSet only with raw pointers.
TSet<UObject*> ResultSet;
for (auto& Asset : IteratedSet)
ResultSet.Add(Asset.LoadSynchronous());
return ResultSet;
}
UPROPERTY(EditAnywhere, BlueprintReadOnly)
TArray<FString> assets;
private:
void OnAssetAdded(const FAssetData& AssetData);
void OnAssetRemoved(const FAssetData& AssetData);
void OnAssetRenamed(const FAssetData& AssetData, const FString& str);
};
UPROPERTY(VisibleAnywhere, Category="Assets")
TSet<TSoftObjectPtr<UObject>> AssetDataInternal;
/**
* This property allows exposing the array to include other assets from any other directory than what it's currently
* monitoring. NOTE: these assets have to be added manually! They are not automatically registered or added!
*/
UPROPERTY(EditAnywhere, Category = "Assets")
bool bAddExternalAssets = false;
UPROPERTY(EditAnywhere, meta=(EditCondition="bAddExternalAssets"), Category="Assets")
TSet<TSoftObjectPtr<UObject>> AssetDataExternal;
void OnAssetCreated(const FAssetData& InAssetData);
void OnAssetRemoved(const FAssetData& InAssetData);
void OnAssetUpdated(const FAssetData& InAssetData);
bool IsUnderSameDir(const UObject* InAsset) const;
#ifdef WITH_EDITOR
void SendNotification(const FString& Text) const;
virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override;
#endif
};

View file

@ -14,6 +14,6 @@ class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory
public:
UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer);
virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
virtual UObject* FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
virtual bool ShouldShowInNewMenu() const override;
};
};

View file

@ -6,7 +6,11 @@ public class OpenPype : ModuleRules
{
public OpenPype(ReadOnlyTargetRules Target) : base(Target)
{
DefaultBuildSettings = BuildSettingsVersion.V2;
bLegacyPublicIncludePaths = false;
ShadowVariableWarningLevel = WarningLevel.Error;
PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs;
IncludeOrderVersion = EngineIncludeOrderVersion.Unreal5_0;
PublicIncludePaths.AddRange(
new string[] {

View file

@ -1,7 +1,7 @@
// Fill out your copyright notice in the Description page of Project Settings.
#include "AssetContainer.h"
#include "AssetRegistryModule.h"
#include "AssetRegistry/AssetRegistryModule.h"
#include "Misc/PackageName.h"
#include "Engine.h"
#include "Containers/UnrealString.h"
@ -30,8 +30,8 @@ void UAssetContainer::OnAssetAdded(const FAssetData& AssetData)
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
FString assetFName = AssetData.AssetClassPath.ToString();
UE_LOG(LogTemp, Log, TEXT("asset name %s"), *assetFName);
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
@ -60,7 +60,7 @@ void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData)
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
FString assetFName = AssetData.AssetClassPath.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
@ -93,7 +93,7 @@ void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString&
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
FString assetFName = AssetData.AssetClassPath.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);

View file

@ -1,108 +1,152 @@
#pragma once
#include "OpenPypePublishInstance.h"
#include "AssetRegistryModule.h"
#include "AssetRegistry/AssetRegistryModule.h"
#include "AssetToolsModule.h"
#include "Framework/Notifications/NotificationManager.h"
#include "Widgets/Notifications/SNotificationList.h"
//Moves all the invalid pointers to the end to prepare them for the shrinking
#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \
VAR.Shrink();
UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer)
: UObject(ObjectInitializer)
: UPrimaryDataAsset(ObjectInitializer)
{
FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<FAssetRegistryModule>("AssetRegistry");
FString path = UOpenPypePublishInstance::GetPathName();
const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<
FAssetRegistryModule>("AssetRegistry");
FString Left, Right;
GetPathName().Split(GetName(), &Left, &Right);
FARFilter Filter;
Filter.PackagePaths.Add(FName(*path));
Filter.PackagePaths.Emplace(FName(Left));
AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetAdded);
TArray<FAssetData> FoundAssets;
AssetRegistryModule.GetRegistry().GetAssets(Filter, FoundAssets);
for (const FAssetData& AssetData : FoundAssets)
OnAssetCreated(AssetData);
REMOVE_INVALID_ENTRIES(AssetDataInternal)
REMOVE_INVALID_ENTRIES(AssetDataExternal)
AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetCreated);
AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved);
AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UOpenPypePublishInstance::OnAssetRenamed);
AssetRegistryModule.Get().OnAssetUpdated().AddUObject(this, &UOpenPypePublishInstance::OnAssetUpdated);
}
void UOpenPypePublishInstance::OnAssetAdded(const FAssetData& AssetData)
void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData)
{
TArray<FString> split;
// get directory of current container
FString selfFullPath = UOpenPypePublishInstance::GetPathName();
FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
const TObjectPtr<UObject> Asset = InAssetData.GetAsset();
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
// take interest only in paths starting with path of current container
if (assetDir.StartsWith(*selfDir))
if (!IsValid(Asset))
{
// exclude self
if (assetFName != "OpenPypePublishInstance")
UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! Skipping the addition."),
*InAssetData.GetObjectPathString());
return;
}
const bool result = IsUnderSameDir(Asset) && Cast<UOpenPypePublishInstance>(Asset) == nullptr;
if (result)
{
if (AssetDataInternal.Emplace(Asset).IsValidId())
{
assets.Add(assetPath);
UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir);
UE_LOG(LogTemp, Log, TEXT("Added an Asset to PublishInstance - Publish Instance: %s, Asset %s"),
*this->GetName(), *Asset->GetName());
}
}
}
void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& AssetData)
void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& InAssetData)
{
TArray<FString> split;
// get directory of current container
FString selfFullPath = UOpenPypePublishInstance::GetPathName();
FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
// take interest only in paths starting with path of current container
FString path = UOpenPypePublishInstance::GetPathName();
FString lpp = FPackageName::GetLongPackagePath(*path);
if (assetDir.StartsWith(*selfDir))
if (Cast<UOpenPypePublishInstance>(InAssetData.GetAsset()) == nullptr)
{
// exclude self
if (assetFName != "OpenPypePublishInstance")
if (AssetDataInternal.Contains(nullptr))
{
// UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp);
assets.Remove(assetPath);
AssetDataInternal.Remove(nullptr);
REMOVE_INVALID_ENTRIES(AssetDataInternal)
}
else
{
AssetDataExternal.Remove(nullptr);
REMOVE_INVALID_ENTRIES(AssetDataExternal)
}
}
}
void UOpenPypePublishInstance::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
void UOpenPypePublishInstance::OnAssetUpdated(const FAssetData& InAssetData)
{
TArray<FString> split;
REMOVE_INVALID_ENTRIES(AssetDataInternal);
REMOVE_INVALID_ENTRIES(AssetDataExternal);
}
// get directory of current container
FString selfFullPath = UOpenPypePublishInstance::GetPathName();
FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
bool UOpenPypePublishInstance::IsUnderSameDir(const TObjectPtr<UObject>& InAsset) const
{
FString ThisLeft, ThisRight;
this->GetPathName().Split(this->GetName(), &ThisLeft, &ThisRight);
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
return InAsset->GetPathName().StartsWith(ThisLeft);
}
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
#ifdef WITH_EDITOR
FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
if (assetDir.StartsWith(*selfDir))
void UOpenPypePublishInstance::SendNotification(const FString& Text) const
{
FNotificationInfo Info{FText::FromString(Text)};
Info.bFireAndForget = true;
Info.bUseLargeFont = false;
Info.bUseThrobber = false;
Info.bUseSuccessFailIcons = false;
Info.ExpireDuration = 4.f;
Info.FadeOutDuration = 2.f;
FSlateNotificationManager::Get().AddNotification(Info);
UE_LOG(LogAssetData, Warning,
TEXT(
"Removed duplicated asset from the AssetsDataExternal in Container \"%s\", Asset is already included in the AssetDataInternal!"
), *GetName()
)
}
void UOpenPypePublishInstance::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent)
{
Super::PostEditChangeProperty(PropertyChangedEvent);
if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet &&
PropertyChangedEvent.Property->GetFName() == GET_MEMBER_NAME_CHECKED(
UOpenPypePublishInstance, AssetDataExternal))
{
// exclude self
if (assetFName != "AssetContainer")
{
assets.Remove(str);
assets.Add(assetPath);
// UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str);
// Check for duplicated assets
for (const auto& Asset : AssetDataInternal)
{
if (AssetDataExternal.Contains(Asset))
{
AssetDataExternal.Remove(Asset);
return SendNotification("You are not allowed to add assets into AssetDataExternal which are already included in AssetDataInternal!");
}
}
// Check if no UOpenPypePublishInstance type assets are included
for (const auto& Asset : AssetDataExternal)
{
if (Cast<UOpenPypePublishInstance>(Asset.Get()) != nullptr)
{
AssetDataExternal.Remove(Asset);
return SendNotification("You are not allowed to add publish instances!");
}
}
}
}
#endif

View file

@ -9,10 +9,10 @@ UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectIn
bEditorImport = true;
}
UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
{
UOpenPypePublishInstance* OpenPypePublishInstance = NewObject<UOpenPypePublishInstance>(InParent, Class, Name, Flags);
return OpenPypePublishInstance;
check(InClass->IsChildOf(UOpenPypePublishInstance::StaticClass()));
return NewObject<UOpenPypePublishInstance>(InParent, InClass, InName, Flags);
}
bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const {

View file

@ -5,7 +5,7 @@
#include "CoreMinimal.h"
#include "UObject/NoExportTypes.h"
#include "Engine/AssetUserData.h"
#include "AssetData.h"
#include "AssetRegistry/AssetData.h"
#include "AssetContainer.generated.h"
/**

View file

@ -5,17 +5,92 @@
UCLASS(Blueprintable)
class OPENPYPE_API UOpenPypePublishInstance : public UObject
class OPENPYPE_API UOpenPypePublishInstance : public UPrimaryDataAsset
{
GENERATED_BODY()
GENERATED_UCLASS_BODY()
public:
UOpenPypePublishInstance(const FObjectInitializer& ObjectInitalizer);
/**
* Retrieves all the assets which are monitored by the Publish Instance (Monitors assets in the directory which is
* placed in)
*
* @return - Set of UObjects. Careful! They are returning raw pointers. Seems like an issue in UE5
*/
UFUNCTION(BlueprintCallable, BlueprintPure)
TSet<UObject*> GetInternalAssets() const
{
//For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed.
TSet<UObject*> ResultSet;
for (const auto& Asset : AssetDataInternal)
ResultSet.Add(Asset.LoadSynchronous());
return ResultSet;
}
/**
* Retrieves all the assets which have been added manually by the Publish Instance
*
* @return - TSet of assets (UObjects). Careful! They are returning raw pointers. Seems like an issue in UE5
*/
UFUNCTION(BlueprintCallable, BlueprintPure)
TSet<UObject*> GetExternalAssets() const
{
//For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed.
TSet<UObject*> ResultSet;
for (const auto& Asset : AssetDataExternal)
ResultSet.Add(Asset.LoadSynchronous());
return ResultSet;
}
/**
* Function for returning all the assets in the container combined.
*
* @return Returns all the internal and externally added assets into one set (TSet of UObjects). Careful! They are
* returning raw pointers. Seems like an issue in UE5
*
* @attention If the bAddExternalAssets variable is false, external assets won't be included!
*/
UFUNCTION(BlueprintCallable, BlueprintPure)
TSet<UObject*> GetAllAssets() const
{
const TSet<TSoftObjectPtr<UObject>>& IteratedSet = bAddExternalAssets ? AssetDataInternal.Union(AssetDataExternal) : AssetDataInternal;
//Create a new TSet only with raw pointers.
TSet<UObject*> ResultSet;
for (auto& Asset : IteratedSet)
ResultSet.Add(Asset.LoadSynchronous());
return ResultSet;
}
UPROPERTY(EditAnywhere, BlueprintReadOnly)
TArray<FString> assets;
private:
void OnAssetAdded(const FAssetData& AssetData);
void OnAssetRemoved(const FAssetData& AssetData);
void OnAssetRenamed(const FAssetData& AssetData, const FString& str);
};
UPROPERTY(VisibleAnywhere, Category="Assets")
TSet<TSoftObjectPtr<UObject>> AssetDataInternal;
/**
* This property allows the instance to include other assets from any other directory than what it's currently
* monitoring.
* @attention assets have to be added manually! They are not automatically registered or added!
*/
UPROPERTY(EditAnywhere, Category="Assets")
bool bAddExternalAssets = false;
UPROPERTY(EditAnywhere, Category="Assets", meta=(EditCondition="bAddExternalAssets"))
TSet<TSoftObjectPtr<UObject>> AssetDataExternal;
void OnAssetCreated(const FAssetData& InAssetData);
void OnAssetRemoved(const FAssetData& InAssetData);
void OnAssetUpdated(const FAssetData& InAssetData);
bool IsUnderSameDir(const TObjectPtr<UObject>& InAsset) const;
#ifdef WITH_EDITOR
void SendNotification(const FString& Text) const;
virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override;
#endif
};

View file

@ -14,6 +14,6 @@ class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory
public:
UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer);
virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
virtual UObject* FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
virtual bool ShouldShowInNewMenu() const override;
};
};

View file

@ -50,7 +50,10 @@ def get_engine_versions(env=None):
# environment variable not set
pass
except OSError:
# specified directory doesn't exists
# specified directory doesn't exist
pass
except StopIteration:
# specified directory doesn't exist
pass
# if we've got something, terminate auto-detection process

View file

@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
"""Load Alembic Animation."""
import os
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
import unreal # noqa
class AnimationAlembicLoader(plugin.Loader):
"""Load Unreal SkeletalMesh from Alembic"""
families = ["animation"]
label = "Import Alembic Animation"
representations = ["abc"]
icon = "cube"
color = "orange"
def get_task(self, filename, asset_dir, asset_name, replace):
task = unreal.AssetImportTask()
options = unreal.AbcImportSettings()
sm_settings = unreal.AbcStaticMeshSettings()
conversion_settings = unreal.AbcConversionSettings(
preset=unreal.AbcConversionPreset.CUSTOM,
flip_u=False, flip_v=False,
rotation=[0.0, 0.0, 0.0],
scale=[1.0, 1.0, -1.0])
task.set_editor_property('filename', filename)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', replace)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)
options.set_editor_property(
'import_type', unreal.AlembicImportType.SKELETAL)
options.static_mesh_settings = sm_settings
options.conversion_settings = conversion_settings
task.options = options
return task
def load(self, context, name, namespace, data):
"""Load and containerise representation into Content Browser.
This is a two-step process. First, import the Alembic to a temporary
path, then call `containerise()` on it - this moves all content to a new
directory, creates an AssetContainer there and imprints it
with metadata. This marks the path as a container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically the path to the container.
It is not passed here, so the namespace is set
by `containerise()` because only then do we know
the real path.
data (dict): Data to be imprinted. Not used here;
data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and openpype container
root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
version = context.get('version').get('name')
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
container_name += suffix
if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
unreal.EditorAssetLibrary.make_directory(asset_dir)
task = self.get_task(self.fname, asset_dir, asset_name, False)
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([task])
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
return asset_content
def update(self, container, representation):
name = container["asset_name"]
source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = self.get_task(source_path, destination_path, name, True)
# import the Alembic and replace existing data
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([task])
container_path = f"{container['namespace']}/{container['objectName']}"
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})
asset_content = unreal.EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)
unreal.EditorAssetLibrary.delete_directory(path)
asset_content = unreal.EditorAssetLibrary.list_assets(
parent_path, recursive=False
)
if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)

View file

@ -14,7 +14,7 @@ import unreal # noqa
class SkeletalMeshAlembicLoader(plugin.Loader):
"""Load Unreal SkeletalMesh from Alembic"""
families = ["pointcache"]
families = ["pointcache", "skeletalMesh"]
label = "Import Alembic Skeletal Mesh"
representations = ["abc"]
icon = "cube"

View file

@ -14,7 +14,7 @@ import unreal # noqa
class StaticMeshAlembicLoader(plugin.Loader):
"""Load Unreal StaticMesh from Alembic"""
families = ["model"]
families = ["model", "staticMesh"]
label = "Import Alembic Static Mesh"
representations = ["abc"]
icon = "cube"

View file

@ -3,6 +3,8 @@
import ast
import unreal # noqa
import pyblish.api
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
from openpype.pipeline.publish import KnownPublishError
class CollectInstances(pyblish.api.ContextPlugin):
@ -23,8 +25,10 @@ class CollectInstances(pyblish.api.ContextPlugin):
def process(self, context):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
instance_containers = ar.get_assets_by_class(
"OpenPypePublishInstance", True)
class_name = ["/Script/OpenPype",
"AssetContainer"] if UNREAL_VERSION.major == 5 and \
UNREAL_VERSION.minor > 0 else "OpenPypePublishInstance" # noqa
instance_containers = ar.get_assets_by_class(class_name, True)
for container_data in instance_containers:
asset = container_data.get_asset()
@ -32,9 +36,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["objectName"] = container_data.asset_name
# convert to strings
data = {str(key): str(value) for (key, value) in data.items()}
assert data.get("family"), (
"instance has no family"
)
if not data.get("family"):
raise KnownPublishError("instance has no family")
# content of container
members = ast.literal_eval(data.get("members"))

View file

@ -14,9 +14,9 @@ else:
class FileTransaction(object):
"""
"""File transaction with rollback options.
The file transaction is a three step process.
The file transaction is a three-step process.
1) Rename any existing files to a "temporary backup" during `process()`
2) Copy the files to final destination during `process()`
@ -39,14 +39,12 @@ class FileTransaction(object):
Warning:
Any folders created during the transfer will not be removed.
"""
MODE_COPY = 0
MODE_HARDLINK = 1
def __init__(self, log=None):
if log is None:
log = logging.getLogger("FileTransaction")
@ -63,49 +61,64 @@ class FileTransaction(object):
self._backup_to_original = {}
def add(self, src, dst, mode=MODE_COPY):
"""Add a new file to transfer queue"""
"""Add a new file to transfer queue.
Args:
src (str): Source path.
dst (str): Destination path.
mode (MODE_COPY, MODE_HARDLINK): Transfer mode.
"""
opts = {"mode": mode}
src = os.path.abspath(src)
dst = os.path.abspath(dst)
src = os.path.normpath(os.path.abspath(src))
dst = os.path.normpath(os.path.abspath(dst))
if dst in self._transfers:
queued_src = self._transfers[dst][0]
if src == queued_src:
self.log.debug("File transfer was already "
"in queue: {} -> {}".format(src, dst))
self.log.debug(
"File transfer was already in queue: {} -> {}".format(
src, dst))
return
else:
self.log.warning("File transfer in queue replaced..")
self.log.debug("Removed from queue: "
"{} -> {}".format(queued_src, dst))
self.log.debug("Added to queue: {} -> {}".format(src, dst))
self.log.debug(
"Removed from queue: {} -> {} replaced by {} -> {}".format(
queued_src, dst, src, dst))
self._transfers[dst] = (src, opts)
def process(self):
# Backup any existing files
for dst in self._transfers.keys():
if os.path.exists(dst):
# Backup original file
# todo: add timestamp or uuid to ensure unique
backup = dst + ".bak"
self._backup_to_original[backup] = dst
self.log.debug("Backup existing file: "
"{} -> {}".format(dst, backup))
os.rename(dst, backup)
for dst, (src, _) in self._transfers.items():
if dst == src or not os.path.exists(dst):
continue
# Backup original file
# todo: add timestamp or uuid to ensure unique
backup = dst + ".bak"
self._backup_to_original[backup] = dst
self.log.debug(
"Backup existing file: {} -> {}".format(dst, backup))
os.rename(dst, backup)
# Copy the files to transfer
for dst, (src, opts) in self._transfers.items():
if dst == src:
self.log.debug(
"Source and destionation are same files {} -> {}".format(
src, dst))
continue
self._create_folder_for_file(dst)
if opts["mode"] == self.MODE_COPY:
self.log.debug("Copying file ... {} -> {}".format(src, dst))
copyfile(src, dst)
elif opts["mode"] == self.MODE_HARDLINK:
self.log.debug("Hardlinking file ... {} -> {}".format(src,
dst))
self.log.debug("Hardlinking file ... {} -> {}".format(
src, dst))
create_hard_link(src, dst)
self._transferred.append(dst)
@@ -116,23 +129,21 @@ class FileTransaction(object):
try:
os.remove(backup)
except OSError:
self.log.error("Failed to remove backup file: "
"{}".format(backup),
exc_info=True)
self.log.error(
"Failed to remove backup file: {}".format(backup),
exc_info=True)
def rollback(self):
errors = 0
# Rollback any transferred files
for path in self._transferred:
try:
os.remove(path)
except OSError:
errors += 1
self.log.error("Failed to rollback created file: "
"{}".format(path),
exc_info=True)
self.log.error(
"Failed to rollback created file: {}".format(path),
exc_info=True)
# Rollback the backups
for backup, original in self._backup_to_original.items():
@@ -140,13 +151,15 @@ class FileTransaction(object):
os.rename(backup, original)
except OSError:
errors += 1
self.log.error("Failed to restore original file: "
"{} -> {}".format(backup, original),
exc_info=True)
self.log.error(
"Failed to restore original file: {} -> {}".format(
backup, original),
exc_info=True)
if errors:
self.log.error("{} errors occurred during "
"rollback.".format(errors), exc_info=True)
self.log.error(
"{} errors occurred during rollback.".format(errors),
exc_info=True)
six.reraise(*sys.exc_info())
@property
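The docstring above describes a three-step transaction (backup, copy, cleanup-or-rollback). A standalone sketch of the same idea, independent of the OpenPype class:

```python
# Sketch of the three-step file transaction: backup -> copy ->
# cleanup on success, rollback on failure. Not the OpenPype class
# itself, just the same pattern in miniature.
import os
import shutil

def transactional_copy(transfers):
    """transfers: dict of {dst: src}."""
    backups = {}    # backup path -> original path
    created = []    # files written by this transaction
    try:
        # 1) Move existing destinations aside as backups.
        for dst in transfers:
            if os.path.exists(dst):
                backup = dst + ".bak"
                os.rename(dst, backup)
                backups[backup] = dst
        # 2) Copy sources to their destinations.
        for dst, src in transfers.items():
            if os.path.dirname(dst):
                os.makedirs(os.path.dirname(dst), exist_ok=True)
            shutil.copyfile(src, dst)
            created.append(dst)
        # 3) On success, drop the backups.
        for backup in backups:
            os.remove(backup)
    except Exception:
        # Rollback: remove new files, restore backups, re-raise.
        for path in created:
            if os.path.exists(path):
                os.remove(path)
        for backup, original in backups.items():
            os.rename(backup, original)
        raise
```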

View file

@@ -422,7 +422,7 @@ class TemplateResult(str):
cls = self.__class__
return cls(
os.path.normpath(self),
os.path.normpath(self.replace("\\", "/")),
self.template,
self.solved,
self.used_values,
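The one-character change above matters because os.path.normpath is platform-specific: on POSIX it treats a backslash as an ordinary character, so Windows-style separators coming out of a template would survive normalization. A quick demonstration:

```python
# Why replace("\\", "/") before normpath: normpath only collapses the
# current platform's separator.
import os

path = "publish\\shots/sh010//v001"
print(os.path.normpath(path))
# Linux/macOS: 'publish\\shots/sh010/v001'  (backslash kept!)
# Windows:     'publish\\shots\\sh010\\v001'
print(os.path.normpath(path.replace("\\", "/")))
# Linux/macOS: 'publish/shots/sh010/v001'
```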

View file

@@ -77,26 +77,38 @@ def get_transcode_temp_directory():
)
def get_oiio_info_for_input(filepath, logger=None):
def get_oiio_info_for_input(filepath, logger=None, subimages=False):
"""Call oiiotool to get information about input and return stdout.
Stdout should contain an XML-formatted string.
"""
args = [
get_oiio_tools_path(), "--info", "-v", "-i:infoformat=xml", filepath
get_oiio_tools_path(),
"--info",
"-v"
]
if subimages:
args.append("-a")
args.extend(["-i:infoformat=xml", filepath])
output = run_subprocess(args, logger=logger)
output = output.replace("\r\n", "\n")
xml_started = False
subimages_lines = []
lines = []
for line in output.split("\n"):
if not xml_started:
if not line.startswith("<"):
continue
xml_started = True
if xml_started:
lines.append(line)
if line == "</ImageSpec>":
subimages_lines.append(lines)
lines = []
if not xml_started:
raise ValueError(
@@ -105,12 +117,19 @@ def get_oiio_info_for_input(filepath, logger=None):
)
)
xml_text = "\n".join(lines)
return parse_oiio_xml_output(xml_text, logger=logger)
output = []
for subimage_lines in subimages_lines:
xml_text = "\n".join(subimage_lines)
output.append(parse_oiio_xml_output(xml_text, logger=logger))
if subimages:
return output
return output[0]
class RationalToInt:
"""Rational value stored as division of 2 integers using string."""
def __init__(self, string_value):
parts = string_value.split("/")
top = float(parts[0])
@@ -157,16 +176,16 @@ def convert_value_by_type_name(value_type, value, logger=None):
if value_type == "int":
return int(value)
if value_type == "float":
if value_type in ("float", "double"):
return float(value)
# Vectors will probably have more types
if value_type in ("vec2f", "float2"):
if value_type in ("vec2f", "float2", "float2d"):
return [float(item) for item in value.split(",")]
# Matrix should always have a square element size (3x3, 4x4)
# - are returned as list of lists
if value_type == "matrix":
if value_type in ("matrix", "matrixd"):
output = []
current_index = -1
parts = value.split(",")
@@ -198,7 +217,7 @@ def convert_value_by_type_name(value_type, value, logger=None):
if value_type == "rational2i":
return RationalToInt(value)
if value_type == "vector":
if value_type in ("vector", "vectord"):
parts = [part.strip() for part in value.split(",")]
output = []
for part in parts:
@@ -380,6 +399,10 @@ def should_convert_for_ffmpeg(src_filepath):
if not input_info:
return None
subimages = input_info.get("subimages")
if subimages is not None and subimages > 1:
return True
# Check compression
compression = input_info["attribs"].get("compression")
if compression in ("dwaa", "dwab"):
@@ -453,7 +476,7 @@ def convert_for_ffmpeg(
if input_frame_start is not None and input_frame_end is not None:
is_sequence = int(input_frame_end) != int(input_frame_start)
input_info = get_oiio_info_for_input(first_input_path)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)
# Change compression only if source compression is "dwaa" or "dwab"
# - they're not supported in ffmpeg
@@ -488,13 +511,21 @@ def convert_for_ffmpeg(
input_channels.append(alpha)
input_channels_str = ",".join(input_channels)
oiio_cmd.extend([
subimages = input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded into memory, which helps avoid
# memory leak issues
"-i:ch={}".format(input_channels_str), first_input_path,
# - this option crashes if used on multipart/subimage exrs
input_arg += ":ch={}".format(input_channels_str)
oiio_cmd.extend([
input_arg, first_input_path,
# Tell oiiotool which channels should be put to top stack (and output)
"--ch", channels_arg
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
# Add frame definitions to arguments
@@ -588,7 +619,7 @@ def convert_input_paths_for_ffmpeg(
" \".exr\" extension. Got \"{}\"."
).format(ext))
input_info = get_oiio_info_for_input(first_input_path)
input_info = get_oiio_info_for_input(first_input_path, logger=logger)
# Change compression only if source compression is "dwaa" or "dwab"
# - they're not supported in ffmpeg
@@ -606,12 +637,22 @@ def convert_input_paths_for_ffmpeg(
red, green, blue, alpha = review_channels
input_channels = [red, green, blue]
# TODO: find subimage index where RGBA is available for multipart EXRs
channels_arg = "R={},G={},B={}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)
input_channels_str = ",".join(input_channels)
subimages = input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded into memory, which helps avoid
# memory leak issues
# - this option crashes if used on multipart exrs
input_arg += ":ch={}".format(input_channels_str)
for input_path in input_paths:
# Prepare subprocess arguments
oiio_cmd = [
@@ -625,13 +666,12 @@ def convert_input_paths_for_ffmpeg(
oiio_cmd.extend(["--compression", compression])
oiio_cmd.extend([
# Tell oiiotool which channels should be loaded
# - other channels are not loaded to memory so helps to
# avoid memory leak issues
"-i:ch={}".format(input_channels_str), input_path,
input_arg, input_path,
# Tell oiiotool which channels should be put to top stack
# (and output)
"--ch", channels_arg
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
for attr_name, attr_value in input_info["attribs"].items():
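With -a, oiiotool prints one <ImageSpec> XML document per subimage back to back, and the parser above cuts a new chunk at every closing tag. A self-contained demo of that split (the sample stdout is fabricated for illustration):

```python
# Demo of the per-subimage split: accumulate lines once XML starts,
# close a chunk on every </ImageSpec>, then parse each chunk.
import xml.etree.ElementTree as ET

fake_stdout = """\
somefile.exr : 1920 x 1080, 4 channel, half openexr
<ImageSpec version="24">
<attrib name="compression" type="string">dwaa</attrib>
</ImageSpec>
<ImageSpec version="24">
<attrib name="compression" type="string">zip</attrib>
</ImageSpec>
"""

xml_started = False
chunks, lines = [], []
for line in fake_stdout.split("\n"):
    if not xml_started:
        if not line.startswith("<"):
            continue  # skip the human-readable header
        xml_started = True
    lines.append(line)
    if line == "</ImageSpec>":
        chunks.append("\n".join(lines))
        lines = []

for chunk in chunks:
    root = ET.fromstring(chunk)
    print(root.find("attrib").text)  # dwaa, then zip
```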

View file

@@ -2,16 +2,14 @@ import os
import re
import json
import getpass
import requests
import pyblish.api
class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
"""Submit CelAction2D scene to Deadline
Renders are submitted to a Deadline Web Service as
supplied via settings key "DEADLINE_REST_URL".
Renders are submitted to a Deadline Web Service.
"""
@@ -26,27 +24,21 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
deadline_pool_secondary = ""
deadline_group = ""
deadline_chunk_size = 1
enviro_filter = [
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER"
]
deadline_job_delay = "00:00:08:00"
def process(self, instance):
instance.data["toBeRenderedOn"] = "deadline"
context = instance.context
deadline_url = (
context.data["system_settings"]
["modules"]
["deadline"]
["DEADLINE_REST_URL"]
)
assert deadline_url, "Requires DEADLINE_REST_URL"
# get default deadline webservice url from deadline module
deadline_url = instance.context.data["defaultDeadline"]
# if custom one is set in instance, use that
if instance.data.get("deadlineUrl"):
deadline_url = instance.data.get("deadlineUrl")
assert deadline_url, "Requires Deadline Webservice URL"
self.deadline_url = "{}/api/jobs".format(deadline_url)
self._comment = context.data.get("comment", "")
self._comment = instance.data["comment"]
self._deadline_user = context.data.get(
"deadlineUser", getpass.getuser())
self._frame_start = int(instance.data["frameStart"])
@@ -82,6 +74,26 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
render_dir = os.path.normpath(os.path.dirname(render_path))
render_path = os.path.normpath(render_path)
script_name = os.path.basename(script_path)
for item in instance.context:
if "workfile" in item.data["family"]:
msg = "Workfile (scene) must be published along"
assert item.data["publish"] is True, msg
template_data = item.data.get("anatomyData")
rep = item.data.get("representations")[0].get("name")
template_data["representation"] = rep
template_data["ext"] = rep
template_data["comment"] = None
anatomy_filled = instance.context.data["anatomy"].format(
template_data)
template_filled = anatomy_filled["publish"]["path"]
script_path = os.path.normpath(template_filled)
self.log.info(
"Using published scene for render {}".format(script_path)
)
jobname = "%s - %s" % (script_name, instance.name)
output_filename_0 = self.preview_fname(render_path)
@@ -98,7 +110,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
chunk_size = self.deadline_chunk_size
# search for %02d pattern in name, and padding number
search_results = re.search(r"(.%0)(\d)(d)[._]", render_path).groups()
search_results = re.search(r"(%0)(\d)(d)[._]", render_path).groups()
split_patern = "".join(search_results)
padding_number = int(search_results[1])
@@ -145,10 +157,11 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
# frames from Deadline Monitor
"OutputFilename0": output_filename_0.replace("\\", "/"),
# # Asset dependency to wait for at least the scene file to sync.
# # Asset dependency to wait for at least
# the scene file to sync.
# "AssetDependency0": script_path
"ScheduledType": "Once",
"JobDelay": "00:00:08:00"
"JobDelay": self.deadline_job_delay
},
"PluginInfo": {
# Input
@@ -173,19 +186,6 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
plugin = payload["JobInfo"]["Plugin"]
self.log.info("using render plugin : {}".format(plugin))
i = 0
for key, values in dict(os.environ).items():
if key.upper() in self.enviro_filter:
payload["JobInfo"].update(
{
"EnvironmentKeyValue%d"
% i: "{key}={value}".format(
key=key, value=values
)
}
)
i += 1
self.log.info("Submitting..")
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
@@ -193,10 +193,15 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
self.expected_files(instance, render_path)
self.log.debug("__ expectedFiles: `{}`".format(
instance.data["expectedFiles"]))
response = requests.post(self.deadline_url, json=payload)
if not response.ok:
raise Exception(response.text)
self.log.error(
"Submission failed! [{}] {}".format(
response.status_code, response.content))
self.log.debug(payload)
raise SystemExit(response.text)
return response
@@ -234,32 +239,29 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin):
split_path = path.split(split_patern)
hashes = "#" * int(search_results[1])
return "".join([split_path[0], hashes, split_path[-1]])
if "#" in path:
self.log.debug("_ path: `{}`".format(path))
return path
else:
return path
def expected_files(self,
instance,
path):
self.log.debug("_ path: `{}`".format(path))
return path
def expected_files(self, instance, filepath):
""" Create expected files in instance data
"""
if not instance.data.get("expectedFiles"):
instance.data["expectedFiles"] = list()
instance.data["expectedFiles"] = []
dir = os.path.dirname(path)
file = os.path.basename(path)
dirpath = os.path.dirname(filepath)
filename = os.path.basename(filepath)
if "#" in file:
pparts = file.split("#")
if "#" in filename:
pparts = filename.split("#")
padding = "%0{}d".format(len(pparts) - 1)
file = pparts[0] + padding + pparts[-1]
filename = pparts[0] + padding + pparts[-1]
if "%" not in file:
instance.data["expectedFiles"].append(path)
if "%" not in filename:
instance.data["expectedFiles"].append(filepath)
return
for i in range(self._frame_start, (self._frame_end + 1)):
instance.data["expectedFiles"].append(
os.path.join(dir, (file % i)).replace("\\", "/"))
os.path.join(dirpath, (filename % i)).replace("\\", "/")
)
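The renamed expected_files helper expands a "#"-padded render path into one path per frame. The same logic as a standalone function (the frame-range values are illustrative):

```python
# Sketch of the expected-files expansion: a "####" padded name becomes
# printf-style padding, then one path per frame.
import os

def expected_files(filepath, frame_start, frame_end):
    dirpath = os.path.dirname(filepath)
    filename = os.path.basename(filepath)

    if "#" in filename:
        parts = filename.split("#")
        padding = "%0{}d".format(len(parts) - 1)
        filename = parts[0] + padding + parts[-1]

    if "%" not in filename:
        return [filepath]  # single, non-sequence file

    return [
        os.path.join(dirpath, filename % frame).replace("\\", "/")
        for frame in range(frame_start, frame_end + 1)
    ]

print(expected_files("/renders/shot.####.png", 1, 3))
# ['/renders/shot.0001.png', '/renders/shot.0002.png', '/renders/shot.0003.png']
```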

View file

@@ -241,6 +241,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"]
environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"]
environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME")
environment["OPENPYPE_VERSION"] = os.environ.get("OPENPYPE_VERSION")
environment["OPENPYPE_LOG_NO_COLORS"] = "1"
environment["OPENPYPE_USERNAME"] = instance.context.data["user"]
environment["OPENPYPE_PUBLISH_JOB"] = "1"
@@ -776,6 +777,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"handleEnd": handle_end,
"frameStartHandle": start - handle_start,
"frameEndHandle": end + handle_end,
"comment": instance.data["comment"],
"fps": fps,
"source": source,
"extendFrames": data.get("extendFrames"),

Binary file not shown (new image added, 101 KiB).

View file

@@ -0,0 +1,38 @@
[About]
Type=label
Label=About
Category=About Plugin
CategoryOrder=-1
Index=0
Default=Celaction Plugin for Deadline
Description=Not configurable
[ConcurrentTasks]
Type=label
Label=ConcurrentTasks
Category=About Plugin
CategoryOrder=-1
Index=0
Default=True
Description=Not configurable
[Executable]
Type=filename
Label=Executable
Category=Config
CategoryOrder=0
CategoryIndex=0
Description=The command executable to run
Required=false
DisableIfBlank=true
[RenderNameSeparator]
Type=string
Label=RenderNameSeparator
Category=Config
CategoryOrder=0
CategoryIndex=1
Description=The separator to use for naming
Required=false
DisableIfBlank=true
Default=.
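The .param file above is plain INI that Deadline surfaces to the plugin via GetConfigEntry/GetConfigEntryWithDefault (as the plugin code below does for RenderNameSeparator). A local sanity check with Python's configparser standing in for Deadline's own reader:

```python
# Parse a trimmed-down .param file and read the separator default.
import configparser

param_text = """\
[Executable]
Type=filename
Label=Executable
Required=false

[RenderNameSeparator]
Type=string
Default=.
"""

parser = configparser.ConfigParser()
parser.read_string(param_text)
print(parser.get("RenderNameSeparator", "Default", fallback="."))  # "."
```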

View file

@@ -0,0 +1,122 @@
from System.Text.RegularExpressions import *
from Deadline.Plugins import *
from Deadline.Scripting import *
import _winreg
######################################################################
# This is the function that Deadline calls to get an instance of the
# main DeadlinePlugin class.
######################################################################
def GetDeadlinePlugin():
return CelActionPlugin()
def CleanupDeadlinePlugin(deadlinePlugin):
deadlinePlugin.Cleanup()
######################################################################
# This is the main DeadlinePlugin class for the CelAction plugin.
######################################################################
class CelActionPlugin(DeadlinePlugin):
def __init__(self):
self.InitializeProcessCallback += self.InitializeProcess
self.RenderExecutableCallback += self.RenderExecutable
self.RenderArgumentCallback += self.RenderArgument
self.StartupDirectoryCallback += self.StartupDirectory
def Cleanup(self):
for stdoutHandler in self.StdoutHandlers:
del stdoutHandler.HandleCallback
del self.InitializeProcessCallback
del self.RenderExecutableCallback
del self.RenderArgumentCallback
del self.StartupDirectoryCallback
def GetCelActionRegistryKey(self):
# Modify registry for frame separation
path = r'Software\CelAction\CelAction2D\User Settings'
_winreg.CreateKey(_winreg.HKEY_CURRENT_USER, path)
regKey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
_winreg.KEY_ALL_ACCESS)
return regKey
def GetSeparatorValue(self, regKey):
useSeparator, _ = _winreg.QueryValueEx(
regKey, 'RenderNameUseSeparator')
separator, _ = _winreg.QueryValueEx(regKey, 'RenderNameSeparator')
return useSeparator, separator
def SetSeparatorValue(self, regKey, useSeparator, separator):
_winreg.SetValueEx(regKey, 'RenderNameUseSeparator',
0, _winreg.REG_DWORD, useSeparator)
_winreg.SetValueEx(regKey, 'RenderNameSeparator',
0, _winreg.REG_SZ, separator)
def InitializeProcess(self):
# Set the plugin specific settings.
self.SingleFramesOnly = False
# Set the process specific settings.
self.StdoutHandling = True
self.PopupHandling = True
# Ignore 'celaction' Pop-up dialog
self.AddPopupIgnorer(".*Rendering.*")
self.AddPopupIgnorer(".*AutoRender.*")
# Ignore 'celaction' Pop-up dialog
self.AddPopupIgnorer(".*Wait.*")
# Ignore 'celaction' Pop-up dialog
self.AddPopupIgnorer(".*Timeline Scrub.*")
celActionRegKey = self.GetCelActionRegistryKey()
self.SetSeparatorValue(celActionRegKey, 1, self.GetConfigEntryWithDefault(
"RenderNameSeparator", ".").strip())
def RenderExecutable(self):
return RepositoryUtils.CheckPathMapping(self.GetConfigEntry("Executable").strip())
def RenderArgument(self):
arguments = RepositoryUtils.CheckPathMapping(
self.GetPluginInfoEntry("Arguments").strip())
arguments = arguments.replace(
"<STARTFRAME>", str(self.GetStartFrame()))
arguments = arguments.replace("<ENDFRAME>", str(self.GetEndFrame()))
arguments = self.ReplacePaddedFrame(
arguments, "<STARTFRAME%([0-9]+)>", self.GetStartFrame())
arguments = self.ReplacePaddedFrame(
arguments, "<ENDFRAME%([0-9]+)>", self.GetEndFrame())
arguments = arguments.replace("<QUOTE>", "\"")
return arguments
def StartupDirectory(self):
return self.GetPluginInfoEntryWithDefault("StartupDirectory", "").strip()
def ReplacePaddedFrame(self, arguments, pattern, frame):
frameRegex = Regex(pattern)
while True:
frameMatch = frameRegex.Match(arguments)
if frameMatch.Success:
paddingSize = int(frameMatch.Groups[1].Value)
if paddingSize > 0:
padding = StringUtils.ToZeroPaddedString(
frame, paddingSize, False)
else:
padding = str(frame)
arguments = arguments.replace(
frameMatch.Groups[0].Value, padding)
else:
break
return arguments
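ReplacePaddedFrame resolves tokens such as <STARTFRAME%4> to zero-padded frame numbers via .NET Regex. The equivalent in plain Python's re, for reference (re.sub replaces all occurrences in one pass, which gives the same result as the loop above for this token syntax):

```python
# Pure-Python equivalent of ReplacePaddedFrame: <TOKEN%N> -> frame
# number zero-padded to N digits.
import re

def replace_padded_frame(arguments, pattern, frame):
    def _sub(match):
        padding_size = int(match.group(1))
        if padding_size > 0:
            return str(frame).zfill(padding_size)
        return str(frame)
    return re.sub(pattern, _sub, arguments)

args = "render.exe -s <STARTFRAME%4> -e <ENDFRAME%4>"
args = replace_padded_frame(args, r"<STARTFRAME%([0-9]+)>", 7)
args = replace_padded_frame(args, r"<ENDFRAME%([0-9]+)>", 120)
print(args)  # render.exe -s 0007 -e 0120
```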

View file

@@ -135,9 +135,9 @@ class FirstVersionStatus(BaseEvent):
new_status = asset_version_statuses.get(found_item["status"])
if not new_status:
self.log.warning(
self.log.warning((
"AssetVersion doesn't have status `{}`."
).format(found_item["status"])
).format(found_item["status"]))
continue
try:

View file

@@ -1556,7 +1556,7 @@ class SyncEntitiesFactory:
deleted_entities.append(mongo_id)
av_ent = self.avalon_ents_by_id[mongo_id]
av_ent_path_items = [p for p in av_ent["data"]["parents"]]
av_ent_path_items = list(av_ent["data"]["parents"])
av_ent_path_items.append(av_ent["name"])
self.log.debug("Deleted <{}>".format("/".join(av_ent_path_items)))
@@ -1855,7 +1855,7 @@ class SyncEntitiesFactory:
_vis_par = _avalon_ent["data"]["visualParent"]
_name = _avalon_ent["name"]
if _name in self.all_ftrack_names:
av_ent_path_items = _avalon_ent["data"]["parents"]
av_ent_path_items = list(_avalon_ent["data"]["parents"])
av_ent_path_items.append(_name)
av_ent_path = "/".join(av_ent_path_items)
# TODO report
@@ -1997,7 +1997,7 @@ class SyncEntitiesFactory:
{"_id": mongo_id},
item
))
av_ent_path_items = item["data"]["parents"]
av_ent_path_items = list(item["data"]["parents"])
av_ent_path_items.append(item["name"])
av_ent_path = "/".join(av_ent_path_items)
self.log.debug(
@@ -2110,6 +2110,7 @@ class SyncEntitiesFactory:
entity_dict = self.entities_dict[ftrack_id]
final_parents = entity_dict["final_entity"]["data"]["parents"]
if archived_by_id:
# if it is changeable then unarchive (nothing to check here)
if self.changeability_by_mongo_id[mongo_id]:
@@ -2123,10 +2124,8 @@ class SyncEntitiesFactory:
archived_name = archived_by_id["name"]
if (
archived_name != entity_dict["name"] or
archived_parents != entity_dict["final_entity"]["data"][
"parents"
]
archived_name != entity_dict["name"]
or archived_parents != final_parents
):
return None
@@ -2136,11 +2135,7 @@ class SyncEntitiesFactory:
for archived in archived_by_name:
mongo_id = str(archived["_id"])
archived_parents = archived.get("data", {}).get("parents")
if (
archived_parents == entity_dict["final_entity"]["data"][
"parents"
]
):
if archived_parents == final_parents:
return mongo_id
# Secondly try to find more close to current ftrack entity
@@ -2350,8 +2345,7 @@ class SyncEntitiesFactory:
continue
changed = True
parents = [par for par in _parents]
hierarchy = "/".join(parents)
parents = list(_parents)
self.entities_dict[ftrack_id][
"final_entity"]["data"]["parents"] = parents

View file

@@ -36,10 +36,35 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
return
context = instance.context
session = context.data["ftrackSession"]
task_entity, parent_entity = self.get_instance_entities(
instance, context)
if parent_entity is None:
self.log.info((
"Skipping ftrack integration. Instance \"{}\" does not"
" have specified ftrack entities."
).format(str(instance)))
return
session = context.data["ftrackSession"]
# Reset session operations and reconfigure locations
session.recorded_operations.clear()
session._configure_locations()
try:
self.integrate_to_ftrack(
session,
instance,
task_entity,
parent_entity,
component_list
)
except Exception:
session.reset()
raise
def get_instance_entities(self, instance, context):
parent_entity = None
default_asset_name = None
# If the instance has "ftrackEntity" or "ftrackTask" set, use them from
# the instance even when they are None. A None value is intentional
# (e.g. the instance has a different context).
@@ -52,15 +77,21 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
parent_entity = context.data.get("ftrackEntity")
if task_entity:
default_asset_name = task_entity["name"]
parent_entity = task_entity["parent"]
if parent_entity is None:
self.log.info((
"Skipping ftrack integration. Instance \"{}\" does not"
" have specified ftrack entities."
).format(str(instance)))
return
return task_entity, parent_entity
def integrate_to_ftrack(
self,
session,
instance,
task_entity,
parent_entity,
component_list
):
default_asset_name = None
if task_entity:
default_asset_name = task_entity["name"]
if not default_asset_name:
default_asset_name = parent_entity["name"]
@@ -186,13 +217,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
self.log.info("Setting task status to \"{}\"".format(status_name))
task_entity["status"] = status
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
session.commit()
def _fill_component_locations(self, session, component_list):
components_by_location_name = collections.defaultdict(list)
@@ -495,13 +520,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
session.delete(member)
del(member)
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
session.commit()
# Reset members in memory
if "members" in component_entity.keys():
@@ -617,13 +636,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
)
else:
# Commit changes.
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
session.commit()
def _create_components(self, session, asset_versions_data_by_id):
for item in asset_versions_data_by_id.values():
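The three removed try/except blocks all duplicated the same rollback dance around session.commit(). After this change the plugin commits directly and relies on one outer guard that resets the session when anything in the integration fails; schematically (with a stub session, not the real ftrack_api.Session):

```python
# One outer guard instead of per-commit rollback boilerplate.
class StubSession:
    def __init__(self):
        self.recorded_operations = []
        self.reset_count = 0

    def commit(self):
        print("commit", len(self.recorded_operations), "ops")

    def reset(self):
        self.reset_count += 1
        self.recorded_operations.clear()

def integrate(session, work):
    # Start from a clean slate, as the plugin does before processing.
    session.recorded_operations.clear()
    try:
        work(session)      # may call session.commit() several times
    except Exception:
        session.reset()    # drop half-applied state, then re-raise
        raise

session = StubSession()
integrate(session, lambda s: s.commit())
```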

View file

@@ -38,7 +38,7 @@ class IntegrateFtrackDescription(pyblish.api.InstancePlugin):
self.log.info("There are any integrated AssetVersions")
return
comment = (instance.context.data.get("comment") or "").strip()
comment = instance.data["comment"]
if not comment:
self.log.info("Comment is not set.")
else:

View file

@@ -45,7 +45,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
host_name = context.data["hostName"]
app_name = context.data["appName"]
app_label = context.data["appLabel"]
comment = (context.data.get("comment") or "").strip()
comment = instance.data["comment"]
if not comment:
self.log.info("Comment is not set.")
else:

View file

@@ -1,12 +1,9 @@ import os
import os
import threading
import gazu
from openpype.client import (
get_project,
get_assets,
get_asset_by_name
)
from openpype.client import get_project, get_assets, get_asset_by_name
from openpype.pipeline import AvalonMongoDB
from .credentials import validate_credentials
from .update_op_with_zou import (
@ -397,6 +394,13 @@ def start_listeners(login: str, password: str):
login (str): Kitsu user login
password (str): Kitsu user password
"""
# Refresh token every week
def refresh_token_every_week():
print("Refreshing token...")
gazu.refresh_token()
threading.Timer(7 * 3600 * 24, refresh_token_every_week).start()
refresh_token_every_week()
# Connect to server
listener = Listener(login, password)
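The listener hunk above re-arms a threading.Timer from inside its own callback, so the gazu token refresh repeats every seven days for the life of the process. The same pattern in isolation (a print stands in for gazu.refresh_token):

```python
# Self-rearming timer: each run schedules the next one.
import threading

WEEK_SECONDS = 7 * 3600 * 24  # one week, as in the hunk above

def refresh_token_every_week():
    print("Refreshing token...")
    timer = threading.Timer(WEEK_SECONDS, refresh_token_every_week)
    timer.daemon = True  # don't keep the process alive just for this
    timer.start()

refresh_token_every_week()
```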

Some files were not shown because too many files have changed in this diff.