Merge branch 'develop' into bugfix/OP-4914_Anchored-publishing-issues

Jakub Jezek 2023-02-24 16:46:01 +01:00
commit 77e2ecaf25
No known key found for this signature in database
GPG key ID: 730D7C02726179A7
86 changed files with 2008 additions and 556 deletions

View file

@ -13,7 +13,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
# Should be as last hook because must change launch arguments to string
order = 1000
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
app_groups = ["nuke", "nukeassist", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):

View file

@ -1,7 +1,10 @@
import os
from openpype.pipeline import (
load
load,
get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class FbxLoader(load.LoaderPlugin):
@ -36,14 +39,26 @@ importFile @"{filepath}" #noPrompt using:FBXIMP
container_name = f"{name}_CON"
asset = rt.getNodeByName(f"{name}")
# rename the container with "_CON"
container = rt.container(name=container_name)
asset.Parent = container
return container
return containerise(
name, [asset], context, loader=self.__class__.__name__)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
fbx_objects = self.get_container_children(node)
for fbx_object in fbx_objects:
fbx_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

View file

@ -1,7 +1,9 @@
import os
from openpype.pipeline import (
load
load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class MaxSceneLoader(load.LoaderPlugin):
@ -35,16 +37,26 @@ class MaxSceneLoader(load.LoaderPlugin):
self.log.error("Something failed when loading.")
max_container = max_containers.pop()
container_name = f"{name}_CON"
# rename the container with "_CON"
# get the original container
container = rt.container(name=container_name)
max_container.Parent = container
return container
return containerise(
name, [max_container], context, loader=self.__class__.__name__)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
max_objects = self.get_container_children(node)
for max_object in max_objects:
max_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

View file

@ -80,7 +80,7 @@ importFile @"{file_path}" #noPrompt
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
@staticmethod

View file

@ -196,12 +196,18 @@ class ARenderProducts:
"""Constructor."""
self.layer = layer
self.render_instance = render_instance
self.multipart = False
self.multipart = self.get_multipart()
# Initialize
self.layer_data = self._get_layer_data()
self.layer_data.products = self.get_render_products()
def get_multipart(self):
raise NotImplementedError(
"The render product implementation does not have a "
"\"get_multipart\" method."
)
def has_camera_token(self):
# type: () -> bool
"""Check if camera token is in image prefix.
@ -344,7 +350,6 @@ class ARenderProducts:
separator = file_prefix[matches[0].end(1):matches[1].start(1)]
return separator
def _get_layer_data(self):
# type: () -> LayerMetadata
# ______________________________________________
@ -531,16 +536,20 @@ class RenderProductsArnold(ARenderProducts):
return prefix
def _get_aov_render_products(self, aov, cameras=None):
"""Return all render products for the AOV"""
products = []
aov_name = self._get_attr(aov, "name")
def get_multipart(self):
multipart = False
multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
if multilayer or merge_AOVs:
multipart = True
return multipart
def _get_aov_render_products(self, aov, cameras=None):
"""Return all render products for the AOV"""
products = []
aov_name = self._get_attr(aov, "name")
ai_drivers = cmds.listConnections("{}.outputs".format(aov),
source=True,
destination=False,
@ -594,7 +603,7 @@ class RenderProductsArnold(ARenderProducts):
ext=ext,
aov=aov_name,
driver=ai_driver,
multipart=multipart,
multipart=self.multipart,
camera=camera)
products.append(product)
@ -731,6 +740,14 @@ class RenderProductsVray(ARenderProducts):
renderer = "vray"
def get_multipart(self):
multipart = False
image_format = self._get_attr("vraySettings.imageFormatStr")
if image_format == "exr (multichannel)":
multipart = True
return multipart
def get_renderer_prefix(self):
# type: () -> str
"""Get image prefix for V-Ray.
@ -797,11 +814,6 @@ class RenderProductsVray(ARenderProducts):
if default_ext in {"exr (multichannel)", "exr (deep)"}:
default_ext = "exr"
# Define multipart.
multipart = False
if image_format_str == "exr (multichannel)":
multipart = True
products = []
# add beauty as default when not disabled
@ -813,7 +825,7 @@ class RenderProductsVray(ARenderProducts):
productName="",
ext=default_ext,
camera=camera,
multipart=multipart
multipart=self.multipart
)
)
@ -826,10 +838,10 @@ class RenderProductsVray(ARenderProducts):
productName="Alpha",
ext=default_ext,
camera=camera,
multipart=multipart
multipart=self.multipart
)
)
if multipart:
if self.multipart:
# AOVs are merged in m-channel file, only main layer is rendered
return products
@ -989,6 +1001,34 @@ class RenderProductsRedshift(ARenderProducts):
renderer = "redshift"
unmerged_aovs = {"Cryptomatte"}
def get_files(self, product):
# When outputting AOVs we need to replace Redshift specific AOV tokens
# with Maya render tokens for generating file sequences. We validate
# against a specific AOV file prefix so we only need to account for one
# replacement.
if not product.multipart and product.driver:
file_prefix = self._get_attr(product.driver + ".filePrefix")
self.layer_data.filePrefix = file_prefix.replace(
"<BeautyPath>/<BeautyFile>",
"<Scene>/<RenderLayer>/<RenderLayer>"
)
return super(RenderProductsRedshift, self).get_files(product)
def get_multipart(self):
# For Redshift we don't directly return upon forcing multilayer
# due to some AOVs still being written into separate files,
# like Cryptomatte.
# AOVs are merged in multi-channel file
multipart = False
force_layer = bool(
self._get_attr("redshiftOptions.exrForceMultilayer")
)
if force_layer:
multipart = True
return multipart
def get_renderer_prefix(self):
"""Get image prefix for Redshift.
@ -1028,16 +1068,6 @@ class RenderProductsRedshift(ARenderProducts):
for c in self.get_renderable_cameras()
]
# For Redshift we don't directly return upon forcing multilayer
# due to some AOVs still being written into separate files,
# like Cryptomatte.
# AOVs are merged in multi-channel file
multipart = False
force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer")) # noqa
exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart"))
if exMultipart or force_layer:
multipart = True
# Get Redshift Extension from image format
image_format = self._get_attr("redshiftOptions.imageFormat") # integer
ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
@ -1059,7 +1089,7 @@ class RenderProductsRedshift(ARenderProducts):
continue
aov_type = self._get_attr(aov, "aovType")
if multipart and aov_type not in self.unmerged_aovs:
if self.multipart and aov_type not in self.unmerged_aovs:
continue
# Any AOVs that still get processed, like Cryptomatte
@ -1094,8 +1124,9 @@ class RenderProductsRedshift(ARenderProducts):
productName=aov_light_group_name,
aov=aov_name,
ext=ext,
multipart=multipart,
camera=camera)
multipart=False,
camera=camera,
driver=aov)
products.append(product)
if light_groups:
@ -1108,8 +1139,9 @@ class RenderProductsRedshift(ARenderProducts):
product = RenderProduct(productName=aov_name,
aov=aov_name,
ext=ext,
multipart=multipart,
camera=camera)
multipart=False,
camera=camera,
driver=aov)
products.append(product)
# When a Beauty AOV is added manually, it will be rendered as
@ -1124,7 +1156,7 @@ class RenderProductsRedshift(ARenderProducts):
products.insert(0,
RenderProduct(productName=beauty_name,
ext=ext,
multipart=multipart,
multipart=self.multipart,
camera=camera))
return products
@ -1144,6 +1176,10 @@ class RenderProductsRenderman(ARenderProducts):
renderer = "renderman"
unmerged_aovs = {"PxrCryptomatte"}
def get_multipart(self):
# Implemented as display specific in "get_render_products".
return False
def get_render_products(self):
"""Get all AOVs.
@ -1283,6 +1319,10 @@ class RenderProductsMayaHardware(ARenderProducts):
{"label": "EXR(exr)", "index": 40, "extension": "exr"}
]
def get_multipart(self):
# MayaHardware does not support multipart EXRs.
return False
def _get_extension(self, value):
result = None
if isinstance(value, int):

View file

@ -42,6 +42,7 @@ Provides:
import re
import os
import platform
import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@ -183,7 +184,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
self.log.info("multipart: {}".format(
multipart))
assert exp_files, "no file names were generated, this is bug"
self.log.info(exp_files)
self.log.info(
"expected files: {}".format(
json.dumps(exp_files, indent=4, sort_keys=True)
)
)
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV

View file

@ -22,6 +22,8 @@ class ExtractGLB(publish.Extractor):
self.log.info("Extracting GLB to: {}".format(path))
cmds.loadPlugin("maya2glTF", quiet=True)
nodes = instance[:]
self.log.info("Instance: {0}".format(nodes))
@ -45,6 +47,7 @@ class ExtractGLB(publish.Extractor):
"glb": True,
"vno": True # visibleNodeOnly
}
with lib.maintained_selection():
cmds.select(nodes, hi=True, noExpand=True)
extract_gltf(staging_dir,

View file

@ -0,0 +1,207 @@
import os
from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder
)
from openpype.pipeline import PublishValidationError
class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
"""
Validate that the asset uses a GLSL Shader
"""
order = ValidateContentsOrder + 0.1
families = ['gltf']
hosts = ['maya']
label = 'GLSL Shader for GLTF'
actions = [RepairAction]
optional = True
active = True
def process(self, instance):
shading_grp = self.get_material_from_shapes(instance)
if not shading_grp:
raise PublishValidationError("No shading group found")
invalid = self.get_texture_shader_invalid(instance)
if invalid:
raise PublishValidationError("Non GLSL Shader found: "
"{0}".format(invalid))
def get_material_from_shapes(self, instance):
shapes = cmds.ls(instance, type="mesh", long=True)
for shape in shapes:
shading_grp = cmds.listConnections(shape,
destination=True,
type="shadingEngine")
return shading_grp or []
def get_texture_shader_invalid(self, instance):
invalid = set()
shading_grp = self.get_material_from_shapes(instance)
for shading_group in shading_grp:
material_name = "{}.surfaceShader".format(shading_group)
material = cmds.listConnections(material_name,
source=True,
destination=False,
type="GLSLShader")
if not material:
# add material name
material = cmds.listConnections(material_name)[0]
invalid.add(material)
return list(invalid)
@classmethod
def repair(cls, instance):
"""
Repair instance by assigning GLSL Shader
to the material
"""
cls.assign_glsl_shader(instance)
return
@classmethod
def assign_glsl_shader(cls, instance):
"""
Converting StingrayPBS material to GLSL Shaders
for the glb export through Maya2GLTF plugin
"""
meshes = cmds.ls(instance, type="mesh", long=True)
cls.log.info("meshes: {}".format(meshes))
# load the glsl shader plugin
cmds.loadPlugin("glslShader", quiet=True)
for mesh in meshes:
# create glsl shader
glsl = cmds.createNode('GLSLShader')
glsl_shading_grp = cmds.sets(name=glsl + "SG", empty=True,
renderable=True, noSurfaceShader=True)
cmds.connectAttr(glsl + ".outColor",
glsl_shading_grp + ".surfaceShader")
# load the maya2gltf shader
ogsfx_path = instance.context.data["project_settings"]["maya"]["publish"]["ExtractGLB"]["ogsfx_path"] # noqa
if not os.path.exists(ogsfx_path):
if ogsfx_path:
# if custom ogsfx path is not specified
# the log below is the warning for the user
cls.log.warning("ogsfx shader file "
"not found in {}".format(ogsfx_path))
cls.log.info("Find the ogsfx shader file in "
"default maya directory...")
# re-direct to search the ogsfx path in maya_dir
ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path
if not os.path.exists(ogsfx_path):
raise PublishValidationError("The ogsfx shader file does not " # noqa
"exist: {}".format(ogsfx_path)) # noqa
cmds.setAttr(glsl + ".shader", ogsfx_path, typ="string")
# list the materials used for the assets
shading_grp = cmds.listConnections(mesh,
destination=True,
type="shadingEngine")
# get the materials related to the selected assets
for material in shading_grp:
pbs_shader = cmds.listConnections(material,
destination=True,
type="StingrayPBS")
if pbs_shader:
cls.pbs_shader_conversion(pbs_shader, glsl)
# setting up to relink the texture if
# the mesh is with aiStandardSurface
arnold_shader = cmds.listConnections(material,
destination=True,
type="aiStandardSurface")
if arnold_shader:
cls.arnold_shader_conversion(arnold_shader, glsl)
cmds.sets(mesh, forceElement=str(glsl_shading_grp))
@classmethod
def pbs_shader_conversion(cls, main_shader, glsl):
cls.log.info("StringrayPBS detected "
"-> Can do texture conversion")
for shader in main_shader:
# get the file textures related to the PBS Shader
albedo = cmds.listConnections(shader +
".TEX_color_map")
if albedo:
dif_output = albedo[0] + ".outColor"
# get the glsl_shader input
# reconnect the file nodes to maya2gltf shader
glsl_dif = glsl + ".u_BaseColorTexture"
cmds.connectAttr(dif_output, glsl_dif)
# connect orm map if there is one
orm_packed = cmds.listConnections(shader +
".TEX_ao_map")
if orm_packed:
orm_output = orm_packed[0] + ".outColor"
mtl = glsl + ".u_MetallicTexture"
ao = glsl + ".u_OcclusionTexture"
rough = glsl + ".u_RoughnessTexture"
cmds.connectAttr(orm_output, mtl)
cmds.connectAttr(orm_output, ao)
cmds.connectAttr(orm_output, rough)
# connect nrm map if there is one
nrm = cmds.listConnections(shader +
".TEX_normal_map")
if nrm:
nrm_output = nrm[0] + ".outColor"
glsl_nrm = glsl + ".u_NormalTexture"
cmds.connectAttr(nrm_output, glsl_nrm)
@classmethod
def arnold_shader_conversion(cls, main_shader, glsl):
cls.log.info("aiStandardSurface detected "
"-> Can do texture conversion")
for shader in main_shader:
# get the file textures related to the PBS Shader
albedo = cmds.listConnections(shader + ".baseColor")
if albedo:
dif_output = albedo[0] + ".outColor"
# get the glsl_shader input
# reconnect the file nodes to maya2gltf shader
glsl_dif = glsl + ".u_BaseColorTexture"
cmds.connectAttr(dif_output, glsl_dif)
orm_packed = cmds.listConnections(shader +
".specularRoughness")
if orm_packed:
orm_output = orm_packed[0] + ".outColor"
mtl = glsl + ".u_MetallicTexture"
ao = glsl + ".u_OcclusionTexture"
rough = glsl + ".u_RoughnessTexture"
cmds.connectAttr(orm_output, mtl)
cmds.connectAttr(orm_output, ao)
cmds.connectAttr(orm_output, rough)
# connect nrm map if there is one
bump_node = cmds.listConnections(shader +
".normalCamera")
if bump_node:
for bump in bump_node:
nrm = cmds.listConnections(bump +
".bumpValue")
if nrm:
nrm_output = nrm[0] + ".outColor"
glsl_nrm = glsl + ".u_NormalTexture"
cmds.connectAttr(nrm_output, glsl_nrm)
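The ogsfx lookup above falls back from the configured path to the user's Maya directory. A small sketch of that resolution, with a hypothetical settings value (the real value comes from project settings `maya/publish/ExtractGLB/ogsfx_path`):

import os

# Hypothetical configured value; maya2glTF ships its PBR shader as .ogsfx
ogsfx_path = "/maya2glTF/PBR/shaders/glTF_PBR.ogsfx"

if not os.path.exists(ogsfx_path):
    # Fall back to the same relative path under the user's Maya directory,
    # e.g. ~/maya/maya2glTF/PBR/shaders/glTF_PBR.ogsfx
    ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path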

View file

@ -0,0 +1,31 @@
from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder
)
class ValidateGLSLPlugin(pyblish.api.InstancePlugin):
"""
Validate that the maya2glTF plugin is loaded
"""
order = ValidateContentsOrder + 0.15
families = ['gltf']
hosts = ['maya']
label = 'maya2glTF plugin'
actions = [RepairAction]
def process(self, instance):
if not cmds.pluginInfo("maya2glTF", query=True, loaded=True):
raise RuntimeError("maya2glTF is not loaded")
@classmethod
def repair(cls, instance):
"""
Repair instance by enabling the plugin
"""
return cmds.loadPlugin("maya2glTF", quiet=True)

View file

@ -63,5 +63,12 @@ class NukeAddon(OpenPypeModule, IHostAddon):
path_paths.append(quick_time_path)
env["PATH"] = os.pathsep.join(path_paths)
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(NUKE_ROOT_DIR, "hooks")
]
def get_workfile_extensions(self):
return [".nk"]

View file

@ -30,7 +30,6 @@ from .pipeline import (
parse_container,
update_container,
get_workfile_build_placeholder_plugins,
)
from .lib import (
INSTANCE_DATA_KNOB,
@ -79,8 +78,6 @@ __all__ = (
"parse_container",
"update_container",
"get_workfile_build_placeholder_plugins",
"INSTANCE_DATA_KNOB",
"ROOT_DATA_KNOB",
"maintained_selection",

View file

@ -0,0 +1,4 @@
import os
ASSIST = bool(os.getenv("NUKEASSIST"))

View file

@ -50,6 +50,7 @@ from openpype.pipeline.colorspace import (
from openpype.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST
from .workio import (
save_file,
@ -215,7 +216,7 @@ def update_node_data(node, knobname, data):
class Knobby(object):
"""[DEPRICATED] For creating knob which it's type isn't
"""[DEPRECATED] For creating knob which it's type isn't
mapped in `create_knobs`
Args:
@ -249,7 +250,7 @@ class Knobby(object):
def create_knobs(data, tab=None):
"""[DEPRICATED] Create knobs by data
"""Create knobs by data
Depending on the type of each dict value and creates the correct Knob.
@ -343,7 +344,7 @@ def create_knobs(data, tab=None):
def imprint(node, data, tab=None):
"""[DEPRICATED] Store attributes with value on node
"""Store attributes with value on node
Parse user data into Node knobs.
Use `collections.OrderedDict` to ensure knob order.
@ -398,8 +399,9 @@ def imprint(node, data, tab=None):
node.addKnob(knob)
@deprecated
def add_publish_knob(node):
"""[DEPRICATED] Add Publish knob to node
"""[DEPRECATED] Add Publish knob to node
Arguments:
node (nuke.Node): nuke node to be processed
@ -416,8 +418,9 @@ def add_publish_knob(node):
return node
@deprecated
def set_avalon_knob_data(node, data=None, prefix="avalon:"):
"""[DEPRICATED] Sets data into nodes's avalon knob
"""[DEPRECATED] Sets data into nodes's avalon knob
Arguments:
node (nuke.Node): Nuke node to imprint with data,
@ -478,8 +481,9 @@ def set_avalon_knob_data(node, data=None, prefix="avalon:"):
return node
@deprecated
def get_avalon_knob_data(node, prefix="avalon:", create=True):
"""[DEPRICATED] Gets a data from nodes's avalon knob
"""[DEPRECATED] Gets a data from nodes's avalon knob
Arguments:
node (obj): Nuke node to search for data,
@ -521,8 +525,9 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True):
return data
@deprecated
def fix_data_for_node_create(data):
"""[DEPRICATED] Fixing data to be used for nuke knobs
"""[DEPRECATED] Fixing data to be used for nuke knobs
"""
for k, v in data.items():
if isinstance(v, six.text_type):
@ -532,8 +537,9 @@ def fix_data_for_node_create(data):
return data
@deprecated
def add_write_node_legacy(name, **kwarg):
"""[DEPRICATED] Adding nuke write node
"""[DEPRECATED] Adding nuke write node
Arguments:
name (str): nuke node name
kwarg (attrs): data for nuke knobs
@ -697,7 +703,7 @@ def get_nuke_imageio_settings():
@deprecated("openpype.hosts.nuke.api.lib.get_nuke_imageio_settings")
def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
'''[DEPRICATED] Get preset data for dataflow (fileType, compression, bitDepth)
'''[DEPRECATED] Get preset data for dataflow (fileType, compression, bitDepth)
'''
assert any([creator, nodeclass]), nuke.message(
@ -1241,7 +1247,7 @@ def create_write_node(
nodes to be created before write with dependency
review (bool)[optional]: adding review knob
farm (bool)[optional]: rendering workflow target
kwargs (dict)[optional]: additional key arguments for formating
kwargs (dict)[optional]: additional key arguments for formatting
Example:
prenodes = {
@ -2258,14 +2264,20 @@ class WorkfileSettings(object):
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
set_node_data(
self._root_node,
INSTANCE_DATA_KNOB,
{
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}
)
if not ASSIST:
set_node_data(
self._root_node,
INSTANCE_DATA_KNOB,
{
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}
)
else:
log.warning(
"NukeAssist mode is not allowing "
"updating custom knobs..."
)
def reset_resolution(self):
"""Set resolution to project resolution."""

View file

@ -60,6 +60,7 @@ from .workio import (
work_root,
current_file
)
from .constants import ASSIST
log = Logger.get_logger(__name__)
@ -72,7 +73,6 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
MENU_LABEL = os.environ["AVALON_LABEL"]
# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None))
@ -101,6 +101,12 @@ class NukeHost(
def get_workfile_extensions(self):
return file_extensions()
def get_workfile_build_placeholder_plugins(self):
return [
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin
]
def get_containers(self):
return ls()
@ -200,45 +206,45 @@ def _show_workfiles():
host_tools.show_workfiles(parent=None, on_top=False)
def get_workfile_build_placeholder_plugins():
return [
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin
]
def _install_menu():
"""Install Avalon menu into Nuke's main menu bar."""
# uninstall original avalon menu
main_window = get_main_window()
menubar = nuke.menu("Nuke")
menu = menubar.addMenu(MENU_LABEL)
label = "{0}, {1}".format(
os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
)
Context.context_label = label
context_action = menu.addCommand(label)
context_action.setEnabled(False)
if not ASSIST:
label = "{0}, {1}".format(
os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
)
Context.context_label = label
context_action = menu.addCommand(label)
context_action.setEnabled(False)
# add separator after context label
menu.addSeparator()
menu.addSeparator()
menu.addCommand(
"Work Files...",
_show_workfiles
)
menu.addSeparator()
menu.addCommand(
"Create...",
lambda: host_tools.show_publisher(
tab="create"
if not ASSIST:
menu.addCommand(
"Create...",
lambda: host_tools.show_publisher(
tab="create"
)
)
)
menu.addCommand(
"Publish...",
lambda: host_tools.show_publisher(
tab="publish"
menu.addCommand(
"Publish...",
lambda: host_tools.show_publisher(
tab="publish"
)
)
)
menu.addCommand(
"Load...",
lambda: host_tools.show_loader(
@ -286,15 +292,18 @@ def _install_menu():
"Build Workfile from template",
lambda: build_workfile_template()
)
menu_template.addSeparator()
menu_template.addCommand(
"Create Place Holder",
lambda: create_placeholder()
)
menu_template.addCommand(
"Update Place Holder",
lambda: update_placeholder()
)
if not ASSIST:
menu_template.addSeparator()
menu_template.addCommand(
"Create Place Holder",
lambda: create_placeholder()
)
menu_template.addCommand(
"Update Place Holder",
lambda: update_placeholder()
)
menu.addSeparator()
menu.addCommand(
"Experimental tools...",

View file

@ -558,9 +558,7 @@ class ExporterReview(object):
self.path_in = self.instance.data.get("path", None)
self.staging_dir = self.instance.data["stagingDir"]
self.collection = self.instance.data.get("collection", None)
self.data = dict({
"representations": list()
})
self.data = {"representations": []}
def get_file_info(self):
if self.collection:
@ -626,7 +624,7 @@ class ExporterReview(object):
nuke_imageio = opnlib.get_nuke_imageio_settings()
# TODO: this is only securing backward compatibility lets remove
# this once all projects's anotomy are updated to newer config
# this once all projects' anatomy are updated to newer config
if "baking" in nuke_imageio.keys():
return nuke_imageio["baking"]["viewerProcess"]
else:
@ -823,8 +821,41 @@ class ExporterReviewMov(ExporterReview):
add_tags = []
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
# TODO: remove this when `reformat_nodes_config`
# is changed in settings
reformat_node_add = kwargs["reformat_node_add"]
reformat_node_config = kwargs["reformat_node_config"]
# TODO: make this required in future
reformat_nodes_config = kwargs.get("reformat_nodes_config", {})
# TODO: remove this once deprecated is removed
# make sure only reformat_nodes_config is used in future
if reformat_node_add and reformat_nodes_config.get("enabled"):
self.log.warning(
"`reformat_node_add` is deprecated. "
"Please use only `reformat_nodes_config` instead.")
reformat_nodes_config = None
# TODO: reformat code when backward compatibility is not needed
# warning if reformat_nodes_config is not set
if not reformat_nodes_config:
self.log.warning(
"Please set `reformat_nodes_config` in settings. "
"Using `reformat_node_config` instead."
)
reformat_nodes_config = {
"enabled": reformat_node_add,
"reposition_nodes": [
{
"node_class": "Reformat",
"knobs": reformat_node_config
}
]
}
bake_viewer_process = kwargs["bake_viewer_process"]
bake_viewer_input_process_node = kwargs[
"bake_viewer_input_process"]
@ -846,7 +877,6 @@ class ExporterReviewMov(ExporterReview):
subset = self.instance.data["subset"]
self._temp_nodes[subset] = []
# ---------- start nodes creation
# Read node
r_node = nuke.createNode("Read")
@ -860,44 +890,39 @@ class ExporterReviewMov(ExporterReview):
if read_raw:
r_node["raw"].setValue(1)
# connect
self._temp_nodes[subset].append(r_node)
self.previous_node = r_node
self.log.debug("Read... `{}`".format(self._temp_nodes[subset]))
# connect to Read node
self._shift_to_previous_node_and_temp(subset, r_node, "Read... `{}`")
# add reformat node
if reformat_node_add:
if reformat_nodes_config["enabled"]:
reposition_nodes = reformat_nodes_config["reposition_nodes"]
for reposition_node in reposition_nodes:
node_class = reposition_node["node_class"]
knobs = reposition_node["knobs"]
node = nuke.createNode(node_class)
set_node_knobs_from_settings(node, knobs)
# connect in order
self._connect_to_above_nodes(
node, subset, "Reposition node... `{}`"
)
# append reformated tag
add_tags.append("reformated")
rf_node = nuke.createNode("Reformat")
set_node_knobs_from_settings(rf_node, reformat_node_config)
# connect
rf_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(rf_node)
self.previous_node = rf_node
self.log.debug(
"Reformat... `{}`".format(self._temp_nodes[subset]))
# only create colorspace baking if toggled on
if bake_viewer_process:
if bake_viewer_input_process_node:
# View Process node
ipn = get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes[subset].append(ipn)
self.previous_node = ipn
self.log.debug(
"ViewProcess... `{}`".format(
self._temp_nodes[subset]))
# connect to ViewProcess node
self._connect_to_above_nodes(ipn, subset, "ViewProcess... `{}`")
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# assign display
display, viewer = get_viewer_config_from_string(
str(baking_view_profile)
)
@ -907,13 +932,7 @@ class ExporterReviewMov(ExporterReview):
# assign viewer
dag_node["view"].setValue(viewer)
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(
self._temp_nodes[subset]))
self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`")
# Write node
write_node = nuke.createNode("Write")
self.log.debug("Path: {}".format(self.path))
@ -967,6 +986,15 @@ class ExporterReviewMov(ExporterReview):
return self.data
def _shift_to_previous_node_and_temp(self, subset, node, message):
self._temp_nodes[subset].append(node)
self.previous_node = node
self.log.debug(message.format(self._temp_nodes[subset]))
def _connect_to_above_nodes(self, node, subset, message):
node.setInput(0, self.previous_node)
self._shift_to_previous_node_and_temp(subset, node, message)
@deprecated("openpype.hosts.nuke.api.plugin.NukeWriteCreator")
class AbstractWriteRender(OpenPypeCreator):
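To make the deprecation shim in `ExporterReviewMov` concrete: when only the legacy `reformat_node_add`/`reformat_node_config` pair is set, it is folded into the new `reformat_nodes_config` schema. The values below are illustrative; the knob-list shape is an assumption based on how `set_node_knobs_from_settings` consumes it:

# Hypothetical legacy settings, as passed in through kwargs above
reformat_node_add = True
reformat_node_config = [
    {"name": "type", "value": "to format"},
    {"name": "format", "value": "HD_1080"},
]

# What the backward-compatibility branch builds from them
reformat_nodes_config = {
    "enabled": reformat_node_add,
    "reposition_nodes": [
        {
            "node_class": "Reformat",
            "knobs": reformat_node_config,
        }
    ],
}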

View file

@ -0,0 +1,11 @@
from openpype.lib import PreLaunchHook
class PrelaunchNukeAssistHook(PreLaunchHook):
"""
Set the NUKEASSIST environment flag when launching NukeAssist.
"""
app_groups = ["nukeassist"]
def execute(self):
self.launch_context.env["NUKEASSIST"] = "1"
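Putting the NukeAssist pieces together: this prelaunch hook sets `NUKEASSIST=1` in the launch environment, the new `constants.py` turns it into the `ASSIST` boolean at import time, and `pipeline.py`/`lib.py` guard context-editing features behind it. A condensed sketch of the consumer side (the menu body is illustrative):

import os

# constants.py, as added above
ASSIST = bool(os.getenv("NUKEASSIST"))


def install_context_features(menu):
    # Pattern used in _install_menu() and WorkfileSettings above:
    # skip anything that would create instances or write custom knobs.
    if ASSIST:
        return
    menu.addCommand("Create...", lambda: None)  # illustrative callback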

View file

@ -23,7 +23,7 @@ class ExtractReviewData(publish.Extractor):
representations = instance.data.get("representations", [])
# review can be removed since `ProcessSubmittedJobOnFarm` will create
# reviable representation if needed
# reviewable representation if needed
if (
"render.farm" in instance.data["families"]
and "review" in instance.data["families"]

View file

@ -35,10 +35,13 @@ Todos:
"""
import collections
from typing import Any, Optional, Union
from openpype.client import get_asset_by_name
from openpype.lib import (
prepare_template_data,
AbstractAttrDef,
UISeparatorDef,
EnumDef,
TextDef,
BoolDef,
@ -86,6 +89,25 @@ to match group color of Render Layer.
)
AUTODETECT_RENDER_DETAILED_DESCRIPTION = (
"""Semi-automated Render Layer and Render Pass creation.
Render Layers and Render Passes are created from information in the TVPaint
scene. Every color group used in the scene becomes a Render Layer, with the
group name used as its variant.
Every TVPaint layer under a color group becomes a Render Pass, with the layer
name used as its variant.
The plugin can use all color groups and layers, or skip those that are not
visible.
There is an option to auto-rename color groups before Render Layer creation,
based on a settings template that is filled with the group's index, counted
from bottom to top.
"""
)
class CreateRenderlayer(TVPaintCreator):
"""Mark layer group as Render layer instance.
@ -604,6 +626,359 @@ class CreateRenderPass(TVPaintCreator):
return self.get_pre_create_attr_defs()
class TVPaintAutoDetectRenderCreator(TVPaintCreator):
"""Create Render Layer and Render Pass instances based on scene data.
This is an auto-detection creator that the user can trigger to create
instances from information in the scene. Each color group used in the
scene becomes a Render Layer, with the group name as variant, and each
TVPaint layer becomes a Render Pass, with the layer name as variant.
This creator never owns any instances itself; all created instances
belong to the Render Layer and Render Pass creators.
"""
family = "render"
label = "Render Layer/Passes"
identifier = "render.auto.detect.creator"
order = CreateRenderPass.order + 10
description = (
"Create Render Layers and Render Passes based on scene setup"
)
detailed_description = AUTODETECT_RENDER_DETAILED_DESCRIPTION
# Settings
enabled = False
allow_group_rename = True
group_name_template = "L{group_index}"
group_idx_offset = 10
group_idx_padding = 3
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings
["tvpaint"]
["create"]
["auto_detect_render"]
)
self.allow_group_rename = plugin_settings["allow_group_rename"]
self.group_name_template = plugin_settings["group_name_template"]
self.group_idx_offset = plugin_settings["group_idx_offset"]
self.group_idx_padding = plugin_settings["group_idx_padding"]
def _rename_groups(
self,
groups_order: list[int],
scene_groups: list[dict[str, Any]]
):
new_group_name_by_id: dict[int, str] = {}
groups_by_id: dict[int, dict[str, Any]] = {
group["group_id"]: group
for group in scene_groups
}
# Count only renamed groups
for idx, group_id in enumerate(groups_order):
group_index_value: str = (
"{{:0>{}}}"
.format(self.group_idx_padding)
.format((idx + 1) * self.group_idx_offset)
)
group_name_fill_values: dict[str, str] = {
"groupIdx": group_index_value,
"groupidx": group_index_value,
"group_idx": group_index_value,
"group_index": group_index_value,
}
group_name: str = self.group_name_template.format(
**group_name_fill_values
)
group: dict[str, Any] = groups_by_id[group_id]
if group["name"] != group_name:
new_group_name_by_id[group_id] = group_name
grg_lines: list[str] = []
for group_id, group_name in new_group_name_by_id.items():
group: dict[str, Any] = groups_by_id[group_id]
grg_line: str = "tv_layercolor \"setcolor\" {} {} {} {} {}".format(
group["clip_id"],
group_id,
group["red"],
group["green"],
group["blue"],
group_name
)
grg_lines.append(grg_line)
group["name"] = group_name
if grg_lines:
execute_george_through_file("\n".join(grg_lines))
def _prepare_render_layer(
self,
project_name: str,
asset_doc: dict[str, Any],
task_name: str,
group_id: int,
groups: list[dict[str, Any]],
mark_for_review: bool,
existing_instance: Optional[CreatedInstance] = None,
) -> Union[CreatedInstance, None]:
match_group: Union[dict[str, Any], None] = next(
(
group
for group in groups
if group["group_id"] == group_id
),
None
)
if not match_group:
return None
variant: str = match_group["name"]
creator: CreateRenderlayer = (
self.create_context.creators[CreateRenderlayer.identifier]
)
subset_name: str = creator.get_subset_name(
variant,
task_name,
asset_doc,
project_name,
host_name=self.create_context.host_name,
)
if existing_instance is not None:
existing_instance["asset"] = asset_doc["name"]
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
return existing_instance
instance_data: dict[str, str] = {
"asset": asset_doc["name"],
"task": task_name,
"family": creator.family,
"variant": variant
}
pre_create_data: dict[str, str] = {
"group_id": group_id,
"mark_for_review": mark_for_review
}
return creator.create(subset_name, instance_data, pre_create_data)
def _prepare_render_passes(
self,
project_name: str,
asset_doc: dict[str, Any],
task_name: str,
render_layer_instance: CreatedInstance,
layers: list[dict[str, Any]],
mark_for_review: bool,
existing_render_passes: list[CreatedInstance]
):
creator: CreateRenderPass = (
self.create_context.creators[CreateRenderPass.identifier]
)
render_pass_by_layer_name = {}
for render_pass in existing_render_passes:
for layer_name in render_pass["layer_names"]:
render_pass_by_layer_name[layer_name] = render_pass
for layer in layers:
layer_name = layer["name"]
variant = layer_name
render_pass = render_pass_by_layer_name.get(layer_name)
if render_pass is not None:
if len(render_pass["layer_names"]) > 1:
variant = render_pass["variant"]
subset_name = creator.get_subset_name(
variant,
task_name,
asset_doc,
project_name,
host_name=self.create_context.host_name,
instance=render_pass
)
if render_pass is not None:
render_pass["asset"] = asset_doc["name"]
render_pass["task"] = task_name
render_pass["subset"] = subset_name
continue
instance_data: dict[str, str] = {
"asset": asset_doc["name"],
"task": task_name,
"family": creator.family,
"variant": variant
}
pre_create_data: dict[str, Any] = {
"render_layer_instance_id": render_layer_instance.id,
"layer_names": [layer_name],
"mark_for_review": mark_for_review
}
creator.create(subset_name, instance_data, pre_create_data)
def _filter_groups(
self,
layers_by_group_id,
groups_order,
only_visible_groups
):
new_groups_order = []
for group_id in groups_order:
layers: list[dict[str, Any]] = layers_by_group_id[group_id]
if not layers:
continue
if (
only_visible_groups
and not any(
layer
for layer in layers
if layer["visible"]
)
):
continue
new_groups_order.append(group_id)
return new_groups_order
def create(self, subset_name, instance_data, pre_create_data):
project_name: str = self.create_context.get_current_project_name()
asset_name: str = instance_data["asset"]
task_name: str = instance_data["task"]
asset_doc: dict[str, Any] = get_asset_by_name(project_name, asset_name)
render_layers_by_group_id: dict[int, CreatedInstance] = {}
render_passes_by_render_layer_id: dict[int, list[CreatedInstance]] = (
collections.defaultdict(list)
)
for instance in self.create_context.instances:
if instance.creator_identifier == CreateRenderlayer.identifier:
group_id = instance["creator_attributes"]["group_id"]
render_layers_by_group_id[group_id] = instance
elif instance.creator_identifier == CreateRenderPass.identifier:
render_layer_id = (
instance
["creator_attributes"]
["render_layer_instance_id"]
)
render_passes_by_render_layer_id[render_layer_id].append(
instance
)
layers_by_group_id: dict[int, list[dict[str, Any]]] = (
collections.defaultdict(list)
)
scene_layers: list[dict[str, Any]] = get_layers_data()
scene_groups: list[dict[str, Any]] = get_groups_data()
groups_order: list[int] = []
for layer in scene_layers:
group_id: int = layer["group_id"]
# Skip 'default' group
if group_id == 0:
continue
layers_by_group_id[group_id].append(layer)
if group_id not in groups_order:
groups_order.append(group_id)
groups_order.reverse()
mark_layers_for_review = pre_create_data.get(
"mark_layers_for_review", False
)
mark_passes_for_review = pre_create_data.get(
"mark_passes_for_review", False
)
rename_groups = pre_create_data.get("rename_groups", False)
only_visible_groups = pre_create_data.get("only_visible_groups", False)
groups_order = self._filter_groups(
layers_by_group_id,
groups_order,
only_visible_groups
)
if not groups_order:
return
if rename_groups:
self._rename_groups(groups_order, scene_groups)
# Make sure all render layers are created
for group_id in groups_order:
instance: Union[CreatedInstance, None] = (
self._prepare_render_layer(
project_name,
asset_doc,
task_name,
group_id,
scene_groups,
mark_layers_for_review,
render_layers_by_group_id.get(group_id),
)
)
if instance is not None:
render_layers_by_group_id[group_id] = instance
for group_id in groups_order:
layers: list[dict[str, Any]] = layers_by_group_id[group_id]
render_layer_instance: Union[CreatedInstance, None] = (
render_layers_by_group_id.get(group_id)
)
if not layers or render_layer_instance is None:
continue
self._prepare_render_passes(
project_name,
asset_doc,
task_name,
render_layer_instance,
layers,
mark_passes_for_review,
render_passes_by_render_layer_id[render_layer_instance.id]
)
def get_pre_create_attr_defs(self) -> list[AbstractAttrDef]:
render_layer_creator: CreateRenderlayer = (
self.create_context.creators[CreateRenderlayer.identifier]
)
render_pass_creator: CreateRenderPass = (
self.create_context.creators[CreateRenderPass.identifier]
)
output = []
if self.allow_group_rename:
output.extend([
BoolDef(
"rename_groups",
label="Rename color groups",
tooltip="Will rename color groups using studio template",
default=True
),
BoolDef(
"only_visible_groups",
label="Only visible color groups",
tooltip=(
"Render Layers and rename will happen only on color"
" groups with visible layers."
),
default=True
),
UISeparatorDef()
])
output.extend([
BoolDef(
"mark_layers_for_review",
label="Mark RenderLayers for review",
default=render_layer_creator.mark_for_review
),
BoolDef(
"mark_passes_for_review",
label="Mark RenderPasses for review",
default=render_pass_creator.mark_for_review
)
])
return output
class TVPaintSceneRenderCreator(TVPaintAutoCreator):
family = "render"
subset_template_family_filter = "renderScene"
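With the settings defaults above (`group_name_template = "L{group_index}"`, `group_idx_offset = 10`, `group_idx_padding = 3`), the index arithmetic in `_rename_groups` yields names like these:

# Worked example of the renaming arithmetic in _rename_groups()
group_idx_offset = 10
group_idx_padding = 3
group_name_template = "L{group_index}"

for idx in range(3):  # three color groups, counted bottom to top
    group_index_value = (
        "{{:0>{}}}"
        .format(group_idx_padding)
        .format((idx + 1) * group_idx_offset)
    )
    print(group_name_template.format(group_index=group_index_value))
# prints: L010, L020, L030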

View file

@ -9,6 +9,8 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
hosts = ["tvpaint"]
families = ["render", "review"]
ignore_render_pass_transparency = False
def process(self, instance):
context = instance.context
creator_identifier = instance.data["creator_identifier"]
@ -63,6 +65,9 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
for layer in layers_data
if layer["name"] in layer_names
]
instance.data["ignoreLayersTransparency"] = (
self.ignore_render_pass_transparency
)
render_layer_data = None
render_layer_id = creator_attributes["render_layer_instance_id"]

View file

@ -59,6 +59,10 @@ class ExtractSequence(pyblish.api.Extractor):
)
)
ignore_layers_transparency = instance.data.get(
"ignoreLayersTransparency", False
)
family_lowered = instance.data["family"].lower()
mark_in = instance.context.data["sceneMarkIn"]
mark_out = instance.context.data["sceneMarkOut"]
@ -114,7 +118,11 @@ class ExtractSequence(pyblish.api.Extractor):
else:
# Render output
result = self.render(
output_dir, mark_in, mark_out, filtered_layers
output_dir,
mark_in,
mark_out,
filtered_layers,
ignore_layers_transparency
)
output_filepaths_by_frame_idx, thumbnail_fullpath = result
@ -274,7 +282,9 @@ class ExtractSequence(pyblish.api.Extractor):
return output_filepaths_by_frame_idx, thumbnail_filepath
def render(self, output_dir, mark_in, mark_out, layers):
def render(
self, output_dir, mark_in, mark_out, layers, ignore_layer_opacity
):
""" Export images from TVPaint.
Args:
@ -282,6 +292,7 @@ class ExtractSequence(pyblish.api.Extractor):
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
layers (list): List of layers to be exported.
ignore_layer_opacity (bool): Layer's opacity will be ignored.
Returns:
tuple: With 2 items first is list of filenames second is path to
@ -323,7 +334,7 @@ class ExtractSequence(pyblish.api.Extractor):
for layer_id, render_data in extraction_data_by_layer_id.items():
layer = layers_by_id[layer_id]
filepaths_by_layer_id[layer_id] = self._render_layer(
render_data, layer, output_dir
render_data, layer, output_dir, ignore_layer_opacity
)
# Prepare final filepaths where compositing should store result
@ -380,7 +391,9 @@ class ExtractSequence(pyblish.api.Extractor):
red, green, blue = self.review_bg
return (red, green, blue)
def _render_layer(self, render_data, layer, output_dir):
def _render_layer(
self, render_data, layer, output_dir, ignore_layer_opacity
):
frame_references = render_data["frame_references"]
filenames_by_frame_index = render_data["filenames_by_frame_index"]
@ -389,6 +402,12 @@ class ExtractSequence(pyblish.api.Extractor):
"tv_layerset {}".format(layer_id),
"tv_SaveMode \"PNG\""
]
# Set density to 100 and store previous opacity
if ignore_layer_opacity:
george_script_lines.extend([
"tv_layerdensity 100",
"orig_opacity = result",
])
filepaths_by_frame = {}
frames_to_render = []
@ -409,6 +428,10 @@ class ExtractSequence(pyblish.api.Extractor):
# Store image to output
george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
# Set density back to the original opacity
if ignore_layer_opacity:
george_script_lines.append("tv_layerdensity orig_opacity")
self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
",".join(frames_to_render), layer_id, layer["name"]
))
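As a concrete picture of the `ignore_layer_opacity` branch, the george script assembled by `_render_layer` brackets the per-frame saves with a density override and restore, roughly like this (the layer id, frame commands and paths are illustrative):

# Shape of the george script when ignore_layer_opacity is True
george_script_lines = [
    "tv_layerset 123",               # select the layer by id
    "tv_SaveMode \"PNG\"",
    "tv_layerdensity 100",           # force full opacity...
    "orig_opacity = result",         # ...remembering the previous value
    # ...per-frame display and "tv_saveimage <path>" lines go here...
    "tv_layerdensity orig_opacity",  # restore the original opacity
]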

View file

@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
"""Unreal Editor OpenPype host API."""
from .plugin import Loader
from .plugin import (
UnrealActorCreator,
UnrealAssetCreator,
Loader
)
from .pipeline import (
install,

View file

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
import os
import json
import logging
from typing import List
from contextlib import contextmanager
import semver
import time
import pyblish.api
@ -16,13 +18,14 @@ from openpype.pipeline import (
)
from openpype.tools.utils import host_tools
import openpype.hosts.unreal
from openpype.host import HostBase, ILoadHost
from openpype.host import HostBase, ILoadHost, IPublishHost
import unreal # noqa
logger = logging.getLogger("openpype.hosts.unreal")
OPENPYPE_CONTAINERS = "OpenPypeContainers"
CONTEXT_CONTAINER = "OpenPype/context.json"
UNREAL_VERSION = semver.VersionInfo(
*os.getenv("OPENPYPE_UNREAL_VERSION").split(".")
)
@ -35,7 +38,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class UnrealHost(HostBase, ILoadHost):
class UnrealHost(HostBase, ILoadHost, IPublishHost):
"""Unreal host implementation.
For some time this class will re-use functions from module based
@ -60,6 +63,32 @@ class UnrealHost(HostBase, ILoadHost):
show_tools_dialog()
def update_context_data(self, data, changes):
content_path = unreal.Paths.project_content_dir()
op_ctx = content_path + CONTEXT_CONTAINER
attempts = 3
for i in range(attempts):
try:
with open(op_ctx, "w+") as f:
json.dump(data, f)
break
except IOError:
if i == attempts - 1:
raise Exception("Failed to write context data. Aborting.")
unreal.log_warning("Failed to write context data. Retrying...")
time.sleep(3)
def get_context_data(self):
content_path = unreal.Paths.project_content_dir()
op_ctx = content_path + CONTEXT_CONTAINER
if not os.path.isfile(op_ctx):
return {}
with open(op_ctx, "r") as fp:
data = json.load(fp)
return data
def install():
"""Install Unreal configuration for OpenPype."""
@ -133,6 +162,31 @@ def ls():
yield data
def ls_inst():
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# UE 5.1 changed how class name is specified
class_name = [
"/Script/OpenPype",
"OpenPypePublishInstance"
] if (
UNREAL_VERSION.major == 5
and UNREAL_VERSION.minor > 0
) else "OpenPypePublishInstance" # noqa
instances = ar.get_assets_by_class(class_name, True)
# get_assets_by_class returns AssetData. To get all metadata we need to
# load the asset. get_tag_values() works only on metadata registered in
# Asset Registry Project settings (and there is no way to set it with
# Python short of editing the ini configuration file).
for asset_data in instances:
asset = asset_data.get_asset()
data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
data["objectName"] = asset_data.asset_name
data = cast_map_to_str_dict(data)
yield data
def parse_container(container):
"""To get data from container, AssetContainer must be loaded.

View file

@ -1,7 +1,245 @@
# -*- coding: utf-8 -*-
from abc import ABC
import ast
import collections
import sys
import six
from abc import (
ABC,
ABCMeta,
)
from openpype.pipeline import LoaderPlugin
import unreal
from .pipeline import (
create_publish_instance,
imprint,
ls_inst,
UNREAL_VERSION
)
from openpype.lib import (
BoolDef,
UILabelDef
)
from openpype.pipeline import (
Creator,
LoaderPlugin,
CreatorError,
CreatedInstance
)
@six.add_metaclass(ABCMeta)
class UnrealBaseCreator(Creator):
"""Base class for Unreal creator plugins."""
root = "/Game/OpenPype/PublishInstances"
suffix = "_INS"
@staticmethod
def cache_subsets(shared_data):
"""Cache instances for Creators to shared data.
Create `unreal_cached_subsets` key when needed in shared data and
fill it with all collected instances from the scene under its
respective creator identifiers.
If legacy instances are detected in the scene, create
`unreal_cached_legacy_subsets` there and fill it with
all legacy subsets under family as a key.
Args:
shared_data (Dict[str, Any]): Shared data.
Returns:
Dict[str, Any]: Shared data dictionary.
"""
if shared_data.get("unreal_cached_subsets") is None:
unreal_cached_subsets = collections.defaultdict(list)
unreal_cached_legacy_subsets = collections.defaultdict(list)
for instance in ls_inst():
creator_id = instance.get("creator_identifier")
if creator_id:
unreal_cached_subsets[creator_id].append(instance)
else:
family = instance.get("family")
unreal_cached_legacy_subsets[family].append(instance)
shared_data["unreal_cached_subsets"] = unreal_cached_subsets
shared_data["unreal_cached_legacy_subsets"] = (
unreal_cached_legacy_subsets
)
return shared_data
def create(self, subset_name, instance_data, pre_create_data):
try:
instance_name = f"{subset_name}{self.suffix}"
pub_instance = create_publish_instance(instance_name, self.root)
instance_data["subset"] = subset_name
instance_data["instance_path"] = f"{self.root}/{instance_name}"
instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self)
self._add_instance_to_context(instance)
pub_instance.set_editor_property('add_external_assets', True)
assets = pub_instance.get_editor_property('asset_data_external')
ar = unreal.AssetRegistryHelpers.get_asset_registry()
for member in pre_create_data.get("members", []):
obj = ar.get_asset_by_object_path(member).get_asset()
assets.add(obj)
imprint(f"{self.root}/{instance_name}", instance.data_to_store())
return instance
except Exception as er:
six.reraise(
CreatorError,
CreatorError(f"Creator error: {er}"),
sys.exc_info()[2])
def collect_instances(self):
# cache instances if missing
self.cache_subsets(self.collection_shared_data)
for instance in self.collection_shared_data[
"unreal_cached_subsets"].get(self.identifier, []):
# Unreal saves metadata as string, so we need to convert it back
instance['creator_attributes'] = ast.literal_eval(
instance.get('creator_attributes', '{}'))
instance['publish_attributes'] = ast.literal_eval(
instance.get('publish_attributes', '{}'))
created_instance = CreatedInstance.from_existing(instance, self)
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, changes in update_list:
instance_node = created_inst.get("instance_path", "")
if not instance_node:
unreal.log_warning(
f"Instance node not found for {created_inst}")
continue
new_values = {
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
instance_node,
new_values
)
def remove_instances(self, instances):
for instance in instances:
instance_node = instance.data.get("instance_path", "")
if instance_node:
unreal.EditorAssetLibrary.delete_asset(instance_node)
self._remove_instance_from_context(instance)
@six.add_metaclass(ABCMeta)
class UnrealAssetCreator(UnrealBaseCreator):
"""Base class for Unreal creator plugins based on assets."""
def create(self, subset_name, instance_data, pre_create_data):
"""Create instance of the asset.
Args:
subset_name (str): Name of the subset.
instance_data (dict): Data for the instance.
pre_create_data (dict): Data for the instance.
Returns:
CreatedInstance: Created instance.
"""
try:
# Check if instance data has members, filled by the plugin.
# If not, use selection.
if not pre_create_data.get("members"):
pre_create_data["members"] = []
if pre_create_data.get("use_selection"):
utilib = unreal.EditorUtilityLibrary
sel_objects = utilib.get_selected_assets()
pre_create_data["members"] = [
a.get_path_name() for a in sel_objects]
super(UnrealAssetCreator, self).create(
subset_name,
instance_data,
pre_create_data)
except Exception as er:
six.reraise(
CreatorError,
CreatorError(f"Creator error: {er}"),
sys.exc_info()[2])
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection", label="Use selection", default=True)
]
@six.add_metaclass(ABCMeta)
class UnrealActorCreator(UnrealBaseCreator):
"""Base class for Unreal creator plugins based on actors."""
def create(self, subset_name, instance_data, pre_create_data):
"""Create instance of the asset.
Args:
subset_name (str): Name of the subset.
instance_data (dict): Data for the instance.
pre_create_data (dict): Data for the instance.
Returns:
CreatedInstance: Created instance.
"""
try:
if UNREAL_VERSION.major == 5:
world = unreal.UnrealEditorSubsystem().get_editor_world()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
# Check if the level is saved
if world.get_path_name().startswith("/Temp/"):
raise CreatorError(
"Level must be saved before creating instances.")
# Check if instance data has members, filled by the plugin.
# If not, use selection.
if not instance_data.get("members"):
actor_subsystem = unreal.EditorActorSubsystem()
sel_actors = actor_subsystem.get_selected_level_actors()
selection = [a.get_path_name() for a in sel_actors]
instance_data["members"] = selection
instance_data["level"] = world.get_path_name()
super(UnrealActorCreator, self).create(
subset_name,
instance_data,
pre_create_data)
except Exception as er:
six.reraise(
CreatorError,
CreatorError(f"Creator error: {er}"),
sys.exc_info()[2])
def get_pre_create_attr_defs(self):
return [
UILabelDef("Select actors to create instance from them.")
]
class Loader(LoaderPlugin, ABC):

View file

@ -17,9 +17,8 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(ToolsBtnsWidget, self).__init__(parent)
create_btn = QtWidgets.QPushButton("Create...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
publish_btn = QtWidgets.QPushButton("Publisher...", self)
manage_btn = QtWidgets.QPushButton("Manage...", self)
render_btn = QtWidgets.QPushButton("Render...", self)
experimental_tools_btn = QtWidgets.QPushButton(
@ -28,7 +27,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(create_btn, 0)
layout.addWidget(load_btn, 0)
layout.addWidget(publish_btn, 0)
layout.addWidget(manage_btn, 0)
@ -36,7 +34,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
layout.addWidget(experimental_tools_btn, 0)
layout.addStretch(1)
create_btn.clicked.connect(self._on_create)
load_btn.clicked.connect(self._on_load)
publish_btn.clicked.connect(self._on_publish)
manage_btn.clicked.connect(self._on_manage)
@ -50,7 +47,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
self.tool_required.emit("loader")
def _on_publish(self):
self.tool_required.emit("publish")
self.tool_required.emit("publisher")
def _on_manage(self):
self.tool_required.emit("sceneinventory")

View file

@ -1,41 +1,38 @@
# -*- coding: utf-8 -*-
import unreal
from unreal import EditorAssetLibrary as eal
from unreal import EditorLevelLibrary as ell
from openpype.hosts.unreal.api.pipeline import instantiate
from openpype.pipeline import LegacyCreator
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator,
)
class CreateCamera(LegacyCreator):
"""Layout output for character rigs"""
class CreateCamera(UnrealAssetCreator):
"""Create Camera."""
name = "layoutMain"
identifier = "io.openpype.creators.unreal.camera"
label = "Camera"
family = "camera"
icon = "cubes"
icon = "fa.camera"
root = "/Game/OpenPype/Instances"
suffix = "_INS"
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
def __init__(self, *args, **kwargs):
super(CreateCamera, self).__init__(*args, **kwargs)
if len(selection) != 1:
raise CreatorError("Please select only one object.")
def process(self):
data = self.data
# Add the current level path to the metadata
if UNREAL_VERSION.major == 5:
world = unreal.UnrealEditorSubsystem().get_editor_world()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
name = data["subset"]
instance_data["level"] = world.get_path_name()
data["level"] = ell.get_editor_world().get_path_name()
if not eal.does_directory_exist(self.root):
eal.make_directory(self.root)
factory = unreal.LevelSequenceFactoryNew()
tools = unreal.AssetToolsHelpers().get_asset_tools()
tools.create_asset(name, f"{self.root}/{name}", None, factory)
asset_name = f"{self.root}/{name}/{name}.{name}"
data["members"] = [asset_name]
instantiate(f"{self.root}", name, data, None, self.suffix)
super(CreateCamera, self).create(
subset_name,
instance_data,
pre_create_data)

View file

@ -1,42 +1,13 @@
# -*- coding: utf-8 -*-
from unreal import EditorLevelLibrary
from openpype.pipeline import LegacyCreator
from openpype.hosts.unreal.api.pipeline import instantiate
from openpype.hosts.unreal.api.plugin import (
UnrealActorCreator,
)
class CreateLayout(LegacyCreator):
class CreateLayout(UnrealActorCreator):
"""Layout output for character rigs."""
name = "layoutMain"
identifier = "io.openpype.creators.unreal.layout"
label = "Layout"
family = "layout"
icon = "cubes"
root = "/Game"
suffix = "_INS"
def __init__(self, *args, **kwargs):
super(CreateLayout, self).__init__(*args, **kwargs)
def process(self):
data = self.data
name = data["subset"]
selection = []
# if (self.options or {}).get("useSelection"):
# sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
# selection = [a.get_path_name() for a in sel_objects]
data["level"] = EditorLevelLibrary.get_editor_world().get_path_name()
data["members"] = []
if (self.options or {}).get("useSelection"):
# Set as members the selected actors
for actor in EditorLevelLibrary.get_selected_level_actors():
data["members"].append("{}.{}".format(
actor.get_outer().get_name(), actor.get_name()))
instantiate(self.root, name, data, selection, self.suffix)

View file

@ -1,56 +1,57 @@
# -*- coding: utf-8 -*-
"""Create look in Unreal."""
import unreal # noqa
from openpype.hosts.unreal.api import pipeline, plugin
from openpype.pipeline import LegacyCreator
import unreal
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import (
create_folder
)
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator
)
from openpype.lib import UILabelDef
class CreateLook(LegacyCreator):
class CreateLook(UnrealAssetCreator):
"""Shader connections defining shape look."""
name = "unrealLook"
label = "Unreal - Look"
identifier = "io.openpype.creators.unreal.look"
label = "Look"
family = "look"
icon = "paint-brush"
root = "/Game/Avalon/Assets"
suffix = "_INS"
def create(self, subset_name, instance_data, pre_create_data):
# We need to set this to True for the parent class to work
pre_create_data["use_selection"] = True
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
if len(selection) != 1:
raise CreatorError("Please select only one asset.")
def process(self):
name = self.data["subset"]
selected_asset = selection[0]
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
look_directory = "/Game/OpenPype/Looks"
# Create the folder
path = f"{self.root}/{self.data['asset']}"
new_name = pipeline.create_folder(path, name)
full_path = f"{path}/{new_name}"
folder_name = create_folder(look_directory, subset_name)
path = f"{look_directory}/{folder_name}"
instance_data["look"] = path
# Create a new cube static mesh
ar = unreal.AssetRegistryHelpers.get_asset_registry()
cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube")
# Create the avalon publish instance object
container_name = f"{name}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=full_path)
# Get the mesh of the selected object
original_mesh = ar.get_asset_by_object_path(selection[0]).get_asset()
materials = original_mesh.get_editor_property('materials')
original_mesh = ar.get_asset_by_object_path(selected_asset).get_asset()
materials = original_mesh.get_editor_property('static_materials')
self.data["members"] = []
pre_create_data["members"] = []
# Add the materials to the cube
for material in materials:
name = material.get_editor_property('material_slot_name')
object_path = f"{full_path}/{name}.{name}"
mat_name = material.get_editor_property('material_slot_name')
object_path = f"{path}/{mat_name}.{mat_name}"
unreal_object = unreal.EditorAssetLibrary.duplicate_loaded_asset(
cube.get_asset(), object_path
)
@ -61,8 +62,16 @@ class CreateLook(LegacyCreator):
unreal_object.add_material(
material.get_editor_property('material_interface'))
self.data["members"].append(object_path)
pre_create_data["members"].append(object_path)
unreal.EditorAssetLibrary.save_asset(object_path)
pipeline.imprint(f"{full_path}/{container_name}", self.data)
super(CreateLook, self).create(
subset_name,
instance_data,
pre_create_data)
def get_pre_create_attr_defs(self):
return [
UILabelDef("Select the asset from which to create the look.")
]
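
Note: a condensed sketch of the look build-up above — one cube is duplicated from the engine's basic shape per material slot and assigned that slot's material. The source mesh path below is illustrative; `duplicate_loaded_asset`, `add_material` and the property names are taken from the hunk itself.

import unreal

ar = unreal.AssetRegistryHelpers.get_asset_registry()
cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube")
# Hypothetical source mesh; any StaticMesh asset path works here.
mesh = ar.get_asset_by_object_path("/Game/Assets/Hero.Hero").get_asset()

for material in mesh.get_editor_property("static_materials"):
    slot = material.get_editor_property("material_slot_name")
    object_path = f"/Game/OpenPype/Looks/lookMain/{slot}.{slot}"
    new_mesh = unreal.EditorAssetLibrary.duplicate_loaded_asset(
        cube.get_asset(), object_path)
    new_mesh.add_material(
        material.get_editor_property("material_interface"))
    unreal.EditorAssetLibrary.save_asset(object_path)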

View file

@ -1,117 +1,138 @@
# -*- coding: utf-8 -*-
import unreal
from openpype.hosts.unreal.api import pipeline
from openpype.pipeline import LegacyCreator
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import (
get_subsequences
)
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator
)
from openpype.lib import UILabelDef
class CreateRender(LegacyCreator):
class CreateRender(UnrealAssetCreator):
"""Create instance for sequence for rendering"""
name = "unrealRender"
label = "Unreal - Render"
identifier = "io.openpype.creators.unreal.render"
label = "Render"
family = "render"
icon = "cube"
asset_types = ["LevelSequence"]
root = "/Game/OpenPype/PublishInstances"
suffix = "_INS"
def process(self):
subset = self.data["subset"]
icon = "eye"
def create(self, subset_name, instance_data, pre_create_data):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# The asset name is the the third element of the path which contains
# the map.
# The index of the split path is 3 because the first element is an
# empty string, as the path begins with "/Content".
a = unreal.EditorUtilityLibrary.get_selected_assets()[0]
asset_name = a.get_path_name().split("/")[3]
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [
a.get_path_name() for a in sel_objects
if a.get_class().get_name() == "LevelSequence"]
# Get the master sequence and the master level.
# There should be only one sequence and one level in the directory.
filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
sequences = ar.get_assets(filter)
ms = sequences[0].get_editor_property('object_path')
filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
levels = ar.get_assets(filter)
ml = levels[0].get_editor_property('object_path')
if not selection:
raise CreatorError("Please select at least one Level Sequence.")
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [
a.get_path_name() for a in sel_objects
if a.get_class().get_name() in self.asset_types]
else:
selection.append(self.data['sequence'])
seq_data = None
unreal.log(f"selection: {selection}")
for sel in selection:
selected_asset = ar.get_asset_by_object_path(sel).get_asset()
selected_asset_path = selected_asset.get_path_name()
path = f"{self.root}"
unreal.EditorAssetLibrary.make_directory(path)
# Check if the selected asset is a level sequence asset.
if selected_asset.get_class().get_name() != "LevelSequence":
unreal.log_warning(
f"Skipping {selected_asset.get_name()}. It isn't a Level "
"Sequence.")
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# The asset name is the third element of the path which
# contains the map.
# To get the asset name, we strip the "/Game/OpenPype/" prefix
# from the path and then split it by "/".
sel_path = selected_asset_path
asset_name = sel_path.replace("/Game/OpenPype/", "").split("/")[0]
for a in selection:
ms_obj = ar.get_asset_by_object_path(ms).get_asset()
# Get the master sequence and the master level.
# There should be only one sequence and one level in the directory.
ar_filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
sequences = ar.get_assets(ar_filter)
master_seq = sequences[0].get_asset().get_path_name()
master_seq_obj = sequences[0].get_asset()
ar_filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
levels = ar.get_assets(ar_filter)
master_lvl = levels[0].get_asset().get_path_name()
seq_data = None
# If the selected asset is the master sequence, we get its data
# and then we create the instance for the master sequence.
# Otherwise, we cycle from the master sequence to find the selected
# sequence and we get its data. This data will be used to create
# the instance for the selected sequence. In particular,
# we get the frame range of the selected sequence and its final
# output path.
master_seq_data = {
"sequence": master_seq_obj,
"output": f"{master_seq_obj.get_name()}",
"frame_range": (
master_seq_obj.get_playback_start(),
master_seq_obj.get_playback_end())}
if a == ms:
seq_data = {
"sequence": ms_obj,
"output": f"{ms_obj.get_name()}",
"frame_range": (
ms_obj.get_playback_start(), ms_obj.get_playback_end())
}
if selected_asset_path == master_seq:
seq_data = master_seq_data
else:
seq_data_list = [{
"sequence": ms_obj,
"output": f"{ms_obj.get_name()}",
"frame_range": (
ms_obj.get_playback_start(), ms_obj.get_playback_end())
}]
seq_data_list = [master_seq_data]
for s in seq_data_list:
subscenes = pipeline.get_subsequences(s.get('sequence'))
for seq in seq_data_list:
subscenes = get_subsequences(seq.get('sequence'))
for ss in subscenes:
for sub_seq in subscenes:
sub_seq_obj = sub_seq.get_sequence()
curr_data = {
"sequence": ss.get_sequence(),
"output": (f"{s.get('output')}/"
f"{ss.get_sequence().get_name()}"),
"sequence": sub_seq_obj,
"output": (f"{seq.get('output')}/"
f"{sub_seq_obj.get_name()}"),
"frame_range": (
ss.get_start_frame(), ss.get_end_frame() - 1)
}
sub_seq.get_start_frame(),
sub_seq.get_end_frame() - 1)}
if ss.get_sequence().get_path_name() == a:
# If the selected asset is the current sub-sequence,
# we get its data and we break the loop.
# Otherwise, we add the current sub-sequence data to
# the list of sequences to check.
if sub_seq_obj.get_path_name() == selected_asset_path:
seq_data = curr_data
break
seq_data_list.append(curr_data)
# If we found the selected asset, we break the loop.
if seq_data is not None:
break
# If we didn't find the selected asset, we don't create the
# instance.
if not seq_data:
unreal.log_warning(
f"Skipping {selected_asset.get_name()}. It isn't a "
"sub-sequence of the master sequence.")
continue
d = self.data.copy()
d["members"] = [a]
d["sequence"] = a
d["master_sequence"] = ms
d["master_level"] = ml
d["output"] = seq_data.get('output')
d["frameStart"] = seq_data.get('frame_range')[0]
d["frameEnd"] = seq_data.get('frame_range')[1]
instance_data["members"] = [selected_asset_path]
instance_data["sequence"] = selected_asset_path
instance_data["master_sequence"] = master_seq
instance_data["master_level"] = master_lvl
instance_data["output"] = seq_data.get('output')
instance_data["frameStart"] = seq_data.get('frame_range')[0]
instance_data["frameEnd"] = seq_data.get('frame_range')[1]
container_name = f"{subset}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=path)
pipeline.imprint(f"{path}/{container_name}", d)
super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data)
def get_pre_create_attr_defs(self):
return [
UILabelDef("Select the sequence to render.")
]
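
Note: the traversal in the create() body above is a breadth-first walk — start from the master sequence and expand sub-sequences until the selected one turns up. A minimal sketch of just that search, assuming the `get_subsequences` helper imported in this file:

def find_sequence_data(master_seq_obj, selected_path):
    # Seed the queue with the master sequence; a Python for-loop keeps
    # consuming items appended while iterating, giving a simple BFS.
    queue = [{
        "sequence": master_seq_obj,
        "output": master_seq_obj.get_name(),
        "frame_range": (
            master_seq_obj.get_playback_start(),
            master_seq_obj.get_playback_end()),
    }]
    for data in queue:
        if data["sequence"].get_path_name() == selected_path:
            return data
        for sub in get_subsequences(data["sequence"]):
            sub_obj = sub.get_sequence()
            queue.append({
                "sequence": sub_obj,
                "output": f"{data['output']}/{sub_obj.get_name()}",
                "frame_range": (
                    sub.get_start_frame(), sub.get_end_frame() - 1),
            })
    return None  # not reachable from the master sequence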

View file

@ -1,35 +1,13 @@
# -*- coding: utf-8 -*-
"""Create Static Meshes as FBX geometry."""
import unreal # noqa
from openpype.hosts.unreal.api.pipeline import (
instantiate,
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator,
)
from openpype.pipeline import LegacyCreator
class CreateStaticMeshFBX(LegacyCreator):
"""Static FBX geometry."""
class CreateStaticMeshFBX(UnrealAssetCreator):
"""Create Static Meshes as FBX geometry."""
name = "unrealStaticMeshMain"
label = "Unreal - Static Mesh"
identifier = "io.openpype.creators.unreal.staticmeshfbx"
label = "Static Mesh (FBX)"
family = "unrealStaticMesh"
icon = "cube"
asset_types = ["StaticMesh"]
root = "/Game"
suffix = "_INS"
def __init__(self, *args, **kwargs):
super(CreateStaticMeshFBX, self).__init__(*args, **kwargs)
def process(self):
name = self.data["subset"]
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
unreal.log("selection: {}".format(selection))
instantiate(self.root, name, self.data, selection, self.suffix)

View file

@ -1,41 +1,31 @@
"""Create UAsset."""
# -*- coding: utf-8 -*-
from pathlib import Path
import unreal
from openpype.hosts.unreal.api import pipeline
from openpype.pipeline import LegacyCreator
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator,
)
class CreateUAsset(LegacyCreator):
"""UAsset."""
class CreateUAsset(UnrealAssetCreator):
"""Create UAsset."""
name = "UAsset"
identifier = "io.openpype.creators.unreal.uasset"
label = "UAsset"
family = "uasset"
icon = "cube"
root = "/Game/OpenPype"
suffix = "_INS"
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
def __init__(self, *args, **kwargs):
super(CreateUAsset, self).__init__(*args, **kwargs)
def process(self):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
subset = self.data["subset"]
path = f"{self.root}/PublishInstances/"
unreal.EditorAssetLibrary.make_directory(path)
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
if len(selection) != 1:
raise RuntimeError("Please select only one object.")
raise CreatorError("Please select only one object.")
obj = selection[0]
@ -43,19 +33,14 @@ class CreateUAsset(LegacyCreator):
sys_path = unreal.SystemLibrary.get_system_path(asset)
if not sys_path:
raise RuntimeError(
raise CreatorError(
f"{Path(obj).name} is not on the disk. Likely it needs to"
"be saved first.")
if Path(sys_path).suffix != ".uasset":
raise RuntimeError(f"{Path(sys_path).name} is not a UAsset.")
raise CreatorError(f"{Path(sys_path).name} is not a UAsset.")
unreal.log("selection: {}".format(selection))
container_name = f"{subset}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=path)
data = self.data.copy()
data["members"] = selection
pipeline.imprint(f"{path}/{container_name}", data)
super(CreateUAsset, self).create(
subset_name,
instance_data,
pre_create_data)

View file

@ -0,0 +1,46 @@
import unreal
import pyblish.api
class CollectInstanceMembers(pyblish.api.InstancePlugin):
"""
Collect members of instance.
This collector gathers the assets for the families that support
including them as External Data, and adds them to the instance
as members.
"""
order = pyblish.api.CollectorOrder + 0.1
hosts = ["unreal"]
families = ["camera", "look", "unrealStaticMesh", "uasset"]
label = "Collect Instance Members"
def process(self, instance):
"""Collect members of instance."""
self.log.info("Collecting instance members")
ar = unreal.AssetRegistryHelpers.get_asset_registry()
inst_path = instance.data.get('instance_path')
inst_name = instance.data.get('objectName')
pub_instance = ar.get_asset_by_object_path(
f"{inst_path}.{inst_name}").get_asset()
if not pub_instance:
self.log.error(f"{inst_path}.{inst_name}")
raise RuntimeError(f"Instance {instance} not found.")
if not pub_instance.get_editor_property("add_external_assets"):
# No external assets in the instance
return
assets = pub_instance.get_editor_property('asset_data_external')
members = [asset.get_path_name() for asset in assets]
self.log.debug(f"Members: {members}")
instance.data["members"] = members

View file

@ -1,67 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect publishable instances in Unreal."""
import ast
import unreal # noqa
import pyblish.api
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
from openpype.pipeline.publish import KnownPublishError
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by OpenPypePublishInstance class
This collector finds all paths containing `OpenPypePublishInstance` class
asset
Identifier:
id (str): "pyblish.avalon.instance"
"""
label = "Collect Instances"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["unreal"]
def process(self, context):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
class_name = [
"/Script/OpenPype",
"OpenPypePublishInstance"
] if (
UNREAL_VERSION.major == 5
and UNREAL_VERSION.minor > 0
) else "OpenPypePublishInstance" # noqa
instance_containers = ar.get_assets_by_class(class_name, True)
for container_data in instance_containers:
asset = container_data.get_asset()
data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
data["objectName"] = container_data.asset_name
# convert to strings
data = {str(key): str(value) for (key, value) in data.items()}
if not data.get("family"):
raise KnownPublishError("instance has no family")
# content of container
members = ast.literal_eval(data.get("members"))
self.log.debug(members)
self.log.debug(asset.get_path_name())
# remove instance container
self.log.info("Creating instance for {}".format(asset.get_name()))
instance = context.create_instance(asset.get_name())
instance[:] = members
# Store the exact members of the object set
instance.data["setMembers"] = members
instance.data["families"] = [data.get("family")]
instance.data["level"] = data.get("level")
instance.data["parent"] = data.get("parent")
label = "{0} ({1})".format(asset.get_name()[:-4],
data["asset"])
instance.data["label"] = label
instance.data.update(data)

View file

@ -3,10 +3,9 @@
import os
import unreal
from unreal import EditorAssetLibrary as eal
from unreal import EditorLevelLibrary as ell
from openpype.pipeline import publish
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
class ExtractCamera(publish.Extractor):
@ -18,6 +17,8 @@ class ExtractCamera(publish.Extractor):
optional = True
def process(self, instance):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# Define extract output file path
staging_dir = self.staging_dir(instance)
fbx_filename = "{}.fbx".format(instance.name)
@ -26,23 +27,54 @@ class ExtractCamera(publish.Extractor):
self.log.info("Performing extraction..")
# Check if the loaded level is the same of the instance
current_level = ell.get_editor_world().get_path_name()
if UNREAL_VERSION.major == 5:
world = unreal.UnrealEditorSubsystem().get_editor_world()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
current_level = world.get_path_name()
assert current_level == instance.data.get("level"), \
"Wrong level loaded"
for member in instance[:]:
data = eal.find_asset_data(member)
if data.asset_class == "LevelSequence":
ar = unreal.AssetRegistryHelpers.get_asset_registry()
sequence = ar.get_asset_by_object_path(member).get_asset()
unreal.SequencerTools.export_fbx(
ell.get_editor_world(),
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
os.path.join(staging_dir, fbx_filename)
)
break
for member in instance.data.get('members'):
data = ar.get_asset_by_object_path(member)
if UNREAL_VERSION.major == 5:
is_level_sequence = (
data.asset_class_path.asset_name == "LevelSequence")
else:
is_level_sequence = (data.asset_class == "LevelSequence")
if is_level_sequence:
sequence = data.get_asset()
if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor >= 1:
params = unreal.SequencerExportFBXParams(
world=world,
root_sequence=sequence,
sequence=sequence,
bindings=sequence.get_bindings(),
master_tracks=sequence.get_master_tracks(),
fbx_file_name=os.path.join(staging_dir, fbx_filename)
)
unreal.SequencerTools.export_level_sequence_fbx(params)
elif UNREAL_VERSION.major == 4 and UNREAL_VERSION.minor == 26:
unreal.SequencerTools.export_fbx(
world,
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
os.path.join(staging_dir, fbx_filename)
)
else:
# Unreal 5.0 or 4.27
unreal.SequencerTools.export_level_sequence_fbx(
world,
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
os.path.join(staging_dir, fbx_filename)
)
if not os.path.isfile(os.path.join(staging_dir, fbx_filename)):
raise RuntimeError("Failed to extract camera")
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -29,13 +29,13 @@ class ExtractLook(publish.Extractor):
for member in instance:
asset = ar.get_asset_by_object_path(member)
object = asset.get_asset()
obj = asset.get_asset()
name = asset.get_editor_property('asset_name')
json_element = {'material': str(name)}
material_obj = object.get_editor_property('static_materials')[0]
material_obj = obj.get_editor_property('static_materials')[0]
material = material_obj.material_interface
base_color = mat_lib.get_material_property_input_node(

View file

@ -22,7 +22,13 @@ class ExtractUAsset(publish.Extractor):
staging_dir = self.staging_dir(instance)
filename = "{}.uasset".format(instance.name)
obj = instance[0]
members = instance.data.get("members", [])
if not members:
raise RuntimeError("No members found in instance.")
# UAsset publishing supports only one member
obj = members[0]
asset = ar.get_asset_by_object_path(obj).get_asset()
sys_path = unreal.SystemLibrary.get_system_path(asset)

View file

@ -81,11 +81,14 @@ def run_subprocess(*args, **kwargs):
Entered arguments and keyword arguments are passed to subprocess Popen.
On Windows, 'creationflags' is filled with flags that prevent the
creation of a new console window.
Args:
*args: Variable length arument list passed to Popen.
*args: Variable length argument list passed to Popen.
**kwargs : Arbitrary keyword arguments passed to Popen. Is possible to
pass `logging.Logger` object under "logger" if want to use
different than lib's logger.
pass `logging.Logger` object under "logger" to use custom logger
for output.
Returns:
str: Full output of subprocess concatenated stdout and stderr.
@ -95,6 +98,17 @@ def run_subprocess(*args, **kwargs):
return code.
"""
# Modify creation flags on windows to hide console window if in UI mode
if (
platform.system().lower() == "windows"
and "creationflags" not in kwargs
):
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
# Get environments from kwargs or use current process environments if they
# were not passed.
env = kwargs.get("env") or os.environ
@ -107,10 +121,10 @@ def run_subprocess(*args, **kwargs):
logger = Logger.get_logger("run_subprocess")
# set overrides
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
kwargs['env'] = filtered_env
kwargs["stdout"] = kwargs.get("stdout", subprocess.PIPE)
kwargs["stderr"] = kwargs.get("stderr", subprocess.PIPE)
kwargs["stdin"] = kwargs.get("stdin", subprocess.PIPE)
kwargs["env"] = filtered_env
proc = subprocess.Popen(*args, **kwargs)
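
Note: the same Windows flag block is added in three places in this commit (here, in get_ffprobe_data and in the burnin script). A hedged sketch of a shared helper that would factor it out; this helper does not exist in the codebase:

import platform
import subprocess

def detached_creationflags():
    """Flags that suppress a console window on Windows, 0 elsewhere."""
    if platform.system().lower() != "windows":
        return 0
    return (
        subprocess.CREATE_NEW_PROCESS_GROUP
        | getattr(subprocess, "DETACHED_PROCESS", 0)
        | getattr(subprocess, "CREATE_NO_WINDOW", 0)
    )

# Usage mirroring the hunks in this commit:
kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE}
flags = detached_creationflags()
if flags:
    kwargs["creationflags"] = flags
proc = subprocess.Popen(["ffprobe", "-version"], **kwargs)
out, _ = proc.communicate()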

View file

@ -28,6 +28,7 @@ def import_filepath(filepath, module_name=None):
# Prepare module object where content of file will be parsed
module = types.ModuleType(module_name)
module.__file__ = filepath
if six.PY3:
# Use loader so module has full specs
@ -41,7 +42,6 @@ def import_filepath(filepath, module_name=None):
# Execute content and store it to module object
six.exec_(_stream.read(), module.__dict__)
module.__file__ = filepath
return module
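
Note: the hunk only moves the `module.__file__` assignment above the exec call. That matters because the executed source can read `__file__` at import time. A standalone stdlib illustration:

import types

module = types.ModuleType("example")
module.__file__ = "/path/to/example.py"  # must be set before execution
exec("print(__file__)", module.__dict__)  # prints the path, no NameError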

View file

@ -5,6 +5,7 @@ import json
import collections
import tempfile
import subprocess
import platform
import xml.etree.ElementTree
@ -745,11 +746,18 @@ def get_ffprobe_data(path_to_file, logger=None):
logger.debug("FFprobe command: {}".format(
subprocess.list2cmdline(args)
))
popen = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
kwargs = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
}
if platform.system().lower() == "windows":
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
popen = subprocess.Popen(args, **kwargs)
popen_stdout, popen_stderr = popen.communicate()
if popen_stdout:

View file

@ -266,7 +266,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"PYBLISHPLUGINPATH",
"NUKE_PATH",
"TOOL_ENV",
"FOUNDRY_LICENSE"
"FOUNDRY_LICENSE",
"OPENPYPE_SG_USER",
]
# Add OpenPype version if we are running from build.

View file

@ -139,7 +139,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_KEY",
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME"
"OPENPYPE_USERNAME",
"OPENPYPE_SG_USER",
]
# Add OpenPype version if we are running from build.
@ -194,7 +195,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata_path = os.path.join(output_dir, metadata_filename)
# Convert output dir to `{root}/rest/of/path/...` with Anatomy
success, roothless_mtdt_p = self.anatomy.find_root_template_from_path(
success, rootless_mtdt_p = self.anatomy.find_root_template_from_path(
metadata_path)
if not success:
# `rootless_path` is not set to `output_dir` if none of roots match
@ -202,9 +203,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(output_dir))
roothless_mtdt_p = metadata_path
rootless_mtdt_p = metadata_path
return metadata_path, roothless_mtdt_p
return metadata_path, rootless_mtdt_p
def _submit_deadline_post_job(self, instance, job, instances):
"""Submit publish job to Deadline.
@ -237,7 +238,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_path, roothless_metadata_path = \
metadata_path, rootless_metadata_path = \
self._create_metadata_path(instance)
environment = {
@ -274,7 +275,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
args = [
"--headless",
'publish',
roothless_metadata_path,
rootless_metadata_path,
"--targets", "deadline",
"--targets", "farm"
]
@ -411,7 +412,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
staging = representation.get("stagingDir")
staging = self.anatomy.fill_roots(staging)
staging = self.anatomy.fill_root(staging)
resource_files.append(
(frame,
os.path.join(staging,
@ -588,7 +589,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
host_name = os.environ.get("AVALON_APP", "")
collections, remainders = clique.assemble(exp_files)
# create representation for every collected sequento ce
# create representation for every collected sequence
for collection in collections:
ext = collection.tail.lstrip(".")
preview = False
@ -656,7 +657,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
self._solve_families(instance, preview)
# add reminders as representations
# add remainders as representations
for remainder in remainders:
ext = remainder.split(".")[-1]
@ -676,7 +677,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"name": ext,
"ext": ext,
"files": os.path.basename(remainder),
"stagingDir": os.path.dirname(remainder),
"stagingDir": staging,
}
preview = match_aov_pattern(
@ -1060,7 +1061,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
publish_job.update({"ftrack": ftrack})
metadata_path, roothless_metadata_path = self._create_metadata_path(
metadata_path, rootless_metadata_path = self._create_metadata_path(
instance)
self.log.info("Writing json file: {}".format(metadata_path))

View file

@ -91,7 +91,7 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
for job_id in render_job_ids:
job_info = self._get_job_info(job_id)
frame_list = job_info["Props"]["Frames"]
frame_list = job_info["Props"].get("Frames")
if frame_list:
all_frame_lists.extend(frame_list.split(','))

View file

@ -83,6 +83,11 @@ class CollectShotgridSession(pyblish.api.ContextPlugin):
"login to shotgrid withing openpype Tray"
)
# Set OPENPYPE_SG_USER with login so other deadline tasks can make
# use of it
self.log.info("Setting OPENPYPE_SG_USER to '%s'.", login)
os.environ["OPENPYPE_SG_USER"] = login
session = shotgun_api3.Shotgun(
base_url=shotgrid_url,
script_name=shotgrid_script_name,
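
Note: together with the OPENPYPE_SG_USER entries added to the Deadline submitters' environment whitelists above, this gives the following hand-off shape (a sketch; the login value is hypothetical):

import os

# Collector side: export the resolved Shotgrid login.
os.environ["OPENPYPE_SG_USER"] = "artist@studio.com"

# Submitter side: copy whitelisted variables into the farm job.
keys = ["OPENPYPE_USERNAME", "OPENPYPE_SG_USER"]  # subset of the real list
environment = {key: os.environ[key] for key in keys if key in os.environ}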

View file

@ -7,7 +7,7 @@ from openpype.pipeline.publish import get_publish_repre_path
class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
"""
Create published Files from representations and add it to version. If
representation is tagged add shotgrid review, it will add it in
representation is tagged as shotgrid review, it will add it in
path to movie for a movie file or path to frame for an image sequence.
"""
@ -27,11 +27,11 @@ class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
local_path = get_publish_repre_path(
instance, representation, False
)
code = os.path.basename(local_path)
if representation.get("tags", []):
continue
code = os.path.basename(local_path)
published_file = self._find_existing_publish(
code, context, shotgrid_version
)

View file

@ -37,9 +37,9 @@ class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
self.log.info("Use existing Shotgrid version: {}".format(version))
data_to_update = {}
status = context.data.get("intent", {}).get("value")
if status:
data_to_update["sg_status_list"] = status
intent = context.data.get("intent")
if intent:
data_to_update["sg_status_list"] = intent["value"]
for representation in instance.data.get("representations", []):
local_path = get_publish_repre_path(

View file

@ -187,7 +187,7 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
repre_review_path = get_publish_repre_path(
instance, repre, False
)
if os.path.exists(repre_review_path):
if repre_review_path and os.path.exists(repre_review_path):
review_path = repre_review_path
if "burnin" in tags: # burnin has precedence if exists
break

View file

@ -1390,6 +1390,8 @@ class CreateContext:
self.autocreators = {}
# Manual creators
self.manual_creators = {}
# Creators that are disabled
self.disabled_creators = {}
self.convertors_plugins = {}
self.convertor_items_by_id = {}
@ -1667,6 +1669,7 @@ class CreateContext:
# Discover and prepare creators
creators = {}
disabled_creators = {}
autocreators = {}
manual_creators = {}
report = discover_creator_plugins(return_report=True)
@ -1703,6 +1706,9 @@ class CreateContext:
self,
self.headless
)
if not creator.enabled:
disabled_creators[creator_identifier] = creator
continue
creators[creator_identifier] = creator
if isinstance(creator, AutoCreator):
autocreators[creator_identifier] = creator
@ -1713,6 +1719,7 @@ class CreateContext:
self.manual_creators = manual_creators
self.creators = creators
self.disabled_creators = disabled_creators
def _reset_convertor_plugins(self):
convertors_plugins = {}
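
Note: a minimal standalone model of the routing this hunk introduces. The class stand-ins below are not the real base classes; only the `enabled` attribute and the dictionary names come from the hunk:

class Creator:
    enabled = True

class AutoCreator(Creator):
    pass

def route_creators(plugins):
    creators, disabled, autocreators, manual = {}, {}, {}, {}
    for identifier, creator in plugins.items():
        if not creator.enabled:
            disabled[identifier] = creator  # kept, but never offered
            continue
        creators[identifier] = creator
        if isinstance(creator, AutoCreator):
            autocreators[identifier] = creator
        else:
            manual[identifier] = creator
    return creators, disabled, autocreators, manual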

View file

@ -12,6 +12,7 @@ import pyblish.api
from openpype.lib import (
Logger,
import_filepath,
filter_profiles
)
from openpype.settings import (
@ -301,12 +302,8 @@ def publish_plugins_discover(paths=None):
if not mod_ext == ".py":
continue
module = types.ModuleType(mod_name)
module.__file__ = abspath
try:
with open(abspath, "rb") as f:
six.exec_(f.read(), module.__dict__)
module = import_filepath(abspath, mod_name)
# Store reference to original module, to avoid
# garbage collection from collecting it's global
@ -683,6 +680,12 @@ def get_publish_repre_path(instance, repre, only_published=False):
staging_dir = repre.get("stagingDir")
if not staging_dir:
staging_dir = get_instance_staging_dir(instance)
# Expand the staging dir path in case it's been stored with the root
# template syntax
anatomy = instance.context.data["anatomy"]
staging_dir = anatomy.fill_root(staging_dir)
src_path = os.path.normpath(os.path.join(staging_dir, filename))
if os.path.exists(src_path):
return src_path
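
Note: a sketch of the case this guards against, assuming the usual Anatomy root template syntax (the path below is illustrative):

# Staging dir persisted in rootless form by an earlier publish step:
staging_dir = "{root[work]}/proj/publish/v001"
staging_dir = anatomy.fill_root(staging_dir)
# -> e.g. "P:/projects/proj/publish/v001" on the current platform
src_path = os.path.normpath(os.path.join(staging_dir, filename))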

View file

@ -52,7 +52,16 @@ def _get_ffprobe_data(source):
"-show_streams",
source
]
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
kwargs = {
"stdout": subprocess.PIPE,
}
if platform.system().lower() == "windows":
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
proc = subprocess.Popen(command, **kwargs)
out = proc.communicate()[0]
if proc.returncode != 0:
raise RuntimeError("Failed to run: %s" % command)
@ -331,12 +340,18 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
)
print("Launching command: {}".format(command))
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
kwargs = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"shell": True,
}
if platform.system().lower() == "windows":
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
proc = subprocess.Popen(command, **kwargs)
_stdout, _stderr = proc.communicate()
if _stdout:

View file

@ -23,4 +23,4 @@
],
"tools_env": [],
"active": true
}
}

View file

@ -255,4 +255,4 @@
]
}
}
}
}

View file

@ -4,4 +4,4 @@
"darwin": "/Volumes/path",
"linux": "/mnt/share/projects"
}
}
}

View file

@ -41,4 +41,4 @@
"Compositing": {
"short_name": "comp"
}
}
}

View file

@ -33,4 +33,4 @@
"create_first_version": false,
"custom_templates": []
}
}
}

View file

@ -82,4 +82,4 @@
"active": false
}
}
}
}

View file

@ -16,4 +16,4 @@
"anatomy_template_key_metadata": "render"
}
}
}
}

View file

@ -163,4 +163,4 @@
]
}
}
}
}

View file

@ -496,4 +496,4 @@
"farm_status_profiles": []
}
}
}
}

View file

@ -17,4 +17,4 @@
}
}
}
}
}

View file

@ -607,4 +607,4 @@
"linux": []
},
"project_environments": {}
}
}

View file

@ -50,4 +50,4 @@
"skip_timelines_check": []
}
}
}
}

View file

@ -97,4 +97,4 @@
}
]
}
}
}

View file

@ -76,4 +76,4 @@
"active": true
}
}
}
}

View file

@ -10,4 +10,4 @@
"note_status_shortname": "wfa"
}
}
}
}

View file

@ -5,4 +5,4 @@
"image_format": "exr",
"multipass": true
}
}
}

View file

@ -407,6 +407,16 @@
"optional": false,
"active": true
},
"ValidateGLSLMaterial": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateGLSLPlugin": {
"enabled": true,
"optional": false,
"active": true
},
"ValidateRenderImageRule": {
"enabled": true,
"optional": false,
@ -898,6 +908,11 @@
"optional": true,
"active": true,
"bake_attributes": []
},
"ExtractGLB": {
"enabled": true,
"active": true,
"ogsfx_path": "/maya2glTF/PBR/shaders/glTF_PBR.ogsfx"
}
},
"load": {
@ -1103,4 +1118,4 @@
"ValidateNoAnimation": false
}
}
}
}

View file

@ -446,6 +446,41 @@
"value": false
}
],
"reformat_nodes_config": {
"enabled": false,
"reposition_nodes": [
{
"node_class": "Reformat",
"knobs": [
{
"type": "text",
"name": "type",
"value": "to format"
},
{
"type": "text",
"name": "format",
"value": "HD_1080"
},
{
"type": "text",
"name": "filter",
"value": "Lanczos6"
},
{
"type": "bool",
"name": "black_outside",
"value": true
},
{
"type": "bool",
"name": "pbb",
"value": false
}
]
}
]
},
"extension": "mov",
"add_custom_tags": []
}
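
Note: a hedged sketch of how one `reposition_nodes` entry might be applied inside Nuke. The knob names and values come from the defaults above, but the applying code is not part of this diff:

import nuke

entry = {
    "node_class": "Reformat",
    "knobs": [
        {"name": "type", "value": "to format"},
        {"name": "format", "value": "HD_1080"},
        {"name": "filter", "value": "Lanczos6"},
        {"name": "black_outside", "value": True},
        {"name": "pbb", "value": False},
    ],
}

node = nuke.createNode(entry["node_class"])
for knob in entry["knobs"]:
    node[knob["name"]].setValue(knob["value"])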
@ -533,4 +568,4 @@
"profiles": []
},
"filters": {}
}
}

View file

@ -67,4 +67,4 @@
"create_first_version": false,
"custom_templates": []
}
}
}

View file

@ -27,4 +27,4 @@
"handleEnd": 10
}
}
}
}

View file

@ -4,4 +4,4 @@
"review": true
}
}
}
}

View file

@ -19,4 +19,4 @@
"step": "step"
}
}
}
}

View file

@ -17,4 +17,4 @@
]
}
}
}
}

View file

@ -321,4 +321,4 @@
"active": true
}
}
}
}

View file

@ -40,9 +40,18 @@
"mark_for_review": true,
"default_variant": "Main",
"default_variants": []
},
"auto_detect_render": {
"allow_group_rename": true,
"group_name_template": "L{group_index}",
"group_idx_offset": 10,
"group_idx_padding": 3
}
},
"publish": {
"CollectRenderInstances": {
"ignore_render_pass_transparency": false
},
"ExtractSequence": {
"review_bg": [
255,
@ -103,4 +112,4 @@
"custom_templates": []
},
"filters": {}
}
}

View file

@ -14,4 +14,4 @@
"project_setup": {
"dev_mode": true
}
}
}

View file

@ -141,4 +141,4 @@
"layer_name_regex": "(?P<layer>L[0-9]{3}_\\w+)_(?P<pass>.+)"
}
}
}
}

View file

@ -337,6 +337,134 @@
}
}
},
"nukeassist": {
"enabled": true,
"label": "Nuke Assist",
"icon": "{}/app_icons/nuke.png",
"host_name": "nuke",
"environment": {
"NUKE_PATH": [
"{NUKE_PATH}",
"{OPENPYPE_STUDIO_PLUGINS}/nuke"
]
},
"variants": {
"13-2": {
"use_python_2": false,
"executables": {
"windows": [
"C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe"
],
"darwin": [],
"linux": [
"/usr/local/Nuke13.2v1/Nuke13.2"
]
},
"arguments": {
"windows": ["--nukeassist"],
"darwin": ["--nukeassist"],
"linux": ["--nukeassist"]
},
"environment": {}
},
"13-0": {
"use_python_2": false,
"executables": {
"windows": [
"C:\\Program Files\\Nuke13.0v1\\Nuke13.0.exe"
],
"darwin": [],
"linux": [
"/usr/local/Nuke13.0v1/Nuke13.0"
]
},
"arguments": {
"windows": ["--nukeassist"],
"darwin": ["--nukeassist"],
"linux": ["--nukeassist"]
},
"environment": {}
},
"12-2": {
"use_python_2": true,
"executables": {
"windows": [
"C:\\Program Files\\Nuke12.2v3\\Nuke12.2.exe"
],
"darwin": [],
"linux": [
"/usr/local/Nuke12.2v3Nuke12.2"
]
},
"arguments": {
"windows": ["--nukeassist"],
"darwin": ["--nukeassist"],
"linux": ["--nukeassist"]
},
"environment": {}
},
"12-0": {
"use_python_2": true,
"executables": {
"windows": [
"C:\\Program Files\\Nuke12.0v1\\Nuke12.0.exe"
],
"darwin": [],
"linux": [
"/usr/local/Nuke12.0v1/Nuke12.0"
]
},
"arguments": {
"windows": ["--nukeassist"],
"darwin": ["--nukeassist"],
"linux": ["--nukeassist"]
},
"environment": {}
},
"11-3": {
"use_python_2": true,
"executables": {
"windows": [
"C:\\Program Files\\Nuke11.3v1\\Nuke11.3.exe"
],
"darwin": [],
"linux": [
"/usr/local/Nuke11.3v5/Nuke11.3"
]
},
"arguments": {
"windows": ["--nukeassist"],
"darwin": ["--nukeassist"],
"linux": ["--nukeassist"]
},
"environment": {}
},
"11-2": {
"use_python_2": true,
"executables": {
"windows": [
"C:\\Program Files\\Nuke11.2v2\\Nuke11.2.exe"
],
"darwin": [],
"linux": []
},
"arguments": {
"windows": ["--nukeassist"],
"darwin": ["--nukeassist"],
"linux": ["--nukeassist"]
},
"environment": {}
},
"__dynamic_keys_labels__": {
"13-2": "13.2",
"13-0": "13.0",
"12-2": "12.2",
"12-0": "12.0",
"11-3": "11.3",
"11-2": "11.2"
}
}
},
"nukex": {
"enabled": true,
"label": "Nuke X",
@ -1302,7 +1430,9 @@
"variant_label": "Current",
"use_python_2": false,
"executables": {
"windows": ["C:/Program Files/CelAction/CelAction2D Studio/CelAction2D.exe"],
"windows": [
"C:/Program Files/CelAction/CelAction2D Studio/CelAction2D.exe"
],
"darwin": [],
"linux": []
},
@ -1365,4 +1495,4 @@
}
},
"additional_apps": {}
}
}

View file

@ -18,4 +18,4 @@
"production_version": "",
"staging_version": "",
"version_check_interval": 5
}
}

View file

@ -211,4 +211,4 @@
"linux": ""
}
}
}
}

View file

@ -87,4 +87,4 @@
"renderman": "Pixar Renderman"
}
}
}
}

View file

@ -440,8 +440,9 @@ class RootEntity(BaseItemEntity):
os.makedirs(dirpath)
self.log.debug("Saving data to: {}\n{}".format(subpath, value))
data = json.dumps(value, indent=4) + "\n"
with open(output_path, "w") as file_stream:
json.dump(value, file_stream, indent=4)
file_stream.write(data)
dynamic_values_item = self.collect_dynamic_schema_entities()
dynamic_values_item.save_values()
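
Note: the behavioral difference is only the trailing newline — `json.dump` does not emit one, which is what produces the long run of settings files in this commit whose only change is a final newline. Minimal stdlib illustration:

import json

value = {"active": True}
data = json.dumps(value, indent=4) + "\n"  # explicit trailing newline
with open("/tmp/settings.json", "w") as file_stream:
    file_stream.write(data)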

View file

@ -195,6 +195,43 @@
}
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "auto_detect_render",
"label": "Auto-Detect Create Render",
"is_group": true,
"children": [
{
"type": "label",
"label": "The creator tries to auto-detect Render Layers and Render Passes in scene. For Render Layers is used group name as a variant and for Render Passes is used TVPaint layer name.<br/><br/>Group names can be renamed by their used order in scene. The renaming template where can be used <b>{group_index}</b> formatting key which is filled by \"used position index of group\".<br/>- Template: <b>L{group_index}</b><br/>- Group offset: <b>10</b><br/>- Group padding: <b>3</b><br/>Would create group names \"<b>L010</b>\", \"<b>L020</b>\", ..."
},
{
"type": "boolean",
"key": "allow_group_rename",
"label": "Allow group rename"
},
{
"type": "text",
"key": "group_name_template",
"label": "Group name template"
},
{
"key": "group_idx_offset",
"label": "Group index Offset",
"type": "number",
"decimal": 0,
"minimum": 1
},
{
"key": "group_idx_padding",
"type": "number",
"label": "Group index Padding",
"decimal": 0,
"minimum": 1
}
]
}
]
},
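
Note: a small sketch of the naming rule described in the label above, assuming the offset acts as the step between consecutive groups (offset 10, padding 3, template L{group_index}):

template = "L{group_index}"
offset, padding = 10, 3

for position in range(3):  # first three groups used in the scene
    group_index = str(offset * (position + 1)).zfill(padding)
    print(template.format(group_index=group_index))
# -> L010, L020, L030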
@ -204,6 +241,20 @@
"key": "publish",
"label": "Publish plugins",
"children": [
{
"type": "dict",
"collapsible": true,
"key": "CollectRenderInstances",
"label": "Collect Render Instances",
"is_group": true,
"children": [
{
"type": "boolean",
"key": "ignore_render_pass_transparency",
"label": "Ignore Render Pass opacity"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -408,6 +408,14 @@
"key": "ValidateCurrentRenderLayerIsRenderable",
"label": "Validate Current Render Layer Has Renderable Camera"
},
{
"key": "ValidateGLSLMaterial",
"label": "Validate GLSL Material"
},
{
"key": "ValidateGLSLPlugin",
"label": "Validate GLSL Plugin"
},
{
"key": "ValidateRenderImageRule",
"label": "Validate Images File Rule (Workspace)"
@ -956,6 +964,30 @@
"is_list": true
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "ExtractGLB",
"label": "Extract GLB",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "active",
"label": "Active"
},
{
"type": "text",
"key": "ogsfx_path",
"label": "GLSL Shader Directory"
}
]
}
]
}

View file

@ -271,6 +271,10 @@
{
"type": "separator"
},
{
"type": "label",
"label": "Currently we are supporting also multiple reposition nodes. <br/>Older single reformat node is still supported <br/>and if it is activated then preference will <br/>be on it. If you want to use multiple reformat <br/>nodes then you need to disable single reformat <br/>node and enable multiple <b>Reformat nodes</b> <a href=\"settings://project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/reformat_nodes_config/enabled\"><b>here</b></a>."
},
{
"type": "boolean",
"key": "reformat_node_add",
@ -287,6 +291,49 @@
}
]
},
{
"key": "reformat_nodes_config",
"type": "dict",
"label": "Reformat Nodes",
"collapsible": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "label",
"label": "Reposition knobs supported only.<br/>You can add multiple reformat nodes <br/>and set their knobs. Order of reformat <br/>nodes is important. First reformat node <br/>will be applied first and last reformat <br/>node will be applied last."
},
{
"key": "reposition_nodes",
"type": "list",
"label": "Reposition nodes",
"object_type": {
"type": "dict",
"children": [
{
"key": "node_class",
"label": "Node class",
"type": "text"
},
{
"type": "schema_template",
"name": "template_nuke_knob_inputs",
"template_data": [
{
"label": "Node knobs",
"key": "knobs"
}
]
}
]
}
}
]
},
{
"type": "separator"
},

View file

@ -25,6 +25,14 @@
"nuke_label": "Nuke"
}
},
{
"type": "schema_template",
"name": "template_nuke",
"template_data": {
"nuke_type": "nukeassist",
"nuke_label": "Nuke Assist"
}
},
{
"type": "schema_template",
"name": "template_nuke",

View file

@ -0,0 +1,15 @@
goto comment
SYNOPSIS
Helper script for running scripts through the OpenPype environment.
DESCRIPTION
This script is usually used as a replacement for a full build when testing farm integrations like Deadline.
EXAMPLE
cmd> .\openpype_console.bat path/to/python_script.py
:comment
cd "%~dp0\.."
echo %OPENPYPE_MONGO%
.poetry\bin\poetry.exe run python start.py %*
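
Note on the header of this script: cmd has no block-comment syntax, so `goto comment` jumps over the documentation lines and execution resumes at the `:comment` label before the real commands run.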