Merge branch 'develop' into enchancement/OP-2630_acescg_maya

# Conflicts:
#	openpype/hosts/maya/api/lib_renderproducts.py
Toke Stuart Jepsen 2023-02-23 15:57:06 +00:00
commit f22ece7357
138 changed files with 4763 additions and 1783 deletions

@@ -13,7 +13,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
# Should be as last hook because must change launch arguments to string
order = 1000
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
app_groups = ["nuke", "nukeassist", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):

@@ -210,7 +210,8 @@ def switch_item(container,
if any(not x for x in [asset_name, subset_name, representation_name]):
repre_id = container["representation"]
representation = get_representation_by_id(project_name, repre_id)
repre_parent_docs = get_representation_parents(representation)
repre_parent_docs = get_representation_parents(
project_name, representation)
if repre_parent_docs:
version, subset, asset, _ = repre_parent_docs
else:

@@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook):
"Make sure the environment in fusion settings has "
"'FUSION_PYTHON3_HOME' set correctly and make sure "
"Python 3 is installed in the given path."
f"\n\nPYTHON36: {fusion_python3_home}"
f"\n\nPYTHON PATH: {fusion_python3_home}"
)
self.log.info(f"Setting {py3_var}: '{py3_dir}'...")

@@ -80,6 +80,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"outputDir": os.path.dirname(path),
"ext": ext, # todo: should be redundant
"label": label,
"task": context.data["task"],
"frameStart": context.data["frameStart"],
"frameEnd": context.data["frameEnd"],
"frameStartHandle": context.data["frameStartHandle"],

@@ -1,6 +1,4 @@
import os
from pprint import pformat
import pyblish.api
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
@@ -23,23 +21,53 @@ class Fusionlocal(pyblish.api.InstancePlugin):
# This plug-in runs only once and thus assumes all instances
# currently will render the same frame range
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
key = f"__hasRun{self.__class__.__name__}"
if context.data.get(key, False):
return
else:
context.data[key] = True
current_comp = context.data["currentComp"]
context.data[key] = True
self.render_once(context)
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
path = instance.data["path"]
output_dir = instance.data["outputDir"]
ext = os.path.splitext(os.path.basename(path))[-1]
basename = os.path.basename(path)
head, ext = os.path.splitext(basename)
files = [
f"{head}{str(frame).zfill(4)}{ext}"
for frame in range(frame_start, frame_end + 1)
]
repre = {
'name': ext[1:],
'ext': ext[1:],
'frameStart': f"%0{len(str(frame_end))}d" % frame_start,
'files': files,
"stagingDir": output_dir,
}
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(repre)
# review representation
repre_preview = repre.copy()
repre_preview["name"] = repre_preview["ext"] = "mp4"
repre_preview["tags"] = ["review", "ftrackreview", "delete"]
instance.data["representations"].append(repre_preview)
def render_once(self, context):
"""Render context comp only once, even with more render instances"""
current_comp = context.data["currentComp"]
frame_start = context.data["frameStartHandle"]
frame_end = context.data["frameEndHandle"]
self.log.info("Starting render")
self.log.info("Start frame: {}".format(frame_start))
self.log.info("End frame: {}".format(frame_end))
self.log.info(f"Start frame: {frame_start}")
self.log.info(f"End frame: {frame_end}")
with comp_lock_and_undo_chunk(current_comp):
result = current_comp.Render({
@@ -48,26 +76,5 @@ class Fusionlocal(pyblish.api.InstancePlugin):
"Wait": True
})
if "representations" not in instance.data:
instance.data["representations"] = []
collected_frames = os.listdir(output_dir)
repre = {
'name': ext[1:],
'ext': ext[1:],
'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
'files': collected_frames,
"stagingDir": output_dir,
}
instance.data["representations"].append(repre)
# review representation
repre_preview = repre.copy()
repre_preview["name"] = repre_preview["ext"] = "mp4"
repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
instance.data["representations"].append(repre_preview)
self.log.debug(f"_ instance.data: {pformat(instance.data)}")
if not result:
raise RuntimeError("Comp render failed")

@@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None):
return ([x for x in child_list if rt.superClassOf(x) == node_type]
if node_type else child_list)
def get_current_renderer():
"""get current renderer"""
return rt.renderers.production
def get_default_render_folder(project_setting=None):
return (project_setting["max"]
["RenderSettings"]
["default_render_image_folder"])
def set_framerange(start_frame, end_frame):
"""
Note:
Frame range can be specified in different types. Possible values are:
* `1` - Single frame.
* `2` - Active time segment ( animationRange ).
* `3` - User specified Range.
* `4` - User specified Frame pickup string (for example `1,3,5-12`).
Todo:
Current type is hard-coded, there should be a custom setting for this.
"""
rt.rendTimeType = 4
if start_frame is not None and end_frame is not None:
frame_range = "{0}-{1}".format(start_frame, end_frame)
rt.rendPickupFrames = frame_range
def get_multipass_setting(project_setting=None):
return (project_setting["max"]
["RenderSettings"]
["multipass"])
def get_max_version():
"""
Args:
get max version date for deadline
Returns:
#(25000, 62, 0, 25, 0, 0, 997, 2023, "")
max_info[7] = max version date
"""
max_info = rt.maxversion()
return max_info[7]
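For the frame-pickup mode used by set_framerange above (rendTimeType 4), the range is passed to 3ds Max as a string. A hedged sketch of the resulting pymxs calls, with hypothetical frame values:

from pymxs import runtime as rt

rt.rendTimeType = 4                # user-specified frame pickup string
rt.rendPickupFrames = "1001-1100"  # what set_framerange(1001, 1100) sets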

@@ -0,0 +1,114 @@
# Render Element Example : For scanline render, VRay
# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
from pymxs import runtime as rt
from openpype.hosts.max.api.lib import (
get_current_renderer,
get_default_render_folder
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
class RenderProducts(object):
def __init__(self, project_settings=None):
self._project_settings = project_settings
if not self._project_settings:
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
def render_product(self, container):
folder = rt.maxFilePath
file = rt.maxFileName
folder = folder.replace("\\", "/")
setting = self._project_settings
render_folder = get_default_render_folder(setting)
filename, ext = os.path.splitext(file)
output_file = os.path.join(folder,
render_folder,
filename,
container)
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
full_render_list = []
beauty = self.beauty_render_product(output_file, img_fmt)
full_render_list.append(beauty)
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer == "VUE_File_Renderer":
return full_render_list
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = self.render_elements_product(output_file,
img_fmt)
if render_elem_list:
full_render_list.extend(iter(render_elem_list))
return full_render_list
if renderer == "Arnold":
aov_list = self.arnold_render_product(output_file,
img_fmt)
if aov_list:
full_render_list.extend(iter(aov_list))
return full_render_list
def beauty_render_product(self, folder, fmt):
beauty_output = f"{folder}.####.{fmt}"
beauty_output = beauty_output.replace("\\", "/")
return beauty_output
# TODO: Get the arnold render product
def arnold_render_product(self, folder, fmt):
"""Get all the Arnold AOVs"""
aovs = []
amw = rt.MaxtoAOps.AOVsManagerWindow()
aov_mgr = rt.renderers.current.AOVManager
# Check if there is any aov group set in AOV manager
aov_group_num = len(aov_mgr.drivers)
if aov_group_num < 1:
return
for i in range(aov_group_num):
# get the specific AOV group
for aov in aov_mgr.drivers[i].aov_list:
render_element = f"{folder}_{aov.name}.####.{fmt}"
render_element = render_element.replace("\\", "/")
aovs.append(render_element)
# close the AOVs manager window
amw.close()
return aovs
def render_elements_product(self, folder, fmt):
"""Get all the render element output files. """
render_dirname = []
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
# get render elements from the renders
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
if renderlayer_name.enabled:
render_element = f"{folder}_{renderpass}.####.{fmt}"
render_element = render_element.replace("\\", "/")
render_dirname.append(render_element)
return render_dirname
def image_format(self):
return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
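All three product types share one frame-padded naming pattern. A standalone sketch of that naming (paths and AOV names are hypothetical):

def product_path(folder, fmt, aov=None):
    # "####" is the frame-number placeholder; separators follow the code above
    base = folder if aov is None else f"{folder}_{aov}"
    return f"{base}.####.{fmt}".replace("\\", "/")

# product_path("renders/shot/maxrender", "exr")      -> 'renders/shot/maxrender.####.exr'
# product_path("renders/shot/maxrender", "exr", "Z") -> 'renders/shot/maxrender_Z.####.exr'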

@@ -0,0 +1,168 @@
import os
from pymxs import runtime as rt
from openpype.lib import Logger
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.hosts.max.api.lib import (
set_framerange,
get_current_renderer,
get_default_render_folder
)
class RenderSettings(object):
log = Logger.get_logger("RenderSettings")
_aov_chars = {
"dot": ".",
"dash": "-",
"underscore": "_"
}
def __init__(self, project_settings=None):
"""
Set up the naming convention for the render
elements for the deadline submission
"""
self._project_settings = project_settings
if not self._project_settings:
self._project_settings = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
def set_render_camera(self, selection):
# initialized before the loop so an empty selection also fails
found = False
for sel in selection:
# to avoid Attribute Error from pymxs wrapper
if rt.classOf(sel) in rt.Camera.classes:
found = True
rt.viewport.setCamera(sel)
break
if not found:
raise RuntimeError("Camera not found")
def render_output(self, container):
folder = rt.maxFilePath
# hard-coded, should be customized in the setting
file = rt.maxFileName
folder = folder.replace("\\", "/")
# hard-coded, set the renderoutput path
setting = self._project_settings
render_folder = get_default_render_folder(setting)
filename, ext = os.path.splitext(file)
output_dir = os.path.join(folder,
render_folder,
filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# hard-coded, should be customized in the setting
context = get_current_project_asset()
# get project resolution
width = context["data"].get("resolutionWidth")
height = context["data"].get("resolutionHeight")
# Set Frame Range
frame_start = context["data"].get("frame_start")
frame_end = context["data"].get("frame_end")
set_framerange(frame_start, frame_end)
# get the production render
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
output = os.path.join(output_dir, container)
try:
aov_separator = self._aov_chars[(
self._project_settings["maya"]
["RenderSettings"]
["aov_separator"]
)]
except KeyError:
aov_separator = "."
output_filename = "{0}..{1}".format(output, img_fmt)
output_filename = output_filename.replace("{aov_separator}",
aov_separator)
rt.rendOutputFilename = output_filename
if renderer == "VUE_File_Renderer":
return
# TODO: Finish the arnold render setup
if renderer == "Arnold":
self.arnold_setup()
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
self.render_element_layer(output, width, height, img_fmt)
rt.rendSaveFile = True
def arnold_setup(self):
# get Arnold RenderView run in the background
# for setting up renderable camera
arv = rt.MAXToAOps.ArnoldRenderView()
render_camera = rt.viewport.GetCamera()
arv.setOption("Camera", str(render_camera))
# TODO: add AOVs and extension
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
setup_cmd = (
f"""
amw = MaxtoAOps.AOVsManagerWindow()
amw.close()
aovmgr = renderers.current.AOVManager
aovmgr.drivers = #()
img_fmt = "{img_fmt}"
if img_fmt == "png" then driver = ArnoldPNGDriver()
if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
if img_fmt == "exr" then driver = ArnoldEXRDriver()
if img_fmt == "tif" then driver = ArnoldTIFFDriver()
if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
append aovmgr.drivers driver
aovmgr.drivers[1].aov_list = #()
""")
rt.execute(setup_cmd)
arv.close()
def render_element_layer(self, dir, width, height, ext):
"""For Renderers with render elements"""
rt.renderWidth = width
rt.renderHeight = height
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
if render_elem_num < 1:
return
for i in range(render_elem_num):
renderlayer_name = render_elem.GetRenderElement(i)
target, renderpass = str(renderlayer_name).split(":")
aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
render_elem.SetRenderElementFileName(i, aov_name)
def get_render_output(self, container, output_dir):
output = os.path.join(output_dir, container)
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
output_filename = "{0}..{1}".format(output, img_fmt)
return output_filename
def get_render_element(self):
orig_render_elem = []
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
if render_elem_num < 1:
return orig_render_elem
for i in range(render_elem_num):
render_element = render_elem.GetRenderElementFilename(i)
orig_render_elem.append(render_element)
return orig_render_elem
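The AOV separator is stored by name in the settings and mapped through _aov_chars, with a dot as the KeyError fallback. A minimal sketch of that lookup:

_aov_chars = {"dot": ".", "dash": "-", "underscore": "_"}

def resolve_aov_separator(settings_value):
    # Settings store the separator's name, not the character itself.
    return _aov_chars.get(settings_value, ".")

# resolve_aov_separator("underscore") -> '_'
# resolve_aov_separator("unknown")    -> '.'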

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating camera."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
class CreateRender(plugin.MaxCreator):
identifier = "io.openpype.creators.max.render"
label = "Render"
family = "maxrender"
icon = "gear"
def create(self, subset_name, instance_data, pre_create_data):
from pymxs import runtime as rt
sel_obj = list(rt.selection)
instance = super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
container_name = instance.data.get("instance_node")
container = rt.getNodeByName(container_name)
# TODO: Disable "Add to Containers?" Panel
# parent the selected cameras into the container
for obj in sel_obj:
obj.parent = container
# for additional work on the node:
# instance_node = rt.getNodeByName(instance.get("instance_node"))
# set viewport camera for rendering(mandatory for deadline)
RenderSettings().set_render_camera(sel_obj)
# set output paths for rendering(mandatory for deadline)
RenderSettings().render_output(container_name)

@@ -1,7 +1,10 @@
import os
from openpype.pipeline import (
load
load,
get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class FbxLoader(load.LoaderPlugin):
@@ -36,14 +39,26 @@ importFile @"{filepath}" #noPrompt using:FBXIMP
container_name = f"{name}_CON"
asset = rt.getNodeByName(f"{name}")
# rename the container with "_CON"
container = rt.container(name=container_name)
asset.Parent = container
return container
return containerise(
name, [asset], context, loader=self.__class__.__name__)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
fbx_objects = self.get_container_children(node)
for fbx_object in fbx_objects:
fbx_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

@@ -1,7 +1,9 @@
import os
from openpype.pipeline import (
load
load, get_representation_path
)
from openpype.hosts.max.api.pipeline import containerise
from openpype.hosts.max.api import lib
class MaxSceneLoader(load.LoaderPlugin):
@@ -35,16 +37,26 @@ class MaxSceneLoader(load.LoaderPlugin):
self.log.error("Something failed when loading.")
max_container = max_containers.pop()
container_name = f"{name}_CON"
# rename the container with "_CON"
# get the original container
container = rt.container(name=container_name)
max_container.Parent = container
return container
return containerise(
name, [max_container], context, loader=self.__class__.__name__)
def update(self, container, representation):
from pymxs import runtime as rt
path = get_representation_path(representation)
node = rt.getNodeByName(container["instance_node"])
max_objects = self.get_container_children(node)
for max_object in max_objects:
max_object.source = path
lib.imprint(container["instance_node"], {
"representation": str(representation["_id"])
})
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)

@@ -80,7 +80,7 @@ importFile @"{file_path}" #noPrompt
def remove(self, container):
from pymxs import runtime as rt
node = container["node"]
node = rt.getNodeByName(container["instance_node"])
rt.delete(node)
@staticmethod

@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
"""Collect Render"""
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api.lib import get_max_version
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name
class CollectRender(pyblish.api.InstancePlugin):
"""Collect Render for Deadline"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect 3dsmax Render Layers"
hosts = ['max']
families = ["maxrender"]
def process(self, instance):
context = instance.context
folder = rt.maxFilePath
file = rt.maxFileName
current_file = os.path.join(folder, file)
filepath = current_file.replace("\\", "/")
context.data['currentFile'] = current_file
asset = get_current_asset_name()
render_layer_files = RenderProducts().render_product(instance.name)
folder = folder.replace("\\", "/")
img_format = RenderProducts().image_format()
project_name = context.data["projectName"]
asset_doc = context.data["assetEntity"]
asset_id = asset_doc["_id"]
version_doc = get_last_version_by_subset_name(project_name,
instance.name,
asset_id)
self.log.debug("version_doc: {0}".format(version_doc))
version_int = 1
if version_doc:
version_int += int(version_doc["name"])
self.log.debug(f"Setting {version_int} to context.")
context.data["version"] = version_int
# setup the plugin as 3dsmax for the internal renderer
data = {
"subset": instance.name,
"asset": asset,
"publish": True,
"maxversion": str(get_max_version()),
"imageFormat": img_format,
"family": 'maxrender',
"families": ['maxrender'],
"source": filepath,
"expectedFiles": render_layer_files,
"plugin": "3dsmax",
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],
"version": version_int
}
self.log.info("data: {0}".format(data))
instance.data.update(data)
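The collected version is derived from the last published version of the same subset. A small sketch of that increment rule (the version document shape is assumed from the code above):

def next_publish_version(last_version_doc):
    # First publish gets 1; otherwise last version + 1.
    version = 1
    if last_version_doc:
        version += int(last_version_doc["name"])
    return version

# next_publish_version(None)          -> 1
# next_publish_version({"name": "3"}) -> 4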

@@ -198,12 +198,18 @@ class ARenderProducts:
"""Constructor."""
self.layer = layer
self.render_instance = render_instance
self.multipart = False
self.multipart = self.get_multipart()
# Initialize
self.layer_data = self._get_layer_data()
self.layer_data.products = self.get_render_products()
def get_multipart(self):
raise NotImplementedError(
"The render product implementation does not have a "
"\"get_multipart\" method."
)
def has_camera_token(self):
# type: () -> bool
"""Check if camera token is in image prefix.
@@ -532,16 +538,20 @@ class RenderProductsArnold(ARenderProducts):
return prefix
def _get_aov_render_products(self, aov, cameras=None):
"""Return all render products for the AOV"""
products = []
aov_name = self._get_attr(aov, "name")
def get_multipart(self):
multipart = False
multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
if multilayer or merge_AOVs:
multipart = True
return multipart
def _get_aov_render_products(self, aov, cameras=None):
"""Return all render products for the AOV"""
products = []
aov_name = self._get_attr(aov, "name")
ai_drivers = cmds.listConnections("{}.outputs".format(aov),
source=True,
destination=False,
@@ -599,7 +609,7 @@ class RenderProductsArnold(ARenderProducts):
ext=ext,
aov=aov_name,
driver=ai_driver,
multipart=multipart,
multipart=self.multipart,
camera=camera,
colorspace=colorspace
)
@@ -773,6 +783,14 @@ class RenderProductsVray(ARenderProducts):
renderer = "vray"
def get_multipart(self):
multipart = False
image_format = self._get_attr("vraySettings.imageFormatStr")
if image_format == "exr (multichannel)":
multipart = True
return multipart
def get_renderer_prefix(self):
# type: () -> str
"""Get image prefix for V-Ray.
@@ -839,11 +857,6 @@ class RenderProductsVray(ARenderProducts):
if default_ext in {"exr (multichannel)", "exr (deep)"}:
default_ext = "exr"
# Define multipart.
multipart = False
if image_format_str == "exr (multichannel)":
multipart = True
products = []
# add beauty as default when not disabled
@@ -856,7 +869,7 @@ class RenderProductsVray(ARenderProducts):
ext=default_ext,
camera=camera,
colorspace=lib.get_color_management_output_transform(),
multipart=multipart
multipart=self.multipart
)
)
@@ -869,10 +882,10 @@ class RenderProductsVray(ARenderProducts):
productName="Alpha",
ext=default_ext,
camera=camera,
multipart=multipart
multipart=self.multipart
)
)
if multipart:
if self.multipart:
# AOVs are merged in m-channel file, only main layer is rendered
return products
@@ -1035,6 +1048,34 @@ class RenderProductsRedshift(ARenderProducts):
renderer = "redshift"
unmerged_aovs = {"Cryptomatte"}
def get_files(self, product):
# When outputting AOVs we need to replace Redshift specific AOV tokens
# with Maya render tokens for generating file sequences. We validate to
# a specific AOV fileprefix so we only need to account for one
# replacement.
if not product.multipart and product.driver:
file_prefix = self._get_attr(product.driver + ".filePrefix")
self.layer_data.filePrefix = file_prefix.replace(
"<BeautyPath>/<BeautyFile>",
"<Scene>/<RenderLayer>/<RenderLayer>"
)
return super(RenderProductsRedshift, self).get_files(product)
def get_multipart(self):
# For Redshift we don't directly return upon forcing multilayer
# due to some AOVs still being written into separate files,
# like Cryptomatte.
# AOVs are merged in multi-channel file
multipart = False
force_layer = bool(
self._get_attr("redshiftOptions.exrForceMultilayer")
)
if force_layer:
multipart = True
return multipart
def get_renderer_prefix(self):
"""Get image prefix for Redshift.
@@ -1074,16 +1115,6 @@ class RenderProductsRedshift(ARenderProducts):
for c in self.get_renderable_cameras()
]
# For Redshift we don't directly return upon forcing multilayer
# due to some AOVs still being written into separate files,
# like Cryptomatte.
# AOVs are merged in multi-channel file
multipart = False
force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer")) # noqa
exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart"))
if exMultipart or force_layer:
multipart = True
# Get Redshift Extension from image format
image_format = self._get_attr("redshiftOptions.imageFormat") # integer
ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
@@ -1105,7 +1136,7 @@ class RenderProductsRedshift(ARenderProducts):
continue
aov_type = self._get_attr(aov, "aovType")
if multipart and aov_type not in self.unmerged_aovs:
if self.multipart and aov_type not in self.unmerged_aovs:
continue
# Any AOVs that still get processed, like Cryptomatte
@@ -1140,8 +1171,9 @@ class RenderProductsRedshift(ARenderProducts):
productName=aov_light_group_name,
aov=aov_name,
ext=ext,
multipart=multipart,
camera=camera)
multipart=False,
camera=camera,
driver=aov)
products.append(product)
if light_groups:
@@ -1154,8 +1186,9 @@ class RenderProductsRedshift(ARenderProducts):
product = RenderProduct(productName=aov_name,
aov=aov_name,
ext=ext,
multipart=multipart,
camera=camera)
multipart=False,
camera=camera,
driver=aov)
products.append(product)
# When a Beauty AOV is added manually, it will be rendered as
@@ -1170,7 +1203,7 @@ class RenderProductsRedshift(ARenderProducts):
products.insert(0,
RenderProduct(productName=beauty_name,
ext=ext,
multipart=multipart,
multipart=self.multipart,
camera=camera))
return products
@@ -1190,6 +1223,10 @@ class RenderProductsRenderman(ARenderProducts):
renderer = "renderman"
unmerged_aovs = {"PxrCryptomatte"}
def get_multipart(self):
# Implemented as display specific in "get_render_products".
return False
def get_render_products(self):
"""Get all AOVs.
@@ -1329,6 +1366,10 @@ class RenderProductsMayaHardware(ARenderProducts):
{"label": "EXR(exr)", "index": 40, "extension": "exr"}
]
def get_multipart(self):
# MayaHardware does not support multipart EXRs.
return False
def _get_extension(self, value):
result = None
if isinstance(value, int):
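The refactor moves the multipart decision into a per-renderer get_multipart override, which the base class now requires. A minimal sketch of that contract, using a hypothetical renderer backend and attribute:

class RenderProductsExample(ARenderProducts):
    """Hypothetical backend: multipart only when its EXR merge flag is on."""
    renderer = "example"

    def get_multipart(self):
        # Required override; the ARenderProducts base class raises
        # NotImplementedError when a backend does not implement it.
        return bool(self._get_attr("exampleOptions.mergeAOVs"))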

@@ -42,6 +42,7 @@ Provides:
import re
import os
import platform
import json
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -183,7 +184,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
self.log.info("multipart: {}".format(
multipart))
assert exp_files, "no file names were generated, this is bug"
self.log.info(exp_files)
self.log.info(
"expected files: {}".format(
json.dumps(exp_files, indent=4, sort_keys=True)
)
)
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV

@@ -22,6 +22,8 @@ class ExtractGLB(publish.Extractor):
self.log.info("Extracting GLB to: {}".format(path))
cmds.loadPlugin("maya2glTF", quiet=True)
nodes = instance[:]
self.log.info("Instance: {0}".format(nodes))
@@ -45,6 +47,7 @@
"glb": True,
"vno": True # visibleNodeOnly
}
with lib.maintained_selection():
cmds.select(nodes, hi=True, noExpand=True)
extract_gltf(staging_dir,

@@ -0,0 +1,207 @@
import os
from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder
)
from openpype.pipeline import PublishValidationError
class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
"""
Validate if the asset uses GLSL Shader
"""
order = ValidateContentsOrder + 0.1
families = ['gltf']
hosts = ['maya']
label = 'GLSL Shader for GLTF'
actions = [RepairAction]
optional = True
active = True
def process(self, instance):
shading_grp = self.get_material_from_shapes(instance)
if not shading_grp:
raise PublishValidationError("No shading group found")
invalid = self.get_texture_shader_invalid(instance)
if invalid:
raise PublishValidationError("Non GLSL Shader found: "
"{0}".format(invalid))
def get_material_from_shapes(self, instance):
shapes = cmds.ls(instance, type="mesh", long=True)
for shape in shapes:
shading_grp = cmds.listConnections(shape,
destination=True,
type="shadingEngine")
return shading_grp or []
def get_texture_shader_invalid(self, instance):
invalid = set()
shading_grp = self.get_material_from_shapes(instance)
for shading_group in shading_grp:
material_name = "{}.surfaceShader".format(shading_group)
material = cmds.listConnections(material_name,
source=True,
destination=False,
type="GLSLShader")
if not material:
# add material name
material = cmds.listConnections(material_name)[0]
invalid.add(material)
return list(invalid)
@classmethod
def repair(cls, instance):
"""
Repair instance by assigning GLSL Shader
to the material
"""
cls.assign_glsl_shader(instance)
return
@classmethod
def assign_glsl_shader(cls, instance):
"""
Convert StingrayPBS materials to GLSL shaders
for the glb export through the Maya2GLTF plugin
"""
meshes = cmds.ls(instance, type="mesh", long=True)
cls.log.info("meshes: {}".format(meshes))
# load the glsl shader plugin
cmds.loadPlugin("glslShader", quiet=True)
for mesh in meshes:
# create glsl shader
glsl = cmds.createNode('GLSLShader')
glsl_shading_grp = cmds.sets(name=glsl + "SG", empty=True,
renderable=True, noSurfaceShader=True)
cmds.connectAttr(glsl + ".outColor",
glsl_shading_grp + ".surfaceShader")
# load the maya2gltf shader
ogsfx_path = instance.context.data["project_settings"]["maya"]["publish"]["ExtractGLB"]["ogsfx_path"] # noqa
if not os.path.exists(ogsfx_path):
if ogsfx_path:
# if custom ogsfx path is not specified
# the log below is the warning for the user
cls.log.warning("ogsfx shader file "
"not found in {}".format(ogsfx_path))
cls.log.info("Find the ogsfx shader file in "
"default maya directory...")
# re-direct to search the ogsfx path in maya_dir
ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path
if not os.path.exists(ogsfx_path):
raise PublishValidationError("The ogsfx shader file does not " # noqa
"exist: {}".format(ogsfx_path)) # noqa
cmds.setAttr(glsl + ".shader", ogsfx_path, typ="string")
# list the materials used for the assets
shading_grp = cmds.listConnections(mesh,
destination=True,
type="shadingEngine")
# get the materials related to the selected assets
for material in shading_grp:
pbs_shader = cmds.listConnections(material,
destination=True,
type="StingrayPBS")
if pbs_shader:
cls.pbs_shader_conversion(pbs_shader, glsl)
# setting up to relink the texture if
# the mesh is with aiStandardSurface
arnold_shader = cmds.listConnections(material,
destination=True,
type="aiStandardSurface")
if arnold_shader:
cls.arnold_shader_conversion(arnold_shader, glsl)
cmds.sets(mesh, forceElement=str(glsl_shading_grp))
@classmethod
def pbs_shader_conversion(cls, main_shader, glsl):
cls.log.info("StringrayPBS detected "
"-> Can do texture conversion")
for shader in main_shader:
# get the file textures related to the PBS Shader
albedo = cmds.listConnections(shader +
".TEX_color_map")
if albedo:
dif_output = albedo[0] + ".outColor"
# get the glsl_shader input
# reconnect the file nodes to maya2gltf shader
glsl_dif = glsl + ".u_BaseColorTexture"
cmds.connectAttr(dif_output, glsl_dif)
# connect orm map if there is one
orm_packed = cmds.listConnections(shader +
".TEX_ao_map")
if orm_packed:
orm_output = orm_packed[0] + ".outColor"
mtl = glsl + ".u_MetallicTexture"
ao = glsl + ".u_OcclusionTexture"
rough = glsl + ".u_RoughnessTexture"
cmds.connectAttr(orm_output, mtl)
cmds.connectAttr(orm_output, ao)
cmds.connectAttr(orm_output, rough)
# connect nrm map if there is one
nrm = cmds.listConnections(shader +
".TEX_normal_map")
if nrm:
nrm_output = nrm[0] + ".outColor"
glsl_nrm = glsl + ".u_NormalTexture"
cmds.connectAttr(nrm_output, glsl_nrm)
@classmethod
def arnold_shader_conversion(cls, main_shader, glsl):
cls.log.info("aiStandardSurface detected "
"-> Can do texture conversion")
for shader in main_shader:
# get the file textures related to the PBS Shader
albedo = cmds.listConnections(shader + ".baseColor")
if albedo:
dif_output = albedo[0] + ".outColor"
# get the glsl_shader input
# reconnect the file nodes to maya2gltf shader
glsl_dif = glsl + ".u_BaseColorTexture"
cmds.connectAttr(dif_output, glsl_dif)
orm_packed = cmds.listConnections(shader +
".specularRoughness")
if orm_packed:
orm_output = orm_packed[0] + ".outColor"
mtl = glsl + ".u_MetallicTexture"
ao = glsl + ".u_OcclusionTexture"
rough = glsl + ".u_RoughnessTexture"
cmds.connectAttr(orm_output, mtl)
cmds.connectAttr(orm_output, ao)
cmds.connectAttr(orm_output, rough)
# connect nrm map if there is one
bump_node = cmds.listConnections(shader +
".normalCamera")
if bump_node:
for bump in bump_node:
nrm = cmds.listConnections(bump +
".bumpValue")
if nrm:
nrm_output = nrm[0] + ".outColor"
glsl_nrm = glsl + ".u_NormalTexture"
cmds.connectAttr(nrm_output, glsl_nrm)

@@ -0,0 +1,31 @@
from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder
)
class ValidateGLSLPlugin(pyblish.api.InstancePlugin):
"""
Validate that the maya2glTF plugin is loaded
"""
order = ValidateContentsOrder + 0.15
families = ['gltf']
hosts = ['maya']
label = 'maya2glTF plugin'
actions = [RepairAction]
def process(self, instance):
if not cmds.pluginInfo("maya2glTF", query=True, loaded=True):
raise RuntimeError("maya2glTF is not loaded")
@classmethod
def repair(cls, instance):
"""
Repair instance by loading the maya2glTF plugin
"""
return cmds.loadPlugin("maya2glTF", quiet=True)

@@ -63,5 +63,12 @@ class NukeAddon(OpenPypeModule, IHostAddon):
path_paths.append(quick_time_path)
env["PATH"] = os.pathsep.join(path_paths)
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(NUKE_ROOT_DIR, "hooks")
]
def get_workfile_extensions(self):
return [".nk"]

@@ -30,7 +30,6 @@ from .pipeline import (
parse_container,
update_container,
get_workfile_build_placeholder_plugins,
)
from .lib import (
INSTANCE_DATA_KNOB,
@@ -79,8 +78,6 @@ __all__ = (
"parse_container",
"update_container",
"get_workfile_build_placeholder_plugins",
"INSTANCE_DATA_KNOB",
"ROOT_DATA_KNOB",
"maintained_selection",

@@ -0,0 +1,4 @@
import os
ASSIST = bool(os.getenv("NUKEASSIST"))

@@ -50,6 +50,7 @@ from openpype.pipeline.colorspace import (
from openpype.pipeline.workfile import BuildWorkfile
from . import gizmo_menu
from .constants import ASSIST
from .workio import (
save_file,
@@ -215,7 +216,7 @@ def update_node_data(node, knobname, data):
class Knobby(object):
"""[DEPRICATED] For creating knob which it's type isn't
"""[DEPRECATED] For creating knob which it's type isn't
mapped in `create_knobs`
Args:
@@ -249,7 +250,7 @@ class Knobby(object):
def create_knobs(data, tab=None):
"""[DEPRICATED] Create knobs by data
"""Create knobs by data
Depending on the type of each dict value and creates the correct Knob.
@@ -343,7 +344,7 @@ def create_knobs(data, tab=None):
def imprint(node, data, tab=None):
"""[DEPRICATED] Store attributes with value on node
"""Store attributes with value on node
Parse user data into Node knobs.
Use `collections.OrderedDict` to ensure knob order.
@@ -398,8 +399,9 @@
node.addKnob(knob)
@deprecated
def add_publish_knob(node):
"""[DEPRICATED] Add Publish knob to node
"""[DEPRECATED] Add Publish knob to node
Arguments:
node (nuke.Node): nuke node to be processed
@@ -416,8 +418,9 @@
return node
@deprecated
def set_avalon_knob_data(node, data=None, prefix="avalon:"):
"""[DEPRICATED] Sets data into nodes's avalon knob
"""[DEPRECATED] Sets data into nodes's avalon knob
Arguments:
node (nuke.Node): Nuke node to imprint with data,
@@ -478,8 +481,9 @@ def set_avalon_knob_data(node, data=None, prefix="avalon:"):
return node
@deprecated
def get_avalon_knob_data(node, prefix="avalon:", create=True):
"""[DEPRICATED] Gets a data from nodes's avalon knob
"""[DEPRECATED] Gets a data from nodes's avalon knob
Arguments:
node (obj): Nuke node to search for data,
@@ -521,8 +525,9 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True):
return data
@deprecated
def fix_data_for_node_create(data):
"""[DEPRICATED] Fixing data to be used for nuke knobs
"""[DEPRECATED] Fixing data to be used for nuke knobs
"""
for k, v in data.items():
if isinstance(v, six.text_type):
@@ -532,8 +537,9 @@ def fix_data_for_node_create(data):
return data
@deprecated
def add_write_node_legacy(name, **kwarg):
"""[DEPRICATED] Adding nuke write node
"""[DEPRECATED] Adding nuke write node
Arguments:
name (str): nuke node name
kwarg (attrs): data for nuke knobs
@@ -697,7 +703,7 @@ def get_nuke_imageio_settings():
@deprecated("openpype.hosts.nuke.api.lib.get_nuke_imageio_settings")
def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
'''[DEPRICATED] Get preset data for dataflow (fileType, compression, bitDepth)
'''[DEPRECATED] Get preset data for dataflow (fileType, compression, bitDepth)
'''
assert any([creator, nodeclass]), nuke.message(
@@ -1241,7 +1247,7 @@ def create_write_node(
nodes to be created before write with dependency
review (bool)[optional]: adding review knob
farm (bool)[optional]: rendering workflow target
kwargs (dict)[optional]: additional key arguments for formating
kwargs (dict)[optional]: additional key arguments for formatting
Example:
prenodes = {
@@ -2258,14 +2264,20 @@ class WorkfileSettings(object):
node['frame_range'].setValue(range)
node['frame_range_lock'].setValue(True)
set_node_data(
self._root_node,
INSTANCE_DATA_KNOB,
{
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}
)
if not ASSIST:
set_node_data(
self._root_node,
INSTANCE_DATA_KNOB,
{
"handleStart": int(handle_start),
"handleEnd": int(handle_end)
}
)
else:
log.warning(
"NukeAssist mode is not allowing "
"updating custom knobs..."
)
def reset_resolution(self):
"""Set resolution to project resolution."""

@@ -60,6 +60,7 @@ from .workio import (
work_root,
current_file
)
from .constants import ASSIST
log = Logger.get_logger(__name__)
@@ -72,7 +73,6 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
MENU_LABEL = os.environ["AVALON_LABEL"]
# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None))
@@ -101,6 +101,12 @@ class NukeHost(
def get_workfile_extensions(self):
return file_extensions()
def get_workfile_build_placeholder_plugins(self):
return [
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin
]
def get_containers(self):
return ls()
@@ -200,45 +206,45 @@ def _show_workfiles():
host_tools.show_workfiles(parent=None, on_top=False)
def get_workfile_build_placeholder_plugins():
return [
NukePlaceholderLoadPlugin,
NukePlaceholderCreatePlugin
]
def _install_menu():
"""Install Avalon menu into Nuke's main menu bar."""
# uninstall original avalon menu
main_window = get_main_window()
menubar = nuke.menu("Nuke")
menu = menubar.addMenu(MENU_LABEL)
label = "{0}, {1}".format(
os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
)
Context.context_label = label
context_action = menu.addCommand(label)
context_action.setEnabled(False)
if not ASSIST:
label = "{0}, {1}".format(
os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
)
Context.context_label = label
context_action = menu.addCommand(label)
context_action.setEnabled(False)
# add separator after context label
menu.addSeparator()
menu.addSeparator()
menu.addCommand(
"Work Files...",
_show_workfiles
)
menu.addSeparator()
menu.addCommand(
"Create...",
lambda: host_tools.show_publisher(
tab="create"
if not ASSIST:
menu.addCommand(
"Create...",
lambda: host_tools.show_publisher(
tab="create"
)
)
)
menu.addCommand(
"Publish...",
lambda: host_tools.show_publisher(
tab="publish"
menu.addCommand(
"Publish...",
lambda: host_tools.show_publisher(
tab="publish"
)
)
)
menu.addCommand(
"Load...",
lambda: host_tools.show_loader(
@@ -286,15 +292,18 @@ def _install_menu():
"Build Workfile from template",
lambda: build_workfile_template()
)
menu_template.addSeparator()
menu_template.addCommand(
"Create Place Holder",
lambda: create_placeholder()
)
menu_template.addCommand(
"Update Place Holder",
lambda: update_placeholder()
)
if not ASSIST:
menu_template.addSeparator()
menu_template.addCommand(
"Create Place Holder",
lambda: create_placeholder()
)
menu_template.addCommand(
"Update Place Holder",
lambda: update_placeholder()
)
menu.addSeparator()
menu.addCommand(
"Experimental tools...",

@@ -558,9 +558,7 @@ class ExporterReview(object):
self.path_in = self.instance.data.get("path", None)
self.staging_dir = self.instance.data["stagingDir"]
self.collection = self.instance.data.get("collection", None)
self.data = dict({
"representations": list()
})
self.data = {"representations": []}
def get_file_info(self):
if self.collection:
@@ -626,7 +624,7 @@
nuke_imageio = opnlib.get_nuke_imageio_settings()
# TODO: this is only securing backward compatibility lets remove
# this once all projects's anotomy are updated to newer config
# this once all projects's anatomy are updated to newer config
if "baking" in nuke_imageio.keys():
return nuke_imageio["baking"]["viewerProcess"]
else:
@@ -823,8 +821,41 @@ class ExporterReviewMov(ExporterReview):
add_tags = []
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
# TODO: remove this when `reformat_nodes_config`
# is changed in settings
reformat_node_add = kwargs["reformat_node_add"]
reformat_node_config = kwargs["reformat_node_config"]
# TODO: make this required in future
reformat_nodes_config = kwargs.get("reformat_nodes_config", {})
# TODO: remove this once deprecated is removed
# make sure only reformat_nodes_config is used in future
if reformat_node_add and reformat_nodes_config.get("enabled"):
self.log.warning(
"`reformat_node_add` is deprecated. "
"Please use only `reformat_nodes_config` instead.")
reformat_nodes_config = None
# TODO: reformat code when backward compatibility is not needed
# warning if reformat_nodes_config is not set
if not reformat_nodes_config:
self.log.warning(
"Please set `reformat_nodes_config` in settings. "
"Using `reformat_node_config` instead."
)
reformat_nodes_config = {
"enabled": reformat_node_add,
"reposition_nodes": [
{
"node_class": "Reformat",
"knobs": reformat_node_config
}
]
}
bake_viewer_process = kwargs["bake_viewer_process"]
bake_viewer_input_process_node = kwargs[
"bake_viewer_input_process"]
@@ -846,7 +877,6 @@
subset = self.instance.data["subset"]
self._temp_nodes[subset] = []
# ---------- start nodes creation
# Read node
r_node = nuke.createNode("Read")
@@ -860,44 +890,39 @@
if read_raw:
r_node["raw"].setValue(1)
# connect
self._temp_nodes[subset].append(r_node)
self.previous_node = r_node
self.log.debug("Read... `{}`".format(self._temp_nodes[subset]))
# connect to Read node
self._shift_to_previous_node_and_temp(subset, r_node, "Read... `{}`")
# add reformat node
if reformat_node_add:
if reformat_nodes_config["enabled"]:
reposition_nodes = reformat_nodes_config["reposition_nodes"]
for reposition_node in reposition_nodes:
node_class = reposition_node["node_class"]
knobs = reposition_node["knobs"]
node = nuke.createNode(node_class)
set_node_knobs_from_settings(node, knobs)
# connect in order
self._connect_to_above_nodes(
node, subset, "Reposition node... `{}`"
)
# append reformated tag
add_tags.append("reformated")
rf_node = nuke.createNode("Reformat")
set_node_knobs_from_settings(rf_node, reformat_node_config)
# connect
rf_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(rf_node)
self.previous_node = rf_node
self.log.debug(
"Reformat... `{}`".format(self._temp_nodes[subset]))
# only create colorspace baking if toggled on
if bake_viewer_process:
if bake_viewer_input_process_node:
# View Process node
ipn = get_view_process_node()
if ipn is not None:
# connect
ipn.setInput(0, self.previous_node)
self._temp_nodes[subset].append(ipn)
self.previous_node = ipn
self.log.debug(
"ViewProcess... `{}`".format(
self._temp_nodes[subset]))
# connect to ViewProcess node
self._connect_to_above_nodes(ipn, subset, "ViewProcess... `{}`")
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
# assign display
display, viewer = get_viewer_config_from_string(
str(baking_view_profile)
)
@@ -907,13 +932,7 @@
# assign viewer
dag_node["view"].setValue(viewer)
# connect
dag_node.setInput(0, self.previous_node)
self._temp_nodes[subset].append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(
self._temp_nodes[subset]))
self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`")
# Write node
write_node = nuke.createNode("Write")
self.log.debug("Path: {}".format(self.path))
@@ -967,6 +986,15 @@
return self.data
def _shift_to_previous_node_and_temp(self, subset, node, message):
self._temp_nodes[subset].append(node)
self.previous_node = node
self.log.debug(message.format(self._temp_nodes[subset]))
def _connect_to_above_nodes(self, node, subset, message):
node.setInput(0, self.previous_node)
self._shift_to_previous_node_and_temp(subset, node, message)
@deprecated("openpype.hosts.nuke.api.plugin.NukeWriteCreator")
class AbstractWriteRender(OpenPypeCreator):

@@ -0,0 +1,11 @@
from openpype.lib import PreLaunchHook
class PrelaunchNukeAssistHook(PreLaunchHook):
"""
Set the NUKEASSIST environment flag when launching NukeAssist
"""
app_groups = ["nukeassist"]
def execute(self):
self.launch_context.env["NUKEASSIST"] = "1"
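The hook pairs with the ASSIST constant added earlier: any non-empty value of the variable evaluates truthy. A quick standalone sketch of that round trip:

import os

os.environ["NUKEASSIST"] = "1"          # what the prelaunch hook sets
ASSIST = bool(os.getenv("NUKEASSIST"))  # True; unset or "" gives False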

@@ -220,8 +220,20 @@ class LoadClip(plugin.NukeLoader):
dict: altered representation data
"""
representation = deepcopy(representation)
frame = representation["context"]["frame"]
representation["context"]["frame"] = "#" * len(str(frame))
context = representation["context"]
template = representation["data"]["template"]
if (
"{originalBasename}" in template
and "frame" in context
):
frame = context["frame"]
hashed_frame = "#" * len(str(frame))
origin_basename = context["originalBasename"]
context["originalBasename"] = origin_basename.replace(
frame, hashed_frame
)
representation["context"]["frame"] = hashed_frame
return representation
def update(self, container, representation):
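The frame token in originalBasename is replaced with a same-width hash mask. A standalone sketch with hypothetical values:

frame = "1001"
basename = "sh010_main_v001.1001.exr"
hashed_frame = "#" * len(frame)               # '####'
print(basename.replace(frame, hashed_frame))  # 'sh010_main_v001.####.exr'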

@@ -23,7 +23,7 @@ class ExtractReviewData(publish.Extractor):
representations = instance.data.get("representations", [])
# review can be removed since `ProcessSubmittedJobOnFarm` will create
# reviable representation if needed
# reviewable representation if needed
if (
"render.farm" in instance.data["families"]
and "review" in instance.data["families"]

@@ -309,8 +309,6 @@ class QtTVPaintRpc(BaseTVPaintRpc):
self.add_methods(
(route_name, self.workfiles_tool),
(route_name, self.loader_tool),
(route_name, self.creator_tool),
(route_name, self.subset_manager_tool),
(route_name, self.publish_tool),
(route_name, self.scene_inventory_tool),
(route_name, self.library_loader_tool),
@@ -330,21 +328,9 @@ class QtTVPaintRpc(BaseTVPaintRpc):
self._execute_in_main_thread(item)
return
async def creator_tool(self):
log.info("Triggering Creator tool")
item = MainThreadItem(self.tools_helper.show_creator)
await self._async_execute_in_main_thread(item, wait=False)
async def subset_manager_tool(self):
log.info("Triggering Subset Manager tool")
item = MainThreadItem(self.tools_helper.show_subset_manager)
# Do not wait for result of callback
self._execute_in_main_thread(item, wait=False)
return
async def publish_tool(self):
log.info("Triggering Publish tool")
item = MainThreadItem(self.tools_helper.show_publish)
item = MainThreadItem(self.tools_helper.show_publisher_tool)
self._execute_in_main_thread(item)
return
@@ -859,10 +845,6 @@ class QtCommunicator(BaseCommunicator):
"callback": "loader_tool",
"label": "Load",
"help": "Open loader tool"
}, {
"callback": "creator_tool",
"label": "Create",
"help": "Open creator tool"
}, {
"callback": "scene_inventory_tool",
"label": "Scene inventory",
@@ -875,10 +857,6 @@
"callback": "library_loader_tool",
"label": "Library",
"help": "Open library loader tool"
}, {
"callback": "subset_manager_tool",
"label": "Subset Manager",
"help": "Open subset manager tool"
}, {
"callback": "experimental_tools",
"label": "Experimental tools",

@@ -202,8 +202,9 @@ def get_groups_data(communicator=None):
# Variable containing full path to output file
"output_path = \"{}\"".format(output_filepath),
"empty = 0",
# Loop over 100 groups
"FOR idx = 1 TO 100",
# Loop over 26 groups which is ATM maximum possible (in 11.7)
# - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880
"FOR idx = 1 TO 26",
# Receive information about groups
"tv_layercolor \"getcolor\" 0 idx",
"PARSE result clip_id group_index c_red c_green c_blue group_name",

@@ -8,7 +8,7 @@ import requests
import pyblish.api
from openpype.client import get_project, get_asset_by_name
from openpype.host import HostBase, IWorkfileHost, ILoadHost
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback
@@ -18,6 +18,7 @@ from openpype.pipeline import (
register_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.pipeline.context_tools import get_global_context
from .lib import (
execute_george,
@@ -29,6 +30,7 @@ log = logging.getLogger(__name__)
METADATA_SECTION = "avalon"
SECTION_NAME_CONTEXT = "context"
SECTION_NAME_CREATE_CONTEXT = "create_context"
SECTION_NAME_INSTANCES = "instances"
SECTION_NAME_CONTAINERS = "containers"
# Maximum length of metadata chunk string
@@ -58,7 +60,7 @@ instances=2
"""
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "tvpaint"
def install(self):
@@ -85,14 +87,63 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
)
if self.on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback(
"instanceToggled", self.on_instance_toggle
)
register_event_callback("application.launched", self.initial_launch)
register_event_callback("application.exit", self.application_exit)
def get_current_project_name(self):
"""
Returns:
Union[str, None]: Current project name.
"""
return self.get_current_context().get("project_name")
def get_current_asset_name(self):
"""
Returns:
Union[str, None]: Current asset name.
"""
return self.get_current_context().get("asset_name")
def get_current_task_name(self):
"""
Returns:
Union[str, None]: Current task name.
"""
return self.get_current_context().get("task_name")
def get_current_context(self):
context = get_current_workfile_context()
if not context:
return get_global_context()
if "project_name" in context:
return context
# This is legacy way how context was stored
return {
"project_name": context.get("project"),
"asset_name": context.get("asset"),
"task_name": context.get("task")
}
# --- Create ---
def get_context_data(self):
return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})
def update_context_data(self, data, changes):
return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)
def list_instances(self):
"""List all created instances from current workfile."""
return list_instances()
def write_instances(self, data):
return write_instances(data)
# --- Workfile ---
def open_workfile(self, filepath):
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
@@ -102,11 +153,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
def save_workfile(self, filepath=None):
if not filepath:
filepath = self.get_current_workfile()
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
context = get_global_context()
save_current_workfile_context(context)
# Execute george script to save workfile.
@@ -125,6 +172,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
def get_workfile_extensions(self):
return [".tvpp"]
# --- Load ---
def get_containers(self):
return get_containers()
@@ -137,27 +185,15 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
return
log.info("Setting up project...")
set_context_settings()
def remove_instance(self, instance):
"""Remove instance from current workfile metadata.
Implementation for Subset manager tool.
"""
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
instance_id = instance.get("uuid")
found_idx = None
if instance_id:
for idx, _inst in enumerate(current_instances):
if _inst["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
global_context = get_global_context()
project_name = global_context.get("project_name")
asset_name = global_context.get("asset_name")
if not project_name or not asset_name:
return
current_instances.pop(found_idx)
write_instances(current_instances)
asset_doc = get_asset_by_name(project_name, asset_name)
set_context_settings(project_name, asset_doc)
def application_exit(self):
"""Logic related to TimerManager.
@@ -177,34 +213,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
requests.post(rest_api_url)
def on_instance_toggle(self, instance, old_value, new_value):
"""Update instance data in workfile on publish toggle."""
# Review may not have real instance in wokrfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = list_instances()
for idx, workfile_instance in enumerate(current_instances):
if workfile_instance["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
if "active" in current_instances[found_idx]:
current_instances[found_idx]["active"] = new_value
self.write_instances(current_instances)
def list_instances(self):
"""List all created instances from current workfile."""
return list_instances()
def write_instances(self, data):
return write_instances(data)
def containerise(
name, namespace, members, context, loader, current_containers=None
@@ -462,40 +470,17 @@ def get_containers():
return output
def set_context_settings(asset_doc=None):
def set_context_settings(project_name, asset_doc):
"""Set workfile settings by asset document data.
Change fps, resolution and frame start/end.
"""
project_name = legacy_io.active_project()
if asset_doc is None:
asset_name = legacy_io.Session["AVALON_ASSET"]
# Use current session asset if not passed
asset_doc = get_asset_by_name(project_name, asset_name)
project_doc = get_project(project_name)
framerate = asset_doc["data"].get("fps")
if framerate is None:
framerate = project_doc["data"].get("fps")
if framerate is not None:
execute_george(
"tv_framerate {} \"timestretch\"".format(framerate)
)
else:
print("Framerate was not found!")
width_key = "resolutionWidth"
height_key = "resolutionHeight"
width = asset_doc["data"].get(width_key)
height = asset_doc["data"].get(height_key)
if width is None or height is None:
width = project_doc["data"].get(width_key)
height = project_doc["data"].get(height_key)
if width is None or height is None:
print("Resolution was not found!")
else:
@@ -503,6 +488,15 @@ def set_context_settings(asset_doc=None):
"tv_resizepage {} {} 0".format(width, height)
)
framerate = asset_doc["data"].get("fps")
if framerate is not None:
execute_george(
"tv_framerate {} \"timestretch\"".format(framerate)
)
else:
print("Framerate was not found!")
frame_start = asset_doc["data"].get("frameStart")
frame_end = asset_doc["data"].get("frameEnd")

@@ -1,80 +1,142 @@
import re
import uuid
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
registered_host,
from openpype.pipeline import LoaderPlugin
from openpype.pipeline.create import (
CreatedInstance,
get_subset_name,
AutoCreator,
Creator,
)
from openpype.pipeline.create.creator_plugins import cache_and_get_instances
from .lib import get_layers_data
from .pipeline import get_current_workfile_context
class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
# Add unified identifier created with `uuid` module
self.data["uuid"] = str(uuid.uuid4())
SHARED_DATA_KEY = "openpype.tvpaint.instances"
@classmethod
def get_dynamic_data(cls, *args, **kwargs):
dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)
# Change asset and name by current workfile context
workfile_context = get_current_workfile_context()
asset_name = workfile_context.get("asset")
task_name = workfile_context.get("task")
if "asset" not in dynamic_data and asset_name:
dynamic_data["asset"] = asset_name
class TVPaintCreatorCommon:
@property
def subset_template_family_filter(self):
return self.family
if "task" not in dynamic_data and task_name:
dynamic_data["task"] = task_name
return dynamic_data
@staticmethod
def are_instances_same(instance_1, instance_2):
"""Compare instances but skip keys with unique values.
Keys that are guaranteed to differ on a new instance,
like "id", are skipped during the comparison.
Returns:
bool: True if instances are same.
"""
if (
not isinstance(instance_1, dict)
or not isinstance(instance_2, dict)
):
return instance_1 == instance_2
checked_keys = set()
checked_keys.add("id")
for key, value in instance_1.items():
if key not in checked_keys:
if key not in instance_2:
return False
if value != instance_2[key]:
return False
checked_keys.add(key)
for key in instance_2.keys():
if key not in checked_keys:
return False
return True
def write_instances(self, data):
self.log.debug(
"Storing instance data to workfile. {}".format(str(data))
def _cache_and_get_instances(self):
return cache_and_get_instances(
self, SHARED_DATA_KEY, self.host.list_instances
)
host = registered_host()
return host.write_instances(data)
def process(self):
host = registered_host()
data = host.list_instances()
data.append(self.data)
self.write_instances(data)
def _collect_create_instances(self):
instances_by_identifier = self._cache_and_get_instances()
for instance_data in instances_by_identifier[self.identifier]:
instance = CreatedInstance.from_existing(instance_data, self)
self._add_instance_to_context(instance)
def _update_create_instances(self, update_list):
if not update_list:
return
cur_instances = self.host.list_instances()
cur_instances_by_id = {}
for instance_data in cur_instances:
instance_id = instance_data.get("instance_id")
if instance_id:
cur_instances_by_id[instance_id] = instance_data
for instance, changes in update_list:
instance_data = changes.new_value
cur_instance_data = cur_instances_by_id.get(instance.id)
if cur_instance_data is None:
cur_instances.append(instance_data)
continue
for key in set(cur_instance_data) - set(instance_data):
cur_instance_data.pop(key)
cur_instance_data.update(instance_data)
self.host.write_instances(cur_instances)
def _custom_get_subset_name(
self,
variant,
task_name,
asset_doc,
project_name,
host_name=None,
instance=None
):
dynamic_data = self.get_dynamic_data(
variant, task_name, asset_doc, project_name, host_name, instance
)
return get_subset_name(
self.family,
variant,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data,
project_settings=self.project_settings,
family_filter=self.subset_template_family_filter
)
class TVPaintCreator(Creator, TVPaintCreatorCommon):
def collect_instances(self):
self._collect_create_instances()
def update_instances(self, update_list):
self._update_create_instances(update_list)
def remove_instances(self, instances):
ids_to_remove = {
instance.id
for instance in instances
}
cur_instances = self.host.list_instances()
changed = False
new_instances = []
for instance_data in cur_instances:
if instance_data.get("instance_id") in ids_to_remove:
changed = True
else:
new_instances.append(instance_data)
if changed:
self.host.write_instances(new_instances)
for instance in instances:
self._remove_instance_from_context(instance)
def get_dynamic_data(self, *args, **kwargs):
# Change asset and name by current workfile context
create_context = self.create_context
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
output = {}
if asset_name:
output["asset"] = asset_name
if task_name:
output["task"] = task_name
return output
def get_subset_name(self, *args, **kwargs):
return self._custom_get_subset_name(*args, **kwargs)
def _store_new_instance(self, new_instance):
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
def collect_instances(self):
self._collect_create_instances()
def update_instances(self, update_list):
self._update_create_instances(update_list)
def get_subset_name(self, *args, **kwargs):
return self._custom_get_subset_name(*args, **kwargs)
class Loader(LoaderPlugin):

View file

@ -0,0 +1,150 @@
import collections
from openpype.pipeline.create.creator_plugins import (
SubsetConvertorPlugin,
cache_and_get_instances,
)
from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
from openpype.hosts.tvpaint.api.lib import get_groups_data
class TVPaintLegacyConverted(SubsetConvertorPlugin):
"""Conversion of legacy instances in scene to new creators.
This convertor handles only instances created by core creators.
All instances that would be created by auto-creators are removed,
because by the time they are found the auto-created instances already exist.
"""
identifier = "tvpaint.legacy.converter"
def find_instances(self):
instances_by_identifier = cache_and_get_instances(
self, SHARED_DATA_KEY, self.host.list_instances
)
if instances_by_identifier[None]:
self.add_convertor_item("Convert legacy instances")
def convert(self):
current_instances = self.host.list_instances()
to_convert = collections.defaultdict(list)
converted = False
for instance in current_instances:
if instance.get("creator_identifier") is not None:
continue
converted = True
family = instance.get("family")
if family in (
"renderLayer",
"renderPass",
"renderScene",
"review",
"workfile",
):
to_convert[family].append(instance)
else:
instance["keep"] = False
# Skip if nothing was changed
if not converted:
self.remove_convertor_item()
return
self._convert_render_layers(
to_convert["renderLayer"], current_instances)
self._convert_render_passes(
to_convert["renderpass"], current_instances)
self._convert_render_scenes(
to_convert["renderScene"], current_instances)
self._convert_workfiles(
to_convert["workfile"], current_instances)
self._convert_reviews(
to_convert["review"], current_instances)
new_instances = [
instance
for instance in current_instances
if instance.get("keep") is not False
]
self.host.write_instances(new_instances)
# remove legacy item if all is fine
self.remove_convertor_item()
def _convert_render_layers(self, render_layers, current_instances):
if not render_layers:
return
# Look for possible existing render layers in scene
render_layers_by_group_id = {}
for instance in current_instances:
if instance.get("creator_identifier") == "render.layer":
group_id = instance["creator_identifier"]["group_id"]
render_layers_by_group_id[group_id] = instance
groups_by_id = {
group["group_id"]: group
for group in get_groups_data()
}
for render_layer in render_layers:
group_id = render_layer.pop("group_id")
# Just remove legacy instance if group is already occupied
if group_id in render_layers_by_group_id:
render_layer["keep"] = False
continue
# Add identifier
render_layer["creator_identifier"] = "render.layer"
# Change 'uuid' to 'instance_id'
render_layer["instance_id"] = render_layer.pop("uuid")
# Fill creator attributes
render_layer["creator_attributes"] = {
"group_id": group_id
}
render_layer["family"] = "render"
group = groups_by_id[group_id]
# Use group name for variant
group["variant"] = group["name"]
def _convert_render_passes(self, render_passes, current_instances):
if not render_passes:
return
# Render passes require an existing render layer, so we look for render
# layers first
# - '_convert_render_layers' must be called before this method
render_layers_by_group_id = {}
for instance in current_instances:
if instance.get("creator_identifier") == "render.layer":
group_id = instance["creator_identifier"]["group_id"]
render_layers_by_group_id[group_id] = instance
for render_pass in render_passes:
group_id = render_pass.pop("group_id")
render_layer = render_layers_by_group_id.get(group_id)
if not render_layer:
render_pass["keep"] = False
continue
render_pass["creator_identifier"] = "render.pass"
render_pass["instance_id"] = render_pass.pop("uuid")
render_pass["family"] = "render"
render_pass["creator_attributes"] = {
"render_layer_instance_id": render_layer["instance_id"]
}
render_pass["variant"] = render_pass.pop("pass")
render_pass.pop("renderlayer")
# Rest of instances are just marked for deletion
def _convert_render_scenes(self, render_scenes, current_instances):
for render_scene in render_scenes:
render_scene["keep"] = False
def _convert_workfiles(self, workfiles, current_instances):
for render_scene in workfiles:
render_scene["keep"] = False
def _convert_reviews(self, reviews, current_instances):
for render_scene in reviews:
render_scene["keep"] = False

File diff suppressed because it is too large

View file

@ -1,231 +0,0 @@
from openpype.lib import prepare_template_data
from openpype.pipeline import CreatorError
from openpype.hosts.tvpaint.api import (
plugin,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
get_groups_data,
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderlayer(plugin.Creator):
"""Mark layer group as one instance."""
name = "render_layer"
label = "RenderLayer"
family = "renderLayer"
icon = "cube"
defaults = ["Main"]
rename_group = True
render_pass = "beauty"
rename_script_template = (
"tv_layercolor \"setcolor\""
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
)
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer", "group"
]
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
):
dynamic_data = super(CreateRenderlayer, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
# Use render pass name from creator's plugin
dynamic_data["renderpass"] = cls.render_pass
# Add variant to render layer
dynamic_data["renderlayer"] = variant
# Change family for subset name fill
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
return dynamic_data
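# Illustrative result for variant "Main" with the default render pass
# "beauty" (keys added by the base class are omitted):
# {"renderpass": "beauty", "renderlayer": "Main", "family": "render",
#  "render_pass": "beauty", "render_layer": "Main"}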
@classmethod
def get_default_variant(cls):
"""Default value for variant in Creator tool.
Method checks if TVPaint implementation is running and tries to find
selected layers from TVPaint. If only one is selected, its name is
returned.
Returns:
str: Default variant name for Creator tool.
"""
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = get_layers_data()
selected_layers = [
layer
for layer in layers_data
if layer["selected"]
]
# Return layer name if only one is selected
if len(selected_layers) == 1:
return selected_layers[0]["name"]
# Use defaults
if cls.defaults:
return cls.defaults[0]
return None
def process(self):
self.log.debug("Query data from workfile.")
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking for selection groups.")
# Collect group ids from selection
group_ids = set()
for layer in layers_data:
if layer["selected"]:
group_ids.add(layer["group_id"])
# Raise if there is no selection
if not group_ids:
raise CreatorError("Nothing is selected.")
# This creator should run only on one group
if len(group_ids) > 1:
raise CreatorError("More than one group is in selection.")
group_id = tuple(group_ids)[0]
# If group id is `0` it is `default` group which is invalid
if group_id == 0:
raise CreatorError(
"Selection is not in group. Can't mark selection as Beauty."
)
self.log.debug(f"Selected group id is \"{group_id}\".")
self.data["group_id"] = group_id
group_data = get_groups_data()
group_name = None
for group in group_data:
if group["group_id"] == group_id:
group_name = group["name"]
break
if group_name is None:
raise AssertionError(
"Couldn't find group by id \"{}\"".format(group_id)
)
subset_name_fill_data = {
"group": group_name
}
family = self.family = self.data["family"]
# Fill dynamic key 'group'
subset_name = self.data["subset"].format(
**prepare_template_data(subset_name_fill_data)
)
self.data["subset"] = subset_name
# Check for instances of same group
existing_instance = None
existing_instance_idx = None
# Check if subset name is not already taken
same_subset_instance = None
same_subset_instance_idx = None
for idx, instance in enumerate(instances):
if instance["family"] == family:
if instance["group_id"] == group_id:
existing_instance = instance
existing_instance_idx = idx
elif instance["subset"] == subset_name:
same_subset_instance = instance
same_subset_instance_idx = idx
if (
same_subset_instance_idx is not None
and existing_instance_idx is not None
):
break
if same_subset_instance_idx is not None:
if self._ask_user_subset_override(same_subset_instance):
instances.pop(same_subset_instance_idx)
else:
return
if existing_instance is not None:
self.log.info(
f"Beauty instance for group id {group_id} already exists"
", overriding"
)
instances[existing_instance_idx] = self.data
else:
instances.append(self.data)
self.write_instances(instances)
if not self.rename_group:
self.log.info("Group rename function is turned off. Skipping")
return
self.log.debug("Querying groups data from workfile.")
groups_data = get_groups_data()
self.log.debug("Changing name of the group.")
selected_group = None
for group_data in groups_data:
if group_data["group_id"] == group_id:
selected_group = group_data
# Rename TVPaint group (keep color same)
# - groups can't contain spaces
new_group_name = self.data["variant"].replace(" ", "_")
rename_script = self.rename_script_template.format(
clip_id=selected_group["clip_id"],
group_id=selected_group["group_id"],
r=selected_group["red"],
g=selected_group["green"],
b=selected_group["blue"],
name=new_group_name
)
execute_george_through_file(rename_script)
self.log.info(
f"Name of group with index {group_id}"
f" was changed to \"{new_group_name}\"."
)
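# Example of the george script produced by the template above
# (clip/group ids and colors are illustrative assumptions):
# tv_layercolor "setcolor" 0 3 255 0 0 "Character_Main"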
def _ask_user_subset_override(self, instance):
from qtpy import QtCore
from qtpy.QtWidgets import QMessageBox
title = "Subset \"{}\" already exist".format(instance["subset"])
text = (
"Instance with subset name \"{}\" already exists."
"\n\nDo you want to override existing?"
).format(instance["subset"])
dialog = QMessageBox()
dialog.setWindowFlags(
dialog.windowFlags()
| QtCore.Qt.WindowStaysOnTopHint
)
dialog.setWindowTitle(title)
dialog.setText(text)
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
dialog.setDefaultButton(QMessageBox.Yes)
dialog.exec_()
if dialog.result() == QMessageBox.Yes:
return True
return False

View file

@ -1,167 +0,0 @@
from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.hosts.tvpaint.api import (
plugin,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import get_layers_data
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderPass(plugin.Creator):
"""Render pass is combination of one or more layers from same group.
Requirement to create Render Pass is to have already created beauty
instance. Beauty instance is used as base for subset name.
"""
name = "render_pass"
label = "RenderPass"
family = "renderPass"
icon = "cube"
defaults = ["Main"]
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer"
]
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
):
dynamic_data = super(CreateRenderPass, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["renderpass"] = variant
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
return dynamic_data
@classmethod
def get_default_variant(cls):
"""Default value for variant in Creator tool.
Method checks if TVPaint implementation is running and tries to find
selected layers from TVPaint. If only one is selected, its name is
returned.
Returns:
str: Default variant name for Creator tool.
"""
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = get_layers_data()
selected_layers = [
layer
for layer in layers_data
if layer["selected"]
]
# Return layer name if only one is selected
if len(selected_layers) == 1:
return selected_layers[0]["name"]
# Use defaults
if cls.defaults:
return cls.defaults[0]
return None
def process(self):
self.log.debug("Query data from workfile.")
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking selection.")
# Get all selected layers and their group ids
group_ids = set()
selected_layers = []
for layer in layers_data:
if layer["selected"]:
selected_layers.append(layer)
group_ids.add(layer["group_id"])
# Raise if nothing is selected
if not selected_layers:
raise CreatorError("Nothing is selected.")
# Raise if layers from multiple groups are selected
if len(group_ids) != 1:
raise CreatorError("More than one group is in selection.")
group_id = tuple(group_ids)[0]
self.log.debug(f"Selected group id is \"{group_id}\".")
# Find beauty instance for selected layers
beauty_instance = None
for instance in instances:
if (
instance["family"] == "renderLayer"
and instance["group_id"] == group_id
):
beauty_instance = instance
break
# Beauty is required for this creator so raise if was not found
if beauty_instance is None:
raise CreatorError("Beauty pass does not exist yet.")
subset_name = self.data["subset"]
subset_name_fill_data = {}
# Backwards compatibility
# - beauty may be created with older creator where variant was not
# stored
if "variant" not in beauty_instance:
render_layer = beauty_instance["name"]
else:
render_layer = beauty_instance["variant"]
subset_name_fill_data["renderlayer"] = render_layer
subset_name_fill_data["render_layer"] = render_layer
# Format dynamic keys in subset name
new_subset_name = subset_name.format(
**prepare_template_data(subset_name_fill_data)
)
self.data["subset"] = new_subset_name
self.log.info(f"New subset name is \"{new_subset_name}\".")
family = self.data["family"]
variant = self.data["variant"]
self.data["group_id"] = group_id
self.data["pass"] = variant
self.data["renderlayer"] = render_layer
# Collect selected layer ids to be stored into instance
layer_names = [layer["name"] for layer in selected_layers]
self.data["layer_names"] = layer_names
# Check if same instance already exists
existing_instance = None
existing_instance_idx = None
for idx, instance in enumerate(instances):
if (
instance["family"] == family
and instance["group_id"] == group_id
and instance["pass"] == variant
):
existing_instance = instance
existing_instance_idx = idx
break
if existing_instance is not None:
self.log.info(
f"Render pass instance for group id {group_id}"
f" and name \"{variant}\" already exists, overriding."
)
instances[existing_instance_idx] = self.data
else:
instances.append(self.data)
self.write_instances(instances)

View file

@ -0,0 +1,76 @@
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
class TVPaintReviewCreator(TVPaintAutoCreator):
family = "review"
identifier = "scene.review"
label = "Review"
icon = "ei.video"
# Settings
active_on_create = True
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_review"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
self.active_on_create = plugin_settings["active_on_create"]
def create(self):
existing_instance = None
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
existing_instance = instance
break
create_context = self.create_context
host_name = create_context.host_name
project_name = create_context.get_current_project_name()
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant,
task_name,
asset_doc,
project_name,
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if not self.active_on_create:
data["active"] = False
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
existing_instance["variant"],
task_name,
asset_doc,
project_name,
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -0,0 +1,70 @@
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
class TVPaintWorkfileCreator(TVPaintAutoCreator):
family = "workfile"
identifier = "workfile"
label = "Workfile"
icon = "fa.file-o"
def apply_settings(self, project_settings, system_settings):
plugin_settings = (
project_settings["tvpaint"]["create"]["create_workfile"]
)
self.default_variant = plugin_settings["default_variant"]
self.default_variants = plugin_settings["default_variants"]
def create(self):
existing_instance = None
for instance in self.create_context.instances:
if instance.creator_identifier == self.identifier:
existing_instance = instance
break
create_context = self.create_context
host_name = create_context.host_name
project_name = create_context.get_current_project_name()
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
self.default_variant,
task_name,
asset_doc,
project_name,
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
new_instance = CreatedInstance(
self.family, subset_name, data, self
)
instances_data = self.host.list_instances()
instances_data.append(new_instance.data_to_store())
self.host.write_instances(instances_data)
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
existing_instance["variant"],
task_name,
asset_doc,
project_name,
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,37 +1,34 @@
import pyblish.api
class CollectOutputFrameRange(pyblish.api.ContextPlugin):
class CollectOutputFrameRange(pyblish.api.InstancePlugin):
"""Collect frame start/end from context.
When instances are collected, the context does not contain `frameStart` and
`frameEnd` keys yet. They are collected in global plugin
`CollectContextEntities`.
"""
label = "Collect output frame range"
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder + 0.4999
hosts = ["tvpaint"]
families = ["review", "render"]
def process(self, context):
for instance in context:
frame_start = instance.data.get("frameStart")
frame_end = instance.data.get("frameEnd")
if frame_start is not None and frame_end is not None:
self.log.debug(
"Instance {} already has set frames {}-{}".format(
str(instance), frame_start, frame_end
)
)
return
def process(self, instance):
asset_doc = instance.data.get("assetEntity")
if not asset_doc:
return
frame_start = context.data.get("frameStart")
frame_end = context.data.get("frameEnd")
context = instance.context
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
self.log.info(
"Set frames {}-{} on instance {} ".format(
frame_start, frame_end, str(instance)
)
frame_start = asset_doc["data"]["frameStart"]
frame_end = frame_start + (
context.data["sceneMarkOut"] - context.data["sceneMarkIn"]
)
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
self.log.info(
"Set frames {}-{} on instance {} ".format(
frame_start, frame_end, instance.data["subset"]
)
)
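# Worked example with illustrative numbers: for asset frameStart=1001,
# sceneMarkIn=5 and sceneMarkOut=25 the instance gets frameStart=1001
# and frameEnd=1001 + (25 - 5) = 1021.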

View file

@ -1,280 +0,0 @@
import json
import copy
import pyblish.api
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name
class CollectInstances(pyblish.api.ContextPlugin):
label = "Collect Instances"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
def process(self, context):
workfile_instances = context.data["workfileInstances"]
self.log.debug("Collected ({}) instances:\n{}".format(
len(workfile_instances),
json.dumps(workfile_instances, indent=4)
))
filtered_instance_data = []
# Backwards compatibility for workfiles that already have review
# instance in metadata.
review_instance_exist = False
for instance_data in workfile_instances:
family = instance_data["family"]
if family == "review":
review_instance_exist = True
elif family not in ("renderPass", "renderLayer"):
self.log.info("Unknown family \"{}\". Skipping {}".format(
family, json.dumps(instance_data, indent=4)
))
continue
filtered_instance_data.append(instance_data)
# Fake review instance if review was not found in metadata families
if not review_instance_exist:
filtered_instance_data.append(
self._create_review_instance_data(context)
)
for instance_data in filtered_instance_data:
instance_data["fps"] = context.data["sceneFps"]
# Conversion from older instances
# - change 'render_layer' to 'renderlayer'
render_layer = instance_data.get("renderlayer")
if not render_layer:
# Render Layer has only variant
if instance_data["family"] == "renderLayer":
render_layer = instance_data.get("variant")
# Backwards compatibility for renderPasses
elif "render_layer" in instance_data:
render_layer = instance_data["render_layer"]
if render_layer:
instance_data["renderlayer"] = render_layer
# Store workfile instance data to instance data
instance_data["originData"] = copy.deepcopy(instance_data)
# Global instance data modifications
# Fill families
family = instance_data["family"]
families = [family]
if family != "review":
families.append("review")
# Add `review` family for thumbnail integration
instance_data["families"] = families
# Instance name
subset_name = instance_data["subset"]
name = instance_data.get("name", subset_name)
instance_data["name"] = name
instance_data["label"] = "{} [{}-{}]".format(
name,
context.data["sceneMarkIn"] + 1,
context.data["sceneMarkOut"] + 1
)
active = instance_data.get("active", True)
instance_data["active"] = active
instance_data["publish"] = active
# Add representations key
instance_data["representations"] = []
# Different instance creation based on family
instance = None
if family == "review":
# Change subset name of review instance
# Project name from workfile context
project_name = context.data["workfile_context"]["project"]
# Collect asset doc to get asset id
# - not sure if it's good idea to require asset id in
# get_subset_name?
asset_name = context.data["workfile_context"]["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
# Host name from environment variable
host_name = context.data["hostName"]
# Use empty variant value
variant = ""
task_name = legacy_io.Session["AVALON_TASK"]
new_subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name,
host_name,
project_settings=context.data["project_settings"]
)
instance_data["subset"] = new_subset_name
instance = context.create_instance(**instance_data)
instance.data["layers"] = copy.deepcopy(
context.data["layersData"]
)
elif family == "renderLayer":
instance = self.create_render_layer_instance(
context, instance_data
)
elif family == "renderPass":
instance = self.create_render_pass_instance(
context, instance_data
)
if instance is None:
continue
any_visible = False
for layer in instance.data["layers"]:
if layer["visible"]:
any_visible = True
break
instance.data["publish"] = any_visible
self.log.debug("Created instance: {}\n{}".format(
instance, json.dumps(instance.data, indent=4)
))
def _create_review_instance_data(self, context):
"""Fake review instance data."""
return {
"family": "review",
"asset": context.data["asset"],
# Dummy subset name
"subset": "reviewMain"
}
def create_render_layer_instance(self, context, instance_data):
name = instance_data["name"]
# Change label
subset_name = instance_data["subset"]
# Backwards compatibility
# - subset names were not stored as final subset names during creation
if "variant" not in instance_data:
instance_data["label"] = "{}_Beauty".format(name)
# Change subset name
# Final family of an instance will be `render`
new_family = "render"
task_name = legacy_io.Session["AVALON_TASK"]
new_subset_name = "{}{}_{}_Beauty".format(
new_family, task_name.capitalize(), name
)
instance_data["subset"] = new_subset_name
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
subset_name, new_subset_name
))
# Get all layers for the layer
layers_data = context.data["layersData"]
group_id = instance_data["group_id"]
group_layers = []
for layer in layers_data:
if layer["group_id"] == group_id:
group_layers.append(layer)
if not group_layers:
# Should be handled here?
self.log.warning((
f"Group with id {group_id} does not contain any layers."
f" Instance \"{name}\" not created."
))
return None
instance_data["layers"] = group_layers
return context.create_instance(**instance_data)
def create_render_pass_instance(self, context, instance_data):
pass_name = instance_data["pass"]
self.log.info(
"Creating render pass instance. \"{}\"".format(pass_name)
)
# Change label
render_layer = instance_data["renderlayer"]
# Backwards compatibility
# - subset names were not stored as final subset names during creation
if "variant" not in instance_data:
instance_data["label"] = "{}_{}".format(render_layer, pass_name)
# Change subset name
# Final family of an instance will be `render`
new_family = "render"
old_subset_name = instance_data["subset"]
task_name = legacy_io.Session["AVALON_TASK"]
new_subset_name = "{}{}_{}_{}".format(
new_family, task_name.capitalize(), render_layer, pass_name
)
instance_data["subset"] = new_subset_name
self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
old_subset_name, new_subset_name
))
layers_data = context.data["layersData"]
layers_by_name = {
layer["name"]: layer
for layer in layers_data
}
if "layer_names" in instance_data:
layer_names = instance_data["layer_names"]
else:
# Backwards compatibility
# - not fully reliable, as it was found that layer ids can't be used
# as a unified identifier across multiple workstations
layers_by_id = {
layer["layer_id"]: layer
for layer in layers_data
}
layer_ids = instance_data["layer_ids"]
layer_names = []
for layer_id in layer_ids:
layer = layers_by_id.get(layer_id)
if layer:
layer_names.append(layer["name"])
if not layer_names:
raise ValueError((
"Metadata contain old way of storing layers information."
" It is not possible to identify layers to publish with"
" these data. Please remove Render Pass instances with"
" Subset manager and use Creator tool to recreate them."
))
render_pass_layers = []
for layer_name in layer_names:
layer = layers_by_name.get(layer_name)
# NOTE This is kind of validation before validators?
if not layer:
self.log.warning(
f"Layer with name {layer_name} was not found."
)
continue
render_pass_layers.append(layer)
if not render_pass_layers:
name = instance_data["name"]
self.log.warning(
f"None of the layers from the RenderPass \"{name}\""
" exist anymore. Instance not created."
)
return None
instance_data["layers"] = render_pass_layers
return context.create_instance(**instance_data)

View file

@ -0,0 +1,114 @@
import copy
import pyblish.api
from openpype.lib import prepare_template_data
class CollectRenderInstances(pyblish.api.InstancePlugin):
label = "Collect Render Instances"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
families = ["render", "review"]
ignore_render_pass_transparency = False
def process(self, instance):
context = instance.context
creator_identifier = instance.data["creator_identifier"]
if creator_identifier == "render.layer":
self._collect_data_for_render_layer(instance)
elif creator_identifier == "render.pass":
self._collect_data_for_render_pass(instance)
elif creator_identifier == "render.scene":
self._collect_data_for_render_scene(instance)
else:
if creator_identifier == "scene.review":
self._collect_data_for_review(instance)
return
subset_name = instance.data["subset"]
instance.data["name"] = subset_name
instance.data["label"] = "{} [{}-{}]".format(
subset_name,
context.data["sceneMarkIn"] + 1,
context.data["sceneMarkOut"] + 1
)
def _collect_data_for_render_layer(self, instance):
instance.data["families"].append("renderLayer")
creator_attributes = instance.data["creator_attributes"]
group_id = creator_attributes["group_id"]
if creator_attributes["mark_for_review"]:
instance.data["families"].append("review")
layers_data = instance.context.data["layersData"]
instance.data["layers"] = [
copy.deepcopy(layer)
for layer in layers_data
if layer["group_id"] == group_id
]
def _collect_data_for_render_pass(self, instance):
instance.data["families"].append("renderPass")
layer_names = set(instance.data["layer_names"])
layers_data = instance.context.data["layersData"]
creator_attributes = instance.data["creator_attributes"]
if creator_attributes["mark_for_review"]:
instance.data["families"].append("review")
instance.data["layers"] = [
copy.deepcopy(layer)
for layer in layers_data
if layer["name"] in layer_names
]
instance.data["ignoreLayersTransparency"] = (
self.ignore_render_pass_transparency
)
render_layer_data = None
render_layer_id = creator_attributes["render_layer_instance_id"]
for in_data in instance.context.data["workfileInstances"]:
if (
in_data["creator_identifier"] == "render.layer"
and in_data["instance_id"] == render_layer_id
):
render_layer_data = in_data
break
instance.data["renderLayerData"] = copy.deepcopy(render_layer_data)
# Invalid state
if render_layer_data is None:
return
render_layer_name = render_layer_data["variant"]
subset_name = instance.data["subset"]
instance.data["subset"] = subset_name.format(
**prepare_template_data({"renderlayer": render_layer_name})
)
def _collect_data_for_render_scene(self, instance):
instance.data["families"].append("renderScene")
creator_attributes = instance.data["creator_attributes"]
if creator_attributes["mark_for_review"]:
instance.data["families"].append("review")
instance.data["layers"] = copy.deepcopy(
instance.context.data["layersData"]
)
render_pass_name = (
instance.data["creator_attributes"]["render_pass_name"]
)
subset_name = instance.data["subset"]
instance.data["subset"] = subset_name.format(
**prepare_template_data({"renderpass": render_pass_name})
)
def _collect_data_for_review(self, instance):
instance.data["layers"] = copy.deepcopy(
instance.context.data["layersData"]
)
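# Sketch of the subset name fill above (illustrative values): with
# subset template "render{Renderpass}" and render_pass_name "beauty",
# prepare_template_data({"renderpass": "beauty"}) adds case variants so
# "render{Renderpass}".format(**...) resolves to "renderBeauty".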

View file

@ -1,114 +0,0 @@
import json
import copy
import pyblish.api
from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name
class CollectRenderScene(pyblish.api.ContextPlugin):
"""Collect instance which renders whole scene in PNG.
Creates instance with family 'renderScene' which will have all layers
to render which will be composite into one result. The instance is not
collected from scene.
The scene is rendered with all visible layers, in a similar way to a review.
The instance is disabled if there are any created instances of 'renderLayer'
or 'renderPass', because this instance is expected to be used as a lazy
publish of the TVPaint file.
Subset name is created in a similar way to the 'renderLayer' family. It can
use the `renderPass` and `renderLayer` keys, which can be set using settings;
`variant` is filled with the `renderPass` value.
"""
label = "Collect Render Scene"
order = pyblish.api.CollectorOrder - 0.39
hosts = ["tvpaint"]
# Value of 'render_pass' in subset name template
render_pass = "beauty"
# Settings attributes
enabled = False
# Value of 'render_layer' and 'variant' in subset name template
render_layer = "Main"
def process(self, context):
# Check if there are created instances of renderPass and renderLayer
# - that will define if renderScene instance is enabled after
# collection
any_created_instance = False
for instance in context:
family = instance.data["family"]
if family in ("renderPass", "renderLayer"):
any_created_instance = True
break
# Global instance data modifications
# Fill families
family = "renderScene"
# Add `review` family for thumbnail integration
families = [family, "review"]
# Collect asset doc to get asset id
# - not sure if it's good idea to require asset id in
# get_subset_name?
workfile_context = context.data["workfile_context"]
# Project name from workfile context
project_name = context.data["workfile_context"]["project"]
asset_name = workfile_context["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
# Host name from environment variable
host_name = context.data["hostName"]
# Variant is using render pass name
variant = self.render_layer
dynamic_data = {
"renderlayer": self.render_layer,
"renderpass": self.render_pass,
}
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
dynamic_data["render_pass"] = dynamic_data["renderpass"]
task_name = workfile_context["task"]
subset_name = get_subset_name(
"render",
variant,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data,
project_settings=context.data["project_settings"]
)
instance_data = {
"family": family,
"families": families,
"fps": context.data["sceneFps"],
"subset": subset_name,
"name": subset_name,
"label": "{} [{}-{}]".format(
subset_name,
context.data["sceneMarkIn"] + 1,
context.data["sceneMarkOut"] + 1
),
"active": not any_created_instance,
"publish": not any_created_instance,
"representations": [],
"layers": copy.deepcopy(context.data["layersData"]),
"asset": asset_name,
"task": task_name,
# Add render layer to instance data
"renderlayer": self.render_layer
}
instance = context.create_instance(**instance_data)
self.log.debug("Created instance: {}\n{}".format(
instance, json.dumps(instance.data, indent=4)
))

View file

@ -2,17 +2,15 @@ import os
import json
import pyblish.api
from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name
class CollectWorkfile(pyblish.api.ContextPlugin):
class CollectWorkfile(pyblish.api.InstancePlugin):
label = "Collect Workfile"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["tvpaint"]
families = ["workfile"]
def process(self, context):
def process(self, instance):
context = instance.context
current_file = context.data["currentFile"]
self.log.info(
@ -21,49 +19,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
dirpath, filename = os.path.split(current_file)
basename, ext = os.path.splitext(filename)
instance = context.create_instance(name=basename)
# Project name from workfile context
project_name = context.data["workfile_context"]["project"]
# Get subset name of workfile instance
# Collect asset doc to get asset id
# - not sure if it's good idea to require asset id in
# get_subset_name?
family = "workfile"
asset_name = context.data["workfile_context"]["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
# Host name from environment variable
host_name = os.environ["AVALON_APP"]
# Use empty variant value
variant = ""
task_name = legacy_io.Session["AVALON_TASK"]
subset_name = get_subset_name(
family,
variant,
task_name,
asset_doc,
project_name,
host_name,
project_settings=context.data["project_settings"]
)
# Create Workfile instance
instance.data.update({
"subset": subset_name,
"asset": context.data["asset"],
"label": subset_name,
"publish": True,
"family": "workfile",
"families": ["workfile"],
"representations": [{
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filename,
"stagingDir": dirpath
}]
instance.data["representations"].append({
"name": ext.lstrip("."),
"ext": ext.lstrip("."),
"files": filename,
"stagingDir": dirpath
})
self.log.info("Collected workfile instance: {}".format(
json.dumps(instance.data, indent=4)
))

View file

@ -65,9 +65,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect and store current context to have reference
current_context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
"project_name": context.data["projectName"],
"asset_name": context.data["asset"],
"task_name": context.data["task"]
}
context.data["previous_context"] = current_context
self.log.debug("Current context is: {}".format(current_context))
@ -76,25 +76,31 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
self.log.info("Collecting workfile context")
workfile_context = get_current_workfile_context()
if "project" in workfile_context:
workfile_context = {
"project_name": workfile_context.get("project"),
"asset_name": workfile_context.get("asset"),
"task_name": workfile_context.get("task"),
}
# Store workfile context to pyblish context
context.data["workfile_context"] = workfile_context
if workfile_context:
# Change current context with context from workfile
key_map = (
("AVALON_ASSET", "asset"),
("AVALON_TASK", "task")
("AVALON_ASSET", "asset_name"),
("AVALON_TASK", "task_name")
)
for env_key, key in key_map:
legacy_io.Session[env_key] = workfile_context[key]
os.environ[env_key] = workfile_context[key]
self.log.info("Context changed to: {}".format(workfile_context))
asset_name = workfile_context["asset"]
task_name = workfile_context["task"]
asset_name = workfile_context["asset_name"]
task_name = workfile_context["task_name"]
else:
asset_name = current_context["asset"]
task_name = current_context["task"]
asset_name = current_context["asset_name"]
task_name = current_context["task_name"]
# Handle older workfiles or workfiles without metadata
self.log.warning((
"Workfile does not contain information about context."
@ -103,6 +109,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Store context asset name
context.data["asset"] = asset_name
context.data["task"] = task_name
self.log.info(
"Context is set to Asset: \"{}\" and Task: \"{}\"".format(
asset_name, task_name

View file

@ -6,6 +6,7 @@ from PIL import Image
import pyblish.api
from openpype.pipeline.publish import KnownPublishError
from openpype.hosts.tvpaint.api.lib import (
execute_george,
execute_george_through_file,
@ -24,8 +25,7 @@ from openpype.hosts.tvpaint.lib import (
class ExtractSequence(pyblish.api.Extractor):
label = "Extract Sequence"
hosts = ["tvpaint"]
families = ["review", "renderPass", "renderLayer", "renderScene"]
families_to_review = ["review"]
families = ["review", "render"]
# Modifiable with settings
review_bg = [255, 255, 255, 255]
@ -59,6 +59,10 @@ class ExtractSequence(pyblish.api.Extractor):
)
)
ignore_layers_transparency = instance.data.get(
"ignoreLayersTransparency", False
)
family_lowered = instance.data["family"].lower()
mark_in = instance.context.data["sceneMarkIn"]
mark_out = instance.context.data["sceneMarkOut"]
@ -114,7 +118,11 @@ class ExtractSequence(pyblish.api.Extractor):
else:
# Render output
result = self.render(
output_dir, mark_in, mark_out, filtered_layers
output_dir,
mark_in,
mark_out,
filtered_layers,
ignore_layers_transparency
)
output_filepaths_by_frame_idx, thumbnail_fullpath = result
@ -136,7 +144,7 @@ class ExtractSequence(pyblish.api.Extractor):
# Fill tags and new families from project settings
tags = []
if family_lowered in self.families_to_review:
if family_lowered == "review":
tags.append("review")
# Sequence of one frame
@ -162,10 +170,6 @@ class ExtractSequence(pyblish.api.Extractor):
instance.data["representations"].append(new_repre)
if family_lowered in ("renderpass", "renderlayer", "renderscene"):
# Change family to render
instance.data["family"] = "render"
if not thumbnail_fullpath:
return
@ -259,7 +263,7 @@ class ExtractSequence(pyblish.api.Extractor):
output_filepaths_by_frame_idx[frame_idx] = filepath
if not os.path.exists(filepath):
raise AssertionError(
raise KnownPublishError(
"Output was not rendered. File was not found {}".format(
filepath
)
@ -278,7 +282,9 @@ class ExtractSequence(pyblish.api.Extractor):
return output_filepaths_by_frame_idx, thumbnail_filepath
def render(self, output_dir, mark_in, mark_out, layers):
def render(
self, output_dir, mark_in, mark_out, layers, ignore_layer_opacity
):
""" Export images from TVPaint.
Args:
@ -286,6 +292,7 @@ class ExtractSequence(pyblish.api.Extractor):
mark_in (int): Starting frame index from which export will begin.
mark_out (int): On which frame index export will end.
layers (list): List of layers to be exported.
ignore_layer_opacity (bool): Layer's opacity will be ignored.
Returns:
tuple: With 2 items first is list of filenames second is path to
@ -327,7 +334,7 @@ class ExtractSequence(pyblish.api.Extractor):
for layer_id, render_data in extraction_data_by_layer_id.items():
layer = layers_by_id[layer_id]
filepaths_by_layer_id[layer_id] = self._render_layer(
render_data, layer, output_dir
render_data, layer, output_dir, ignore_layer_opacity
)
# Prepare final filepaths where compositing should store result
@ -384,7 +391,9 @@ class ExtractSequence(pyblish.api.Extractor):
red, green, blue = self.review_bg
return (red, green, blue)
def _render_layer(self, render_data, layer, output_dir):
def _render_layer(
self, render_data, layer, output_dir, ignore_layer_opacity
):
frame_references = render_data["frame_references"]
filenames_by_frame_index = render_data["filenames_by_frame_index"]
@ -393,6 +402,12 @@ class ExtractSequence(pyblish.api.Extractor):
"tv_layerset {}".format(layer_id),
"tv_SaveMode \"PNG\""
]
# Set density to 100 and store previous opacity
if ignore_layer_opacity:
george_script_lines.extend([
"tv_layerdensity 100",
"orig_opacity = result",
])
filepaths_by_frame = {}
frames_to_render = []
@ -413,6 +428,10 @@ class ExtractSequence(pyblish.api.Extractor):
# Store image to output
george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
# Set density back to the original opacity
if ignore_layer_opacity:
george_script_lines.append("tv_layerdensity orig_opacity")
self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
",".join(frames_to_render), layer_id, layer["name"]
))

View file

@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Overused Color group</title>
<description>## One Color group is used by multiple Render Layers
A single color group used by multiple Render Layers would cause clashes of rendered TVPaint layers: the same layers would be used for the output files of both groups.
### Groups information
{groups_information}
### How to repair?
Refresh, go to the 'Publish' tab, go through the Render Layers and change their groups so they do not clash with each other. If you reach the limit of TVPaint color groups, there is no way to work around the issue.
</description>
</error>
</root>

View file

@ -20,6 +20,9 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
duplicated_layer_names = []
for layer_name in layer_names:
layers = layers_by_name.get(layer_name)
# It is not the job of this validator to handle missing layers
if layers is None:
continue
if len(layers) > 1:
duplicated_layer_names.append(layer_name)

View file

@ -8,11 +8,16 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
label = "Validate Layers Visibility"
order = pyblish.api.ValidatorOrder
families = ["review", "renderPass", "renderLayer", "renderScene"]
families = ["review", "render"]
def process(self, instance):
layers = instance.data["layers"]
# Instance has empty layers
# - it is not the job of this validator to check that
if not layers:
return
layer_names = set()
for layer in instance.data["layers"]:
for layer in layers:
layer_names.add(layer["name"])
if layer["visible"]:
return

View file

@ -0,0 +1,74 @@
import collections
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
class ValidateRenderLayerGroups(pyblish.api.ContextPlugin):
"""Validate group ids of renderLayer subsets.
Validate that there are not 2 render layers using the same group.
"""
label = "Validate Render Layers Group"
order = pyblish.api.ValidatorOrder + 0.1
def process(self, context):
# Prepare layers
render_layers_by_group_id = collections.defaultdict(list)
for instance in context:
families = instance.data.get("families")
if not families or "renderLayer" not in families:
continue
group_id = instance.data["creator_attributes"]["group_id"]
render_layers_by_group_id[group_id].append(instance)
duplicated_instances = []
for group_id, instances in render_layers_by_group_id.items():
if len(instances) > 1:
duplicated_instances.append((group_id, instances))
if not duplicated_instances:
return
# Exception message preparations
groups_data = context.data["groupsData"]
groups_by_id = {
group["group_id"]: group
for group in groups_data
}
per_group_msgs = []
groups_information_lines = []
for group_id, instances in duplicated_instances:
group = groups_by_id[group_id]
group_label = "Group \"{}\" ({})".format(
group["name"],
group["group_id"],
)
line_join_subset_names = "\n".join([
f" - {instance['subset']}"
for instance in instances
])
joined_subset_names = ", ".join([
f"\"{instance['subset']}\""
for instance in instances
])
per_group_msgs.append(
"{} < {} >".format(group_label, joined_subset_names)
)
groups_information_lines.append(
"<b>{}</b>\n{}".format(group_label, line_join_subset_names)
)
# Raise an error
raise PublishXmlValidationError(
self,
(
"More than one Render Layer is using the same TVPaint"
" group color. {}"
).format(" | ".join(per_group_msgs)),
formatting_data={
"groups_information": "\n".join(groups_information_lines)
}
)

View file

@ -85,6 +85,5 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
),
"expected_group": correct_group["name"],
"layer_names": ", ".join(invalid_layer_names)
}
)

View file

@ -42,7 +42,7 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin):
"expected_width": expected_data["resolutionWidth"],
"expected_height": expected_data["resolutionHeight"],
"current_width": scene_data["resolutionWidth"],
"current_height": scene_data["resolutionWidth"],
"current_height": scene_data["resolutionHeight"],
"expected_pixel_ratio": expected_data["pixelAspect"],
"current_pixel_ratio": scene_data["pixelAspect"]
}

View file

@ -1,5 +1,9 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError, registered_host
from openpype.pipeline import (
PublishXmlValidationError,
PublishValidationError,
registered_host,
)
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
@ -27,13 +31,18 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):
actions = [ValidateWorkfileMetadataRepair]
required_keys = {"project", "asset", "task"}
required_keys = {"project_name", "asset_name", "task_name"}
def process(self, context):
workfile_context = context.data["workfile_context"]
if not workfile_context:
raise AssertionError(
"Current workfile is missing whole metadata about context."
raise PublishValidationError(
"Current workfile is missing whole metadata about context.",
"Missing context",
(
"Current workfile is missing metadata about task."
" To fix this issue save the file using Workfiles tool."
)
)
missing_keys = []

View file

@ -1,4 +1,3 @@
import os
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
@ -16,15 +15,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
def process(self, context):
workfile_context = context.data.get("workfile_context")
# If workfile context is missing then the project matches the
# `AVALON_PROJECT` value for 100%
# global project
if not workfile_context:
self.log.info(
"Workfile context (\"workfile_context\") is not filled."
)
return
workfile_project_name = workfile_context["project"]
env_project_name = os.environ["AVALON_PROJECT"]
workfile_project_name = workfile_context["project_name"]
env_project_name = context.data["projectName"]
if workfile_project_name == env_project_name:
self.log.info((
"Both workfile project and environment project are same. {}"

View file

@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
"""Unreal Editor OpenPype host API."""
from .plugin import Loader
from .plugin import (
UnrealActorCreator,
UnrealAssetCreator,
Loader
)
from .pipeline import (
install,

View file

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
import os
import json
import logging
from typing import List
from contextlib import contextmanager
import semver
import time
import pyblish.api
@ -16,13 +18,14 @@ from openpype.pipeline import (
)
from openpype.tools.utils import host_tools
import openpype.hosts.unreal
from openpype.host import HostBase, ILoadHost
from openpype.host import HostBase, ILoadHost, IPublishHost
import unreal # noqa
logger = logging.getLogger("openpype.hosts.unreal")
OPENPYPE_CONTAINERS = "OpenPypeContainers"
CONTEXT_CONTAINER = "OpenPype/context.json"
UNREAL_VERSION = semver.VersionInfo(
*os.getenv("OPENPYPE_UNREAL_VERSION").split(".")
)
@ -35,7 +38,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class UnrealHost(HostBase, ILoadHost):
class UnrealHost(HostBase, ILoadHost, IPublishHost):
"""Unreal host implementation.
For some time this class will re-use functions from module based
@ -60,6 +63,32 @@ class UnrealHost(HostBase, ILoadHost):
show_tools_dialog()
def update_context_data(self, data, changes):
content_path = unreal.Paths.project_content_dir()
op_ctx = content_path + CONTEXT_CONTAINER
attempts = 3
for i in range(attempts):
try:
with open(op_ctx, "w+") as f:
json.dump(data, f)
break
except IOError:
if i == attempts - 1:
raise Exception("Failed to write context data. Aborting.")
unreal.log_warning("Failed to write context data. Retrying...")
time.sleep(3)
continue
def get_context_data(self):
content_path = unreal.Paths.project_content_dir()
op_ctx = content_path + CONTEXT_CONTAINER
if not os.path.isfile(op_ctx):
return {}
with open(op_ctx, "r") as fp:
data = json.load(fp)
return data
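# Minimal round-trip sketch of the context data methods above, assuming
# a running Unreal editor session (the data dict is illustrative):
# host = UnrealHost()
# host.update_context_data({"project_name": "demo"}, changes=None)
# assert host.get_context_data() == {"project_name": "demo"}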
def install():
"""Install Unreal configuration for OpenPype."""
@ -133,6 +162,31 @@ def ls():
yield data
def ls_inst():
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# UE 5.1 changed how class name is specified
class_name = [
"/Script/OpenPype",
"OpenPypePublishInstance"
] if (
UNREAL_VERSION.major == 5
and UNREAL_VERSION.minor > 0
) else "OpenPypePublishInstance" # noqa
instances = ar.get_assets_by_class(class_name, True)
# get_asset_by_class returns AssetData. To get all metadata we need to
# load asset. get_tag_values() works only on metadata registered in
# Asset Registry Project settings (and there is no way to set it with
# python short of editing ini configuration file).
for asset_data in instances:
asset = asset_data.get_asset()
data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
data["objectName"] = asset_data.asset_name
data = cast_map_to_str_dict(data)
yield data
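# Minimal usage sketch, assuming publish instances exist in the project:
# for inst_data in ls_inst():
#     unreal.log(inst_data.get("creator_identifier"))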
def parse_container(container):
"""To get data from container, AssetContainer must be loaded.

View file

@ -1,7 +1,245 @@
# -*- coding: utf-8 -*-
from abc import ABC
import ast
import collections
import sys
import six
from abc import (
ABC,
ABCMeta,
)
from openpype.pipeline import LoaderPlugin
import unreal
from .pipeline import (
create_publish_instance,
imprint,
ls_inst,
UNREAL_VERSION
)
from openpype.lib import (
BoolDef,
UILabelDef
)
from openpype.pipeline import (
Creator,
LoaderPlugin,
CreatorError,
CreatedInstance
)
@six.add_metaclass(ABCMeta)
class UnrealBaseCreator(Creator):
"""Base class for Unreal creator plugins."""
root = "/Game/OpenPype/PublishInstances"
suffix = "_INS"
@staticmethod
def cache_subsets(shared_data):
"""Cache instances for Creators to shared data.
Create `unreal_cached_subsets` key when needed in shared data and
fill it with all collected instances from the scene under their
respective creator identifiers.
If legacy instances are detected in the scene, create
`unreal_cached_legacy_subsets` there and fill it with
all legacy subsets under family as a key.
Args:
shared_data (Dict[str, Any]): Shared data.
Returns:
Dict[str, Any]: Shared data dictionary.
"""
if shared_data.get("unreal_cached_subsets") is None:
unreal_cached_subsets = collections.defaultdict(list)
unreal_cached_legacy_subsets = collections.defaultdict(list)
for instance in ls_inst():
creator_id = instance.get("creator_identifier")
if creator_id:
unreal_cached_subsets[creator_id].append(instance)
else:
family = instance.get("family")
unreal_cached_legacy_subsets[family].append(instance)
shared_data["unreal_cached_subsets"] = unreal_cached_subsets
shared_data["unreal_cached_legacy_subsets"] = (
unreal_cached_legacy_subsets
)
return shared_data
def create(self, subset_name, instance_data, pre_create_data):
try:
instance_name = f"{subset_name}{self.suffix}"
pub_instance = create_publish_instance(instance_name, self.root)
instance_data["subset"] = subset_name
instance_data["instance_path"] = f"{self.root}/{instance_name}"
instance = CreatedInstance(
self.family,
subset_name,
instance_data,
self)
self._add_instance_to_context(instance)
pub_instance.set_editor_property('add_external_assets', True)
assets = pub_instance.get_editor_property('asset_data_external')
ar = unreal.AssetRegistryHelpers.get_asset_registry()
for member in pre_create_data.get("members", []):
obj = ar.get_asset_by_object_path(member).get_asset()
assets.add(obj)
imprint(f"{self.root}/{instance_name}", instance.data_to_store())
return instance
except Exception as er:
six.reraise(
CreatorError,
CreatorError(f"Creator error: {er}"),
sys.exc_info()[2])
def collect_instances(self):
# cache instances if missing
self.cache_subsets(self.collection_shared_data)
for instance in self.collection_shared_data[
"unreal_cached_subsets"].get(self.identifier, []):
# Unreal saves metadata as string, so we need to convert it back
instance['creator_attributes'] = ast.literal_eval(
instance.get('creator_attributes', '{}'))
instance['publish_attributes'] = ast.literal_eval(
instance.get('publish_attributes', '{}'))
created_instance = CreatedInstance.from_existing(instance, self)
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, changes in update_list:
instance_node = created_inst.get("instance_path", "")
if not instance_node:
unreal.log_warning(
f"Instance node not found for {created_inst}")
continue
new_values = {
key: changes[key].new_value
for key in changes.changed_keys
}
imprint(
instance_node,
new_values
)
def remove_instances(self, instances):
for instance in instances:
instance_node = instance.data.get("instance_path", "")
if instance_node:
unreal.EditorAssetLibrary.delete_asset(instance_node)
self._remove_instance_from_context(instance)
@six.add_metaclass(ABCMeta)
class UnrealAssetCreator(UnrealBaseCreator):
"""Base class for Unreal creator plugins based on assets."""
def create(self, subset_name, instance_data, pre_create_data):
"""Create instance of the asset.
Args:
subset_name (str): Name of the subset.
instance_data (dict): Data for the instance.
pre_create_data (dict): Pre-create data for the instance.
Returns:
CreatedInstance: Created instance.
"""
try:
# Check if instance data has members, filled by the plugin.
# If not, use selection.
if not pre_create_data.get("members"):
pre_create_data["members"] = []
if pre_create_data.get("use_selection"):
utilib = unreal.EditorUtilityLibrary
sel_objects = utilib.get_selected_assets()
pre_create_data["members"] = [
a.get_path_name() for a in sel_objects]
super(UnrealAssetCreator, self).create(
subset_name,
instance_data,
pre_create_data)
except Exception as er:
six.reraise(
CreatorError,
CreatorError(f"Creator error: {er}"),
sys.exc_info()[2])
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection", label="Use selection", default=True)
]
@six.add_metaclass(ABCMeta)
class UnrealActorCreator(UnrealBaseCreator):
"""Base class for Unreal creator plugins based on actors."""
def create(self, subset_name, instance_data, pre_create_data):
"""Create instance of the asset.
Args:
subset_name (str): Name of the subset.
instance_data (dict): Data for the instance.
pre_create_data (dict): Pre-create data for the instance.
Returns:
CreatedInstance: Created instance.
"""
try:
if UNREAL_VERSION.major == 5:
world = unreal.UnrealEditorSubsystem().get_editor_world()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
# Check if the level is saved
if world.get_path_name().startswith("/Temp/"):
raise CreatorError(
"Level must be saved before creating instances.")
# Check if instance data has members, filled by the plugin.
# If not, use selection.
if not instance_data.get("members"):
actor_subsystem = unreal.EditorActorSubsystem()
sel_actors = actor_subsystem.get_selected_level_actors()
selection = [a.get_path_name() for a in sel_actors]
instance_data["members"] = selection
instance_data["level"] = world.get_path_name()
super(UnrealActorCreator, self).create(
subset_name,
instance_data,
pre_create_data)
except Exception as er:
six.reraise(
CreatorError,
CreatorError(f"Creator error: {er}"),
sys.exc_info()[2])
def get_pre_create_attr_defs(self):
return [
UILabelDef("Select actors to create instance from them.")
]
class Loader(LoaderPlugin, ABC):

View file

@ -17,9 +17,8 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(ToolsBtnsWidget, self).__init__(parent)
create_btn = QtWidgets.QPushButton("Create...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
publish_btn = QtWidgets.QPushButton("Publisher...", self)
manage_btn = QtWidgets.QPushButton("Manage...", self)
render_btn = QtWidgets.QPushButton("Render...", self)
experimental_tools_btn = QtWidgets.QPushButton(
@ -28,7 +27,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(create_btn, 0)
layout.addWidget(load_btn, 0)
layout.addWidget(publish_btn, 0)
layout.addWidget(manage_btn, 0)
@ -36,7 +34,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
layout.addWidget(experimental_tools_btn, 0)
layout.addStretch(1)
create_btn.clicked.connect(self._on_create)
load_btn.clicked.connect(self._on_load)
publish_btn.clicked.connect(self._on_publish)
manage_btn.clicked.connect(self._on_manage)
@ -50,7 +47,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
self.tool_required.emit("loader")
def _on_publish(self):
self.tool_required.emit("publish")
self.tool_required.emit("publisher")
def _on_manage(self):
self.tool_required.emit("sceneinventory")

View file

@ -1,41 +1,38 @@
# -*- coding: utf-8 -*-
import unreal
from unreal import EditorAssetLibrary as eal
from unreal import EditorLevelLibrary as ell
from openpype.hosts.unreal.api.pipeline import instantiate
from openpype.pipeline import LegacyCreator
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator,
)
class CreateCamera(LegacyCreator):
"""Layout output for character rigs"""
class CreateCamera(UnrealAssetCreator):
"""Create Camera."""
name = "layoutMain"
identifier = "io.openpype.creators.unreal.camera"
label = "Camera"
family = "camera"
icon = "cubes"
icon = "fa.camera"
root = "/Game/OpenPype/Instances"
suffix = "_INS"
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
def __init__(self, *args, **kwargs):
super(CreateCamera, self).__init__(*args, **kwargs)
if len(selection) != 1:
raise CreatorError("Please select only one object.")
def process(self):
data = self.data
# Add the current level path to the metadata
if UNREAL_VERSION.major == 5:
world = unreal.UnrealEditorSubsystem().get_editor_world()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
name = data["subset"]
instance_data["level"] = world.get_path_name()
data["level"] = ell.get_editor_world().get_path_name()
if not eal.does_directory_exist(self.root):
eal.make_directory(self.root)
factory = unreal.LevelSequenceFactoryNew()
tools = unreal.AssetToolsHelpers().get_asset_tools()
tools.create_asset(name, f"{self.root}/{name}", None, factory)
asset_name = f"{self.root}/{name}/{name}.{name}"
data["members"] = [asset_name]
instantiate(f"{self.root}", name, data, None, self.suffix)
super(CreateCamera, self).create(
subset_name,
instance_data,
pre_create_data)

View file

@ -1,42 +1,13 @@
# -*- coding: utf-8 -*-
from unreal import EditorLevelLibrary
from openpype.pipeline import LegacyCreator
from openpype.hosts.unreal.api.pipeline import instantiate
from openpype.hosts.unreal.api.plugin import (
UnrealActorCreator,
)
class CreateLayout(LegacyCreator):
class CreateLayout(UnrealActorCreator):
"""Layout output for character rigs."""
name = "layoutMain"
identifier = "io.openpype.creators.unreal.layout"
label = "Layout"
family = "layout"
icon = "cubes"
root = "/Game"
suffix = "_INS"
def __init__(self, *args, **kwargs):
super(CreateLayout, self).__init__(*args, **kwargs)
def process(self):
data = self.data
name = data["subset"]
selection = []
# if (self.options or {}).get("useSelection"):
# sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
# selection = [a.get_path_name() for a in sel_objects]
data["level"] = EditorLevelLibrary.get_editor_world().get_path_name()
data["members"] = []
if (self.options or {}).get("useSelection"):
# Set as members the selected actors
for actor in EditorLevelLibrary.get_selected_level_actors():
data["members"].append("{}.{}".format(
actor.get_outer().get_name(), actor.get_name()))
instantiate(self.root, name, data, selection, self.suffix)

View file

@ -1,56 +1,57 @@
# -*- coding: utf-8 -*-
"""Create look in Unreal."""
import unreal # noqa
from openpype.hosts.unreal.api import pipeline, plugin
from openpype.pipeline import LegacyCreator
import unreal
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import (
create_folder
)
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator
)
from openpype.lib import UILabelDef
class CreateLook(LegacyCreator):
class CreateLook(UnrealAssetCreator):
"""Shader connections defining shape look."""
name = "unrealLook"
label = "Unreal - Look"
identifier = "io.openpype.creators.unreal.look"
label = "Look"
family = "look"
icon = "paint-brush"
root = "/Game/Avalon/Assets"
suffix = "_INS"
def create(self, subset_name, instance_data, pre_create_data):
# We need to set this to True for the parent class to work
pre_create_data["use_selection"] = True
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
def __init__(self, *args, **kwargs):
super(CreateLook, self).__init__(*args, **kwargs)
if len(selection) != 1:
raise CreatorError("Please select only one asset.")
def process(self):
name = self.data["subset"]
selected_asset = selection[0]
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
look_directory = "/Game/OpenPype/Looks"
# Create the folder
path = f"{self.root}/{self.data['asset']}"
new_name = pipeline.create_folder(path, name)
full_path = f"{path}/{new_name}"
folder_name = create_folder(look_directory, subset_name)
path = f"{look_directory}/{folder_name}"
instance_data["look"] = path
# Create a new cube static mesh
ar = unreal.AssetRegistryHelpers.get_asset_registry()
cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube")
# Create the avalon publish instance object
container_name = f"{name}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=full_path)
# Get the mesh of the selected object
original_mesh = ar.get_asset_by_object_path(selection[0]).get_asset()
materials = original_mesh.get_editor_property('materials')
original_mesh = ar.get_asset_by_object_path(selected_asset).get_asset()
materials = original_mesh.get_editor_property('static_materials')
self.data["members"] = []
pre_create_data["members"] = []
# Add the materials to the cube
for material in materials:
name = material.get_editor_property('material_slot_name')
object_path = f"{full_path}/{name}.{name}"
mat_name = material.get_editor_property('material_slot_name')
object_path = f"{path}/{mat_name}.{mat_name}"
unreal_object = unreal.EditorAssetLibrary.duplicate_loaded_asset(
cube.get_asset(), object_path
)
@ -61,8 +62,16 @@ class CreateLook(LegacyCreator):
unreal_object.add_material(
material.get_editor_property('material_interface'))
self.data["members"].append(object_path)
pre_create_data["members"].append(object_path)
unreal.EditorAssetLibrary.save_asset(object_path)
pipeline.imprint(f"{full_path}/{container_name}", self.data)
super(CreateLook, self).create(
subset_name,
instance_data,
pre_create_data)
def get_pre_create_attr_defs(self):
return [
UILabelDef("Select the asset from which to create the look.")
]

View file

@ -1,117 +1,138 @@
# -*- coding: utf-8 -*-
import unreal
from openpype.hosts.unreal.api import pipeline
from openpype.pipeline import LegacyCreator
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.pipeline import (
get_subsequences
)
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator
)
from openpype.lib import UILabelDef
class CreateRender(LegacyCreator):
class CreateRender(UnrealAssetCreator):
"""Create instance for sequence for rendering"""
name = "unrealRender"
label = "Unreal - Render"
identifier = "io.openpype.creators.unreal.render"
label = "Render"
family = "render"
icon = "cube"
asset_types = ["LevelSequence"]
root = "/Game/OpenPype/PublishInstances"
suffix = "_INS"
def process(self):
subset = self.data["subset"]
icon = "eye"
def create(self, subset_name, instance_data, pre_create_data):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# The asset name is the third element of the path which contains
# the map.
# The index of the split path is 3 because the first element is an
# empty string, as the path begins with "/Content".
a = unreal.EditorUtilityLibrary.get_selected_assets()[0]
asset_name = a.get_path_name().split("/")[3]
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [
a.get_path_name() for a in sel_objects
if a.get_class().get_name() == "LevelSequence"]
# Get the master sequence and the master level.
# There should be only one sequence and one level in the directory.
filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
sequences = ar.get_assets(filter)
ms = sequences[0].get_editor_property('object_path')
filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
levels = ar.get_assets(filter)
ml = levels[0].get_editor_property('object_path')
if not selection:
raise CreatorError("Please select at least one Level Sequence.")
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [
a.get_path_name() for a in sel_objects
if a.get_class().get_name() in self.asset_types]
else:
selection.append(self.data['sequence'])
seq_data = None
unreal.log(f"selection: {selection}")
for sel in selection:
selected_asset = ar.get_asset_by_object_path(sel).get_asset()
selected_asset_path = selected_asset.get_path_name()
path = f"{self.root}"
unreal.EditorAssetLibrary.make_directory(path)
# Check if the selected asset is a level sequence asset.
if selected_asset.get_class().get_name() != "LevelSequence":
unreal.log_warning(
f"Skipping {selected_asset.get_name()}. It isn't a Level "
"Sequence.")
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# The asset name is the third element of the path which
# contains the map.
# To take the asset name, we remove from the path the prefix
# "/Game/OpenPype/" and then we split the path by "/".
sel_path = selected_asset_path
asset_name = sel_path.replace("/Game/OpenPype/", "").split("/")[0]
for a in selection:
ms_obj = ar.get_asset_by_object_path(ms).get_asset()
# Get the master sequence and the master level.
# There should be only one sequence and one level in the directory.
ar_filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
sequences = ar.get_assets(ar_filter)
master_seq = sequences[0].get_asset().get_path_name()
master_seq_obj = sequences[0].get_asset()
ar_filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"/Game/OpenPype/{asset_name}"],
recursive_paths=False)
levels = ar.get_assets(ar_filter)
master_lvl = levels[0].get_asset().get_path_name()
seq_data = None
# If the selected asset is the master sequence, we get its data
# and then we create the instance for the master sequence.
# Otherwise, we cycle from the master sequence to find the selected
# sequence and we get its data. This data will be used to create
# the instance for the selected sequence. In particular,
# we get the frame range of the selected sequence and its final
# output path.
master_seq_data = {
"sequence": master_seq_obj,
"output": f"{master_seq_obj.get_name()}",
"frame_range": (
master_seq_obj.get_playback_start(),
master_seq_obj.get_playback_end())}
if a == ms:
seq_data = {
"sequence": ms_obj,
"output": f"{ms_obj.get_name()}",
"frame_range": (
ms_obj.get_playback_start(), ms_obj.get_playback_end())
}
if selected_asset_path == master_seq:
seq_data = master_seq_data
else:
seq_data_list = [{
"sequence": ms_obj,
"output": f"{ms_obj.get_name()}",
"frame_range": (
ms_obj.get_playback_start(), ms_obj.get_playback_end())
}]
seq_data_list = [master_seq_data]
for s in seq_data_list:
subscenes = pipeline.get_subsequences(s.get('sequence'))
for seq in seq_data_list:
subscenes = get_subsequences(seq.get('sequence'))
for ss in subscenes:
for sub_seq in subscenes:
sub_seq_obj = sub_seq.get_sequence()
curr_data = {
"sequence": ss.get_sequence(),
"output": (f"{s.get('output')}/"
f"{ss.get_sequence().get_name()}"),
"sequence": sub_seq_obj,
"output": (f"{seq.get('output')}/"
f"{sub_seq_obj.get_name()}"),
"frame_range": (
ss.get_start_frame(), ss.get_end_frame() - 1)
}
sub_seq.get_start_frame(),
sub_seq.get_end_frame() - 1)}
if ss.get_sequence().get_path_name() == a:
# If the selected asset is the current sub-sequence,
# we get its data and we break the loop.
# Otherwise, we add the current sub-sequence data to
# the list of sequences to check.
if sub_seq_obj.get_path_name() == selected_asset_path:
seq_data = curr_data
break
seq_data_list.append(curr_data)
# If we found the selected asset, we break the loop.
if seq_data is not None:
break
# If we didn't find the selected asset, we don't create the
# instance.
if not seq_data:
unreal.log_warning(
f"Skipping {selected_asset.get_name()}. It isn't a "
"sub-sequence of the master sequence.")
continue
d = self.data.copy()
d["members"] = [a]
d["sequence"] = a
d["master_sequence"] = ms
d["master_level"] = ml
d["output"] = seq_data.get('output')
d["frameStart"] = seq_data.get('frame_range')[0]
d["frameEnd"] = seq_data.get('frame_range')[1]
instance_data["members"] = [selected_asset_path]
instance_data["sequence"] = selected_asset_path
instance_data["master_sequence"] = master_seq
instance_data["master_level"] = master_lvl
instance_data["output"] = seq_data.get('output')
instance_data["frameStart"] = seq_data.get('frame_range')[0]
instance_data["frameEnd"] = seq_data.get('frame_range')[1]
container_name = f"{subset}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=path)
pipeline.imprint(f"{path}/{container_name}", d)
super(CreateRender, self).create(
subset_name,
instance_data,
pre_create_data)
def get_pre_create_attr_defs(self):
return [
UILabelDef("Select the sequence to render.")
]

View file

@ -1,35 +1,13 @@
# -*- coding: utf-8 -*-
"""Create Static Meshes as FBX geometry."""
import unreal # noqa
from openpype.hosts.unreal.api.pipeline import (
instantiate,
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator,
)
from openpype.pipeline import LegacyCreator
class CreateStaticMeshFBX(LegacyCreator):
"""Static FBX geometry."""
class CreateStaticMeshFBX(UnrealAssetCreator):
"""Create Static Meshes as FBX geometry."""
name = "unrealStaticMeshMain"
label = "Unreal - Static Mesh"
identifier = "io.openpype.creators.unreal.staticmeshfbx"
label = "Static Mesh (FBX)"
family = "unrealStaticMesh"
icon = "cube"
asset_types = ["StaticMesh"]
root = "/Game"
suffix = "_INS"
def __init__(self, *args, **kwargs):
super(CreateStaticMeshFBX, self).__init__(*args, **kwargs)
def process(self):
name = self.data["subset"]
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
unreal.log("selection: {}".format(selection))
instantiate(self.root, name, self.data, selection, self.suffix)

View file

@ -1,41 +1,31 @@
"""Create UAsset."""
# -*- coding: utf-8 -*-
from pathlib import Path
import unreal
from openpype.hosts.unreal.api import pipeline
from openpype.pipeline import LegacyCreator
from openpype.pipeline import CreatorError
from openpype.hosts.unreal.api.plugin import (
UnrealAssetCreator,
)
class CreateUAsset(LegacyCreator):
"""UAsset."""
class CreateUAsset(UnrealAssetCreator):
"""Create UAsset."""
name = "UAsset"
identifier = "io.openpype.creators.unreal.uasset"
label = "UAsset"
family = "uasset"
icon = "cube"
root = "/Game/OpenPype"
suffix = "_INS"
def create(self, subset_name, instance_data, pre_create_data):
if pre_create_data.get("use_selection"):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
def __init__(self, *args, **kwargs):
super(CreateUAsset, self).__init__(*args, **kwargs)
def process(self):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
subset = self.data["subset"]
path = f"{self.root}/PublishInstances/"
unreal.EditorAssetLibrary.make_directory(path)
selection = []
if (self.options or {}).get("useSelection"):
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
selection = [a.get_path_name() for a in sel_objects]
if len(selection) != 1:
raise RuntimeError("Please select only one object.")
raise CreatorError("Please select only one object.")
obj = selection[0]
@ -43,19 +33,14 @@ class CreateUAsset(LegacyCreator):
sys_path = unreal.SystemLibrary.get_system_path(asset)
if not sys_path:
raise RuntimeError(
raise CreatorError(
f"{Path(obj).name} is not on the disk. Likely it needs to"
"be saved first.")
if Path(sys_path).suffix != ".uasset":
raise RuntimeError(f"{Path(sys_path).name} is not a UAsset.")
raise CreatorError(f"{Path(sys_path).name} is not a UAsset.")
unreal.log("selection: {}".format(selection))
container_name = f"{subset}{self.suffix}"
pipeline.create_publish_instance(
instance=container_name, path=path)
data = self.data.copy()
data["members"] = selection
pipeline.imprint(f"{path}/{container_name}", data)
super(CreateUAsset, self).create(
subset_name,
instance_data,
pre_create_data)

View file

@ -0,0 +1,46 @@
import unreal
import pyblish.api
class CollectInstanceMembers(pyblish.api.InstancePlugin):
"""
Collect members of instance.
This collector will collect the assets for the families that support
including them as External Data, and will add them to the instance
as members.
"""
order = pyblish.api.CollectorOrder + 0.1
hosts = ["unreal"]
families = ["camera", "look", "unrealStaticMesh", "uasset"]
label = "Collect Instance Members"
def process(self, instance):
"""Collect members of instance."""
self.log.info("Collecting instance members")
ar = unreal.AssetRegistryHelpers.get_asset_registry()
inst_path = instance.data.get('instance_path')
inst_name = instance.data.get('objectName')
pub_instance = ar.get_asset_by_object_path(
f"{inst_path}.{inst_name}").get_asset()
if not pub_instance:
self.log.error(f"{inst_path}.{inst_name}")
raise RuntimeError(f"Instance {instance} not found.")
if not pub_instance.get_editor_property("add_external_assets"):
# No external assets in the instance
return
assets = pub_instance.get_editor_property('asset_data_external')
members = [asset.get_path_name() for asset in assets]
self.log.debug(f"Members: {members}")
instance.data["members"] = members

View file

@ -1,67 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect publishable instances in Unreal."""
import ast
import unreal # noqa
import pyblish.api
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
from openpype.pipeline.publish import KnownPublishError
class CollectInstances(pyblish.api.ContextPlugin):
"""Gather instances by OpenPypePublishInstance class
This collector finds all paths containing `OpenPypePublishInstance` class
asset
Identifier:
id (str): "pyblish.avalon.instance"
"""
label = "Collect Instances"
order = pyblish.api.CollectorOrder - 0.1
hosts = ["unreal"]
def process(self, context):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
class_name = [
"/Script/OpenPype",
"OpenPypePublishInstance"
] if (
UNREAL_VERSION.major == 5
and UNREAL_VERSION.minor > 0
) else "OpenPypePublishInstance" # noqa
instance_containers = ar.get_assets_by_class(class_name, True)
for container_data in instance_containers:
asset = container_data.get_asset()
data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
data["objectName"] = container_data.asset_name
# convert to strings
data = {str(key): str(value) for (key, value) in data.items()}
if not data.get("family"):
raise KnownPublishError("instance has no family")
# content of container
members = ast.literal_eval(data.get("members"))
self.log.debug(members)
self.log.debug(asset.get_path_name())
# remove instance container
self.log.info("Creating instance for {}".format(asset.get_name()))
instance = context.create_instance(asset.get_name())
instance[:] = members
# Store the exact members of the object set
instance.data["setMembers"] = members
instance.data["families"] = [data.get("family")]
instance.data["level"] = data.get("level")
instance.data["parent"] = data.get("parent")
label = "{0} ({1})".format(asset.get_name()[:-4],
data["asset"])
instance.data["label"] = label
instance.data.update(data)

View file

@ -3,10 +3,9 @@
import os
import unreal
from unreal import EditorAssetLibrary as eal
from unreal import EditorLevelLibrary as ell
from openpype.pipeline import publish
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
class ExtractCamera(publish.Extractor):
@ -18,6 +17,8 @@ class ExtractCamera(publish.Extractor):
optional = True
def process(self, instance):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
# Define extract output file path
staging_dir = self.staging_dir(instance)
fbx_filename = "{}.fbx".format(instance.name)
@ -26,23 +27,54 @@ class ExtractCamera(publish.Extractor):
self.log.info("Performing extraction..")
# Check if the loaded level is the same of the instance
current_level = ell.get_editor_world().get_path_name()
if UNREAL_VERSION.major == 5:
world = unreal.UnrealEditorSubsystem().get_editor_world()
else:
world = unreal.EditorLevelLibrary.get_editor_world()
current_level = world.get_path_name()
assert current_level == instance.data.get("level"), \
"Wrong level loaded"
for member in instance[:]:
data = eal.find_asset_data(member)
if data.asset_class == "LevelSequence":
ar = unreal.AssetRegistryHelpers.get_asset_registry()
sequence = ar.get_asset_by_object_path(member).get_asset()
unreal.SequencerTools.export_fbx(
ell.get_editor_world(),
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
os.path.join(staging_dir, fbx_filename)
)
break
for member in instance.data.get('members'):
data = ar.get_asset_by_object_path(member)
if UNREAL_VERSION.major == 5:
is_level_sequence = (
data.asset_class_path.asset_name == "LevelSequence")
else:
is_level_sequence = (data.asset_class == "LevelSequence")
if is_level_sequence:
sequence = data.get_asset()
if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor >= 1:
params = unreal.SequencerExportFBXParams(
world=world,
root_sequence=sequence,
sequence=sequence,
bindings=sequence.get_bindings(),
master_tracks=sequence.get_master_tracks(),
fbx_file_name=os.path.join(staging_dir, fbx_filename)
)
unreal.SequencerTools.export_level_sequence_fbx(params)
elif UNREAL_VERSION.major == 4 and UNREAL_VERSION.minor == 26:
unreal.SequencerTools.export_fbx(
world,
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
os.path.join(staging_dir, fbx_filename)
)
else:
# Unreal 5.0 or 4.27
unreal.SequencerTools.export_level_sequence_fbx(
world,
sequence,
sequence.get_bindings(),
unreal.FbxExportOption(),
os.path.join(staging_dir, fbx_filename)
)
if not os.path.isfile(os.path.join(staging_dir, fbx_filename)):
raise RuntimeError("Failed to extract camera")
if "representations" not in instance.data:
instance.data["representations"] = []

View file

@ -29,13 +29,13 @@ class ExtractLook(publish.Extractor):
for member in instance:
asset = ar.get_asset_by_object_path(member)
object = asset.get_asset()
obj = asset.get_asset()
name = asset.get_editor_property('asset_name')
json_element = {'material': str(name)}
material_obj = object.get_editor_property('static_materials')[0]
material_obj = obj.get_editor_property('static_materials')[0]
material = material_obj.material_interface
base_color = mat_lib.get_material_property_input_node(

View file

@ -22,7 +22,13 @@ class ExtractUAsset(publish.Extractor):
staging_dir = self.staging_dir(instance)
filename = "{}.uasset".format(instance.name)
obj = instance[0]
members = instance.data.get("members", [])
if not members:
raise RuntimeError("No members found in instance.")
# UAsset publishing supports only one member
obj = members[0]
asset = ar.get_asset_by_object_path(obj).get_asset()
sys_path = unreal.SystemLibrary.get_system_path(asset)

View file

@ -81,11 +81,14 @@ def run_subprocess(*args, **kwargs):
Entered arguments and keyword arguments are passed to subprocess Popen.
On windows are 'creationflags' filled with flags that should cause ignore
creation of new window.
Args:
*args: Variable length arument list passed to Popen.
*args: Variable length argument list passed to Popen.
**kwargs : Arbitrary keyword arguments passed to Popen. Is possible to
pass `logging.Logger` object under "logger" if want to use
different than lib's logger.
pass `logging.Logger` object under "logger" to use custom logger
for output.
Returns:
str: Full output of subprocess concatenated stdout and stderr.
@ -95,6 +98,17 @@ def run_subprocess(*args, **kwargs):
return code.
"""
# Modify creation flags on windows to hide console window if in UI mode
if (
platform.system().lower() == "windows"
and "creationflags" not in kwargs
):
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
# Get environments from kwargs or use current process environments if they
# were not passed.
env = kwargs.get("env") or os.environ
@ -107,10 +121,10 @@ def run_subprocess(*args, **kwargs):
logger = Logger.get_logger("run_subprocess")
# set overrides
kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE)
kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
kwargs['env'] = filtered_env
kwargs["stdout"] = kwargs.get("stdout", subprocess.PIPE)
kwargs["stderr"] = kwargs.get("stderr", subprocess.PIPE)
kwargs["stdin"] = kwargs.get("stdin", subprocess.PIPE)
kwargs["env"] = filtered_env
proc = subprocess.Popen(*args, **kwargs)
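A hedged usage sketch of the wrapper above (hypothetical command; `run_subprocess` is importable from `openpype.lib`):

    from openpype.lib import run_subprocess

    # Full stdout/stderr output is returned; a non-zero return code raises.
    output = run_subprocess(["ffmpeg", "-version"])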

View file

@ -28,6 +28,7 @@ def import_filepath(filepath, module_name=None):
# Prepare module object where content of file will be parsed
module = types.ModuleType(module_name)
module.__file__ = filepath
if six.PY3:
# Use loader so module has full specs
@ -41,7 +42,6 @@ def import_filepath(filepath, module_name=None):
# Execute content and store it to module object
six.exec_(_stream.read(), module.__dict__)
module.__file__ = filepath
return module

View file

@ -5,6 +5,7 @@ import json
import collections
import tempfile
import subprocess
import platform
import xml.etree.ElementTree
@ -745,11 +746,18 @@ def get_ffprobe_data(path_to_file, logger=None):
logger.debug("FFprobe command: {}".format(
subprocess.list2cmdline(args)
))
popen = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
kwargs = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
}
if platform.system().lower() == "windows":
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
popen = subprocess.Popen(args, **kwargs)
popen_stdout, popen_stderr = popen.communicate()
if popen_stdout:
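The same Windows creation-flags recipe now appears in `run_subprocess`, `get_ffprobe_data` and the burnins module further below; a small helper could centralize it. A sketch only, not part of this commit:

    import platform
    import subprocess

    def windows_detached_flags():
        """Creation flags that hide a console window on Windows; 0 elsewhere."""
        if platform.system().lower() != "windows":
            return 0
        return (
            subprocess.CREATE_NEW_PROCESS_GROUP
            | getattr(subprocess, "DETACHED_PROCESS", 0)
            | getattr(subprocess, "CREATE_NO_WINDOW", 0)
        )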
@ -1044,7 +1052,7 @@ def convert_colorspace(
output_path,
config_path,
source_colorspace,
target_colorspace,
target_colorspace=None,
view=None,
display=None,
additional_command_args=None,
@ -1092,7 +1100,7 @@ def convert_colorspace(
raise ValueError("Both screen and display must be set.")
if additional_command_args:
oiio_cmd.extend(split_cmd_args(additional_command_args))
oiio_cmd.extend(additional_command_args)
if target_colorspace:
oiio_cmd.extend(["--colorconvert",

View file

@ -0,0 +1,218 @@
import os
import getpass
import copy
import attr
from openpype.pipeline import legacy_io
from openpype.settings import get_project_settings
from openpype.hosts.max.api.lib import (
get_current_renderer,
get_multipass_setting
)
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
class MaxPluginInfo(object):
SceneFile = attr.ib(default=None) # Input
Version = attr.ib(default=None) # Mandatory for Deadline
SaveFile = attr.ib(default=True)
IgnoreInputs = attr.ib(default=True)
class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
label = "Submit Render to Deadline"
hosts = ["max"]
families = ["maxrender"]
targets = ["local"]
use_published = True
priority = 50
tile_priority = 50
chunk_size = 1
jobInfo = {}
pluginInfo = {}
group = None
deadline_pool = None
deadline_pool_secondary = None
framePerTask = 1
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="3dsmax")
# todo: test whether this works for existing production cases
# where custom jobInfo was stored in the project settings
job_info.update(self.jobInfo)
instance = self._instance
context = instance.context
# Always use the original work file name for the Job name even when
# rendering is done from the published Work File. The original work
# file name is clearer because it can also have subversion strings,
# etc. which are stripped for the published file.
src_filepath = context.data["currentFile"]
src_filename = os.path.basename(src_filepath)
job_info.Name = "%s - %s" % (src_filename, instance.name)
job_info.BatchName = src_filename
job_info.Plugin = instance.data["plugin"]
job_info.UserName = context.data.get("deadlineUser", getpass.getuser())
# Deadline requires integers in frame range
frames = "{start}-{end}".format(
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
)
job_info.Frames = frames
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.ChunkSize = instance.data.get("chunkSize", 1)
job_info.Comment = context.data.get("comment")
job_info.Priority = instance.data.get("priority", self.priority)
job_info.FramesPerTask = instance.data.get("framesPerTask", 1)
if self.group:
job_info.Group = self.group
# Add options from RenderGlobals
render_globals = instance.data.get("renderGlobals", {})
job_info.update(render_globals)
keys = [
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"OPENPYPE_SG_USER",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_VERSION",
"IS_TEST"
]
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
for key in keys:
value = environment.get(key)
if not value:
continue
job_info.EnvironmentKeyValue[key] = value
# to recognize the job as an OpenPype render job for turning events on/off
job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1"
job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1"
# Add list of expected files to job
# ---------------------------------
exp = instance.data.get("expectedFiles")
for filepath in exp:
job_info.OutputDirectory += os.path.dirname(filepath)
job_info.OutputFilename += os.path.basename(filepath)
return job_info
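For illustration (hypothetical path), a single expected file expands into the Deadline job keys like this:

    # exp == ["P:/renders/sh010/beauty.0001.exr"]
    # -> OutputDirectory0 = "P:/renders/sh010"
    # -> OutputFilename0  = "beauty.0001.exr"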
def get_plugin_info(self):
instance = self._instance
plugin_info = MaxPluginInfo(
SceneFile=self.scene_path,
Version=instance.data["maxversion"],
SaveFile=True,
IgnoreInputs=True
)
plugin_payload = attr.asdict(plugin_info)
# Patching with pluginInfo from settings
for key, value in self.pluginInfo.items():
plugin_payload[key] = value
return plugin_payload
def process_submission(self):
instance = self._instance
filepath = self.scene_path
expected_files = instance.data["expectedFiles"]
if not expected_files:
raise RuntimeError("No Render Elements found!")
output_dir = os.path.dirname(expected_files[0])
instance.data["outputDir"] = output_dir
instance.data["toBeRenderedOn"] = "deadline"
filename = os.path.basename(filepath)
payload_data = {
"filename": filename,
"dirname": output_dir
}
self.log.debug("Submitting 3dsMax render..")
payload = self._use_published_name(payload_data)
job_info, plugin_info = payload
self.submit(self.assemble_payload(job_info, plugin_info))
def _use_published_name(self, data):
instance = self._instance
job_info = copy.deepcopy(self.job_info)
plugin_info = copy.deepcopy(self.plugin_info)
plugin_data = {}
project_setting = get_project_settings(
legacy_io.Session["AVALON_PROJECT"]
)
multipass = get_multipass_setting(project_setting)
if multipass:
plugin_data["DisableMultipass"] = 0
else:
plugin_data["DisableMultipass"] = 1
expected_files = instance.data.get("expectedFiles")
if not expected_files:
raise RuntimeError("No render elements found")
old_output_dir = os.path.dirname(expected_files[0])
output_beauty = RenderSettings().get_render_output(instance.name,
old_output_dir)
filepath = self.from_published_scene()
def _clean_name(path):
return os.path.splitext(os.path.basename(path))[0]
new_scene = _clean_name(filepath)
orig_scene = _clean_name(instance.context.data["currentFile"])
output_beauty = output_beauty.replace(orig_scene, new_scene)
output_beauty = output_beauty.replace("\\", "/")
plugin_data["RenderOutput"] = output_beauty
renderer_class = get_current_renderer()
renderer = str(renderer_class).split(":")[0]
if renderer in [
"ART_Renderer",
"Redshift_Renderer",
"V_Ray_6_Hotfix_3",
"V_Ray_GPU_6_Hotfix_3",
"Default_Scanline_Renderer",
"Quicksilver_Hardware_Renderer",
]:
render_elem_list = RenderSettings().get_render_element()
for i, element in enumerate(render_elem_list):
element = element.replace(orig_scene, new_scene)
plugin_data["RenderElementOutputFilename%d" % i] = element # noqa
self.log.debug("plugin data:{}".format(plugin_data))
plugin_info.update(plugin_data)
return job_info, plugin_info
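For illustration (hypothetical scene names), the orig_scene/new_scene swap above repoints render outputs from the work file name to the published file name:

    # orig_scene = "sh010_comp_v001"
    # new_scene  = "sh010_comp_v001_publish"
    # "renders/sh010_comp_v001/beauty.exr"
    # -> "renders/sh010_comp_v001_publish/beauty.exr"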

View file

@ -266,7 +266,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"PYBLISHPLUGINPATH",
"NUKE_PATH",
"TOOL_ENV",
"FOUNDRY_LICENSE"
"FOUNDRY_LICENSE",
"OPENPYPE_SG_USER",
]
# Add OpenPype version if we are running from build.

View file

@ -118,15 +118,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
deadline_plugin = "OpenPype"
targets = ["local"]
hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"]
hosts = ["fusion", "max", "maya", "nuke",
"celaction", "aftereffects", "harmony"]
families = ["render.farm", "prerender.farm",
"renderlayer", "imagesequence", "vrayscene"]
"renderlayer", "imagesequence", "maxrender", "vrayscene"]
aov_filter = {"maya": [r".*([Bb]eauty).*"],
"aftereffects": [r".*"], # for everything from AE
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"]}
"celaction": [r".*"],
"max": [r".*"]}
environ_job_filter = [
"OPENPYPE_METADATA_FILE"
@ -137,7 +139,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"FTRACK_API_KEY",
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME"
"OPENPYPE_USERNAME",
"OPENPYPE_SG_USER",
]
# Add OpenPype version if we are running from build.
@ -192,7 +195,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata_path = os.path.join(output_dir, metadata_filename)
# Convert output dir to `{root}/rest/of/path/...` with Anatomy
success, roothless_mtdt_p = self.anatomy.find_root_template_from_path(
success, rootless_mtdt_p = self.anatomy.find_root_template_from_path(
metadata_path)
if not success:
# `rootless_path` is not set to `output_dir` if none of roots match
@ -200,9 +203,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Could not find root path for remapping \"{}\"."
" This may cause issues on farm."
).format(output_dir))
roothless_mtdt_p = metadata_path
rootless_mtdt_p = metadata_path
return metadata_path, roothless_mtdt_p
return metadata_path, rootless_mtdt_p
def _submit_deadline_post_job(self, instance, job, instances):
"""Submit publish job to Deadline.
@ -235,7 +238,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_path, roothless_metadata_path = \
metadata_path, rootless_metadata_path = \
self._create_metadata_path(instance)
environment = {
@ -272,7 +275,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
args = [
"--headless",
'publish',
roothless_metadata_path,
rootless_metadata_path,
"--targets", "deadline",
"--targets", "farm"
]
@ -296,8 +299,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"Group": self.deadline_group,
"Pool": instance.data.get("primaryPool"),
"SecondaryPool": instance.data.get("secondaryPool"),
"OutputDirectory0": output_dir
# ensure the output directory uses forward slashes
"OutputDirectory0": output_dir.replace("\\", "/")
},
"PluginInfo": {
"Version": self.plugin_pype_version,
@ -409,7 +412,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
staging = representation.get("stagingDir")
staging = self.anatomy.fill_roots(staging)
staging = self.anatomy.fill_root(staging)
resource_files.append(
(frame,
os.path.join(staging,
@ -605,7 +608,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
host_name = os.environ.get("AVALON_APP", "")
collections, remainders = clique.assemble(exp_files)
# create representation for every collected sequento ce
# create representation for every collected sequence
for collection in collections:
ext = collection.tail.lstrip(".")
preview = False
@ -673,7 +676,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
self._solve_families(instance, preview)
# add reminders as representations
# add remainders as representations
for remainder in remainders:
ext = remainder.split(".")[-1]
@ -693,7 +696,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"name": ext,
"ext": ext,
"files": os.path.basename(remainder),
"stagingDir": os.path.dirname(remainder),
"stagingDir": staging,
}
preview = match_aov_pattern(
@ -1005,6 +1008,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
'''
render_job = None
submission_type = ""
if instance.data.get("toBeRenderedOn") == "deadline":
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
@ -1088,7 +1092,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
}
publish_job.update({"ftrack": ftrack})
metadata_path, roothless_metadata_path = self._create_metadata_path(
metadata_path, rootless_metadata_path = self._create_metadata_path(
instance)
self.log.info("Writing json file: {}".format(metadata_path))

View file

@ -91,7 +91,7 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
for job_id in render_job_ids:
job_info = self._get_job_info(job_id)
frame_list = job_info["Props"]["Frames"]
frame_list = job_info["Props"].get("Frames")
if frame_list:
all_frame_lists.extend(frame_list.split(','))

View file

@ -83,6 +83,11 @@ class CollectShotgridSession(pyblish.api.ContextPlugin):
"login to shotgrid withing openpype Tray"
)
# Set OPENPYPE_SG_USER with login so other deadline tasks can make
# use of it
self.log.info("Setting OPENPYPE_SG_USER to '%s'.", login)
os.environ["OPENPYPE_SG_USER"] = login
session = shotgun_api3.Shotgun(
base_url=shotgrid_url,
script_name=shotgrid_script_name,

View file

@ -7,7 +7,7 @@ from openpype.pipeline.publish import get_publish_repre_path
class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
"""
Create published Files from representations and add them to the version. If
representation is tagged add shotgrid review, it will add it in
representation is tagged as shotgrid review, it will add it in
path to movie for a movie file or path to frame for an image sequence.
"""
@ -27,11 +27,11 @@ class IntegrateShotgridPublish(pyblish.api.InstancePlugin):
local_path = get_publish_repre_path(
instance, representation, False
)
code = os.path.basename(local_path)
if representation.get("tags", []):
continue
code = os.path.basename(local_path)
published_file = self._find_existing_publish(
code, context, shotgrid_version
)

View file

@ -37,9 +37,9 @@ class IntegrateShotgridVersion(pyblish.api.InstancePlugin):
self.log.info("Use existing Shotgrid version: {}".format(version))
data_to_update = {}
status = context.data.get("intent", {}).get("value")
if status:
data_to_update["sg_status_list"] = status
intent = context.data.get("intent")
if intent:
data_to_update["sg_status_list"] = intent["value"]
for representation in instance.data.get("representations", []):
local_path = get_publish_repre_path(

View file

@ -187,7 +187,7 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
repre_review_path = get_publish_repre_path(
instance, repre, False
)
if os.path.exists(repre_review_path):
if repre_review_path and os.path.exists(repre_review_path):
review_path = repre_review_path
if "burnin" in tags: # burnin has precedence if exists
break

View file

@ -1390,6 +1390,8 @@ class CreateContext:
self.autocreators = {}
# Manual creators
self.manual_creators = {}
# Creators that are disabled
self.disabled_creators = {}
self.convertors_plugins = {}
self.convertor_items_by_id = {}
@ -1667,6 +1669,7 @@ class CreateContext:
# Discover and prepare creators
creators = {}
disabled_creators = {}
autocreators = {}
manual_creators = {}
report = discover_creator_plugins(return_report=True)
@ -1703,6 +1706,9 @@ class CreateContext:
self,
self.headless
)
if not creator.enabled:
disabled_creators[creator_identifier] = creator
continue
creators[creator_identifier] = creator
if isinstance(creator, AutoCreator):
autocreators[creator_identifier] = creator
@ -1713,6 +1719,7 @@ class CreateContext:
self.manual_creators = manual_creators
self.creators = creators
self.disabled_creators = disabled_creators
def _reset_convertor_plugins(self):
convertors_plugins = {}

View file

@ -79,6 +79,10 @@ class SubsetConvertorPlugin(object):
self._log = Logger.get_logger(self.__class__.__name__)
return self._log
@property
def host(self):
return self._create_context.host
@abstractproperty
def identifier(self):
"""Converted identifier.

View file

@ -70,7 +70,8 @@ def get_subset_name(
host_name=None,
default_template=None,
dynamic_data=None,
project_settings=None
project_settings=None,
family_filter=None,
):
"""Calculate subset name based on passed context and OpenPype settings.
@ -82,23 +83,35 @@ def get_subset_name(
That's main reason why so many arguments are required to calculate subset
name.
The option to pass a family filter was added for special cases where a
creator or automated publishing requires a special subset name template
that would be hard to maintain using the instance's family value.
Why not just pass the right family? -> Family is also used as fill
value and for filtering of publish plugins.
Todos:
Find better filtering options to avoid requirement of
argument 'family_filter'.
Args:
family (str): Instance family.
variant (str): In most of the cases it is user input during creation.
task_name (str): Task name on which context is instance created.
asset_doc (dict): Queried asset document with its tasks in data.
Used to get task type.
project_name (str): Name of project on which is instance created.
Important for project settings that are loaded.
host_name (str): One of filtering criteria for template profile
filters.
default_template (str): Default template if any profile does not match
passed context. Constant 'DEFAULT_SUBSET_TEMPLATE' is used if
is not passed.
dynamic_data (dict): Dynamic data specific for a creator which creates
instance.
project_settings (Union[Dict[str, Any], None]): Prepared settings for
project. Settings are queried if not passed.
project_name (Optional[str]): Name of project on which is instance
created. Important for project settings that are loaded.
host_name (Optional[str]): One of filtering criteria for template
profile filters.
default_template (Optional[str]): Default template if any profile does
not match passed context. Constant 'DEFAULT_SUBSET_TEMPLATE'
is used if is not passed.
dynamic_data (Optional[Dict[str, Any]]): Dynamic data specific for
a creator which creates instance.
project_settings (Optional[Union[Dict[str, Any]]]): Prepared settings
for project. Settings are queried if not passed.
family_filter (Optional[str]): Use different family for subset template
filtering. Value of 'family' is used when not passed.
"""
if not family:
@ -119,7 +132,7 @@ def get_subset_name(
template = get_subset_name_template(
project_name,
family,
family_filter or family,
task_name,
task_type,
host_name,
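A hedged usage sketch of the new `family_filter` argument (hypothetical values; `asset_doc` is assumed to be a previously queried asset document):

    # Fill the template with family "render", but pick the template profile
    # as if the family were "renderLayer".
    subset_name = get_subset_name(
        "render",
        "Main",
        "compositing",
        asset_doc,
        project_name="demo_project",
        family_filter="renderLayer",
    )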

View file

@ -12,6 +12,7 @@ import pyblish.api
from openpype.lib import (
Logger,
import_filepath,
filter_profiles
)
from openpype.settings import (
@ -301,12 +302,8 @@ def publish_plugins_discover(paths=None):
if not mod_ext == ".py":
continue
module = types.ModuleType(mod_name)
module.__file__ = abspath
try:
with open(abspath, "rb") as f:
six.exec_(f.read(), module.__dict__)
module = import_filepath(abspath, mod_name)
# Store reference to original module, to avoid
# garbage collection from collecting it's global
@ -683,6 +680,12 @@ def get_publish_repre_path(instance, repre, only_published=False):
staging_dir = repre.get("stagingDir")
if not staging_dir:
staging_dir = get_instance_staging_dir(instance)
# Expand the staging dir path in case it's been stored with the root
# template syntax
anatomy = instance.context.data["anatomy"]
staging_dir = anatomy.fill_root(staging_dir)
src_path = os.path.normpath(os.path.join(staging_dir, filename))
if os.path.exists(src_path):
return src_path
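Illustrative only (hypothetical roots): the expansion above turns a root-template path into a platform-mounted one:

    # staging_dir == "{root[work]}/demo/shots/sh010/publish"
    # anatomy.fill_root(staging_dir) -> "P:/projects/demo/shots/sh010/publish"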

View file

@ -14,16 +14,19 @@ from openpype.pipeline.editorial import (
range_from_frames,
make_sequence_collection
)
from openpype.pipeline.publish import (
get_publish_template_name
)
class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
"""Get Resources for a subset version"""
label = "Collect OTIO Subset Resources"
order = pyblish.api.CollectorOrder - 0.077
order = pyblish.api.CollectorOrder + 0.491
families = ["clip"]
hosts = ["resolve", "hiero", "flame"]
def process(self, instance):
if "audio" in instance.data["family"]:
@ -35,14 +38,21 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
if not instance.data.get("versionData"):
instance.data["versionData"] = {}
template_name = self.get_template_name(instance)
anatomy = instance.context.data["anatomy"]
publish_template_category = anatomy.templates[template_name]
template = os.path.normpath(publish_template_category["path"])
self.log.debug(
">> template: {}".format(template))
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
# get basic variables
otio_clip = instance.data["otioClip"]
otio_avalable_range = otio_clip.available_range()
media_fps = otio_avalable_range.start_time.rate
available_duration = otio_avalable_range.duration.value
otio_available_range = otio_clip.available_range()
media_fps = otio_available_range.start_time.rate
available_duration = otio_available_range.duration.value
# get available range trimmed with processed retimes
retimed_attributes = get_media_range_with_retimes(
@ -84,6 +94,11 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
frame_start = instance.data["frameStart"]
frame_end = frame_start + (media_out - media_in)
# Fit start/end frame to media in/out
if "{originalBasename}" in template:
frame_start = media_in
frame_end = media_out
# add to version data start and end range data
# for loader plugins to be correctly displayed and loaded
instance.data["versionData"].update({
@ -153,7 +168,6 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
repre = self._create_representation(
frame_start, frame_end, collection=collection)
instance.data["originalBasename"] = collection.format("{head}")
else:
_trim = False
dirname, filename = os.path.split(media_ref.target_url)
@ -168,8 +182,6 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
repre = self._create_representation(
frame_start, frame_end, file=filename, trim=_trim)
instance.data["originalBasename"] = os.path.splitext(filename)[0]
instance.data["originalDirname"] = self.staging_dir
if repre:
@ -225,3 +237,26 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
if kwargs.get("trim") is True:
representation_data["tags"] = ["trim"]
return representation_data
def get_template_name(self, instance):
"""Return anatomy template name to use for integration"""
# Anatomy data is pre-filled by Collectors
context = instance.context
project_name = context.data["projectName"]
# Task can be optional in anatomy data
host_name = context.data["hostName"]
family = instance.data["family"]
anatomy_data = instance.data["anatomyData"]
task_info = anatomy_data.get("task") or {}
return get_publish_template_name(
project_name,
host_name,
family,
task_name=task_info.get("name"),
task_type=task_info.get("type"),
project_settings=context.data["project_settings"],
logger=self.log
)

View file

@ -1,7 +1,6 @@
import os
import copy
import clique
import pyblish.api
from openpype.pipeline import publish
@ -118,11 +117,17 @@ class ExtractOIIOTranscode(publish.Extractor):
output_name,
output_extension)
target_colorspace = (output_def["colorspace"] or
colorspace_data.get("colorspace"))
view = output_def["view"] or colorspace_data.get("view")
display = (output_def["display"] or
colorspace_data.get("display"))
transcoding_type = output_def["transcoding_type"]
target_colorspace = view = display = None
if transcoding_type == "colorspace":
target_colorspace = (output_def["colorspace"] or
colorspace_data.get("colorspace"))
else:
view = output_def["view"] or colorspace_data.get("view")
display = (output_def["display"] or
colorspace_data.get("display"))
# both could be already collected by DCC,
# but could be overwritten
if view:
@ -217,6 +222,33 @@ class ExtractOIIOTranscode(publish.Extractor):
renamed_files.append(file_name)
new_repre["files"] = renamed_files
def _rename_in_representation(self, new_repre, files_to_convert,
output_name, output_extension):
"""Replace old extension with new one everywhere in representation.
Args:
new_repre (dict)
files_to_convert (list): of filenames from repre["files"],
standardized to always list
output_name (str): key of output definition from Settings,
if "<passthrough>" token used, keep original repre name
output_extension (str): extension from output definition
"""
if output_name != "passthrough":
new_repre["name"] = output_name
if not output_extension:
return
new_repre["ext"] = output_extension
renamed_files = []
for file_name in files_to_convert:
file_name, _ = os.path.splitext(file_name)
file_name = '{}.{}'.format(file_name,
output_extension)
renamed_files.append(file_name)
new_repre["files"] = renamed_files
def _translate_to_sequence(self, files_to_convert):
"""Returns original list or list with filename formatted in single
sequence format.

View file

@ -52,7 +52,16 @@ def _get_ffprobe_data(source):
"-show_streams",
source
]
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
kwargs = {
"stdout": subprocess.PIPE,
}
if platform.system().lower() == "windows":
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
proc = subprocess.Popen(command, **kwargs)
out = proc.communicate()[0]
if proc.returncode != 0:
raise RuntimeError("Failed to run: %s" % command)
@ -331,12 +340,18 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
)
print("Launching command: {}".format(command))
proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
kwargs = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"shell": True,
}
if platform.system().lower() == "windows":
kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP
| getattr(subprocess, "DETACHED_PROCESS", 0)
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
)
proc = subprocess.Popen(command, **kwargs)
_stdout, _stderr = proc.communicate()
if _stdout:

View file

@ -23,4 +23,4 @@
],
"tools_env": [],
"active": true
}
}

View file

@ -255,4 +255,4 @@
]
}
}
}
}

View file

@ -4,4 +4,4 @@
"darwin": "/Volumes/path",
"linux": "/mnt/share/projects"
}
}
}

View file

@ -41,4 +41,4 @@
"Compositing": {
"short_name": "comp"
}
}
}

View file

@ -66,4 +66,4 @@
"source": "source"
}
}
}
}

View file

@ -33,4 +33,4 @@
"create_first_version": false,
"custom_templates": []
}
}
}

View file

@ -82,4 +82,4 @@
"active": false
}
}
}
}

View file

@ -16,4 +16,4 @@
"anatomy_template_key_metadata": "render"
}
}
}
}

View file

@ -36,6 +36,18 @@
"scene_patches": [],
"strict_error_checking": true
},
"MaxSubmitDeadline": {
"enabled": true,
"optional": false,
"active": true,
"use_published": true,
"priority": 50,
"chunk_size": 10,
"group": "none",
"deadline_pool": "",
"deadline_pool_secondary": "",
"framePerTask": 1
},
"NukeSubmitDeadline": {
"enabled": true,
"optional": false,
@ -103,8 +115,11 @@
],
"harmony": [
".*"
],
"max": [
".*"
]
}
}
}
}
}

View file

@ -163,4 +163,4 @@
]
}
}
}
}

View file

@ -496,4 +496,4 @@
"farm_status_profiles": []
}
}
}
}

View file

@ -17,4 +17,4 @@
}
}
}
}
}

View file

@ -611,4 +611,4 @@
"linux": []
},
"project_environments": {}
}
}

View file

@ -50,4 +50,4 @@
"skip_timelines_check": []
}
}
}
}

View file

@ -97,4 +97,4 @@
}
]
}
}
}

Some files were not shown because too many files have changed in this diff.