mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00

[Automated] Merged develop into main
This commit is contained in: commit 0efc1ad734
235 changed files with 6797 additions and 2620 deletions
@@ -13,7 +13,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):
     # Should be as last hook because must change launch arguments to string
     order = 1000
-    app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
+    app_groups = ["nuke", "nukeassist", "nukex", "hiero", "nukestudio"]
     platforms = ["windows"]

     def execute(self):
@@ -210,7 +210,8 @@ def switch_item(container,
    if any(not x for x in [asset_name, subset_name, representation_name]):
        repre_id = container["representation"]
        representation = get_representation_by_id(project_name, repre_id)
-        repre_parent_docs = get_representation_parents(representation)
+        repre_parent_docs = get_representation_parents(
+            project_name, representation)
        if repre_parent_docs:
            version, subset, asset, _ = repre_parent_docs
        else:
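Reviewer note: `get_representation_parents` now requires the project name as its first argument. A minimal sketch of a caller migration, assuming `project_name` and a representation document are in scope (the tuple order follows the unpack above; the last element is presumably the project document):

    # before
    parents = get_representation_parents(representation)
    # after
    parents = get_representation_parents(project_name, representation)
    version, subset, asset, _project = parents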
@@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook):
                "Make sure the environment in fusion settings has "
                "'FUSION_PYTHON3_HOME' set correctly and make sure "
                "Python 3 is installed in the given path."
-                f"\n\nPYTHON36: {fusion_python3_home}"
+                f"\n\nPYTHON PATH: {fusion_python3_home}"
            )

        self.log.info(f"Setting {py3_var}: '{py3_dir}'...")
@@ -80,6 +80,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
            "outputDir": os.path.dirname(path),
            "ext": ext,  # todo: should be redundant
            "label": label,
+            "task": context.data["task"],
            "frameStart": context.data["frameStart"],
            "frameEnd": context.data["frameEnd"],
            "frameStartHandle": context.data["frameStartHandle"],
@@ -1,6 +1,4 @@
 import os
-from pprint import pformat
-
 import pyblish.api

 from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
@@ -23,23 +21,53 @@ class Fusionlocal(pyblish.api.InstancePlugin):
        # This plug-in runs only once and thus assumes all instances
        # currently will render the same frame range
        context = instance.context
-        key = "__hasRun{}".format(self.__class__.__name__)
+        key = f"__hasRun{self.__class__.__name__}"
        if context.data.get(key, False):
            return
-        else:
-            context.data[key] = True
-
-        current_comp = context.data["currentComp"]
+        context.data[key] = True
+
+        self.render_once(context)
+
        frame_start = context.data["frameStartHandle"]
        frame_end = context.data["frameEndHandle"]
        path = instance.data["path"]
        output_dir = instance.data["outputDir"]

-        ext = os.path.splitext(os.path.basename(path))[-1]
+        basename = os.path.basename(path)
+        head, ext = os.path.splitext(basename)
+        files = [
+            f"{head}{str(frame).zfill(4)}{ext}"
+            for frame in range(frame_start, frame_end + 1)
+        ]
+        repre = {
+            'name': ext[1:],
+            'ext': ext[1:],
+            'frameStart': f"%0{len(str(frame_end))}d" % frame_start,
+            'files': files,
+            "stagingDir": output_dir,
+        }
+
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+        instance.data["representations"].append(repre)
+
+        # review representation
+        repre_preview = repre.copy()
+        repre_preview["name"] = repre_preview["ext"] = "mp4"
+        repre_preview["tags"] = ["review", "ftrackreview", "delete"]
+        instance.data["representations"].append(repre_preview)
+
+    def render_once(self, context):
+        """Render context comp only once, even with more render instances"""
+
+        current_comp = context.data["currentComp"]
+        frame_start = context.data["frameStartHandle"]
+        frame_end = context.data["frameEndHandle"]

        self.log.info("Starting render")
-        self.log.info("Start frame: {}".format(frame_start))
-        self.log.info("End frame: {}".format(frame_end))
+        self.log.info(f"Start frame: {frame_start}")
+        self.log.info(f"End frame: {frame_end}")

        with comp_lock_and_undo_chunk(current_comp):
            result = current_comp.Render({
@@ -48,26 +76,5 @@ class Fusionlocal(pyblish.api.InstancePlugin):
                "Wait": True
            })

-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        collected_frames = os.listdir(output_dir)
-        repre = {
-            'name': ext[1:],
-            'ext': ext[1:],
-            'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start,
-            'files': collected_frames,
-            "stagingDir": output_dir,
-        }
-        instance.data["representations"].append(repre)
-
-        # review representation
-        repre_preview = repre.copy()
-        repre_preview["name"] = repre_preview["ext"] = "mp4"
-        repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"]
-        instance.data["representations"].append(repre_preview)
-
-        self.log.debug(f"_ instance.data: {pformat(instance.data)}")
-
        if not result:
            raise RuntimeError("Comp render failed")
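Reviewer note: the plugin now predicts the rendered file names instead of listing the staging directory. A self-contained sketch of the padding logic with illustrative values:

    import os

    path = "render/beauty.exr"  # illustrative saver path
    frame_start, frame_end = 1001, 1003
    head, ext = os.path.splitext(os.path.basename(path))
    files = [f"{head}{str(frame).zfill(4)}{ext}"
             for frame in range(frame_start, frame_end + 1)]
    # files == ['beauty1001.exr', 'beauty1002.exr', 'beauty1003.exr']
    # frameStart is zero-padded to the end frame's digit count:
    assert f"%0{len(str(frame_end))}d" % frame_start == "1001"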
@@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None):

    return ([x for x in child_list if rt.superClassOf(x) == node_type]
            if node_type else child_list)
+
+
+def get_current_renderer():
+    """get current renderer"""
+    return rt.renderers.production
+
+
+def get_default_render_folder(project_setting=None):
+    return (project_setting["max"]
+                           ["RenderSettings"]
+                           ["default_render_image_folder"])
+
+
+def set_framerange(start_frame, end_frame):
+    """
+    Note:
+        Frame range can be specified in different types. Possible values are:
+        * `1` - Single frame.
+        * `2` - Active time segment ( animationRange ).
+        * `3` - User specified Range.
+        * `4` - User specified Frame pickup string (for example `1,3,5-12`).
+
+    Todo:
+        Current type is hard-coded, there should be a custom setting for this.
+    """
+    rt.rendTimeType = 4
+    if start_frame is not None and end_frame is not None:
+        frame_range = "{0}-{1}".format(start_frame, end_frame)
+        rt.rendPickupFrames = frame_range
+
+
+def get_multipass_setting(project_setting=None):
+    return (project_setting["max"]
+                           ["RenderSettings"]
+                           ["multipass"])
+
+
+def get_max_version():
+    """
+    Args:
+        get max version date for deadline
+
+    Returns:
+        #(25000, 62, 0, 25, 0, 0, 997, 2023, "")
+        max_info[7] = max version date
+    """
+    max_info = rt.maxversion()
+    return max_info[7]
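Reviewer note: a short usage sketch of the new helpers. pymxs is only importable inside a 3ds Max session, and the settings dict below is illustrative, not taken from the commit:

    from openpype.hosts.max.api.lib import (
        get_default_render_folder, set_framerange, get_max_version)

    project_setting = {"max": {"RenderSettings": {
        "default_render_image_folder": "renders"}}}  # illustrative
    render_folder = get_default_render_folder(project_setting)  # "renders"
    set_framerange(1001, 1100)  # rendTimeType=4, rendPickupFrames="1001-1100"
    year = get_max_version()    # e.g. 2023, per the docstring above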
openpype/hosts/max/api/lib_renderproducts.py (new file, +114 lines)

# Render Element Example : For scanline render, VRay
# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC
# arnold
# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html
import os
from pymxs import runtime as rt
from openpype.hosts.max.api.lib import (
    get_current_renderer,
    get_default_render_folder
)
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io


class RenderProducts(object):

    def __init__(self, project_settings=None):
        self._project_settings = project_settings
        if not self._project_settings:
            self._project_settings = get_project_settings(
                legacy_io.Session["AVALON_PROJECT"]
            )

    def render_product(self, container):
        folder = rt.maxFilePath
        file = rt.maxFileName
        folder = folder.replace("\\", "/")
        setting = self._project_settings
        render_folder = get_default_render_folder(setting)
        filename, ext = os.path.splitext(file)

        output_file = os.path.join(folder,
                                   render_folder,
                                   filename,
                                   container)

        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        full_render_list = []
        beauty = self.beauty_render_product(output_file, img_fmt)
        full_render_list.append(beauty)

        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]

        if renderer == "VUE_File_Renderer":
            return full_render_list

        if renderer in [
            "ART_Renderer",
            "Redshift_Renderer",
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3",
            "Default_Scanline_Renderer",
            "Quicksilver_Hardware_Renderer",
        ]:
            render_elem_list = self.render_elements_product(output_file,
                                                            img_fmt)
            if render_elem_list:
                full_render_list.extend(iter(render_elem_list))
            return full_render_list

        if renderer == "Arnold":
            aov_list = self.arnold_render_product(output_file,
                                                  img_fmt)
            if aov_list:
                full_render_list.extend(iter(aov_list))
            return full_render_list

    def beauty_render_product(self, folder, fmt):
        beauty_output = f"{folder}.####.{fmt}"
        beauty_output = beauty_output.replace("\\", "/")
        return beauty_output

    # TODO: Get the arnold render product
    def arnold_render_product(self, folder, fmt):
        """Get all the Arnold AOVs"""
        aovs = []

        amw = rt.MaxtoAOps.AOVsManagerWindow()
        aov_mgr = rt.renderers.current.AOVManager
        # Check if there is any aov group set in AOV manager
        aov_group_num = len(aov_mgr.drivers)
        if aov_group_num < 1:
            return
        for i in range(aov_group_num):
            # get the specific AOV group
            for aov in aov_mgr.drivers[i].aov_list:
                render_element = f"{folder}_{aov.name}.####.{fmt}"
                render_element = render_element.replace("\\", "/")
                aovs.append(render_element)
        # close the AOVs manager window
        amw.close()

        return aovs

    def render_elements_product(self, folder, fmt):
        """Get all the render element output files."""
        render_dirname = []

        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        # get render elements from the renders
        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            if renderlayer_name.enabled:
                render_element = f"{folder}_{renderpass}.####.{fmt}"
                render_element = render_element.replace("\\", "/")
                render_dirname.append(render_element)

        return render_dirname

    def image_format(self):
        return self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
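Reviewer note: a sketch of what `render_product` is expected to return inside a 3ds Max session; the scene name, renderer, and paths here are illustrative, not taken from the commit:

    # scene saved as C:/proj/shot010.max, image_format "exr",
    # default render folder "renders", Default Scanline renderer active:
    products = RenderProducts().render_product("renderMain")
    # e.g. ["C:/proj/renders/shot010/renderMain.####.exr",
    #       "C:/proj/renders/shot010/renderMain_<element>.####.exr", ...]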
openpype/hosts/max/api/lib_rendersettings.py (new file, +168 lines)

import os
from pymxs import runtime as rt
from openpype.lib import Logger
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.context_tools import get_current_project_asset

from openpype.hosts.max.api.lib import (
    set_framerange,
    get_current_renderer,
    get_default_render_folder
)


class RenderSettings(object):

    log = Logger.get_logger("RenderSettings")

    _aov_chars = {
        "dot": ".",
        "dash": "-",
        "underscore": "_"
    }

    def __init__(self, project_settings=None):
        """
        Set up the naming convention for the render
        elements for the deadline submission
        """

        self._project_settings = project_settings
        if not self._project_settings:
            self._project_settings = get_project_settings(
                legacy_io.Session["AVALON_PROJECT"]
            )

    def set_render_camera(self, selection):
        found = False
        for sel in selection:
            # to avoid Attribute Error from pymxs wrapper
            if rt.classOf(sel) in rt.Camera.classes:
                found = True
                rt.viewport.setCamera(sel)
                break
        if not found:
            raise RuntimeError("Camera not found")

    def render_output(self, container):
        folder = rt.maxFilePath
        # hard-coded, should be customized in the setting
        file = rt.maxFileName
        folder = folder.replace("\\", "/")
        # hard-coded, set the renderoutput path
        setting = self._project_settings
        render_folder = get_default_render_folder(setting)
        filename, ext = os.path.splitext(file)
        output_dir = os.path.join(folder,
                                  render_folder,
                                  filename)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # hard-coded, should be customized in the setting
        context = get_current_project_asset()

        # get project resolution
        width = context["data"].get("resolutionWidth")
        height = context["data"].get("resolutionHeight")
        # Set Frame Range
        frame_start = context["data"].get("frame_start")
        frame_end = context["data"].get("frame_end")
        set_framerange(frame_start, frame_end)
        # get the production render
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]

        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        output = os.path.join(output_dir, container)
        try:
            aov_separator = self._aov_chars[(
                self._project_settings["maya"]
                                      ["RenderSettings"]
                                      ["aov_separator"]
            )]
        except KeyError:
            aov_separator = "."
        output_filename = "{0}..{1}".format(output, img_fmt)
        output_filename = output_filename.replace("{aov_separator}",
                                                  aov_separator)
        rt.rendOutputFilename = output_filename
        if renderer == "VUE_File_Renderer":
            return
        # TODO: Finish the arnold render setup
        if renderer == "Arnold":
            self.arnold_setup()

        if renderer in [
            "ART_Renderer",
            "Redshift_Renderer",
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3",
            "Default_Scanline_Renderer",
            "Quicksilver_Hardware_Renderer",
        ]:
            self.render_element_layer(output, width, height, img_fmt)

        rt.rendSaveFile = True

    def arnold_setup(self):
        # get Arnold RenderView run in the background
        # for setting up renderable camera
        arv = rt.MAXToAOps.ArnoldRenderView()
        render_camera = rt.viewport.GetCamera()
        arv.setOption("Camera", str(render_camera))

        # TODO: add AOVs and extension
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        setup_cmd = (
            f"""
        amw = MaxtoAOps.AOVsManagerWindow()
        amw.close()
        aovmgr = renderers.current.AOVManager
        aovmgr.drivers = #()
        img_fmt = "{img_fmt}"
        if img_fmt == "png" then driver = ArnoldPNGDriver()
        if img_fmt == "jpg" then driver = ArnoldJPEGDriver()
        if img_fmt == "exr" then driver = ArnoldEXRDriver()
        if img_fmt == "tif" then driver = ArnoldTIFFDriver()
        if img_fmt == "tiff" then driver = ArnoldTIFFDriver()
        append aovmgr.drivers driver
        aovmgr.drivers[1].aov_list = #()
        """)

        rt.execute(setup_cmd)
        arv.close()

    def render_element_layer(self, dir, width, height, ext):
        """For Renderers with render elements"""
        rt.renderWidth = width
        rt.renderHeight = height
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return

        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
            render_elem.SetRenderElementFileName(i, aov_name)

    def get_render_output(self, container, output_dir):
        output = os.path.join(output_dir, container)
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        output_filename = "{0}..{1}".format(output, img_fmt)
        return output_filename

    def get_render_element(self):
        orig_render_elem = []
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return

        for i in range(render_elem_num):
            render_element = render_elem.GetRenderElementFilename(i)
            orig_render_elem.append(render_element)

        return orig_render_elem
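Reviewer note: the AOV separator resolution falls back to a dot on any missing key (and, as committed, it reads the "maya" settings section, not "max"). A self-contained sketch of that lookup:

    _aov_chars = {"dot": ".", "dash": "-", "underscore": "_"}

    def resolve_aov_separator(project_settings):
        # mirrors the try/except KeyError in render_output()
        try:
            return _aov_chars[
                project_settings["maya"]["RenderSettings"]["aov_separator"]]
        except KeyError:
            return "."

    assert resolve_aov_separator({}) == "."
    assert resolve_aov_separator(
        {"maya": {"RenderSettings": {"aov_separator": "underscore"}}}) == "_"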
openpype/hosts/max/plugins/create/create_render.py (new file, +33 lines)

# -*- coding: utf-8 -*-
"""Creator plugin for creating render instances."""
from openpype.hosts.max.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.hosts.max.api.lib_rendersettings import RenderSettings


class CreateRender(plugin.MaxCreator):
    identifier = "io.openpype.creators.max.render"
    label = "Render"
    family = "maxrender"
    icon = "gear"

    def create(self, subset_name, instance_data, pre_create_data):
        from pymxs import runtime as rt
        sel_obj = list(rt.selection)
        instance = super(CreateRender, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
        container_name = instance.data.get("instance_node")
        container = rt.getNodeByName(container_name)
        # TODO: Disable "Add to Containers?" Panel
        # parent the selected cameras into the container
        for obj in sel_obj:
            obj.parent = container
        # for additional work on the node:
        # instance_node = rt.getNodeByName(instance.get("instance_node"))

        # set viewport camera for rendering (mandatory for deadline)
        RenderSettings().set_render_camera(sel_obj)
        # set output paths for rendering (mandatory for deadline)
        RenderSettings().render_output(container_name)
@@ -1,7 +1,10 @@
 import os
 from openpype.pipeline import (
-    load
+    load,
+    get_representation_path
 )
+from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api import lib


 class FbxLoader(load.LoaderPlugin):
@@ -36,14 +39,26 @@ importFile @"{filepath}" #noPrompt using:FBXIMP
        container_name = f"{name}_CON"

        asset = rt.getNodeByName(f"{name}")
-        # rename the container with "_CON"
-        container = rt.container(name=container_name)
-        asset.Parent = container
-
-        return container
+        return containerise(
+            name, [asset], context, loader=self.__class__.__name__)
+
+    def update(self, container, representation):
+        from pymxs import runtime as rt
+
+        path = get_representation_path(representation)
+        node = rt.getNodeByName(container["instance_node"])
+
+        fbx_objects = self.get_container_children(node)
+        for fbx_object in fbx_objects:
+            fbx_object.source = path
+
+        lib.imprint(container["instance_node"], {
+            "representation": str(representation["_id"])
+        })

    def remove(self, container):
        from pymxs import runtime as rt

-        node = container["node"]
+        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
@@ -1,7 +1,9 @@
 import os
 from openpype.pipeline import (
-    load
+    load, get_representation_path
 )
+from openpype.hosts.max.api.pipeline import containerise
+from openpype.hosts.max.api import lib


 class MaxSceneLoader(load.LoaderPlugin):
@@ -35,16 +37,26 @@ class MaxSceneLoader(load.LoaderPlugin):
            self.log.error("Something failed when loading.")

        max_container = max_containers.pop()
        container_name = f"{name}_CON"
-        # rename the container with "_CON"
-        # get the original container
-        container = rt.container(name=container_name)
-        max_container.Parent = container
-
-        return container
+        return containerise(
+            name, [max_container], context, loader=self.__class__.__name__)
+
+    def update(self, container, representation):
+        from pymxs import runtime as rt
+
+        path = get_representation_path(representation)
+        node = rt.getNodeByName(container["instance_node"])
+
+        max_objects = self.get_container_children(node)
+        for max_object in max_objects:
+            max_object.source = path
+
+        lib.imprint(container["instance_node"], {
+            "representation": str(representation["_id"])
+        })

    def remove(self, container):
        from pymxs import runtime as rt

-        node = container["node"]
+        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)
@@ -80,7 +80,7 @@ importFile @"{file_path}" #noPrompt
    def remove(self, container):
        from pymxs import runtime as rt

-        node = container["node"]
+        node = rt.getNodeByName(container["instance_node"])
        rt.delete(node)

    @staticmethod
openpype/hosts/max/plugins/publish/collect_render.py (new file, +67 lines)

# -*- coding: utf-8 -*-
"""Collect Render"""
import os
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline import get_current_asset_name
from openpype.hosts.max.api.lib import get_max_version
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
from openpype.client import get_last_version_by_subset_name


class CollectRender(pyblish.api.InstancePlugin):
    """Collect Render for Deadline"""

    order = pyblish.api.CollectorOrder + 0.01
    label = "Collect 3dsmax Render Layers"
    hosts = ['max']
    families = ["maxrender"]

    def process(self, instance):
        context = instance.context
        folder = rt.maxFilePath
        file = rt.maxFileName
        current_file = os.path.join(folder, file)
        filepath = current_file.replace("\\", "/")

        context.data['currentFile'] = current_file
        asset = get_current_asset_name()

        render_layer_files = RenderProducts().render_product(instance.name)
        folder = folder.replace("\\", "/")

        img_format = RenderProducts().image_format()
        project_name = context.data["projectName"]
        asset_doc = context.data["assetEntity"]
        asset_id = asset_doc["_id"]
        version_doc = get_last_version_by_subset_name(project_name,
                                                      instance.name,
                                                      asset_id)

        self.log.debug("version_doc: {0}".format(version_doc))
        version_int = 1
        if version_doc:
            version_int += int(version_doc["name"])

        self.log.debug(f"Setting {version_int} to context.")
        context.data["version"] = version_int

        # setup the plugin as 3dsmax for the internal renderer
        data = {
            "subset": instance.name,
            "asset": asset,
            "publish": True,
            "maxversion": str(get_max_version()),
            "imageFormat": img_format,
            "family": 'maxrender',
            "families": ['maxrender'],
            "source": filepath,
            "expectedFiles": render_layer_files,
            "plugin": "3dsmax",
            "frameStart": context.data['frameStart'],
            "frameEnd": context.data['frameEnd'],
            "version": version_int
        }
        self.log.info("data: {0}".format(data))
        instance.data.update(data)
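Reviewer note: the collector bumps the version from the last published one, defaulting to 1 when nothing was published yet; a runnable sketch of that rule:

    def next_version(version_doc):
        # version_doc: last published version document, or None
        version_int = 1
        if version_doc:
            version_int += int(version_doc["name"])
        return version_int

    assert next_version(None) == 1
    assert next_version({"name": 4}) == 5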
@@ -15,7 +15,7 @@ from math import ceil
 from six import string_types

 from maya import cmds, mel
-import maya.api.OpenMaya as om
+from maya.api import OpenMaya

 from openpype.client import (
     get_project,
@@ -403,9 +403,9 @@ def lsattrs(attrs):

    """

-    dep_fn = om.MFnDependencyNode()
-    dag_fn = om.MFnDagNode()
-    selection_list = om.MSelectionList()
+    dep_fn = OpenMaya.MFnDependencyNode()
+    dag_fn = OpenMaya.MFnDagNode()
+    selection_list = OpenMaya.MSelectionList()

    first_attr = next(iter(attrs))
@@ -419,7 +419,7 @@ def lsattrs(attrs):
    matches = set()
    for i in range(selection_list.length()):
        node = selection_list.getDependNode(i)
-        if node.hasFn(om.MFn.kDagNode):
+        if node.hasFn(OpenMaya.MFn.kDagNode):
            fn_node = dag_fn.setObject(node)
            full_path_names = [path.fullPathName()
                               for path in fn_node.getAllPaths()]
@@ -868,11 +868,11 @@ def maintained_selection_api():
    Warning: This is *not* added to the undo stack.

    """
-    original = om.MGlobal.getActiveSelectionList()
+    original = OpenMaya.MGlobal.getActiveSelectionList()
    try:
        yield
    finally:
-        om.MGlobal.setActiveSelectionList(original)
+        OpenMaya.MGlobal.setActiveSelectionList(original)


 @contextlib.contextmanager
@@ -1282,11 +1282,11 @@ def get_id(node):
    if node is None:
        return

-    sel = om.MSelectionList()
+    sel = OpenMaya.MSelectionList()
    sel.add(node)

    api_node = sel.getDependNode(0)
-    fn = om.MFnDependencyNode(api_node)
+    fn = OpenMaya.MFnDependencyNode(api_node)

    if not fn.hasAttribute("cbId"):
        return
@@ -3341,15 +3341,15 @@ def iter_visible_nodes_in_range(nodes, start, end):
    @memodict
    def get_visibility_mplug(node):
        """Return api 2.0 MPlug with cached memoize decorator"""
-        sel = om.MSelectionList()
+        sel = OpenMaya.MSelectionList()
        sel.add(node)
        dag = sel.getDagPath(0)
-        return om.MFnDagNode(dag).findPlug("visibility", True)
+        return OpenMaya.MFnDagNode(dag).findPlug("visibility", True)

    @contextlib.contextmanager
    def dgcontext(mtime):
        """MDGContext context manager"""
-        context = om.MDGContext(mtime)
+        context = OpenMaya.MDGContext(mtime)
        try:
            previous = context.makeCurrent()
            yield context
@@ -3358,9 +3358,9 @@ def iter_visible_nodes_in_range(nodes, start, end):

    # We skip the first frame as we already used that frame to check for
    # overall visibilities. And end+1 to include the end frame.
-    scene_units = om.MTime.uiUnit()
+    scene_units = OpenMaya.MTime.uiUnit()
    for frame in range(start + 1, end + 1):
-        mtime = om.MTime(frame, unit=scene_units)
+        mtime = OpenMaya.MTime(frame, unit=scene_units)

        # Build little cache so we don't query the same MPlug's value
        # again if it was checked on this frame and also is a dependency
@@ -3509,3 +3509,56 @@ def write_xgen_file(data, filepath):

    with open(filepath, "w") as f:
        f.writelines(lines)
+
+
+def get_color_management_preferences():
+    """Get and resolve OCIO preferences."""
+    data = {
+        # Is color management enabled.
+        "enabled": cmds.colorManagementPrefs(
+            query=True, cmEnabled=True
+        ),
+        "rendering_space": cmds.colorManagementPrefs(
+            query=True, renderingSpaceName=True
+        ),
+        "output_transform": cmds.colorManagementPrefs(
+            query=True, outputTransformName=True
+        ),
+        "output_transform_enabled": cmds.colorManagementPrefs(
+            query=True, outputTransformEnabled=True
+        ),
+        "view_transform": cmds.colorManagementPrefs(
+            query=True, viewTransformName=True
+        )
+    }
+
+    # Split view and display from view_transform. view_transform comes in
+    # format of "{view} ({display})".
+    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
+    match = regex.match(data["view_transform"])
+    data.update({
+        "display": match.group("display"),
+        "view": match.group("view")
+    })
+
+    # Get config absolute path.
+    path = cmds.colorManagementPrefs(
+        query=True, configFilePath=True
+    )
+
+    # The OCIO config supports a custom <MAYA_RESOURCES> token.
+    maya_resources_token = "<MAYA_RESOURCES>"
+    maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources()
+    path = path.replace(maya_resources_token, maya_resources_path)
+
+    data["config"] = path
+
+    return data
+
+
+def get_color_management_output_transform():
+    preferences = get_color_management_preferences()
+    colorspace = preferences["rendering_space"]
+    if preferences["output_transform_enabled"]:
+        colorspace = preferences["output_transform"]
+    return colorspace
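Reviewer note: the view/display split assumes Maya reports the view transform as "{view} ({display})"; a runnable check of the regex (the transform name is illustrative):

    import re

    regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$")
    match = regex.match("ACES 1.0 SDR-video (ACES)")
    assert match.group("view") == "ACES 1.0 SDR-video"
    assert match.group("display") == "ACES"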
@@ -46,6 +46,7 @@ import attr

 from . import lib
 from . import lib_rendersetup
+from openpype.pipeline.colorspace import get_ocio_config_views

 from maya import cmds, mel
@@ -127,6 +128,7 @@ class RenderProduct(object):
    """
    productName = attr.ib()
    ext = attr.ib()                             # extension
+    colorspace = attr.ib()                      # colorspace
    aov = attr.ib(default=None)                 # source aov
    driver = attr.ib(default=None)              # source driver
    multipart = attr.ib(default=False)          # multichannel file
@@ -196,12 +198,18 @@ class ARenderProducts:
        """Constructor."""
        self.layer = layer
        self.render_instance = render_instance
-        self.multipart = False
+        self.multipart = self.get_multipart()

        # Initialize
        self.layer_data = self._get_layer_data()
        self.layer_data.products = self.get_render_products()

+    def get_multipart(self):
+        raise NotImplementedError(
+            "The render product implementation does not have a "
+            "\"get_multipart\" method."
+        )
+
    def has_camera_token(self):
        # type: () -> bool
        """Check if camera token is in image prefix.
@@ -344,7 +352,6 @@ class ARenderProducts:
        separator = file_prefix[matches[0].end(1):matches[1].start(1)]
        return separator

-
    def _get_layer_data(self):
        # type: () -> LayerMetadata
        # ______________________________________________
@@ -531,16 +538,20 @@ class RenderProductsArnold(ARenderProducts):

        return prefix

-    def _get_aov_render_products(self, aov, cameras=None):
-        """Return all render products for the AOV"""
-
-        products = []
-        aov_name = self._get_attr(aov, "name")
+    def get_multipart(self):
+        multipart = False
+        multilayer = bool(self._get_attr("defaultArnoldDriver.multipart"))
+        merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs"))
+        if multilayer or merge_AOVs:
+            multipart = True
+
+        return multipart
+
+    def _get_aov_render_products(self, aov, cameras=None):
+        """Return all render products for the AOV"""
+
+        products = []
+        aov_name = self._get_attr(aov, "name")
        ai_drivers = cmds.listConnections("{}.outputs".format(aov),
                                          source=True,
                                          destination=False,
@@ -553,6 +564,9 @@ class RenderProductsArnold(ARenderProducts):
        ]

        for ai_driver in ai_drivers:
+            colorspace = self._get_colorspace(
+                ai_driver + ".colorManagement"
+            )
            # todo: check aiAOVDriver.prefix as it could have
            # a custom path prefix set for this driver
@@ -590,12 +604,15 @@ class RenderProductsArnold(ARenderProducts):
            global_aov = self._get_attr(aov, "globalAov")
            if global_aov:
                for camera in cameras:
-                    product = RenderProduct(productName=name,
-                                            ext=ext,
-                                            aov=aov_name,
-                                            driver=ai_driver,
-                                            multipart=multipart,
-                                            camera=camera)
+                    product = RenderProduct(
+                        productName=name,
+                        ext=ext,
+                        aov=aov_name,
+                        driver=ai_driver,
+                        multipart=self.multipart,
+                        camera=camera,
+                        colorspace=colorspace
+                    )
                    products.append(product)

            all_light_groups = self._get_attr(aov, "lightGroups")
@@ -603,13 +620,16 @@ class RenderProductsArnold(ARenderProducts):
                # All light groups is enabled. A single multipart
                # Render Product
                for camera in cameras:
-                    product = RenderProduct(productName=name + "_lgroups",
-                                            ext=ext,
-                                            aov=aov_name,
-                                            driver=ai_driver,
-                                            # Always multichannel output
-                                            multipart=True,
-                                            camera=camera)
+                    product = RenderProduct(
+                        productName=name + "_lgroups",
+                        ext=ext,
+                        aov=aov_name,
+                        driver=ai_driver,
+                        # Always multichannel output
+                        multipart=True,
+                        camera=camera,
+                        colorspace=colorspace
+                    )
                    products.append(product)
            else:
                value = self._get_attr(aov, "lightGroupsList")
@@ -625,12 +645,36 @@ class RenderProductsArnold(ARenderProducts):
                        aov=aov_name,
                        driver=ai_driver,
                        ext=ext,
-                        camera=camera
+                        camera=camera,
+                        colorspace=colorspace
                    )
                    products.append(product)

        return products

+    def _get_colorspace(self, attribute):
+        """Resolve colorspace from Arnold settings."""
+
+        def _view_transform():
+            preferences = lib.get_color_management_preferences()
+            views_data = get_ocio_config_views(preferences["config"])
+            view_data = views_data[
+                "{}/{}".format(preferences["display"], preferences["view"])
+            ]
+            return view_data["colorspace"]
+
+        def _raw():
+            preferences = lib.get_color_management_preferences()
+            return preferences["rendering_space"]
+
+        resolved_values = {
+            "Raw": _raw,
+            "Use View Transform": _view_transform,
+            # Default. Same as Maya Preferences.
+            "Use Output Transform": lib.get_color_management_output_transform
+        }
+        return resolved_values[self._get_attr(attribute)]()
+
    def get_render_products(self):
        """Get all AOVs.
@@ -659,11 +703,19 @@ class RenderProductsArnold(ARenderProducts):
        ]

        default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey")
-        beauty_products = [RenderProduct(
-            productName="beauty",
-            ext=default_ext,
-            driver="defaultArnoldDriver",
-            camera=camera) for camera in cameras]
+        colorspace = self._get_colorspace(
+            "defaultArnoldDriver.colorManagement"
+        )
+        beauty_products = [
+            RenderProduct(
+                productName="beauty",
+                ext=default_ext,
+                driver="defaultArnoldDriver",
+                camera=camera,
+                colorspace=colorspace
+            ) for camera in cameras
+        ]

        # AOVs > Legacy > Maya Render View > Mode
        aovs_enabled = bool(
            self._get_attr("defaultArnoldRenderOptions.aovMode")
@@ -731,6 +783,14 @@ class RenderProductsVray(ARenderProducts):

    renderer = "vray"

+    def get_multipart(self):
+        multipart = False
+        image_format = self._get_attr("vraySettings.imageFormatStr")
+        if image_format == "exr (multichannel)":
+            multipart = True
+
+        return multipart
+
    def get_renderer_prefix(self):
        # type: () -> str
        """Get image prefix for V-Ray.
@@ -797,11 +857,6 @@ class RenderProductsVray(ARenderProducts):
        if default_ext in {"exr (multichannel)", "exr (deep)"}:
            default_ext = "exr"

-        # Define multipart.
-        multipart = False
-        if image_format_str == "exr (multichannel)":
-            multipart = True
-
        products = []

        # add beauty as default when not disabled
@@ -813,7 +868,8 @@ class RenderProductsVray(ARenderProducts):
                    productName="",
                    ext=default_ext,
                    camera=camera,
-                    multipart=multipart
+                    colorspace=lib.get_color_management_output_transform(),
+                    multipart=self.multipart
                )
            )
@@ -826,10 +882,10 @@ class RenderProductsVray(ARenderProducts):
                    productName="Alpha",
                    ext=default_ext,
                    camera=camera,
-                    multipart=multipart
+                    multipart=self.multipart
                )
            )
-        if multipart:
+        if self.multipart:
            # AOVs are merged in m-channel file, only main layer is rendered
            return products
@@ -868,10 +924,13 @@ class RenderProductsVray(ARenderProducts):

            aov_name = self._get_vray_aov_name(aov)
            for camera in cameras:
-                product = RenderProduct(productName=aov_name,
-                                        ext=default_ext,
-                                        aov=aov,
-                                        camera=camera)
+                product = RenderProduct(
+                    productName=aov_name,
+                    ext=default_ext,
+                    aov=aov,
+                    camera=camera,
+                    colorspace=lib.get_color_management_output_transform()
+                )
                products.append(product)

        return products
@@ -989,6 +1048,34 @@ class RenderProductsRedshift(ARenderProducts):
    renderer = "redshift"
    unmerged_aovs = {"Cryptomatte"}

+    def get_files(self, product):
+        # When outputting AOVs we need to replace Redshift specific AOV tokens
+        # with Maya render tokens for generating file sequences. We validate to
+        # a specific AOV fileprefix so we only need to account for one
+        # replacement.
+        if not product.multipart and product.driver:
+            file_prefix = self._get_attr(product.driver + ".filePrefix")
+            self.layer_data.filePrefix = file_prefix.replace(
+                "<BeautyPath>/<BeautyFile>",
+                "<Scene>/<RenderLayer>/<RenderLayer>"
+            )
+
+        return super(RenderProductsRedshift, self).get_files(product)
+
+    def get_multipart(self):
+        # For Redshift we don't directly return upon forcing multilayer
+        # due to some AOVs still being written into separate files,
+        # like Cryptomatte.
+        # AOVs are merged in multi-channel file
+        multipart = False
+        force_layer = bool(
+            self._get_attr("redshiftOptions.exrForceMultilayer")
+        )
+        if force_layer:
+            multipart = True
+
+        return multipart
+
    def get_renderer_prefix(self):
        """Get image prefix for Redshift.
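Reviewer note: the `get_files` override only swaps the Redshift beauty tokens for Maya render tokens; a runnable sketch on an illustrative prefix:

    file_prefix = "<BeautyPath>/<BeautyFile>.<RenderPass>"  # illustrative
    rewritten = file_prefix.replace(
        "<BeautyPath>/<BeautyFile>",
        "<Scene>/<RenderLayer>/<RenderLayer>")
    assert rewritten == "<Scene>/<RenderLayer>/<RenderLayer>.<RenderPass>"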
@@ -1028,16 +1115,6 @@ class RenderProductsRedshift(ARenderProducts):
            for c in self.get_renderable_cameras()
        ]

-        # For Redshift we don't directly return upon forcing multilayer
-        # due to some AOVs still being written into separate files,
-        # like Cryptomatte.
-        # AOVs are merged in multi-channel file
-        multipart = False
-        force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer"))  # noqa
-        exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart"))
-        if exMultipart or force_layer:
-            multipart = True
-
        # Get Redshift Extension from image format
        image_format = self._get_attr("redshiftOptions.imageFormat")  # integer
        ext = mel.eval("redshiftGetImageExtension(%i)" % image_format)
@@ -1059,7 +1136,7 @@ class RenderProductsRedshift(ARenderProducts):
                continue

            aov_type = self._get_attr(aov, "aovType")
-            if multipart and aov_type not in self.unmerged_aovs:
+            if self.multipart and aov_type not in self.unmerged_aovs:
                continue

            # Any AOVs that still get processed, like Cryptomatte
@@ -1094,8 +1171,9 @@ class RenderProductsRedshift(ARenderProducts):
                    productName=aov_light_group_name,
                    aov=aov_name,
                    ext=ext,
-                    multipart=multipart,
-                    camera=camera)
+                    multipart=False,
+                    camera=camera,
+                    driver=aov)
                products.append(product)

            if light_groups:
@@ -1108,8 +1186,9 @@ class RenderProductsRedshift(ARenderProducts):
                product = RenderProduct(productName=aov_name,
                                        aov=aov_name,
                                        ext=ext,
-                                        multipart=multipart,
-                                        camera=camera)
+                                        multipart=False,
+                                        camera=camera,
+                                        driver=aov)
                products.append(product)

        # When a Beauty AOV is added manually, it will be rendered as
@@ -1124,7 +1203,7 @@ class RenderProductsRedshift(ARenderProducts):
            products.insert(0,
                            RenderProduct(productName=beauty_name,
                                          ext=ext,
-                                          multipart=multipart,
+                                          multipart=self.multipart,
                                          camera=camera))

        return products
@@ -1144,6 +1223,10 @@ class RenderProductsRenderman(ARenderProducts):
    renderer = "renderman"
    unmerged_aovs = {"PxrCryptomatte"}

+    def get_multipart(self):
+        # Implemented as display specific in "get_render_products".
+        return False
+
    def get_render_products(self):
        """Get all AOVs.
@@ -1283,6 +1366,10 @@ class RenderProductsMayaHardware(ARenderProducts):
        {"label": "EXR(exr)", "index": 40, "extension": "exr"}
    ]

+    def get_multipart(self):
+        # MayaHardware does not support multipart EXRs.
+        return False
+
    def _get_extension(self, value):
        result = None
        if isinstance(value, int):
@@ -1327,7 +1414,12 @@ class RenderProductsMayaHardware(ARenderProducts):

        products = []
        for cam in self.get_renderable_cameras():
-            product = RenderProduct(productName="beauty", ext=ext, camera=cam)
+            product = RenderProduct(
+                productName="beauty",
+                ext=ext,
+                camera=cam,
+                colorspace=lib.get_color_management_output_transform()
+            )
            products.append(product)

        return products
@@ -23,7 +23,8 @@ class RenderSettings(object):
        'vray': 'vraySettings.fileNamePrefix',
        'arnold': 'defaultRenderGlobals.imageFilePrefix',
        'renderman': 'rmanGlobals.imageFileFormat',
-        'redshift': 'defaultRenderGlobals.imageFilePrefix'
+        'redshift': 'defaultRenderGlobals.imageFilePrefix',
+        'mayahardware2': 'defaultRenderGlobals.imageFilePrefix'
    }

    _image_prefixes = {
@@ -13,6 +13,7 @@ class CreateAnimation(plugin.Creator):
    icon = "male"
    write_color_sets = False
    write_face_sets = False
+    include_user_defined_attributes = False

    def __init__(self, *args, **kwargs):
        super(CreateAnimation, self).__init__(*args, **kwargs)
@@ -47,3 +48,6 @@ class CreateAnimation(plugin.Creator):

        # Default to write normals.
        self.data["writeNormals"] = True
+
+        value = self.include_user_defined_attributes
+        self.data["includeUserDefinedAttributes"] = value
@@ -15,6 +15,7 @@ class CreatePointCache(plugin.Creator):
    icon = "gears"
    write_color_sets = False
    write_face_sets = False
+    include_user_defined_attributes = False

    def __init__(self, *args, **kwargs):
        super(CreatePointCache, self).__init__(*args, **kwargs)
@@ -33,6 +34,8 @@ class CreatePointCache(plugin.Creator):
        self.data["refresh"] = False  # Default to suspend refresh.

        # Add options for custom attributes
+        value = self.include_user_defined_attributes
+        self.data["includeUserDefinedAttributes"] = value
        self.data["attr"] = ""
        self.data["attrPrefix"] = ""
@@ -46,7 +46,6 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):

        hierarchy = members + descendants

-
        # Ignore certain node types (e.g. constraints)
        ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True)
        if ignore:
@@ -58,3 +57,18 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin):

        if instance.data.get("farm"):
            instance.data["families"].append("publish.farm")
+
+        # Collect user defined attributes.
+        if not instance.data.get("includeUserDefinedAttributes", False):
+            return
+
+        user_defined_attributes = set()
+        for node in hierarchy:
+            attrs = cmds.listAttr(node, userDefined=True) or list()
+            shapes = cmds.listRelatives(node, shapes=True) or list()
+            for shape in shapes:
+                attrs.extend(cmds.listAttr(shape, userDefined=True) or list())
+
+            user_defined_attributes.update(attrs)
+
+        instance.data["userDefinedAttributes"] = list(user_defined_attributes)
@@ -42,3 +42,18 @@ class CollectPointcache(pyblish.api.InstancePlugin):
        if proxy_set:
            instance.remove(proxy_set)
            instance.data["setMembers"].remove(proxy_set)
+
+        # Collect user defined attributes.
+        if not instance.data.get("includeUserDefinedAttributes", False):
+            return
+
+        user_defined_attributes = set()
+        for node in instance:
+            attrs = cmds.listAttr(node, userDefined=True) or list()
+            shapes = cmds.listRelatives(node, shapes=True) or list()
+            for shape in shapes:
+                attrs.extend(cmds.listAttr(shape, userDefined=True) or list())
+
+            user_defined_attributes.update(attrs)
+
+        instance.data["userDefinedAttributes"] = list(user_defined_attributes)
@@ -42,6 +42,7 @@ Provides:
 import re
 import os
 import platform
+import json

 from maya import cmds
 import maya.app.renderSetup.model.renderSetup as renderSetup
@@ -183,7 +184,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            self.log.info("multipart: {}".format(
                multipart))
            assert exp_files, "no file names were generated, this is a bug"
-            self.log.info(exp_files)
+            self.log.info(
+                "expected files: {}".format(
+                    json.dumps(exp_files, indent=4, sort_keys=True)
+                )
+            )

            # if we want to attach render to subset, check if we have AOV's
            # in expectedFiles. If so, raise error as we cannot attach AOV
@@ -264,7 +269,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
            self.log.info(full_exp_files)
            self.log.info("collecting layer: {}".format(layer_name))
            # Get layer specific settings, might be overrides
-
+            colorspace_data = lib.get_color_management_preferences()
            data = {
                "subset": expected_layer_name,
                "attachTo": attach_to,
@@ -318,6 +323,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                "renderSetupIncludeLights": render_instance.data.get(
                    "renderSetupIncludeLights"
                ),
+                "colorspaceConfig": colorspace_data["config"],
+                "colorspaceDisplay": colorspace_data["display"],
+                "colorspaceView": colorspace_data["view"],
                "strict_error_checking": render_instance.data.get(
                    "strict_error_checking", True
                )
@@ -22,6 +22,8 @@ class ExtractGLB(publish.Extractor):

        self.log.info("Extracting GLB to: {}".format(path))

+        cmds.loadPlugin("maya2glTF", quiet=True)
+
        nodes = instance[:]

        self.log.info("Instance: {0}".format(nodes))
@@ -45,6 +47,7 @@ class ExtractGLB(publish.Extractor):
            "glb": True,
            "vno": True  # visibleNodeOnly
        }
+
        with lib.maintained_selection():
            cmds.select(nodes, hi=True, noExpand=True)
            extract_gltf(staging_dir,
@@ -1,5 +1,4 @@
 import os
-import copy

 from maya import cmds
@@ -10,7 +9,6 @@ from openpype.hosts.maya.api.lib import (
     maintained_selection,
     iter_visible_nodes_in_range
 )
-from openpype.lib import StringTemplate


 class ExtractAlembic(publish.Extractor):
@@ -41,6 +39,7 @@ class ExtractAlembic(publish.Extractor):

        attrs = instance.data.get("attr", "").split(";")
        attrs = [value for value in attrs if value.strip()]
+        attrs += instance.data.get("userDefinedAttributes", [])
        attrs += ["cbId"]

        attr_prefixes = instance.data.get("attrPrefix", "").split(";")
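Reviewer note: the user-defined attributes collected by the new collector code now flow into the Alembic export attribute list; a self-contained sketch with illustrative instance data:

    instance_data = {"attr": "myAttrA; ;myAttrB",
                     "userDefinedAttributes": ["rigVersion"]}  # illustrative

    attrs = instance_data.get("attr", "").split(";")
    attrs = [value for value in attrs if value.strip()]
    attrs += instance_data.get("userDefinedAttributes", [])
    attrs += ["cbId"]
    assert attrs == ["myAttrA", "myAttrB", "rigVersion", "cbId"]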
@@ -115,6 +114,7 @@ class ExtractAlembic(publish.Extractor):

        # Extract proxy.
        if not instance.data.get("proxy"):
+            self.log.info("No proxy nodes found. Skipping proxy extraction.")
            return

        path = path.replace(".abc", "_proxy.abc")
@@ -134,26 +134,14 @@ class ExtractAlembic(publish.Extractor):
            **options
        )

-        template_data = copy.deepcopy(instance.data["anatomyData"])
-        template_data.update({"ext": "abc"})
-        templates = instance.context.data["anatomy"].templates["publish"]
-        published_filename_without_extension = StringTemplate(
-            templates["file"]
-        ).format(template_data).replace(".abc", "_proxy")
-        transfers = []
-        destination = os.path.join(
-            instance.data["resourcesDir"],
-            filename.replace(
-                filename.split(".")[0],
-                published_filename_without_extension
-            )
-        )
-        transfers.append((path, destination))
-
-        for source, destination in transfers:
-            self.log.debug("Transfer: {} > {}".format(source, destination))
-
-        instance.data["transfers"] = transfers
+        representation = {
+            "name": "proxy",
+            "ext": "abc",
+            "files": os.path.basename(path),
+            "stagingDir": dirname,
+            "outputName": "proxy"
+        }
+        instance.data["representations"].append(representation)

    def get_members_and_roots(self, instance):
        return instance[:], instance.data.get("setMembers")
openpype/hosts/maya/plugins/publish/validate_glsl_material.py (new file, +207 lines)

import os
from maya import cmds

import pyblish.api
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder
)
from openpype.pipeline import PublishValidationError


class ValidateGLSLMaterial(pyblish.api.InstancePlugin):
    """
    Validate if the asset uses GLSL Shader
    """

    order = ValidateContentsOrder + 0.1
    families = ['gltf']
    hosts = ['maya']
    label = 'GLSL Shader for GLTF'
    actions = [RepairAction]
    optional = True
    active = True

    def process(self, instance):
        shading_grp = self.get_material_from_shapes(instance)
        if not shading_grp:
            raise PublishValidationError("No shading group found")
        invalid = self.get_texture_shader_invalid(instance)
        if invalid:
            raise PublishValidationError("Non GLSL Shader found: "
                                         "{0}".format(invalid))

    def get_material_from_shapes(self, instance):
        shapes = cmds.ls(instance, type="mesh", long=True)
        for shape in shapes:
            shading_grp = cmds.listConnections(shape,
                                               destination=True,
                                               type="shadingEngine")

            return shading_grp or []

    def get_texture_shader_invalid(self, instance):

        invalid = set()
        shading_grp = self.get_material_from_shapes(instance)
        for shading_group in shading_grp:
            material_name = "{}.surfaceShader".format(shading_group)
            material = cmds.listConnections(material_name,
                                            source=True,
                                            destination=False,
                                            type="GLSLShader")

            if not material:
                # add material name
                material = cmds.listConnections(material_name)[0]
                invalid.add(material)

        return list(invalid)

    @classmethod
    def repair(cls, instance):
        """
        Repair instance by assigning GLSL Shader
        to the material
        """
        cls.assign_glsl_shader(instance)
        return

    @classmethod
    def assign_glsl_shader(cls, instance):
        """
        Converting StingrayPBS material to GLSL Shaders
        for the glb export through Maya2GLTF plugin
        """

        meshes = cmds.ls(instance, type="mesh", long=True)
        cls.log.info("meshes: {}".format(meshes))
        # load the glsl shader plugin
        cmds.loadPlugin("glslShader", quiet=True)

        for mesh in meshes:
            # create glsl shader
            glsl = cmds.createNode('GLSLShader')
            glsl_shading_grp = cmds.sets(name=glsl + "SG", empty=True,
                                         renderable=True, noSurfaceShader=True)
            cmds.connectAttr(glsl + ".outColor",
                             glsl_shading_grp + ".surfaceShader")

            # load the maya2gltf shader
            ogsfx_path = instance.context.data["project_settings"]["maya"]["publish"]["ExtractGLB"]["ogsfx_path"]  # noqa
            if not os.path.exists(ogsfx_path):
                if ogsfx_path:
                    # if custom ogsfx path is not specified
                    # the log below is the warning for the user
                    cls.log.warning("ogsfx shader file "
                                    "not found in {}".format(ogsfx_path))

                cls.log.info("Find the ogsfx shader file in "
                             "default maya directory...")
                # re-direct to search the ogsfx path in maya_dir
                ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path
                if not os.path.exists(ogsfx_path):
                    raise PublishValidationError("The ogsfx shader file does not "  # noqa
                                                 "exist: {}".format(ogsfx_path))  # noqa

            cmds.setAttr(glsl + ".shader", ogsfx_path, typ="string")
            # list the materials used for the assets
            shading_grp = cmds.listConnections(mesh,
                                               destination=True,
                                               type="shadingEngine")

            # get the materials related to the selected assets
            for material in shading_grp:
                pbs_shader = cmds.listConnections(material,
                                                  destination=True,
                                                  type="StingrayPBS")
                if pbs_shader:
                    cls.pbs_shader_conversion(pbs_shader, glsl)
                # setting up to relink the texture if
                # the mesh is with aiStandardSurface
                arnold_shader = cmds.listConnections(material,
                                                     destination=True,
                                                     type="aiStandardSurface")
                if arnold_shader:
                    cls.arnold_shader_conversion(arnold_shader, glsl)

            cmds.sets(mesh, forceElement=str(glsl_shading_grp))

    @classmethod
    def pbs_shader_conversion(cls, main_shader, glsl):

        cls.log.info("StingrayPBS detected "
                     "-> Can do texture conversion")

        for shader in main_shader:
            # get the file textures related to the PBS Shader
            albedo = cmds.listConnections(shader +
                                          ".TEX_color_map")
            if albedo:
                dif_output = albedo[0] + ".outColor"
                # get the glsl_shader input
                # reconnect the file nodes to maya2gltf shader
                glsl_dif = glsl + ".u_BaseColorTexture"
                cmds.connectAttr(dif_output, glsl_dif)

            # connect orm map if there is one
            orm_packed = cmds.listConnections(shader +
                                              ".TEX_ao_map")
            if orm_packed:
                orm_output = orm_packed[0] + ".outColor"

                mtl = glsl + ".u_MetallicTexture"
                ao = glsl + ".u_OcclusionTexture"
                rough = glsl + ".u_RoughnessTexture"

                cmds.connectAttr(orm_output, mtl)
                cmds.connectAttr(orm_output, ao)
                cmds.connectAttr(orm_output, rough)

            # connect nrm map if there is one
            nrm = cmds.listConnections(shader +
                                       ".TEX_normal_map")
            if nrm:
                nrm_output = nrm[0] + ".outColor"
                glsl_nrm = glsl + ".u_NormalTexture"
                cmds.connectAttr(nrm_output, glsl_nrm)

    @classmethod
    def arnold_shader_conversion(cls, main_shader, glsl):
        cls.log.info("aiStandardSurface detected "
                     "-> Can do texture conversion")

        for shader in main_shader:
            # get the file textures related to the PBS Shader
            albedo = cmds.listConnections(shader + ".baseColor")
            if albedo:
                dif_output = albedo[0] + ".outColor"
                # get the glsl_shader input
                # reconnect the file nodes to maya2gltf shader
                glsl_dif = glsl + ".u_BaseColorTexture"
                cmds.connectAttr(dif_output, glsl_dif)

            orm_packed = cmds.listConnections(shader +
                                              ".specularRoughness")
            if orm_packed:
                orm_output = orm_packed[0] + ".outColor"

                mtl = glsl + ".u_MetallicTexture"
                ao = glsl + ".u_OcclusionTexture"
                rough = glsl + ".u_RoughnessTexture"

                cmds.connectAttr(orm_output, mtl)
                cmds.connectAttr(orm_output, ao)
                cmds.connectAttr(orm_output, rough)

            # connect nrm map if there is one
            bump_node = cmds.listConnections(shader +
                                             ".normalCamera")
            if bump_node:
                for bump in bump_node:
                    nrm = cmds.listConnections(bump +
                                               ".bumpValue")
                    if nrm:
                        nrm_output = nrm[0] + ".outColor"
                        glsl_nrm = glsl + ".u_NormalTexture"
                        cmds.connectAttr(nrm_output, glsl_nrm)
openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py (new file, 31 lines)

@@ -0,0 +1,31 @@
from maya import cmds

import pyblish.api
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder
)


class ValidateGLSLPlugin(pyblish.api.InstancePlugin):
    """
    Validate that the maya2glTF plugin used for GLSL shaders is loaded.
    """

    order = ValidateContentsOrder + 0.15
    families = ['gltf']
    hosts = ['maya']
    label = 'maya2glTF plugin'
    actions = [RepairAction]

    def process(self, instance):
        if not cmds.pluginInfo("maya2glTF", query=True, loaded=True):
            raise RuntimeError("maya2glTF is not loaded")

    @classmethod
    def repair(cls, instance):
        """
        Repair the instance by loading the plugin.
        """
        return cmds.loadPlugin("maya2glTF", quiet=True)
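Illustrative sketch, not part of the commit: this is all the validator above enforces, and the repair action is equivalent to the manual fix below (assumes a running Maya session with the maya2glTF plugin installed).

    from maya import cmds

    # the check: fail when the maya2glTF plugin is not loaded
    if not cmds.pluginInfo("maya2glTF", query=True, loaded=True):
        # the repair: load it quietly, like ValidateGLSLPlugin.repair does
        cmds.loadPlugin("maya2glTF", quiet=True)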
@@ -7,12 +7,13 @@ from openpype.hosts.maya.api import MayaHost
from maya import cmds

host = MayaHost()
install_host(host)

print("Starting OpenPype usersetup...")

# Open workfile post initialization.
key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION"
if bool(int(os.environ.get(key, "0"))):
@@ -63,5 +63,12 @@ class NukeAddon(OpenPypeModule, IHostAddon):
            path_paths.append(quick_time_path)
        env["PATH"] = os.pathsep.join(path_paths)

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(NUKE_ROOT_DIR, "hooks")
        ]

    def get_workfile_extensions(self):
        return [".nk"]
@@ -30,7 +30,6 @@ from .pipeline import (
    parse_container,
    update_container,

    get_workfile_build_placeholder_plugins,
)
from .lib import (
    INSTANCE_DATA_KNOB,

@@ -79,8 +78,6 @@ __all__ = (
    "parse_container",
    "update_container",

    "get_workfile_build_placeholder_plugins",

    "INSTANCE_DATA_KNOB",
    "ROOT_DATA_KNOB",
    "maintained_selection",
openpype/hosts/nuke/api/constants.py (new file, 4 lines)

@@ -0,0 +1,4 @@
import os


ASSIST = bool(os.getenv("NUKEASSIST"))
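Illustrative sketch, not part of the commit: the `ASSIST` constant is resolved once at import time from the `NUKEASSIST` environment variable, which the new `PrelaunchNukeAssistHook` further down this diff sets to `"1"`.

    import os

    os.environ["NUKEASSIST"] = "1"  # what PrelaunchNukeAssistHook sets
    assert bool(os.getenv("NUKEASSIST")) is True

    os.environ.pop("NUKEASSIST", None)  # regular Nuke: variable absent
    assert bool(os.getenv("NUKEASSIST")) is False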
@@ -50,6 +50,7 @@ from openpype.pipeline.colorspace import (
from openpype.pipeline.workfile import BuildWorkfile

from . import gizmo_menu
from .constants import ASSIST

from .workio import (
    save_file,
@@ -215,7 +216,7 @@ def update_node_data(node, knobname, data):


class Knobby(object):
    """[DEPRICATED] For creating knob whose type isn't
    """[DEPRECATED] For creating knob whose type isn't
    mapped in `create_knobs`

    Args:

@@ -249,7 +250,7 @@ class Knobby(object):


def create_knobs(data, tab=None):
    """[DEPRICATED] Create knobs by data
    """Create knobs by data

    Depending on the type of each dict value, creates the correct Knob.

@@ -343,7 +344,7 @@ def create_knobs(data, tab=None):


def imprint(node, data, tab=None):
    """[DEPRICATED] Store attributes with value on node
    """Store attributes with value on node

    Parse user data into Node knobs.
    Use `collections.OrderedDict` to ensure knob order.

@@ -398,8 +399,9 @@ def imprint(node, data, tab=None):
    node.addKnob(knob)


@deprecated
def add_publish_knob(node):
    """[DEPRICATED] Add Publish knob to node
    """[DEPRECATED] Add Publish knob to node

    Arguments:
        node (nuke.Node): nuke node to be processed

@@ -416,8 +418,9 @@ def add_publish_knob(node):
    return node


@deprecated
def set_avalon_knob_data(node, data=None, prefix="avalon:"):
    """[DEPRICATED] Sets data into node's avalon knob
    """[DEPRECATED] Sets data into node's avalon knob

    Arguments:
        node (nuke.Node): Nuke node to imprint with data,

@@ -478,8 +481,9 @@ def set_avalon_knob_data(node, data=None, prefix="avalon:"):
    return node


@deprecated
def get_avalon_knob_data(node, prefix="avalon:", create=True):
    """[DEPRICATED] Gets data from node's avalon knob
    """[DEPRECATED] Gets data from node's avalon knob

    Arguments:
        node (obj): Nuke node to search for data,

@@ -521,8 +525,9 @@ def get_avalon_knob_data(node, prefix="avalon:", create=True):
    return data


@deprecated
def fix_data_for_node_create(data):
    """[DEPRICATED] Fixing data to be used for nuke knobs
    """[DEPRECATED] Fixing data to be used for nuke knobs
    """
    for k, v in data.items():
        if isinstance(v, six.text_type):

@@ -532,8 +537,9 @@ def fix_data_for_node_create(data):
    return data


@deprecated
def add_write_node_legacy(name, **kwarg):
    """[DEPRICATED] Adding nuke write node
    """[DEPRECATED] Adding nuke write node
    Arguments:
        name (str): nuke node name
        kwarg (attrs): data for nuke knobs

@@ -697,7 +703,7 @@ def get_nuke_imageio_settings():

@deprecated("openpype.hosts.nuke.api.lib.get_nuke_imageio_settings")
def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
    '''[DEPRICATED] Get preset data for dataflow (fileType, compression, bitDepth)
    '''[DEPRECATED] Get preset data for dataflow (fileType, compression, bitDepth)
    '''

    assert any([creator, nodeclass]), nuke.message(
@@ -1241,7 +1247,7 @@ def create_write_node(
            nodes to be created before write with dependency
        review (bool)[optional]: adding review knob
        farm (bool)[optional]: rendering workflow target
        kwargs (dict)[optional]: additional key arguments for formating
        kwargs (dict)[optional]: additional key arguments for formatting

    Example:
        prenodes = {
@@ -2258,14 +2264,20 @@ class WorkfileSettings(object):
            node['frame_range'].setValue(range)
            node['frame_range_lock'].setValue(True)

        set_node_data(
            self._root_node,
            INSTANCE_DATA_KNOB,
            {
                "handleStart": int(handle_start),
                "handleEnd": int(handle_end)
            }
        )
        if not ASSIST:
            set_node_data(
                self._root_node,
                INSTANCE_DATA_KNOB,
                {
                    "handleStart": int(handle_start),
                    "handleEnd": int(handle_end)
                }
            )
        else:
            log.warning(
                "NukeAssist mode does not allow "
                "updating custom knobs..."
            )

    def reset_resolution(self):
        """Set resolution to project resolution."""
@@ -60,6 +60,7 @@ from .workio import (
    work_root,
    current_file
)
from .constants import ASSIST

log = Logger.get_logger(__name__)
@@ -72,7 +73,6 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")

MENU_LABEL = os.environ["AVALON_LABEL"]


# registering pyblish gui regarding settings in presets
if os.getenv("PYBLISH_GUI", None):
    pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None))
@@ -101,6 +101,12 @@ class NukeHost(
    def get_workfile_extensions(self):
        return file_extensions()

    def get_workfile_build_placeholder_plugins(self):
        return [
            NukePlaceholderLoadPlugin,
            NukePlaceholderCreatePlugin
        ]

    def get_containers(self):
        return ls()
@@ -200,45 +206,45 @@ def _show_workfiles():
    host_tools.show_workfiles(parent=None, on_top=False)


def get_workfile_build_placeholder_plugins():
    return [
        NukePlaceholderLoadPlugin,
        NukePlaceholderCreatePlugin
    ]


def _install_menu():
    """Install Avalon menu into Nuke's main menu bar."""

    # uninstall original avalon menu
    main_window = get_main_window()
    menubar = nuke.menu("Nuke")
    menu = menubar.addMenu(MENU_LABEL)

    label = "{0}, {1}".format(
        os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
    )
    Context.context_label = label
    context_action = menu.addCommand(label)
    context_action.setEnabled(False)
    if not ASSIST:
        label = "{0}, {1}".format(
            os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"]
        )
        Context.context_label = label
        context_action = menu.addCommand(label)
        context_action.setEnabled(False)

        # add separator after context label
        menu.addSeparator()

    menu.addSeparator()
    menu.addCommand(
        "Work Files...",
        _show_workfiles
    )

    menu.addSeparator()
    menu.addCommand(
        "Create...",
        lambda: host_tools.show_publisher(
            tab="create"
        )
    )
    menu.addCommand(
        "Publish...",
        lambda: host_tools.show_publisher(
            tab="publish"
        )
    )
    if not ASSIST:
        menu.addCommand(
            "Create...",
            lambda: host_tools.show_publisher(
                tab="create"
            )
        )
        menu.addCommand(
            "Publish...",
            lambda: host_tools.show_publisher(
                tab="publish"
            )
        )

    menu.addCommand(
        "Load...",
        lambda: host_tools.show_loader(
@@ -286,15 +292,18 @@ def _install_menu():
        "Build Workfile from template",
        lambda: build_workfile_template()
    )
    menu_template.addSeparator()
    menu_template.addCommand(
        "Create Place Holder",
        lambda: create_placeholder()
    )
    menu_template.addCommand(
        "Update Place Holder",
        lambda: update_placeholder()
    )

    if not ASSIST:
        menu_template.addSeparator()
        menu_template.addCommand(
            "Create Place Holder",
            lambda: create_placeholder()
        )
        menu_template.addCommand(
            "Update Place Holder",
            lambda: update_placeholder()
        )

    menu.addSeparator()
    menu.addCommand(
        "Experimental tools...",
@@ -558,9 +558,7 @@ class ExporterReview(object):
        self.path_in = self.instance.data.get("path", None)
        self.staging_dir = self.instance.data["stagingDir"]
        self.collection = self.instance.data.get("collection", None)
        self.data = dict({
            "representations": list()
        })
        self.data = {"representations": []}

    def get_file_info(self):
        if self.collection:

@@ -626,7 +624,7 @@ class ExporterReview(object):
        nuke_imageio = opnlib.get_nuke_imageio_settings()

        # TODO: this is only securing backward compatibility lets remove
        # this once all projects's anotomy are updated to newer config
        # this once all projects' anatomy are updated to newer config
        if "baking" in nuke_imageio.keys():
            return nuke_imageio["baking"]["viewerProcess"]
        else:
@@ -823,8 +821,41 @@ class ExporterReviewMov(ExporterReview):
        add_tags = []
        self.publish_on_farm = farm
        read_raw = kwargs["read_raw"]

        # TODO: remove this when `reformat_nodes_config`
        # is changed in settings
        reformat_node_add = kwargs["reformat_node_add"]
        reformat_node_config = kwargs["reformat_node_config"]

        # TODO: make this required in future
        reformat_nodes_config = kwargs.get("reformat_nodes_config", {})

        # TODO: remove this once deprecated is removed
        # make sure only reformat_nodes_config is used in future
        if reformat_node_add and reformat_nodes_config.get("enabled"):
            self.log.warning(
                "`reformat_node_add` is deprecated. "
                "Please use only `reformat_nodes_config` instead.")
            reformat_nodes_config = None

        # TODO: reformat code when backward compatibility is not needed
        # warn if reformat_nodes_config is not set
        if not reformat_nodes_config:
            self.log.warning(
                "Please set `reformat_nodes_config` in settings. "
                "Using `reformat_node_config` instead."
            )
            reformat_nodes_config = {
                "enabled": reformat_node_add,
                "reposition_nodes": [
                    {
                        "node_class": "Reformat",
                        "knobs": reformat_node_config
                    }
                ]
            }

        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]
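Illustrative sketch, not part of the commit, of the mapping the fallback above performs; the knob list shape is an assumption for the example:

    # deprecated settings (assumed example values)
    reformat_node_add = True
    reformat_node_config = [{"name": "type", "value": "to format"}]

    # equivalent new-style value built by the fallback
    reformat_nodes_config = {
        "enabled": reformat_node_add,
        "reposition_nodes": [
            {"node_class": "Reformat", "knobs": reformat_node_config}
        ]
    }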
@@ -846,7 +877,6 @@ class ExporterReviewMov(ExporterReview):

        subset = self.instance.data["subset"]
        self._temp_nodes[subset] = []
        # ---------- start nodes creation

        # Read node
        r_node = nuke.createNode("Read")
@@ -860,44 +890,39 @@ class ExporterReviewMov(ExporterReview):
        if read_raw:
            r_node["raw"].setValue(1)

        # connect
        self._temp_nodes[subset].append(r_node)
        self.previous_node = r_node
        self.log.debug("Read... `{}`".format(self._temp_nodes[subset]))
        # connect to Read node
        self._shift_to_previous_node_and_temp(subset, r_node, "Read... `{}`")

        # add reformat node
        if reformat_node_add:
        if reformat_nodes_config["enabled"]:
            reposition_nodes = reformat_nodes_config["reposition_nodes"]
            for reposition_node in reposition_nodes:
                node_class = reposition_node["node_class"]
                knobs = reposition_node["knobs"]
                node = nuke.createNode(node_class)
                set_node_knobs_from_settings(node, knobs)

                # connect in order
                self._connect_to_above_nodes(
                    node, subset, "Reposition node... `{}`"
                )
            # append reformated tag
            add_tags.append("reformated")

            rf_node = nuke.createNode("Reformat")
            set_node_knobs_from_settings(rf_node, reformat_node_config)

            # connect
            rf_node.setInput(0, self.previous_node)
            self._temp_nodes[subset].append(rf_node)
            self.previous_node = rf_node
            self.log.debug(
                "Reformat... `{}`".format(self._temp_nodes[subset]))

        # only create colorspace baking if toggled on
        if bake_viewer_process:
            if bake_viewer_input_process_node:
                # View Process node
                ipn = get_view_process_node()
                if ipn is not None:
                    # connect
                    ipn.setInput(0, self.previous_node)
                    self._temp_nodes[subset].append(ipn)
                    self.previous_node = ipn
                    self.log.debug(
                        "ViewProcess... `{}`".format(
                            self._temp_nodes[subset]))
                    # connect to ViewProcess node
                    self._connect_to_above_nodes(
                        ipn, subset, "ViewProcess... `{}`")

            if not self.viewer_lut_raw:
                # OCIODisplay
                dag_node = nuke.createNode("OCIODisplay")

                # assign display
                display, viewer = get_viewer_config_from_string(
                    str(baking_view_profile)
                )
@@ -907,13 +932,7 @@ class ExporterReviewMov(ExporterReview):
                # assign viewer
                dag_node["view"].setValue(viewer)

                # connect
                dag_node.setInput(0, self.previous_node)
                self._temp_nodes[subset].append(dag_node)
                self.previous_node = dag_node
                self.log.debug("OCIODisplay... `{}`".format(
                    self._temp_nodes[subset]))

                self._connect_to_above_nodes(
                    dag_node, subset, "OCIODisplay... `{}`")
        # Write node
        write_node = nuke.createNode("Write")
        self.log.debug("Path: {}".format(self.path))
@@ -967,6 +986,15 @@ class ExporterReviewMov(ExporterReview):

        return self.data

    def _shift_to_previous_node_and_temp(self, subset, node, message):
        self._temp_nodes[subset].append(node)
        self.previous_node = node
        self.log.debug(message.format(self._temp_nodes[subset]))

    def _connect_to_above_nodes(self, node, subset, message):
        node.setInput(0, self.previous_node)
        self._shift_to_previous_node_and_temp(subset, node, message)


@deprecated("openpype.hosts.nuke.api.plugin.NukeWriteCreator")
class AbstractWriteRender(OpenPypeCreator):
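Illustrative sketch, not part of the commit: the two helpers above replace the repeated append/assign/debug pattern. Each created node is appended to the per-subset temp list and becomes `previous_node`; `_connect_to_above_nodes` additionally wires it as input of the chain. Standalone, outside Nuke:

    class ChainSketch:
        def __init__(self):
            self._temp_nodes = {"baking": []}
            self.previous_node = None

        def _shift_to_previous_node_and_temp(self, subset, node, message):
            self._temp_nodes[subset].append(node)
            self.previous_node = node
            print(message.format(self._temp_nodes[subset]))

    chain = ChainSketch()
    chain._shift_to_previous_node_and_temp("baking", "Read1", "Read... `{}`")
    chain._shift_to_previous_node_and_temp(
        "baking", "Reformat1", "Reposition node... `{}`")
    assert chain.previous_node == "Reformat1"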
openpype/hosts/nuke/hooks/pre_nukeassist_setup.py (new file, 11 lines)

@@ -0,0 +1,11 @@
from openpype.lib import PreLaunchHook


class PrelaunchNukeAssistHook(PreLaunchHook):
    """
    Add the NUKEASSIST flag when launching NukeAssist.
    """
    app_groups = ["nukeassist"]

    def execute(self):
        self.launch_context.env["NUKEASSIST"] = "1"
@@ -220,8 +220,20 @@ class LoadClip(plugin.NukeLoader):
            dict: altered representation data
        """
        representation = deepcopy(representation)
        frame = representation["context"]["frame"]
        representation["context"]["frame"] = "#" * len(str(frame))
        context = representation["context"]
        template = representation["data"]["template"]
        if (
            "{originalBasename}" in template
            and "frame" in context
        ):
            frame = context["frame"]
            hashed_frame = "#" * len(str(frame))
            origin_basename = context["originalBasename"]
            context["originalBasename"] = origin_basename.replace(
                frame, hashed_frame
            )

            representation["context"]["frame"] = hashed_frame
        return representation

    def update(self, container, representation):
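Illustrative sketch, not part of the commit, of the basename rewrite the added branch performs (the file name is a made-up example):

    frame = "1001"
    hashed_frame = "#" * len(frame)     # "####"
    origin_basename = "plateMain.1001"  # assumed example name
    print(origin_basename.replace(frame, hashed_frame))
    # plateMain.####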
@@ -23,7 +23,7 @@ class ExtractReviewData(publish.Extractor):
        representations = instance.data.get("representations", [])

        # review can be removed since `ProcessSubmittedJobOnFarm` will create
        # reviable representation if needed
        # reviewable representation if needed
        if (
            "render.farm" in instance.data["families"]
            and "review" in instance.data["families"]
@@ -309,8 +309,6 @@ class QtTVPaintRpc(BaseTVPaintRpc):
        self.add_methods(
            (route_name, self.workfiles_tool),
            (route_name, self.loader_tool),
            (route_name, self.creator_tool),
            (route_name, self.subset_manager_tool),
            (route_name, self.publish_tool),
            (route_name, self.scene_inventory_tool),
            (route_name, self.library_loader_tool),

@@ -330,21 +328,9 @@ class QtTVPaintRpc(BaseTVPaintRpc):
        self._execute_in_main_thread(item)
        return

    async def creator_tool(self):
        log.info("Triggering Creator tool")
        item = MainThreadItem(self.tools_helper.show_creator)
        await self._async_execute_in_main_thread(item, wait=False)

    async def subset_manager_tool(self):
        log.info("Triggering Subset Manager tool")
        item = MainThreadItem(self.tools_helper.show_subset_manager)
        # Do not wait for result of callback
        self._execute_in_main_thread(item, wait=False)
        return

    async def publish_tool(self):
        log.info("Triggering Publish tool")
        item = MainThreadItem(self.tools_helper.show_publish)
        item = MainThreadItem(self.tools_helper.show_publisher_tool)
        self._execute_in_main_thread(item)
        return

@@ -859,10 +845,6 @@ class QtCommunicator(BaseCommunicator):
        "callback": "loader_tool",
        "label": "Load",
        "help": "Open loader tool"
    }, {
        "callback": "creator_tool",
        "label": "Create",
        "help": "Open creator tool"
    }, {
        "callback": "scene_inventory_tool",
        "label": "Scene inventory",

@@ -875,10 +857,6 @@ class QtCommunicator(BaseCommunicator):
        "callback": "library_loader_tool",
        "label": "Library",
        "help": "Open library loader tool"
    }, {
        "callback": "subset_manager_tool",
        "label": "Subset Manager",
        "help": "Open subset manager tool"
    }, {
        "callback": "experimental_tools",
        "label": "Experimental tools",
@@ -43,14 +43,15 @@ def parse_layers_data(data):
            layer_id, group_id, visible, position, opacity, name,
            layer_type,
            frame_start, frame_end, prelighttable, postlighttable,
            selected, editable, sencil_state
            selected, editable, sencil_state, is_current
        ) = layer_raw.split("|")
        layer = {
            "layer_id": int(layer_id),
            "group_id": int(group_id),
            "visible": visible == "ON",
            "position": int(position),
            "opacity": int(opacity),
            # Opacity from 'tv_layerinfo' is always set to '0' so it's unusable
            # "opacity": int(opacity),
            "name": name,
            "type": layer_type,
            "frame_start": int(frame_start),

@@ -59,7 +60,8 @@ def parse_layers_data(data):
            "postlighttable": postlighttable == "1",
            "selected": selected == "1",
            "editable": editable == "1",
            "sencil_state": sencil_state
            "sencil_state": sencil_state,
            "is_current": is_current == "1"
        }
        layers.append(layer)
    return layers
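Illustrative sketch, not part of the commit, of parsing one layer record with the new trailing `is_current` field; the sample values are made up:

    layer_raw = "1|2|ON|0|0|colour|image|1|24|0|0|1|1|on|1"
    fields = layer_raw.split("|")
    assert len(fields) == 15  # 14 fields before this change, 15 with is_current
    is_current = fields[-1] == "1"
    assert is_current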
@@ -87,15 +89,17 @@ def get_layers_data_george_script(output_filepath, layer_ids=None):
            " selected editable sencilState"
        ),
        # Check if layer ID matches `tv_LayerCurrentID`
        "is_current=0",
        "IF CMP(current_layer_id, layer_id)==1",
        # - mark layer as selected if its id matches the current layer id
        "is_current=1",
        "selected=1",
        "END",
        # Prepare line with data separated by "|"
        (
            "line = layer_id'|'group_id'|'visible'|'position'|'opacity'|'"
            "name'|'type'|'startFrame'|'endFrame'|'prelighttable'|'"
            "postlighttable'|'selected'|'editable'|'sencilState"
            "postlighttable'|'selected'|'editable'|'sencilState'|'is_current"
        ),
        # Write data to output file
        "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line",
@@ -202,8 +206,9 @@ def get_groups_data(communicator=None):
        # Variable containing full path to output file
        "output_path = \"{}\"".format(output_filepath),
        "empty = 0",
        # Loop over 100 groups
        "FOR idx = 1 TO 100",
        # Loop over 26 groups, currently the maximum possible (in 11.7)
        # - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880
        "FOR idx = 1 TO 26",
        # Receive information about groups
        "tv_layercolor \"getcolor\" 0 idx",
        "PARSE result clip_id group_index c_red c_green c_blue group_name",
@@ -8,7 +8,7 @@ import requests
import pyblish.api

from openpype.client import get_project, get_asset_by_name
from openpype.host import HostBase, IWorkfileHost, ILoadHost
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback

@@ -18,6 +18,7 @@ from openpype.pipeline import (
    register_creator_plugin_path,
    AVALON_CONTAINER_ID,
)
from openpype.pipeline.context_tools import get_global_context

from .lib import (
    execute_george,

@@ -29,6 +30,7 @@ log = logging.getLogger(__name__)

METADATA_SECTION = "avalon"
SECTION_NAME_CONTEXT = "context"
SECTION_NAME_CREATE_CONTEXT = "create_context"
SECTION_NAME_INSTANCES = "instances"
SECTION_NAME_CONTAINERS = "containers"
# Maximum length of metadata chunk string
@@ -58,7 +60,7 @@ instances=2
"""


class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
    name = "tvpaint"

    def install(self):
@@ -85,14 +87,63 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
        registered_callbacks = (
            pyblish.api.registered_callbacks().get("instanceToggled") or []
        )
        if self.on_instance_toggle not in registered_callbacks:
            pyblish.api.register_callback(
                "instanceToggled", self.on_instance_toggle
            )

        register_event_callback("application.launched", self.initial_launch)
        register_event_callback("application.exit", self.application_exit)

    def get_current_project_name(self):
        """
        Returns:
            Union[str, None]: Current project name.
        """

        return self.get_current_context().get("project_name")

    def get_current_asset_name(self):
        """
        Returns:
            Union[str, None]: Current asset name.
        """

        return self.get_current_context().get("asset_name")

    def get_current_task_name(self):
        """
        Returns:
            Union[str, None]: Current task name.
        """

        return self.get_current_context().get("task_name")

    def get_current_context(self):
        context = get_current_workfile_context()
        if not context:
            return get_global_context()

        if "project_name" in context:
            return context
        # This is the legacy way the context was stored
        return {
            "project_name": context.get("project"),
            "asset_name": context.get("asset"),
            "task_name": context.get("task")
        }

    # --- Create ---
    def get_context_data(self):
        return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {})

    def update_context_data(self, data, changes):
        return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data)

    def list_instances(self):
        """List all created instances from current workfile."""
        return list_instances()

    def write_instances(self, data):
        return write_instances(data)

    # --- Workfile ---
    def open_workfile(self, filepath):
        george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
            filepath.replace("\\", "/")
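Illustrative sketch, not part of the commit, of the legacy-to-current key mapping done by `get_current_context` (values are made up):

    legacy_context = {"project": "demo", "asset": "sh010", "task": "animation"}
    current_context = {
        "project_name": legacy_context.get("project"),
        "asset_name": legacy_context.get("asset"),
        "task_name": legacy_context.get("task"),
    }
    assert current_context["project_name"] == "demo"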
@@ -102,11 +153,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
    def save_workfile(self, filepath=None):
        if not filepath:
            filepath = self.get_current_workfile()
        context = {
            "project": legacy_io.Session["AVALON_PROJECT"],
            "asset": legacy_io.Session["AVALON_ASSET"],
            "task": legacy_io.Session["AVALON_TASK"]
        }
        context = get_global_context()
        save_current_workfile_context(context)

        # Execute george script to save workfile.
@@ -125,6 +172,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
    def get_workfile_extensions(self):
        return [".tvpp"]

    # --- Load ---
    def get_containers(self):
        return get_containers()
@@ -137,27 +185,15 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
            return

        log.info("Setting up project...")
        set_context_settings()

    def remove_instance(self, instance):
        """Remove instance from current workfile metadata.

        Implementation for Subset manager tool.
        """

        current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
        instance_id = instance.get("uuid")
        found_idx = None
        if instance_id:
            for idx, _inst in enumerate(current_instances):
                if _inst["uuid"] == instance_id:
                    found_idx = idx
                    break

        if found_idx is None:
        global_context = get_global_context()
        project_name = global_context.get("project_name")
        asset_name = global_context.get("asset_name")
        if not project_name or not asset_name:
            return
        current_instances.pop(found_idx)
        write_instances(current_instances)

        asset_doc = get_asset_by_name(project_name, asset_name)

        set_context_settings(project_name, asset_doc)

    def application_exit(self):
        """Logic related to TimerManager.
@@ -177,34 +213,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
        rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
        requests.post(rest_api_url)

    def on_instance_toggle(self, instance, old_value, new_value):
        """Update instance data in workfile on publish toggle."""
        # Review may not have a real instance in workfile metadata
        if not instance.data.get("uuid"):
            return

        instance_id = instance.data["uuid"]
        found_idx = None
        current_instances = list_instances()
        for idx, workfile_instance in enumerate(current_instances):
            if workfile_instance["uuid"] == instance_id:
                found_idx = idx
                break

        if found_idx is None:
            return

        if "active" in current_instances[found_idx]:
            current_instances[found_idx]["active"] = new_value
            self.write_instances(current_instances)

    def list_instances(self):
        """List all created instances from current workfile."""
        return list_instances()

    def write_instances(self, data):
        return write_instances(data)


def containerise(
    name, namespace, members, context, loader, current_containers=None
@@ -462,40 +470,17 @@ def get_containers():
    return output


def set_context_settings(asset_doc=None):
def set_context_settings(project_name, asset_doc):
    """Set workfile settings by asset document data.

    Change fps, resolution and frame start/end.
    """

    project_name = legacy_io.active_project()
    if asset_doc is None:
        asset_name = legacy_io.Session["AVALON_ASSET"]
        # Use current session asset if not passed
        asset_doc = get_asset_by_name(project_name, asset_name)

    project_doc = get_project(project_name)

    framerate = asset_doc["data"].get("fps")
    if framerate is None:
        framerate = project_doc["data"].get("fps")

    if framerate is not None:
        execute_george(
            "tv_framerate {} \"timestretch\"".format(framerate)
        )
    else:
        print("Framerate was not found!")

    width_key = "resolutionWidth"
    height_key = "resolutionHeight"

    width = asset_doc["data"].get(width_key)
    height = asset_doc["data"].get(height_key)
    if width is None or height is None:
        width = project_doc["data"].get(width_key)
        height = project_doc["data"].get(height_key)

    if width is None or height is None:
        print("Resolution was not found!")
    else:
@@ -503,6 +488,15 @@ def set_context_settings(asset_doc=None):
        "tv_resizepage {} {} 0".format(width, height)
    )

    framerate = asset_doc["data"].get("fps")

    if framerate is not None:
        execute_george(
            "tv_framerate {} \"timestretch\"".format(framerate)
        )
    else:
        print("Framerate was not found!")

    frame_start = asset_doc["data"].get("frameStart")
    frame_end = asset_doc["data"].get("frameEnd")
@@ -1,80 +1,142 @@
import re
import uuid

from openpype.pipeline import (
    LegacyCreator,
    LoaderPlugin,
    registered_host,
from openpype.pipeline import LoaderPlugin
from openpype.pipeline.create import (
    CreatedInstance,
    get_subset_name,
    AutoCreator,
    Creator,
)
from openpype.pipeline.create.creator_plugins import cache_and_get_instances

from .lib import get_layers_data
from .pipeline import get_current_workfile_context


class Creator(LegacyCreator):
    def __init__(self, *args, **kwargs):
        super(Creator, self).__init__(*args, **kwargs)
        # Add unified identifier created with `uuid` module
        self.data["uuid"] = str(uuid.uuid4())
SHARED_DATA_KEY = "openpype.tvpaint.instances"

    @classmethod
    def get_dynamic_data(cls, *args, **kwargs):
        dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)

        # Change asset and name by current workfile context
        workfile_context = get_current_workfile_context()
        asset_name = workfile_context.get("asset")
        task_name = workfile_context.get("task")
        if "asset" not in dynamic_data and asset_name:
            dynamic_data["asset"] = asset_name
class TVPaintCreatorCommon:
    @property
    def subset_template_family_filter(self):
        return self.family

        if "task" not in dynamic_data and task_name:
            dynamic_data["task"] = task_name
        return dynamic_data

    @staticmethod
    def are_instances_same(instance_1, instance_2):
        """Compare instances but skip keys with unique values.

        Keys that are guaranteed to differ on a new instance,
        like "id", are skipped during the comparison.

        Returns:
            bool: True if instances are the same.
        """
        if (
            not isinstance(instance_1, dict)
            or not isinstance(instance_2, dict)
        ):
            return instance_1 == instance_2

        checked_keys = set()
        checked_keys.add("id")
        for key, value in instance_1.items():
            if key not in checked_keys:
                if key not in instance_2:
                    return False
                if value != instance_2[key]:
                    return False
            checked_keys.add(key)

        for key in instance_2.keys():
            if key not in checked_keys:
                return False
        return True

    def write_instances(self, data):
        self.log.debug(
            "Storing instance data to workfile. {}".format(str(data))
    def _cache_and_get_instances(self):
        return cache_and_get_instances(
            self, SHARED_DATA_KEY, self.host.list_instances
        )
        host = registered_host()
        return host.write_instances(data)

    def process(self):
        host = registered_host()
        data = host.list_instances()
        data.append(self.data)
        self.write_instances(data)
    def _collect_create_instances(self):
        instances_by_identifier = self._cache_and_get_instances()
        for instance_data in instances_by_identifier[self.identifier]:
            instance = CreatedInstance.from_existing(instance_data, self)
            self._add_instance_to_context(instance)

    def _update_create_instances(self, update_list):
        if not update_list:
            return

        cur_instances = self.host.list_instances()
        cur_instances_by_id = {}
        for instance_data in cur_instances:
            instance_id = instance_data.get("instance_id")
            if instance_id:
                cur_instances_by_id[instance_id] = instance_data

        for instance, changes in update_list:
            instance_data = changes.new_value
            cur_instance_data = cur_instances_by_id.get(instance.id)
            if cur_instance_data is None:
                cur_instances.append(instance_data)
                continue
            for key in set(cur_instance_data) - set(instance_data):
                cur_instance_data.pop(key)
            cur_instance_data.update(instance_data)
        self.host.write_instances(cur_instances)

    def _custom_get_subset_name(
        self,
        variant,
        task_name,
        asset_doc,
        project_name,
        host_name=None,
        instance=None
    ):
        dynamic_data = self.get_dynamic_data(
            variant, task_name, asset_doc, project_name, host_name, instance
        )

        return get_subset_name(
            self.family,
            variant,
            task_name,
            asset_doc,
            project_name,
            host_name,
            dynamic_data=dynamic_data,
            project_settings=self.project_settings,
            family_filter=self.subset_template_family_filter
        )


class TVPaintCreator(Creator, TVPaintCreatorCommon):
    def collect_instances(self):
        self._collect_create_instances()

    def update_instances(self, update_list):
        self._update_create_instances(update_list)

    def remove_instances(self, instances):
        ids_to_remove = {
            instance.id
            for instance in instances
        }
        cur_instances = self.host.list_instances()
        changed = False
        new_instances = []
        for instance_data in cur_instances:
            if instance_data.get("instance_id") in ids_to_remove:
                changed = True
            else:
                new_instances.append(instance_data)

        if changed:
            self.host.write_instances(new_instances)

        for instance in instances:
            self._remove_instance_from_context(instance)

    def get_dynamic_data(self, *args, **kwargs):
        # Change asset and name by current workfile context
        create_context = self.create_context
        asset_name = create_context.get_current_asset_name()
        task_name = create_context.get_current_task_name()
        output = {}
        if asset_name:
            output["asset"] = asset_name
        if task_name:
            output["task"] = task_name
        return output

    def get_subset_name(self, *args, **kwargs):
        return self._custom_get_subset_name(*args, **kwargs)

    def _store_new_instance(self, new_instance):
        instances_data = self.host.list_instances()
        instances_data.append(new_instance.data_to_store())
        self.host.write_instances(instances_data)
        self._add_instance_to_context(new_instance)


class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
    def collect_instances(self):
        self._collect_create_instances()

    def update_instances(self, update_list):
        self._update_create_instances(update_list)

    def get_subset_name(self, *args, **kwargs):
        return self._custom_get_subset_name(*args, **kwargs)


class Loader(LoaderPlugin):
openpype/hosts/tvpaint/plugins/create/convert_legacy.py (new file, 150 lines)

@@ -0,0 +1,150 @@
import collections

from openpype.pipeline.create.creator_plugins import (
    SubsetConvertorPlugin,
    cache_and_get_instances,
)
from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
from openpype.hosts.tvpaint.api.lib import get_groups_data


class TVPaintLegacyConverted(SubsetConvertorPlugin):
    """Conversion of legacy instances in scene to new creators.

    This convertor handles only instances created by core creators.

    All instances that would be created by auto-creators are removed,
    because by the time they are found the auto-created instances
    already exist.
    """

    identifier = "tvpaint.legacy.converter"

    def find_instances(self):
        instances_by_identifier = cache_and_get_instances(
            self, SHARED_DATA_KEY, self.host.list_instances
        )
        if instances_by_identifier[None]:
            self.add_convertor_item("Convert legacy instances")

    def convert(self):
        current_instances = self.host.list_instances()
        to_convert = collections.defaultdict(list)
        converted = False
        for instance in current_instances:
            if instance.get("creator_identifier") is not None:
                continue
            converted = True

            family = instance.get("family")
            if family in (
                "renderLayer",
                "renderPass",
                "renderScene",
                "review",
                "workfile",
            ):
                to_convert[family].append(instance)
            else:
                instance["keep"] = False

        # Skip if nothing was changed
        if not converted:
            self.remove_convertor_item()
            return

        self._convert_render_layers(
            to_convert["renderLayer"], current_instances)
        self._convert_render_passes(
            to_convert["renderPass"], current_instances)
        self._convert_render_scenes(
            to_convert["renderScene"], current_instances)
        self._convert_workfiles(
            to_convert["workfile"], current_instances)
        self._convert_reviews(
            to_convert["review"], current_instances)

        new_instances = [
            instance
            for instance in current_instances
            if instance.get("keep") is not False
        ]
        self.host.write_instances(new_instances)
        # remove legacy item if all is fine
        self.remove_convertor_item()

    def _convert_render_layers(self, render_layers, current_instances):
        if not render_layers:
            return

        # Look for possible existing render layers in scene
        render_layers_by_group_id = {}
        for instance in current_instances:
            if instance.get("creator_identifier") == "render.layer":
                group_id = instance["creator_attributes"]["group_id"]
                render_layers_by_group_id[group_id] = instance

        groups_by_id = {
            group["group_id"]: group
            for group in get_groups_data()
        }
        for render_layer in render_layers:
            group_id = render_layer.pop("group_id")
            # Just remove legacy instance if group is already occupied
            if group_id in render_layers_by_group_id:
                render_layer["keep"] = False
                continue
            # Add identifier
            render_layer["creator_identifier"] = "render.layer"
            # Change 'uuid' to 'instance_id'
            render_layer["instance_id"] = render_layer.pop("uuid")
            # Fill creator attributes
            render_layer["creator_attributes"] = {
                "group_id": group_id
            }
            render_layer["family"] = "render"
            group = groups_by_id[group_id]
            # Use group name for variant
            render_layer["variant"] = group["name"]

    def _convert_render_passes(self, render_passes, current_instances):
        if not render_passes:
            return

        # Render passes must have available render layers so we look for
        # render layers first
        # - '_convert_render_layers' must be called before this method
        render_layers_by_group_id = {}
        for instance in current_instances:
            if instance.get("creator_identifier") == "render.layer":
                group_id = instance["creator_attributes"]["group_id"]
                render_layers_by_group_id[group_id] = instance

        for render_pass in render_passes:
            group_id = render_pass.pop("group_id")
            render_layer = render_layers_by_group_id.get(group_id)
            if not render_layer:
                render_pass["keep"] = False
                continue

            render_pass["creator_identifier"] = "render.pass"
            render_pass["instance_id"] = render_pass.pop("uuid")
            render_pass["family"] = "render"

            render_pass["creator_attributes"] = {
                "render_layer_instance_id": render_layer["instance_id"]
            }
            render_pass["variant"] = render_pass.pop("pass")
            render_pass.pop("renderlayer")

    # Rest of instances are just marked for deletion
    def _convert_render_scenes(self, render_scenes, current_instances):
        for render_scene in render_scenes:
            render_scene["keep"] = False

    def _convert_workfiles(self, workfiles, current_instances):
        for workfile in workfiles:
            workfile["keep"] = False

    def _convert_reviews(self, reviews, current_instances):
        for review in reviews:
            review["keep"] = False
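Illustrative before/after, not part of the commit, of a legacy render-layer instance as the convertor rewrites it (all values are made up):

    legacy_instance = {
        "family": "renderLayer",
        "uuid": "5f2c...",                      # assumed example id
        "group_id": 3,
        "subset": "renderMainBeauty",
    }

    converted_instance = {
        "family": "render",
        "creator_identifier": "render.layer",
        "instance_id": "5f2c...",               # moved from "uuid"
        "creator_attributes": {"group_id": 3},  # moved from "group_id"
        "subset": "renderMainBeauty",
    }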
openpype/hosts/tvpaint/plugins/create/create_render.py (new file, 1151 lines)

File diff suppressed because it is too large.
@@ -1,231 +0,0 @@
from openpype.lib import prepare_template_data
from openpype.pipeline import CreatorError
from openpype.hosts.tvpaint.api import (
    plugin,
    CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import (
    get_layers_data,
    get_groups_data,
    execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import list_instances


class CreateRenderlayer(plugin.Creator):
    """Mark layer group as one instance."""
    name = "render_layer"
    label = "RenderLayer"
    family = "renderLayer"
    icon = "cube"
    defaults = ["Main"]

    rename_group = True
    render_pass = "beauty"

    rename_script_template = (
        "tv_layercolor \"setcolor\""
        " {clip_id} {group_id} {r} {g} {b} \"{name}\""
    )

    dynamic_subset_keys = [
        "renderpass", "renderlayer", "render_pass", "render_layer", "group"
    ]

    @classmethod
    def get_dynamic_data(
        cls, variant, task_name, asset_id, project_name, host_name
    ):
        dynamic_data = super(CreateRenderlayer, cls).get_dynamic_data(
            variant, task_name, asset_id, project_name, host_name
        )
        # Use render pass name from creator's plugin
        dynamic_data["renderpass"] = cls.render_pass
        # Add variant to render layer
        dynamic_data["renderlayer"] = variant
        # Change family for subset name fill
        dynamic_data["family"] = "render"

        # TODO remove - Backwards compatibility for old subset name templates
        # - added 2022/04/28
        dynamic_data["render_pass"] = dynamic_data["renderpass"]
        dynamic_data["render_layer"] = dynamic_data["renderlayer"]

        return dynamic_data

    @classmethod
    def get_default_variant(cls):
        """Default value for variant in Creator tool.

        Method checks if TVPaint implementation is running and tries to find
        selected layers from TVPaint. If only one is selected, its name is
        returned.

        Returns:
            str: Default variant name for Creator tool.
        """
        # Validate that communication is initialized
        if CommunicationWrapper.communicator:
            # Get currently selected layers
            layers_data = get_layers_data()

            selected_layers = [
                layer
                for layer in layers_data
                if layer["selected"]
            ]
            # Return layer name if only one is selected
            if len(selected_layers) == 1:
                return selected_layers[0]["name"]

        # Use defaults
        if cls.defaults:
            return cls.defaults[0]
        return None

    def process(self):
        self.log.debug("Query data from workfile.")
        instances = list_instances()
        layers_data = get_layers_data()

        self.log.debug("Checking for selection groups.")
        # Collect group ids from selection
        group_ids = set()
        for layer in layers_data:
            if layer["selected"]:
                group_ids.add(layer["group_id"])

        # Raise if there is no selection
        if not group_ids:
            raise CreatorError("Nothing is selected.")

        # This creator should run only on one group
        if len(group_ids) > 1:
            raise CreatorError("More than one group is in selection.")

        group_id = tuple(group_ids)[0]
        # If group id is `0` it is `default` group which is invalid
        if group_id == 0:
            raise CreatorError(
                "Selection is not in group. Can't mark selection as Beauty."
            )

        self.log.debug(f"Selected group id is \"{group_id}\".")
        self.data["group_id"] = group_id

        group_data = get_groups_data()
        group_name = None
        for group in group_data:
            if group["group_id"] == group_id:
                group_name = group["name"]
                break

        if group_name is None:
            raise AssertionError(
                "Couldn't find group by id \"{}\"".format(group_id)
            )

        subset_name_fill_data = {
            "group": group_name
        }

        family = self.family = self.data["family"]

        # Fill dynamic key 'group'
        subset_name = self.data["subset"].format(
            **prepare_template_data(subset_name_fill_data)
        )
        self.data["subset"] = subset_name

        # Check for instances of same group
        existing_instance = None
        existing_instance_idx = None
        # Check if subset name is not already taken
        same_subset_instance = None
        same_subset_instance_idx = None
        for idx, instance in enumerate(instances):
            if instance["family"] == family:
                if instance["group_id"] == group_id:
                    existing_instance = instance
                    existing_instance_idx = idx
                elif instance["subset"] == subset_name:
                    same_subset_instance = instance
                    same_subset_instance_idx = idx

            if (
                same_subset_instance_idx is not None
                and existing_instance_idx is not None
            ):
                break

        if same_subset_instance_idx is not None:
            if self._ask_user_subset_override(same_subset_instance):
                instances.pop(same_subset_instance_idx)
            else:
                return

        if existing_instance is not None:
            self.log.info(
                f"Beauty instance for group id {group_id} already exists"
                ", overriding"
            )
            instances[existing_instance_idx] = self.data
        else:
            instances.append(self.data)

        self.write_instances(instances)

        if not self.rename_group:
            self.log.info("Group rename function is turned off. Skipping")
            return

        self.log.debug("Querying groups data from workfile.")
        groups_data = get_groups_data()

        self.log.debug("Changing name of the group.")
        selected_group = None
        for group_data in groups_data:
            if group_data["group_id"] == group_id:
                selected_group = group_data

        # Rename TVPaint group (keep color same)
        # - groups can't contain spaces
        new_group_name = self.data["variant"].replace(" ", "_")
        rename_script = self.rename_script_template.format(
            clip_id=selected_group["clip_id"],
            group_id=selected_group["group_id"],
            r=selected_group["red"],
            g=selected_group["green"],
            b=selected_group["blue"],
            name=new_group_name
        )
        execute_george_through_file(rename_script)

        self.log.info(
            f"Name of group with index {group_id}"
            f" was changed to \"{new_group_name}\"."
        )

    def _ask_user_subset_override(self, instance):
        from qtpy import QtCore
        from qtpy.QtWidgets import QMessageBox

        title = "Subset \"{}\" already exists".format(instance["subset"])
        text = (
            "Instance with subset name \"{}\" already exists."
            "\n\nDo you want to override the existing one?"
        ).format(instance["subset"])

        dialog = QMessageBox()
        dialog.setWindowFlags(
            dialog.windowFlags()
            | QtCore.Qt.WindowStaysOnTopHint
        )
        dialog.setWindowTitle(title)
        dialog.setText(text)
        dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        dialog.setDefaultButton(QMessageBox.Yes)
        dialog.exec_()
        if dialog.result() == QMessageBox.Yes:
            return True
        return False
@ -1,167 +0,0 @@
|
|||
from openpype.pipeline import CreatorError
|
||||
from openpype.lib import prepare_template_data
|
||||
from openpype.hosts.tvpaint.api import (
|
||||
plugin,
|
||||
CommunicationWrapper
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.lib import get_layers_data
|
||||
from openpype.hosts.tvpaint.api.pipeline import list_instances
|
||||
|
||||
|
||||
class CreateRenderPass(plugin.Creator):
|
||||
"""Render pass is combination of one or more layers from same group.
|
||||
|
||||
Requirement to create Render Pass is to have already created beauty
|
||||
instance. Beauty instance is used as base for subset name.
|
||||
"""
|
||||
name = "render_pass"
|
||||
label = "RenderPass"
|
||||
family = "renderPass"
|
||||
icon = "cube"
|
||||
defaults = ["Main"]
|
||||
|
||||
dynamic_subset_keys = [
|
||||
"renderpass", "renderlayer", "render_pass", "render_layer"
|
||||
]
|
||||
|
||||
    @classmethod
    def get_dynamic_data(
        cls, variant, task_name, asset_id, project_name, host_name
    ):
        dynamic_data = super(CreateRenderPass, cls).get_dynamic_data(
            variant, task_name, asset_id, project_name, host_name
        )
        dynamic_data["renderpass"] = variant
        dynamic_data["family"] = "render"

        # TODO remove - Backwards compatibility for old subset name templates
        # - added 2022/04/28
        dynamic_data["render_pass"] = dynamic_data["renderpass"]

        return dynamic_data

    @classmethod
    def get_default_variant(cls):
        """Default value for variant in Creator tool.

        Method checks if the TVPaint implementation is running and tries to
        find selected layers in TVPaint. If exactly one is selected, its
        name is returned.

        Returns:
            str: Default variant name for Creator tool.
        """
        # Validate that communication is initialized
        if CommunicationWrapper.communicator:
            # Get currently selected layers
            layers_data = get_layers_data()

            selected_layers = [
                layer
                for layer in layers_data
                if layer["selected"]
            ]
            # Return layer name if only one is selected
            if len(selected_layers) == 1:
                return selected_layers[0]["name"]

        # Use defaults
        if cls.defaults:
            return cls.defaults[0]
        return None

    def process(self):
        self.log.debug("Query data from workfile.")
        instances = list_instances()
        layers_data = get_layers_data()

        self.log.debug("Checking selection.")
        # Get all selected layers and their group ids
        group_ids = set()
        selected_layers = []
        for layer in layers_data:
            if layer["selected"]:
                selected_layers.append(layer)
                group_ids.add(layer["group_id"])

        # Raise if nothing is selected
        if not selected_layers:
            raise CreatorError("Nothing is selected.")

        # Raise if layers from multiple groups are selected
        if len(group_ids) != 1:
            raise CreatorError("More than one group is in selection.")

        group_id = tuple(group_ids)[0]
        self.log.debug(f"Selected group id is \"{group_id}\".")

        # Find beauty instance for selected layers
        beauty_instance = None
        for instance in instances:
            if (
                instance["family"] == "renderLayer"
                and instance["group_id"] == group_id
            ):
                beauty_instance = instance
                break

        # Beauty is required for this creator so raise if it was not found
        if beauty_instance is None:
            raise CreatorError("Beauty pass does not exist yet.")

        subset_name = self.data["subset"]

        subset_name_fill_data = {}

        # Backwards compatibility
        # - beauty may be created with an older creator where variant was
        #   not stored
        if "variant" not in beauty_instance:
            render_layer = beauty_instance["name"]
        else:
            render_layer = beauty_instance["variant"]

        subset_name_fill_data["renderlayer"] = render_layer
        subset_name_fill_data["render_layer"] = render_layer

        # Format dynamic keys in subset name
        new_subset_name = subset_name.format(
            **prepare_template_data(subset_name_fill_data)
        )
        self.data["subset"] = new_subset_name
        self.log.info(f"New subset name is \"{new_subset_name}\".")

        family = self.data["family"]
        variant = self.data["variant"]

        self.data["group_id"] = group_id
        self.data["pass"] = variant
        self.data["renderlayer"] = render_layer

        # Collect selected layer names to be stored into the instance
        layer_names = [layer["name"] for layer in selected_layers]
        self.data["layer_names"] = layer_names

        # Check if the same instance already exists
        existing_instance = None
        existing_instance_idx = None
        for idx, instance in enumerate(instances):
            if (
                instance["family"] == family
                and instance["group_id"] == group_id
                and instance["pass"] == variant
            ):
                existing_instance = instance
                existing_instance_idx = idx
                break

        if existing_instance is not None:
            self.log.info(
                f"Render pass instance for group id {group_id}"
                f" and name \"{variant}\" already exists, overriding."
            )
            instances[existing_instance_idx] = self.data
        else:
            instances.append(self.data)

        self.write_instances(instances)
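A minimal sketch of how the `{renderlayer}`-style keys used above resolve, assuming `prepare_template_data` expands each key into lowercase, capitalized and uppercase variants (the template and fill values here are illustrative, not taken from settings):

    from openpype.lib import prepare_template_data

    # Illustrative subset name template and fill values.
    subset_name = "render{Task}_{Renderlayer}_{Renderpass}"
    fill_data = prepare_template_data({
        "task": "compositing",
        "renderlayer": "L010",
        "renderpass": "shadow",
    })
    # Each key is offered as {key}, {Key} and {KEY}, with the value cased
    # to match, so the capitalized placeholders above resolve cleanly.
    print(subset_name.format(**fill_data))
    # -> renderCompositing_L010_Shadow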
openpype/hosts/tvpaint/plugins/create/create_review.py (new file, 76 lines)

@@ -0,0 +1,76 @@
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator


class TVPaintReviewCreator(TVPaintAutoCreator):
    family = "review"
    identifier = "scene.review"
    label = "Review"
    icon = "ei.video"

    # Settings
    active_on_create = True

    def apply_settings(self, project_settings, system_settings):
        plugin_settings = (
            project_settings["tvpaint"]["create"]["create_review"]
        )
        self.default_variant = plugin_settings["default_variant"]
        self.default_variants = plugin_settings["default_variants"]
        self.active_on_create = plugin_settings["active_on_create"]

    def create(self):
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        create_context = self.create_context
        host_name = create_context.host_name
        project_name = create_context.get_current_project_name()
        asset_name = create_context.get_current_asset_name()
        task_name = create_context.get_current_task_name()

        if existing_instance is None:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                self.default_variant,
                task_name,
                asset_doc,
                project_name,
                host_name
            )
            data = {
                "asset": asset_name,
                "task": task_name,
                "variant": self.default_variant
            }
            if not self.active_on_create:
                data["active"] = False

            new_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
            instances_data = self.host.list_instances()
            instances_data.append(new_instance.data_to_store())
            self.host.write_instances(instances_data)
            self._add_instance_to_context(new_instance)

        elif (
            existing_instance["asset"] != asset_name
            or existing_instance["task"] != task_name
        ):
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                existing_instance["variant"],
                task_name,
                asset_doc,
                project_name,
                host_name,
                existing_instance
            )
            existing_instance["asset"] = asset_name
            existing_instance["task"] = task_name
            existing_instance["subset"] = subset_name
openpype/hosts/tvpaint/plugins/create/create_workfile.py (new file, 70 lines)

@@ -0,0 +1,70 @@
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator


class TVPaintWorkfileCreator(TVPaintAutoCreator):
    family = "workfile"
    identifier = "workfile"
    label = "Workfile"
    icon = "fa.file-o"

    def apply_settings(self, project_settings, system_settings):
        plugin_settings = (
            project_settings["tvpaint"]["create"]["create_workfile"]
        )
        self.default_variant = plugin_settings["default_variant"]
        self.default_variants = plugin_settings["default_variants"]

    def create(self):
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        create_context = self.create_context
        host_name = create_context.host_name
        project_name = create_context.get_current_project_name()
        asset_name = create_context.get_current_asset_name()
        task_name = create_context.get_current_task_name()

        if existing_instance is None:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                self.default_variant,
                task_name,
                asset_doc,
                project_name,
                host_name
            )
            data = {
                "asset": asset_name,
                "task": task_name,
                "variant": self.default_variant
            }

            new_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
            instances_data = self.host.list_instances()
            instances_data.append(new_instance.data_to_store())
            self.host.write_instances(instances_data)
            self._add_instance_to_context(new_instance)

        elif (
            existing_instance["asset"] != asset_name
            or existing_instance["task"] != task_name
        ):
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                existing_instance["variant"],
                task_name,
                asset_doc,
                project_name,
                host_name,
                existing_instance
            )
            existing_instance["asset"] = asset_name
            existing_instance["task"] = task_name
            existing_instance["subset"] = subset_name
@@ -1,37 +1,34 @@
import pyblish.api


-class CollectOutputFrameRange(pyblish.api.ContextPlugin):
+class CollectOutputFrameRange(pyblish.api.InstancePlugin):
    """Collect frame start/end from context.

    When instances are collected context does not contain `frameStart` and
    `frameEnd` keys yet. They are collected in global plugin
    `CollectContextEntities`.
    """

    label = "Collect output frame range"
-   order = pyblish.api.CollectorOrder
+   order = pyblish.api.CollectorOrder + 0.4999
    hosts = ["tvpaint"]
    families = ["review", "render"]

-   def process(self, context):
-       for instance in context:
-           frame_start = instance.data.get("frameStart")
-           frame_end = instance.data.get("frameEnd")
-           if frame_start is not None and frame_end is not None:
-               self.log.debug(
-                   "Instance {} already has set frames {}-{}".format(
-                       str(instance), frame_start, frame_end
-                   )
-               )
-               return
+   def process(self, instance):
+       asset_doc = instance.data.get("assetEntity")
+       if not asset_doc:
+           return

-           frame_start = context.data.get("frameStart")
-           frame_end = context.data.get("frameEnd")
+       context = instance.context

-           instance.data["frameStart"] = frame_start
-           instance.data["frameEnd"] = frame_end
-
-           self.log.info(
-               "Set frames {}-{} on instance {} ".format(
-                   frame_start, frame_end, str(instance)
-               )
-           )
+       frame_start = asset_doc["data"]["frameStart"]
+       frame_end = frame_start + (
+           context.data["sceneMarkOut"] - context.data["sceneMarkIn"]
+       )
+       instance.data["frameStart"] = frame_start
+       instance.data["frameEnd"] = frame_end
+       self.log.info(
+           "Set frames {}-{} on instance {} ".format(
+               frame_start, frame_end, instance.data["subset"]
+           )
+       )
@@ -1,280 +0,0 @@
import json
import copy
import pyblish.api

from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io
from openpype.pipeline.create import get_subset_name


class CollectInstances(pyblish.api.ContextPlugin):
    label = "Collect Instances"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]

    def process(self, context):
        workfile_instances = context.data["workfileInstances"]

        self.log.debug("Collected ({}) instances:\n{}".format(
            len(workfile_instances),
            json.dumps(workfile_instances, indent=4)
        ))

        filtered_instance_data = []
        # Backwards compatibility for workfiles that already have review
        # instance in metadata.
        review_instance_exist = False
        for instance_data in workfile_instances:
            family = instance_data["family"]
            if family == "review":
                review_instance_exist = True

            elif family not in ("renderPass", "renderLayer"):
                self.log.info("Unknown family \"{}\". Skipping {}".format(
                    family, json.dumps(instance_data, indent=4)
                ))
                continue

            filtered_instance_data.append(instance_data)

        # Fake review instance if review was not found in metadata families
        if not review_instance_exist:
            filtered_instance_data.append(
                self._create_review_instance_data(context)
            )

        for instance_data in filtered_instance_data:
            instance_data["fps"] = context.data["sceneFps"]

            # Conversion from older instances
            # - change 'render_layer' to 'renderlayer'
            render_layer = instance_data.get("instance_data")
            if not render_layer:
                # Render Layer has only variant
                if instance_data["family"] == "renderLayer":
                    render_layer = instance_data.get("variant")

                # Backwards compatibility for renderPasses
                elif "render_layer" in instance_data:
                    render_layer = instance_data["render_layer"]

            if render_layer:
                instance_data["renderlayer"] = render_layer

            # Store workfile instance data to instance data
            instance_data["originData"] = copy.deepcopy(instance_data)
            # Global instance data modifications
            # Fill families
            family = instance_data["family"]
            families = [family]
            if family != "review":
                families.append("review")
            # Add `review` family for thumbnail integration
            instance_data["families"] = families

            # Instance name
            subset_name = instance_data["subset"]
            name = instance_data.get("name", subset_name)
            instance_data["name"] = name
            instance_data["label"] = "{} [{}-{}]".format(
                name,
                context.data["sceneMarkIn"] + 1,
                context.data["sceneMarkOut"] + 1
            )

            active = instance_data.get("active", True)
            instance_data["active"] = active
            instance_data["publish"] = active
            # Add representations key
            instance_data["representations"] = []

            # Different instance creation based on family
            instance = None
            if family == "review":
                # Change subset name of review instance

                # Project name from workfile context
                project_name = context.data["workfile_context"]["project"]

                # Collect asset doc to get asset id
                # - not sure if it's good idea to require asset id in
                #   get_subset_name?
                asset_name = context.data["workfile_context"]["asset"]
                asset_doc = get_asset_by_name(project_name, asset_name)

                # Host name from environment variable
                host_name = context.data["hostName"]
                # Use empty variant value
                variant = ""
                task_name = legacy_io.Session["AVALON_TASK"]
                new_subset_name = get_subset_name(
                    family,
                    variant,
                    task_name,
                    asset_doc,
                    project_name,
                    host_name,
                    project_settings=context.data["project_settings"]
                )
                instance_data["subset"] = new_subset_name

                instance = context.create_instance(**instance_data)

                instance.data["layers"] = copy.deepcopy(
                    context.data["layersData"]
                )

            elif family == "renderLayer":
                instance = self.create_render_layer_instance(
                    context, instance_data
                )
            elif family == "renderPass":
                instance = self.create_render_pass_instance(
                    context, instance_data
                )

            if instance is None:
                continue

            any_visible = False
            for layer in instance.data["layers"]:
                if layer["visible"]:
                    any_visible = True
                    break

            instance.data["publish"] = any_visible

            self.log.debug("Created instance: {}\n{}".format(
                instance, json.dumps(instance.data, indent=4)
            ))

    def _create_review_instance_data(self, context):
        """Fake review instance data."""

        return {
            "family": "review",
            "asset": context.data["asset"],
            # Dummy subset name
            "subset": "reviewMain"
        }

    def create_render_layer_instance(self, context, instance_data):
        name = instance_data["name"]
        # Change label
        subset_name = instance_data["subset"]

        # Backwards compatibility
        # - subset names were not stored as final subset names during creation
        if "variant" not in instance_data:
            instance_data["label"] = "{}_Beauty".format(name)

            # Change subset name
            # Final family of an instance will be `render`
            new_family = "render"
            task_name = legacy_io.Session["AVALON_TASK"]
            new_subset_name = "{}{}_{}_Beauty".format(
                new_family, task_name.capitalize(), name
            )
            instance_data["subset"] = new_subset_name
            self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
                subset_name, new_subset_name
            ))

        # Get all layers for the layer
        layers_data = context.data["layersData"]
        group_id = instance_data["group_id"]
        group_layers = []
        for layer in layers_data:
            if layer["group_id"] == group_id:
                group_layers.append(layer)

        if not group_layers:
            # Should be handled here?
            self.log.warning((
                f"Group with id {group_id} does not contain any layers."
                f" Instance \"{name}\" not created."
            ))
            return None

        instance_data["layers"] = group_layers

        return context.create_instance(**instance_data)

    def create_render_pass_instance(self, context, instance_data):
        pass_name = instance_data["pass"]
        self.log.info(
            "Creating render pass instance. \"{}\"".format(pass_name)
        )
        # Change label
        render_layer = instance_data["renderlayer"]

        # Backwards compatibility
        # - subset names were not stored as final subset names during creation
        if "variant" not in instance_data:
            instance_data["label"] = "{}_{}".format(render_layer, pass_name)
            # Change subset name
            # Final family of an instance will be `render`
            new_family = "render"
            old_subset_name = instance_data["subset"]
            task_name = legacy_io.Session["AVALON_TASK"]
            new_subset_name = "{}{}_{}_{}".format(
                new_family, task_name.capitalize(), render_layer, pass_name
            )
            instance_data["subset"] = new_subset_name
            self.log.debug("Changed subset name \"{}\"->\"{}\"".format(
                old_subset_name, new_subset_name
            ))

        layers_data = context.data["layersData"]
        layers_by_name = {
            layer["name"]: layer
            for layer in layers_data
        }

        if "layer_names" in instance_data:
            layer_names = instance_data["layer_names"]
        else:
            # Backwards compatibility
            # - not 100% working as it was found out that layer ids can't be
            #   used as unified identifier across multiple workstations
            layers_by_id = {
                layer["layer_id"]: layer
                for layer in layers_data
            }
            layer_ids = instance_data["layer_ids"]
            layer_names = []
            for layer_id in layer_ids:
                layer = layers_by_id.get(layer_id)
                if layer:
                    layer_names.append(layer["name"])

            if not layer_names:
                raise ValueError((
                    "Metadata contain old way of storing layers information."
                    " It is not possible to identify layers to publish with"
                    " these data. Please remove Render Pass instances with"
                    " Subset manager and use Creator tool to recreate them."
                ))

        render_pass_layers = []
        for layer_name in layer_names:
            layer = layers_by_name.get(layer_name)
            # NOTE This is kind of validation before validators?
            if not layer:
                self.log.warning(
                    f"Layer with name {layer_name} was not found."
                )
                continue

            render_pass_layers.append(layer)

        if not render_pass_layers:
            name = instance_data["name"]
            self.log.warning(
                f"None of the layers from the RenderPass \"{name}\""
                " exist anymore. Instance not created."
            )
            return None

        instance_data["layers"] = render_pass_layers
        return context.create_instance(**instance_data)
@@ -0,0 +1,114 @@
import copy
import pyblish.api
from openpype.lib import prepare_template_data


class CollectRenderInstances(pyblish.api.InstancePlugin):
    label = "Collect Render Instances"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]
    families = ["render", "review"]

    ignore_render_pass_transparency = False

    def process(self, instance):
        context = instance.context
        creator_identifier = instance.data["creator_identifier"]
        if creator_identifier == "render.layer":
            self._collect_data_for_render_layer(instance)

        elif creator_identifier == "render.pass":
            self._collect_data_for_render_pass(instance)

        elif creator_identifier == "render.scene":
            self._collect_data_for_render_scene(instance)

        else:
            if creator_identifier == "scene.review":
                self._collect_data_for_review(instance)
            return

        subset_name = instance.data["subset"]
        instance.data["name"] = subset_name
        instance.data["label"] = "{} [{}-{}]".format(
            subset_name,
            context.data["sceneMarkIn"] + 1,
            context.data["sceneMarkOut"] + 1
        )

    def _collect_data_for_render_layer(self, instance):
        instance.data["families"].append("renderLayer")
        creator_attributes = instance.data["creator_attributes"]
        group_id = creator_attributes["group_id"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        layers_data = instance.context.data["layersData"]
        instance.data["layers"] = [
            copy.deepcopy(layer)
            for layer in layers_data
            if layer["group_id"] == group_id
        ]

    def _collect_data_for_render_pass(self, instance):
        instance.data["families"].append("renderPass")

        layer_names = set(instance.data["layer_names"])
        layers_data = instance.context.data["layersData"]

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        instance.data["layers"] = [
            copy.deepcopy(layer)
            for layer in layers_data
            if layer["name"] in layer_names
        ]
        instance.data["ignoreLayersTransparency"] = (
            self.ignore_render_pass_transparency
        )

        render_layer_data = None
        render_layer_id = creator_attributes["render_layer_instance_id"]
        for in_data in instance.context.data["workfileInstances"]:
            if (
                in_data["creator_identifier"] == "render.layer"
                and in_data["instance_id"] == render_layer_id
            ):
                render_layer_data = in_data
                break

        instance.data["renderLayerData"] = copy.deepcopy(render_layer_data)
        # Invalid state
        if render_layer_data is None:
            return
        render_layer_name = render_layer_data["variant"]
        subset_name = instance.data["subset"]
        instance.data["subset"] = subset_name.format(
            **prepare_template_data({"renderlayer": render_layer_name})
        )

    def _collect_data_for_render_scene(self, instance):
        instance.data["families"].append("renderScene")

        creator_attributes = instance.data["creator_attributes"]
        if creator_attributes["mark_for_review"]:
            instance.data["families"].append("review")

        instance.data["layers"] = copy.deepcopy(
            instance.context.data["layersData"]
        )

        render_pass_name = (
            instance.data["creator_attributes"]["render_pass_name"]
        )
        subset_name = instance.data["subset"]
        instance.data["subset"] = subset_name.format(
            **prepare_template_data({"renderpass": render_pass_name})
        )

    def _collect_data_for_review(self, instance):
        instance.data["layers"] = copy.deepcopy(
            instance.context.data["layersData"]
        )
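For orientation, the lookup in `_collect_data_for_render_pass` expects workfile metadata shaped roughly like this, where a pass points at its layer through `creator_attributes["render_layer_instance_id"]` (ids and names are illustrative):

    # Illustrative workfile metadata, not a literal example from a scene.
    workfile_instances = [
        {
            "creator_identifier": "render.layer",
            "instance_id": "uuid-layer-1",
            "variant": "L010",
            "creator_attributes": {"group_id": 2, "mark_for_review": True},
        },
        {
            "creator_identifier": "render.pass",
            "instance_id": "uuid-pass-1",
            "layer_names": ["shadow"],
            "creator_attributes": {
                "render_layer_instance_id": "uuid-layer-1",
                "mark_for_review": False,
            },
        },
    ]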
@@ -1,114 +0,0 @@
import json
import copy
import pyblish.api

from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name


class CollectRenderScene(pyblish.api.ContextPlugin):
    """Collect instance which renders the whole scene in PNG.

    Creates an instance with family 'renderScene' which will have all layers
    to render, composited into one result. The instance is not collected
    from the scene.

    The scene is rendered with all visible layers, in a similar way to the
    review.

    The instance is disabled if there are any created instances of
    'renderLayer' or 'renderPass'. That is because this instance is expected
    to be used as a lazy publish of the TVPaint file.

    Subset name is created in a similar way to the 'renderLayer' family. It
    can use `renderPass` and `renderLayer` keys which can be set using
    settings, and `variant` is filled using the `renderLayer` value.
    """
    label = "Collect Render Scene"
    order = pyblish.api.CollectorOrder - 0.39
    hosts = ["tvpaint"]

    # Value of 'render_pass' in subset name template
    render_pass = "beauty"

    # Settings attributes
    enabled = False
    # Value of 'render_layer' and 'variant' in subset name template
    render_layer = "Main"

    def process(self, context):
        # Check if there are created instances of renderPass and renderLayer
        # - that will define if renderScene instance is enabled after
        #   collection
        any_created_instance = False
        for instance in context:
            family = instance.data["family"]
            if family in ("renderPass", "renderLayer"):
                any_created_instance = True
                break

        # Global instance data modifications
        # Fill families
        family = "renderScene"
        # Add `review` family for thumbnail integration
        families = [family, "review"]

        # Collect asset doc to get asset id
        # - not sure if it's good idea to require asset id in
        #   get_subset_name?
        workfile_context = context.data["workfile_context"]
        # Project name from workfile context
        project_name = context.data["workfile_context"]["project"]
        asset_name = workfile_context["asset"]
        asset_doc = get_asset_by_name(project_name, asset_name)

        # Host name from environment variable
        host_name = context.data["hostName"]
        # Variant is using the render layer name
        variant = self.render_layer
        dynamic_data = {
            "renderlayer": self.render_layer,
            "renderpass": self.render_pass,
        }
        # TODO remove - Backwards compatibility for old subset name templates
        # - added 2022/04/28
        dynamic_data["render_layer"] = dynamic_data["renderlayer"]
        dynamic_data["render_pass"] = dynamic_data["renderpass"]

        task_name = workfile_context["task"]
        subset_name = get_subset_name(
            "render",
            variant,
            task_name,
            asset_doc,
            project_name,
            host_name,
            dynamic_data=dynamic_data,
            project_settings=context.data["project_settings"]
        )

        instance_data = {
            "family": family,
            "families": families,
            "fps": context.data["sceneFps"],
            "subset": subset_name,
            "name": subset_name,
            "label": "{} [{}-{}]".format(
                subset_name,
                context.data["sceneMarkIn"] + 1,
                context.data["sceneMarkOut"] + 1
            ),
            "active": not any_created_instance,
            "publish": not any_created_instance,
            "representations": [],
            "layers": copy.deepcopy(context.data["layersData"]),
            "asset": asset_name,
            "task": task_name,
            # Add render layer to instance data
            "renderlayer": self.render_layer
        }

        instance = context.create_instance(**instance_data)

        self.log.debug("Created instance: {}\n{}".format(
            instance, json.dumps(instance.data, indent=4)
        ))
@@ -2,17 +2,15 @@ import os
import json
import pyblish.api

-from openpype.client import get_asset_by_name
-from openpype.pipeline import legacy_io
-from openpype.pipeline.create import get_subset_name


-class CollectWorkfile(pyblish.api.ContextPlugin):
+class CollectWorkfile(pyblish.api.InstancePlugin):
    label = "Collect Workfile"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]
+   families = ["workfile"]

-   def process(self, context):
+   def process(self, instance):
+       context = instance.context
        current_file = context.data["currentFile"]

        self.log.info(
@@ -21,49 +19,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin):

        dirpath, filename = os.path.split(current_file)
        basename, ext = os.path.splitext(filename)
-       instance = context.create_instance(name=basename)
-
-       # Project name from workfile context
-       project_name = context.data["workfile_context"]["project"]
-
-       # Get subset name of workfile instance
-       # Collect asset doc to get asset id
-       # - not sure if it's good idea to require asset id in
-       #   get_subset_name?
-       family = "workfile"
-       asset_name = context.data["workfile_context"]["asset"]
-       asset_doc = get_asset_by_name(project_name, asset_name)
-
-       # Host name from environment variable
-       host_name = os.environ["AVALON_APP"]
-       # Use empty variant value
-       variant = ""
-       task_name = legacy_io.Session["AVALON_TASK"]
-       subset_name = get_subset_name(
-           family,
-           variant,
-           task_name,
-           asset_doc,
-           project_name,
-           host_name,
-           project_settings=context.data["project_settings"]
-       )
-
-       # Create Workfile instance
-       instance.data.update({
-           "subset": subset_name,
-           "asset": context.data["asset"],
-           "label": subset_name,
-           "publish": True,
-           "family": "workfile",
-           "families": ["workfile"],
-           "representations": [{
-               "name": ext.lstrip("."),
-               "ext": ext.lstrip("."),
-               "files": filename,
-               "stagingDir": dirpath
-           }]
+       instance.data["representations"].append({
+           "name": ext.lstrip("."),
+           "ext": ext.lstrip("."),
+           "files": filename,
+           "stagingDir": dirpath
+       })

        self.log.info("Collected workfile instance: {}".format(
            json.dumps(instance.data, indent=4)
        ))
@@ -65,9 +65,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):

        # Collect and store current context to have reference
        current_context = {
-           "project": legacy_io.Session["AVALON_PROJECT"],
-           "asset": legacy_io.Session["AVALON_ASSET"],
-           "task": legacy_io.Session["AVALON_TASK"]
+           "project_name": context.data["projectName"],
+           "asset_name": context.data["asset"],
+           "task_name": context.data["task"]
        }
        context.data["previous_context"] = current_context
        self.log.debug("Current context is: {}".format(current_context))
@@ -76,25 +76,31 @@
        self.log.info("Collecting workfile context")

        workfile_context = get_current_workfile_context()
+       if "project" in workfile_context:
+           workfile_context = {
+               "project_name": workfile_context.get("project"),
+               "asset_name": workfile_context.get("asset"),
+               "task_name": workfile_context.get("task"),
+           }
        # Store workfile context to pyblish context
        context.data["workfile_context"] = workfile_context
        if workfile_context:
            # Change current context with context from workfile
            key_map = (
-               ("AVALON_ASSET", "asset"),
-               ("AVALON_TASK", "task")
+               ("AVALON_ASSET", "asset_name"),
+               ("AVALON_TASK", "task_name")
            )
            for env_key, key in key_map:
                legacy_io.Session[env_key] = workfile_context[key]
                os.environ[env_key] = workfile_context[key]
            self.log.info("Context changed to: {}".format(workfile_context))

-           asset_name = workfile_context["asset"]
-           task_name = workfile_context["task"]
+           asset_name = workfile_context["asset_name"]
+           task_name = workfile_context["task_name"]

        else:
-           asset_name = current_context["asset"]
-           task_name = current_context["task"]
+           asset_name = current_context["asset_name"]
+           task_name = current_context["task_name"]
            # Handle older workfiles or workfiles without metadata
            self.log.warning((
                "Workfile does not contain information about context."
@@ -103,6 +109,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):

        # Store context asset name
        context.data["asset"] = asset_name
+       context.data["task"] = task_name
        self.log.info(
            "Context is set to Asset: \"{}\" and Task: \"{}\"".format(
                asset_name, task_name
@@ -6,6 +6,7 @@ from PIL import Image

import pyblish.api

+from openpype.pipeline.publish import KnownPublishError
from openpype.hosts.tvpaint.api.lib import (
    execute_george,
    execute_george_through_file,
@@ -24,8 +25,7 @@ from openpype.hosts.tvpaint.lib import (
class ExtractSequence(pyblish.api.Extractor):
    label = "Extract Sequence"
    hosts = ["tvpaint"]
-   families = ["review", "renderPass", "renderLayer", "renderScene"]
-   families_to_review = ["review"]
+   families = ["review", "render"]

    # Modifiable with settings
    review_bg = [255, 255, 255, 255]
@@ -59,6 +59,10 @@ class ExtractSequence(pyblish.api.Extractor):
            )
        )

+       ignore_layers_transparency = instance.data.get(
+           "ignoreLayersTransparency", False
+       )
+
        family_lowered = instance.data["family"].lower()
        mark_in = instance.context.data["sceneMarkIn"]
        mark_out = instance.context.data["sceneMarkOut"]
@@ -114,7 +118,11 @@ class ExtractSequence(pyblish.api.Extractor):
        else:
            # Render output
            result = self.render(
-               output_dir, mark_in, mark_out, filtered_layers
+               output_dir,
+               mark_in,
+               mark_out,
+               filtered_layers,
+               ignore_layers_transparency
            )

        output_filepaths_by_frame_idx, thumbnail_fullpath = result
@@ -136,7 +144,7 @@ class ExtractSequence(pyblish.api.Extractor):

        # Fill tags and new families from project settings
        tags = []
-       if family_lowered in self.families_to_review:
+       if family_lowered == "review":
            tags.append("review")

        # Sequence of one frame
@@ -162,10 +170,6 @@ class ExtractSequence(pyblish.api.Extractor):

        instance.data["representations"].append(new_repre)

-       if family_lowered in ("renderpass", "renderlayer", "renderscene"):
-           # Change family to render
-           instance.data["family"] = "render"
-
        if not thumbnail_fullpath:
            return
@@ -259,7 +263,7 @@ class ExtractSequence(pyblish.api.Extractor):
            output_filepaths_by_frame_idx[frame_idx] = filepath

            if not os.path.exists(filepath):
-               raise AssertionError(
+               raise KnownPublishError(
                    "Output was not rendered. File was not found {}".format(
                        filepath
                    )
@@ -278,7 +282,9 @@ class ExtractSequence(pyblish.api.Extractor):

        return output_filepaths_by_frame_idx, thumbnail_filepath

-   def render(self, output_dir, mark_in, mark_out, layers):
+   def render(
+       self, output_dir, mark_in, mark_out, layers, ignore_layer_opacity
+   ):
        """Export images from TVPaint.

        Args:
@@ -286,6 +292,7 @@ class ExtractSequence(pyblish.api.Extractor):
            mark_in (int): Starting frame index from which export will begin.
            mark_out (int): On which frame index export will end.
            layers (list): List of layers to be exported.
+           ignore_layer_opacity (bool): Layer's opacity will be ignored.

        Returns:
            tuple: With 2 items first is list of filenames second is path to
@@ -327,7 +334,7 @@ class ExtractSequence(pyblish.api.Extractor):
        for layer_id, render_data in extraction_data_by_layer_id.items():
            layer = layers_by_id[layer_id]
            filepaths_by_layer_id[layer_id] = self._render_layer(
-               render_data, layer, output_dir
+               render_data, layer, output_dir, ignore_layer_opacity
            )

        # Prepare final filepaths where compositing should store result
@@ -384,7 +391,9 @@ class ExtractSequence(pyblish.api.Extractor):
            red, green, blue = self.review_bg
        return (red, green, blue)

-   def _render_layer(self, render_data, layer, output_dir):
+   def _render_layer(
+       self, render_data, layer, output_dir, ignore_layer_opacity
+   ):
        frame_references = render_data["frame_references"]
        filenames_by_frame_index = render_data["filenames_by_frame_index"]
@@ -393,6 +402,12 @@ class ExtractSequence(pyblish.api.Extractor):
            "tv_layerset {}".format(layer_id),
            "tv_SaveMode \"PNG\""
        ]
+       # Set density to 100 and store previous opacity
+       if ignore_layer_opacity:
+           george_script_lines.extend([
+               "tv_layerdensity 100",
+               "orig_opacity = result",
+           ])

        filepaths_by_frame = {}
        frames_to_render = []
@@ -413,6 +428,10 @@ class ExtractSequence(pyblish.api.Extractor):
        # Store image to output
        george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))

+       # Set density back to the original opacity
+       if ignore_layer_opacity:
+           george_script_lines.append("tv_layerdensity orig_opacity")
+
        self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
            ",".join(frames_to_render), layer_id, layer["name"]
        ))
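Put together, the George script assembled for a single layer render when `ignore_layer_opacity` is enabled looks roughly like this (the layer id and output path are illustrative):

    # Illustrative only: the commands the two hunks above wrap around the
    # save call for one layer.
    george_script_lines = [
        "tv_layerset 123",               # focus the layer being rendered
        "tv_SaveMode \"PNG\"",
        "tv_layerdensity 100",           # force full opacity...
        "orig_opacity = result",         # ...remembering the previous value
        "tv_saveimage \"C:/render/pass.0001.png\"",
        "tv_layerdensity orig_opacity",  # restore the original opacity
    ]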
@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Overused Color group</title>
<description>## One Color group is used by multiple Render Layers

A single color group used by multiple Render Layers would cause clashes of the rendered TVPaint layers. The same layers would be used for the output files of both groups.

### Clashing Render Layers

{groups_information}

### How to repair?

Refresh, go to the 'Publish' tab and change the groups of the Render Layers so they do not clash with each other. If you have reached the limit of TVPaint color groups, there is nothing more that can be done to fix the issue.
</description>
</error>
</root>
@@ -20,6 +20,9 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
        duplicated_layer_names = []
        for layer_name in layer_names:
            layers = layers_by_name.get(layer_name)
+           # It is not the job of this validator to handle missing layers
+           if layers is None:
+               continue
            if len(layers) > 1:
                duplicated_layer_names.append(layer_name)
@@ -8,11 +8,16 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):

    label = "Validate Layers Visibility"
    order = pyblish.api.ValidatorOrder
-   families = ["review", "renderPass", "renderLayer", "renderScene"]
+   families = ["review", "render"]

    def process(self, instance):
+       layers = instance.data["layers"]
+       # Instance may have empty layers
+       # - it is not the job of this validator to check that
+       if not layers:
+           return
        layer_names = set()
-       for layer in instance.data["layers"]:
+       for layer in layers:
            layer_names.add(layer["name"])
            if layer["visible"]:
                return
@@ -0,0 +1,74 @@
import collections
import pyblish.api
from openpype.pipeline import PublishXmlValidationError


class ValidateRenderLayerGroups(pyblish.api.ContextPlugin):
    """Validate group ids of renderLayer subsets.

    Validate that there are not 2 render layers using the same group.
    """

    label = "Validate Render Layers Group"
    order = pyblish.api.ValidatorOrder + 0.1

    def process(self, context):
        # Prepare layers
        render_layers_by_group_id = collections.defaultdict(list)
        for instance in context:
            families = instance.data.get("families")
            if not families or "renderLayer" not in families:
                continue

            group_id = instance.data["creator_attributes"]["group_id"]
            render_layers_by_group_id[group_id].append(instance)

        duplicated_instances = []
        for group_id, instances in render_layers_by_group_id.items():
            if len(instances) > 1:
                duplicated_instances.append((group_id, instances))

        if not duplicated_instances:
            return

        # Exception message preparations
        groups_data = context.data["groupsData"]
        groups_by_id = {
            group["group_id"]: group
            for group in groups_data
        }

        per_group_msgs = []
        groups_information_lines = []
        for group_id, instances in duplicated_instances:
            group = groups_by_id[group_id]
            group_label = "Group \"{}\" ({})".format(
                group["name"],
                group["group_id"],
            )
            line_join_subset_names = "\n".join([
                f"- {instance['subset']}"
                for instance in instances
            ])
            joined_subset_names = ", ".join([
                f"\"{instance['subset']}\""
                for instance in instances
            ])
            per_group_msgs.append(
                "{} < {} >".format(group_label, joined_subset_names)
            )
            groups_information_lines.append(
                "<b>{}</b>\n{}".format(group_label, line_join_subset_names)
            )

        # Raise an error
        raise PublishXmlValidationError(
            self,
            (
                "More than one Render Layer is using the same TVPaint"
                " group color. {}"
            ).format(" | ".join(per_group_msgs)),
            formatting_data={
                "groups_information": "\n".join(groups_information_lines)
            }
        )
@@ -85,6 +85,5 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
                ),
                "expected_group": correct_group["name"],
                "layer_names": ", ".join(invalid_layer_names)
-
            }
        )
@@ -42,7 +42,7 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin):
            "expected_width": expected_data["resolutionWidth"],
            "expected_height": expected_data["resolutionHeight"],
            "current_width": scene_data["resolutionWidth"],
-           "current_height": scene_data["resolutionWidth"],
+           "current_height": scene_data["resolutionHeight"],
            "expected_pixel_ratio": expected_data["pixelAspect"],
            "current_pixel_ratio": scene_data["pixelAspect"]
        }
@@ -1,5 +1,9 @@
import pyblish.api
-from openpype.pipeline import PublishXmlValidationError, registered_host
+from openpype.pipeline import (
+    PublishXmlValidationError,
+    PublishValidationError,
+    registered_host,
+)


class ValidateWorkfileMetadataRepair(pyblish.api.Action):
@@ -27,13 +31,18 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):

    actions = [ValidateWorkfileMetadataRepair]

-   required_keys = {"project", "asset", "task"}
+   required_keys = {"project_name", "asset_name", "task_name"}

    def process(self, context):
        workfile_context = context.data["workfile_context"]
        if not workfile_context:
-           raise AssertionError(
-               "Current workfile is missing whole metadata about context."
+           raise PublishValidationError(
+               "Current workfile is missing whole metadata about context.",
+               "Missing context",
+               (
+                   "Current workfile is missing metadata about task."
+                   " To fix this issue save the file using Workfiles tool."
+               )
            )

        missing_keys = []
@@ -1,4 +1,3 @@
-import os
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
@@ -16,15 +15,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin):
    def process(self, context):
        workfile_context = context.data.get("workfile_context")
        # If workfile context is missing then project is matching to
-       # `AVALON_PROJECT` value for 100%
+       # global project
        if not workfile_context:
            self.log.info(
                "Workfile context (\"workfile_context\") is not filled."
            )
            return

-       workfile_project_name = workfile_context["project"]
-       env_project_name = os.environ["AVALON_PROJECT"]
+       workfile_project_name = workfile_context["project_name"]
+       env_project_name = context.data["projectName"]
        if workfile_project_name == env_project_name:
            self.log.info((
                "Both workfile project and environment project are same. {}"
@@ -17,9 +17,10 @@ class UnrealAddon(OpenPypeModule, IHostAddon):

        ue_plugin = "UE_5.0" if app.name[:1] == "5" else "UE_4.7"
        unreal_plugin_path = os.path.join(
-           UNREAL_ROOT_DIR, "integration", ue_plugin
+           UNREAL_ROOT_DIR, "integration", ue_plugin, "OpenPype"
        )
-       if not env.get("OPENPYPE_UNREAL_PLUGIN"):
+       if not env.get("OPENPYPE_UNREAL_PLUGIN") or \
+               env.get("OPENPYPE_UNREAL_PLUGIN") != unreal_plugin_path:
            env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path

        # Set default environments if they are not set via settings
@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
"""Unreal Editor OpenPype host API."""

-from .plugin import Loader
+from .plugin import (
+    UnrealActorCreator,
+    UnrealAssetCreator,
+    Loader
+)

from .pipeline import (
    install,
@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
import os
+import json
import logging
from typing import List
from contextlib import contextmanager
import semver
+import time

import pyblish.api
@@ -16,13 +18,14 @@ from openpype.pipeline import (
)
from openpype.tools.utils import host_tools
import openpype.hosts.unreal
-from openpype.host import HostBase, ILoadHost
+from openpype.host import HostBase, ILoadHost, IPublishHost

import unreal  # noqa


logger = logging.getLogger("openpype.hosts.unreal")

OPENPYPE_CONTAINERS = "OpenPypeContainers"
+CONTEXT_CONTAINER = "OpenPype/context.json"
+UNREAL_VERSION = semver.VersionInfo(
+    *os.getenv("OPENPYPE_UNREAL_VERSION").split(".")
+)
@@ -35,7 +38,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")


-class UnrealHost(HostBase, ILoadHost):
+class UnrealHost(HostBase, ILoadHost, IPublishHost):
    """Unreal host implementation.

    For some time this class will re-use functions from module based
@@ -60,6 +63,32 @@ class UnrealHost(HostBase, ILoadHost):

        show_tools_dialog()

+   def update_context_data(self, data, changes):
+       content_path = unreal.Paths.project_content_dir()
+       op_ctx = content_path + CONTEXT_CONTAINER
+       attempts = 3
+       for i in range(attempts):
+           try:
+               with open(op_ctx, "w+") as f:
+                   json.dump(data, f)
+               break
+           except IOError:
+               if i == attempts - 1:
+                   raise Exception("Failed to write context data. Aborting.")
+               unreal.log_warning("Failed to write context data. Retrying...")
+               time.sleep(3)
+
+   def get_context_data(self):
+       content_path = unreal.Paths.project_content_dir()
+       op_ctx = content_path + CONTEXT_CONTAINER
+       if not os.path.isfile(op_ctx):
+           return {}
+       with open(op_ctx, "r") as fp:
+           data = json.load(fp)
+       return data


def install():
    """Install Unreal configuration for OpenPype."""
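A small usage sketch of the two methods above; the key stored in `data` is illustrative, the point is the round-trip through `OpenPype/context.json` that the `IPublishHost` interface relies on:

    # Hypothetical round-trip; the stored key names are illustrative.
    host = UnrealHost()

    data = host.get_context_data()  # {} while context.json does not exist
    data["publish_attributes"] = {"ValidateScene": {"active": True}}
    host.update_context_data(data, changes=None)

    assert host.get_context_data() == data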
@@ -133,6 +162,31 @@ def ls():
        yield data


+def ls_inst():
+    ar = unreal.AssetRegistryHelpers.get_asset_registry()
+    # UE 5.1 changed how class name is specified
+    class_name = [
+        "/Script/OpenPype",
+        "OpenPypePublishInstance"
+    ] if (
+        UNREAL_VERSION.major == 5
+        and UNREAL_VERSION.minor > 0
+    ) else "OpenPypePublishInstance"  # noqa
+    instances = ar.get_assets_by_class(class_name, True)
+
+    # get_asset_by_class returns AssetData. To get all metadata we need to
+    # load asset. get_tag_values() work only on metadata registered in
+    # Asset Registry Project settings (and there is no way to set it with
+    # python short of editing ini configuration file).
+    for asset_data in instances:
+        asset = asset_data.get_asset()
+        data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+        data["objectName"] = asset_data.asset_name
+        data = cast_map_to_str_dict(data)
+
+        yield data
+
+
def parse_container(container):
    """To get data from container, AssetContainer must be loaded.
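The version gate above can be exercised in isolation; a short sketch, assuming `OPENPYPE_UNREAL_VERSION` is set the way the prelaunch hook sets it (e.g. "5.1.0"):

    import semver

    # Hypothetical value; in the integration it comes from the
    # OPENPYPE_UNREAL_VERSION environment variable.
    version = semver.VersionInfo(*"5.1.0".split("."))

    # UE 5.1+ expects the Asset Registry class reference as a package/class
    # pair; older versions accept the bare class name.
    if version.major == 5 and version.minor > 0:
        class_name = ["/Script/OpenPype", "OpenPypePublishInstance"]
    else:
        class_name = "OpenPypePublishInstance"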
@@ -1,7 +1,245 @@
# -*- coding: utf-8 -*-
-from abc import ABC
-
-from openpype.pipeline import LoaderPlugin
import ast
import collections
import sys
import six
from abc import (
    ABC,
    ABCMeta,
)

import unreal

from .pipeline import (
    create_publish_instance,
    imprint,
    ls_inst,
    UNREAL_VERSION
)
from openpype.lib import (
    BoolDef,
    UILabelDef
)
from openpype.pipeline import (
    Creator,
    LoaderPlugin,
    CreatorError,
    CreatedInstance
)


@six.add_metaclass(ABCMeta)
class UnrealBaseCreator(Creator):
    """Base class for Unreal creator plugins."""
    root = "/Game/OpenPype/PublishInstances"
    suffix = "_INS"

    @staticmethod
    def cache_subsets(shared_data):
        """Cache instances for Creators to shared data.

        Create `unreal_cached_subsets` key when needed in shared data and
        fill it with all collected instances from the scene under its
        respective creator identifiers.

        If legacy instances are detected in the scene, create
        `unreal_cached_legacy_subsets` there and fill it with
        all legacy subsets under family as a key.

        Args:
            shared_data (Dict[str, Any]): Shared data.

        Return:
            Dict[str, Any]: Shared data dictionary.

        """
        if shared_data.get("unreal_cached_subsets") is None:
            unreal_cached_subsets = collections.defaultdict(list)
            unreal_cached_legacy_subsets = collections.defaultdict(list)
            for instance in ls_inst():
                creator_id = instance.get("creator_identifier")
                if creator_id:
                    unreal_cached_subsets[creator_id].append(instance)
                else:
                    family = instance.get("family")
                    unreal_cached_legacy_subsets[family].append(instance)

            shared_data["unreal_cached_subsets"] = unreal_cached_subsets
            shared_data["unreal_cached_legacy_subsets"] = (
                unreal_cached_legacy_subsets
            )
        return shared_data

    def create(self, subset_name, instance_data, pre_create_data):
        try:
            instance_name = f"{subset_name}{self.suffix}"
            pub_instance = create_publish_instance(instance_name, self.root)

            instance_data["subset"] = subset_name
            instance_data["instance_path"] = f"{self.root}/{instance_name}"

            instance = CreatedInstance(
                self.family,
                subset_name,
                instance_data,
                self)
            self._add_instance_to_context(instance)

            pub_instance.set_editor_property('add_external_assets', True)
            assets = pub_instance.get_editor_property('asset_data_external')

            ar = unreal.AssetRegistryHelpers.get_asset_registry()

            for member in pre_create_data.get("members", []):
                obj = ar.get_asset_by_object_path(member).get_asset()
                assets.add(obj)

            imprint(f"{self.root}/{instance_name}", instance.data_to_store())

            return instance

        except Exception as er:
            six.reraise(
                CreatorError,
                CreatorError(f"Creator error: {er}"),
                sys.exc_info()[2])

    def collect_instances(self):
        # cache instances if missing
        self.cache_subsets(self.collection_shared_data)
        for instance in self.collection_shared_data[
                "unreal_cached_subsets"].get(self.identifier, []):
            # Unreal saves metadata as string, so we need to convert it back
            instance['creator_attributes'] = ast.literal_eval(
                instance.get('creator_attributes', '{}'))
            instance['publish_attributes'] = ast.literal_eval(
                instance.get('publish_attributes', '{}'))
            created_instance = CreatedInstance.from_existing(instance, self)
            self._add_instance_to_context(created_instance)

    def update_instances(self, update_list):
        for created_inst, changes in update_list:
            instance_node = created_inst.get("instance_path", "")

            if not instance_node:
                unreal.log_warning(
                    f"Instance node not found for {created_inst}")
                continue

            new_values = {
                key: changes[key].new_value
                for key in changes.changed_keys
            }
            imprint(
                instance_node,
                new_values
            )

    def remove_instances(self, instances):
        for instance in instances:
            instance_node = instance.data.get("instance_path", "")
            if instance_node:
                unreal.EditorAssetLibrary.delete_asset(instance_node)

            self._remove_instance_from_context(instance)


@six.add_metaclass(ABCMeta)
class UnrealAssetCreator(UnrealBaseCreator):
    """Base class for Unreal creator plugins based on assets."""

    def create(self, subset_name, instance_data, pre_create_data):
        """Create instance of the asset.

        Args:
            subset_name (str): Name of the subset.
            instance_data (dict): Data for the instance.
            pre_create_data (dict): Data for the instance.

        Returns:
            CreatedInstance: Created instance.
        """
        try:
            # Check if instance data has members, filled by the plugin.
            # If not, use selection.
            if not pre_create_data.get("members"):
                pre_create_data["members"] = []

                if pre_create_data.get("use_selection"):
                    utilib = unreal.EditorUtilityLibrary
                    sel_objects = utilib.get_selected_assets()
                    pre_create_data["members"] = [
                        a.get_path_name() for a in sel_objects]

            super(UnrealAssetCreator, self).create(
                subset_name,
                instance_data,
                pre_create_data)

        except Exception as er:
            six.reraise(
                CreatorError,
                CreatorError(f"Creator error: {er}"),
                sys.exc_info()[2])

    def get_pre_create_attr_defs(self):
        return [
            BoolDef("use_selection", label="Use selection", default=True)
        ]


@six.add_metaclass(ABCMeta)
class UnrealActorCreator(UnrealBaseCreator):
    """Base class for Unreal creator plugins based on actors."""

    def create(self, subset_name, instance_data, pre_create_data):
        """Create instance of the asset.

        Args:
            subset_name (str): Name of the subset.
            instance_data (dict): Data for the instance.
            pre_create_data (dict): Data for the instance.

        Returns:
            CreatedInstance: Created instance.
        """
        try:
            if UNREAL_VERSION.major == 5:
                world = unreal.UnrealEditorSubsystem().get_editor_world()
            else:
                world = unreal.EditorLevelLibrary.get_editor_world()

            # Check if the level is saved
            if world.get_path_name().startswith("/Temp/"):
                raise CreatorError(
                    "Level must be saved before creating instances.")

            # Check if instance data has members, filled by the plugin.
            # If not, use selection.
            if not instance_data.get("members"):
                actor_subsystem = unreal.EditorActorSubsystem()
                sel_actors = actor_subsystem.get_selected_level_actors()
                selection = [a.get_path_name() for a in sel_actors]

                instance_data["members"] = selection

            instance_data["level"] = world.get_path_name()

            super(UnrealActorCreator, self).create(
                subset_name,
                instance_data,
                pre_create_data)

        except Exception as er:
            six.reraise(
                CreatorError,
                CreatorError(f"Creator error: {er}"),
                sys.exc_info()[2])

    def get_pre_create_attr_defs(self):
        return [
            UILabelDef("Select actors to create instance from them.")
        ]


class Loader(LoaderPlugin, ABC):
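To make the base classes concrete: a hypothetical creator built on `UnrealAssetCreator` can be as small as this (identifier, family and label are illustrative, not part of this change):

    from openpype.hosts.unreal.api import UnrealAssetCreator


    class CreateStaticMesh(UnrealAssetCreator):
        """Hypothetical concrete creator; all behavior is inherited."""
        identifier = "io.openpype.creators.unreal.staticmesh"
        family = "staticMesh"
        label = "Static Mesh"

    # create(), collect_instances(), update_instances() and
    # remove_instances() come from the base classes, which also fill
    # pre_create_data["members"] from the current editor selection.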
@@ -17,9 +17,8 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ToolsBtnsWidget, self).__init__(parent)

-       create_btn = QtWidgets.QPushButton("Create...", self)
        load_btn = QtWidgets.QPushButton("Load...", self)
-       publish_btn = QtWidgets.QPushButton("Publish...", self)
+       publish_btn = QtWidgets.QPushButton("Publisher...", self)
        manage_btn = QtWidgets.QPushButton("Manage...", self)
        render_btn = QtWidgets.QPushButton("Render...", self)
        experimental_tools_btn = QtWidgets.QPushButton(
@@ -28,7 +27,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget):

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
-       layout.addWidget(create_btn, 0)
        layout.addWidget(load_btn, 0)
        layout.addWidget(publish_btn, 0)
        layout.addWidget(manage_btn, 0)
@@ -36,7 +34,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
        layout.addWidget(experimental_tools_btn, 0)
        layout.addStretch(1)

-       create_btn.clicked.connect(self._on_create)
        load_btn.clicked.connect(self._on_load)
        publish_btn.clicked.connect(self._on_publish)
        manage_btn.clicked.connect(self._on_manage)
@ -50,7 +47,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
|
|||
self.tool_required.emit("loader")
|
||||
|
||||
def _on_publish(self):
|
||||
self.tool_required.emit("publish")
|
||||
self.tool_required.emit("publisher")
|
||||
|
||||
def _on_manage(self):
|
||||
self.tool_required.emit("sceneinventory")
|
||||
|
|
|
|||
|
|
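The net effect of this hunk is that both the button label and the emitted tool name move from "publish" to "publisher". A stripped-down, self-contained sketch of the same signal pattern, assuming the qtpy shim (any Qt binding with the same API works):

    from qtpy import QtCore, QtWidgets

    class MiniToolsWidget(QtWidgets.QWidget):
        """Stand-in for ToolsBtnsWidget above, reduced to one button."""
        tool_required = QtCore.Signal(str)

        def __init__(self, parent=None):
            super(MiniToolsWidget, self).__init__(parent)
            publish_btn = QtWidgets.QPushButton("Publisher...", self)
            layout = QtWidgets.QVBoxLayout(self)
            layout.addWidget(publish_btn, 0)
            # Emits the renamed tool identifier, matching the diff above.
            publish_btn.clicked.connect(
                lambda: self.tool_required.emit("publisher"))

    app = QtWidgets.QApplication([])
    widget = MiniToolsWidget()
    widget.tool_required.connect(print)  # a host would open the named tool here
    widget.show()
    app.exec_()
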
@ -79,9 +79,9 @@ class UnrealPrelaunchHook(PreLaunchHook):
        unreal_project_name = os.path.splitext(unreal_project_filename)[0]
        # Unreal is sensitive about project names longer than 20 chars
        if len(unreal_project_name) > 20:
            self.log.warning((
                f"Project name exceed 20 characters ({unreal_project_name})!"
            ))
            raise ApplicationLaunchFailed(
                f"Project name exceeds 20 characters ({unreal_project_name})!"
            )

        # Unreal doesn't accept non-alphabet characters at the start
        # of the project name. This is because project name is then used
@ -119,29 +119,34 @@ class UnrealPrelaunchHook(PreLaunchHook):
            f"detected [ {engine_version} ]"
        ))

        ue_path = unreal_lib.get_editor_executable_path(
        ue_path = unreal_lib.get_editor_exe_path(
            Path(detected[engine_version]), engine_version)

        self.launch_context.launch_args = [ue_path.as_posix()]
        project_path.mkdir(parents=True, exist_ok=True)

        # Set "OPENPYPE_UNREAL_PLUGIN" to current process environment for
        # execution of `create_unreal_project`

        if self.launch_context.env.get("OPENPYPE_UNREAL_PLUGIN"):
            self.log.info((
                f"{self.signature} using OpenPype plugin from "
                f"{self.launch_context.env.get('OPENPYPE_UNREAL_PLUGIN')}"
            ))
        env_key = "OPENPYPE_UNREAL_PLUGIN"
        if self.launch_context.env.get(env_key):
            os.environ[env_key] = self.launch_context.env[env_key]

        engine_path = detected[engine_version]

        unreal_lib.try_installing_plugin(Path(engine_path), os.environ)

        project_file = project_path / unreal_project_filename
        if not project_file.is_file():
            engine_path = detected[engine_version]
            self.log.info((
                f"{self.signature} creating unreal "
                f"project [ {unreal_project_name} ]"
            ))
            # Set "OPENPYPE_UNREAL_PLUGIN" to current process environment for
            # execution of `create_unreal_project`
            if self.launch_context.env.get("OPENPYPE_UNREAL_PLUGIN"):
                self.log.info((
                    f"{self.signature} using OpenPype plugin from "
                    f"{self.launch_context.env.get('OPENPYPE_UNREAL_PLUGIN')}"
                ))
            env_key = "OPENPYPE_UNREAL_PLUGIN"
            if self.launch_context.env.get(env_key):
                os.environ[env_key] = self.launch_context.env[env_key]

            unreal_lib.create_unreal_project(
                unreal_project_name,

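Both branches above copy OPENPYPE_UNREAL_PLUGIN from the launch-context environment into the current process, so that helpers reading os.environ directly (try_installing_plugin, create_unreal_project) pick it up. The pattern in isolation, as a small stdlib-only sketch:

    import os

    def propagate_env(launch_env, key="OPENPYPE_UNREAL_PLUGIN"):
        """Copy one key from a launch-context env dict into this process.

        Helpers that consult os.environ directly (like the plugin install
        and project creation calls in the hook above) then pick the value
        up without having it passed explicitly.
        """
        if launch_env.get(key):
            os.environ[key] = launch_env[key]

    # Hypothetical usage with a fake launch-context environment:
    propagate_env({"OPENPYPE_UNREAL_PLUGIN": "/path/to/plugin"})
    assert os.environ["OPENPYPE_UNREAL_PLUGIN"] == "/path/to/plugin"
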
openpype/hosts/unreal/integration/UE_4.7/CommandletProject/.gitignore (vendored, new file, 8 lines)

@ -0,0 +1,8 @@
/Saved
/DerivedDataCache
/Intermediate
/Content
/Config
/Binaries
/.idea
/.vs

@ -0,0 +1,12 @@
{
    "FileVersion": 3,
    "EngineAssociation": "4.27",
    "Category": "",
    "Description": "",
    "Plugins": [
        {
            "Name": "OpenPype",
            "Enabled": true
        }
    ]
}

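The descriptor is plain JSON, so it can be sanity-checked with the standard library alone. A quick sketch (the file path is hypothetical):

    import json
    from pathlib import Path

    # Hypothetical path to the generated project descriptor.
    uproject = Path("CommandletProject.uproject")

    data = json.loads(uproject.read_text())
    assert data["EngineAssociation"] == "4.27"
    enabled = [p["Name"] for p in data.get("Plugins", []) if p.get("Enabled")]
    print("Enabled plugins:", enabled)  # expect "OpenPype" among them
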
@ -0,0 +1,8 @@
[FilterPlugin]
; This section lists additional files which will be packaged along with your plugin. Paths should be listed relative to the root plugin directory, and
; may include "...", "*", and "?" wildcards to match directories, files, and individual characters respectively.
;
; Examples:
; /README.txt
; /Extras/...
; /Binaries/ThirdParty/*.dll

@ -10,10 +10,9 @@
    "DocsURL": "https://openpype.io/docs/artist_hosts_unreal",
    "MarketplaceURL": "",
    "SupportURL": "https://pype.club/",
    "EngineVersion": "4.27",
    "CanContainContent": true,
    "IsBetaVersion": true,
    "IsExperimentalVersion": false,
    "Installed": false,
    "Installed": true,
    "Modules": [
        {
            "Name": "OpenPype",

@ -1,4 +1,4 @@
// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved.
// Copyright 2023, Ayon, All rights reserved.

using UnrealBuildTool;

@ -6,8 +6,8 @@ public class OpenPype : ModuleRules
{
    public OpenPype(ReadOnlyTargetRules Target) : base(Target)
    {
        PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs;
        PCHUsage = PCHUsageMode.UseExplicitOrSharedPCHs;

        PublicIncludePaths.AddRange(
            new string[] {
                // ... add public include paths required here ...
@ -34,6 +34,7 @@ public class OpenPype : ModuleRules
        PrivateDependencyModuleNames.AddRange(
            new string[]
            {
                "GameProjectGeneration",
                "Projects",
                "InputCore",
                "UnrealEd",

@ -0,0 +1,141 @@
// Copyright 2023, Ayon, All rights reserved.
#include "Commandlets/Implementations/OPGenerateProjectCommandlet.h"

#include "Editor.h"
#include "GameProjectUtils.h"
#include "OPConstants.h"
#include "Commandlets/OPActionResult.h"
#include "ProjectDescriptor.h"

int32 UOPGenerateProjectCommandlet::Main(const FString& CommandLineParams)
{
    // Parses command line parameters & creates structure FProjectInformation
    const FOPGenerateProjectParams ParsedParams = FOPGenerateProjectParams(CommandLineParams);
    ProjectInformation = ParsedParams.GenerateUEProjectInformation();

    // Creates .uproject & other UE files
    EVALUATE_OP_ACTION_RESULT(TryCreateProject());

    // Loads created .uproject
    EVALUATE_OP_ACTION_RESULT(TryLoadProjectDescriptor());

    // Adds the needed plugins to .uproject
    AttachPluginsToProjectDescriptor();

    // Saves .uproject
    EVALUATE_OP_ACTION_RESULT(TrySave());

    // If we got here, the Unreal project for OpenPype was generated without problems
    return 0;
}


FOPGenerateProjectParams::FOPGenerateProjectParams(): FOPGenerateProjectParams("")
{
}

FOPGenerateProjectParams::FOPGenerateProjectParams(const FString& CommandLineParams): CommandLineParams(
    CommandLineParams)
{
    UCommandlet::ParseCommandLine(*CommandLineParams, Tokens, Switches);
}

FProjectInformation FOPGenerateProjectParams::GenerateUEProjectInformation() const
{
    FProjectInformation ProjectInformation = FProjectInformation();
    ProjectInformation.ProjectFilename = GetProjectFileName();

    ProjectInformation.bShouldGenerateCode = IsSwitchPresent("GenerateCode");

    return ProjectInformation;
}

FString FOPGenerateProjectParams::TryGetToken(const int32 Index) const
{
    return Tokens.IsValidIndex(Index) ? Tokens[Index] : "";
}

FString FOPGenerateProjectParams::GetProjectFileName() const
{
    return TryGetToken(0);
}

bool FOPGenerateProjectParams::IsSwitchPresent(const FString& Switch) const
{
    return INDEX_NONE != Switches.IndexOfByPredicate([&Switch](const FString& Item) -> bool
        {
            return Item.Equals(Switch);
        }
    );
}


UOPGenerateProjectCommandlet::UOPGenerateProjectCommandlet()
{
    LogToConsole = true;
}

FOP_ActionResult UOPGenerateProjectCommandlet::TryCreateProject() const
{
    FText FailReason;
    FText FailLog;
    TArray<FString> OutCreatedFiles;

    if (!GameProjectUtils::CreateProject(ProjectInformation, FailReason, FailLog, &OutCreatedFiles))
        return FOP_ActionResult(EOP_ActionResult::ProjectNotCreated, FailReason);
    return FOP_ActionResult();
}

FOP_ActionResult UOPGenerateProjectCommandlet::TryLoadProjectDescriptor()
{
    FText FailReason;
    const bool bLoaded = ProjectDescriptor.Load(ProjectInformation.ProjectFilename, FailReason);

    return FOP_ActionResult(bLoaded ? EOP_ActionResult::Ok : EOP_ActionResult::ProjectNotLoaded, FailReason);
}

void UOPGenerateProjectCommandlet::AttachPluginsToProjectDescriptor()
{
    FPluginReferenceDescriptor OPPluginDescriptor;
    OPPluginDescriptor.bEnabled = true;
    OPPluginDescriptor.Name = OPConstants::OP_PluginName;
    ProjectDescriptor.Plugins.Add(OPPluginDescriptor);

    FPluginReferenceDescriptor PythonPluginDescriptor;
    PythonPluginDescriptor.bEnabled = true;
    PythonPluginDescriptor.Name = OPConstants::PythonScript_PluginName;
    ProjectDescriptor.Plugins.Add(PythonPluginDescriptor);

    FPluginReferenceDescriptor SequencerScriptingPluginDescriptor;
    SequencerScriptingPluginDescriptor.bEnabled = true;
    SequencerScriptingPluginDescriptor.Name = OPConstants::SequencerScripting_PluginName;
    ProjectDescriptor.Plugins.Add(SequencerScriptingPluginDescriptor);

    FPluginReferenceDescriptor MovieRenderPipelinePluginDescriptor;
    MovieRenderPipelinePluginDescriptor.bEnabled = true;
    MovieRenderPipelinePluginDescriptor.Name = OPConstants::MovieRenderPipeline_PluginName;
    ProjectDescriptor.Plugins.Add(MovieRenderPipelinePluginDescriptor);

    FPluginReferenceDescriptor EditorScriptingPluginDescriptor;
    EditorScriptingPluginDescriptor.bEnabled = true;
    EditorScriptingPluginDescriptor.Name = OPConstants::EditorScriptingUtils_PluginName;
    ProjectDescriptor.Plugins.Add(EditorScriptingPluginDescriptor);
}

FOP_ActionResult UOPGenerateProjectCommandlet::TrySave()
{
    FText FailReason;
    const bool bSaved = ProjectDescriptor.Save(ProjectInformation.ProjectFilename, FailReason);

    return FOP_ActionResult(bSaved ? EOP_ActionResult::Ok : EOP_ActionResult::ProjectNotSaved, FailReason);
}

FOPGenerateProjectParams UOPGenerateProjectCommandlet::ParseParameters(const FString& Params) const
{
    FOPGenerateProjectParams ParamsResult;

    TArray<FString> Tokens, Switches;
    ParseCommandLine(*Params, Tokens, Switches);

    return ParamsResult;
}

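The Main() flow above is what ultimately runs when the editor binary is launched headless against this commandlet. A guess at the invocation, based on UE's usual commandlet conventions (the "U" prefix and "Commandlet" suffix are implied by "-run=OPGenerateProject"); all paths below are hypothetical, and the exact argument layout is an assumption, not taken from this diff:

    import subprocess
    from pathlib import Path

    # Hypothetical paths; real values come from the engine detection in the
    # prelaunch hook shown earlier in this diff.
    ue_cmd = Path("C:/Program Files/Epic Games/UE_4.27/Engine/Binaries/Win64/UE4Editor-Cmd.exe")
    commandlet_project = "CommandletProject/CommandletProject.uproject"
    new_project = "D:/ayon/MyProject/MyProject.uproject"

    # Token 0 after -run=... is what GetProjectFileName() reads; -GenerateCode
    # flips bShouldGenerateCode via IsSwitchPresent("GenerateCode").
    result = subprocess.run([
        str(ue_cmd),
        commandlet_project,
        "-run=OPGenerateProject",
        new_project,
        "-GenerateCode",
    ])
    # Main() returns 0 on success; failures map to EOP_ActionResult values
    # (ProjectNotCreated, ProjectNotLoaded, ProjectNotSaved).
    print("commandlet exit code:", result.returncode)
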
@ -0,0 +1,41 @@
// Copyright 2023, Ayon, All rights reserved.

#include "Commandlets/OPActionResult.h"
#include "Logging/OP_Log.h"

EOP_ActionResult::Type& FOP_ActionResult::GetStatus()
{
    return Status;
}

FText& FOP_ActionResult::GetReason()
{
    return Reason;
}

FOP_ActionResult::FOP_ActionResult(): Status(EOP_ActionResult::Type::Ok)
{
}

FOP_ActionResult::FOP_ActionResult(const EOP_ActionResult::Type& InEnum): Status(InEnum)
{
    TryLog();
}

FOP_ActionResult::FOP_ActionResult(const EOP_ActionResult::Type& InEnum, const FText& InReason): Status(InEnum), Reason(InReason)
{
    TryLog();
}

bool FOP_ActionResult::IsProblem() const
{
    return Status != EOP_ActionResult::Ok;
}

void FOP_ActionResult::TryLog() const
{
    if (IsProblem())
        UE_LOG(LogCommandletOPGenerateProject, Error, TEXT("%s"), *Reason.ToString());
}

@ -0,0 +1 @@
#include "Logging/OP_Log.h"

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#include "OpenPype.h"

#include "ISettingsContainer.h"
@ -16,32 +17,34 @@ static const FName OpenPypeTabName("OpenPype");
// This function is triggered when the plugin is starting up
void FOpenPypeModule::StartupModule()
{
    FOpenPypeStyle::Initialize();
    FOpenPypeStyle::SetIcon("Logo", "openpype40");
    if (!IsRunningCommandlet()) {
        FOpenPypeStyle::Initialize();
        FOpenPypeStyle::SetIcon("Logo", "openpype40");

    // Create the Extender that will add content to the menu
    FLevelEditorModule& LevelEditorModule = FModuleManager::LoadModuleChecked<FLevelEditorModule>("LevelEditor");
        // Create the Extender that will add content to the menu
        FLevelEditorModule& LevelEditorModule = FModuleManager::LoadModuleChecked<FLevelEditorModule>("LevelEditor");

    TSharedPtr<FExtender> MenuExtender = MakeShareable(new FExtender());
    TSharedPtr<FExtender> ToolbarExtender = MakeShareable(new FExtender());
        TSharedPtr<FExtender> MenuExtender = MakeShareable(new FExtender());
        TSharedPtr<FExtender> ToolbarExtender = MakeShareable(new FExtender());

    MenuExtender->AddMenuExtension(
        "LevelEditor",
        EExtensionHook::After,
        NULL,
        FMenuExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddMenuEntry)
    );
    ToolbarExtender->AddToolBarExtension(
        "Settings",
        EExtensionHook::After,
        NULL,
        FToolBarExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddToobarEntry));
        MenuExtender->AddMenuExtension(
            "LevelEditor",
            EExtensionHook::After,
            NULL,
            FMenuExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddMenuEntry)
        );
        ToolbarExtender->AddToolBarExtension(
            "Settings",
            EExtensionHook::After,
            NULL,
            FToolBarExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddToobarEntry));

    LevelEditorModule.GetMenuExtensibilityManager()->AddExtender(MenuExtender);
    LevelEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender);
        LevelEditorModule.GetMenuExtensibilityManager()->AddExtender(MenuExtender);
        LevelEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender);

    RegisterSettings();
        RegisterSettings();
    }
}

void FOpenPypeModule::ShutdownModule()

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#include "OpenPypeLib.h"

#include "AssetViewUtils.h"
@ -1,11 +1,12 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

#include "OpenPypePublishInstance.h"
#include "AssetRegistryModule.h"
#include "NotificationManager.h"
#include "OpenPypeLib.h"
#include "OpenPypeSettings.h"
#include "SNotificationList.h"
#include "Framework/Notifications/NotificationManager.h"
#include "Widgets/Notifications/SNotificationList.h"

// Moves all the invalid pointers to the end to prepare them for the shrinking
#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \
@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#include "OpenPypePublishInstanceFactory.h"
#include "OpenPypePublishInstance.h"

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#include "OpenPypePythonBridge.h"

UOpenPypePythonBridge* UOpenPypePythonBridge::Get()
@ -1,9 +1,8 @@
// Fill out your copyright notice in the Description page of Project Settings.
// Copyright 2023, Ayon, All rights reserved.

#include "OpenPypeSettings.h"

#include "IPluginManager.h"
#include "UObjectGlobals.h"
#include "Interfaces/IPluginManager.h"

/**
 * Mainly used for initializing default values if the DefaultOpenPypeSettings.ini file does not exist in the saved config
@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#include "OpenPypeStyle.h"
#include "Framework/Application/SlateApplication.h"
#include "Styling/SlateStyle.h"
@ -43,7 +44,7 @@ const FVector2D Icon40x40(40.0f, 40.0f);
TUniquePtr< FSlateStyleSet > FOpenPypeStyle::Create()
{
    TUniquePtr< FSlateStyleSet > Style = MakeUnique<FSlateStyleSet>(GetStyleSetName());
    Style->SetContentRoot(FPaths::ProjectPluginsDir() / TEXT("OpenPype/Resources"));
    Style->SetContentRoot(FPaths::EnginePluginsDir() / TEXT("Marketplace/OpenPype/Resources"));

    return Style;
}
@ -66,5 +67,4 @@ const ISlateStyle& FOpenPypeStyle::Get()
{
    check(OpenPypeStyleInstance);
    return *OpenPypeStyleInstance;
    return *OpenPypeStyleInstance;
}

@ -0,0 +1,60 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

#include "GameProjectUtils.h"
#include "Commandlets/OPActionResult.h"
#include "ProjectDescriptor.h"
#include "Commandlets/Commandlet.h"
#include "OPGenerateProjectCommandlet.generated.h"

struct FProjectDescriptor;
struct FProjectInformation;

/**
 * @brief Structure which parses command line parameters and generates FProjectInformation
 */
USTRUCT()
struct FOPGenerateProjectParams
{
    GENERATED_BODY()

private:
    FString CommandLineParams;
    TArray<FString> Tokens;
    TArray<FString> Switches;

public:
    FOPGenerateProjectParams();
    FOPGenerateProjectParams(const FString& CommandLineParams);

    FProjectInformation GenerateUEProjectInformation() const;

private:
    FString TryGetToken(const int32 Index) const;
    FString GetProjectFileName() const;

    bool IsSwitchPresent(const FString& Switch) const;
};

UCLASS()
class OPENPYPE_API UOPGenerateProjectCommandlet : public UCommandlet
{
    GENERATED_BODY()

private:
    FProjectInformation ProjectInformation;
    FProjectDescriptor ProjectDescriptor;

public:
    UOPGenerateProjectCommandlet();

    virtual int32 Main(const FString& CommandLineParams) override;

private:
    FOPGenerateProjectParams ParseParameters(const FString& Params) const;
    FOP_ActionResult TryCreateProject() const;
    FOP_ActionResult TryLoadProjectDescriptor();
    void AttachPluginsToProjectDescriptor();
    FOP_ActionResult TrySave();
};

@ -0,0 +1,83 @@
// Copyright 2023, Ayon, All rights reserved.

#pragma once

#include "CoreMinimal.h"
#include "OPActionResult.generated.h"

/**
 * @brief This macro returns the error code when there is a problem, or does nothing when there is none.
 * @param ActionResult FOP_ActionResult structure
 */
#define EVALUATE_OP_ACTION_RESULT(ActionResult) \
    if(ActionResult.IsProblem()) \
        return ActionResult.GetStatus();

/**
 * @brief These enum values are a human-readable mapping of error codes.
 * All error codes should be listed here so it is possible to find out what went wrong.
 * TODO: In the future there should exist a web document mapping each error code to the problem that occurred and how to repair it.
 */
UENUM()
namespace EOP_ActionResult
{
    enum Type
    {
        Ok,
        ProjectNotCreated,
        ProjectNotLoaded,
        ProjectNotSaved,
        //....Insert additional values here

        //Do not remove!
        //Usable for looping through enum values
        __Last UMETA(Hidden)
    };
}


/**
 * @brief This struct holds the action result enum and, optionally, the reason of a failure
 */
USTRUCT()
struct FOP_ActionResult
{
    GENERATED_BODY()

public:
    /** @brief Default constructor usable when there is no problem */
    FOP_ActionResult();

    /**
     * @brief This constructor initializes variables & attempts to log when there is an error
     * @param InEnum Status
     */
    FOP_ActionResult(const EOP_ActionResult::Type& InEnum);

    /**
     * @brief This constructor initializes variables & attempts to log when there is an error
     * @param InEnum Status
     * @param InReason Reason of the potential failure
     */
    FOP_ActionResult(const EOP_ActionResult::Type& InEnum, const FText& InReason);

private:
    /** @brief Action status */
    EOP_ActionResult::Type Status;

    /** @brief Optional reason of failure */
    FText Reason;

public:
    /**
     * @brief Checks whether the result is a problematic state
     * @return true when status is not equal to EOP_ActionResult::Ok
     */
    bool IsProblem() const;
    EOP_ActionResult::Type& GetStatus();
    FText& GetReason();

private:
    void TryLog() const;
};

@ -0,0 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

DEFINE_LOG_CATEGORY_STATIC(LogCommandletOPGenerateProject, Log, All);

@ -0,0 +1,13 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

namespace OPConstants
{
    const FString OP_PluginName = "OpenPype";
    const FString PythonScript_PluginName = "PythonScriptPlugin";
    const FString SequencerScripting_PluginName = "SequencerScripting";
    const FString MovieRenderPipeline_PluginName = "MovieRenderPipeline";
    const FString EditorScriptingUtils_PluginName = "EditorScriptingUtilities";
}

@ -1,4 +1,4 @@
// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved.
// Copyright 2023, Ayon, All rights reserved.

#pragma once

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

#include "Engine.h"

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

#include "Engine.h"
@ -16,7 +17,7 @@ public:
     *
     * @return - Set of UObjects. Careful! They are returning raw pointers. Seems like an issue in UE5
     */
    UFUNCTION(BlueprintCallable, BlueprintPure)
    UFUNCTION(BlueprintCallable, BlueprintPure, Category="Python")
    TSet<UObject*> GetInternalAssets() const
    {
        //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed.
@ -33,7 +34,7 @@ public:
     *
     * @return - TSet of assets (UObjects). Careful! They are returning raw pointers. Seems like an issue in UE5
     */
    UFUNCTION(BlueprintCallable, BlueprintPure)
    UFUNCTION(BlueprintCallable, BlueprintPure, Category="Python")
    TSet<UObject*> GetExternalAssets() const
    {
        //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed.
@ -53,7 +54,7 @@ public:
     *
     * @attention If the bAddExternalAssets variable is false, external assets won't be included!
     */
    UFUNCTION(BlueprintCallable, BlueprintPure)
    UFUNCTION(BlueprintCallable, BlueprintPure, Category="Python")
    TSet<UObject*> GetAllAssets() const
    {
        const TSet<TSoftObjectPtr<UObject>>& IteratedSet = bAddExternalAssets

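Since these getters are UFUNCTIONs, adding Category="Python" keeps them cleanly exposed to Blueprints and, via PythonScriptPlugin, to the editor's Python. A sketch of calling them from the in-editor Python console; the asset path is hypothetical, and get_all_assets is the snake_case name UE's reflection usually generates for GetAllAssets:

    import unreal

    # Hypothetical content path to an existing publish instance asset.
    instance_path = "/Game/OpenPype/publish_instance"

    instance = unreal.EditorAssetLibrary.load_asset(instance_path)
    # Returns raw UObject pointers, per the comments in the header above.
    for asset in instance.get_all_assets():
        unreal.log(asset.get_path_name())
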
@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once

#include "CoreMinimal.h"

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once
#include "Engine.h"
#include "OpenPypePythonBridge.generated.h"

@ -1,9 +1,8 @@
// Fill out your copyright notice in the Description page of Project Settings.
// Copyright 2023, Ayon, All rights reserved.

#pragma once

#include "CoreMinimal.h"
#include "Object.h"
#include "OpenPypeSettings.generated.h"

#define OPENPYPE_SETTINGS_FILEPATH IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Config") / TEXT("DefaultOpenPypeSettings.ini")

@ -1,3 +1,4 @@
// Copyright 2023, Ayon, All rights reserved.
#pragma once
#include "CoreMinimal.h"

@ -1,115 +0,0 @@
// Fill out your copyright notice in the Description page of Project Settings.

#include "AssetContainer.h"
#include "AssetRegistryModule.h"
#include "Misc/PackageName.h"
#include "Engine.h"
#include "Containers/UnrealString.h"

UAssetContainer::UAssetContainer(const FObjectInitializer& ObjectInitializer)
: UAssetUserData(ObjectInitializer)
{
    FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<FAssetRegistryModule>("AssetRegistry");
    FString path = UAssetContainer::GetPathName();
    UE_LOG(LogTemp, Warning, TEXT("UAssetContainer %s"), *path);
    FARFilter Filter;
    Filter.PackagePaths.Add(FName(*path));

    AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UAssetContainer::OnAssetAdded);
    AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UAssetContainer::OnAssetRemoved);
    AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UAssetContainer::OnAssetRenamed);
}

void UAssetContainer::OnAssetAdded(const FAssetData& AssetData)
{
    TArray<FString> split;

    // get directory of current container
    FString selfFullPath = UAssetContainer::GetPathName();
    FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);

    // get asset path and class
    FString assetPath = AssetData.GetFullName();
    FString assetFName = AssetData.AssetClass.ToString();

    // split path
    assetPath.ParseIntoArray(split, TEXT(" "), true);

    FString assetDir = FPackageName::GetLongPackagePath(*split[1]);

    // take interest only in paths starting with path of current container
    if (assetDir.StartsWith(*selfDir))
    {
        // exclude self
        if (assetFName != "AssetContainer")
        {
            assets.Add(assetPath);
            assetsData.Add(AssetData);
            UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir);
        }
    }
}

void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData)
{
    TArray<FString> split;

    // get directory of current container
    FString selfFullPath = UAssetContainer::GetPathName();
    FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);

    // get asset path and class
    FString assetPath = AssetData.GetFullName();
    FString assetFName = AssetData.AssetClass.ToString();

    // split path
    assetPath.ParseIntoArray(split, TEXT(" "), true);

    FString assetDir = FPackageName::GetLongPackagePath(*split[1]);

    // take interest only in paths starting with path of current container
    FString path = UAssetContainer::GetPathName();
    FString lpp = FPackageName::GetLongPackagePath(*path);

    if (assetDir.StartsWith(*selfDir))
    {
        // exclude self
        if (assetFName != "AssetContainer")
        {
            // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp);
            assets.Remove(assetPath);
            assetsData.Remove(AssetData);
        }
    }
}

void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
{
    TArray<FString> split;

    // get directory of current container
    FString selfFullPath = UAssetContainer::GetPathName();
    FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);

    // get asset path and class
    FString assetPath = AssetData.GetFullName();
    FString assetFName = AssetData.AssetClass.ToString();

    // split path
    assetPath.ParseIntoArray(split, TEXT(" "), true);

    FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
    if (assetDir.StartsWith(*selfDir))
    {
        // exclude self
        if (assetFName != "AssetContainer")
        {
            assets.Remove(str);
            assets.Add(assetPath);
            assetsData.Remove(AssetData);
            // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str);
        }
    }
}

Some files were not shown because too many files have changed in this diff.