Merge branch 'develop' into release/next-minor

This commit is contained in:
Jakub Jezek 2023-12-12 17:29:33 +01:00
commit b87957f017
No known key found for this signature in database
GPG key ID: 730D7C02726179A7
102 changed files with 3144 additions and 1959 deletions

View file

@ -56,16 +56,15 @@ class RenderCreator(Creator):
use_composition_name = (pre_create_data.get("use_composition_name") or
len(comps) > 1)
for comp in comps:
composition_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
if use_composition_name:
if "{composition}" not in subset_name_from_ui.lower():
subset_name_from_ui += "{Composition}"
composition_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
dynamic_fill = prepare_template_data({"composition":
composition_name})
subset_name = subset_name_from_ui.format(**dynamic_fill)
@ -81,6 +80,8 @@ class RenderCreator(Creator):
inst.subset_name))
data["members"] = [comp.id]
data["orig_comp_name"] = composition_name
new_instance = CreatedInstance(self.family, subset_name, data,
self)
if "farm" in pre_create_data:
@ -88,7 +89,7 @@ class RenderCreator(Creator):
new_instance.creator_attributes["farm"] = use_farm
review = pre_create_data["mark_for_review"]
new_instance.creator_attributes["mark_for_review"] = review
new_instance. creator_attributes["mark_for_review"] = review
api.get_stub().imprint(new_instance.id,
new_instance.data_to_store())
@ -150,16 +151,18 @@ class RenderCreator(Creator):
subset_change.new_value)
def remove_instances(self, instances):
"""Removes metadata and renames to original comp name if available."""
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
orig_comp_name = instance.data.get("orig_comp_name")
if comp:
new_comp_name = comp.name.replace(subset, '')
if not new_comp_name:
if orig_comp_name:
new_comp_name = orig_comp_name
else:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)

View file

@ -60,8 +60,9 @@ class ExtractLocalRender(publish.Extractor):
first_repre = not representations
if instance.data["review"] and first_repre:
repre_data["tags"] = ["review"]
thumbnail_path = os.path.join(staging_dir, files[0])
instance.data["thumbnailSource"] = thumbnail_path
# TODO return back when Extract from source same as regular
# thumbnail_path = os.path.join(staging_dir, files[0])
# instance.data["thumbnailSource"] = thumbnail_path
representations.append(repre_data)

View file

@ -14,18 +14,13 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect Chunk Size"
chunkSize = 999999
chunk_size = 999999
def process(self, instance):
# need to get the chunk size info from the setting
attr_values = self.get_attr_values_from_data(instance.data)
instance.data["chunkSize"] = attr_values.get("chunkSize")
@classmethod
def apply_settings(cls, project_settings):
project_setting = project_settings["houdini"]["publish"]["CollectChunkSize"] # noqa
cls.chunkSize = project_setting["chunk_size"]
@classmethod
def get_attribute_defs(cls):
return [
@ -33,7 +28,6 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
minimum=1,
maximum=999999,
decimals=0,
default=cls.chunkSize,
default=cls.chunk_size,
label="Frame Per Task")
]

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -198,8 +198,8 @@ def _render_preview_animation_max_pre_2024(
res_width, res_height, filename=filepath
)
dib = rt.gw.getViewportDib()
dib_width = rt.renderWidth
dib_height = rt.renderHeight
dib_width = float(dib.width)
dib_height = float(dib.height)
# aspect ratio
viewportRatio = dib_width / dib_height
renderRatio = float(res_width / res_height)

View file

@ -1,9 +1,12 @@
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.pipeline.publish import (
RepairAction,
PublishValidationError
)
from openpype.hosts.max.api.lib import reset_scene_resolution
@ -16,6 +19,7 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
hosts = ["max"]
label = "Validate Resolution Setting"
optional = True
actions = [RepairAction]
def process(self, instance):
if not self.is_active(instance.data):

View file

@ -371,7 +371,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
continue
for node in data["nodes"]:
lib.set_attribute(data["attribute"], data["values"][0], node)
with lib.renderlayer(layer_node):
# Repair animation must be enabled
@ -392,13 +391,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
if renderer != "renderman":
prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
fname_prefix = default_prefix
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
cmds.setAttr(prefix_attr, fname_prefix, type="string")
# Repair padding
padding_attr = RenderSettings.get_padding_attr(renderer)
cmds.setAttr("{}.{}".format(node, padding_attr),
cls.DEFAULT_PADDING)
cmds.setAttr(padding_attr, cls.DEFAULT_PADDING)
else:
# renderman handles stuff differently
cmds.setAttr("rmanGlobals.imageFileFormat",

View file

@ -21,6 +21,11 @@ from openpype.pipeline import (
CreatedInstance,
get_current_task_name
)
from openpype.pipeline.colorspace import (
get_display_view_colorspace_name,
get_colorspace_settings_from_publish_context,
set_colorspace_data_to_representation
)
from openpype.lib.transcoding import (
VIDEO_EXTENSIONS
)
@ -612,7 +617,7 @@ class ExporterReview(object):
def get_representation_data(
self, tags=None, range=False,
custom_tags=None
custom_tags=None, colorspace=None
):
""" Add representation data to self.data
@ -652,6 +657,14 @@ class ExporterReview(object):
if self.publish_on_farm:
repre["tags"].append("publish_on_farm")
# add colorspace data to representation
if colorspace:
set_colorspace_data_to_representation(
repre,
self.instance.context.data,
colorspace=colorspace,
log=self.log
)
self.data["representations"].append(repre)
def get_imageio_baking_profile(self):
@ -866,6 +879,13 @@ class ExporterReviewMov(ExporterReview):
return path
def generate_mov(self, farm=False, **kwargs):
# colorspace data
colorspace = None
# get colorspace settings
# get colorspace data from context
config_data, _ = get_colorspace_settings_from_publish_context(
self.instance.context.data)
add_tags = []
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
@ -951,6 +971,14 @@ class ExporterReviewMov(ExporterReview):
# assign viewer
dag_node["view"].setValue(viewer)
if config_data:
# convert display and view to colorspace
colorspace = get_display_view_colorspace_name(
config_path=config_data["path"],
display=display,
view=viewer
)
self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`")
# Write node
write_node = nuke.createNode("Write")
@ -996,9 +1024,10 @@ class ExporterReviewMov(ExporterReview):
# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"] + add_tags,
tags=["review", "need_thumbnail", "delete"] + add_tags,
custom_tags=add_custom_tags,
range=True
range=True,
colorspace=colorspace
)
self.log.debug("Representation... `{}`".format(self.data))

View file

@ -276,7 +276,7 @@ class ExtractSlateFrame(publish.Extractor):
if not matching_repre:
self.log.info(
"Matching reresentation was not found."
"Matching representation was not found."
" Representation files were not filled with slate."
)
return
@ -294,7 +294,7 @@ class ExtractSlateFrame(publish.Extractor):
self.log.debug(
"__ matching_repre: {}".format(pformat(matching_repre)))
self.log.warning("Added slate frame to representation files")
self.log.info("Added slate frame to representation files")
def add_comment_slate_node(self, instance, node):

View file

@ -1,216 +0,0 @@
import sys
import os
import nuke
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.nuke import api as napi
from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings
# Python 2/3 compatibility
if sys.version_info[0] >= 3:
unicode = str
class ExtractThumbnail(publish.Extractor):
    """Extracts movie and thumbnail with baked in luts

    must be run after extract_render_local.py
    """

    # Runs just after the local render extractor (ExtractorOrder + 0.011).
    order = pyblish.api.ExtractorOrder + 0.011
    label = "Extract Thumbnail"
    families = ["review"]
    hosts = ["nuke"]

    # settings
    # use_rendered: when True, read the already-rendered frame from disk
    # instead of re-rendering through the group node.
    use_rendered = False
    # bake_viewer_process: bake the viewer (OCIODisplay) into the thumbnail.
    bake_viewer_process = True
    # bake_viewer_input_process: also bake the viewer's input process node.
    bake_viewer_input_process = True
    # nodes: [deprecated] per-node knob overrides, e.g. {"Reformat": [...]}.
    nodes = {}
    # reposition_nodes: new-style list of {"node_class", "knobs"} dicts;
    # None selects the deprecated single-Reformat code path below.
    reposition_nodes = None

    def process(self, instance):
        # Farm-bound instances are skipped; the thumbnail is produced on farm.
        if instance.data.get("farm"):
            return

        # Keep the user's node selection intact while we create temp nodes.
        with napi.maintained_selection():
            self.log.debug("instance: {}".format(instance))
            self.log.debug("instance.data[families]: {}".format(
                instance.data["families"]))

            if instance.data.get("bakePresets"):
                # One thumbnail per configured bake preset.
                for o_name, o_data in instance.data["bakePresets"].items():
                    self.render_thumbnail(instance, o_name, **o_data)
            else:
                # No presets: bake everything with default switches.
                viewer_process_switches = {
                    "bake_viewer_process": True,
                    "bake_viewer_input_process": True
                }
                self.render_thumbnail(
                    instance, None, **viewer_process_switches)

    def render_thumbnail(self, instance, output_name=None, **kwargs):
        """Render a single jpg thumbnail for *instance*.

        Args:
            instance: pyblish instance being published.
            output_name (str, optional): bake-preset name used to suffix
                the representation name when multiple presets exist.
            **kwargs: must contain "bake_viewer_process" and
                "bake_viewer_input_process" booleans.
        """
        first_frame = instance.data["frameStartHandle"]
        last_frame = instance.data["frameEndHandle"]
        colorspace = instance.data["colorspace"]

        # find frame range and define middle thumb frame
        # NOTE(review): mid_frame is an OFFSET from first_frame, but below it
        # is used both as a list index into `collection` (offset — correct)
        # and as the absolute frame for nuke.execute (no first_frame added) —
        # verify this is intended for sequences not starting at frame 0.
        mid_frame = int((last_frame - first_frame) / 2)

        # solve output name if any is set
        output_name = output_name or ""

        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]

        node = instance.data["transientData"]["node"]  # group node
        self.log.debug("Creating staging dir...")

        if "representations" not in instance.data:
            instance.data["representations"] = []

        # Staging dir is the directory of the instance's render path.
        staging_dir = os.path.normpath(
            os.path.dirname(instance.data['path']))

        instance.data["stagingDir"] = staging_dir

        self.log.debug(
            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

        # All nodes created here are deleted at the end of this method.
        temporary_nodes = []

        # try to connect already rendered images
        previous_node = node
        collection = instance.data.get("collection", None)
        self.log.debug("__ collection: `{}`".format(collection))

        if collection:
            # get path
            # fhead: common filename head of the sequence; thumb frame is
            # picked from the middle of the collection.
            fhead = collection.format("{head}")
            thumb_fname = list(collection)[mid_frame]
        else:
            # Single file: derive head from the basename (extension dropped).
            fname = thumb_fname = os.path.basename(
                instance.data.get("path", None))
            fhead = os.path.splitext(fname)[0] + "."
        self.log.debug("__ fhead: `{}`".format(fhead))

        # Strip frame-padding hashes plus the preceding separator character.
        if "#" in fhead:
            fhead = fhead.replace("#", "")[:-1]

        path_render = os.path.join(
            staging_dir, thumb_fname).replace("\\", "/")
        self.log.debug("__ path_render: `{}`".format(path_render))

        if self.use_rendered and os.path.isfile(path_render):
            # check if file exist otherwise connect to write node
            rnode = nuke.createNode("Read")
            rnode["file"].setValue(path_render)
            rnode["colorspace"].setValue(colorspace)

            # turn it raw if none of baking is ON
            if all([
                not self.bake_viewer_input_process,
                not self.bake_viewer_process
            ]):
                rnode["raw"].setValue(True)

            temporary_nodes.append(rnode)
            previous_node = rnode

        if self.reposition_nodes is None:
            # [deprecated] create reformat node old way
            reformat_node = nuke.createNode("Reformat")
            ref_node = self.nodes.get("Reformat", None)
            if ref_node:
                for k, v in ref_node:
                    self.log.debug("k, v: {0}:{1}".format(k, v))
                    if isinstance(v, unicode):
                        v = str(v)
                    reformat_node[k].setValue(v)
            reformat_node.setInput(0, previous_node)
            previous_node = reformat_node
            temporary_nodes.append(reformat_node)
        else:
            # create reformat node new way
            for repo_node in self.reposition_nodes:
                node_class = repo_node["node_class"]
                knobs = repo_node["knobs"]
                # NOTE: rebinds `node` (originally the group node); safe only
                # because the group node is no longer needed past this point.
                node = nuke.createNode(node_class)
                set_node_knobs_from_settings(node, knobs)

                # connect in order
                node.setInput(0, previous_node)
                previous_node = node
                temporary_nodes.append(node)

        # only create colorspace baking if toggled on
        if bake_viewer_process:
            if bake_viewer_input_process_node:
                # get input process and connect it to baking
                ipn = napi.get_view_process_node()
                if ipn is not None:
                    ipn.setInput(0, previous_node)
                    previous_node = ipn
                    temporary_nodes.append(ipn)

            dag_node = nuke.createNode("OCIODisplay")
            dag_node.setInput(0, previous_node)
            previous_node = dag_node
            temporary_nodes.append(dag_node)

        thumb_name = "thumbnail"
        # only add output name and
        # if there are more than one bake preset
        if (
            output_name
            and len(instance.data.get("bakePresets", {}).keys()) > 1
        ):
            thumb_name = "{}_{}".format(output_name, thumb_name)

        # create write node
        write_node = nuke.createNode("Write")
        file = fhead[:-1] + thumb_name + ".jpg"
        thumb_path = os.path.join(staging_dir, file).replace("\\", "/")

        # add thumbnail to cleanup
        instance.context.data["cleanupFullPaths"].append(thumb_path)

        # make sure only one thumbnail path is set
        # and it is existing file
        instance_thumb_path = instance.data.get("thumbnailPath")
        if not instance_thumb_path or not os.path.isfile(instance_thumb_path):
            instance.data["thumbnailPath"] = thumb_path

        write_node["file"].setValue(thumb_path)
        write_node["file_type"].setValue("jpg")
        # raw=1: colorspace was already baked upstream (or intentionally raw).
        write_node["raw"].setValue(1)
        write_node.setInput(0, previous_node)
        temporary_nodes.append(write_node)
        repre = {
            'name': thumb_name,
            'ext': "jpg",
            "outputName": thumb_name,
            'files': file,
            "stagingDir": staging_dir,
            "tags": ["thumbnail", "publish_on_farm", "delete"]
        }
        instance.data["representations"].append(repre)

        # Render frames
        nuke.execute(write_node.name(), mid_frame, mid_frame)

        self.log.debug(
            "representations: {}".format(instance.data["representations"]))

        # Clean up
        for node in temporary_nodes:
            nuke.delete(node)

View file

@ -257,8 +257,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
if 'shot' not in instance.data.get('family', ''):
continue
name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
@ -286,6 +284,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
parents = instance.data.get('parents', [])
self.log.debug(f"parents: {pformat(parents)}")
# Split by '/' for AYON where asset is a path
name = instance.data["asset"].split("/")[-1]
actual = {name: in_info}
for parent in reversed(parents):

View file

@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect original base name for use in templates."""
from pathlib import Path
import pyblish.api
class CollectOriginalBasename(pyblish.api.InstancePlugin):
    """Store the first representation file's base name on the instance.

    The stem (file name without extension) is saved under
    ``originalBasename`` so publish templates can reference it.
    """

    order = pyblish.api.CollectorOrder + 0.498
    label = "Collect Base Name"
    hosts = ["standalonepublisher"]
    families = ["simpleUnrealTexture"]

    def process(self, instance):
        # Only the first representation's file is considered.
        first_file = instance.data["representations"][0]["files"]
        instance.data["originalBasename"] = Path(first_file).stem

View file

@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
"""Validator for correct file naming."""
import re
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
)
class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin):
    """Validate the texture base name against the Unreal naming pattern.

    The ``regex`` template is formatted with the instance's asset name and
    matched against the collected ``originalBasename``.
    """

    label = "Validate Unreal Texture Names"
    hosts = ["standalonepublisher"]
    families = ["simpleUnrealTexture"]
    order = ValidateContentsOrder
    # Template: asset name is substituted before matching.
    regex = "^T_{asset}.*"

    def process(self, instance):
        file_name = instance.data.get("originalBasename")
        self.log.info(file_name)

        asset = instance.data.get("asset")
        pattern = self.regex.format(asset=asset)

        # Guard clause: valid names pass straight through.
        if re.match(pattern, file_name):
            return

        raise PublishXmlValidationError(
            self,
            f"Invalid file name {file_name}",
            formatting_data={
                "invalid_file": file_name,
                "asset": asset,
            },
        )

View file

@ -583,18 +583,9 @@ def prompt_new_file_with_mesh(mesh_filepath):
file_dialog.setDirectory(os.path.dirname(mesh_filepath))
url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
file_dialog.selectUrl(url)
# Give the explorer window time to refresh to the folder and select
# the file
while not file_dialog.selectedFiles():
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
print(f"Selected: {file_dialog.selectedFiles()}")
# Set it again now we know the path is refreshed - without this
# accepting the dialog will often not trigger the correct filepath
file_dialog.setDirectory(os.path.dirname(mesh_filepath))
url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
file_dialog.selectUrl(url)
# TODO: find a way to improve the process event to
# load more complicated mesh
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)
file_dialog.done(file_dialog.Accepted)
app.processEvents(QtCore.QEventLoop.AllEvents)
@ -628,7 +619,12 @@ def prompt_new_file_with_mesh(mesh_filepath):
mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel)
if not mesh_filename_label.text():
dialog.close()
raise RuntimeError(f"Failed to set mesh path: {mesh_filepath}")
substance_painter.logging.warning(
"Failed to set mesh path with the prompt dialog:"
f"{mesh_filepath}\n\n"
"Creating new project directly with the mesh path instead.")
else:
dialog.done(dialog.Accepted)
new_action = _get_new_project_action()
if not new_action:

View file

@ -44,14 +44,22 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
# Get user inputs
import_cameras = data.get("import_cameras", True)
preserve_strokes = data.get("preserve_strokes", True)
sp_settings = substance_painter.project.Settings(
import_cameras=import_cameras
)
if not substance_painter.project.is_open():
# Allow to 'initialize' a new project
path = self.filepath_from_context(context)
# TODO: improve the prompt dialog function to not
# only works for simple polygon scene
result = prompt_new_file_with_mesh(mesh_filepath=path)
if not result:
self.log.info("User cancelled new project prompt.")
return
self.log.info("User cancelled new project prompt."
"Creating new project directly from"
" Substance Painter API Instead.")
settings = substance_painter.project.create(
mesh_file_path=path, settings=sp_settings
)
else:
# Reload the mesh

View file

@ -663,7 +663,7 @@ or updating already created. Publishing will create OTIO file.
variant_name = instance_data["variant"]
# basic unique asset name
clip_name = os.path.splitext(otio_clip.name)[0].lower()
clip_name = os.path.splitext(otio_clip.name)[0]
project_doc = get_project(self.project_name)
shot_name, shot_metadata = self._shot_metadata_solver.generate_data(

View file

@ -155,8 +155,6 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
else {}
)
asset_name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
@ -177,6 +175,8 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
parents = instance.data.get('parents', [])
# Split by '/' for AYON where asset is a path
asset_name = instance.data["asset"].split("/")[-1]
actual = {asset_name: in_info}
for parent in reversed(parents):

View file

@ -73,7 +73,7 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
render_layer_id = creator_attributes["render_layer_instance_id"]
for in_data in instance.context.data["workfileInstances"]:
if (
in_data["creator_identifier"] == "render.layer"
in_data.get("creator_identifier") == "render.layer"
and in_data["instance_id"] == render_layer_id
):
render_layer_data = in_data