Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 05:14:40 +01:00)

Commit 5358395008: Merge branch 'develop' into enhancement/OP-3622_Delivery-renamed-frame-numbers

53 changed files with 1018 additions and 275 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 4 changes)

@@ -35,6 +35,8 @@ body:
      label: Version
      description: What version are you running? Look to OpenPype Tray
      options:
        - 3.15.12-nightly.3
        - 3.15.12-nightly.2
        - 3.15.12-nightly.1
        - 3.15.11
        - 3.15.11-nightly.5

@@ -133,8 +135,6 @@ body:
        - 3.14.4
        - 3.14.4-nightly.4
        - 3.14.4-nightly.3
        - 3.14.4-nightly.2
        - 3.14.4-nightly.1
    validations:
      required: true
  - type: dropdown
@@ -21,8 +21,13 @@ from .pipeline import (
    reset_selection
)

from .constants import (
    OPENPYPE_TAG_NAME,
    DEFAULT_SEQUENCE_NAME,
    DEFAULT_BIN_NAME
)

from .lib import (
    pype_tag_name,
    flatten,
    get_track_items,
    get_current_project,

@@ -82,8 +87,12 @@ __all__ = [
    "file_extensions",
    "work_root",

    # Constants
    "OPENPYPE_TAG_NAME",
    "DEFAULT_SEQUENCE_NAME",
    "DEFAULT_BIN_NAME",

    # Lib functions
    "pype_tag_name",
    "flatten",
    "get_track_items",
    "get_current_project",
openpype/hosts/hiero/api/constants.py (new file, 3 lines)

@@ -0,0 +1,3 @@
OPENPYPE_TAG_NAME = "openpypeData"
DEFAULT_SEQUENCE_NAME = "openpypeSequence"
DEFAULT_BIN_NAME = "openpypeBin"
|
@@ -5,7 +5,6 @@ Host specific functions where host api is connected
from copy import deepcopy
import os
import re
import sys
import platform
import functools
import warnings

@@ -29,12 +28,22 @@ from openpype.pipeline import (
from openpype.pipeline.load import filter_containers
from openpype.lib import Logger
from . import tags

from .constants import (
    OPENPYPE_TAG_NAME,
    DEFAULT_SEQUENCE_NAME,
    DEFAULT_BIN_NAME
)
from openpype.pipeline.colorspace import (
    get_imageio_config
)


class _CTX:
    has_been_setup = False
    has_menu = False
    parent_gui = None


class DeprecatedWarning(DeprecationWarning):
    pass
@@ -82,23 +91,14 @@ def deprecated(new_destination):

log = Logger.get_logger(__name__)

self = sys.modules[__name__]
self._has_been_setup = False
self._has_menu = False
self._registered_gui = None
self._parent = None
self.pype_tag_name = "openpypeData"
self.default_sequence_name = "openpypeSequence"
self.default_bin_name = "openpypeBin"


def flatten(_list):
    for item in _list:
        if isinstance(item, (list, tuple)):
            for sub_item in flatten(item):
def flatten(list_):
    for item_ in list_:
        if isinstance(item_, (list, tuple)):
            for sub_item in flatten(item_):
                yield sub_item
        else:
            yield item
            yield item_


def get_current_project(remove_untitled=False):
@@ -131,7 +131,7 @@ def get_current_sequence(name=None, new=False):

    if new:
        # create new
        name = name or self.default_sequence_name
        name = name or DEFAULT_SEQUENCE_NAME
        sequence = hiero.core.Sequence(name)
        root_bin.addItem(hiero.core.BinItem(sequence))
    elif name:

@@ -345,7 +345,7 @@ def get_track_item_tags(track_item):
    # collect all tags which are not openpype tag
    returning_tag_data.extend(
        tag for tag in _tags
        if tag.name() != self.pype_tag_name
        if tag.name() != OPENPYPE_TAG_NAME
    )

    return returning_tag_data

@@ -385,7 +385,7 @@ def set_track_openpype_tag(track, data=None):
    # if pype tag available then update with input data
    tag = tags.create_tag(
        "{}_{}".format(
            self.pype_tag_name,
            OPENPYPE_TAG_NAME,
            _get_tag_unique_hash()
        ),
        tag_data

@@ -412,7 +412,7 @@ def get_track_openpype_tag(track):
        return None
    for tag in _tags:
        # return only correct tag defined by global name
        if self.pype_tag_name in tag.name():
        if OPENPYPE_TAG_NAME in tag.name():
            return tag
@@ -484,7 +484,7 @@ def get_trackitem_openpype_tag(track_item):
        return None
    for tag in _tags:
        # return only correct tag defined by global name
        if self.pype_tag_name in tag.name():
        if OPENPYPE_TAG_NAME in tag.name():
            return tag

@@ -516,7 +516,7 @@ def set_trackitem_openpype_tag(track_item, data=None):
    # if pype tag available then update with input data
    tag = tags.create_tag(
        "{}_{}".format(
            self.pype_tag_name,
            OPENPYPE_TAG_NAME,
            _get_tag_unique_hash()
        ),
        tag_data
@@ -698,29 +698,29 @@ def setup(console=False, port=None, menu=True):
        menu (bool, optional): Display file menu in Hiero.
    """

    if self._has_been_setup:
    if _CTX.has_been_setup:
        teardown()

    add_submission()

    if menu:
        add_to_filemenu()
        self._has_menu = True
        _CTX.has_menu = True

    self._has_been_setup = True
    _CTX.has_been_setup = True
    log.debug("pyblish: Loaded successfully.")


def teardown():
    """Remove integration"""
    if not self._has_been_setup:
    if not _CTX.has_been_setup:
        return

    if self._has_menu:
    if _CTX.has_menu:
        remove_from_filemenu()
        self._has_menu = False
        _CTX.has_menu = False

    self._has_been_setup = False
    _CTX.has_been_setup = False
    log.debug("pyblish: Integration torn down successfully")
@@ -928,7 +928,7 @@ def create_bin(path=None, project=None):
    # get the first loaded project
    project = project or get_current_project()

    path = path or self.default_bin_name
    path = path or DEFAULT_BIN_NAME

    path = path.replace("\\", "/").split("/")
@@ -1311,11 +1311,11 @@ def before_project_save(event):

def get_main_window():
    """Acquire Nuke's main window"""
    if self._parent is None:
    if _CTX.parent_gui is None:
        top_widgets = QtWidgets.QApplication.topLevelWidgets()
        name = "Foundry::UI::DockMainWindow"
        main_window = next(widget for widget in top_widgets if
                           widget.inherits("QMainWindow") and
                           widget.metaObject().className() == name)
        self._parent = main_window
        return self._parent
        _CTX.parent_gui = main_window
        return _CTX.parent_gui
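Note on the refactor above: module-level state previously hung off the module object itself (self = sys.modules[__name__]) is now grouped on a small _CTX holder class. A minimal standalone sketch of the pattern, not part of the diff and with illustrative names:

    import sys

    # Old style: treat the module object as a mutable state container.
    _module = sys.modules[__name__]
    _module.has_been_setup = False

    # New style: keep the mutable flags on one clearly named class.
    class _CTX:
        has_been_setup = False
        has_menu = False
        parent_gui = None

    def setup():
        # All state reads and writes now go through _CTX.
        _CTX.has_been_setup = True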
@@ -3,20 +3,18 @@

import os
import re
import sys
import ast
import opentimelineio as otio
from . import utils
import hiero.core
import hiero.ui

self = sys.modules[__name__]
self.track_types = {

TRACK_TYPE_MAP = {
    hiero.core.VideoTrack: otio.schema.TrackKind.Video,
    hiero.core.AudioTrack: otio.schema.TrackKind.Audio
}
self.project_fps = None
self.marker_color_map = {
MARKER_COLOR_MAP = {
    "magenta": otio.schema.MarkerColor.MAGENTA,
    "red": otio.schema.MarkerColor.RED,
    "yellow": otio.schema.MarkerColor.YELLOW,
@@ -24,30 +22,21 @@ self.marker_color_map = {
    "cyan": otio.schema.MarkerColor.CYAN,
    "blue": otio.schema.MarkerColor.BLUE,
}
self.timeline = None
self.include_tags = True


def flatten(_list):
    for item in _list:
        if isinstance(item, (list, tuple)):
            for sub_item in flatten(item):
class CTX:
    project_fps = None
    timeline = None
    include_tags = True


def flatten(list_):
    for item_ in list_:
        if isinstance(item_, (list, tuple)):
            for sub_item in flatten(item_):
                yield sub_item
        else:
            yield item


def get_current_hiero_project(remove_untitled=False):
    projects = flatten(hiero.core.projects())
    if not remove_untitled:
        return next(iter(projects))

    # if remove_untitled
    for proj in projects:
        if "Untitled" in proj.name():
            proj.close()
        else:
            return proj
            yield item_


def create_otio_rational_time(frame, fps):
@@ -152,7 +141,7 @@ def create_otio_reference(clip):
    file_head = media_source.filenameHead()
    is_sequence = not media_source.singleFile()
    frame_duration = media_source.duration()
    fps = utils.get_rate(clip) or self.project_fps
    fps = utils.get_rate(clip) or CTX.project_fps
    extension = os.path.splitext(path)[-1]

    if is_sequence:

@@ -217,8 +206,8 @@ def get_marker_color(tag):
    res = re.search(pat, icon)
    if res:
        color = res.groupdict().get('color')
        if color.lower() in self.marker_color_map:
            return self.marker_color_map[color.lower()]
        if color.lower() in MARKER_COLOR_MAP:
            return MARKER_COLOR_MAP[color.lower()]

    return otio.schema.MarkerColor.RED

@@ -232,7 +221,7 @@ def create_otio_markers(otio_item, item):
            # Hiero adds this tag to a lot of clips
            continue

        frame_rate = utils.get_rate(item) or self.project_fps
        frame_rate = utils.get_rate(item) or CTX.project_fps

        marked_range = otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(
@@ -279,7 +268,7 @@ def create_otio_clip(track_item):

    duration = int(track_item.duration())

    fps = utils.get_rate(track_item) or self.project_fps
    fps = utils.get_rate(track_item) or CTX.project_fps
    name = track_item.name()

    media_reference = create_otio_reference(clip)

@@ -296,7 +285,7 @@ def create_otio_clip(track_item):
    )

    # Add tags as markers
    if self.include_tags:
    if CTX.include_tags:
        create_otio_markers(otio_clip, track_item)
        create_otio_markers(otio_clip, track_item.source())
@@ -319,13 +308,13 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):


def _create_otio_timeline():
    project = get_current_hiero_project(remove_untitled=False)
    metadata = _get_metadata(self.timeline)
    project = CTX.timeline.project()
    metadata = _get_metadata(CTX.timeline)

    metadata.update({
        "openpype.timeline.width": int(self.timeline.format().width()),
        "openpype.timeline.height": int(self.timeline.format().height()),
        "openpype.timeline.pixelAspect": int(self.timeline.format().pixelAspect()),  # noqa
        "openpype.timeline.width": int(CTX.timeline.format().width()),
        "openpype.timeline.height": int(CTX.timeline.format().height()),
        "openpype.timeline.pixelAspect": int(CTX.timeline.format().pixelAspect()),  # noqa
        "openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),  # noqa
        "openpype.project.lutSetting16Bit": project.lutSetting16Bit(),
        "openpype.project.lutSetting8Bit": project.lutSetting8Bit(),
@@ -339,10 +328,10 @@ def _create_otio_timeline():
    })

    start_time = create_otio_rational_time(
        self.timeline.timecodeStart(), self.project_fps)
        CTX.timeline.timecodeStart(), CTX.project_fps)

    return otio.schema.Timeline(
        name=self.timeline.name(),
        name=CTX.timeline.name(),
        global_start_time=start_time,
        metadata=metadata
    )

@@ -351,7 +340,7 @@ def _create_otio_timeline():
def create_otio_track(track_type, track_name):
    return otio.schema.Track(
        name=track_name,
        kind=self.track_types[track_type]
        kind=TRACK_TYPE_MAP[track_type]
    )
@@ -363,7 +352,7 @@ def add_otio_gap(track_item, otio_track, prev_out):
    gap = otio.opentime.TimeRange(
        duration=otio.opentime.RationalTime(
            gap_length,
            self.project_fps
            CTX.project_fps
        )
    )
    otio_gap = otio.schema.Gap(source_range=gap)
@@ -396,14 +385,14 @@ def create_otio_timeline():
        return track_item.parent().items()[itemindex - 1]

    # get current timeline
    self.timeline = hiero.ui.activeSequence()
    self.project_fps = self.timeline.framerate().toFloat()
    CTX.timeline = hiero.ui.activeSequence()
    CTX.project_fps = CTX.timeline.framerate().toFloat()

    # convert timeline to otio
    otio_timeline = _create_otio_timeline()

    # loop all defined track types
    for track in self.timeline.items():
    for track in CTX.timeline.items():
        # skip if track is disabled
        if not track.isEnabled():
            continue

@@ -441,7 +430,7 @@ def create_otio_timeline():
        otio_track.append(otio_clip)

        # Add tags as markers
        if self.include_tags:
        if CTX.include_tags:
            create_otio_markers(otio_track, track)

        # add track to otio timeline
@@ -310,7 +310,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

        # add pypedata marker to otio_clip metadata
        for marker in otio_clip.markers:
            if phiero.pype_tag_name in marker.name:
            if phiero.OPENPYPE_TAG_NAME in marker.name:
                otio_clip.metadata.update(marker.metadata)
                return {"otioClip": otio_clip}
@@ -8,7 +8,6 @@ from qtpy.QtGui import QPixmap
import hiero.ui

from openpype.pipeline import legacy_io
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export

@@ -22,8 +21,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):

        asset = legacy_io.Session["AVALON_ASSET"]
        subset = "workfile"
        project = phiero.get_current_project()
        active_timeline = hiero.ui.activeSequence()
        project = active_timeline.project()
        fps = active_timeline.framerate().toFloat()

        # adding otio timeline to context
@@ -633,23 +633,8 @@ def evalParmNoFrame(node, parm, pad_character="#"):

def get_color_management_preferences():
    """Get default OCIO preferences"""
    data = {
        "config": hou.Color.ocio_configPath()

    return {
        "config": hou.Color.ocio_configPath(),
        "display": hou.Color.ocio_defaultDisplay(),
        "view": hou.Color.ocio_defaultView()
    }

    # Get default display and view from OCIO
    display = hou.Color.ocio_defaultDisplay()
    disp_regex = re.compile(r"^(?P<name>.+-)(?P<display>.+)$")
    disp_match = disp_regex.match(display)

    view = hou.Color.ocio_defaultView()
    view_regex = re.compile(r"^(?P<name>.+- )(?P<view>.+)$")
    view_match = view_regex.match(view)
    data.update({
        "display": disp_match.group("display"),
        "view": view_match.group("view")

    })

    return data
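The rewritten get_color_management_preferences() now returns the display and view strings exactly as Houdini reports them instead of regex-splitting them. Assuming a default OCIO setup, the returned dictionary would look roughly like this (values are illustrative, not from the diff):

    {
        "config": "/path/to/config.ocio",
        "display": "ACES",
        "view": "sRGB"
    }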
@@ -93,7 +93,7 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
            import hdefereval  # noqa, hdefereval is only available in ui mode
            hdefereval.executeDeferred(creator_node_shelves.install)

    def has_unsaved_changes(self):
    def workfile_has_unsaved_changes(self):
        return hou.hipFile.hasUnsavedChanges()

    def get_workfile_extensions(self):
@@ -3,12 +3,12 @@
import hou  # noqa

from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import EnumDef


class CreateRedshiftROP(plugin.HoudiniCreator):
    """Redshift ROP"""

    identifier = "io.openpype.creators.houdini.redshift_rop"
    label = "Redshift ROP"
    family = "redshift_rop"

@@ -28,7 +28,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
        instance = super(CreateRedshiftROP, self).create(
            subset_name,
            instance_data,
            pre_create_data)  # type: CreatedInstance
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))

@@ -57,6 +57,8 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
            fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext)
        )

        ext_format_index = {"exr": 0, "tif": 1, "jpg": 2, "png": 3}

        parms = {
            # Render frame range
            "trange": 1,

@@ -64,6 +66,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
            "RS_outputFileNamePrefix": filepath,
            "RS_outputMultilayerMode": "1",  # no multi-layered exr
            "RS_outputBeautyAOVSuffix": "beauty",
            "RS_outputFileFormat": ext_format_index[ext],
        }

        if self.selected_nodes:

@@ -93,8 +96,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
    def get_pre_create_attr_defs(self):
        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
        image_format_enum = [
            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
            "rad", "rat", "rta", "sgi", "tga", "tif",
            "exr", "tif", "jpg", "png",
        ]

        return attrs + [
@@ -19,7 +19,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
            "Collected filename from current scene name."
        )

        if host.has_unsaved_changes():
        if host.workfile_has_unsaved_changes():
            self.log.info("Saving current file: {}".format(current_file))
            host.save_workfile(current_file)
        else:
@@ -73,6 +73,14 @@ class ValidateAbcPrimitiveToDetail(pyblish.api.InstancePlugin):
        cls.log.debug("Checking Primitive to Detail pattern: %s" % pattern)
        cls.log.debug("Checking with path attribute: %s" % path_attr)

        if not hasattr(output_node, "geometry"):
            # In the case someone has explicitly set an Object
            # node instead of a SOP node in Geometry context
            # then for now we ignore - this allows us to also
            # export object transforms.
            cls.log.warning("No geometry output node found, skipping check..")
            return

        # Check if the primitive attribute exists
        frame = instance.data.get("frameStart", 0)
        geo = output_node.geometryAtFrame(frame)
@@ -60,6 +60,14 @@ class ValidatePrimitiveHierarchyPaths(pyblish.api.InstancePlugin):

        cls.log.debug("Checking for attribute: %s" % path_attr)

        if not hasattr(output_node, "geometry"):
            # In the case someone has explicitly set an Object
            # node instead of a SOP node in Geometry context
            # then for now we ignore - this allows us to also
            # export object transforms.
            cls.log.warning("No geometry output node found, skipping check..")
            return

        # Check if the primitive attribute exists
        frame = instance.data.get("frameStart", 0)
        geo = output_node.geometryAtFrame(frame)
@@ -53,6 +53,8 @@ def update_mode_context(mode):

def get_geometry_at_frame(sop_node, frame, force=True):
    """Return geometry at frame but force a cooked value."""
    if not hasattr(sop_node, "geometry"):
        return
    with update_mode_context(hou.updateMode.AutoUpdate):
        sop_node.cook(force=force, frame_range=(frame, frame))
        return sop_node.geometryAtFrame(frame)
@@ -284,6 +284,21 @@ def get_max_version():
    return max_info[7]


@contextlib.contextmanager
def viewport_camera(camera):
    original = rt.viewport.getCamera()
    if not original:
        # if there is no original camera
        # use the current camera as original
        original = rt.getNodeByName(camera)
    review_camera = rt.getNodeByName(camera)
    try:
        rt.viewport.setCamera(review_camera)
        yield
    finally:
        rt.viewport.setCamera(original)


def set_timeline(frameStart, frameEnd):
    """Set frame range for timeline editor in Max
    """
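A quick usage sketch of the viewport_camera context manager added above (illustrative only; the camera name and the CreatePreview call are assumptions, not part of the diff):

    from pymxs import runtime as rt
    from openpype.hosts.max.api.lib import viewport_camera

    # Look through the review camera only for the duration of the capture;
    # the previously active viewport camera is restored afterwards.
    with viewport_camera("reviewCamera"):
        rt.execute('CreatePreview filename:"review..png" outputAVI:false start:1 end:10 fps:25')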
@@ -42,6 +42,10 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
        (
            handle_name = node_to_name c
            node_ref = NodeTransformMonitor node:c
            idx = finditem list_node.items handle_name
            if idx do (
                continue
            )
            append temp_arr handle_name
            append i_node_arr node_ref
        )
openpype/hosts/max/plugins/create/create_review.py (new file, 57 lines)

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating review in Max."""
from openpype.hosts.max.api import plugin
from openpype.lib import BoolDef, EnumDef, NumberDef


class CreateReview(plugin.MaxCreator):
    """Review in 3dsMax"""

    identifier = "io.openpype.creators.max.review"
    label = "Review"
    family = "review"
    icon = "video-camera"

    def create(self, subset_name, instance_data, pre_create_data):

        instance_data["imageFormat"] = pre_create_data.get("imageFormat")
        instance_data["keepImages"] = pre_create_data.get("keepImages")
        instance_data["percentSize"] = pre_create_data.get("percentSize")
        instance_data["rndLevel"] = pre_create_data.get("rndLevel")

        super(CreateReview, self).create(
            subset_name,
            instance_data,
            pre_create_data)

    def get_pre_create_attr_defs(self):
        attrs = super(CreateReview, self).get_pre_create_attr_defs()

        image_format_enum = [
            "bmp", "cin", "exr", "jpg", "hdr", "rgb", "png",
            "rla", "rpf", "dds", "sgi", "tga", "tif", "vrimg"
        ]

        rndLevel_enum = [
            "smoothhighlights", "smooth", "facethighlights",
            "facet", "flat", "litwireframe", "wireframe", "box"
        ]

        return attrs + [
            BoolDef("keepImages",
                    label="Keep Image Sequences",
                    default=False),
            EnumDef("imageFormat",
                    image_format_enum,
                    default="png",
                    label="Image Format Options"),
            NumberDef("percentSize",
                      label="Percent of Output",
                      default=100,
                      minimum=1,
                      decimals=0),
            EnumDef("rndLevel",
                    rndLevel_enum,
                    default="smoothhighlights",
                    label="Preference")
        ]
openpype/hosts/max/plugins/publish/collect_review.py (new file, 92 lines)

@@ -0,0 +1,92 @@
# dont forget getting the focal length for burnin
"""Collect Review"""
import pyblish.api

from pymxs import runtime as rt
from openpype.lib import BoolDef
from openpype.pipeline.publish import OpenPypePyblishPluginMixin


class CollectReview(pyblish.api.InstancePlugin,
                    OpenPypePyblishPluginMixin):
    """Collect Review Data for Preview Animation"""

    order = pyblish.api.CollectorOrder + 0.02
    label = "Collect Review Data"
    hosts = ['max']
    families = ["review"]

    def process(self, instance):
        nodes = instance.data["members"]
        focal_length = None
        camera_name = None
        for node in nodes:
            if rt.classOf(node) in rt.Camera.classes:
                camera_name = node.name
                focal_length = node.fov

        attr_values = self.get_attr_values_from_data(instance.data)
        data = {
            "review_camera": camera_name,
            "frameStart": instance.context.data["frameStart"],
            "frameEnd": instance.context.data["frameEnd"],
            "fps": instance.context.data["fps"],
            "dspGeometry": attr_values.get("dspGeometry"),
            "dspShapes": attr_values.get("dspShapes"),
            "dspLights": attr_values.get("dspLights"),
            "dspCameras": attr_values.get("dspCameras"),
            "dspHelpers": attr_values.get("dspHelpers"),
            "dspParticles": attr_values.get("dspParticles"),
            "dspBones": attr_values.get("dspBones"),
            "dspBkg": attr_values.get("dspBkg"),
            "dspGrid": attr_values.get("dspGrid"),
            "dspSafeFrame": attr_values.get("dspSafeFrame"),
            "dspFrameNums": attr_values.get("dspFrameNums")
        }
        # Enable ftrack functionality
        instance.data.setdefault("families", []).append('ftrack')

        burnin_members = instance.data.setdefault("burninDataMembers", {})
        burnin_members["focalLength"] = focal_length

        self.log.debug(f"data:{data}")
        instance.data.update(data)

    @classmethod
    def get_attribute_defs(cls):

        return [
            BoolDef("dspGeometry",
                    label="Geometry",
                    default=True),
            BoolDef("dspShapes",
                    label="Shapes",
                    default=False),
            BoolDef("dspLights",
                    label="Lights",
                    default=False),
            BoolDef("dspCameras",
                    label="Cameras",
                    default=False),
            BoolDef("dspHelpers",
                    label="Helpers",
                    default=False),
            BoolDef("dspParticles",
                    label="Particle Systems",
                    default=True),
            BoolDef("dspBones",
                    label="Bone Objects",
                    default=False),
            BoolDef("dspBkg",
                    label="Background",
                    default=True),
            BoolDef("dspGrid",
                    label="Active Grid",
                    default=False),
            BoolDef("dspSafeFrame",
                    label="Safe Frames",
                    default=False),
            BoolDef("dspFrameNums",
                    label="Frame Numbers",
                    default=False)
        ]
openpype/hosts/max/plugins/publish/extract_review_animation.py (new file, 102 lines)

@@ -0,0 +1,102 @@
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version


class ExtractReviewAnimation(publish.Extractor):
    """
    Extract Review by Review Animation
    """

    order = pyblish.api.ExtractorOrder + 0.001
    label = "Extract Review Animation"
    hosts = ["max"]
    families = ["review"]

    def process(self, instance):
        staging_dir = self.staging_dir(instance)
        ext = instance.data.get("imageFormat")
        filename = "{0}..{1}".format(instance.name, ext)
        start = int(instance.data["frameStart"])
        end = int(instance.data["frameEnd"])
        fps = int(instance.data["fps"])
        filepath = os.path.join(staging_dir, filename)
        filepath = filepath.replace("\\", "/")
        filenames = self.get_files(
            instance.name, start, end, ext)

        self.log.debug(
            "Writing Review Animation to"
            " '%s' to '%s'" % (filename, staging_dir))

        review_camera = instance.data["review_camera"]
        with viewport_camera(review_camera):
            preview_arg = self.set_preview_arg(
                instance, filepath, start, end, fps)
            rt.execute(preview_arg)

        tags = ["review"]
        if not instance.data.get("keepImages"):
            tags.append("delete")

        self.log.debug("Performing Extraction ...")

        representation = {
            "name": instance.data["imageFormat"],
            "ext": instance.data["imageFormat"],
            "files": filenames,
            "stagingDir": staging_dir,
            "frameStart": instance.data["frameStart"],
            "frameEnd": instance.data["frameEnd"],
            "tags": tags,
            "preview": True,
            "camera_name": review_camera
        }
        self.log.debug(f"{representation}")

        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)

    def get_files(self, filename, start, end, ext):
        file_list = []
        for frame in range(int(start), int(end) + 1):
            actual_name = "{}.{:04}.{}".format(
                filename, frame, ext)
            file_list.append(actual_name)

        return file_list

    def set_preview_arg(self, instance, filepath,
                        start, end, fps):
        job_args = list()
        default_option = f'CreatePreview filename:"{filepath}"'
        job_args.append(default_option)
        frame_option = f"outputAVI:false start:{start} end:{end} fps:{fps}"  # noqa
        job_args.append(frame_option)
        rndLevel = instance.data.get("rndLevel")
        if rndLevel:
            option = f"rndLevel:#{rndLevel}"
            job_args.append(option)
        options = [
            "percentSize", "dspGeometry", "dspShapes",
            "dspLights", "dspCameras", "dspHelpers", "dspParticles",
            "dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
        ]

        for key in options:
            enabled = instance.data.get(key)
            if enabled:
                job_args.append(f"{key}:{enabled}")

        if get_max_version() == 2024:
            # hardcoded for current stage
            auto_play_option = "autoPlay:false"
            job_args.append(auto_play_option)

        job_str = " ".join(job_args)
        self.log.debug(job_str)

        return job_str
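For illustration only (all values assumed, not taken from the diff), the MaxScript job string assembled by set_preview_arg for a review instance starting at frame 1001, ending at 1010, at 25 fps, with rndLevel "smoothhighlights", percentSize 100 and geometry plus background display enabled would look roughly like:

    CreatePreview filename:"C:/stage/reviewMain..png" outputAVI:false start:1001 end:1010 fps:25 rndLevel:#smoothhighlights percentSize:100 dspGeometry:True dspBkg:True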
openpype/hosts/max/plugins/publish/extract_thumbnail.py (new file, 91 lines)

@@ -0,0 +1,91 @@
import os
import tempfile
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version


class ExtractThumbnail(publish.Extractor):
    """
    Extract Thumbnail for Review
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Thumbnail"
    hosts = ["max"]
    families = ["review"]

    def process(self, instance):
        # TODO: Create temp directory for thumbnail
        # - this is to avoid "override" of source file
        tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
        self.log.debug(
            f"Create temp directory {tmp_staging} for thumbnail"
        )
        fps = int(instance.data["fps"])
        frame = int(instance.data["frameStart"])
        instance.context.data["cleanupFullPaths"].append(tmp_staging)
        filename = "{name}_thumbnail..png".format(**instance.data)
        filepath = os.path.join(tmp_staging, filename)
        filepath = filepath.replace("\\", "/")
        thumbnail = self.get_filename(instance.name, frame)

        self.log.debug(
            "Writing Thumbnail to"
            " '%s' to '%s'" % (filename, tmp_staging))
        review_camera = instance.data["review_camera"]
        with viewport_camera(review_camera):
            preview_arg = self.set_preview_arg(
                instance, filepath, fps, frame)
            rt.execute(preview_arg)

        representation = {
            "name": "thumbnail",
            "ext": "png",
            "files": thumbnail,
            "stagingDir": tmp_staging,
            "thumbnail": True
        }

        self.log.debug(f"{representation}")

        if "representations" not in instance.data:
            instance.data["representations"] = []
        instance.data["representations"].append(representation)

    def get_filename(self, filename, target_frame):
        thumbnail_name = "{}_thumbnail.{:04}.png".format(
            filename, target_frame
        )
        return thumbnail_name

    def set_preview_arg(self, instance, filepath, fps, frame):
        job_args = list()
        default_option = f'CreatePreview filename:"{filepath}"'
        job_args.append(default_option)
        frame_option = f"outputAVI:false start:{frame} end:{frame} fps:{fps}"  # noqa
        job_args.append(frame_option)
        rndLevel = instance.data.get("rndLevel")
        if rndLevel:
            option = f"rndLevel:#{rndLevel}"
            job_args.append(option)
        options = [
            "percentSize", "dspGeometry", "dspShapes",
            "dspLights", "dspCameras", "dspHelpers", "dspParticles",
            "dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
        ]

        for key in options:
            enabled = instance.data.get(key)
            if enabled:
                job_args.append(f"{key}:{enabled}")
        if get_max_version() == 2024:
            # hardcoded for current stage
            auto_play_option = "autoPlay:false"
            job_args.append(auto_play_option)

        job_str = " ".join(job_args)
        self.log.debug(job_str)

        return job_str
@@ -0,0 +1,48 @@
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishValidationError
)
from openpype.hosts.max.api.lib import get_frame_range, set_timeline


class ValidateAnimationTimeline(pyblish.api.InstancePlugin):
    """
    Validates Animation Timeline for Preview Animation in Max
    """

    label = "Animation Timeline for Review"
    order = ValidateContentsOrder
    families = ["review"]
    hosts = ["max"]
    actions = [RepairAction]

    def process(self, instance):
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStart"] - int(
            frame_range["handleStart"]
        )
        frame_end_handle = frame_range["frameEnd"] + int(
            frame_range["handleEnd"]
        )
        if rt.animationRange.start != frame_start_handle or (
            rt.animationRange.end != frame_end_handle
        ):
            raise PublishValidationError("Incorrect animation timeline "
                                         "set for preview animation.. "
                                         "\nYou can use repair action to "
                                         "the correct animation timeline")

    @classmethod
    def repair(cls, instance):
        frame_range = get_frame_range()
        frame_start_handle = frame_range["frameStart"] - int(
            frame_range["handleStart"]
        )
        frame_end_handle = frame_range["frameEnd"] + int(
            frame_range["handleEnd"]
        )
        set_timeline(frame_start_handle, frame_end_handle)
@@ -11,7 +11,7 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
    """

    order = pyblish.api.ValidatorOrder
    families = ["camera"]
    families = ["camera", "review"]
    hosts = ["max"]
    label = "Camera Contents"
    camera_type = ["$Free_Camera", "$Target_Camera",
@@ -13,7 +13,8 @@ class ValidateMaxContents(pyblish.api.InstancePlugin):
    order = pyblish.api.ValidatorOrder
    families = ["camera",
                "maxScene",
                "maxrender"]
                "maxrender",
                "review"]
    hosts = ["max"]
    label = "Max Scene Contents"
@@ -3,7 +3,6 @@
import os
from pprint import pformat
import sys
import platform
import uuid
import re
@@ -2811,19 +2810,22 @@ def get_attr_in_layer(attr, layer):

def fix_incompatible_containers():
    """Backwards compatibility: old containers to use new ReferenceLoader"""

    old_loaders = {
        "MayaAsciiLoader",
        "AbcLoader",
        "ModelLoader",
        "CameraLoader",
        "RigLoader",
        "FBXLoader"
    }
    host = registered_host()
    for container in host.ls():
        loader = container['loader']

        print(container['loader'])

        if loader in ["MayaAsciiLoader",
                      "AbcLoader",
                      "ModelLoader",
                      "CameraLoader",
                      "RigLoader",
                      "FBXLoader"]:
        if loader in old_loaders:
            log.info(
                "Converting legacy container loader {} to "
                "ReferenceLoader: {}".format(loader, container["objectName"])
            )
            cmds.setAttr(container["objectName"] + ".loader",
                         "ReferenceLoader", type="string")

@@ -2951,7 +2953,7 @@ def _get_render_instances():
        list: list of instances

    """
    objectset = cmds.ls("*.id", long=True, type="objectSet",
    objectset = cmds.ls("*.id", long=True, exactType="objectSet",
                        recursive=True, objectsOnly=True)

    instances = []
|
|||
|
||||
|
||||
def set_colorspace():
|
||||
"""Set Colorspace from project configuration
|
||||
"""
|
||||
"""Set Colorspace from project configuration"""
|
||||
|
||||
# set color spaces for rendering space and view transforms
|
||||
def _colormanage(**kwargs):
|
||||
"""Wrapper around `cmds.colorManagementPrefs`.
|
||||
|
||||
This logs errors instead of raising an error so color management
|
||||
settings get applied as much as possible.
|
||||
|
||||
"""
|
||||
assert len(kwargs) == 1, "Must receive one keyword argument"
|
||||
try:
|
||||
cmds.colorManagementPrefs(edit=True, **kwargs)
|
||||
log.debug("Setting Color Management Preference: {}".format(kwargs))
|
||||
except RuntimeError as exc:
|
||||
log.error(exc)
|
||||
|
||||
project_name = os.getenv("AVALON_PROJECT")
|
||||
project_name = get_current_project_name()
|
||||
imageio = get_project_settings(project_name)["maya"]["imageio"]
|
||||
|
||||
# ocio compatibility variables
|
||||
ocio_v2_maya_version = 2022
|
||||
maya_version = int(cmds.about(version=True))
|
||||
ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version
|
||||
is_ocio_set = bool(os.environ.get("OCIO"))
|
||||
|
||||
root_dict = {}
|
||||
use_workfile_settings = imageio.get("workfile", {}).get("enabled")
|
||||
|
||||
if use_workfile_settings:
|
||||
root_dict = imageio["workfile"]
|
||||
else:
|
||||
# TODO: deprecated code from 3.15.5 - remove
|
||||
# Maya 2022+ introduces new OCIO v2 color management settings that
|
||||
# can override the old color management preferences. OpenPype has
|
||||
|
|
@@ -3290,40 +3277,63 @@ def set_colorspace():
        if not isinstance(root_dict, dict):
            msg = "set_colorspace(): argument should be dictionary"
            log.error(msg)
            return

    else:
        root_dict = imageio["workfile"]
    # backward compatibility
    # TODO: deprecated code from 3.15.5 - remove with deprecated code above
    view_name = root_dict.get("viewTransform")
    if view_name is None:
        view_name = root_dict.get("viewName")

    log.debug(">> root_dict: {}".format(pformat(root_dict)))
    if not root_dict:
        return

    if root_dict:
        # enable color management
        cmds.colorManagementPrefs(e=True, cmEnabled=True)
        cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True)
    # set color spaces for rendering space and view transforms
    def _colormanage(**kwargs):
        """Wrapper around `cmds.colorManagementPrefs`.

        # backward compatibility
        # TODO: deprecated code from 3.15.5 - refactor to use new settings
        view_name = root_dict.get("viewTransform")
        if view_name is None:
            view_name = root_dict.get("viewName")
        This logs errors instead of raising an error so color management
        settings get applied as much as possible.

        if use_ocio_v2:
            # Use Maya 2022+ default OCIO v2 config
        """
        assert len(kwargs) == 1, "Must receive one keyword argument"
        try:
            cmds.colorManagementPrefs(edit=True, **kwargs)
            log.debug("Setting Color Management Preference: {}".format(kwargs))
        except RuntimeError as exc:
            log.error(exc)

    # enable color management
    cmds.colorManagementPrefs(edit=True, cmEnabled=True)
    cmds.colorManagementPrefs(edit=True, ocioRulesEnabled=True)

    if use_ocio_v2:
        log.info("Using Maya OCIO v2")
        if not is_ocio_set:
            # Set the Maya 2022+ default OCIO v2 config file path
            log.info("Setting default Maya OCIO v2 config")
            cmds.colorManagementPrefs(edit=True, configFilePath="")
            # Note: Setting "" as value also sets this default however
            # introduces a bug where launching a file on startup will prompt
            # to save the empty scene before it, so we set using the path.
            # This value has been the same for 2022, 2023 and 2024
            path = "<MAYA_RESOURCES>/OCIO-configs/Maya2022-default/config.ocio"
            cmds.colorManagementPrefs(edit=True, configFilePath=path)

            # set rendering space and view transform
            _colormanage(renderingSpaceName=root_dict["renderSpace"])
            _colormanage(viewName=view_name)
            _colormanage(displayName=root_dict["displayName"])
        else:
        # set rendering space and view transform
        _colormanage(renderingSpaceName=root_dict["renderSpace"])
        _colormanage(viewName=view_name)
        _colormanage(displayName=root_dict["displayName"])
    else:
        log.info("Using Maya OCIO v1 (legacy)")
        if not is_ocio_set:
            # Set the Maya default config file path
            log.info("Setting default Maya OCIO v1 legacy config")
            cmds.colorManagementPrefs(edit=True, configFilePath="legacy")

            # set rendering space and view transform
            _colormanage(renderingSpaceName=root_dict["renderSpace"])
            _colormanage(viewTransformName=view_name)
        # set rendering space and view transform
        _colormanage(renderingSpaceName=root_dict["renderSpace"])
        _colormanage(viewTransformName=view_name)


@contextlib.contextmanager
@@ -2,6 +2,7 @@ import os
import errno
import logging
import contextlib
import shutil

from maya import utils, cmds, OpenMaya
import maya.api.OpenMaya as om

@@ -113,6 +114,9 @@ class MayaHost(HostBase, IWorkfileHost, ILoadHost):
        register_event_callback("taskChanged", on_task_changed)
        register_event_callback("workfile.open.before", before_workfile_open)
        register_event_callback("workfile.save.before", before_workfile_save)
        register_event_callback(
            "workfile.save.before", workfile_save_before_xgen
        )
        register_event_callback("workfile.save.after", after_workfile_save)

    def open_workfile(self, filepath):
@@ -480,18 +484,16 @@ def on_init():
    # Force load objExport plug-in (requested by artists)
    cmds.loadPlugin("objExport", quiet=True)

    from .customize import (
        override_component_mask_commands,
        override_toolbox_ui
    )
    safe_deferred(override_component_mask_commands)

    launch_workfiles = os.environ.get("WORKFILES_STARTUP")

    if launch_workfiles:
        safe_deferred(host_tools.show_workfiles)

    if not lib.IS_HEADLESS:
        launch_workfiles = os.environ.get("WORKFILES_STARTUP")
        if launch_workfiles:
            safe_deferred(host_tools.show_workfiles)

        from .customize import (
            override_component_mask_commands,
            override_toolbox_ui
        )
        safe_deferred(override_component_mask_commands)
        safe_deferred(override_toolbox_ui)
@@ -549,37 +551,29 @@ def on_save():
    Any transform of a mesh, without an existing ID, is given one
    automatically on file save.
    """

    log.info("Running callback on save..")
    # remove lockfile if users jumps over from one scene to another
    _remove_workfile_lock()

    # # Update current task for the current scene
    # update_task_from_path(cmds.file(query=True, sceneName=True))

    # Generate ids of the current context on nodes in the scene
    nodes = lib.get_id_required_nodes(referenced_nodes=False)
    for node, new_id in lib.generate_ids(nodes):
        lib.set_id(node, new_id, overwrite=False)


def _update_render_layer_observers():
    # Helper to trigger update for all renderlayer observer logic
    lib.remove_render_layer_observer()
    lib.add_render_layer_observer()
    lib.add_render_layer_change_observer()


def on_open():
    """On scene open let's assume the containers have changed."""

    from qtpy import QtWidgets
    from openpype.widgets import popup

    cmds.evalDeferred(
        "from openpype.hosts.maya.api import lib;"
        "lib.remove_render_layer_observer()")
    cmds.evalDeferred(
        "from openpype.hosts.maya.api import lib;"
        "lib.add_render_layer_observer()")
    cmds.evalDeferred(
        "from openpype.hosts.maya.api import lib;"
        "lib.add_render_layer_change_observer()")
    # # Update current task for the current scene
    # update_task_from_path(cmds.file(query=True, sceneName=True))
    utils.executeDeferred(_update_render_layer_observers)

    # Validate FPS after update_task_from_path to
    # ensure it is using correct FPS for the asset
@@ -590,10 +584,7 @@ def on_open():
        log.warning("Scene has outdated content.")

        # Find maya main window
        top_level_widgets = {w.objectName(): w for w in
                             QtWidgets.QApplication.topLevelWidgets()}
        parent = top_level_widgets.get("MayaWindow", None)

        parent = lib.get_main_window()
        if parent is None:
            log.info("Skipping outdated content pop-up "
                     "because Maya window can't be found.")
@@ -618,16 +609,9 @@ def on_new():
    """Set project resolution and fps when create a new file"""
    log.info("Running callback on new..")
    with lib.suspended_refresh():
        cmds.evalDeferred(
            "from openpype.hosts.maya.api import lib;"
            "lib.remove_render_layer_observer()")
        cmds.evalDeferred(
            "from openpype.hosts.maya.api import lib;"
            "lib.add_render_layer_observer()")
        cmds.evalDeferred(
            "from openpype.hosts.maya.api import lib;"
            "lib.add_render_layer_change_observer()")
        lib.set_context_settings()

    utils.executeDeferred(_update_render_layer_observers)
    _remove_workfile_lock()
@@ -681,6 +665,91 @@ def before_workfile_save(event):
    create_workspace_mel(workdir_path, project_name)


def workfile_save_before_xgen(event):
    """Manage Xgen external files when switching context.

    Xgen has various external files that needs to be unique and relative to the
    workfile, so we need to copy and potentially overwrite these files when
    switching context.

    Args:
        event (Event) - openpype/lib/events.py
    """
    if not cmds.pluginInfo("xgenToolkit", query=True, loaded=True):
        return

    import xgenm

    current_work_dir = legacy_io.Session["AVALON_WORKDIR"].replace("\\", "/")
    expected_work_dir = event.data["workdir_path"].replace("\\", "/")
    if current_work_dir == expected_work_dir:
        return

    palettes = cmds.ls(type="xgmPalette", long=True)
    if not palettes:
        return

    transfers = []
    overwrites = []
    attribute_changes = {}
    attrs = ["xgFileName", "xgBaseFile"]
    for palette in palettes:
        sanitized_palette = palette.replace("|", "")
        project_path = xgenm.getAttr("xgProjectPath", sanitized_palette)
        _, maya_extension = os.path.splitext(event.data["filename"])

        for attr in attrs:
            node_attr = "{}.{}".format(palette, attr)
            attr_value = cmds.getAttr(node_attr)

            if not attr_value:
                continue

            source = os.path.join(project_path, attr_value)

            attr_value = event.data["filename"].replace(
                maya_extension,
                "__{}{}".format(
                    sanitized_palette.replace(":", "__"),
                    os.path.splitext(attr_value)[1]
                )
            )
            target = os.path.join(expected_work_dir, attr_value)

            transfers.append((source, target))
            attribute_changes[node_attr] = attr_value

        relative_path = xgenm.getAttr(
            "xgDataPath", sanitized_palette
        ).split(os.pathsep)[0]
        absolute_path = relative_path.replace("${PROJECT}", project_path)
        for root, _, files in os.walk(absolute_path):
            for f in files:
                source = os.path.join(root, f).replace("\\", "/")
                target = source.replace(project_path, expected_work_dir + "/")
                transfers.append((source, target))
                if os.path.exists(target):
                    overwrites.append(target)

    # Ask user about overwriting files.
    if overwrites:
        log.warning(
            "WARNING! Potential loss of data.\n\n"
            "Found duplicate Xgen files in new context.\n{}".format(
                "\n".join(overwrites)
            )
        )
        return

    for source, destination in transfers:
        if not os.path.exists(os.path.dirname(destination)):
            os.makedirs(os.path.dirname(destination))
        shutil.copy(source, destination)

    for attribute, value in attribute_changes.items():
        cmds.setAttr(attribute, value, type="string")


def after_workfile_save(event):
    workfile_name = event["filename"]
    if (
@@ -1,5 +1,4 @@
import os
import re

from maya import cmds
@@ -15,7 +15,6 @@ import contextlib
from maya import cmds
from maya.app.renderSetup.model import renderSetup

# from colorbleed.maya import lib
from .lib import pairwise
@@ -272,7 +272,12 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin):
            return

        roots = cmds.sets(container, q=True)
        ref_node = get_reference_node(roots)
        ref_node = None
        try:
            ref_node = get_reference_node(roots)
        except AssertionError as e:
            self.log.info(e.args[0])

        nodes_to_parent = []
        for root in roots:
            if ref_node:
@@ -104,25 +104,40 @@ class CollectInstances(pyblish.api.ContextPlugin):

            # Define nice label
            name = cmds.ls(objset, long=False)[0]  # use short name
            label = "{0} ({1})".format(name,
                                       data["asset"])
            label = "{0} ({1})".format(name, data["asset"])

            # Convert frame values to integers
            for attr_name in (
                "handleStart", "handleEnd", "frameStart", "frameEnd",
            ):
                value = data.get(attr_name)
                if value is not None:
                    data[attr_name] = int(value)

            # Append start frame and end frame to label if present
            if "frameStart" and "frameEnd" in data:
            if "frameStart" in data and "frameEnd" in data:
                # Take handles from context if not set locally on the instance
                for key in ["handleStart", "handleEnd"]:
                    if key not in data:
                        data[key] = context.data[key]
                        value = context.data[key]
                        if value is not None:
                            value = int(value)
                        data[key] = value

                data["frameStartHandle"] = data["frameStart"] - data["handleStart"]  # noqa: E501
                data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"]  # noqa: E501
                data["frameStartHandle"] = int(
                    data["frameStart"] - data["handleStart"]
                )
                data["frameEndHandle"] = int(
                    data["frameEnd"] + data["handleEnd"]
                )

                label += " [{0}-{1}]".format(int(data["frameStartHandle"]),
                                             int(data["frameEndHandle"]))
                label += " [{0}-{1}]".format(
                    data["frameStartHandle"], data["frameEndHandle"]
                )

            instance.data["label"] = label

            instance.data.update(data)
            self.log.debug("{}".format(instance.data))

            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
@@ -17,10 +17,12 @@ class CollectMayaSceneTime(pyblish.api.InstancePlugin):

    def process(self, instance):
        instance.data.update({
            "frameStart": cmds.playbackOptions(query=True, minTime=True),
            "frameEnd": cmds.playbackOptions(query=True, maxTime=True),
            "frameStartHandle": cmds.playbackOptions(query=True,
                                                     animationStartTime=True),
            "frameEndHandle": cmds.playbackOptions(query=True,
                                                   animationEndTime=True)
            "frameStart": int(
                cmds.playbackOptions(query=True, minTime=True)),
            "frameEnd": int(
                cmds.playbackOptions(query=True, maxTime=True)),
            "frameStartHandle": int(
                cmds.playbackOptions(query=True, animationStartTime=True)),
            "frameEndHandle": int(
                cmds.playbackOptions(query=True, animationEndTime=True))
        })
@@ -2,7 +2,6 @@
import os

from maya import cmds
# import maya.mel as mel
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
@@ -261,8 +261,8 @@ class ExtractPlayblast(publish.Extractor):
            "ext": capture_preset["Codec"]["compression"],
            "files": collected_files,
            "stagingDir": stagingdir,
            "frameStart": start,
            "frameEnd": end,
            "frameStart": int(start),
            "frameEnd": int(end),
            "fps": fps,
            "tags": tags,
            "camera_name": camera_node_name
@@ -8,7 +8,10 @@ from openpype.client import get_last_version_by_subset_id
from openpype import style
from openpype.pipeline import legacy_io
from openpype.tools.utils.lib import qt_app_context
from openpype.hosts.maya.api.lib import assign_look_by_version
from openpype.hosts.maya.api.lib import (
    assign_look_by_version,
    get_main_window
)

from maya import cmds
# old api for MFileIO

@@ -297,9 +300,7 @@ def show():
        pass

    # Get Maya main window
    top_level_widgets = QtWidgets.QApplication.topLevelWidgets()
    mainwindow = next(widget for widget in top_level_widgets
                      if widget.objectName() == "MayaWindow")
    mainwindow = get_main_window()

    with qt_app_context():
        window = MayaLookAssignerWindow(parent=mainwindow)
@@ -86,7 +86,7 @@ class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
        self._uninstall_menu()
        self._deregister_callbacks()

    def has_unsaved_changes(self):
    def workfile_has_unsaved_changes(self):

        if not substance_painter.project.is_open():
            return False
@@ -34,6 +34,18 @@ class CreateTextures(Creator):
        if not substance_painter.project.is_open():
            raise CreatorError("Can't create a Texture Set instance without "
                               "an open project.")
        # Transfer settings from pre create to instance
        creator_attributes = instance_data.setdefault(
            "creator_attributes", dict())
        for key in [
            "exportPresetUrl",
            "exportFileFormat",
            "exportSize",
            "exportPadding",
            "exportDilationDistance"
        ]:
            if key in pre_create_data:
                creator_attributes[key] = pre_create_data[key]

        instance = self.create_instance_in_context(subset_name,
                                                   instance_data)
@@ -20,7 +20,7 @@ class SaveCurrentWorkfile(pyblish.api.ContextPlugin):
        if context.data["currentFile"] != current:
            raise KnownPublishError("Workfile has changed during publishing!")

        if host.has_unsaved_changes():
        if host.workfile_has_unsaved_changes():
            self.log.info("Saving current file: {}".format(current))
            host.save_workfile()
        else:
@@ -146,7 +146,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
            "FTRACK_SERVER",
            "AVALON_APP_NAME",
            "OPENPYPE_USERNAME",
            "OPENPYPE_VERSION",
            "OPENPYPE_SG_USER"
        ]
@@ -1,7 +1,6 @@
from copy import deepcopy
import re
import os
import sys
import json
import platform
import contextlib

@@ -237,12 +236,13 @@ def get_data_subprocess(config_path, data_type):
return json.loads(return_json_data)


def compatible_python():
"""Only 3.9 or higher can directly use PyOpenColorIO in ocio_wrapper"""
compatible = False
if sys.version[0] == 3 and sys.version[1] >= 9:
compatible = True
return compatible
def compatibility_check():
"""Making sure PyOpenColorIO is importable"""
try:
import PyOpenColorIO  # noqa: F401
except ImportError:
return False
return True


def get_ocio_config_colorspaces(config_path):

@@ -257,12 +257,15 @@ def get_ocio_config_colorspaces(config_path):
Returns:
dict: colorspace and family in couple
"""
if compatible_python():
from ..scripts.ocio_wrapper import _get_colorspace_data
return _get_colorspace_data(config_path)
else:
if not compatibility_check():
# python environment is not compatible with PyOpenColorIO
# needs to be run in subprocess
return get_colorspace_data_subprocess(config_path)

from openpype.scripts.ocio_wrapper import _get_colorspace_data

return _get_colorspace_data(config_path)


def get_colorspace_data_subprocess(config_path):
"""Get colorspace data via subprocess

@@ -290,12 +293,15 @@ def get_ocio_config_views(config_path):
Returns:
dict: `display/viewer` and viewer data
"""
if compatible_python():
from ..scripts.ocio_wrapper import _get_views_data
return _get_views_data(config_path)
else:
if not compatibility_check():
# python environment is not compatible with PyOpenColorIO
# needs to be run in subprocess
return get_views_data_subprocess(config_path)

from openpype.scripts.ocio_wrapper import _get_views_data

return _get_views_data(config_path)


def get_views_data_subprocess(config_path):
"""Get viewers data via subprocess
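
Beyond restructuring, this change fixes a real defect: the removed compatible_python inspected sys.version, which is a string such as "3.9.13 (...)", so sys.version[0] == 3 compares a character to an integer and is always False (sys.version_info was presumably the intended attribute). The new compatibility_check probes the actual requirement instead. The difference in a few lines:

    import sys

    # removed heuristic: character-to-int comparison, never True
    old_check = sys.version[0] == 3 and sys.version[1] >= 9  # always False

    # added probe: ask directly whether PyOpenColorIO can be imported here
    def compatibility_check():
        try:
            import PyOpenColorIO  # noqa: F401
        except ImportError:
            return False
        return True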
@@ -4,6 +4,11 @@ from .constants import (
PRE_CREATE_THUMBNAIL_KEY,
)

from .utils import (
get_last_versions_for_instances,
get_next_versions_for_instances,
)

from .subset_name import (
TaskNotSetError,
get_subset_name_template,

@@ -46,6 +51,9 @@ __all__ = (
"DEFAULT_SUBSET_TEMPLATE",
"PRE_CREATE_THUMBNAIL_KEY",

"get_last_versions_for_instances",
"get_next_versions_for_instances",

"TaskNotSetError",
"get_subset_name_template",
"get_subset_name",
@ -1122,10 +1122,10 @@ class CreatedInstance:
|
|||
|
||||
@property
|
||||
def creator_attribute_defs(self):
|
||||
"""Attribute defintions defined by creator plugin.
|
||||
"""Attribute definitions defined by creator plugin.
|
||||
|
||||
Returns:
|
||||
List[AbstractAttrDef]: Attribute defitions.
|
||||
List[AbstractAttrDef]: Attribute definitions.
|
||||
"""
|
||||
|
||||
return self.creator_attributes.attr_defs
|
||||
|
|
|
|||
|
|
@@ -1,4 +1,3 @@
import os
import copy
import collections


@@ -21,6 +20,7 @@ from openpype.pipeline.plugin_discover import (
)

from .subset_name import get_subset_name
from .utils import get_next_versions_for_instances
from .legacy_create import LegacyCreator
@@ -483,6 +483,27 @@ class BaseCreator:
thumbnail_path
)

def get_next_versions_for_instances(self, instances):
"""Prepare next versions for instances.

This is helper method to receive next possible versions for instances.
It is using context information on instance to receive them, 'asset'
and 'subset'.

Output will contain version by each instance id.

Args:
instances (list[CreatedInstance]): Instances for which to get next
versions.

Returns:
Dict[str, int]: Next versions by instance id.
"""

return get_next_versions_for_instances(
self.create_context.project_name, instances
)


class Creator(BaseCreator):
"""Creator that has more information for artist to show in UI.
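
The new BaseCreator helper is a thin convenience wrapper: it forwards the creator's project name and the given instances to the shared utility, so a creator subclass never has to query the database layer directly. A hypothetical use inside a creator (the "expectedVersion" key is an invented example, not an existing setting):

    # sketch: somewhere inside a Creator subclass
    next_versions = self.get_next_versions_for_instances(instances)
    for instance in instances:
        version = next_versions.get(instance.id)
        if version is not None:
            # store the anticipated publish version on the instance
            instance.data["expectedVersion"] = version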
@@ -74,12 +74,12 @@ class LegacyCreator(object):
if not plugin_settings:
return

print(">>> We have preset for {}".format(plugin_name))
cls.log.debug(">>> We have preset for {}".format(plugin_name))
for option, value in plugin_settings.items():
if option == "enabled" and value is False:
print(" - is disabled by preset")
cls.log.debug(" - is disabled by preset")
else:
print(" - setting `{}`: `{}`".format(option, value))
cls.log.debug(" - setting `{}`: `{}`".format(option, value))
setattr(cls, option, value)

def process(self):
122
openpype/pipeline/create/utils.py
Normal file

@@ -0,0 +1,122 @@
import collections

from openpype.client import get_assets, get_subsets, get_last_versions


def get_last_versions_for_instances(
project_name, instances, use_value_for_missing=False
):
"""Get last versions for instances by their asset and subset name.

Args:
project_name (str): Project name.
instances (list[CreatedInstance]): Instances to get next versions for.
use_value_for_missing (Optional[bool]): Missing values are replaced
with negative value if True. Otherwise None is used. -2 is used
for instances without filled asset or subset name. -1 is used
for missing entities.

Returns:
dict[str, Union[int, None]]: Last versions by instance id.
"""

output = {
instance.id: -1 if use_value_for_missing else None
for instance in instances
}
subset_names_by_asset_name = collections.defaultdict(set)
instances_by_hierarchy = {}
for instance in instances:
asset_name = instance.data.get("asset")
subset_name = instance.subset_name
if not asset_name or not subset_name:
if use_value_for_missing:
output[instance.id] = -2
continue

(
instances_by_hierarchy
.setdefault(asset_name, {})
.setdefault(subset_name, [])
.append(instance)
)
subset_names_by_asset_name[asset_name].add(subset_name)

subset_names = set()
for names in subset_names_by_asset_name.values():
subset_names |= names

if not subset_names:
return output

asset_docs = get_assets(
project_name,
asset_names=subset_names_by_asset_name.keys(),
fields=["name", "_id"]
)
asset_names_by_id = {
asset_doc["_id"]: asset_doc["name"]
for asset_doc in asset_docs
}
if not asset_names_by_id:
return output

subset_docs = get_subsets(
project_name,
asset_ids=asset_names_by_id.keys(),
subset_names=subset_names,
fields=["_id", "name", "parent"]
)
subset_docs_by_id = {}
for subset_doc in subset_docs:
# Filter subset docs by subset names under parent
asset_id = subset_doc["parent"]
asset_name = asset_names_by_id[asset_id]
subset_name = subset_doc["name"]
if subset_name not in subset_names_by_asset_name[asset_name]:
continue
subset_docs_by_id[subset_doc["_id"]] = subset_doc

if not subset_docs_by_id:
return output

last_versions_by_subset_id = get_last_versions(
project_name,
subset_docs_by_id.keys(),
fields=["name", "parent"]
)
for subset_id, version_doc in last_versions_by_subset_id.items():
subset_doc = subset_docs_by_id[subset_id]
asset_id = subset_doc["parent"]
asset_name = asset_names_by_id[asset_id]
_instances = instances_by_hierarchy[asset_name][subset_doc["name"]]
for instance in _instances:
output[instance.id] = version_doc["name"]

return output


def get_next_versions_for_instances(project_name, instances):
"""Get next versions for instances by their asset and subset name.

Args:
project_name (str): Project name.
instances (list[CreatedInstance]): Instances to get next versions for.

Returns:
dict[str, Union[int, None]]: Next versions by instance id. Version is
'None' if instance has no asset or subset name.
"""

last_versions = get_last_versions_for_instances(
project_name, instances, True)

output = {}
for instance_id, version in last_versions.items():
if version == -2:
output[instance_id] = None
elif version == -1:
output[instance_id] = 1
else:
output[instance_id] = version + 1
return output
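
For orientation, the two public helpers of the new module are meant to be fed the active project name and the CreatedInstance objects held by a create context; both return a mapping keyed by instance id. A usage sketch (the project name and the instances variable are placeholders):

    from openpype.pipeline.create import (
        get_last_versions_for_instances,
        get_next_versions_for_instances,
    )

    project_name = "my_project"  # placeholder
    last_versions = get_last_versions_for_instances(project_name, instances)
    next_versions = get_next_versions_for_instances(project_name, instances)

    for instance in instances:
        print(instance.id, last_versions[instance.id], "->", next_versions[instance.id])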
@@ -4,6 +4,8 @@ from .utils import (
LoadError,
IncompatibleLoaderError,
InvalidRepresentationContext,
LoaderSwitchNotImplementedError,
LoaderNotFoundError,

get_repres_contexts,
get_contexts_for_repre_docs,

@@ -55,6 +57,8 @@ __all__ = (
"LoadError",
"IncompatibleLoaderError",
"InvalidRepresentationContext",
"LoaderSwitchNotImplementedError",
"LoaderNotFoundError",

"get_repres_contexts",
"get_contexts_for_repre_docs",
@@ -79,6 +79,16 @@ class InvalidRepresentationContext(ValueError):
pass


class LoaderSwitchNotImplementedError(NotImplementedError):
"""Error when `switch` is used with Loader that has no implementation."""
pass


class LoaderNotFoundError(RuntimeError):
"""Error when Loader plugin for a loader name is not found."""
pass


def get_repres_contexts(representation_ids, dbcon=None):
"""Return parenthood context for representation.
@@ -432,7 +442,10 @@ def remove_container(container):

Loader = _get_container_loader(container)
if not Loader:
raise RuntimeError("Can't remove container. See log for details.")
raise LoaderNotFoundError(
"Can't remove container because loader '{}' was not found."
.format(container.get("loader"))
)

loader = Loader(get_representation_context(container["representation"]))
return loader.remove(container)

@@ -480,7 +493,10 @@ def update_container(container, version=-1):
# Run update on the Loader for this container
Loader = _get_container_loader(container)
if not Loader:
raise RuntimeError("Can't update container. See log for details.")
raise LoaderNotFoundError(
"Can't update container because loader '{}' was not found."
.format(container.get("loader"))
)

loader = Loader(get_representation_context(container["representation"]))
return loader.update(container, new_representation)
@@ -502,15 +518,18 @@ def switch_container(container, representation, loader_plugin=None):
loader_plugin = _get_container_loader(container)

if not loader_plugin:
raise RuntimeError("Can't switch container. See log for details.")
raise LoaderNotFoundError(
"Can't switch container because loader '{}' was not found."
.format(container.get("loader"))
)

if not hasattr(loader_plugin, "switch"):
# Backwards compatibility (classes without switch support
# might be better to just have "switch" raise NotImplementedError
# on the base class of Loader)
raise RuntimeError("Loader '{}' does not support 'switch'".format(
loader_plugin.label
))
raise LoaderSwitchNotImplementedError(
"Loader {} does not support 'switch'".format(loader_plugin.label)
)

# Get the new representation to switch to
project_name = legacy_io.active_project()

@@ -520,7 +539,11 @@ def switch_container(container, representation, loader_plugin=None):

new_context = get_representation_context(new_representation)
if not is_compatible_loader(loader_plugin, new_context):
raise AssertionError("Must be compatible Loader")
raise IncompatibleLoaderError(
"Loader {} is incompatible with {}".format(
loader_plugin.__name__, new_context["subset"]["name"]
)
)

loader = loader_plugin(new_context)
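
With dedicated exception classes, callers of remove_container, update_container and switch_container can separate the expected failure modes from unexpected ones instead of matching on a generic RuntimeError. A caller-side sketch, mirroring what the SwitchAssetDialog hunk further below does (container, repre_doc and loader are placeholders):

    from openpype.pipeline.load import (
        switch_container,
        IncompatibleLoaderError,
        LoaderNotFoundError,
        LoaderSwitchNotImplementedError,
    )

    try:
        switch_container(container, repre_doc, loader)
    except (
        LoaderSwitchNotImplementedError,
        IncompatibleLoaderError,
        LoaderNotFoundError,
    ) as exc:
        # expected failures carry a message that is safe to show to the user
        print("Switch failed: {}".format(exc))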
@@ -1,4 +1,5 @@
import copy
import platform
from collections import defaultdict

from qtpy import QtWidgets, QtCore, QtGui

@@ -83,6 +84,12 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
self.templates = self._get_templates(self.anatomy)
for name, _ in self.templates.items():
dropdown.addItem(name)
if self.templates and platform.system() == "Darwin":
# fix macos QCombobox Style
dropdown.setItemDelegate(QtWidgets.QStyledItemDelegate())
# update combo box length to longest entry
longest_key = max(self.templates.keys(), key=len)
dropdown.setMinimumContentsLength(len(longest_key))

template_label = QtWidgets.QLabel()
template_label.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))

@@ -123,7 +130,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
input_layout.addRow("Representations", repre_checkboxes_layout)

btn_delivery = QtWidgets.QPushButton("Deliver")
btn_delivery.setEnabled(bool(dropdown.currentText()))
btn_delivery.setEnabled(False)

progress_bar = QtWidgets.QProgressBar(self)
progress_bar.setMinimum = 0

@@ -162,6 +169,15 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
btn_delivery.clicked.connect(self.deliver)
dropdown.currentIndexChanged.connect(self._update_template_value)

if not self.dropdown.count():
self.text_area.setVisible(True)
error_message = (
"No Delivery Templates found!\n"
"Add Template in [project_anatomy/templates/delivery]"
)
self.text_area.setText(error_message)
self.log.error(error_message.replace("\n", " "))

def deliver(self):
"""Main method to loop through all selected representations"""
self.progress_bar.setVisible(True)

@@ -308,14 +324,17 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
self.files_selected, self.size_selected = \
self._get_counts(selected_repres)
self.selected_label.setText(self._prepare_label())
# update delivery button state if any templates found
if self.dropdown.count():
self.btn_delivery.setEnabled(bool(selected_repres))

def _update_template_value(self, _index=None):
"""Sets template value to label after selection in dropdown."""
name = self.dropdown.currentText()
template_value = self.templates.get(name)
if template_value:
self.btn_delivery.setEnabled(True)
self.template_label.setText(template_value)
self.btn_delivery.setEnabled(bool(self._get_selected_repres()))

def _update_progress(self, uploaded):
"""Update progress bar after each repre copied."""
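
Taken together, the dialog hunks make the Deliver button's enabled state depend on two conditions at once: a delivery template must exist and be selected, and at least one representation must be checked. Collapsed into a single hypothetical predicate (the helper name is invented; the attributes are the ones used in the hunks):

    def _delivery_button_should_be_enabled(self):
        has_template = bool(
            self.dropdown.count()
            and self.templates.get(self.dropdown.currentText())
        )
        has_selection = bool(self._get_selected_repres())
        return has_template and has_selection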
@@ -51,7 +51,8 @@ class ExtractBurnin(publish.Extractor):
"aftereffects",
"photoshop",
"flame",
"houdini"
"houdini",
"max"
# "resolve"
]
@@ -49,6 +49,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"maya",
"blender",
"houdini",
"max",
"shell",
"hiero",
"premiere",
@@ -262,7 +262,8 @@
],
"hosts": [
"maya",
"houdini"
"houdini",
"max"
],
"task_types": [],
"task_names": [],
@@ -421,9 +421,9 @@
},
"workfile": {
"enabled": false,
"renderSpace": "ACEScg",
"displayName": "sRGB",
"viewName": "ACES 1.0 SDR-video"
"renderSpace": "ACES - ACEScg",
"displayName": "ACES",
"viewName": "sRGB"
},
"colorManagementPreference_v2": {
"enabled": true,
@@ -19,6 +19,9 @@ from openpype.pipeline.load import (
switch_container,
get_repres_contexts,
loaders_from_repre_context,
LoaderSwitchNotImplementedError,
IncompatibleLoaderError,
LoaderNotFoundError
)

from .widgets import (

@@ -1298,19 +1301,28 @@ class SwitchAssetDialog(QtWidgets.QDialog):
else:
repre_doc = repres_by_name[container_repre_name]

error = None
try:
switch_container(container, repre_doc, loader)
except (
LoaderSwitchNotImplementedError,
IncompatibleLoaderError,
LoaderNotFoundError,
) as exc:
error = str(exc)
except Exception:
msg = (
error = (
"Switch asset failed. "
"Search console log for more details."
)
if error is not None:
log.warning((
"Couldn't switch asset."
"See traceback for more information."
)
log.warning(msg, exc_info=True)
), exc_info=True)
dialog = QtWidgets.QMessageBox(self)
dialog.setWindowTitle("Switch asset failed")
dialog.setText(
"Switch asset failed. Search console log for more details"
)
dialog.setText(error)
dialog.exec_()

self.switched.emit()
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.15.12-nightly.1"
__version__ = "3.15.12-nightly.3"