[Automated] Merged develop into main

This commit is contained in:
ynbot 2023-07-08 05:30:38 +02:00 committed by GitHub
commit d46ecd84f1
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
55 changed files with 866 additions and 248 deletions

View file

@ -35,6 +35,7 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.15.12-nightly.3
- 3.15.12-nightly.2
- 3.15.12-nightly.1
- 3.15.11
@ -134,7 +135,6 @@ body:
- 3.14.4
- 3.14.4-nightly.4
- 3.14.4-nightly.3
- 3.14.4-nightly.2
validations:
required: true
- type: dropdown

View file

@ -49,7 +49,7 @@ def deprecated(new_destination):
@deprecated("openpype.pipeline.publish.get_errored_instances_from_context")
def get_errored_instances_from_context(context):
def get_errored_instances_from_context(context, plugin=None):
"""
Deprecated:
Since 3.14.* will be removed in 3.16.* or later.
@ -57,7 +57,7 @@ def get_errored_instances_from_context(context):
from openpype.pipeline.publish import get_errored_instances_from_context
return get_errored_instances_from_context(context)
return get_errored_instances_from_context(context, plugin=plugin)
@deprecated("openpype.pipeline.publish.get_errored_plugins_from_context")
@ -97,11 +97,9 @@ class RepairAction(pyblish.api.Action):
# Get the errored instances
self.log.info("Finding failed instances..")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
for instance in instances:
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
for instance in errored_instances:
plugin.repair(instance)

View file

@ -12,13 +12,13 @@ class SelectInvalidAction(pyblish.api.Action):
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -18,15 +18,13 @@ class SelectInvalidAction(pyblish.api.Action):
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -17,15 +17,13 @@ class SelectInvalidAction(pyblish.api.Action):
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
@ -44,3 +42,42 @@ class SelectInvalidAction(pyblish.api.Action):
node.setCurrent(True)
else:
self.log.info("No invalid nodes found.")
class SelectROPAction(pyblish.api.Action):
"""Select ROP.
It's used to select the associated ROPs with the errored instances.
"""
label = "Select ROP"
on = "failed" # This action is only available on a failed plug-in
icon = "mdi.cursor-default-click"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding ROP nodes..")
rop_nodes = list()
for instance in errored_instances:
node_path = instance.data.get("instance_node")
if not node_path:
continue
node = hou.node(node_path)
if not node:
continue
rop_nodes.append(node)
hou.clearAllSelected()
if rop_nodes:
self.log.info("Selecting ROP nodes: {}".format(
", ".join(node.path() for node in rop_nodes)
))
for node in rop_nodes:
node.setSelected(True)
node.setCurrent(True)
else:
self.log.info("No ROP nodes found.")

View file

@ -633,23 +633,8 @@ def evalParmNoFrame(node, parm, pad_character="#"):
def get_color_management_preferences():
"""Get default OCIO preferences"""
data = {
"config": hou.Color.ocio_configPath()
return {
"config": hou.Color.ocio_configPath(),
"display": hou.Color.ocio_defaultDisplay(),
"view": hou.Color.ocio_defaultView()
}
# Get default display and view from OCIO
display = hou.Color.ocio_defaultDisplay()
disp_regex = re.compile(r"^(?P<name>.+-)(?P<display>.+)$")
disp_match = disp_regex.match(display)
view = hou.Color.ocio_defaultView()
view_regex = re.compile(r"^(?P<name>.+- )(?P<view>.+)$")
view_match = view_regex.match(view)
data.update({
"display": disp_match.group("display"),
"view": view_match.group("view")
})
return data

View file

@ -93,7 +93,7 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
import hdefereval # noqa, hdefereval is only available in ui mode
hdefereval.executeDeferred(creator_node_shelves.install)
def has_unsaved_changes(self):
def workfile_has_unsaved_changes(self):
return hou.hipFile.hasUnsavedChanges()
def get_workfile_extensions(self):

View file

@ -19,7 +19,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin):
"Collected filename from current scene name."
)
if host.has_unsaved_changes():
if host.workfile_has_unsaved_changes():
self.log.info("Saving current file: {}".format(current_file))
host.save_workfile(current_file)
else:

View file

@ -1,6 +1,12 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
from openpype.hosts.houdini.api.action import (
SelectInvalidAction,
SelectROPAction,
)
import hou
class ValidateSopOutputNode(pyblish.api.InstancePlugin):
@ -19,6 +25,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
families = ["pointcache", "vdbcache"]
hosts = ["houdini"]
label = "Validate Output Node"
actions = [SelectROPAction, SelectInvalidAction]
def process(self, instance):
@ -31,9 +38,6 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
@classmethod
def get_invalid(cls, instance):
import hou
output_node = instance.data.get("output_node")
if output_node is None:
@ -43,7 +47,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
"Ensure a valid SOP output path is set." % node.path()
)
return [node.path()]
return [node]
# Output node must be a Sop node.
if not isinstance(output_node, hou.SopNode):
@ -53,7 +57,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
"instead found category type: %s"
% (output_node.path(), output_node.type().category().name())
)
return [output_node.path()]
return [output_node]
# For the sake of completeness also assert the category type
# is Sop to avoid potential edge case scenarios even though
@ -73,11 +77,11 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
except hou.Error as exc:
cls.log.error("Cook failed: %s" % exc)
cls.log.error(output_node.errors()[0])
return [output_node.path()]
return [output_node]
# Ensure the output node has at least Geometry data
if not output_node.geometry():
cls.log.error(
"Output node `%s` has no geometry data." % output_node.path()
)
return [output_node.path()]
return [output_node]

View file

@ -78,6 +78,14 @@ def read(container) -> dict:
value.startswith(JSON_PREFIX):
with contextlib.suppress(json.JSONDecodeError):
value = json.loads(value[len(JSON_PREFIX):])
# default value behavior
# convert maxscript boolean values
if value == "true":
value = True
elif value == "false":
value = False
data[key.strip()] = value
data["instance_node"] = container.Name
@ -284,6 +292,21 @@ def get_max_version():
return max_info[7]
@contextlib.contextmanager
def viewport_camera(camera):
original = rt.viewport.getCamera()
if not original:
# if there is no original camera
# use the current camera as original
original = rt.getNodeByName(camera)
review_camera = rt.getNodeByName(camera)
try:
rt.viewport.setCamera(review_camera)
yield
finally:
rt.viewport.setCamera(original)
def set_timeline(frameStart, frameEnd):
"""Set frame range for timeline editor in Max
"""

View file

@ -42,6 +42,10 @@ MS_CUSTOM_ATTRIB = """attributes "openPypeData"
(
handle_name = node_to_name c
node_ref = NodeTransformMonitor node:c
idx = finditem list_node.items handle_name
if idx do (
continue
)
append temp_arr handle_name
append i_node_arr node_ref
)

View file

@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating review in Max."""
from openpype.hosts.max.api import plugin
from openpype.lib import BoolDef, EnumDef, NumberDef
class CreateReview(plugin.MaxCreator):
"""Review in 3dsMax"""
identifier = "io.openpype.creators.max.review"
label = "Review"
family = "review"
icon = "video-camera"
def create(self, subset_name, instance_data, pre_create_data):
instance_data["imageFormat"] = pre_create_data.get("imageFormat")
instance_data["keepImages"] = pre_create_data.get("keepImages")
instance_data["percentSize"] = pre_create_data.get("percentSize")
instance_data["rndLevel"] = pre_create_data.get("rndLevel")
super(CreateReview, self).create(
subset_name,
instance_data,
pre_create_data)
def get_pre_create_attr_defs(self):
attrs = super(CreateReview, self).get_pre_create_attr_defs()
image_format_enum = [
"bmp", "cin", "exr", "jpg", "hdr", "rgb", "png",
"rla", "rpf", "dds", "sgi", "tga", "tif", "vrimg"
]
rndLevel_enum = [
"smoothhighlights", "smooth", "facethighlights",
"facet", "flat", "litwireframe", "wireframe", "box"
]
return attrs + [
BoolDef("keepImages",
label="Keep Image Sequences",
default=False),
EnumDef("imageFormat",
image_format_enum,
default="png",
label="Image Format Options"),
NumberDef("percentSize",
label="Percent of Output",
default=100,
minimum=1,
decimals=0),
EnumDef("rndLevel",
rndLevel_enum,
default="smoothhighlights",
label="Preference")
]

View file

@ -0,0 +1,92 @@
# dont forget getting the focal length for burnin
"""Collect Review"""
import pyblish.api
from pymxs import runtime as rt
from openpype.lib import BoolDef
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
class CollectReview(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
"""Collect Review Data for Preview Animation"""
order = pyblish.api.CollectorOrder + 0.02
label = "Collect Review Data"
hosts = ['max']
families = ["review"]
def process(self, instance):
nodes = instance.data["members"]
focal_length = None
camera_name = None
for node in nodes:
if rt.classOf(node) in rt.Camera.classes:
camera_name = node.name
focal_length = node.fov
attr_values = self.get_attr_values_from_data(instance.data)
data = {
"review_camera": camera_name,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"fps": instance.context.data["fps"],
"dspGeometry": attr_values.get("dspGeometry"),
"dspShapes": attr_values.get("dspShapes"),
"dspLights": attr_values.get("dspLights"),
"dspCameras": attr_values.get("dspCameras"),
"dspHelpers": attr_values.get("dspHelpers"),
"dspParticles": attr_values.get("dspParticles"),
"dspBones": attr_values.get("dspBones"),
"dspBkg": attr_values.get("dspBkg"),
"dspGrid": attr_values.get("dspGrid"),
"dspSafeFrame": attr_values.get("dspSafeFrame"),
"dspFrameNums": attr_values.get("dspFrameNums")
}
# Enable ftrack functionality
instance.data.setdefault("families", []).append('ftrack')
burnin_members = instance.data.setdefault("burninDataMembers", {})
burnin_members["focalLength"] = focal_length
self.log.debug(f"data:{data}")
instance.data.update(data)
@classmethod
def get_attribute_defs(cls):
return [
BoolDef("dspGeometry",
label="Geometry",
default=True),
BoolDef("dspShapes",
label="Shapes",
default=False),
BoolDef("dspLights",
label="Lights",
default=False),
BoolDef("dspCameras",
label="Cameras",
default=False),
BoolDef("dspHelpers",
label="Helpers",
default=False),
BoolDef("dspParticles",
label="Particle Systems",
default=True),
BoolDef("dspBones",
label="Bone Objects",
default=False),
BoolDef("dspBkg",
label="Background",
default=True),
BoolDef("dspGrid",
label="Active Grid",
default=False),
BoolDef("dspSafeFrame",
label="Safe Frames",
default=False),
BoolDef("dspFrameNums",
label="Frame Numbers",
default=False)
]

View file

@ -0,0 +1,102 @@
import os
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version
class ExtractReviewAnimation(publish.Extractor):
"""
Extract Review by Review Animation
"""
order = pyblish.api.ExtractorOrder + 0.001
label = "Extract Review Animation"
hosts = ["max"]
families = ["review"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
ext = instance.data.get("imageFormat")
filename = "{0}..{1}".format(instance.name, ext)
start = int(instance.data["frameStart"])
end = int(instance.data["frameEnd"])
fps = int(instance.data["fps"])
filepath = os.path.join(staging_dir, filename)
filepath = filepath.replace("\\", "/")
filenames = self.get_files(
instance.name, start, end, ext)
self.log.debug(
"Writing Review Animation to"
" '%s' to '%s'" % (filename, staging_dir))
review_camera = instance.data["review_camera"]
with viewport_camera(review_camera):
preview_arg = self.set_preview_arg(
instance, filepath, start, end, fps)
rt.execute(preview_arg)
tags = ["review"]
if not instance.data.get("keepImages"):
tags.append("delete")
self.log.debug("Performing Extraction ...")
representation = {
"name": instance.data["imageFormat"],
"ext": instance.data["imageFormat"],
"files": filenames,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"tags": tags,
"preview": True,
"camera_name": review_camera
}
self.log.debug(f"{representation}")
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)
def get_files(self, filename, start, end, ext):
file_list = []
for frame in range(int(start), int(end) + 1):
actual_name = "{}.{:04}.{}".format(
filename, frame, ext)
file_list.append(actual_name)
return file_list
def set_preview_arg(self, instance, filepath,
start, end, fps):
job_args = list()
default_option = f'CreatePreview filename:"{filepath}"'
job_args.append(default_option)
frame_option = f"outputAVI:false start:{start} end:{end} fps:{fps}" # noqa
job_args.append(frame_option)
rndLevel = instance.data.get("rndLevel")
if rndLevel:
option = f"rndLevel:#{rndLevel}"
job_args.append(option)
options = [
"percentSize", "dspGeometry", "dspShapes",
"dspLights", "dspCameras", "dspHelpers", "dspParticles",
"dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
]
for key in options:
enabled = instance.data.get(key)
if enabled:
job_args.append(f"{key}:{enabled}")
if get_max_version() == 2024:
# hardcoded for current stage
auto_play_option = "autoPlay:false"
job_args.append(auto_play_option)
job_str = " ".join(job_args)
self.log.debug(job_str)
return job_str

View file

@ -0,0 +1,91 @@
import os
import tempfile
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import publish
from openpype.hosts.max.api.lib import viewport_camera, get_max_version
class ExtractThumbnail(publish.Extractor):
"""
Extract Thumbnail for Review
"""
order = pyblish.api.ExtractorOrder
label = "Extract Thumbnail"
hosts = ["max"]
families = ["review"]
def process(self, instance):
# TODO: Create temp directory for thumbnail
# - this is to avoid "override" of source file
tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
self.log.debug(
f"Create temp directory {tmp_staging} for thumbnail"
)
fps = int(instance.data["fps"])
frame = int(instance.data["frameStart"])
instance.context.data["cleanupFullPaths"].append(tmp_staging)
filename = "{name}_thumbnail..png".format(**instance.data)
filepath = os.path.join(tmp_staging, filename)
filepath = filepath.replace("\\", "/")
thumbnail = self.get_filename(instance.name, frame)
self.log.debug(
"Writing Thumbnail to"
" '%s' to '%s'" % (filename, tmp_staging))
review_camera = instance.data["review_camera"]
with viewport_camera(review_camera):
preview_arg = self.set_preview_arg(
instance, filepath, fps, frame)
rt.execute(preview_arg)
representation = {
"name": "thumbnail",
"ext": "png",
"files": thumbnail,
"stagingDir": tmp_staging,
"thumbnail": True
}
self.log.debug(f"{representation}")
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(representation)
def get_filename(self, filename, target_frame):
thumbnail_name = "{}_thumbnail.{:04}.png".format(
filename, target_frame
)
return thumbnail_name
def set_preview_arg(self, instance, filepath, fps, frame):
job_args = list()
default_option = f'CreatePreview filename:"{filepath}"'
job_args.append(default_option)
frame_option = f"outputAVI:false start:{frame} end:{frame} fps:{fps}" # noqa
job_args.append(frame_option)
rndLevel = instance.data.get("rndLevel")
if rndLevel:
option = f"rndLevel:#{rndLevel}"
job_args.append(option)
options = [
"percentSize", "dspGeometry", "dspShapes",
"dspLights", "dspCameras", "dspHelpers", "dspParticles",
"dspBones", "dspBkg", "dspGrid", "dspSafeFrame", "dspFrameNums"
]
for key in options:
enabled = instance.data.get(key)
if enabled:
job_args.append(f"{key}:{enabled}")
if get_max_version() == 2024:
# hardcoded for current stage
auto_play_option = "autoPlay:false"
job_args.append(auto_play_option)
job_str = " ".join(job_args)
self.log.debug(job_str)
return job_str

View file

@ -0,0 +1,48 @@
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
PublishValidationError
)
from openpype.hosts.max.api.lib import get_frame_range, set_timeline
class ValidateAnimationTimeline(pyblish.api.InstancePlugin):
"""
Validates Animation Timeline for Preview Animation in Max
"""
label = "Animation Timeline for Review"
order = ValidateContentsOrder
families = ["review"]
hosts = ["max"]
actions = [RepairAction]
def process(self, instance):
frame_range = get_frame_range()
frame_start_handle = frame_range["frameStart"] - int(
frame_range["handleStart"]
)
frame_end_handle = frame_range["frameEnd"] + int(
frame_range["handleEnd"]
)
if rt.animationRange.start != frame_start_handle or (
rt.animationRange.end != frame_end_handle
):
raise PublishValidationError("Incorrect animation timeline "
"set for preview animation.. "
"\nYou can use repair action to "
"the correct animation timeline")
@classmethod
def repair(cls, instance):
frame_range = get_frame_range()
frame_start_handle = frame_range["frameStart"] - int(
frame_range["handleStart"]
)
frame_end_handle = frame_range["frameEnd"] + int(
frame_range["handleEnd"]
)
set_timeline(frame_start_handle, frame_end_handle)

View file

@ -11,7 +11,7 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
families = ["camera"]
families = ["camera", "review"]
hosts = ["max"]
label = "Camera Contents"
camera_type = ["$Free_Camera", "$Target_Camera",

View file

@ -13,7 +13,8 @@ class ValidateMaxContents(pyblish.api.InstancePlugin):
order = pyblish.api.ValidatorOrder
families = ["camera",
"maxScene",
"maxrender"]
"maxrender",
"review"]
hosts = ["max"]
label = "Max Scene Contents"

View file

@ -111,15 +111,13 @@ class SelectInvalidAction(pyblish.api.Action):
except ImportError:
raise ImportError("Current host is not Maya")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -3,7 +3,6 @@
import os
from pprint import pformat
import sys
import platform
import uuid
import re
@ -1523,7 +1522,15 @@ def set_attribute(attribute, value, node):
cmds.addAttr(node, longName=attribute, **kwargs)
node_attr = "{}.{}".format(node, attribute)
if "dataType" in kwargs:
enum_type = cmds.attributeQuery(attribute, node=node, enum=True)
if enum_type and value_type == "str":
enum_string_values = cmds.attributeQuery(
attribute, node=node, listEnum=True
)[0].split(":")
cmds.setAttr(
"{}.{}".format(node, attribute), enum_string_values.index(value)
)
elif "dataType" in kwargs:
attr_type = kwargs["dataType"]
cmds.setAttr(node_attr, value, type=attr_type)
else:
@ -2811,19 +2818,22 @@ def get_attr_in_layer(attr, layer):
def fix_incompatible_containers():
"""Backwards compatibility: old containers to use new ReferenceLoader"""
old_loaders = {
"MayaAsciiLoader",
"AbcLoader",
"ModelLoader",
"CameraLoader",
"RigLoader",
"FBXLoader"
}
host = registered_host()
for container in host.ls():
loader = container['loader']
print(container['loader'])
if loader in ["MayaAsciiLoader",
"AbcLoader",
"ModelLoader",
"CameraLoader",
"RigLoader",
"FBXLoader"]:
if loader in old_loaders:
log.info(
"Converting legacy container loader {} to "
"ReferenceLoader: {}".format(loader, container["objectName"])
)
cmds.setAttr(container["objectName"] + ".loader",
"ReferenceLoader", type="string")
@ -2951,7 +2961,7 @@ def _get_render_instances():
list: list of instances
"""
objectset = cmds.ls("*.id", long=True, type="objectSet",
objectset = cmds.ls("*.id", long=True, exactType="objectSet",
recursive=True, objectsOnly=True)
instances = []

View file

@ -274,12 +274,14 @@ class ARenderProducts:
"Unsupported renderer {}".format(self.renderer)
)
# Note: When this attribute is never set (e.g. on maya launch) then
# this can return None even though it is a string attribute
prefix = self._get_attr(prefix_attr)
if not prefix:
# Fall back to scene name by default
log.debug("Image prefix not set, using <Scene>")
file_prefix = "<Scene>"
log.warning("Image prefix not set, using <Scene>")
prefix = "<Scene>"
return prefix

View file

@ -484,18 +484,16 @@ def on_init():
# Force load objExport plug-in (requested by artists)
cmds.loadPlugin("objExport", quiet=True)
from .customize import (
override_component_mask_commands,
override_toolbox_ui
)
safe_deferred(override_component_mask_commands)
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
if launch_workfiles:
safe_deferred(host_tools.show_workfiles)
if not lib.IS_HEADLESS:
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
if launch_workfiles:
safe_deferred(host_tools.show_workfiles)
from .customize import (
override_component_mask_commands,
override_toolbox_ui
)
safe_deferred(override_component_mask_commands)
safe_deferred(override_toolbox_ui)
@ -553,37 +551,29 @@ def on_save():
Any transform of a mesh, without an existing ID, is given one
automatically on file save.
"""
log.info("Running callback on save..")
# remove lockfile if users jumps over from one scene to another
_remove_workfile_lock()
# # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True))
# Generate ids of the current context on nodes in the scene
nodes = lib.get_id_required_nodes(referenced_nodes=False)
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
def _update_render_layer_observers():
# Helper to trigger update for all renderlayer observer logic
lib.remove_render_layer_observer()
lib.add_render_layer_observer()
lib.add_render_layer_change_observer()
def on_open():
"""On scene open let's assume the containers have changed."""
from qtpy import QtWidgets
from openpype.widgets import popup
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.remove_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_change_observer()")
# # Update current task for the current scene
# update_task_from_path(cmds.file(query=True, sceneName=True))
utils.executeDeferred(_update_render_layer_observers)
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
@ -594,10 +584,7 @@ def on_open():
log.warning("Scene has outdated content.")
# Find maya main window
top_level_widgets = {w.objectName(): w for w in
QtWidgets.QApplication.topLevelWidgets()}
parent = top_level_widgets.get("MayaWindow", None)
parent = lib.get_main_window()
if parent is None:
log.info("Skipping outdated content pop-up "
"because Maya window can't be found.")
@ -622,16 +609,9 @@ def on_new():
"""Set project resolution and fps when create a new file"""
log.info("Running callback on new..")
with lib.suspended_refresh():
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.remove_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_observer()")
cmds.evalDeferred(
"from openpype.hosts.maya.api import lib;"
"lib.add_render_layer_change_observer()")
lib.set_context_settings()
utils.executeDeferred(_update_render_layer_observers)
_remove_workfile_lock()

View file

@ -1,5 +1,4 @@
import os
import re
from maya import cmds

View file

@ -15,7 +15,6 @@ import contextlib
from maya import cmds
from maya.app.renderSetup.model import renderSetup
# from colorbleed.maya import lib
from .lib import pairwise

View file

@ -2,7 +2,6 @@
import os
from maya import cmds
# import maya.mel as mel
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib

View file

@ -8,7 +8,10 @@ from openpype.client import get_last_version_by_subset_id
from openpype import style
from openpype.pipeline import legacy_io
from openpype.tools.utils.lib import qt_app_context
from openpype.hosts.maya.api.lib import assign_look_by_version
from openpype.hosts.maya.api.lib import (
assign_look_by_version,
get_main_window
)
from maya import cmds
# old api for MFileIO
@ -297,9 +300,7 @@ def show():
pass
# Get Maya main window
top_level_widgets = QtWidgets.QApplication.topLevelWidgets()
mainwindow = next(widget for widget in top_level_widgets
if widget.objectName() == "MayaWindow")
mainwindow = get_main_window()
with qt_app_context():
window = MayaLookAssignerWindow(parent=mainwindow)

View file

@ -25,15 +25,13 @@ class SelectInvalidAction(pyblish.api.Action):
except ImportError:
raise ImportError("Current host is not Nuke")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:

View file

@ -553,7 +553,9 @@ def add_write_node_legacy(name, **kwarg):
w = nuke.createNode(
"Write",
"name {}".format(name))
"name {}".format(name),
inpanel=False
)
w["file"].setValue(kwarg["file"])
@ -589,7 +591,9 @@ def add_write_node(name, file_path, knobs, **kwarg):
w = nuke.createNode(
"Write",
"name {}".format(name))
"name {}".format(name),
inpanel=False
)
w["file"].setValue(file_path)
@ -1192,8 +1196,10 @@ def create_prenodes(
# create node
now_node = nuke.createNode(
nodeclass, "name {}".format(name))
now_node.hideControlPanel()
nodeclass,
"name {}".format(name),
inpanel=False
)
# add for dependency linking
for_dependency[name] = {
@ -1317,12 +1323,17 @@ def create_write_node(
input_name = str(input.name()).replace(" ", "")
# if connected input node was defined
prev_node = nuke.createNode(
"Input", "name {}".format(input_name))
"Input",
"name {}".format(input_name),
inpanel=False
)
else:
# generic input node connected to nothing
prev_node = nuke.createNode(
"Input", "name {}".format("rgba"))
prev_node.hideControlPanel()
"Input",
"name {}".format("rgba"),
inpanel=False
)
# creating pre-write nodes `prenodes`
last_prenode = create_prenodes(
@ -1342,15 +1353,13 @@ def create_write_node(
imageio_writes["knobs"],
**data
)
write_node.hideControlPanel()
# connect to previous node
now_node.setInput(0, prev_node)
# switch actual node to previous
prev_node = now_node
now_node = nuke.createNode("Output", "name Output1")
now_node.hideControlPanel()
now_node = nuke.createNode("Output", "name Output1", inpanel=False)
# connect to previous node
now_node.setInput(0, prev_node)
@ -1517,8 +1526,10 @@ def create_write_node_legacy(
else:
# generic input node connected to nothing
prev_node = nuke.createNode(
"Input", "name {}".format("rgba"))
prev_node.hideControlPanel()
"Input",
"name {}".format("rgba"),
inpanel=False
)
# creating pre-write nodes `prenodes`
if prenodes:
for node in prenodes:
@ -1530,8 +1541,10 @@ def create_write_node_legacy(
# create node
now_node = nuke.createNode(
klass, "name {}".format(pre_node_name))
now_node.hideControlPanel()
klass,
"name {}".format(pre_node_name),
inpanel=False
)
# add data to knob
for _knob in knobs:
@ -1561,14 +1574,18 @@ def create_write_node_legacy(
if isinstance(dependent, (tuple or list)):
for i, node_name in enumerate(dependent):
input_node = nuke.createNode(
"Input", "name {}".format(node_name))
input_node.hideControlPanel()
"Input",
"name {}".format(node_name),
inpanel=False
)
now_node.setInput(1, input_node)
elif isinstance(dependent, str):
input_node = nuke.createNode(
"Input", "name {}".format(node_name))
input_node.hideControlPanel()
"Input",
"name {}".format(node_name),
inpanel=False
)
now_node.setInput(0, input_node)
else:
@ -1583,15 +1600,13 @@ def create_write_node_legacy(
"inside_{}".format(name),
**_data
)
write_node.hideControlPanel()
# connect to previous node
now_node.setInput(0, prev_node)
# switch actual node to previous
prev_node = now_node
now_node = nuke.createNode("Output", "name Output1")
now_node.hideControlPanel()
now_node = nuke.createNode("Output", "name Output1", inpanel=False)
# connect to previous node
now_node.setInput(0, prev_node)

View file

@ -66,8 +66,6 @@ class AlembicCameraLoader(load.LoaderPlugin):
object_name, file),
inpanel=False
)
# hide property panel
camera_node.hideControlPanel()
camera_node.forceValidate()
camera_node["frame_rate"].setValue(float(fps))

View file

@ -144,10 +144,9 @@ class LoadClip(plugin.NukeLoader):
# Create the Loader with the filename path set
read_node = nuke.createNode(
"Read",
"name {}".format(read_name))
# hide property panel
read_node.hideControlPanel()
"name {}".format(read_name),
inpanel=False
)
# to avoid multiple undo steps for rest of process
# we will switch off undo-ing

View file

@ -88,10 +88,9 @@ class LoadEffects(load.LoaderPlugin):
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# hide property panel
GN.hideControlPanel()
"name {}_1".format(object_name),
inpanel=False
)
# adding content to the group node
with GN:

View file

@ -89,10 +89,9 @@ class LoadEffectsInputProcess(load.LoaderPlugin):
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# hide property panel
GN.hideControlPanel()
"name {}_1".format(object_name),
inpanel=False
)
# adding content to the group node
with GN:

View file

@ -119,10 +119,9 @@ class LoadImage(load.LoaderPlugin):
with viewer_update_and_undo_stop():
r = nuke.createNode(
"Read",
"name {}".format(read_name))
# hide property panel
r.hideControlPanel()
"name {}".format(read_name),
inpanel=False
)
r["file"].setValue(file)

View file

@ -65,9 +65,6 @@ class AlembicModelLoader(load.LoaderPlugin):
inpanel=False
)
# hide property panel
model_node.hideControlPanel()
model_node.forceValidate()
# Ensure all items are imported and selected.

View file

@ -70,10 +70,9 @@ class LinkAsGroup(load.LoaderPlugin):
# P = nuke.nodes.LiveGroup("file {}".format(file))
P = nuke.createNode(
"Precomp",
"file {}".format(file))
# hide property panel
P.hideControlPanel()
"file {}".format(file),
inpanel=False
)
# Set colorspace defined in version data
colorspace = context["version"]["data"].get("colorspace", None)

View file

@ -2,6 +2,7 @@ import os
import pyblish.api
import clique
from openpype.pipeline import PublishXmlValidationError
from openpype.pipeline.publish import get_errored_instances_from_context
class RepairActionBase(pyblish.api.Action):
@ -11,14 +12,7 @@ class RepairActionBase(pyblish.api.Action):
@staticmethod
def get_instance(context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
return pyblish.api.instances_by_plugin(failed, plugin)
return get_errored_instances_from_context(context, plugin=plugin)
def repair_knob(self, instances, state):
for instance in instances:

View file

@ -27,15 +27,13 @@ class SelectInvalidAction(pyblish.api.Action):
except ImportError:
raise ImportError("Current host is not Resolve")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid clips..")
invalid = list()
for instance in instances:
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):

View file

@ -86,7 +86,7 @@ class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
self._uninstall_menu()
self._deregister_callbacks()
def has_unsaved_changes(self):
def workfile_has_unsaved_changes(self):
if not substance_painter.project.is_open():
return False

View file

@ -34,6 +34,18 @@ class CreateTextures(Creator):
if not substance_painter.project.is_open():
raise CreatorError("Can't create a Texture Set instance without "
"an open project.")
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
for key in [
"exportPresetUrl",
"exportFileFormat",
"exportSize",
"exportPadding",
"exportDilationDistance"
]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
instance = self.create_instance_in_context(subset_name,
instance_data)

View file

@ -20,7 +20,7 @@ class SaveCurrentWorkfile(pyblish.api.ContextPlugin):
if context.data["currentFile"] != current:
raise KnownPublishError("Workfile has changed during publishing!")
if host.has_unsaved_changes():
if host.workfile_has_unsaved_changes():
self.log.info("Saving current file: {}".format(current))
host.save_workfile()
else:

View file

@ -146,7 +146,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
"FTRACK_SERVER",
"AVALON_APP_NAME",
"OPENPYPE_USERNAME",
"OPENPYPE_VERSION",
"OPENPYPE_SG_USER"
]

View file

@ -4,6 +4,11 @@ from .constants import (
PRE_CREATE_THUMBNAIL_KEY,
)
from .utils import (
get_last_versions_for_instances,
get_next_versions_for_instances,
)
from .subset_name import (
TaskNotSetError,
get_subset_name_template,
@ -46,6 +51,9 @@ __all__ = (
"DEFAULT_SUBSET_TEMPLATE",
"PRE_CREATE_THUMBNAIL_KEY",
"get_last_versions_for_instances",
"get_next_versions_for_instances",
"TaskNotSetError",
"get_subset_name_template",
"get_subset_name",

View file

@ -1122,10 +1122,10 @@ class CreatedInstance:
@property
def creator_attribute_defs(self):
"""Attribute defintions defined by creator plugin.
"""Attribute definitions defined by creator plugin.
Returns:
List[AbstractAttrDef]: Attribute defitions.
List[AbstractAttrDef]: Attribute definitions.
"""
return self.creator_attributes.attr_defs

View file

@ -21,6 +21,7 @@ from openpype.pipeline.plugin_discover import (
)
from .subset_name import get_subset_name
from .utils import get_next_versions_for_instances
from .legacy_create import LegacyCreator
@ -483,6 +484,27 @@ class BaseCreator:
thumbnail_path
)
def get_next_versions_for_instances(self, instances):
    """Prepare next versions for instances.

    Helper to receive the next possible version for each instance. The
    lookup uses context information stored on the instance, 'asset' and
    'subset'.

    Args:
        instances (list[CreatedInstance]): Instances for which to get next
            versions.

    Returns:
        Dict[str, int]: Next versions by instance id.
    """

    project_name = self.create_context.project_name
    return get_next_versions_for_instances(project_name, instances)
class Creator(BaseCreator):
"""Creator that has more information for artist to show in UI.

View file

@ -74,12 +74,12 @@ class LegacyCreator(object):
if not plugin_settings:
return
print(">>> We have preset for {}".format(plugin_name))
cls.log.debug(">>> We have preset for {}".format(plugin_name))
for option, value in plugin_settings.items():
if option == "enabled" and value is False:
print(" - is disabled by preset")
cls.log.debug(" - is disabled by preset")
else:
print(" - setting `{}`: `{}`".format(option, value))
cls.log.debug(" - setting `{}`: `{}`".format(option, value))
setattr(cls, option, value)
def process(self):

View file

@ -0,0 +1,122 @@
import collections
from openpype.client import get_assets, get_subsets, get_last_versions
def get_last_versions_for_instances(
    project_name, instances, use_value_for_missing=False
):
    """Query the last existing version for each instance.

    Instances are matched to version documents through the 'asset' value
    in their data and their subset name.

    Args:
        project_name (str): Project name.
        instances (list[CreatedInstance]): Instances to get last versions
            for.
        use_value_for_missing (Optional[bool]): When True, sentinel
            integers replace 'None': -2 marks an instance without filled
            asset or subset name and -1 marks missing database entities.
            When False, 'None' is used for all missing cases.

    Returns:
        dict[str, Union[int, None]]: Last versions by instance id.
    """

    missing_value = -1 if use_value_for_missing else None
    output = {instance.id: missing_value for instance in instances}

    # Group instances by asset name and subset name, collecting which
    # subset names must be queried under each asset along the way
    subset_names_by_asset_name = collections.defaultdict(set)
    instances_by_hierarchy = {}
    for instance in instances:
        asset_name = instance.data.get("asset")
        subset_name = instance.subset_name
        if not asset_name or not subset_name:
            # Instance cannot be matched to any entity
            if use_value_for_missing:
                output[instance.id] = -2
            continue

        by_subset_name = instances_by_hierarchy.setdefault(asset_name, {})
        by_subset_name.setdefault(subset_name, []).append(instance)
        subset_names_by_asset_name[asset_name].add(subset_name)

    # All subset names used by any instance
    subset_names = set()
    for names in subset_names_by_asset_name.values():
        subset_names.update(names)

    if not subset_names:
        return output

    asset_docs = get_assets(
        project_name,
        asset_names=subset_names_by_asset_name.keys(),
        fields=["name", "_id"]
    )
    asset_names_by_id = {
        asset_doc["_id"]: asset_doc["name"]
        for asset_doc in asset_docs
    }
    if not asset_names_by_id:
        return output

    subset_docs = get_subsets(
        project_name,
        asset_ids=asset_names_by_id.keys(),
        subset_names=subset_names,
        fields=["_id", "name", "parent"]
    )
    # Keep only subsets whose name was actually requested under their
    # parent asset (the query is a cross-product of ids and names)
    subset_docs_by_id = {}
    for subset_doc in subset_docs:
        parent_asset_name = asset_names_by_id[subset_doc["parent"]]
        if subset_doc["name"] in subset_names_by_asset_name[parent_asset_name]:
            subset_docs_by_id[subset_doc["_id"]] = subset_doc

    if not subset_docs_by_id:
        return output

    last_versions_by_subset_id = get_last_versions(
        project_name,
        subset_docs_by_id.keys(),
        fields=["name", "parent"]
    )
    # Propagate found version numbers back to every matching instance
    for subset_id, version_doc in last_versions_by_subset_id.items():
        subset_doc = subset_docs_by_id[subset_id]
        asset_name = asset_names_by_id[subset_doc["parent"]]
        matching = instances_by_hierarchy[asset_name][subset_doc["name"]]
        for instance in matching:
            output[instance.id] = version_doc["name"]
    return output
def get_next_versions_for_instances(project_name, instances):
    """Get next versions for instances by their asset and subset name.

    Args:
        project_name (str): Project name.
        instances (list[CreatedInstance]): Instances to get next versions
            for.

    Returns:
        dict[str, Union[int, None]]: Next versions by instance id. Version
            is 'None' if instance has no asset or subset name.
    """

    last_versions = get_last_versions_for_instances(
        project_name, instances, True)

    output = {}
    for instance_id, last_version in last_versions.items():
        if last_version == -2:
            # Missing asset/subset name -> version cannot be determined
            next_version = None
        elif last_version == -1:
            # No published version exists yet -> start at 1
            next_version = 1
        else:
            next_version = last_version + 1
        output[instance_id] = next_version
    return output

View file

@ -157,6 +157,8 @@ def deliver_single_file(
delivery_path = delivery_path.replace("..", ".")
# Make sure path is valid for all platforms
delivery_path = os.path.normpath(delivery_path.replace("\\", "/"))
# Remove newlines from the end of the string to avoid OSError during copy
delivery_path = delivery_path.rstrip()
delivery_folder = os.path.dirname(delivery_path)
if not os.path.exists(delivery_folder):

View file

@ -577,12 +577,14 @@ def remote_publish(log, close_plugin_name=None, raise_error=False):
raise RuntimeError(error_message)
def get_errored_instances_from_context(context):
def get_errored_instances_from_context(context, plugin=None):
"""Collect failed instances from pyblish context.
Args:
context (pyblish.api.Context): Publish context where we're looking
for failed instances.
plugin (pyblish.api.Plugin): If provided then only consider errors
related to that plug-in.
Returns:
List[pyblish.lib.Instance]: Instances which failed during processing.
@ -594,6 +596,9 @@ def get_errored_instances_from_context(context):
# When instance is None we are on the "context" result
continue
if plugin is not None and result.get("plugin") != plugin:
continue
if result["error"]:
instances.append(result["instance"])

View file

@ -234,11 +234,9 @@ class RepairAction(pyblish.api.Action):
# Get the errored instances
self.log.debug("Finding failed instances..")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
for instance in instances:
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
for instance in errored_instances:
self.log.debug(
"Attempting repair for instance: {} ...".format(instance)
)

View file

@ -1,4 +1,5 @@
import copy
import platform
from collections import defaultdict
from qtpy import QtWidgets, QtCore, QtGui
@ -83,6 +84,12 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
self.templates = self._get_templates(self.anatomy)
for name, _ in self.templates.items():
dropdown.addItem(name)
if self.templates and platform.system() == "Darwin":
# fix macos QCombobox Style
dropdown.setItemDelegate(QtWidgets.QStyledItemDelegate())
# update combo box length to longest entry
longest_key = max(self.templates.keys(), key=len)
dropdown.setMinimumContentsLength(len(longest_key))
template_label = QtWidgets.QLabel()
template_label.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
@ -115,7 +122,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
input_layout.addRow("Representations", repre_checkboxes_layout)
btn_delivery = QtWidgets.QPushButton("Deliver")
btn_delivery.setEnabled(bool(dropdown.currentText()))
btn_delivery.setEnabled(False)
progress_bar = QtWidgets.QProgressBar(self)
progress_bar.setMinimum = 0
@ -152,6 +159,15 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
btn_delivery.clicked.connect(self.deliver)
dropdown.currentIndexChanged.connect(self._update_template_value)
if not self.dropdown.count():
self.text_area.setVisible(True)
error_message = (
"No Delivery Templates found!\n"
"Add Template in [project_anatomy/templates/delivery]"
)
self.text_area.setText(error_message)
self.log.error(error_message.replace("\n", " "))
def deliver(self):
"""Main method to loop through all selected representations"""
self.progress_bar.setVisible(True)
@ -287,14 +303,17 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):
self.files_selected, self.size_selected = \
self._get_counts(selected_repres)
self.selected_label.setText(self._prepare_label())
# update delivery button state if any templates found
if self.dropdown.count():
self.btn_delivery.setEnabled(bool(selected_repres))
def _update_template_value(self, _index=None):
"""Sets template value to label after selection in dropdown."""
name = self.dropdown.currentText()
template_value = self.templates.get(name)
if template_value:
self.btn_delivery.setEnabled(True)
self.template_label.setText(template_value)
self.btn_delivery.setEnabled(bool(self._get_selected_repres()))
def _update_progress(self, uploaded):
"""Update progress bar after each repre copied."""

View file

@ -51,7 +51,8 @@ class ExtractBurnin(publish.Extractor):
"aftereffects",
"photoshop",
"flame",
"houdini"
"houdini",
"max"
# "resolve"
]

View file

@ -49,6 +49,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
"maya",
"blender",
"houdini",
"max",
"shell",
"hiero",
"premiere",

View file

@ -262,7 +262,8 @@
],
"hosts": [
"maya",
"houdini"
"houdini",
"max"
],
"task_types": [],
"task_names": [],

View file

@ -1,5 +1,6 @@
import collections
import logging
import itertools
from functools import partial
from qtpy import QtWidgets, QtCore
@ -195,20 +196,17 @@ class SceneInventoryView(QtWidgets.QTreeView):
version_name_by_id[version_doc["_id"]] = \
version_doc["name"]
# Specify version per item to update to
update_items = []
update_versions = []
for item in items:
repre_id = item["representation"]
version_id = version_id_by_repre_id.get(repre_id)
version_name = version_name_by_id.get(version_id)
if version_name is not None:
try:
update_container(item, version_name)
except AssertionError:
self._show_version_error_dialog(
version_name, [item]
)
log.warning("Update failed", exc_info=True)
self.data_changed.emit()
update_items.append(item)
update_versions.append(version_name)
self._update_containers(update_items, update_versions)
update_icon = qtawesome.icon(
"fa.asterisk",
@ -225,16 +223,6 @@ class SceneInventoryView(QtWidgets.QTreeView):
update_to_latest_action = None
if has_outdated or has_loaded_hero_versions:
# update to latest version
def _on_update_to_latest(items):
for item in items:
try:
update_container(item, -1)
except AssertionError:
self._show_version_error_dialog(None, [item])
log.warning("Update failed", exc_info=True)
self.data_changed.emit()
update_icon = qtawesome.icon(
"fa.angle-double-up",
color=DEFAULT_COLOR
@ -245,21 +233,11 @@ class SceneInventoryView(QtWidgets.QTreeView):
menu
)
update_to_latest_action.triggered.connect(
lambda: _on_update_to_latest(items)
lambda: self._update_containers(items, version=-1)
)
change_to_hero = None
if has_available_hero_version:
# change to hero version
def _on_update_to_hero(items):
for item in items:
try:
update_container(item, HeroVersionType(-1))
except AssertionError:
self._show_version_error_dialog('hero', [item])
log.warning("Update failed", exc_info=True)
self.data_changed.emit()
# TODO change icon
change_icon = qtawesome.icon(
"fa.asterisk",
@ -271,7 +249,8 @@ class SceneInventoryView(QtWidgets.QTreeView):
menu
)
change_to_hero.triggered.connect(
lambda: _on_update_to_hero(items)
lambda: self._update_containers(items,
version=HeroVersionType(-1))
)
# set version
@ -740,14 +719,7 @@ class SceneInventoryView(QtWidgets.QTreeView):
if label:
version = versions_by_label[label]
for item in items:
try:
update_container(item, version)
except AssertionError:
self._show_version_error_dialog(version, [item])
log.warning("Update failed", exc_info=True)
# refresh model when done
self.data_changed.emit()
self._update_containers(items, version)
def _show_switch_dialog(self, items):
"""Display Switch dialog"""
@ -782,9 +754,9 @@ class SceneInventoryView(QtWidgets.QTreeView):
Args:
version: str or int or None
"""
if not version:
if version == -1:
version_str = "latest"
elif version == "hero":
elif isinstance(version, HeroVersionType):
version_str = "hero"
elif isinstance(version, int):
version_str = "v{:03d}".format(version)
@ -841,10 +813,43 @@ class SceneInventoryView(QtWidgets.QTreeView):
return
# Trigger update to latest
for item in outdated_items:
try:
update_container(item, -1)
except AssertionError:
self._show_version_error_dialog(None, [item])
log.warning("Update failed", exc_info=True)
self.data_changed.emit()
self._update_containers(outdated_items, version=-1)
def _update_containers(self, items, version):
    """Update items to a given version (or one version per item).

    If at least one item is specified the inventory view is always
    refreshed afterwards, even when some of the updates failed.

    Arguments:
        items (list): Items to update.
        version (int or list): Version to set to. May also be a list
            specifying one version per item. As with `update_container`,
            version -1 sets the latest version and a `HeroVersionType`
            instance sets the hero version.
    """
    if isinstance(version, (list, tuple)):
        # A unique version may be given per item; counts must line up
        assert len(items) == len(version), (
            "Number of items mismatches number of versions: "
            "{} items - {} versions".format(len(items), len(version))
        )
        pairs = list(zip(items, version))
    else:
        # Same version applies to every item
        pairs = [(item, version) for item in items]

    try:
        for item, item_version in pairs:
            try:
                update_container(item, item_version)
            except AssertionError:
                self._show_version_error_dialog(item_version, [item])
                log.warning("Update failed", exc_info=True)
    finally:
        # Always refresh the scene inventory view, even if errors occurred
        self.data_changed.emit()