Merge branch 'develop' into release/3.15.x

# Conflicts:
#	openpype/hosts/aftereffects/api/pipeline.py
#	openpype/hosts/photoshop/api/pipeline.py
#	openpype/hosts/photoshop/plugins/create/create_legacy_image.py
#	openpype/modules/deadline/plugins/publish/submit_publish_job.py
Author: Jakub Trllo
Date: 2022-12-16 12:09:01 +01:00
Commit: d3753fef31
178 changed files with 1707 additions and 676 deletions

View file

@ -30,8 +30,14 @@ def main(ctx):
It wraps different commands together.
"""
if ctx.invoked_subcommand is None:
ctx.invoke(tray)
# Print help if headless mode is used
if os.environ.get("OPENPYPE_HEADLESS_MODE") == "1":
print(ctx.get_help())
sys.exit(0)
else:
ctx.invoke(tray)
@main.command()

View file

@ -10,7 +10,7 @@ from wsrpc_aiohttp import (
WebSocketAsync
)
from Qt import QtCore
from qtpy import QtCore
from openpype.lib import Logger
from openpype.pipeline import legacy_io
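This changeset replaces the Qt.py shim with qtpy across hosts and tools; qtpy resolves to whichever binding is installed at runtime. A quick hedged probe of the resolved binding, mirroring the check further down in this diff:

import importlib
from qtpy import API_NAME

print("Qt binding:", API_NAME)        # e.g. "PySide2" or "PyQt5", depending on the environment
mod = importlib.import_module(API_NAME)
print("Qt path:", mod.__file__)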

View file

@ -7,7 +7,7 @@ import traceback
import logging
from functools import partial
from Qt import QtWidgets
from qtpy import QtWidgets
from openpype.pipeline import install_host
from openpype.modules import ModulesManager

View file

@ -1,5 +1,6 @@
import os
from Qt import QtWidgets
from qtpy import QtWidgets
import pyblish.api

View file

@ -10,7 +10,7 @@ from pathlib import Path
from types import ModuleType
from typing import Dict, List, Optional, Union
from Qt import QtWidgets, QtCore
from qtpy import QtWidgets, QtCore
import bpy
import bpy.utils.previews

View file

@ -32,11 +32,6 @@ class CreateCamera(plugin.Creator):
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
instances.objects.link(camera_obj)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
@ -53,6 +48,11 @@ class CreateCamera(plugin.Creator):
bpy.ops.object.parent_set(keep_transform=True)
else:
plugin.deselect_all()
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
instances.objects.link(camera_obj)
camera_obj.select_set(True)
asset_group.select_set(True)
bpy.context.view_layer.objects.active = asset_group

View file

@ -48,8 +48,14 @@ class BlendLayoutLoader(plugin.AssetLoader):
bpy.data.objects.remove(obj)
def _remove_asset_and_library(self, asset_group):
if not asset_group.get(AVALON_PROPERTY):
return
libpath = asset_group.get(AVALON_PROPERTY).get('libpath')
if not libpath:
return
# Check how many assets use the same library
count = 0
for obj in bpy.data.collections.get(AVALON_CONTAINERS).all_objects:
@ -63,10 +69,12 @@ class BlendLayoutLoader(plugin.AssetLoader):
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(libpath))
bpy.data.libraries.remove(library)
if library:
bpy.data.libraries.remove(library)
def _process(
self, libpath, asset_group, group_name, asset, representation, actions
self, libpath, asset_group, group_name, asset, representation,
actions, anim_instances
):
with bpy.data.libraries.load(
libpath, link=True, relative=False
@ -140,12 +148,12 @@ class BlendLayoutLoader(plugin.AssetLoader):
elif local_obj.type == 'ARMATURE':
plugin.prepare_data(local_obj.data)
if action is not None:
if action:
if local_obj.animation_data is None:
local_obj.animation_data_create()
local_obj.animation_data.action = action
elif (local_obj.animation_data and
local_obj.animation_data.action is not None):
local_obj.animation_data.action):
plugin.prepare_data(
local_obj.animation_data.action)
@ -157,19 +165,26 @@ class BlendLayoutLoader(plugin.AssetLoader):
t.id = local_obj
elif local_obj.type == 'EMPTY':
creator_plugin = get_legacy_creator_by_name("CreateAnimation")
if not creator_plugin:
raise ValueError("Creator plugin \"CreateAnimation\" was "
"not found.")
if (not anim_instances or
(anim_instances and
local_obj.name not in anim_instances.keys())):
avalon = local_obj.get(AVALON_PROPERTY)
if avalon and avalon.get('family') == 'rig':
creator_plugin = get_legacy_creator_by_name(
"CreateAnimation")
if not creator_plugin:
raise ValueError(
"Creator plugin \"CreateAnimation\" was "
"not found.")
legacy_create(
creator_plugin,
name=local_obj.name.split(':')[-1] + "_animation",
asset=asset,
options={"useSelection": False,
"asset_group": local_obj},
data={"dependencies": representation}
)
legacy_create(
creator_plugin,
name=local_obj.name.split(':')[-1] + "_animation",
asset=asset,
options={"useSelection": False,
"asset_group": local_obj},
data={"dependencies": representation}
)
if not local_obj.get(AVALON_PROPERTY):
local_obj[AVALON_PROPERTY] = dict()
@ -272,7 +287,8 @@ class BlendLayoutLoader(plugin.AssetLoader):
avalon_container.objects.link(asset_group)
objects = self._process(
libpath, asset_group, group_name, asset, representation, None)
libpath, asset_group, group_name, asset, representation,
None, None)
for child in asset_group.children:
if child.get(AVALON_PROPERTY):
@ -352,10 +368,20 @@ class BlendLayoutLoader(plugin.AssetLoader):
return
actions = {}
anim_instances = {}
for obj in asset_group.children:
obj_meta = obj.get(AVALON_PROPERTY)
if obj_meta.get('family') == 'rig':
# Get animation instance
collections = list(obj.users_collection)
for c in collections:
avalon = c.get(AVALON_PROPERTY)
if avalon and avalon.get('family') == 'animation':
anim_instances[obj.name] = c.name
break
# Get armature's action
rig = None
for child in obj.children:
if child.type == 'ARMATURE':
@ -384,9 +410,26 @@ class BlendLayoutLoader(plugin.AssetLoader):
# If it is the last object to use that library, remove it
if count == 1:
library = bpy.data.libraries.get(bpy.path.basename(group_libpath))
bpy.data.libraries.remove(library)
if library:
bpy.data.libraries.remove(library)
self._process(str(libpath), asset_group, object_name, actions)
asset = container.get("asset_name").split("_")[0]
self._process(
str(libpath), asset_group, object_name, asset,
str(representation.get("_id")), actions, anim_instances
)
# Link the new objects to the animation collection
for inst in anim_instances.keys():
try:
obj = bpy.data.objects[inst]
bpy.data.collections[anim_instances[inst]].objects.link(obj)
except KeyError:
self.log.info(f"Object {inst} does not exist anymore.")
coll = bpy.data.collections.get(anim_instances[inst])
if (coll):
bpy.data.collections.remove(coll)
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
for child in asset_group.children:

View file

@ -1,6 +1,6 @@
import sys
from Qt import QtWidgets, QtCore, QtGui
from qtpy import QtWidgets, QtCore, QtGui
from openpype.tools.utils import host_tools
from openpype.style import load_stylesheet

View file

@ -6,7 +6,7 @@ import sys
import logging
import pyblish.api
from Qt import QtCore
from qtpy import QtCore
from openpype.lib import (
Logger,

View file

@ -1,7 +1,7 @@
import os
import sys
from Qt import QtCore
from qtpy import QtCore
class PulseThread(QtCore.QThread):

View file

@ -6,10 +6,10 @@ import importlib
try:
from Qt import QtWidgets # noqa: F401
from Qt import __binding__
print(f"Qt binding: {__binding__}")
mod = importlib.import_module(__binding__)
from qtpy import API_NAME
print(f"Qt binding: {API_NAME}")
mod = importlib.import_module(API_NAME)
print(f"Qt path: {mod.__file__}")
print("Qt library found, nothing to do..")

View file

@ -3,7 +3,7 @@ import sys
import glob
import logging
from Qt import QtWidgets, QtCore
from qtpy import QtWidgets, QtCore
import qtawesome as qta

View file

@ -1,4 +1,4 @@
from Qt import QtGui, QtWidgets
from qtpy import QtGui, QtWidgets
from openpype.pipeline import InventoryAction
from openpype import style

View file

@ -1,4 +1,4 @@
from Qt import QtWidgets
from qtpy import QtWidgets
import qtawesome
from openpype.hosts.fusion.api import get_current_comp

View file

@ -14,7 +14,7 @@ import json
import signal
import time
from uuid import uuid4
from Qt import QtWidgets, QtCore, QtGui
from qtpy import QtWidgets, QtCore, QtGui
import collections
from .server import Server

View file

@ -7,7 +7,7 @@ import contextlib
import hou # noqa
from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher
from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
import pyblish.api
@ -40,7 +40,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher):
class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "houdini"
def __init__(self):

View file

@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""Tools to work with GLTF."""
import logging
from maya import cmds, mel # noqa
log = logging.getLogger(__name__)
_gltf_options = {
"of": str, # outputFolder
"cpr": str, # copyright
"sno": bool, # selectedNodeOnly
"sn": str, # sceneName
"glb": bool, # binary
"nbu": bool, # niceBufferURIs
"hbu": bool, # hashBufferURI
"ext": bool, # externalTextures
"ivt": int, # initialValuesTime
"acn": str, # animationClipName
"ast": int, # animationClipStartTime
"aet": int, # animationClipEndTime
"afr": float, # animationClipFrameRate
"dsa": int, # detectStepAnimations
"mpa": str, # meshPrimitiveAttributes
"bpa": str, # blendPrimitiveAttributes
"i32": bool, # force32bitIndices
"ssm": bool, # skipStandardMaterials
"eut": bool, # excludeUnusedTexcoord
"dm": bool, # defaultMaterial
"cm": bool, # colorizeMaterials
"dmy": str, # dumpMaya
"dgl": str, # dumpGLTF
"imd": str, # ignoreMeshDeformers
"ssc": bool, # skipSkinClusters
"sbs": bool, # skipBlendShapes
"rvp": bool, # redrawViewport
"vno": bool # visibleNodesOnly
}
def extract_gltf(parent_dir,
filename,
**kwargs):
"""Sets GLTF export options from data in the instance.
"""
cmds.loadPlugin('maya2glTF', quiet=True)
# load the UI to run mel command
mel.eval("maya2glTF_UI()")
parent_dir = parent_dir.replace('\\', '/')
options = {
"dsa": 1,
"glb": True
}
options.update(kwargs)
for key, value in options.copy().items():
if key not in _gltf_options:
log.warning("extract_gltf() does not support option '%s'. "
"Flag will be ignored..", key)
options.pop(key)
options.pop(value)
continue
job_args = list()
default_opt = "maya2glTF -of \"{0}\" -sn \"{1}\"".format(parent_dir, filename) # noqa
job_args.append(default_opt)
for key, value in options.items():
if isinstance(value, str):
job_args.append("-{0} \"{1}\"".format(key, value))
elif isinstance(value, bool):
if value:
job_args.append("-{0}".format(key))
else:
job_args.append("-{0} {1}".format(key, value))
job_str = " ".join(job_args)
log.info("{}".format(job_str))
mel.eval(job_str)
# close the gltf export after finish the export
gltf_UI = "maya2glTF_exporter_window"
if cmds.window(gltf_UI, q=True, exists=True):
cmds.deleteUI(gltf_UI)
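A hedged usage sketch of the new helper; the flags are maya2glTF short options from the _gltf_options table above, and the output folder is illustrative:

from openpype.hosts.maya.api.gltf import extract_gltf

# Roughly: writes <parent_dir>/<filename>.glb through the maya2glTF mel command.
extract_gltf(
    "C:/temp/exports",   # parent_dir, backslashes are normalized internally
    "characterA",        # passed to -sn (sceneName)
    glb=True,            # binary .glb output
    sno=True,            # selectedNodeOnly
    vno=True,            # visibleNodesOnly
)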

View file

@ -128,13 +128,18 @@ def get_main_window():
@contextlib.contextmanager
def suspended_refresh(suspend=True):
"""Suspend viewport refreshes"""
original_state = cmds.refresh(query=True, suspend=True)
"""Suspend viewport refreshes
cmds.ogs(pause=True) is a toggle so we cant pass False.
"""
original_state = cmds.ogs(query=True, pause=True)
try:
cmds.refresh(suspend=suspend)
if suspend and not original_state:
cmds.ogs(pause=True)
yield
finally:
cmds.refresh(suspend=original_state)
if suspend and not original_state:
cmds.ogs(pause=True)
@contextlib.contextmanager
@ -3436,3 +3441,8 @@ def iter_visible_nodes_in_range(nodes, start, end):
# If no more nodes to process break the frame iterations..
if not node_dependencies:
break
def get_attribute_input(attr):
connections = cmds.listConnections(attr, plugs=True, destination=False)
return connections[0] if connections else None
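A brief usage sketch of the reworked context manager and the new helper; node and plug names are hypothetical:

from maya import cmds
from openpype.hosts.maya.api.lib import suspended_refresh, get_attribute_input

# cmds.ogs(pause=True) toggles the Viewport 2.0 pause state, so the manager
# only pauses when the viewport is not paused already and toggles back on exit.
with suspended_refresh(suspend=True):
    cmds.currentTime(101, edit=True)

# Source plug feeding the attribute, or None when nothing is connected.
source = get_attribute_input("AlembicNode1.time")   # e.g. "time1.outTime"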

View file

@ -95,21 +95,25 @@ class RenderSettings(object):
if renderer == "redshift":
self._set_redshift_settings(width, height)
mel.eval("redshiftUpdateActiveAovList")
def _set_arnold_settings(self, width, height):
"""Sets settings for Arnold."""
from mtoa.core import createOptions # noqa
from mtoa.aovs import AOVInterface # noqa
createOptions()
arnold_render_presets = self._project_settings["maya"]["RenderSettings"]["arnold_renderer"] # noqa
render_settings = self._project_settings["maya"]["RenderSettings"]
arnold_render_presets = render_settings["arnold_renderer"] # noqa
# Force resetting settings and AOV list to avoid having to deal with
# AOV checking logic, for now.
# This is a work around because the standard
# function to revert render settings does not reset AOVs list in MtoA
# Fetch current aovs in case there's any.
current_aovs = AOVInterface().getAOVs()
remove_aovs = render_settings["remove_aovs"]
if remove_aovs:
# Remove fetched AOVs
AOVInterface().removeAOVs(current_aovs)
AOVInterface().removeAOVs(current_aovs)
mel.eval("unifiedRenderGlobalsRevertToDefault")
img_ext = arnold_render_presets["image_format"]
img_prefix = arnold_render_presets["image_prefix"]
@ -118,6 +122,8 @@ class RenderSettings(object):
multi_exr = arnold_render_presets["multilayer_exr"]
additional_options = arnold_render_presets["additional_options"]
for aov in aovs:
if aov in current_aovs and not remove_aovs:
continue
AOVInterface('defaultArnoldRenderOptions').addAOV(aov)
cmds.setAttr("defaultResolution.width", width)
@ -141,12 +147,50 @@ class RenderSettings(object):
def _set_redshift_settings(self, width, height):
"""Sets settings for Redshift."""
redshift_render_presets = (
self._project_settings
["maya"]
["RenderSettings"]
["redshift_renderer"]
)
render_settings = self._project_settings["maya"]["RenderSettings"]
redshift_render_presets = render_settings["redshift_renderer"]
remove_aovs = render_settings["remove_aovs"]
all_rs_aovs = cmds.ls(type='RedshiftAOV')
if remove_aovs:
for aov in all_rs_aovs:
enabled = cmds.getAttr("{}.enabled".format(aov))
if enabled:
cmds.delete(aov)
redshift_aovs = redshift_render_presets["aov_list"]
# list all the aovs
all_rs_aovs = cmds.ls(type='RedshiftAOV')
for rs_aov in redshift_aovs:
rs_layername = rs_aov
if " " in rs_aov:
rs_renderlayer = rs_aov.replace(" ", "")
rs_layername = "rsAov_{}".format(rs_renderlayer)
else:
rs_layername = "rsAov_{}".format(rs_aov)
if rs_layername in all_rs_aovs:
continue
cmds.rsCreateAov(type=rs_aov)
# update the AOV list
mel.eval("redshiftUpdateActiveAovList")
rs_p_engine = redshift_render_presets["primary_gi_engine"]
rs_s_engine = redshift_render_presets["secondary_gi_engine"]
if int(rs_p_engine) or int(rs_s_engine) != 0:
cmds.setAttr("redshiftOptions.GIEnabled", 1)
if int(rs_p_engine) == 0:
# reset the primary GI Engine as default
cmds.setAttr("redshiftOptions.primaryGIEngine", 4)
if int(rs_s_engine) == 0:
# reset the secondary GI Engine as default
cmds.setAttr("redshiftOptions.secondaryGIEngine", 2)
else:
cmds.setAttr("redshiftOptions.GIEnabled", 0)
cmds.setAttr("redshiftOptions.primaryGIEngine", int(rs_p_engine))
cmds.setAttr("redshiftOptions.secondaryGIEngine", int(rs_s_engine))
additional_options = redshift_render_presets["additional_options"]
ext = redshift_render_presets["image_format"]
img_exts = ["iff", "exr", "tif", "png", "tga", "jpg"]
@ -163,12 +207,31 @@ class RenderSettings(object):
"""Sets important settings for Vray."""
settings = cmds.ls(type="VRaySettingsNode")
node = settings[0] if settings else cmds.createNode("VRaySettingsNode")
vray_render_presets = (
self._project_settings
["maya"]
["RenderSettings"]
["vray_renderer"]
)
render_settings = self._project_settings["maya"]["RenderSettings"]
vray_render_presets = render_settings["vray_renderer"]
# vrayRenderElement
remove_aovs = render_settings["remove_aovs"]
all_vray_aovs = cmds.ls(type='VRayRenderElement')
lightSelect_aovs = cmds.ls(type='VRayRenderElementSet')
if remove_aovs:
for aov in all_vray_aovs:
# remove all aovs except LightSelect
enabled = cmds.getAttr("{}.enabled".format(aov))
if enabled:
cmds.delete(aov)
# remove LightSelect
for light_aovs in lightSelect_aovs:
light_enabled = cmds.getAttr("{}.enabled".format(light_aovs))
if light_enabled:
cmds.delete(lightSelect_aovs)
vray_aovs = vray_render_presets["aov_list"]
for renderlayer in vray_aovs:
renderElement = "vrayAddRenderElement {}".format(renderlayer)
RE_name = mel.eval(renderElement)
# if there is more than one same render element
if RE_name.endswith("1"):
cmds.delete(RE_name)
# Set aov separator
# First we need to explicitly set the UI items in Render Settings
# because that is also what V-Ray updates to when that Render Settings

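For orientation, a hedged sketch of the maya/RenderSettings project-settings block these methods read; the keys are taken from the code above, the nesting and values are only illustrative:

RenderSettings = {
    "remove_aovs": True,              # wipe the currently enabled AOVs before applying the preset
    "arnold_renderer": {
        "image_format": "exr",
        "image_prefix": "<Scene>/<RenderLayer>/<RenderLayer>",
        "multilayer_exr": True,
        "additional_options": [],
    },
    "redshift_renderer": {
        "aov_list": ["Beauty", "Depth"],
        "primary_gi_engine": "4",     # "0" keeps the default engine (see the reset logic above)
        "secondary_gi_engine": "2",
        "image_format": "exr",
        "additional_options": [],
    },
    "vray_renderer": {
        "aov_list": ["diffuseChannel"],
        "additional_options": [],
    },
}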
View file

@ -217,7 +217,7 @@ class ReferenceLoader(Loader):
# Need to save alembic settings and reapply, cause referencing resets
# them to incoming data.
alembic_attrs = ["speed", "offset", "cycleType"]
alembic_attrs = ["speed", "offset", "cycleType", "time"]
alembic_data = {}
if representation["name"] == "abc":
alembic_nodes = cmds.ls(
@ -226,7 +226,12 @@ class ReferenceLoader(Loader):
if alembic_nodes:
for attr in alembic_attrs:
node_attr = "{}.{}".format(alembic_nodes[0], attr)
alembic_data[attr] = cmds.getAttr(node_attr)
data = {
"input": lib.get_attribute_input(node_attr),
"value": cmds.getAttr(node_attr)
}
alembic_data[attr] = data
else:
self.log.debug("No alembic nodes found in {}".format(members))
@ -263,8 +268,19 @@ class ReferenceLoader(Loader):
"{}:*".format(namespace), type="AlembicNode"
)
if alembic_nodes:
for attr, value in alembic_data.items():
cmds.setAttr("{}.{}".format(alembic_nodes[0], attr), value)
alembic_node = alembic_nodes[0] # assume single AlembicNode
for attr, data in alembic_data.items():
node_attr = "{}.{}".format(alembic_node, attr)
input = lib.get_attribute_input(node_attr)
if data["input"]:
if data["input"] != input:
cmds.connectAttr(
data["input"], node_attr, force=True
)
else:
if input:
cmds.disconnectAttr(input, node_attr)
cmds.setAttr(node_attr, data["value"])
# Fix PLN-40 for older containers created with Avalon that had the
# `.verticesOnlySet` set to True.
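For clarity, the per-attribute record the update now keeps and restores looks like this; node and plug names are hypothetical:

alembic_data = {
    "speed":     {"input": None,            "value": 1.0},    # plain value, restored with setAttr
    "offset":    {"input": None,            "value": 0.0},
    "cycleType": {"input": None,            "value": 0},
    "time":      {"input": "time1.outTime", "value": 101.0},  # connection is re-made with connectAttr
}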

View file

@ -1,5 +1,3 @@
from collections import OrderedDict
from openpype.hosts.maya.api import (
lib,
plugin
@ -9,12 +7,26 @@ from maya import cmds
class CreateAss(plugin.Creator):
"""Arnold Archive"""
"""Arnold Scene Source"""
name = "ass"
label = "Ass StandIn"
label = "Arnold Scene Source"
family = "ass"
icon = "cube"
expandProcedurals = False
motionBlur = True
motionBlurKeys = 2
motionBlurLength = 0.5
maskOptions = False
maskCamera = False
maskLight = False
maskShape = False
maskShader = False
maskOverride = False
maskDriver = False
maskFilter = False
maskColor_manager = False
maskOperator = False
def __init__(self, *args, **kwargs):
super(CreateAss, self).__init__(*args, **kwargs)
@ -22,17 +34,27 @@ class CreateAss(plugin.Creator):
# Add animation data
self.data.update(lib.collect_animation_data())
# Vertex colors with the geometry
self.data["exportSequence"] = False
self.data["expandProcedurals"] = self.expandProcedurals
self.data["motionBlur"] = self.motionBlur
self.data["motionBlurKeys"] = self.motionBlurKeys
self.data["motionBlurLength"] = self.motionBlurLength
# Masks
self.data["maskOptions"] = self.maskOptions
self.data["maskCamera"] = self.maskCamera
self.data["maskLight"] = self.maskLight
self.data["maskShape"] = self.maskShape
self.data["maskShader"] = self.maskShader
self.data["maskOverride"] = self.maskOverride
self.data["maskDriver"] = self.maskDriver
self.data["maskFilter"] = self.maskFilter
self.data["maskColor_manager"] = self.maskColor_manager
self.data["maskOperator"] = self.maskOperator
def process(self):
instance = super(CreateAss, self).process()
# data = OrderedDict(**self.data)
nodes = list()
nodes = []
if (self.options or {}).get("useSelection"):
nodes = cmds.ls(selection=True)
@ -42,7 +64,3 @@ class CreateAss(plugin.Creator):
assContent = cmds.sets(name="content_SET")
assProxy = cmds.sets(name="proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)
# self.log.info(data)
#
# self.data = data

View file

@ -8,3 +8,9 @@ class CreateLayout(plugin.Creator):
label = "Layout"
family = "layout"
icon = "cubes"
def __init__(self, *args, **kwargs):
super(CreateLayout, self).__init__(*args, **kwargs)
# enable this when you want to
# publish group of loaded asset
self.data["groupLoadedAssets"] = False

View file

@ -0,0 +1,35 @@
from openpype.hosts.maya.api import (
lib,
plugin
)
class CreateProxyAlembic(plugin.Creator):
"""Proxy Alembic for animated data"""
name = "proxyAbcMain"
label = "Proxy Alembic"
family = "proxyAbc"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateProxyAlembic, self).__init__(*args, **kwargs)
# Add animation data
self.data.update(lib.collect_animation_data())
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
# Vertex colors with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
# Default to exporting world-space
self.data["worldSpace"] = True
# name suffix for the bounding box
self.data["nameSuffix"] = "_BBox"
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""

View file

@ -48,3 +48,21 @@ class CreateUnrealSkeletalMesh(plugin.Creator):
cmds.sets(node, forceElement=joints_set)
else:
cmds.sets(node, forceElement=geometry_set)
# Add animation data
self.data.update(lib.collect_animation_data())
# Only renderable visible shapes
self.data["renderableOnly"] = False
# only nodes that are visible
self.data["visibleOnly"] = False
# Include parent groups
self.data["includeParentHierarchy"] = False
# Default to exporting world-space
self.data["worldSpace"] = True
# Default to suspend refresh.
self.data["refresh"] = False
# Add options for custom attributes
self.data["attr"] = ""
self.data["attrPrefix"] = ""

View file

@ -14,6 +14,7 @@ class SetFrameRangeLoader(load.LoaderPlugin):
families = ["animation",
"camera",
"proxyAbc",
"pointcache"]
representations = ["abc"]
@ -48,6 +49,7 @@ class SetFrameRangeWithHandlesLoader(load.LoaderPlugin):
families = ["animation",
"camera",
"proxyAbc",
"pointcache"]
representations = ["abc"]

View file

@ -11,7 +11,7 @@ from openpype.settings import get_project_settings
class AlembicStandinLoader(load.LoaderPlugin):
"""Load Alembic as Arnold Standin"""
families = ["animation", "model", "pointcache"]
families = ["animation", "model", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Alembic as Arnold Standin"

View file

@ -10,7 +10,7 @@ from openpype.settings import get_project_settings
class GpuCacheLoader(load.LoaderPlugin):
"""Load Alembic as gpuCache"""
families = ["model", "animation", "pointcache"]
families = ["model", "animation", "proxyAbc", "pointcache"]
representations = ["abc"]
label = "Import Gpu Cache"

View file

@ -16,6 +16,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
families = ["model",
"pointcache",
"proxyAbc",
"animation",
"mayaAscii",
"mayaScene",

View file

@ -1,4 +1,5 @@
from maya import cmds
from openpype.pipeline.publish import KnownPublishError
import pyblish.api
@ -6,6 +7,7 @@ import pyblish.api
class CollectAssData(pyblish.api.InstancePlugin):
"""Collect Ass data."""
# Offset to be after renderable camera collection.
order = pyblish.api.CollectorOrder + 0.2
label = 'Collect Ass'
families = ["ass"]
@ -23,8 +25,23 @@ class CollectAssData(pyblish.api.InstancePlugin):
instance.data['setMembers'] = members
self.log.debug('content members: {}'.format(members))
elif objset.startswith("proxy_SET"):
assert len(members) == 1, "You have multiple proxy meshes, please only use one"
if len(members) != 1:
msg = "You have multiple proxy meshes, please only use one"
raise KnownPublishError(msg)
instance.data['proxy'] = members
self.log.debug('proxy members: {}'.format(members))
# Use camera in object set if present else default to render globals
# camera.
cameras = cmds.ls(type="camera", long=True)
renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)]
camera = renderable[0]
for node in instance.data["setMembers"]:
camera_shapes = cmds.listRelatives(
node, shapes=True, type="camera"
)
if camera_shapes:
camera = node
instance.data["camera"] = camera
self.log.debug("data: {}".format(instance.data))

View file

@ -0,0 +1,17 @@
# -*- coding: utf-8 -*-
import pyblish.api
class CollectGLTF(pyblish.api.InstancePlugin):
"""Collect Assets for GLTF/GLB export."""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Asset for GLTF/GLB export"
families = ["model", "animation", "pointcache"]
def process(self, instance):
if not instance.data.get("families"):
instance.data["families"] = []
if "gltf" not in instance.data["families"]:
instance.data["families"].append("gltf")

View file

@ -1,77 +1,93 @@
import os
from maya import cmds
import arnold
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import maintained_selection
from openpype.hosts.maya.api.lib import maintained_selection, attribute_values
class ExtractAssStandin(publish.Extractor):
"""Extract the content of the instance to a ass file
"""Extract the content of the instance to a ass file"""
Things to pay attention to:
- If animation is toggled, are the frames correct
-
"""
label = "Ass Standin (.ass)"
label = "Arnold Scene Source (.ass)"
hosts = ["maya"]
families = ["ass"]
asciiAss = False
def process(self, instance):
sequence = instance.data.get("exportSequence", False)
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
filenames = list()
filenames = []
file_path = os.path.join(staging_dir, filename)
# Mask
mask = arnold.AI_NODE_ALL
node_types = {
"options": arnold.AI_NODE_OPTIONS,
"camera": arnold.AI_NODE_CAMERA,
"light": arnold.AI_NODE_LIGHT,
"shape": arnold.AI_NODE_SHAPE,
"shader": arnold.AI_NODE_SHADER,
"override": arnold.AI_NODE_OVERRIDE,
"driver": arnold.AI_NODE_DRIVER,
"filter": arnold.AI_NODE_FILTER,
"color_manager": arnold.AI_NODE_COLOR_MANAGER,
"operator": arnold.AI_NODE_OPERATOR
}
for key in node_types.keys():
if instance.data.get("mask" + key.title()):
mask = mask ^ node_types[key]
# Motion blur
values = {
"defaultArnoldRenderOptions.motion_blur_enable": instance.data.get(
"motionBlur", True
),
"defaultArnoldRenderOptions.motion_steps": instance.data.get(
"motionBlurKeys", 2
),
"defaultArnoldRenderOptions.motion_frames": instance.data.get(
"motionBlurLength", 0.5
)
}
# Write out .ass file
kwargs = {
"filename": file_path,
"startFrame": instance.data.get("frameStartHandle", 1),
"endFrame": instance.data.get("frameEndHandle", 1),
"frameStep": instance.data.get("step", 1),
"selected": True,
"asciiAss": self.asciiAss,
"shadowLinks": True,
"lightLinks": True,
"boundingBox": True,
"expandProcedurals": instance.data.get("expandProcedurals", False),
"camera": instance.data["camera"],
"mask": mask
}
self.log.info("Writing: '%s'" % file_path)
with maintained_selection():
self.log.info("Writing: {}".format(instance.data["setMembers"]))
cmds.select(instance.data["setMembers"], noExpand=True)
with attribute_values(values):
with maintained_selection():
self.log.info(
"Writing: {}".format(instance.data["setMembers"])
)
cmds.select(instance.data["setMembers"], noExpand=True)
if sequence:
self.log.info("Extracting ass sequence")
self.log.info(
"Extracting ass sequence with: {}".format(kwargs)
)
# Collect the start and end including handles
start = instance.data.get("frameStartHandle", 1)
end = instance.data.get("frameEndHandle", 1)
step = instance.data.get("step", 0)
exported_files = cmds.arnoldExportAss(**kwargs)
exported_files = cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=self.asciiAss,
shadowLinks=True,
lightLinks=True,
boundingBox=True,
startFrame=start,
endFrame=end,
frameStep=step
)
for file in exported_files:
filenames.append(os.path.split(file)[1])
self.log.info("Exported: {}".format(filenames))
else:
self.log.info("Extracting ass")
cmds.arnoldExportAss(filename=file_path,
selected=True,
asciiAss=False,
shadowLinks=True,
lightLinks=True,
boundingBox=True
)
self.log.info("Extracted {}".format(filename))
filenames = filename
optionals = [
"frameStart", "frameEnd", "step", "handles",
"handleEnd", "handleStart"
]
for key in optionals:
instance.data.pop(key, None)
if "representations" not in instance.data:
instance.data["representations"] = []
@ -79,13 +95,11 @@ class ExtractAssStandin(publish.Extractor):
representation = {
'name': 'ass',
'ext': 'ass',
'files': filenames,
"stagingDir": staging_dir
'files': filenames if len(filenames) > 1 else filenames[0],
"stagingDir": staging_dir,
'frameStart': kwargs["startFrame"]
}
if sequence:
representation['frameStart'] = start
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"

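A small worked sketch of how the export mask above is built: every mask* flag set on the instance XORs the matching bit out of AI_NODE_ALL, so that node type is excluded from the written .ass file. The instance data here is hypothetical:

import arnold

instance_data = {"maskShader": True, "maskDriver": False}

mask = arnold.AI_NODE_ALL
node_types = {"shader": arnold.AI_NODE_SHADER, "driver": arnold.AI_NODE_DRIVER}
for key, bit in node_types.items():
    if instance_data.get("mask" + key.title()):
        mask ^= bit    # bit is cleared -> shaders are skipped, drivers stay included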
View file

@ -0,0 +1,65 @@
import os
from maya import cmds, mel
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.gltf import extract_gltf
class ExtractGLB(publish.Extractor):
order = pyblish.api.ExtractorOrder
hosts = ["maya"]
label = "Extract GLB"
families = ["gltf"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{0}.glb".format(instance.name)
path = os.path.join(staging_dir, filename)
self.log.info("Extracting GLB to: {}".format(path))
nodes = instance[:]
self.log.info("Instance: {0}".format(nodes))
start_frame = instance.data('frameStart') or \
int(cmds.playbackOptions(query=True,
animationStartTime=True))# noqa
end_frame = instance.data('frameEnd') or \
int(cmds.playbackOptions(query=True,
animationEndTime=True)) # noqa
fps = mel.eval('currentTimeUnitToFPS()')
options = {
"sno": True, # selectedNodeOnly
"nbu": True, # .bin instead of .bin0
"ast": start_frame,
"aet": end_frame,
"afr": fps,
"dsa": 1,
"acn": instance.name,
"glb": True,
"vno": True # visibleNodeOnly
}
with lib.maintained_selection():
cmds.select(nodes, hi=True, noExpand=True)
extract_gltf(staging_dir,
instance.name,
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'glb',
'ext': 'glb',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract GLB successful to: {0}".format(path))

View file

@ -15,6 +15,7 @@ class ExtractLayout(publish.Extractor):
label = "Extract Layout"
hosts = ["maya"]
families = ["layout"]
project_container = "AVALON_CONTAINERS"
optional = True
def process(self, instance):
@ -33,12 +34,25 @@ class ExtractLayout(publish.Extractor):
for asset in cmds.sets(str(instance), query=True):
# Find the container
grp_name = asset.split(':')[0]
project_container = self.project_container
container_list = cmds.ls(project_container)
if len(container_list) == 0:
self.log.warning("Project container is not found!")
self.log.warning("The asset(s) may not be properly loaded after published") # noqa
continue
grp_loaded_ass = instance.data.get("groupLoadedAssets", False)
if grp_loaded_ass:
asset_list = cmds.listRelatives(asset, children=True)
for asset in asset_list:
grp_name = asset.split(':')[0]
else:
grp_name = asset.split(':')[0]
containers = cmds.ls("{}*_CON".format(grp_name))
assert len(containers) == 1, \
"More than one container found for {}".format(asset)
if len(containers) == 0:
self.log.warning("{} isn't from the loader".format(asset))
self.log.warning("It may not be properly loaded after published") # noqa
continue
container = containers[0]
representation_id = cmds.getAttr(

View file

@ -86,7 +86,8 @@ class ExtractAlembic(publish.Extractor):
start=start,
end=end))
with suspended_refresh(suspend=instance.data.get("refresh", False)):
suspend = not instance.data.get("refresh", False)
with suspended_refresh(suspend=suspend):
with maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(

View file

@ -0,0 +1,109 @@
import os
from maya import cmds
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection,
iter_visible_nodes_in_range
)
class ExtractProxyAlembic(publish.Extractor):
"""Produce an alembic for bounding box geometry
"""
label = "Extract Proxy (Alembic)"
hosts = ["maya"]
families = ["proxyAbc"]
def process(self, instance):
name_suffix = instance.data.get("nameSuffix")
# Collect the start and end including handles
start = float(instance.data.get("frameStartHandle", 1))
end = float(instance.data.get("frameEndHandle", 1))
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
attrs += ["cbId"]
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]
self.log.info("Extracting Proxy Alembic..")
dirname = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(dirname, filename)
proxy_root = self.create_proxy_geometry(instance,
name_suffix,
start,
end)
options = {
"step": instance.data.get("step", 1.0),
"attr": attrs,
"attrPrefix": attr_prefixes,
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": instance.data.get("writeColorSets", False),
"writeFaceSets": instance.data.get("writeFaceSets", False),
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True),
"root": proxy_root
}
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
with suspended_refresh():
with maintained_selection():
cmds.select(proxy_root, hi=True, noExpand=True)
extract_alembic(file=path,
startFrame=start,
endFrame=end,
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": dirname
}
instance.data["representations"].append(representation)
instance.context.data["cleanupFullPaths"].append(path)
self.log.info("Extracted {} to {}".format(instance, dirname))
# remove the bounding box
bbox_master = cmds.ls("bbox_grp")
cmds.delete(bbox_master)
def create_proxy_geometry(self, instance, name_suffix, start, end):
nodes = instance[:]
nodes = list(iter_visible_nodes_in_range(nodes,
start=start,
end=end))
inst_selection = cmds.ls(nodes, long=True)
cmds.geomToBBox(inst_selection,
nameSuffix=name_suffix,
keepOriginal=True,
single=False,
bakeAnimation=True,
startTime=start,
endTime=end)
# create master group for bounding
# boxes as the main root
master_group = cmds.group(name="bbox_grp")
bbox_sel = cmds.ls(master_group, long=True)
self.log.debug("proxy_root: {}".format(bbox_sel))
return bbox_sel

View file

@ -105,6 +105,11 @@ class ExtractThumbnail(publish.Extractor):
pm.currentTime(refreshFrameInt - 1, edit=True)
pm.currentTime(refreshFrameInt, edit=True)
# Override transparency if requested.
transparency = instance.data.get("transparency", 0)
if transparency != 0:
preset["viewport2_options"]["transparencyAlgorithm"] = transparency
# Isolate view is requested by having objects in the set besides a
# camera.
if preset.pop("isolate_view", False) and instance.data.get("isolate"):

View file

@ -0,0 +1,108 @@
# -*- coding: utf-8 -*-
"""Create Unreal Skeletal Mesh data to be extracted as FBX."""
import os
from contextlib import contextmanager
from maya import cmds # noqa
from openpype.pipeline import publish
from openpype.hosts.maya.api.lib import (
extract_alembic,
suspended_refresh,
maintained_selection
)
@contextmanager
def renamed(original_name, renamed_name):
# type: (str, str) -> None
try:
cmds.rename(original_name, renamed_name)
yield
finally:
cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMeshAbc(publish.Extractor):
"""Extract Unreal Skeletal Mesh as FBX from Maya. """
label = "Extract Unreal Skeletal Mesh - Alembic"
hosts = ["maya"]
families = ["skeletalMesh"]
optional = True
def process(self, instance):
self.log.info("Extracting pointcache..")
geo = cmds.listRelatives(
instance.data.get("geometry"), allDescendents=True, fullPath=True)
joints = cmds.listRelatives(
instance.data.get("joints"), allDescendents=True, fullPath=True)
nodes = geo + joints
attrs = instance.data.get("attr", "").split(";")
attrs = [value for value in attrs if value.strip()]
attrs += ["cbId"]
attr_prefixes = instance.data.get("attrPrefix", "").split(";")
attr_prefixes = [value for value in attr_prefixes if value.strip()]
# Define output path
staging_dir = self.staging_dir(instance)
filename = "{0}.abc".format(instance.name)
path = os.path.join(staging_dir, filename)
# The export requires forward slashes because we need
# to format it into a string in a mel expression
path = path.replace('\\', '/')
self.log.info("Extracting ABC to: {0}".format(path))
self.log.info("Members: {0}".format(nodes))
self.log.info("Instance: {0}".format(instance[:]))
options = {
"step": instance.data.get("step", 1.0),
"attr": attrs,
"attrPrefix": attr_prefixes,
"writeVisibility": True,
"writeCreases": True,
"writeColorSets": instance.data.get("writeColorSets", False),
"writeFaceSets": instance.data.get("writeFaceSets", False),
"uvWrite": True,
"selection": True,
"worldSpace": instance.data.get("worldSpace", True)
}
self.log.info("Options: {}".format(options))
if int(cmds.about(version=True)) >= 2017:
# Since Maya 2017 alembic supports multiple uv sets - write them.
options["writeUVSets"] = True
if not instance.data.get("includeParentHierarchy", True):
# Set the root nodes if we don't want to include parents
# The roots are to be considered the ones that are the actual
# direct members of the set
options["root"] = instance.data.get("setMembers")
with suspended_refresh(suspend=instance.data.get("refresh", False)):
with maintained_selection():
cmds.select(nodes, noExpand=True)
extract_alembic(file=path,
# startFrame=start,
# endFrame=end,
**options)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'abc',
'ext': 'abc',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract ABC successful to: {0}".format(path))

View file

@ -21,12 +21,13 @@ def renamed(original_name, renamed_name):
cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMesh(publish.Extractor):
class ExtractUnrealSkeletalMeshFbx(publish.Extractor):
"""Extract Unreal Skeletal Mesh as FBX from Maya. """
order = pyblish.api.ExtractorOrder - 0.1
label = "Extract Unreal Skeletal Mesh"
label = "Extract Unreal Skeletal Mesh - FBX"
families = ["skeletalMesh"]
optional = True
def process(self, instance):
fbx_exporter = fbx.FBXExtractor(log=self.log)

View file

@ -20,7 +20,7 @@ class ValidateOutRelatedNodeIds(pyblish.api.InstancePlugin):
"""
order = ValidateContentsOrder
families = ['animation', "pointcache"]
families = ['animation', "pointcache", "proxyAbc"]
hosts = ['maya']
label = 'Animation Out Set Related Node Ids'
actions = [

View file

@ -25,6 +25,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
families = ["animation",
"pointcache",
"camera",
"proxyAbc",
"renderlayer",
"review",
"yeticache"]

View file

@ -28,7 +28,9 @@ class ValidateSkeletalMeshHierarchy(pyblish.api.InstancePlugin):
parent.split("|")[1] for parent in (joints_parents + geo_parents)
}
if len(set(parents_set)) != 1:
self.log.info(parents_set)
if len(set(parents_set)) > 2:
raise PublishXmlValidationError(
self,
"Multiple roots on geometry or joints."

View file

@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.hosts.maya.api.action import (
SelectInvalidAction,
)
from openpype.pipeline.publish import (
RepairAction,
ValidateContentsOrder,
)
from maya import cmds
class ValidateSkeletalMeshTriangulated(pyblish.api.InstancePlugin):
"""Validates that the geometry has been triangulated."""
order = ValidateContentsOrder
hosts = ["maya"]
families = ["skeletalMesh"]
label = "Skeletal Mesh Triangulated"
optional = True
actions = [
SelectInvalidAction,
RepairAction
]
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError(
"The following objects needs to be triangulated: "
"{}".format(invalid))
@classmethod
def get_invalid(cls, instance):
geo = instance.data.get("geometry")
invalid = []
for obj in cmds.listRelatives(
cmds.ls(geo), allDescendents=True, fullPath=True):
n_triangles = cmds.polyEvaluate(obj, triangle=True)
n_faces = cmds.polyEvaluate(obj, face=True)
if not (isinstance(n_triangles, int) and isinstance(n_faces, int)):
continue
# We check if the number of triangles is equal to the number of
# faces for each transform node.
# If it is, the object is triangulated.
if cmds.objectType(obj, i="transform") and n_triangles != n_faces:
invalid.append(obj)
return invalid
@classmethod
def repair(cls, instance):
for node in cls.get_invalid(instance):
cmds.polyTriangulate(node)
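The check above relies on polyEvaluate: a fully triangulated mesh reports the same face and triangle count. A quick hedged illustration in an empty Maya scene:

from maya import cmds

cube, _ = cmds.polyCube()
cmds.polyEvaluate(cube, face=True)       # 6
cmds.polyEvaluate(cube, triangle=True)   # 12 -> not triangulated, would be flagged invalid
cmds.polyTriangulate(cube)
cmds.polyEvaluate(cube, face=True)       # 12, now equal to the triangle count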

View file

@ -8,7 +8,7 @@ from wsrpc_aiohttp import (
WebSocketAsync
)
from Qt import QtCore
from qtpy import QtCore
from openpype.lib import Logger
from openpype.pipeline import legacy_io

View file

@ -3,7 +3,7 @@ import sys
import contextlib
import traceback
from Qt import QtWidgets
from qtpy import QtWidgets
from openpype.lib import env_value_to_bool, Logger
from openpype.modules import ModulesManager

View file

@ -1,5 +1,7 @@
import os
from Qt import QtWidgets
from qtpy import QtWidgets
import pyblish.api
from openpype.lib import register_event_callback, Logger

View file

@ -6,7 +6,7 @@ import ctypes
import platform
import logging
from Qt import QtWidgets, QtCore, QtGui
from qtpy import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.pipeline import install_host

View file

@ -207,8 +207,8 @@ class CreateRenderlayer(plugin.Creator):
)
def _ask_user_subset_override(self, instance):
from Qt import QtCore
from Qt.QtWidgets import QMessageBox
from qtpy import QtCore
from qtpy.QtWidgets import QMessageBox
title = "Subset \"{}\" already exist".format(instance["subset"])
text = (

View file

@ -2,6 +2,7 @@
import os
import logging
from typing import List
import semver
import pyblish.api
@ -21,6 +22,9 @@ import unreal # noqa
logger = logging.getLogger("openpype.hosts.unreal")
OPENPYPE_CONTAINERS = "OpenPypeContainers"
UNREAL_VERSION = semver.VersionInfo(
*os.getenv("OPENPYPE_UNREAL_VERSION").split(".")
)
HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
@ -111,7 +115,9 @@ def ls():
"""
ar = unreal.AssetRegistryHelpers.get_asset_registry()
openpype_containers = ar.get_assets_by_class("AssetContainer", True)
# UE 5.1 changed how class name is specified
class_name = ["/Script/OpenPype", "AssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AssetContainer" # noqa
openpype_containers = ar.get_assets_by_class(class_name, True)
# get_asset_by_class returns AssetData. To get all metadata we need to
# load asset. get_tag_values() work only on metadata registered in

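A small sketch of the version gate introduced above; the version string is illustrative and is exported by the prelaunch hook further down in this diff:

import os
import semver

os.environ.setdefault("OPENPYPE_UNREAL_VERSION", "5.1.0")
UNREAL_VERSION = semver.VersionInfo(
    *os.getenv("OPENPYPE_UNREAL_VERSION").split(".")
)

# UE 5.1+ wants the class as ["/Script/<Module>", "<ClassName>"], older builds just the class name.
class_name = (
    ["/Script/OpenPype", "AssetContainer"]
    if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0
    else "AssetContainer"
)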
View file

@ -1,5 +1,5 @@
import sys
from Qt import QtWidgets, QtCore, QtGui
from qtpy import QtWidgets, QtCore, QtGui
from openpype import (
resources,

View file

@ -150,6 +150,7 @@ class UnrealPrelaunchHook(PreLaunchHook):
engine_path=Path(engine_path)
)
self.launch_context.env["OPENPYPE_UNREAL_VERSION"] = engine_version
# Append project file to launch arguments
self.launch_context.launch_args.append(
f"\"{project_file.as_posix()}\"")

View file

@ -6,7 +6,11 @@ public class OpenPype : ModuleRules
{
public OpenPype(ReadOnlyTargetRules Target) : base(Target)
{
DefaultBuildSettings = BuildSettingsVersion.V2;
bLegacyPublicIncludePaths = false;
ShadowVariableWarningLevel = WarningLevel.Error;
PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs;
IncludeOrderVersion = EngineIncludeOrderVersion.Unreal5_0;
PublicIncludePaths.AddRange(
new string[] {

View file

@ -1,7 +1,7 @@
// Fill out your copyright notice in the Description page of Project Settings.
#include "AssetContainer.h"
#include "AssetRegistryModule.h"
#include "AssetRegistry/AssetRegistryModule.h"
#include "Misc/PackageName.h"
#include "Engine.h"
#include "Containers/UnrealString.h"
@ -30,8 +30,8 @@ void UAssetContainer::OnAssetAdded(const FAssetData& AssetData)
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
FString assetFName = AssetData.AssetClassPath.ToString();
UE_LOG(LogTemp, Log, TEXT("asset name %s"), *assetFName);
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
@ -60,7 +60,7 @@ void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData)
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
FString assetFName = AssetData.AssetClassPath.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);
@ -93,7 +93,7 @@ void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString&
// get asset path and class
FString assetPath = AssetData.GetFullName();
FString assetFName = AssetData.AssetClass.ToString();
FString assetFName = AssetData.AssetClassPath.ToString();
// split path
assetPath.ParseIntoArray(split, TEXT(" "), true);

View file

@ -1,10 +1,10 @@
#pragma once
#include "OpenPypePublishInstance.h"
#include "AssetRegistryModule.h"
#include "AssetRegistry/AssetRegistryModule.h"
#include "AssetToolsModule.h"
#include "Framework/Notifications/NotificationManager.h"
#include "SNotificationList.h"
#include "Widgets/Notifications/SNotificationList.h"
//Moves all the invalid pointers to the end to prepare them for the shrinking
#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \
@ -47,7 +47,7 @@ void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData)
if (!IsValid(Asset))
{
UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! Skipping the addition."),
*InAssetData.ObjectPath.ToString());
*InAssetData.GetObjectPathString());
return;
}

View file

@ -5,7 +5,7 @@
#include "CoreMinimal.h"
#include "UObject/NoExportTypes.h"
#include "Engine/AssetUserData.h"
#include "AssetData.h"
#include "AssetRegistry/AssetData.h"
#include "AssetContainer.generated.h"
/**

View file

@ -1,6 +1,5 @@
#pragma once
#include "EditorTutorial.h"
#include "Engine.h"
#include "OpenPypePublishInstance.generated.h"

View file

@ -50,7 +50,10 @@ def get_engine_versions(env=None):
# environment variable not set
pass
except OSError:
# specified directory doesn't exists
# specified directory doesn't exist
pass
except StopIteration:
# specified directory doesn't exist
pass
# if we've got something, terminate auto-detection process

View file

@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
"""Load Alembic Animation."""
import os
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
import unreal # noqa
class AnimationAlembicLoader(plugin.Loader):
"""Load Unreal SkeletalMesh from Alembic"""
families = ["animation"]
label = "Import Alembic Animation"
representations = ["abc"]
icon = "cube"
color = "orange"
def get_task(self, filename, asset_dir, asset_name, replace):
task = unreal.AssetImportTask()
options = unreal.AbcImportSettings()
sm_settings = unreal.AbcStaticMeshSettings()
conversion_settings = unreal.AbcConversionSettings(
preset=unreal.AbcConversionPreset.CUSTOM,
flip_u=False, flip_v=False,
rotation=[0.0, 0.0, 0.0],
scale=[1.0, 1.0, -1.0])
task.set_editor_property('filename', filename)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', replace)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)
options.set_editor_property(
'import_type', unreal.AlembicImportType.SKELETAL)
options.static_mesh_settings = sm_settings
options.conversion_settings = conversion_settings
task.options = options
return task
def load(self, context, name, namespace, data):
"""Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and openpype container
root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
version = context.get('version').get('name')
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_v{version:03d}", suffix="")
container_name += suffix
if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
unreal.EditorAssetLibrary.make_directory(asset_dir)
task = self.get_task(self.fname, asset_dir, asset_name, False)
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([task])
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
return asset_content
def update(self, container, representation):
name = container["asset_name"]
source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = self.get_task(source_path, destination_path, name, True)
# do import fbx and replace existing data
asset_tools = unreal.AssetToolsHelpers.get_asset_tools()
asset_tools.import_asset_tasks([task])
container_path = f"{container['namespace']}/{container['objectName']}"
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})
asset_content = unreal.EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)
unreal.EditorAssetLibrary.delete_directory(path)
asset_content = unreal.EditorAssetLibrary.list_assets(
parent_path, recursive=False
)
if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)

View file

@ -14,7 +14,7 @@ import unreal # noqa
class SkeletalMeshAlembicLoader(plugin.Loader):
"""Load Unreal SkeletalMesh from Alembic"""
families = ["pointcache"]
families = ["pointcache", "skeletalMesh"]
label = "Import Alembic Skeletal Mesh"
representations = ["abc"]
icon = "cube"

View file

@ -14,7 +14,7 @@ import unreal # noqa
class StaticMeshAlembicLoader(plugin.Loader):
"""Load Unreal StaticMesh from Alembic"""
families = ["model"]
families = ["model", "staticMesh"]
label = "Import Alembic Static Mesh"
representations = ["abc"]
icon = "cube"

View file

@ -3,6 +3,8 @@
import ast
import unreal # noqa
import pyblish.api
from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION
from openpype.pipeline.publish import KnownPublishError
class CollectInstances(pyblish.api.ContextPlugin):
@ -23,8 +25,10 @@ class CollectInstances(pyblish.api.ContextPlugin):
def process(self, context):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
instance_containers = ar.get_assets_by_class(
"OpenPypePublishInstance", True)
class_name = ["/Script/OpenPype",
"AssetContainer"] if UNREAL_VERSION.major == 5 and \
UNREAL_VERSION.minor > 0 else "OpenPypePublishInstance" # noqa
instance_containers = ar.get_assets_by_class(class_name, True)
for container_data in instance_containers:
asset = container_data.get_asset()
@ -32,9 +36,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["objectName"] = container_data.asset_name
# convert to strings
data = {str(key): str(value) for (key, value) in data.items()}
assert data.get("family"), (
"instance has no family"
)
if not data.get("family"):
raise KnownPublishError("instance has no family")
# content of container
members = ast.literal_eval(data.get("members"))

View file

@ -74,22 +74,52 @@ class EventCallback(object):
"Registered callback is not callable. \"{}\""
).format(str(func)))
# Collect additional data about function
# - name
# - path
# - if expect argument or not
# Collect function name and path to file for logging
func_name = func.__name__
func_path = os.path.abspath(inspect.getfile(func))
# Get expected arguments from function spec
# - positional arguments are always preferred
expect_args = False
expect_kwargs = False
fake_event = "fake"
if hasattr(inspect, "signature"):
# Python 3 using 'Signature' object where we try to bind arg
# or kwarg. Using signature is recommended approach based on
# documentation.
sig = inspect.signature(func)
expect_args = len(sig.parameters) > 0
try:
sig.bind(fake_event)
expect_args = True
except TypeError:
pass
try:
sig.bind(event=fake_event)
expect_kwargs = True
except TypeError:
pass
else:
expect_args = len(inspect.getargspec(func)[0]) > 0
# In Python 2 'signature' is not available so 'getcallargs' is used
# - 'getcallargs' is marked as deprecated since Python 3.0
try:
inspect.getcallargs(func, fake_event)
expect_args = True
except TypeError:
pass
try:
inspect.getcallargs(func, event=fake_event)
expect_kwargs = True
except TypeError:
pass
self._func_ref = func_ref
self._func_name = func_name
self._func_path = func_path
self._expect_args = expect_args
self._expect_kwargs = expect_kwargs
self._ref_valid = func_ref is not None
self._enabled = True
@ -157,6 +187,10 @@ class EventCallback(object):
try:
if self._expect_args:
callback(event)
elif self._expect_kwargs:
callback(event=event)
else:
callback()
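A minimal sketch of the three callback shapes the introspection above distinguishes; register_event_callback is the helper imported elsewhere in this changeset and the topic name is illustrative:

from openpype.lib import register_event_callback

def on_save_positional(event):        # positional bind succeeds -> invoked as callback(event)
    print(event)

def on_save_keyword(*, event=None):   # only the keyword bind succeeds -> invoked as callback(event=event)
    print(event)

def on_save_bare():                   # neither bind succeeds -> invoked as callback()
    print("saved")

register_event_callback("save", on_save_positional)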

View file

@ -422,7 +422,7 @@ class TemplateResult(str):
cls = self.__class__
return cls(
os.path.normpath(self),
os.path.normpath(self.replace("\\", "/")),
self.template,
self.solved,
self.used_values,

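The one-line change above exists because os.path.normpath() leaves backslashes alone on POSIX; a quick hedged illustration:

import os

path = "C:\\projects\\demo/work"
os.path.normpath(path)                      # POSIX: 'C:\\projects\\demo/work' -- backslashes survive
os.path.normpath(path.replace("\\", "/"))   # POSIX: 'C:/projects/demo/work'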
View file

@ -57,7 +57,7 @@ class AvalonModule(OpenPypeModule, ITrayModule):
if not self._library_loader_imported:
return
from Qt import QtWidgets
from qtpy import QtWidgets
# Actions
action_library_loader = QtWidgets.QAction(
"Loader", tray_menu
@ -75,7 +75,7 @@ class AvalonModule(OpenPypeModule, ITrayModule):
def show_library_loader(self):
if self._library_loader_window is None:
from Qt import QtCore
from qtpy import QtCore
from openpype.tools.libraryloader import LibraryLoaderWindow
from openpype.pipeline import install_openpype_plugins

View file

@ -183,7 +183,7 @@ class ClockifyModule(
# Definition of Tray menu
def tray_menu(self, parent_menu):
# Menu for Tray App
from Qt import QtWidgets
from qtpy import QtWidgets
menu = QtWidgets.QMenu("Clockify", parent_menu)
menu.setProperty("submenu", "on")

View file

@ -1,4 +1,4 @@
from Qt import QtCore, QtGui, QtWidgets
from qtpy import QtCore, QtGui, QtWidgets
from openpype import resources, style

View file

@ -127,25 +127,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"harmony": [r".*"], # for everything from AE
"celaction": [r".*"]}
enviro_filter = [
environ_job_filter = [
"OPENPYPE_METADATA_FILE"
]
environ_keys = [
"FTRACK_API_USER",
"FTRACK_API_KEY",
"FTRACK_SERVER",
"OPENPYPE_METADATA_FILE",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_PUBLISH_JOB"
"OPENPYPE_LOG_NO_COLORS",
"OPENPYPE_USERNAME",
"OPENPYPE_RENDER_JOB",
"OPENPYPE_PUBLISH_JOB",
"OPENPYPE_MONGO",
"OPENPYPE_VERSION",
"IS_TEST"
"OPENPYPE_VERSION"
]
# custom deadline attributes
@ -228,30 +220,43 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
instance_version = instance.data.get("version") # take this if exists
if instance_version != 1:
override_version = instance_version
output_dir = self._get_publish_folder(instance.context.data['anatomy'],
deepcopy(
instance.data["anatomyData"]),
instance.data.get("asset"),
instances[0]["subset"],
'render',
override_version)
output_dir = self._get_publish_folder(
instance.context.data['anatomy'],
deepcopy(instance.data["anatomyData"]),
instance.data.get("asset"),
instances[0]["subset"],
'render',
override_version
)
# Transfer the environment from the original job to this dependent
# job so they use the same environment
metadata_path, roothless_metadata_path = \
self._create_metadata_path(instance)
environment = job["Props"].get("Env", {})
environment["AVALON_PROJECT"] = legacy_io.Session["AVALON_PROJECT"]
environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"]
environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"]
environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME")
environment["OPENPYPE_VERSION"] = os.environ.get("OPENPYPE_VERSION")
environment["OPENPYPE_LOG_NO_COLORS"] = "1"
environment["OPENPYPE_USERNAME"] = instance.context.data["user"]
environment["OPENPYPE_PUBLISH_JOB"] = "1"
environment["OPENPYPE_RENDER_JOB"] = "0"
environment["IS_TEST"] = is_in_tests()
environment = {
"AVALON_PROJECT": legacy_io.Session["AVALON_PROJECT"],
"AVALON_ASSET": legacy_io.Session["AVALON_ASSET"],
"AVALON_TASK": legacy_io.Session["AVALON_TASK"],
"OPENPYPE_USERNAME": instance.context.data["user"],
"OPENPYPE_PUBLISH_JOB": "1",
"OPENPYPE_RENDER_JOB": "0",
"OPENPYPE_REMOTE_JOB": "0",
"OPENPYPE_LOG_NO_COLORS": "1",
"IS_TEST": str(int(is_in_tests()))
}
# add environments from self.environ_keys
for env_key in self.environ_keys:
if os.getenv(env_key):
environment[env_key] = os.environ[env_key]
# pass environment keys from self.environ_job_filter
job_environ = job["Props"].get("Env", {})
for env_j_key in self.environ_job_filter:
if job_environ.get(env_j_key):
environment[env_j_key] = job_environ[env_j_key]
# Add mongo url if it's enabled
if instance.context.data.get("deadlinePassMongoUrl"):
mongo_url = os.environ.get("OPENPYPE_MONGO")
@ -318,19 +323,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if instance.data.get("suspend_publish"):
payload["JobInfo"]["InitialStatus"] = "Suspended"
index = 0
for key in environment:
if key.upper() in self.enviro_filter:
payload["JobInfo"].update(
{
"EnvironmentKeyValue%d"
% index: "{key}={value}".format(
key=key, value=environment[key]
)
}
)
index += 1
for index, (key_, value_) in enumerate(environment.items()):
payload["JobInfo"].update(
{
"EnvironmentKeyValue%d"
% index: "{key}={value}".format(
key=key_, value=value_
)
}
)
# remove secondary pool
payload["JobInfo"].pop("SecondaryPool", None)

View file

@ -1,4 +1,4 @@
from Qt import QtWidgets
from qtpy import QtWidgets
from openpype.style import load_stylesheet

View file

@ -135,9 +135,9 @@ class FirstVersionStatus(BaseEvent):
new_status = asset_version_statuses.get(found_item["status"])
if not new_status:
self.log.warning(
self.log.warning((
"AssetVersion doesn't have status `{}`."
).format(found_item["status"])
).format(found_item["status"]))
continue
try:

View file

@ -3,9 +3,9 @@ import time
import datetime
import threading
from Qt import QtCore, QtWidgets, QtGui
import ftrack_api
from qtpy import QtCore, QtWidgets, QtGui
from openpype import resources
from openpype.lib import Logger
from openpype_modules.ftrack import resolve_ftrack_url, FTRACK_MODULE_DIR

View file

@ -1,10 +1,13 @@
import os
import requests
from qtpy import QtCore, QtGui, QtWidgets
from openpype import style
from openpype_modules.ftrack.lib import credentials
from . import login_tools
from openpype import resources
from Qt import QtCore, QtGui, QtWidgets
from . import login_tools
class CredentialsDialog(QtWidgets.QDialog):

View file

@ -222,7 +222,7 @@ class ITrayAction(ITrayModule):
pass
def tray_menu(self, tray_menu):
from Qt import QtWidgets
from qtpy import QtWidgets
if self.admin_action:
menu = self.admin_submenu(tray_menu)
@ -247,7 +247,7 @@ class ITrayAction(ITrayModule):
@staticmethod
def admin_submenu(tray_menu):
if ITrayAction._admin_submenu is None:
from Qt import QtWidgets
from qtpy import QtWidgets
admin_submenu = QtWidgets.QMenu("Admin", tray_menu)
admin_submenu.menuAction().setVisible(False)
@ -279,7 +279,7 @@ class ITrayService(ITrayModule):
@staticmethod
def services_submenu(tray_menu):
if ITrayService._services_submenu is None:
from Qt import QtWidgets
from qtpy import QtWidgets
services_submenu = QtWidgets.QMenu("Services", tray_menu)
services_submenu.menuAction().setVisible(False)
@ -294,7 +294,7 @@ class ITrayService(ITrayModule):
@staticmethod
def _load_service_icons():
from Qt import QtGui
from qtpy import QtGui
ITrayService._failed_icon = QtGui.QIcon(
resources.get_resource("icons", "circle_red.png")
@ -325,7 +325,7 @@ class ITrayService(ITrayModule):
return ITrayService._failed_icon
def tray_menu(self, tray_menu):
from Qt import QtWidgets
from qtpy import QtWidgets
action = QtWidgets.QAction(
self.label,

View file

@ -1,4 +1,4 @@
from Qt import QtWidgets, QtCore, QtGui
from qtpy import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.modules.kitsu.utils.credentials import (

View file

@ -1,12 +1,9 @@
import os
import threading
import gazu
from openpype.client import (
get_project,
get_assets,
get_asset_by_name
)
from openpype.client import get_project, get_assets, get_asset_by_name
from openpype.pipeline import AvalonMongoDB
from .credentials import validate_credentials
from .update_op_with_zou import (
@ -397,6 +394,13 @@ def start_listeners(login: str, password: str):
login (str): Kitsu user login
password (str): Kitsu user password
"""
# Refresh token every week
def refresh_token_every_week():
print("Refreshing token...")
gazu.refresh_token()
threading.Timer(7 * 3600 * 24, refresh_token_every_week).start()
refresh_token_every_week()
# Connect to server
listener = Listener(login, password)
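The weekly refresh added above relies on a threading.Timer that re-arms itself after every run. A tiny generic sketch of that pattern, with the daemon flag set so the pending timer cannot keep a process alive by itself (my own assumption, not something the diff does):

import threading


def schedule_repeating(interval_seconds, callback):
    """Run ``callback`` immediately and then again every ``interval_seconds``."""
    def _tick():
        callback()
        timer = threading.Timer(interval_seconds, _tick)
        timer.daemon = True  # assumption: do not block interpreter shutdown
        timer.start()
    _tick()


schedule_repeating(7 * 24 * 3600, lambda: print("Refreshing token..."))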

View file

@ -22,7 +22,7 @@ class LogViewModule(OpenPypeModule, ITrayModule):
# Definition of Tray menu
def tray_menu(self, tray_menu):
from Qt import QtWidgets
from qtpy import QtWidgets
# Menu for Tray App
menu = QtWidgets.QMenu('Logging', tray_menu)

View file

@ -1,4 +1,4 @@
from Qt import QtWidgets, QtCore
from qtpy import QtWidgets, QtCore
from .widgets import LogsWidget, OutputWidget
from openpype import style

View file

@ -1,5 +1,5 @@
import collections
from Qt import QtCore, QtGui
from qtpy import QtCore, QtGui
from openpype.lib import Logger

View file

@ -1,5 +1,5 @@
import html
from Qt import QtCore, QtWidgets
from qtpy import QtCore, QtWidgets
import qtawesome
from .models import LogModel, LogsFilterProxy

View file

@ -53,7 +53,7 @@ class MusterModule(OpenPypeModule, ITrayModule):
# Definition of Tray menu
def tray_menu(self, parent):
"""Add **change credentials** option to tray menu."""
from Qt import QtWidgets
from qtpy import QtWidgets
# Menu for Tray App
menu = QtWidgets.QMenu('Muster', parent)

View file

@ -1,5 +1,4 @@
import os
from Qt import QtCore, QtGui, QtWidgets
from qtpy import QtCore, QtGui, QtWidgets
from openpype import resources, style

View file

@ -5,7 +5,7 @@ import collections
from code import InteractiveInterpreter
import appdirs
from Qt import QtCore, QtWidgets, QtGui
from qtpy import QtCore, QtWidgets, QtGui
from openpype import resources
from openpype.style import load_stylesheet

View file

@ -1,5 +1,5 @@
import os
from Qt import QtCore, QtWidgets, QtGui
from qtpy import QtCore, QtWidgets, QtGui
from openpype import style
from openpype import resources

View file

@ -1,7 +1,7 @@
import os
import webbrowser
from Qt import QtWidgets
from qtpy import QtWidgets
from openpype.modules.shotgrid.lib import credentials
from openpype.modules.shotgrid.tray.credential_dialog import (

View file

@ -1244,7 +1244,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule):
if not self.enabled:
return
from Qt import QtWidgets
from qtpy import QtWidgets
"""Add menu or action to Tray(or parent)'s menu"""
action = QtWidgets.QAction(self.label, parent_menu)
action.triggered.connect(self.show_widget)

View file

@ -1,4 +1,4 @@
from Qt import QtWidgets, QtCore, QtGui
from qtpy import QtWidgets, QtCore, QtGui
from openpype.tools.settings import style

View file

@ -1,5 +1,5 @@
import os
from Qt import QtCore, QtWidgets, QtGui
from qtpy import QtCore, QtWidgets, QtGui
from openpype.lib import Logger

View file

@ -3,8 +3,7 @@ import attr
from bson.objectid import ObjectId
import datetime
from Qt import QtCore
from Qt.QtCore import Qt
from qtpy import QtCore
import qtawesome
from openpype.tools.utils.delegates import pretty_timestamp
@ -79,16 +78,16 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel):
def columnCount(self, _index=None):
return len(self._header)
def headerData(self, section, orientation, role=Qt.DisplayRole):
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if section >= len(self.COLUMN_LABELS):
return
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self.COLUMN_LABELS[section][1]
if role == HEADER_NAME_ROLE:
if orientation == Qt.Horizontal:
if orientation == QtCore.Qt.Horizontal:
return self.COLUMN_LABELS[section][0] # return name
def data(self, index, role):
@ -123,7 +122,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel):
return item.status == lib.STATUS[2] and \
item.remote_progress < 1
if role in (Qt.DisplayRole, Qt.EditRole):
if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
# because of ImageDelegate
if header_value in ['remote_site', 'local_site']:
return ""
@ -146,7 +145,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel):
if role == STATUS_ROLE:
return item.status
if role == Qt.UserRole:
if role == QtCore.Qt.UserRole:
return item._id
@property
@ -409,7 +408,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel):
"""
for i in range(self.rowCount(None)):
index = self.index(i, 0)
value = self.data(index, Qt.UserRole)
value = self.data(index, QtCore.Qt.UserRole)
if value == id:
return index
return None
@ -917,7 +916,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel):
if not self.can_edit:
return
repre_id = self.data(index, Qt.UserRole)
repre_id = self.data(index, QtCore.Qt.UserRole)
representation = get_representation_by_id(self.project, repre_id)
if representation:
@ -1353,7 +1352,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel):
if not self.can_edit:
return
file_id = self.data(index, Qt.UserRole)
file_id = self.data(index, QtCore.Qt.UserRole)
updated_file = None
representation = get_representation_by_id(self.project, self._id)

View file

@ -3,8 +3,7 @@ import subprocess
import sys
from functools import partial
from Qt import QtWidgets, QtCore, QtGui
from Qt.QtCore import Qt
from qtpy import QtWidgets, QtCore, QtGui
import qtawesome
from openpype.tools.settings import style
@ -260,7 +259,7 @@ class _SyncRepresentationWidget(QtWidgets.QWidget):
self._selected_ids = set()
for index in idxs:
self._selected_ids.add(self.model.data(index, Qt.UserRole))
self._selected_ids.add(self.model.data(index, QtCore.Qt.UserRole))
def _set_selection(self):
"""
@ -291,7 +290,7 @@ class _SyncRepresentationWidget(QtWidgets.QWidget):
self.table_view.openPersistentEditor(index)
return
_id = self.model.data(index, Qt.UserRole)
_id = self.model.data(index, QtCore.Qt.UserRole)
detail_window = SyncServerDetailWindow(
self.sync_server, _id, self.model.project, parent=self)
detail_window.exec()
@ -615,7 +614,7 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget):
table_view.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows)
table_view.horizontalHeader().setSortIndicator(
-1, Qt.AscendingOrder)
-1, QtCore.Qt.AscendingOrder)
table_view.setAlternatingRowColors(True)
table_view.verticalHeader().hide()
table_view.viewport().setAttribute(QtCore.Qt.WA_Hover, True)
@ -773,7 +772,8 @@ class SyncRepresentationDetailWidget(_SyncRepresentationWidget):
QtWidgets.QAbstractItemView.ExtendedSelection)
table_view.setSelectionBehavior(
QtWidgets.QTableView.SelectRows)
table_view.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder)
table_view.horizontalHeader().setSortIndicator(
-1, QtCore.Qt.AscendingOrder)
table_view.horizontalHeader().setSortIndicatorShown(True)
table_view.setAlternatingRowColors(True)
table_view.verticalHeader().hide()

View file

@ -1,5 +1,5 @@
import time
from Qt import QtCore
from qtpy import QtCore
from pynput import mouse, keyboard
from openpype.lib import Logger

View file

@ -1,4 +1,4 @@
from Qt import QtCore, QtGui, QtWidgets
from qtpy import QtCore, QtGui, QtWidgets
from openpype import resources, style

View file

@ -3,7 +3,7 @@ from aiohttp import web
import json
import logging
from concurrent.futures import CancelledError
from Qt import QtWidgets
from qtpy import QtWidgets
from openpype.modules import ITrayService

View file

@ -21,6 +21,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.495
families = ["workfile",
"pointcache",
"proxyAbc",
"camera",
"animation",
"model",
@ -50,6 +51,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
"source",
"assembly",
"fbx",
"gltf",
"textures",
"action",
"background",

View file

@ -1,5 +1,4 @@
import os
import re
import json
import copy
import tempfile
@ -21,6 +20,7 @@ from openpype.lib import (
CREATE_NO_WINDOW
)
from openpype.lib.profiles_filtering import filter_profiles
class ExtractBurnin(publish.Extractor):
@ -34,6 +34,7 @@ class ExtractBurnin(publish.Extractor):
label = "Extract burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
hosts = [
"nuke",
@ -53,6 +54,7 @@ class ExtractBurnin(publish.Extractor):
"flame"
# "resolve"
]
optional = True
positions = [
@ -69,11 +71,15 @@ class ExtractBurnin(publish.Extractor):
"y_offset": 5
}
# Preset attributes
# Configurable by Settings
profiles = None
options = None
def process(self, instance):
if not self.profiles:
self.log.warning("No profiles present for create burnin")
return
# QUESTION what is this for and should we raise an exception?
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
@ -137,18 +143,29 @@ class ExtractBurnin(publish.Extractor):
return filtered_repres
def main_process(self, instance):
# TODO get these data from context
host_name = instance.context.data["hostName"]
task_name = os.environ["AVALON_TASK"]
family = self.main_family_from_instance(instance)
family = instance.data["family"]
task_data = instance.data["anatomyData"].get("task", {})
task_name = task_data.get("name")
task_type = task_data.get("type")
subset = instance.data["subset"]
filtering_criteria = {
"hosts": host_name,
"families": family,
"task_names": task_name,
"task_types": task_type,
"subset": subset
}
profile = filter_profiles(self.profiles, filtering_criteria,
logger=self.log)
# Find profile most matching current host, task and instance family
profile = self.find_matching_profile(host_name, task_name, family)
if not profile:
self.log.info((
"Skipped instance. None of profiles in presets are for"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\""
).format(host_name, family, task_name))
" Host: \"{}\" | Families: \"{}\" | Task \"{}\""
" | Task type \"{}\" | Subset \"{}\" "
).format(host_name, family, task_name, task_type, subset))
return
self.log.debug("profile: {}".format(profile))
@ -158,7 +175,8 @@ class ExtractBurnin(publish.Extractor):
if not burnin_defs:
self.log.info((
"Skipped instance. Burnin definitions are not set for profile"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\" | Profile \"{}\""
" Host: \"{}\" | Families: \"{}\" | Task \"{}\""
" | Profile \"{}\""
).format(host_name, family, task_name, profile))
return
@ -693,130 +711,6 @@ class ExtractBurnin(publish.Extractor):
)
})
def find_matching_profile(self, host_name, task_name, family):
""" Filter profiles by Host name, Task name and main Family.
Filtering keys are "hosts" (list), "tasks" (list), "families" (list).
If a key is not found or is empty then it is expected to match.
Args:
profiles (list): Profiles definition from presets.
host_name (str): Current running host name.
task_name (str): Current context task name.
family (str): Main family of current Instance.
Returns:
dict/None: The most matching profile, or None if no profile matches the criteria.
"""
matching_profiles = None
highest_points = -1
for profile in self.profiles or tuple():
profile_points = 0
profile_value = []
# Host filtering
host_names = profile.get("hosts")
match = self.validate_value_by_regexes(host_name, host_names)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
# Task filtering
task_names = profile.get("tasks")
match = self.validate_value_by_regexes(task_name, task_names)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
# Family filtering
families = profile.get("families")
match = self.validate_value_by_regexes(family, families)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
if profile_points > highest_points:
matching_profiles = []
highest_points = profile_points
if profile_points == highest_points:
profile["__value__"] = profile_value
matching_profiles.append(profile)
if not matching_profiles:
return
if len(matching_profiles) == 1:
return matching_profiles[0]
return self.profile_exclusion(matching_profiles)
def profile_exclusion(self, matching_profiles):
"""Find out most matching profile by host, task and family match.
Profiles are filtered selectively. Each profile should have a
"__value__" key with a list of booleans. Each boolean represents
the existence of a filter for a specific key (host, task, family).
Profiles are looped over in sequence. In each pass they are split into
true_list and false_list; the next pass uses the profiles from
true_list if there are any, otherwise false_list is used.
Filtering ends when only one profile is left in true_list, or when all
boolean positions have been checked, in which case the first of the
remaining profiles is returned.
Args:
matching_profiles (list): Profiles with same values.
Returns:
dict: Most matching profile.
"""
self.log.info(
"Search for first most matching profile in match order:"
" Host name -> Task name -> Family."
)
# Filter all profiles with highest points value. First filter profiles
# with matching host if there are any then filter profiles by task
# name if there are any and lastly filter by family. Else use first in
# list.
idx = 0
final_profile = None
while True:
profiles_true = []
profiles_false = []
for profile in matching_profiles:
value = profile["__value__"]
# Just use first profile when idx is greater than values.
if not idx < len(value):
final_profile = profile
break
if value[idx]:
profiles_true.append(profile)
else:
profiles_false.append(profile)
if final_profile is not None:
break
if profiles_true:
matching_profiles = profiles_true
else:
matching_profiles = profiles_false
if len(matching_profiles) == 1:
final_profile = matching_profiles[0]
break
idx += 1
final_profile.pop("__value__")
return final_profile
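For reference, a compressed sketch of what the deleted find_matching_profile / profile_exclusion pair did, and what openpype.lib.profiles_filtering.filter_profiles now covers generically: score each profile by how many of its filter lists explicitly match the current host/task/family (an unset or empty list is a wildcard worth no points, a regex hit is one point, a miss disqualifies the profile), keep the highest score, and break ties by preferring more explicit filters. The function below is a simplified illustration, not the actual library code:

import re


def _match(value, patterns):
    # -1 = disqualified, 0 = no filter set (wildcard), 1 = explicit regex match
    if not patterns:
        return 0
    return 1 if any(re.match(pattern, value) for pattern in patterns) else -1


def find_profile(profiles, host, task, family):
    best_profiles = []
    best_score = -1
    for profile in profiles:
        scores = [
            _match(host, profile.get("hosts")),
            _match(task, profile.get("tasks")),
            _match(family, profile.get("families")),
        ]
        if -1 in scores:
            continue
        score = sum(scores)
        if score > best_score:
            best_profiles = []
            best_score = score
        if score == best_score:
            best_profiles.append(profile)
    # The removed profile_exclusion() broke ties by preferring explicit host,
    # then task, then family filters; this sketch simply takes the first one.
    return best_profiles[0] if best_profiles else None


profiles = [
    {"hosts": [], "tasks": [], "families": ["review"], "label": "generic"},
    {"hosts": ["nuke"], "tasks": [], "families": ["review"], "label": "nuke only"},
]
print(find_profile(profiles, "nuke", "comp", "review")["label"])  # -> "nuke only"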
def filter_burnins_defs(self, profile, instance):
"""Filter outputs by their values from settings.
@ -909,56 +803,6 @@ class ExtractBurnin(publish.Extractor):
return True
return False
def compile_list_of_regexes(self, in_list):
"""Convert strings in entered list to compiled regex objects."""
regexes = []
if not in_list:
return regexes
for item in in_list:
if not item:
continue
try:
regexes.append(re.compile(item))
except TypeError:
self.log.warning((
"Invalid type \"{}\" value \"{}\"."
" Expected string based object. Skipping."
).format(str(type(item)), str(item)))
return regexes
def validate_value_by_regexes(self, value, in_list):
"""Validate in any regexe from list match entered value.
Args:
in_list (list): List with regexes.
value (str): String where regexes is checked.
Returns:
int: Returns `0` when list is not set or is empty. Returns `1` when
any regex match value and returns `-1` when none of regexes
match value entered.
"""
if not in_list:
return 0
output = -1
regexes = self.compile_list_of_regexes(in_list)
for regex in regexes:
if re.match(regex, value):
output = 1
break
return output
def main_family_from_instance(self, instance):
"""Return main family of entered instance."""
family = instance.data.get("family")
if not family:
family = instance.data["families"][0]
return family
def families_from_instance(self, instance):
"""Return all families of entered instance."""
families = []

View file

@ -81,6 +81,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder
families = ["workfile",
"pointcache",
"proxyAbc",
"camera",
"animation",
"model",
@ -111,6 +112,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"image",
"assembly",
"fbx",
"gltf",
"textures",
"action",
"harmony.template",

View file

@ -76,6 +76,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
order = pyblish.api.IntegratorOrder + 0.00001
families = ["workfile",
"pointcache",
"proxyAbc",
"camera",
"animation",
"model",
@ -106,6 +107,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"image",
"assembly",
"fbx",
"gltf",
"textures",
"action",
"harmony.template",

View file

@ -209,6 +209,9 @@
{
"families": [],
"hosts": [],
"task_types": [],
"task_names": [],
"subsets": [],
"burnins": {
"burnin": {
"TOP_LEFT": "{yy}-{mm}-{dd}",

View file

@ -59,6 +59,7 @@
"default_render_image_folder": "renders/maya",
"enable_all_lights": true,
"aov_separator": "underscore",
"remove_aovs": false,
"reset_current_frame": false,
"arnold_renderer": {
"image_prefix": "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>",
@ -149,6 +150,14 @@
"Main"
]
},
"CreateProxyAlembic": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main"
]
},
"CreateMultiverseUsd": {
"enabled": true,
"defaults": [
@ -171,7 +180,21 @@
"enabled": true,
"defaults": [
"Main"
]
],
"expandProcedurals": false,
"motionBlur": true,
"motionBlurKeys": 2,
"motionBlurLength": 0.5,
"maskOptions": false,
"maskCamera": false,
"maskLight": false,
"maskShape": false,
"maskShader": false,
"maskOverride": false,
"maskDriver": false,
"maskFilter": false,
"maskColor_manager": false,
"maskOperator": false
},
"CreateAssembly": {
"enabled": true,
@ -250,6 +273,9 @@
"CollectFbxCamera": {
"enabled": false
},
"CollectGLTF": {
"enabled": false
},
"ValidateInstanceInContext": {
"enabled": true,
"optional": true,
@ -569,6 +595,12 @@
"optional": false,
"active": true
},
"ExtractProxyAlembic": {
"enabled": true,
"families": [
"proxyAbc"
]
},
"ExtractAlembic": {
"enabled": true,
"families": [
@ -915,7 +947,7 @@
"current_context": [
{
"subset_name_filters": [
"\".+[Mm]ain\""
".+[Mm]ain"
],
"families": [
"model"
@ -932,7 +964,8 @@
"subset_name_filters": [],
"families": [
"animation",
"pointcache"
"pointcache",
"proxyAbc"
],
"repre_names": [
"abc"
@ -1007,4 +1040,4 @@
"ValidateNoAnimation": false
}
}
}
}

View file

@ -526,11 +526,28 @@
"object_type": "text"
},
{
"type": "hosts-enum",
"key": "hosts",
"label": "Hosts",
"label": "Host names",
"type": "hosts-enum",
"multiselection": true
},
{
"key": "task_types",
"label": "Task types",
"type": "task-types-enum"
},
{
"key": "task_names",
"label": "Task names",
"type": "list",
"object_type": "text"
},
{
"key": "subsets",
"label": "Subset names",
"type": "list",
"object_type": "text"
},
{
"type": "splitter"
},

Some files were not shown because too many files have changed in this diff.