Merge branch 'develop' into bugfix/OP-7281_Maya-Review---playblast-renders-without-textures

This commit is contained in:
Kayla Man 2023-12-07 22:00:38 +08:00
commit ccc95f0797
132 changed files with 3889 additions and 1042 deletions

View file

@ -35,6 +35,9 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.7-nightly.6
- 3.17.7-nightly.5
- 3.17.7-nightly.4
- 3.17.7-nightly.3
- 3.17.7-nightly.2
- 3.17.7-nightly.1
@ -132,9 +135,6 @@ body:
- 3.15.3-nightly.2
- 3.15.3-nightly.1
- 3.15.2
- 3.15.2-nightly.6
- 3.15.2-nightly.5
- 3.15.2-nightly.4
validations:
required: true
- type: dropdown

View file

@ -296,12 +296,15 @@ def run(script):
@click.option("--mongo_url",
help="MongoDB for testing.",
default=None)
@click.option("--dump_databases",
help="Dump all databases to data folder.",
default=None)
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
timeout, setup_only, mongo_url, app_group):
timeout, setup_only, mongo_url, app_group, dump_databases):
"""Run all automatic tests after proper initialization via start.py"""
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
persist, app_variant, timeout, setup_only,
mongo_url, app_group)
mongo_url, app_group, dump_databases)
@main.command(help="DEPRECATED - run sync server")
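For the new --dump_databases option to take effect, PypeCommands.run_tests must accept a matching parameter. Only the parameter name is confirmed by the call above; the sketch below is illustrative:

    class PypeCommands:
        def run_tests(self, folder, mark, pyargs, test_data_folder,
                      persist, app_variant, timeout, setup_only,
                      mongo_url, app_group, dump_databases=None):
            # forward dump_databases so the harness can dump all
            # databases to the data folder after the test run
            ...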

View file

@ -80,8 +80,8 @@ def _get_subsets(
for subset in con.get_products(
project_name,
subset_ids,
subset_names,
product_ids=subset_ids,
product_names=subset_names,
folder_ids=folder_ids,
names_by_folder_ids=names_by_folder_ids,
active=active,
@ -113,23 +113,23 @@ def _get_versions(
queried_versions = con.get_versions(
project_name,
version_ids,
subset_ids,
versions,
hero,
standard,
latest,
version_ids=version_ids,
product_ids=subset_ids,
versions=versions,
hero=hero,
standard=standard,
latest=latest,
active=active,
fields=fields
)
versions = []
version_entities = []
hero_versions = []
for version in queried_versions:
if version["version"] < 0:
hero_versions.append(version)
else:
versions.append(convert_v4_version_to_v3(version))
version_entities.append(convert_v4_version_to_v3(version))
if hero_versions:
subset_ids = set()
@ -159,9 +159,9 @@ def _get_versions(
break
conv_hero = convert_v4_version_to_v3(hero_version)
conv_hero["version_id"] = version_id
versions.append(conv_hero)
version_entities.append(conv_hero)
return versions
return version_entities
def get_asset_by_id(project_name, asset_id, fields=None):
@ -539,11 +539,11 @@ def get_representations(
representations = con.get_representations(
project_name,
representation_ids,
representation_names,
version_ids,
names_by_version_ids,
active,
representation_ids=representation_ids,
representation_names=representation_names,
version_ids=version_ids,
names_by_version_ids=names_by_version_ids,
active=active,
fields=fields
)
for representation in representations:
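The pattern across this file is uniform: positional arguments to the ayon_api connection methods become keyword arguments, so a reordering of parameters on the client library side cannot silently misbind them. A minimal before/after sketch using names from the diff:

    # fragile: binds by position, breaks if the signature is reordered
    con.get_products(project_name, subset_ids, subset_names)

    # robust: binds by name regardless of parameter order
    con.get_products(
        project_name,
        product_ids=subset_ids,
        product_names=subset_names,
    )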

View file

@ -27,6 +27,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
"tvpaint",
"substancepainter",
"aftereffects",
"wrap"
}
launch_types = {LaunchTypes.local}

View file

@ -19,7 +19,8 @@ class CopyTemplateWorkfile(PreLaunchHook):
# Before `AddLastWorkfileToLaunchArgs`
order = 0
app_groups = {"blender", "photoshop", "tvpaint", "aftereffects"}
app_groups = {"blender", "photoshop", "tvpaint", "aftereffects",
"wrap"}
launch_types = {LaunchTypes.local}
def execute(self):

View file

@ -56,16 +56,15 @@ class RenderCreator(Creator):
use_composition_name = (pre_create_data.get("use_composition_name") or
len(comps) > 1)
for comp in comps:
composition_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
if use_composition_name:
if "{composition}" not in subset_name_from_ui.lower():
subset_name_from_ui += "{Composition}"
composition_name = re.sub(
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
dynamic_fill = prepare_template_data({"composition":
composition_name})
subset_name = subset_name_from_ui.format(**dynamic_fill)
@ -81,6 +80,8 @@ class RenderCreator(Creator):
inst.subset_name))
data["members"] = [comp.id]
data["orig_comp_name"] = composition_name
new_instance = CreatedInstance(self.family, subset_name, data,
self)
if "farm" in pre_create_data:
@ -88,7 +89,7 @@ class RenderCreator(Creator):
new_instance.creator_attributes["farm"] = use_farm
review = pre_create_data["mark_for_review"]
new_instance.creator_attributes["mark_for_review"] = review
api.get_stub().imprint(new_instance.id,
new_instance.data_to_store())
@ -150,16 +151,18 @@ class RenderCreator(Creator):
subset_change.new_value)
def remove_instances(self, instances):
"""Removes metadata and renames to original comp name if available."""
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
subset = instance.data["subset"]
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
orig_comp_name = instance.data.get("orig_comp_name")
if comp:
new_comp_name = comp.name.replace(subset, '')
if not new_comp_name:
if orig_comp_name:
new_comp_name = orig_comp_name
else:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)

View file

@ -1,4 +1,4 @@
import os
from pathlib import Path
import bpy
@ -59,7 +59,7 @@ def get_render_product(output_path, name, aov_sep):
instance (pyblish.api.Instance): The instance to publish.
ext (str): The image format to render.
"""
filepath = os.path.join(output_path, name)
filepath = output_path / name.lstrip("/")
render_product = f"{filepath}{aov_sep}beauty.####"
render_product = render_product.replace("\\", "/")
@ -180,7 +180,7 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
return []
output.file_slots.clear()
output.base_path = output_path
output.base_path = str(output_path)
aov_file_products = []
@ -191,8 +191,9 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
output.file_slots.new(filepath)
aov_file_products.append(
(render_pass.name, os.path.join(output_path, filepath)))
filename = str(output_path / filepath.lstrip("/"))
aov_file_products.append((render_pass.name, filename))
node_input = output.inputs[-1]
@ -214,12 +215,11 @@ def imprint_render_settings(node, data):
def prepare_rendering(asset_group):
name = asset_group.name
filepath = bpy.data.filepath
filepath = Path(bpy.data.filepath)
assert filepath, "Workfile not saved. Please save the file first."
file_path = os.path.dirname(filepath)
file_name = os.path.basename(filepath)
file_name, _ = os.path.splitext(file_name)
dirpath = filepath.parent
file_name = Path(filepath.name).stem
project = get_current_project_name()
settings = get_project_settings(project)
@ -232,7 +232,7 @@ def prepare_rendering(asset_group):
set_render_format(ext, multilayer)
aov_list, custom_passes = set_render_passes(settings)
output_path = os.path.join(file_path, render_folder, file_name)
output_path = Path.joinpath(dirpath, render_folder, file_name)
render_product = get_render_product(output_path, name, aov_sep)
aov_file_product = set_node_tree(
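The lstrip("/") calls above are not cosmetic: joining a pathlib path with a segment that starts with a slash discards everything before it. A quick illustration of that standard pathlib behavior:

    from pathlib import PurePosixPath

    base = PurePosixPath("/renders/shot")
    print(base / "beauty")    # /renders/shot/beauty
    print(base / "/beauty")   # /beauty -- output_path silently dropped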

View file

@ -11,12 +11,12 @@ import pyblish.api
class CollectBlenderRender(pyblish.api.InstancePlugin):
"""Gather all publishable render layers from renderSetup."""
"""Gather all publishable render instances."""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["blender"]
families = ["render"]
label = "Collect Render Layers"
label = "Collect Render"
sync_workfile_version = False
@staticmethod
@ -78,8 +78,6 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
assert render_data, "No render data found."
self.log.debug(f"render_data: {dict(render_data)}")
render_product = render_data.get("render_product")
aov_file_product = render_data.get("aov_file_product")
ext = render_data.get("image_format")
@ -101,7 +99,7 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
expected_files = expected_beauty | expected_aovs
instance.data.update({
"family": "render.farm",
"families": ["render", "render.farm"],
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_handle_start,
@ -120,5 +118,3 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
"colorspaceView": "ACES 1.0 SDR-video",
"renderProducts": colorspace.ARenderProduct(),
})
self.log.debug(f"data: {instance.data}")

View file

@ -14,7 +14,7 @@ from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract a layout."""
label = "Extract Layout"
label = "Extract Layout (JSON)"
hosts = ["blender"]
families = ["layout"]
optional = True

View file

@ -26,6 +26,10 @@ class ExtractThumbnail(publish.Extractor):
def process(self, instance):
self.log.debug("Extracting capture..")
if instance.data.get("thumbnailSource"):
self.log.debug("Thumbnail source found, skipping...")
return
stagingdir = self.staging_dir(instance)
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]

View file

@ -14,7 +14,7 @@ class IncrementWorkfileVersion(
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout", "blendScene",
"pointcache", "render"]
"pointcache", "render.farm"]
def process(self, context):
if not self.is_active(context.data):

View file

@ -19,7 +19,7 @@ class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
"""
order = ValidateContentsOrder
families = ["render.farm"]
families = ["render"]
hosts = ["blender"]
label = "Validate Render Output for Deadline"
optional = True

View file

@ -1,3 +1,4 @@
import os
import sys
from qtpy import QtWidgets, QtCore, QtGui
@ -18,6 +19,10 @@ from openpype.resources import get_openpype_icon_filepath
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
MENU_LABEL = os.environ["AVALON_LABEL"]
self = sys.modules[__name__]
self.menu = None
@ -26,7 +31,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)
self.setObjectName("OpenPypeMenu")
self.setObjectName(f"{MENU_LABEL}Menu")
icon_path = get_openpype_icon_filepath()
icon = QtGui.QIcon(icon_path)
@ -41,7 +46,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
| QtCore.Qt.WindowStaysOnTopHint
)
self.render_mode_widget = None
self.setWindowTitle("OpenPype")
self.setWindowTitle(MENU_LABEL)
asset_label = QtWidgets.QLabel("Context", self)
asset_label.setStyleSheet(

View file

@ -0,0 +1,60 @@
{
Action
{
ID = "AYON_Menu",
Category = "AYON",
Name = "AYON Menu",
Targets =
{
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("AYON:../MenuScripts/launch_menu.py")
if bmd.fileexists(scriptPath) == false then
print("[AYON Error] Can't run file: " .. scriptPath)
else
target:RunScript(scriptPath)
end
]=],
},
},
},
Action
{
ID = "AYON_Install_PySide2",
Category = "AYON",
Name = "Install PySide2",
Targets =
{
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("AYON:../MenuScripts/install_pyside2.py")
if bmd.fileexists(scriptPath) == false then
print("[AYON Error] Can't run file: " .. scriptPath)
else
target:RunScript(scriptPath)
end
]=],
},
},
},
Menus
{
Target = "ChildFrame",
Before "Help"
{
Sub "AYON"
{
"AYON_Menu{}",
"_",
Sub "Admin" {
"AYON_Install_PySide2{}"
}
}
},
},
}

View file

@ -0,0 +1,19 @@
{
Locked = true,
Global = {
Paths = {
Map = {
["AYON:"] = "$(OPENPYPE_FUSION)/deploy/ayon",
["Config:"] = "UserPaths:Config;AYON:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
},
},
Script = {
PythonVersion = 3,
Python3Forced = true
},
UserInterface = {
Language = "en_US"
},
},
}

View file

@ -10,7 +10,7 @@
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py")
local scriptPath = app:MapPath("OpenPype:../MenuScripts/launch_menu.py")
if bmd.fileexists(scriptPath) == false then
print("[OpenPype Error] Can't run file: " .. scriptPath)
else
@ -31,7 +31,7 @@
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py")
local scriptPath = app:MapPath("OpenPype:../MenuScripts/install_pyside2.py")
if bmd.fileexists(scriptPath) == false then
print("[OpenPype Error] Can't run file: " .. scriptPath)
else

View file

@ -3,7 +3,7 @@ Locked = true,
Global = {
Paths = {
Map = {
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy/openpype",
["Config:"] = "UserPaths:Config;OpenPype:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
},

View file

@ -2,6 +2,7 @@ import os
import shutil
import platform
from pathlib import Path
from openpype import AYON_SERVER_ENABLED
from openpype.hosts.fusion import (
FUSION_HOST_DIR,
FUSION_VERSIONS_DICT,
@ -161,6 +162,13 @@ class FusionCopyPrefsPrelaunch(PreLaunchHook):
# profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
if AYON_SERVER_ENABLED:
master_prefs = Path(
FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs")
else:
master_prefs = Path(
FUSION_HOST_DIR, "deploy", "openpype", "fusion_shared.prefs")
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
self.launch_context.env[master_prefs_variable] = str(master_prefs)

View file

@ -25,20 +25,24 @@ def enabled_savers(comp, savers):
"""
passthrough_key = "TOOLB_PassThrough"
original_states = {}
enabled_save_names = {saver.Name for saver in savers}
enabled_saver_names = {saver.Name for saver in savers}
all_savers = comp.GetToolList(False, "Saver").values()
savers_by_name = {saver.Name: saver for saver in all_savers}
try:
all_savers = comp.GetToolList(False, "Saver").values()
for saver in all_savers:
original_state = saver.GetAttrs()[passthrough_key]
original_states[saver] = original_state
original_states[saver.Name] = original_state
# The passthrough state we want to set (passthrough != enabled)
state = saver.Name not in enabled_save_names
state = saver.Name not in enabled_saver_names
if state != original_state:
saver.SetAttrs({passthrough_key: state})
yield
finally:
for saver, original_state in original_states.items():
for saver_name, original_state in original_states.items():
saver = savers_by_name[saver_name]
saver.SetAttrs({"TOOLB_PassThrough": original_state})

View file

@ -13,7 +13,7 @@ var LD_OPENHARMONY_PATH = System.getenv('LIB_OPENHARMONY_PATH');
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH + '/openHarmony.js';
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH.replace(/\\/g, "/");
include(LD_OPENHARMONY_PATH);
this.__proto__['$'] = $;
//this.__proto__['$'] = $;
function Client() {
var self = this;

View file

@ -59,8 +59,8 @@ class ExtractRender(pyblish.api.InstancePlugin):
args = [application_path, "-batch",
"-frames", str(frame_start), str(frame_end),
"-scene", scene_path]
self.log.info(f"running [ {application_path} {' '.join(args)}")
scene_path]
self.log.info(f"running: {' '.join(args)}")
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,

View file

@ -95,18 +95,18 @@ def menu_install():
menu.addSeparator()
publish_action = menu.addAction("Publish...")
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
publish_action.triggered.connect(
lambda *args: publish(hiero.ui.mainWindow())
)
creator_action = menu.addAction("Create...")
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
creator_action.triggered.connect(
lambda: host_tools.show_creator(parent=main_window)
)
publish_action = menu.addAction("Publish...")
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
publish_action.triggered.connect(
lambda *args: publish(hiero.ui.mainWindow())
)
loader_action = menu.addAction("Load...")
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
loader_action.triggered.connect(

View file

@ -121,8 +121,8 @@ def get_id_required_nodes():
return list(nodes)
def get_output_parameter(node):
"""Return the render output parameter name of the given node
def get_export_parameter(node):
"""Return the export output parameter of the given node
Example:
root = hou.node("/obj")
@ -137,13 +137,70 @@ def get_output_parameter(node):
hou.Parm
"""
node_type = node.type().description()
node_type = node.type().name()
if node_type == "geometry":
# Ensures the proper Take is selected for each ROP to retrieve the correct
# ifd
try:
rop_take = hou.takes.findTake(node.parm("take").eval())
if rop_take is not None:
hou.takes.setCurrentTake(rop_take)
except AttributeError:
# hou object doesn't always have the 'takes' attribute
pass
if node_type == "Mantra" and node.parm("soho_outputmode").eval():
return node.parm("soho_diskfile")
elif node_type == "Alfred":
return node.parm("alf_diskfile")
elif (node_type == "RenderMan" or node_type == "RenderMan RIS"):
pre_ris22 = node.parm("rib_outputmode") and \
node.parm("rib_outputmode").eval()
ris22 = node.parm("diskfile") and node.parm("diskfile").eval()
if pre_ris22 or ris22:
return node.parm("soho_diskfile")
elif node_type == "Redshift" and node.parm("RS_archive_enable").eval():
return node.parm("RS_archive_file")
elif node_type == "Wedge" and node.parm("driver").eval():
return get_export_parameter(node.node(node.parm("driver").eval()))
elif node_type == "Arnold":
return node.parm("ar_ass_file")
elif node_type == "Alembic" and node.parm("use_sop_path").eval():
return node.parm("sop_path")
elif node_type == "Shotgun Mantra" and node.parm("soho_outputmode").eval():
return node.parm("sgtk_soho_diskfile")
elif node_type == "Shotgun Alembic" and node.parm("use_sop_path").eval():
return node.parm("sop_path")
elif node.type().nameWithCategory() == "Driver/vray_renderer":
return node.parm("render_export_filepath")
raise TypeError("Node type '%s' not supported" % node_type)
def get_output_parameter(node):
"""Return the render output parameter of the given node
Example:
root = hou.node("/obj")
my_alembic_node = root.createNode("alembic")
get_output_parameter(my_alembic_node)
# Result: "output"
Args:
node(hou.Node): node instance
Returns:
hou.Parm
"""
node_type = node.type().description()
category = node.type().category().name()
# Figure out which type of node is being rendered
if node_type == "Geometry" or node_type == "Filmbox FBX" or \
(node_type == "ROP Output Driver" and category == "Sop"):
return node.parm("sopoutput")
elif node_type == "alembic":
return node.parm("filename")
elif node_type == "comp":
elif node_type == "Composite":
return node.parm("copoutput")
elif node_type == "opengl":
return node.parm("picture")
@ -155,6 +212,15 @@ def get_output_parameter(node):
elif node_type == "ifd":
if node.evalParm("soho_outputmode"):
return node.parm("soho_diskfile")
elif node_type == "Octane":
return node.parm("HO_img_fileName")
elif node_type == "Fetch":
inner_node = node.node(node.parm("source").eval())
if inner_node:
return get_output_parameter(inner_node)
elif node.type().nameWithCategory() == "Driver/vray_renderer":
return node.parm("SettingsOutput_img_file_path")
raise TypeError("Node type '%s' not supported" % node_type)

View file

@ -13,6 +13,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
# Default extension
ext = "exr"
# Default to split export and render jobs
export_job = True
def create(self, subset_name, instance_data, pre_create_data):
import hou
@ -48,6 +51,15 @@ class CreateArnoldRop(plugin.HoudiniCreator):
"ar_exr_half_precision": 1 # half precision
}
if pre_create_data.get("export_job"):
ass_filepath = \
"{export_dir}{subset_name}/{subset_name}.$F4.ass".format(
export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
subset_name=subset_name,
)
parms["ar_ass_export_enable"] = 1
parms["ar_ass_file"] = ass_filepath
instance_node.setParms(parms)
# Lock any parameters in this list
@ -66,6 +78,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
EnumDef("image_format",
image_format_enum,
default=self.ext,
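What the export template resolves to at create time, under an assumed $HIP; hou.text.expandString expands Houdini variables immediately, while $F4 stays literal for per-frame expansion at render time:

    export_dir = hou.text.expandString("$HIP/pyblish/ass/")
    # e.g. "/proj/sh010/work/pyblish/ass/", so with subset_name
    # "renderMain" the ROP writes:
    #   /proj/sh010/work/pyblish/ass/renderMain/renderMain.$F4.ass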

View file

@ -12,6 +12,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
family = "mantra_rop"
icon = "magic"
# Default to split export and render jobs
export_job = True
def create(self, subset_name, instance_data, pre_create_data):
import hou # noqa
@ -44,6 +47,15 @@ class CreateMantraROP(plugin.HoudiniCreator):
"vm_picture": filepath,
}
if pre_create_data.get("export_job"):
ifd_filepath = \
"{export_dir}{subset_name}/{subset_name}.$F4.ifd".format(
export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
subset_name=subset_name,
)
parms["soho_outputmode"] = 1
parms["soho_diskfile"] = ifd_filepath
if self.selected_nodes:
# If camera found in selection
# we will use as render camera
@ -78,6 +90,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
EnumDef("image_format",
image_format_enum,
default="exr",

View file

@ -16,6 +16,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
icon = "magic"
ext = "exr"
# Default to split export and render jobs
export_job = True
def create(self, subset_name, instance_data, pre_create_data):
instance_data.pop("active", None)
@ -52,6 +55,17 @@ class CreateVrayROP(plugin.HoudiniCreator):
"SettingsEXR_bits_per_channel": "16" # half precision
}
if pre_create_data.get("export_job"):
scene_filepath = \
"{export_dir}{subset_name}/{subset_name}.$F4.vrscene".format(
export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
subset_name=subset_name,
)
# Setting render_export_mode to "2" because that's for
# "Export only" ("1" is for "Export & Render")
parms["render_export_mode"] = "2"
parms["render_export_filepath"] = scene_filepath
if self.selected_nodes:
# set up the render camera from the selected node
camera = None
@ -140,6 +154,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
BoolDef("farm",
label="Submitting to Farm",
default=True),
BoolDef("export_job",
label="Split export and render jobs",
default=self.export_job),
EnumDef("image_format",
image_format_enum,
default=self.ext,

View file

@ -40,6 +40,25 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "ar_picture")
render_products = []
# Store whether we are splitting the render job (export + render)
export_job = bool(rop.parm("ar_ass_export_enable").eval())
instance.data["exportJob"] = export_job
export_prefix = None
export_products = []
if export_job:
export_prefix = evalParmNoFrame(
rop, "ar_ass_file", pad_character="0"
)
beauty_export_product = self.get_render_product_name(
prefix=export_prefix,
suffix=None)
export_products.append(beauty_export_product)
self.log.debug(
"Found export product: {}".format(beauty_export_product)
)
instance.data["ifdFile"] = beauty_export_product
instance.data["exportFiles"] = list(export_products)
# Default beauty AOV
beauty_product = self.get_render_product_name(prefix=default_prefix,
suffix=None)

View file

@ -44,6 +44,25 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
default_prefix = evalParmNoFrame(rop, "vm_picture")
render_products = []
# Store whether we are splitting the render job (export + render)
export_job = bool(rop.parm("soho_outputmode").eval())
instance.data["exportJob"] = export_job
export_prefix = None
export_products = []
if export_job:
export_prefix = evalParmNoFrame(
rop, "soho_diskfile", pad_character="0"
)
beauty_export_product = self.get_render_product_name(
prefix=export_prefix,
suffix=None)
export_products.append(beauty_export_product)
self.log.debug(
"Found export product: {}".format(beauty_export_product)
)
instance.data["ifdFile"] = beauty_export_product
instance.data["exportFiles"] = list(export_products)
# Default beauty AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=None

View file

@ -45,7 +45,26 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
render_products = []
# TODO: add render elements if render element
beauty_product = self.get_beauty_render_product(default_prefix)
# Store whether we are splitting the render job in an export + render
export_job = rop.parm("render_export_mode").eval() == "2"
instance.data["exportJob"] = export_job
export_prefix = None
export_products = []
if export_job:
export_prefix = evalParmNoFrame(
rop, "render_export_filepath", pad_character="0"
)
beauty_export_product = self.get_render_product_name(
prefix=export_prefix,
suffix=None)
export_products.append(beauty_export_product)
self.log.debug(
"Found export product: {}".format(beauty_export_product)
)
instance.data["ifdFile"] = beauty_export_product
instance.data["exportFiles"] = list(export_products)
beauty_product = self.get_render_product_name(default_prefix)
render_products.append(beauty_product)
files_by_aov = {
"RGB Color": self.generate_expected_files(instance,
@ -79,7 +98,7 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
instance.data["colorspaceDisplay"] = colorspace_data["display"]
instance.data["colorspaceView"] = colorspace_data["view"]
def get_beauty_render_product(self, prefix, suffix="<reName>"):
def get_render_product_name(self, prefix, suffix="<reName>"):
"""Return the beauty output filename if render element enabled
"""
# Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix`
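All three collectors (Arnold, Mantra, V-Ray) stamp the same keys onto the instance, and these are what the Deadline submission plugin keys off later (see AbstractSubmitDeadline further down). In sketch form:

    instance.data["exportJob"]    # bool: split into export + render jobs
    instance.data["ifdFile"]      # first export product (ifd/ass/vrscene)
    instance.data["exportFiles"]  # all export products for the export job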

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -2,10 +2,12 @@
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
from openpype import AYON_SERVER_ENABLED
def main():
print("Installing OpenPype ...")
print("Installing {} ...".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
install_host(HoudiniHost())

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""3dsmax menu definition of OpenPype."""
"""3dsmax menu definition of AYON."""
import os
from qtpy import QtWidgets, QtCore
from pymxs import runtime as rt
@ -8,7 +9,7 @@ from openpype.hosts.max.api import lib
class OpenPypeMenu(object):
"""Object representing OpenPype menu.
"""Object representing OpenPype/AYON menu.
This is using "hack" to inject itself before "Help" menu of 3dsmax.
For some reason `postLoadingMenus` event doesn't fire, and main menu
@ -50,17 +51,17 @@ class OpenPypeMenu(object):
return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0]
def get_or_create_openpype_menu(
self, name: str = "&OpenPype",
self, name: str = "&Openpype",
before: str = "&Help") -> QtWidgets.QAction:
"""Create OpenPype menu.
"""Create AYON menu.
Args:
name (str, Optional): OpenPypep menu name.
name (str, Optional): AYON menu name.
before (str, Optional): Name of the 3dsmax main menu item to
add OpenPype menu before.
add AYON menu before.
Returns:
QtWidgets.QAction: OpenPype menu action.
QtWidgets.QAction: AYON menu action.
"""
if self.menu is not None:
@ -77,15 +78,15 @@ class OpenPypeMenu(object):
if before in item.title():
help_action = item.menuAction()
op_menu = QtWidgets.QMenu("&OpenPype")
tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON"
op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label))
menu_bar.insertMenu(help_action, op_menu)
self.menu = op_menu
return op_menu
def build_openpype_menu(self) -> QtWidgets.QAction:
"""Build items in OpenPype menu."""
"""Build items in AYON menu."""
openpype_menu = self.get_or_create_openpype_menu()
load_action = QtWidgets.QAction("Load...", openpype_menu)
load_action.triggered.connect(self.load_callback)

View file

@ -175,7 +175,7 @@ def containerise(name: str, nodes: list, context,
def load_custom_attribute_data():
"""Re-loading the Openpype/AYON custom parameter built by the creator
"""Re-loading the AYON custom parameter built by the creator
Returns:
attribute: re-loading the custom OP attributes set in Maxscript
@ -213,7 +213,7 @@ def import_custom_attribute_data(container: str, selections: list):
def update_custom_attribute_data(container: str, selections: list):
"""Updating the Openpype/AYON custom parameter built by the creator
"""Updating the AYON custom parameter built by the creator
Args:
container (str): target container which adds custom attributes

View file

@ -33,7 +33,7 @@ class ImportModelRender(InventoryAction):
)
def process(self, containers):
from maya import cmds
from maya import cmds # noqa: F401
project_name = get_current_project_name()
for container in containers:
@ -66,7 +66,7 @@ class ImportModelRender(InventoryAction):
None
"""
from maya import cmds
from maya import cmds # noqa: F401
project_name = get_current_project_name()
repre_docs = get_representations(
@ -85,12 +85,7 @@ class ImportModelRender(InventoryAction):
if scene_type_regex.fullmatch(repre_name):
look_repres.append(repre_doc)
# QUESTION should we care if there is more then one look
# representation? (since it's based on regex match)
look_repre = None
if look_repres:
look_repre = look_repres[0]
look_repre = look_repres[0] if look_repres else None
# QUESTION shouldn't be json representation validated too?
if not look_repre:
print("No model render sets for this model version..")

View file

@ -9,7 +9,7 @@ from openpype.pipeline import (
)
from openpype.pipeline.load.utils import get_representation_path_from_context
from openpype.pipeline.colorspace import (
get_imageio_colorspace_from_filepath,
get_imageio_file_rules_colorspace_from_filepath,
get_imageio_config,
get_imageio_file_rules
)
@ -285,10 +285,10 @@ class FileNodeLoader(load.LoaderPlugin):
)
path = get_representation_path_from_context(context)
colorspace = get_imageio_colorspace_from_filepath(
path=path,
host_name=host_name,
project_name=project_name,
colorspace = get_imageio_file_rules_colorspace_from_filepath(
path,
host_name,
project_name,
config_data=config_data,
file_rules=file_rules,
project_settings=project_settings

View file

@ -265,6 +265,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
class MayaUSDReferenceLoader(ReferenceLoader):
"""Reference USD file to native Maya nodes using MayaUSDImport reference"""
label = "Reference Maya USD"
families = ["usd"]
representations = ["usd"]
extensions = {"usd", "usda", "usdc"}

View file

@ -45,11 +45,23 @@ FILE_NODES = {
"PxrTexture": "filename"
}
RENDER_SET_TYPES = [
"VRayDisplacement",
"VRayLightMesh",
"VRayObjectProperties",
"RedshiftObjectId",
"RedshiftMeshParameters",
]
# Keep only node types that actually exist
all_node_types = set(cmds.allNodeTypes())
for node_type in list(FILE_NODES.keys()):
if node_type not in all_node_types:
FILE_NODES.pop(node_type)
# Rebuild the list rather than removing entries while iterating over
# it, which would skip elements
RENDER_SET_TYPES = [node_type for node_type in RENDER_SET_TYPES
                    if node_type in all_node_types]
del all_node_types
# Cache pixar dependency node types so we can perform a type lookup against it
@ -69,9 +81,7 @@ def get_attributes(dictionary, attr, node=None):
else:
val = dictionary.get(attr, [])
if not isinstance(val, list):
return [val]
return val
return val if isinstance(val, list) else [val]
def get_look_attrs(node):
@ -106,7 +116,7 @@ def get_look_attrs(node):
def node_uses_image_sequence(node, node_path):
# type: (str) -> bool
# type: (str, str) -> bool
"""Return whether file node uses an image sequence or single image.
Determine if a node uses an image sequence or just a single image,
@ -114,6 +124,7 @@ def node_uses_image_sequence(node, node_path):
Args:
node (str): Name of the Maya node
node_path (str): The file path of the node
Returns:
bool: True if node uses an image sequence
@ -247,7 +258,7 @@ def get_file_node_files(node):
# For sequences get all files and filter to only existing files
result = []
for index, path in enumerate(paths):
for path in paths:
if node_uses_image_sequence(node, path):
glob_pattern = seq_to_glob(path)
result.extend(glob.glob(glob_pattern))
@ -358,6 +369,7 @@ class CollectLook(pyblish.api.InstancePlugin):
for attr in shader_attrs:
if cmds.attributeQuery(attr, node=look, exists=True):
existing_attrs.append("{}.{}".format(look, attr))
materials = cmds.listConnections(existing_attrs,
source=True,
destination=False) or []
@ -367,30 +379,32 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.debug("Found the following sets:\n{}".format(look_sets))
# Get the entire node chain of the look sets
# history = cmds.listHistory(look_sets, allConnections=True)
history = cmds.listHistory(materials, allConnections=True)
# if materials list is empty, listHistory() will crash with
# RuntimeError
history = set()
if materials:
history = set(
cmds.listHistory(materials, allConnections=True))
# Since we retrieved history only of the connected materials
# connected to the look sets above we now add direct history
# for some of the look sets directly
# handling render attribute sets
render_set_types = [
"VRayDisplacement",
"VRayLightMesh",
"VRayObjectProperties",
"RedshiftObjectId",
"RedshiftMeshParameters",
]
render_sets = cmds.ls(look_sets, type=render_set_types)
if render_sets:
history.extend(
cmds.listHistory(render_sets,
future=False,
pruneDagObjects=True)
or []
)
# Maya (at least 2024) crashes with Warning when render set type
# isn't available. cmds.ls() will return empty list
if RENDER_SET_TYPES:
render_sets = cmds.ls(look_sets, type=RENDER_SET_TYPES)
if render_sets:
history.update(
cmds.listHistory(render_sets,
future=False,
pruneDagObjects=True)
or []
)
# Ensure unique entries only
history = list(set(history))
history = list(history)
files = cmds.ls(history,
# It's important only node types are passed that

View file

@ -50,11 +50,11 @@ class ExtractRedshiftProxy(publish.Extractor):
# Padding is taken from number of digits of the end_frame.
# Not sure where Redshift is taking it.
repr_files = [
"{}.{}{}".format(root, str(frame).rjust(4, "0"), ext) # noqa: E501
"{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext) # noqa: E501
for frame in range(
int(start_frame),
int(end_frame) + 1,
int(instance.data["step"]),
int(instance.data["step"])
)]
# vertex_colors = instance.data.get("vertexColors", False)

View file

@ -3,60 +3,76 @@ from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder,
RepairContextAction,
PublishValidationError
)
class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin):
class ValidateLookDefaultShadersConnections(pyblish.api.ContextPlugin):
"""Validate default shaders in the scene have their default connections.
For example the lambert1 could potentially be disconnected from the
initialShadingGroup. As such it's not lambert1 that will be identified
as the default shader which can have unpredictable results.
For example the standardSurface1 or lambert1 (Maya 2023 and before) could
potentially be disconnected from the initialShadingGroup. As such it's not
lambert1 that will be identified as the default shader which can have
unpredictable results.
To fix the default connections need to be made again. See the logs for
more details on which connections are missing.
"""
order = ValidateContentsOrder
order = pyblish.api.ValidatorOrder - 0.4999
families = ['look']
hosts = ['maya']
label = 'Look Default Shader Connections'
actions = [RepairContextAction]
# The default connections to check
DEFAULTS = [("initialShadingGroup.surfaceShader", "lambert1"),
("initialParticleSE.surfaceShader", "lambert1"),
("initialParticleSE.volumeShader", "particleCloud1")
]
DEFAULTS = {
"initialShadingGroup.surfaceShader": ["standardSurface1.outColor",
"lambert1.outColor"],
"initialParticleSE.surfaceShader": ["standardSurface1.outColor",
"lambert1.outColor"],
"initialParticleSE.volumeShader": ["particleCloud1.outColor"]
}
def process(self, instance):
def process(self, context):
# Ensure check is run only once. We don't use ContextPlugin because
# of a bug where the ContextPlugin will always be visible. Even when
# the family is not present in an instance.
key = "__validate_look_default_shaders_connections_checked"
context = instance.context
is_run = context.data.get(key, False)
if is_run:
return
else:
context.data[key] = True
if self.get_invalid():
raise PublishValidationError(
"Default shaders in your scene do not have their "
"default shader connections. Please repair them to continue."
)
@classmethod
def get_invalid(cls):
# Process as usual
invalid = list()
for plug, input_node in self.DEFAULTS:
for plug, valid_inputs in cls.DEFAULTS.items():
inputs = cmds.listConnections(plug,
source=True,
destination=False) or None
if not inputs or inputs[0] != input_node:
self.log.error("{0} is not connected to {1}. "
"This can result in unexpected behavior. "
"Please reconnect to continue.".format(
plug,
input_node))
destination=False,
plugs=True) or None
if not inputs or inputs[0] not in valid_inputs:
cls.log.error(
"{0} is not connected to {1}. This can result in "
"unexpected behavior. Please reconnect to continue."
"".format(plug, " or ".join(valid_inputs))
)
invalid.append(plug)
if invalid:
raise PublishValidationError("Invalid connections.")
return invalid
@classmethod
def repair(cls, context):
invalid = cls.get_invalid()
for plug in invalid:
valid_inputs = cls.DEFAULTS[plug]
for valid_input in valid_inputs:
if cmds.objExists(valid_input):
cls.log.info(
"Connecting {} -> {}".format(valid_input, plug)
)
cmds.connectAttr(valid_input, plug, force=True)
break

View file

@ -371,7 +371,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
continue
for node in data["nodes"]:
lib.set_attribute(data["attribute"], data["values"][0], node)
with lib.renderlayer(layer_node):
# Repair animation must be enabled
@ -392,13 +391,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
if renderer != "renderman":
prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
fname_prefix = default_prefix
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
cmds.setAttr(prefix_attr, fname_prefix, type="string")
# Repair padding
padding_attr = RenderSettings.get_padding_attr(renderer)
cmds.setAttr("{}.{}".format(node, padding_attr),
cls.DEFAULT_PADDING)
cmds.setAttr(padding_attr, cls.DEFAULT_PADDING)
else:
# renderman handles stuff differently
cmds.setAttr("rmanGlobals.imageFileFormat",

View file

@ -298,7 +298,7 @@ def create_timeline_item(
if source_end:
clip_data["endFrame"] = source_end
if timecode_in:
clip_data["recordFrame"] = timecode_in
clip_data["recordFrame"] = timeline_in
# add to timeline
media_pool.AppendToTimeline([clip_data])

View file

@ -7,6 +7,9 @@ from openpype.tools.utils import host_tools
from openpype.pipeline import registered_host
MENU_LABEL = os.environ["AVALON_LABEL"]
def load_stylesheet():
path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
if not os.path.exists(path):
@ -39,7 +42,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)
self.setObjectName("OpenPypeMenu")
self.setObjectName(f"{MENU_LABEL}Menu")
self.setWindowFlags(
QtCore.Qt.Window
@ -49,7 +52,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
| QtCore.Qt.WindowStaysOnTopHint
)
self.setWindowTitle("OpenPype")
self.setWindowTitle(f"{MENU_LABEL}")
save_current_btn = QtWidgets.QPushButton("Save current file", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)

View file

@ -406,26 +406,42 @@ class ClipLoader:
self.active_bin
)
_clip_property = media_pool_item.GetClipProperty
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
source_duration = int(_clip_property("Frames"))
# get handles
handle_start = self.data["versionData"].get("handleStart")
handle_end = self.data["versionData"].get("handleEnd")
if handle_start is None:
handle_start = int(self.data["assetData"]["handleStart"])
if handle_end is None:
handle_end = int(self.data["assetData"]["handleEnd"])
if not self.with_handles:
# Load file without the handles of the source media
# We remove the handles from the source in and source out
# so that the handles are excluded in the timeline
handle_start = 0
handle_end = 0
# check frame duration from versionData or assetData
frame_start = self.data["versionData"].get("frameStart")
if frame_start is None:
frame_start = self.data["assetData"]["frameStart"]
# get version data frame data from db
version_data = self.data["versionData"]
frame_start = version_data.get("frameStart")
frame_end = version_data.get("frameEnd")
# check frame duration from versionData or assetData
frame_end = self.data["versionData"].get("frameEnd")
if frame_end is None:
frame_end = self.data["assetData"]["frameEnd"]
db_frame_duration = int(frame_end) - int(frame_start) + 1
# The version data usually stores the frame range + handles of the
# media; however, certain representations may be shorter because they
# exclude those handles intentionally. Unfortunately the
# representation does not store that in the database currently;
# so we should compensate for those cases. If the media is shorter
# than the frame range specified in the database we assume it is
# without handles and thus we do not need to remove the handles
# from source and out
if frame_start is not None and frame_end is not None:
# Version has frame range data, so we can compare media length
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
database_frame_duration = int(
frame_end_handle - frame_start_handle + 1
)
if source_duration >= database_frame_duration:
source_in += handle_start
source_out -= handle_end
# get timeline in
timeline_start = self.active_timeline.GetStartFrame()
@ -437,24 +453,6 @@ class ClipLoader:
timeline_in = int(
timeline_start + self.data["assetData"]["clipIn"])
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
source_duration = int(_clip_property("Frames"))
# check if source duration is shorter than db frame duration
source_with_handles = True
if source_duration < db_frame_duration:
source_with_handles = False
# only exclude handles if source has no handles or
# if user wants to load without handles
if (
not self.with_handles
or not source_with_handles
):
source_in += handle_start
source_out -= handle_end
# make track item from source in bin as item
timeline_item = lib.create_timeline_item(
media_pool_item,
@ -868,7 +866,7 @@ class PublishClip:
def _convert_to_entity(self, key):
""" Converting input key to key with type. """
# convert to entity type
entity_type = self.types.get(key, None)
entity_type = self.types.get(key)
assert entity_type, "Missing entity type for `{}`".format(
key
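A worked example of the duration check above: with frameStart=1001, frameEnd=1100 and 10-frame handles on both sides, frame_start_handle is 991 and frame_end_handle is 1110, so database_frame_duration is 120. A 120-frame media item therefore contains the handles and gets trimmed (source_in += 10, source_out -= 10), while a 100-frame item is assumed to be handle-less and is loaded untouched.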

View file

@ -0,0 +1,22 @@
import os
import sys
from openpype.pipeline import install_host
from openpype.lib import Logger
log = Logger.get_logger(__name__)
def main(env):
from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu
# activate resolve from openpype
host = ResolveHost()
install_host(host)
launch_pype_menu()
# return a truthy value so the sys.exit() guard below exits with code 0
return True
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

View file

@ -2,6 +2,7 @@ import os
import shutil
from openpype.lib import Logger, is_running_from_build
from openpype import AYON_SERVER_ENABLED
RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@ -54,6 +55,14 @@ def setup(env):
src = os.path.join(directory, script)
dst = os.path.join(util_scripts_dir, script)
# TODO: remove this once we have a proper solution
if AYON_SERVER_ENABLED:
if "OpenPype__Menu.py" == script:
continue
else:
if "AYON__Menu.py" == script:
continue
# TODO: Make this a less hacky workaround
if script == "openpype_startup.scriptlib":
# Handle special case for scriptlib that needs to be a folder

View file

@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect original base name for use in templates."""
from pathlib import Path
import pyblish.api
class CollectOriginalBasename(pyblish.api.InstancePlugin):
"""Collect original file base name."""
order = pyblish.api.CollectorOrder + 0.498
label = "Collect Base Name"
hosts = ["standalonepublisher"]
families = ["simpleUnrealTexture"]
def process(self, instance):
file_name = Path(instance.data["representations"][0]["files"])
instance.data["originalBasename"] = file_name.stem

View file

@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
"""Validator for correct file naming."""
import re
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
)
class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin):
label = "Validate Unreal Texture Names"
hosts = ["standalonepublisher"]
families = ["simpleUnrealTexture"]
order = ValidateContentsOrder
regex = "^T_{asset}.*"
def process(self, instance):
file_name = instance.data.get("originalBasename")
self.log.info(file_name)
pattern = self.regex.format(asset=instance.data.get("asset"))
if not re.match(pattern, file_name):
msg = f"Invalid file name {file_name}"
raise PublishXmlValidationError(
self, msg, formatting_data={
"invalid_file": file_name,
"asset": instance.data.get("asset")
})

View file

@ -73,7 +73,7 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
render_layer_id = creator_attributes["render_layer_instance_id"]
for in_data in instance.context.data["workfileInstances"]:
if (
in_data["creator_identifier"] == "render.layer"
in_data.get("creator_identifier") == "render.layer"
and in_data["instance_id"] == render_layer_id
):
render_layer_data = in_data

View file

@ -111,6 +111,7 @@ from .transcoding import (
get_ffmpeg_format_args,
convert_ffprobe_fps_value,
convert_ffprobe_fps_to_float,
get_rescaled_command_arguments,
)
from .local_settings import (
@ -232,6 +233,7 @@ __all__ = [
"get_ffmpeg_format_args",
"convert_ffprobe_fps_value",
"convert_ffprobe_fps_to_float",
"get_rescaled_command_arguments",
"IniSettingRegistry",
"JSONSettingRegistry",

View file

@ -536,7 +536,7 @@ def convert_for_ffmpeg(
input_frame_end=None,
logger=None
):
"""Contert source file to format supported in ffmpeg.
"""Convert source file to format supported in ffmpeg.
Currently can convert only exrs.
@ -592,29 +592,7 @@ def convert_for_ffmpeg(
oiio_cmd.extend(["--compression", compression])
# Collect channels to export
channel_names = input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)
if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
)
red, green, blue, alpha = review_channels
input_channels = [red, green, blue]
channels_arg = "R={},G={},B={}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)
input_channels_str = ",".join(input_channels)
subimages = input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded to memory so helps to avoid memory
# leak issues
# - this option is crashing if used on multipart/subimages exrs
input_arg += ":ch={}".format(input_channels_str)
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
oiio_cmd.extend([
input_arg, first_input_path,
@ -635,7 +613,7 @@ def convert_for_ffmpeg(
continue
# Remove attributes that have string value longer than allowed length
# for ffmpeg or when contain unallowed symbols
# for ffmpeg or when they contain prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
@ -695,7 +673,7 @@ def convert_input_paths_for_ffmpeg(
Args:
input_paths (str): Paths that should be converted. It is expected that
contains single file or image sequence of samy type.
contains single file or image sequence of same type.
output_dir (str): Path to directory where output will be rendered.
Must not be same as input's directory.
logger (logging.Logger): Logger used for logging.
@ -709,6 +687,7 @@ def convert_input_paths_for_ffmpeg(
first_input_path = input_paths[0]
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
"Function 'convert_for_ffmpeg' currently support only"
@ -724,30 +703,7 @@ def convert_input_paths_for_ffmpeg(
compression = "none"
# Collect channels to export
channel_names = input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)
if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
)
red, green, blue, alpha = review_channels
input_channels = [red, green, blue]
# TODO find subimage inder where rgba is available for multipart exrs
channels_arg = "R={},G={},B={}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)
input_channels_str = ",".join(input_channels)
subimages = input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded to memory so helps to avoid memory
# leak issues
# - this option is crashing if used on multipart exrs
input_arg += ":ch={}".format(input_channels_str)
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
for input_path in input_paths:
# Prepare subprocess arguments
@ -774,7 +730,7 @@ def convert_input_paths_for_ffmpeg(
continue
# Remove attributes that have string value longer than allowed
# length for ffmpeg or when containing unallowed symbols
# length for ffmpeg or when containing prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
@ -1021,9 +977,7 @@ def _ffmpeg_h264_codec_args(stream_data, source_ffmpeg_cmd):
if pix_fmt:
output.extend(["-pix_fmt", pix_fmt])
output.extend(["-intra"])
output.extend(["-g", "1"])
output.extend(["-intra", "-g", "1"])
return output
@ -1149,8 +1103,9 @@ def convert_colorspace(
target_colorspace=None,
view=None,
display=None,
additional_input_args=None,
additional_command_args=None,
logger=None
logger=None,
):
"""Convert source file from one color space to another.
@ -1169,6 +1124,8 @@ def convert_colorspace(
view (str): name for viewer space (ocio valid)
both 'view' and 'display' must be filled (if 'target_colorspace')
display (str): name for display-referred reference space (ocio valid)
both 'view' and 'display' must be filled (if 'target_colorspace')
additional_input_args (list): arguments for input file
additional_command_args (list): arguments for oiiotool (like binary
depth for .dpx)
logger (logging.Logger): Logger used for logging.
@ -1178,14 +1135,31 @@ def convert_colorspace(
if logger is None:
logger = logging.getLogger(__name__)
input_info = get_oiio_info_for_input(input_path, logger=logger)
# Collect channels to export
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
if additional_input_args:
input_arg = "{} {}".format(input_arg, " ".join(additional_input_args))
# Prepare subprocess arguments
oiio_cmd = get_oiio_tool_args(
"oiiotool",
input_path,
# Don't add any additional attributes
"--nosoftwareattrib",
"--colorconfig", config_path
)
oiio_cmd.extend([
input_arg, input_path,
# Tell oiiotool which channels should be put to top stack
# (and output)
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
if all([target_colorspace, view, display]):
raise ValueError("Colorspace and both screen and display"
" cannot be set together."
@ -1226,3 +1200,221 @@ def split_cmd_args(in_args):
continue
splitted_args.extend(arg.split(" "))
return splitted_args
def get_rescaled_command_arguments(
application,
input_path,
target_width,
target_height,
target_par=None,
bg_color=None,
log=None
):
"""Get command arguments for rescaling input to target size.
Args:
application (str): Application for which command should be created.
Currently supported are "ffmpeg" and "oiiotool".
input_path (str): Path to input file.
target_width (int): Width of target.
target_height (int): Height of target.
target_par (Optional[float]): Pixel aspect ratio of target.
bg_color (Optional[list[int]]): List of 8bit int values for
background color. Should be in range 0 - 255.
log (Optional[logging.Logger]): Logger used for logging.
Returns:
list[str]: List of command arguments.
"""
command_args = []
target_par = target_par or 1.0
input_par = 1.0
# ffmpeg command
input_file_metadata = get_ffprobe_data(input_path, logger=log)
stream = input_file_metadata["streams"][0]
input_width = int(stream["width"])
input_height = int(stream["height"])
stream_input_par = stream.get("sample_aspect_ratio")
if stream_input_par:
input_par = (
float(stream_input_par.split(":")[0])
/ float(stream_input_par.split(":")[1])
)
# recalculating input and target width
input_width = int(input_width * input_par)
target_width = int(target_width * target_par)
# calculate aspect ratios
target_aspect = float(target_width) / target_height
input_aspect = float(input_width) / input_height
# calculate scale size
scale_size = float(input_width) / target_width
if input_aspect < target_aspect:
scale_size = float(input_height) / target_height
# calculate rescaled width and height
rescaled_width = int(input_width / scale_size)
rescaled_height = int(input_height / scale_size)
# calculate width and height shift
rescaled_width_shift = int((target_width - rescaled_width) / 2)
rescaled_height_shift = int((target_height - rescaled_height) / 2)
if application == "ffmpeg":
# create scale command
scale = "scale={0}:{1}".format(input_width, input_height)
pad = "pad={0}:{1}:({2}-iw)/2:({3}-ih)/2".format(
target_width,
target_height,
target_width,
target_height
)
if input_width > target_width or input_height > target_height:
scale = "scale={0}:{1}".format(rescaled_width, rescaled_height)
pad = "pad={0}:{1}:{2}:{3}".format(
target_width,
target_height,
rescaled_width_shift,
rescaled_height_shift
)
if bg_color:
color = convert_color_values(application, bg_color)
pad += ":{0}".format(color)
command_args.extend(["-vf", "{0},{1}".format(scale, pad)])
elif application == "oiiotool":
input_info = get_oiio_info_for_input(input_path, logger=log)
# Collect channels to export
_, channels_arg = get_oiio_input_and_channel_args(
input_info, alpha_default=1.0)
command_args.extend([
# Tell oiiotool which channels should be put to top stack
# (and output)
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
if input_par != 1.0:
command_args.extend(["--pixelaspect", "1"])
width_shift = int((target_width - input_width) / 2)
height_shift = int((target_height - input_height) / 2)
# default resample is not scaling source image
resample = [
"--resize",
"{0}x{1}".format(input_width, input_height),
"--origin",
"+{0}+{1}".format(width_shift, height_shift),
]
# scaled source image to target size
if input_width > target_width or input_height > target_height:
# form resample command
resample = [
"--resize:filter=lanczos3",
"{0}x{1}".format(rescaled_width, rescaled_height),
"--origin",
"+{0}+{1}".format(rescaled_width_shift, rescaled_height_shift),
]
command_args.extend(resample)
fullsize = [
"--fullsize",
"{0}x{1}".format(target_width, target_height)
]
if bg_color:
color = convert_color_values(application, bg_color)
fullsize.extend([
"--pattern",
"constant:color={0}".format(color),
"{0}x{1}".format(target_width, target_height),
"4", # 4 channels
"--over"
])
command_args.extend(fullsize)
else:
raise ValueError(
"\"application\" input argument should "
"be either \"ffmpeg\" or \"oiiotool\""
)
return command_args
def convert_color_values(application, color_value):
"""Get color mapping for ffmpeg and oiiotool.
Args:
application (str): Application for which command should be created.
color_value (list[int]): List of 8bit int values for RGBA.
Returns:
str: Hex string for ffmpeg, comma-separated floats for oiiotool.
"""
red, green, blue, alpha = color_value
if application == "ffmpeg":
return "{0:0>2X}{1:0>2X}{2:0>2X}@{3}".format(
red, green, blue, (alpha / 255.0)
)
elif application == "oiiotool":
red = float(red / 255)
green = float(green / 255)
blue = float(blue / 255)
alpha = float(alpha / 255)
return "{0:.3f},{1:.3f},{2:.3f},{3:.3f}".format(
red, green, blue, alpha)
else:
raise ValueError(
"\"application\" input argument should "
"be either \"ffmpeg\" or \"oiiotool\""
)
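For instance, opaque red maps differently per application (a minimal usage sketch):
convert_color_values("ffmpeg", [255, 0, 0, 255])
# -> "FF0000@1.0" (hex RGB with alpha as a 0-1 float)
convert_color_values("oiiotool", [255, 0, 0, 255])
# -> "1.000,0.000,0.000,1.000" (normalized RGBA floats)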
def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
"""Get input and channel arguments for oiiotool.
Args:
oiio_input_info (dict): Information about input from oiio tool.
Should be output of function `get_oiio_info_for_input`.
alpha_default (float, optional): Default value for alpha channel.
Returns:
tuple[str, str]: Tuple of input and channel arguments.
"""
channel_names = oiio_input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)
if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
)
red, green, blue, alpha = review_channels
input_channels = [red, green, blue]
channels_arg = "R={0},G={1},B={2}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)
elif alpha_default:
channels_arg += ",A={}".format(float(alpha_default))
input_channels.append("A")
input_channels_str = ",".join(input_channels)
subimages = oiio_input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded into memory, which helps to avoid
#   memory issues
# - this option crashes if used on multipart EXRs
input_arg += ":ch={}".format(input_channels_str)
return input_arg, channels_arg
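As an illustration, assuming `get_convert_rgb_channels` resolves plain R/G/B channels with no alpha in the source, a single-subimage input would yield:
input_info = {"channelnames": ["R", "G", "B"], "subimages": 1}
input_arg, channels_arg = get_oiio_input_and_channel_args(
    input_info, alpha_default=1.0)
# input_arg    == "-i:ch=R,G,B,A"
# channels_arg == "R=R,G=G,B=B,A=1.0"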

View file

@ -460,7 +460,21 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
self.plugin_info = self.get_plugin_info()
self.aux_files = self.get_aux_files()
self.process_submission()
job_id = self.process_submission()
self.log.info("Submitted job to Deadline: {}.".format(job_id))
# TODO: Find a way that's more generic and not render type specific
if "exportJob" in instance.data:
self.log.info("Splitting export and render in two jobs")
self.log.info("Export job id: %s", job_id)
render_job_info = self.get_job_info(dependency_job_ids=[job_id])
render_plugin_info = self.get_plugin_info(job_type="render")
payload = self.assemble_payload(
job_info=render_job_info,
plugin_info=render_plugin_info
)
render_job_id = self.submit(payload)
self.log.info("Render job id: %s", render_job_id)
def process_submission(self):
"""Process data for submission.

View file

@ -2,6 +2,8 @@
"""Collect default Deadline server."""
import pyblish.api
from openpype import AYON_SERVER_ENABLED
class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
"""Collect default Deadline Webservice URL.
@ -30,24 +32,26 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
self.log.error("Cannot get OpenPype Deadline module.")
raise AssertionError("OpenPype Deadline module not found.")
# get default deadline webservice url from deadline module
self.log.debug(deadline_module.deadline_urls)
context.data["defaultDeadline"] = deadline_module.deadline_urls["default"] # noqa: E501
deadline_settings = context.data["project_settings"]["deadline"]
deadline_server_name = None
if AYON_SERVER_ENABLED:
deadline_server_name = deadline_settings["deadline_server"]
else:
deadline_servers = deadline_settings["deadline_servers"]
if deadline_servers:
deadline_server_name = deadline_servers[0]
context.data["deadlinePassMongoUrl"] = self.pass_mongo_url
context.data["deadlinePassMongoUrl"] = self.pass_mongo_url
deadline_servers = (context.data
["project_settings"]
["deadline"]
["deadline_servers"])
if deadline_servers:
deadline_server_name = deadline_servers[0]
deadline_webservice = None
if deadline_server_name:
deadline_webservice = deadline_module.deadline_urls.get(
deadline_server_name)
if deadline_webservice:
context.data["defaultDeadline"] = deadline_webservice
self.log.debug("Overriding from project settings with {}".format( # noqa: E501
deadline_webservice))
context.data["defaultDeadline"] = \
context.data["defaultDeadline"].strip().rstrip("/")
default_deadline_webservice = deadline_module.deadline_urls["default"]
deadline_webservice = (
deadline_webservice
or default_deadline_webservice
)
context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/") # noqa

View file

@ -6,8 +6,14 @@ import getpass
import attr
from datetime import datetime
from openpype.lib import is_running_from_build
from openpype.lib import (
is_running_from_build,
BoolDef,
NumberDef,
TextDef,
)
from openpype.pipeline import legacy_io
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
from openpype.pipeline.farm.tools import iter_expected_files
from openpype.tests.lib import is_in_tests
@ -22,10 +28,11 @@ class BlenderPluginInfo():
SaveFile = attr.ib(default=True)
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
OpenPypePyblishPluginMixin):
label = "Submit Render to Deadline"
hosts = ["blender"]
families = ["render.farm"]
families = ["render"]
use_published = True
priority = 50
@ -33,6 +40,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
jobInfo = {}
pluginInfo = {}
group = None
job_delay = "00:00:00:00"
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="Blender")
@ -67,8 +75,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.Comment = context.data.get("comment")
job_info.Priority = instance.data.get("priority", self.priority)
job_info.Comment = instance.data.get("comment")
if self.group != "none" and self.group:
job_info.Group = self.group
@ -83,8 +90,10 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
machine_list_key = "Blacklist"
render_globals[machine_list_key] = machine_list
job_info.Priority = attr_values.get("priority")
job_info.ChunkSize = attr_values.get("chunkSize")
job_info.ChunkSize = attr_values.get("chunkSize", self.chunk_size)
job_info.Priority = attr_values.get("priority", self.priority)
job_info.ScheduledType = "Once"
job_info.JobDelay = attr_values.get("job_delay", self.job_delay)
# Add options from RenderGlobals
render_globals = instance.data.get("renderGlobals", {})
@ -180,3 +189,39 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
the metadata and the rendered files are in the same location.
"""
return super().from_published_scene(False)
@classmethod
def get_attribute_defs(cls):
defs = super(BlenderSubmitDeadline, cls).get_attribute_defs()
defs.extend([
BoolDef("use_published",
default=cls.use_published,
label="Use Published Scene"),
NumberDef("priority",
minimum=1,
maximum=250,
decimals=0,
default=cls.priority,
label="Priority"),
NumberDef("chunkSize",
minimum=1,
maximum=50,
decimals=0,
default=cls.chunk_size,
label="Frame Per Task"),
TextDef("group",
default=cls.group,
label="Group Name"),
TextDef("job_delay",
default=cls.job_delay,
label="Job Delay",
placeholder="dd:hh:mm:ss",
tooltip="Delay the job by the specified amount of time. "
"Timecode: dd:hh:mm:ss."),
])
return defs

View file

@ -2,8 +2,6 @@ import os
import getpass
from datetime import datetime
import hou
import attr
import pyblish.api
from openpype.lib import (
@ -141,6 +139,9 @@ class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
return job_info
def get_plugin_info(self):
# Not all hosts can import this module.
import hou
instance = self._instance
version = hou.applicationVersionString()
version = ".".join(version.split(".")[:2])
@ -167,6 +168,9 @@ class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
instance.data["toBeRenderedOn"] = "deadline"
def get_rop_node(self, instance):
# Not all hosts can import this module.
import hou
rop = instance.data.get("instance_node")
rop_node = hou.node(rop)

View file

@ -5,12 +5,15 @@ from datetime import datetime
import pyblish.api
from openpype.pipeline import legacy_io
from openpype.pipeline import legacy_io, OpenPypePyblishPluginMixin
from openpype.tests.lib import is_in_tests
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import is_running_from_build
from openpype.lib import (
is_running_from_build,
BoolDef,
NumberDef
)
@attr.s
class DeadlinePluginInfo():
@ -20,8 +23,29 @@ class DeadlinePluginInfo():
IgnoreInputs = attr.ib(default=True)
class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
"""Submit Solaris USD Render ROPs to Deadline.
@attr.s
class ArnoldRenderDeadlinePluginInfo():
InputFile = attr.ib(default=None)
Verbose = attr.ib(default=4)
@attr.s
class MantraRenderDeadlinePluginInfo():
SceneFile = attr.ib(default=None)
Version = attr.ib(default=None)
@attr.s
class VrayRenderPluginInfo():
InputFilename = attr.ib(default=None)
SeparateFilesPerFrame = attr.ib(default=True)
class HoudiniSubmitDeadline(
abstract_submit_deadline.AbstractSubmitDeadline,
OpenPypePyblishPluginMixin
):
"""Submit Render ROPs to Deadline.
Renders are submitted to a Deadline Web Service as
supplied via the environment variable AVALON_DEADLINE.
@ -45,21 +69,95 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
targets = ["local"]
use_published = True
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="Houdini")
# presets
priority = 50
chunk_size = 1
export_priority = 50
export_chunk_size = 10
group = ""
export_group = ""
@classmethod
def get_attribute_defs(cls):
return [
NumberDef(
"priority",
label="Priority",
default=cls.priority,
decimals=0
),
NumberDef(
"chunk",
label="Frames Per Task",
default=cls.chunk_size,
decimals=0,
minimum=1,
maximum=1000
),
NumberDef(
"export_priority",
label="Export Priority",
default=cls.export_priority,
decimals=0
),
NumberDef(
"export_chunk",
label="Export Frames Per Task",
default=cls.export_chunk_size,
decimals=0,
minimum=1,
maximum=1000
),
BoolDef(
"suspend_publish",
default=False,
label="Suspend publish"
)
]
def get_job_info(self, dependency_job_ids=None):
instance = self._instance
context = instance.context
attribute_values = self.get_attr_values_from_data(instance.data)
# Whether Deadline render submission is being split in two
# (extract + render)
split_render_job = instance.data["exportJob"]
# If there are dependency job ids we can assume this is a render job
# and not an export job
is_export_job = True
if dependency_job_ids:
is_export_job = False
if split_render_job and not is_export_job:
# Convert from family to Deadline plugin name
# i.e., arnold_rop -> Arnold
plugin = instance.data["family"].replace("_rop", "").capitalize()
else:
plugin = "Houdini"
job_info = DeadlineJobInfo(Plugin=plugin)
filepath = context.data["currentFile"]
filename = os.path.basename(filepath)
job_info.Name = "{} - {}".format(filename, instance.name)
job_info.BatchName = filename
job_info.Plugin = "Houdini"
job_info.UserName = context.data.get(
"deadlineUser", getpass.getuser())
if split_render_job and is_export_job:
job_info.Priority = attribute_values.get(
"export_priority", self.export_priority
)
else:
job_info.Priority = attribute_values.get(
"priority", self.priority
)
if is_in_tests():
job_info.BatchName += datetime.now().strftime("%d%m%Y%H%M%S")
@ -73,9 +171,23 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
)
job_info.Frames = frames
# Make sure we make the job frame dependent so render tasks pick up
# as soon as export tasks are done
if split_render_job and not is_export_job:
job_info.IsFrameDependent = True
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.ChunkSize = instance.data.get("chunkSize", 10)
job_info.Group = self.group
if split_render_job and is_export_job:
job_info.ChunkSize = attribute_values.get(
"export_chunk", self.export_chunk_size
)
else:
job_info.ChunkSize = attribute_values.get(
"chunk", self.chunk_size
)
job_info.Comment = context.data.get("comment")
keys = [
@ -101,6 +213,7 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **legacy_io.Session)
for key in keys:
value = environment.get(key)
if value:
@ -115,25 +228,51 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
job_info.OutputDirectory += dirname.replace("\\", "/")
job_info.OutputFilename += fname
# Add dependencies if given
if dependency_job_ids:
job_info.JobDependencies = ",".join(dependency_job_ids)
return job_info
def get_plugin_info(self):
def get_plugin_info(self, job_type=None):
# Not all hosts can import this module.
import hou
instance = self._instance
context = instance.context
# Output driver to render
driver = hou.node(instance.data["instance_node"])
hou_major_minor = hou.applicationVersionString().rsplit(".", 1)[0]
plugin_info = DeadlinePluginInfo(
SceneFile=context.data["currentFile"],
OutputDriver=driver.path(),
Version=hou_major_minor,
IgnoreInputs=True
)
# Output driver to render
if job_type == "render":
family = instance.data.get("family")
if family == "arnold_rop":
plugin_info = ArnoldRenderDeadlinePluginInfo(
InputFile=instance.data["ifdFile"]
)
elif family == "mantra_rop":
plugin_info = MantraRenderDeadlinePluginInfo(
SceneFile=instance.data["ifdFile"],
Version=hou_major_minor,
)
elif family == "vray_rop":
plugin_info = VrayRenderPluginInfo(
InputFilename=instance.data["ifdFile"],
)
else:
self.log.error(
"Family '%s' not supported yet to split render job",
family
)
return
else:
driver = hou.node(instance.data["instance_node"])
plugin_info = DeadlinePluginInfo(
SceneFile=context.data["currentFile"],
OutputDriver=driver.path(),
Version=hou_major_minor,
IgnoreInputs=True
)
return attr.asdict(plugin_info)
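The family-to-plugin mapping used in get_job_info above is purely string based, e.g.:
# "arnold_rop".replace("_rop", "").capitalize() -> "Arnold"
# "mantra_rop" -> "Mantra", "vray_rop" -> "Vray"
# i.e. the Deadline plugin name is derived from the instance family,
# and get_plugin_info then picks the matching *PluginInfo class.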

View file

@ -230,6 +230,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
# Create review components
# Change asset name of each new component for review
multiple_reviewable = len(review_representations) > 1
extended_asset_name = None
for index, repre in enumerate(review_representations):
if not self._is_repre_video(repre) and has_movie_review:
self.log.debug("Movie repre has priority "

View file

@ -8,6 +8,7 @@ import appdirs
from qtpy import QtCore, QtWidgets, QtGui
from openpype import resources
from openpype import AYON_SERVER_ENABLED
from openpype.style import load_stylesheet
from openpype.lib import JSONSettingRegistry
@ -339,7 +340,9 @@ class PythonInterpreterWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(PythonInterpreterWidget, self).__init__(parent)
self.setWindowTitle("OpenPype Console")
self.setWindowTitle("{} Console".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"
))
self.setWindowIcon(QtGui.QIcon(resources.get_openpype_icon_filepath()))
self.ansi_escape = re.compile(

View file

@ -1,23 +1,28 @@
# -*- coding: utf-8 -*-
"""Submitting render job to RoyalRender."""
import os
import re
import json
import platform
import re
import tempfile
import uuid
from datetime import datetime
import pyblish.api
from openpype.tests.lib import is_in_tests
from openpype.pipeline.publish.lib import get_published_workfile_instance
from openpype.pipeline.publish import KnownPublishError
from openpype.lib import BoolDef, NumberDef, is_running_from_build
from openpype.lib.execute import run_openpype_process
from openpype.modules.royalrender.api import Api as rrApi
from openpype.modules.royalrender.rr_job import (
RRJob, CustomAttribute, get_rr_platform)
from openpype.lib import (
is_running_from_build,
BoolDef,
NumberDef,
)
from openpype.modules.royalrender.rr_job import (
RRJob,
CustomAttribute,
RREnvList,
get_rr_platform,
)
from openpype.pipeline import OpenPypePyblishPluginMixin
from openpype.pipeline.publish import KnownPublishError
from openpype.pipeline.publish.lib import get_published_workfile_instance
from openpype.tests.lib import is_in_tests
class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
@ -302,3 +307,68 @@ class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
path = path.replace(first_frame, "#" * padding)
return path
def inject_environment(self, instance, job):
# type: (pyblish.api.Instance, RRJob) -> RRJob
"""Inject environment variables for RR submission.
This function mimics the behaviour of the Deadline
integration. It is just a temporary solution until proper
runtime environment injection is implemented in RR.
Args:
instance (pyblish.api.Instance): Publishing instance
job (RRJob): RRJob instance to be injected.
Returns:
RRJob: Injected RRJob instance.
Raises:
RuntimeError: If any of the required env vars is missing.
"""
temp_file_name = "{}_{}.json".format(
datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
str(uuid.uuid1())
)
export_url = os.path.join(tempfile.gettempdir(), temp_file_name)
print(">>> Temporary path: {}".format(export_url))
args = [
"--headless",
"extractenvironments",
export_url
]
anatomy_data = instance.context.data["anatomyData"]
add_kwargs = {
"project": anatomy_data["project"]["name"],
"asset": instance.context.data["asset"],
"task": anatomy_data["task"]["name"],
"app": instance.context.data.get("appName"),
"envgroup": "farm"
}
if os.getenv('IS_TEST'):
args.append("--automatic-tests")
if not all(add_kwargs.values()):
raise RuntimeError((
"Missing required env vars: AVALON_PROJECT, AVALON_ASSET,"
" AVALON_TASK, AVALON_APP_NAME"
))
for key, value in add_kwargs.items():
args.extend([f"--{key}", value])
self.log.debug("Executing: {}".format(" ".join(args)))
run_openpype_process(*args, logger=self.log)
self.log.debug("Loading file ...")
with open(export_url) as fp:
contents = json.load(fp)
job.rrEnvList = RREnvList(contents).serialize()
return job
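The subprocess launched above boils down to a headless OpenPype call along these lines (the executable name and the argument values are illustrative assumptions):
openpype_console --headless extractenvironments \
    /tmp/20231207220038123456_<uuid>.json \
    --project demo_Commercial --asset sh010 --task animation \
    --app maya/2023 --envgroup farm
The resulting JSON file is then read back and serialized into the job's rrEnvList.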

View file

@ -2,7 +2,7 @@
"""Submitting render job to RoyalRender."""
import os
from maya.OpenMaya import MGlobal
from maya.OpenMaya import MGlobal # noqa: F401
from openpype.modules.royalrender import lib
from openpype.pipeline.farm.tools import iter_expected_files
@ -38,5 +38,6 @@ class CreateMayaRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
job = self.get_job(instance, self.scene_path, first_file_path,
layer_name)
job = self.update_job_with_host_specific(instance, job)
job = self.inject_environment(instance, job)
instance.data["rrJobs"].append(job)

View file

@ -25,6 +25,7 @@ class CreateNukeRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
jobs = self.create_jobs(instance)
for job in jobs:
job = self.update_job_with_host_specific(instance, job)
job = self.inject_environment(instance, job)
instance.data["rrJobs"].append(job)

View file

@ -205,6 +205,9 @@ class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin,
jobs_pre_ids = []
for job in instance.data["rrJobs"]: # type: RRJob
if job.rrEnvList:
if len(job.rrEnvList) > 2000:
self.log.warning(("Job environment is too long "
f"{len(job.rrEnvList)} > 2000"))
job_environ.update(
dict(RREnvList.parse(job.rrEnvList))
)

View file

@ -32,7 +32,7 @@ class RREnvList(dict):
"""Parse rrEnvList string and return it as RREnvList object."""
out = RREnvList()
for var in data.split("~~~"):
k, v = var.split("=")
k, v = var.split("=", maxsplit=1)
out[k] = v
return out
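The `maxsplit=1` fix matters for values that themselves contain `=`:
RREnvList.parse("PYTHONPATH=/opt/op~~~EXTRA=a=b")
# -> {"PYTHONPATH": "/opt/op", "EXTRA": "a=b"}
# without maxsplit=1 the second entry raised
# "ValueError: too many values to unpack"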
@ -172,7 +172,7 @@ class RRJob(object):
# Environment
# only used in RR 8.3 and newer
rrEnvList = attr.ib(default=None) # type: str
rrEnvList = attr.ib(default=None, type=str) # type: str
class SubmitterParameter:

View file

@ -7,6 +7,8 @@ from openpype.pipeline.plugin_discover import (
deregister_plugin_path
)
from .load.utils import get_representation_path_from_context
class LauncherAction(object):
"""A custom action available"""
@ -100,6 +102,10 @@ class InventoryAction(object):
"""
return True
@classmethod
def filepath_from_context(cls, context):
return get_representation_path_from_context(context)
# Launcher action
def discover_launcher_actions():

View file

@ -19,6 +19,7 @@ from abc import ABCMeta, abstractmethod
import six
from openpype import AYON_SERVER_ENABLED
from openpype.client import (
get_asset_by_name,
get_linked_assets,
@ -1272,31 +1273,54 @@ class PlaceholderLoadMixin(object):
# Sort for readability
families = list(sorted(families))
return [
if AYON_SERVER_ENABLED:
builder_type_enum_items = [
{"label": "Current folder", "value": "context_folder"},
# TODO implement linked folders
# {"label": "Linked folders", "value": "linked_folders"},
{"label": "All folders", "value": "all_folders"},
]
build_type_label = "Folder Builder Type"
build_type_help = (
"Folder Builder Type\n"
"\nBuilder type describe what template loader will look"
" for."
"\nCurrent Folder: Template loader will look for products"
" of current context folder (Folder /assets/bob will"
" find asset)"
"\nAll folders: All folders matching the regex will be"
" used."
)
else:
builder_type_enum_items = [
{"label": "Current asset", "value": "context_asset"},
{"label": "Linked assets", "value": "linked_asset"},
{"label": "All assets", "value": "all_assets"},
]
build_type_label = "Asset Builder Type"
build_type_help = (
"Asset Builder Type\n"
"\nBuilder type describe what template loader will look"
" for."
"\ncontext_asset : Template loader will look for subsets"
" of current context asset (Asset bob will find asset)"
"\nlinked_asset : Template loader will look for assets"
" linked to current context asset."
"\nLinked asset are looked in database under"
" field \"inputLinks\""
)
attr_defs = [
attribute_definitions.UISeparatorDef(),
attribute_definitions.UILabelDef("Main attributes"),
attribute_definitions.UISeparatorDef(),
attribute_definitions.EnumDef(
"builder_type",
label="Asset Builder Type",
label=build_type_label,
default=options.get("builder_type"),
items=[
{"label": "Current asset", "value": "context_asset"},
{"label": "Linked assets", "value": "linked_asset"},
{"label": "All assets", "value": "all_assets"},
],
tooltip=(
"Asset Builder Type\n"
"\nBuilder type describe what template loader will look"
" for."
"\ncontext_asset : Template loader will look for subsets"
" of current context asset (Asset bob will find asset)"
"\nlinked_asset : Template loader will look for assets"
" linked to current context asset."
"\nLinked asset are looked in database under"
" field \"inputLinks\""
)
items=builder_type_enum_items,
tooltip=build_type_help
),
attribute_definitions.EnumDef(
"family",
@ -1352,34 +1376,63 @@ class PlaceholderLoadMixin(object):
attribute_definitions.UISeparatorDef(),
attribute_definitions.UILabelDef("Optional attributes"),
attribute_definitions.UISeparatorDef(),
attribute_definitions.TextDef(
"asset",
label="Asset filter",
default=options.get("asset"),
placeholder="regex filtering by asset name",
tooltip=(
"Filtering assets by matching field regex to asset's name"
)
),
attribute_definitions.TextDef(
"subset",
label="Subset filter",
default=options.get("subset"),
placeholder="regex filtering by subset name",
tooltip=(
"Filtering assets by matching field regex to subset's name"
)
),
attribute_definitions.TextDef(
"hierarchy",
label="Hierarchy filter",
default=options.get("hierarchy"),
placeholder="regex filtering by asset's hierarchy",
tooltip=(
"Filtering assets by matching field asset's hierarchy"
)
)
]
if AYON_SERVER_ENABLED:
attr_defs.extend([
attribute_definitions.TextDef(
"folder_path",
label="Folder filter",
default=options.get("folder_path"),
placeholder="regex filtering by folder path",
tooltip=(
"Filtering assets by matching"
" field regex to folder path"
)
),
attribute_definitions.TextDef(
"product_name",
label="Product filter",
default=options.get("product_name"),
placeholder="regex filtering by product name",
tooltip=(
"Filtering assets by matching"
" field regex to product name"
)
),
])
else:
attr_defs.extend([
attribute_definitions.TextDef(
"asset",
label="Asset filter",
default=options.get("asset"),
placeholder="regex filtering by asset name",
tooltip=(
"Filtering assets by matching"
" field regex to asset's name"
)
),
attribute_definitions.TextDef(
"subset",
label="Subset filter",
default=options.get("subset"),
placeholder="regex filtering by subset name",
tooltip=(
"Filtering assets by matching"
" field regex to subset's name"
)
),
attribute_definitions.TextDef(
"hierarchy",
label="Hierarchy filter",
default=options.get("hierarchy"),
placeholder="regex filtering by asset's hierarchy",
tooltip=(
"Filtering assets by matching field asset's hierarchy"
)
)
])
return attr_defs
def parse_loader_args(self, loader_args):
"""Helper function to parse string of loader arugments.
@ -1409,6 +1462,117 @@ class PlaceholderLoadMixin(object):
return {}
def _query_by_folder_regex(self, project_name, folder_regex):
"""Query folders by folder path regex.
WARNING:
This method will be removed once the same functionality is
available in ayon-python-api.
Args:
project_name (str): Project name.
folder_regex (str): Regex for folder path.
Returns:
list[str]: List of folder paths.
"""
from ayon_api.graphql_queries import folders_graphql_query
from openpype.client import get_ayon_server_api_connection
query = folders_graphql_query({"id"})
folders_field = None
for child in query._children:
if child.path != "project":
continue
for project_child in child._children:
if project_child.path == "project/folders":
folders_field = project_child
break
if folders_field:
break
if "folderPathRegex" not in query._variables:
folder_path_regex_var = query.add_variable(
"folderPathRegex", "String!"
)
folders_field.set_filter("pathEx", folder_path_regex_var)
query.set_variable_value("projectName", project_name)
if folder_regex:
query.set_variable_value("folderPathRegex", folder_regex)
api = get_ayon_server_api_connection()
for parsed_data in query.continuous_query(api):
for folder in parsed_data["project"]["folders"]:
yield folder["id"]
def _get_representations_ayon(self, placeholder):
# An OpenPype placeholder loaded in AYON
if "asset" in placeholder.data:
return []
representation_name = placeholder.data["representation"]
if not representation_name:
return []
project_name = self.builder.project_name
current_asset_doc = self.builder.current_asset_doc
folder_path_regex = placeholder.data["folder_path"]
product_name_regex_value = placeholder.data["product_name"]
product_name_regex = None
if product_name_regex_value:
product_name_regex = re.compile(product_name_regex_value)
product_type = placeholder.data["family"]
builder_type = placeholder.data["builder_type"]
folder_ids = []
if builder_type == "context_folder":
folder_ids = [current_asset_doc["_id"]]
elif builder_type == "all_folders":
folder_ids = list(self._query_by_folder_regex(
project_name, folder_path_regex
))
if not folder_ids:
return []
from ayon_api import get_products, get_last_versions
products = list(get_products(
project_name,
folder_ids=folder_ids,
product_types=[product_type],
fields={"id", "name"}
))
filtered_product_ids = set()
for product in products:
if (
product_name_regex is None
or product_name_regex.match(product["name"])
):
filtered_product_ids.add(product["id"])
if not filtered_product_ids:
return []
version_ids = set(
version["id"]
for version in get_last_versions(
project_name, filtered_product_ids, fields={"id"}
).values()
)
return list(get_representations(
project_name,
representation_names=[representation_name],
version_ids=version_ids
))
def _get_representations(self, placeholder):
"""Prepared query of representations based on load options.
@ -1428,6 +1592,13 @@ class PlaceholderLoadMixin(object):
from placeholder data.
"""
if AYON_SERVER_ENABLED:
return self._get_representations_ayon(placeholder)
# An AYON placeholder loaded in OpenPype
if "folder_path" in placeholder.data:
return []
project_name = self.builder.project_name
current_asset_doc = self.builder.current_asset_doc
linked_asset_docs = self.builder.linked_asset_docs

View file

@ -1,3 +1,4 @@
import copy
import os
import subprocess
import tempfile
@ -5,12 +6,17 @@ import tempfile
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_args,
get_oiio_tool_args,
is_oiio_supported,
get_ffprobe_data,
is_oiio_supported,
get_rescaled_command_arguments,
run_subprocess,
path_to_subprocess_arg,
run_subprocess,
)
from openpype.lib.transcoding import convert_colorspace
from openpype.lib.transcoding import VIDEO_EXTENSIONS
class ExtractThumbnail(pyblish.api.InstancePlugin):
@ -25,7 +31,16 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
hosts = ["shell", "fusion", "resolve", "traypublisher", "substancepainter"]
enabled = False
# presetable attribute
integrate_thumbnail = False
target_size = {
"type": "resize",
"width": 1920,
"height": 1080
}
background_color = None
duration_split = 0.5
# attribute presets from settings
oiiotool_defaults = None
ffmpeg_args = None
def process(self, instance):
@ -82,29 +97,62 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
oiio_supported = is_oiio_supported()
for repre in filtered_repres:
repre_files = repre["files"]
src_staging = os.path.normpath(repre["stagingDir"])
if not isinstance(repre_files, (list, tuple)):
input_file = repre_files
# convert any video file to a single frame so oiio doesn't need to
# read the video file (which is slow) and we also have control
# over which frame is used for the thumbnail
# this will also work with the ffmpeg fallback conversion in case
# oiio is not supported
repre_extension = os.path.splitext(repre_files)[1]
if repre_extension in VIDEO_EXTENSIONS:
video_file_path = os.path.join(
src_staging, repre_files
)
file_path = self._create_frame_from_video(
video_file_path,
dst_staging
)
if file_path:
src_staging, input_file = os.path.split(file_path)
else:
# if it is not a video file then just use the first file
input_file = repre_files
else:
file_index = int(float(len(repre_files)) * 0.5)
repre_files_thumb = copy.deepcopy(repre_files)
# exclude first frame if slate in representation tags
if "slate-frame" in repre.get("tags", []):
repre_files_thumb = repre_files_thumb[1:]
file_index = int(
float(len(repre_files_thumb)) * self.duration_split)
input_file = repre_files[file_index]
src_staging = os.path.normpath(repre["stagingDir"])
full_input_path = os.path.join(src_staging, input_file)
self.log.debug("input {}".format(full_input_path))
filename = os.path.splitext(input_file)[0]
jpeg_file = filename + "_thumb.jpg"
full_output_path = os.path.join(dst_staging, jpeg_file)
colorspace_data = repre.get("colorspaceData")
if oiio_supported:
self.log.debug("Trying to convert with OIIO")
# only use OIIO if it is supported and representation has
# colorspace data
if oiio_supported and colorspace_data:
self.log.debug(
"Trying to convert with OIIO "
"with colorspace data: {}".format(colorspace_data)
)
# If the input can be read by OIIO then use the OIIO method for
# conversion, otherwise use ffmpeg
thumbnail_created = self.create_thumbnail_oiio(
full_input_path, full_output_path
thumbnail_created = self._create_thumbnail_oiio(
full_input_path,
full_output_path,
colorspace_data
)
# Try to use FFMPEG if OIIO is not supported or for cases when
# oiiotool isn't available
# oiiotool isn't available or the representation doesn't have
# colorspace data
if not thumbnail_created:
if oiio_supported:
self.log.debug(
@ -112,7 +160,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
" can't be read by OIIO."
)
thumbnail_created = self.create_thumbnail_ffmpeg(
thumbnail_created = self._create_thumbnail_ffmpeg(
full_input_path, full_output_path
)
@ -120,13 +168,19 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
if not thumbnail_created:
continue
new_repre_tags = ["thumbnail"]
# for workflows which need thumbnails published as separate
# representations, the `delete` tag should not be added
if not self.integrate_thumbnail:
new_repre_tags.append("delete")
new_repre = {
"name": "thumbnail",
"ext": "jpg",
"files": jpeg_file,
"stagingDir": dst_staging,
"thumbnail": True,
"tags": ["thumbnail"]
"tags": new_repre_tags
}
# adding representation
@ -138,7 +192,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
break
if not thumbnail_created:
self.log.warning("Thumbanil has not been created.")
self.log.warning("Thumbnail has not been created.")
def _is_review_instance(self, instance):
# TODO: We should probably handle "not creating" of thumbnail
@ -173,17 +227,68 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
filtered_repres.append(repre)
return filtered_repres
def create_thumbnail_oiio(self, src_path, dst_path):
self.log.debug("Extracting thumbnail with OIIO: {}".format(dst_path))
oiio_cmd = get_oiio_tool_args(
"oiiotool",
"-a", src_path,
"-o", dst_path
)
self.log.debug("running: {}".format(" ".join(oiio_cmd)))
def _create_thumbnail_oiio(
self,
src_path,
dst_path,
colorspace_data,
):
"""Create thumbnail using OIIO tool oiiotool
Args:
src_path (str): path to source file
dst_path (str): path to destination file
colorspace_data (dict): colorspace data from representation
keys:
colorspace (str)
config (dict)
display (Optional[str])
view (Optional[str])
Returns:
bool: True if the thumbnail was created, False otherwise.
"""
self.log.info("Extracting thumbnail {}".format(dst_path))
resolution_arg = self._get_resolution_arg("oiiotool", src_path)
repre_display = colorspace_data.get("display")
repre_view = colorspace_data.get("view")
oiio_default_type = None
oiio_default_display = None
oiio_default_view = None
oiio_default_colorspace = None
# first look into representation colorspaceData, perhaps it has
# display and view
if all([repre_display, repre_view]):
self.log.info(
"Using Display & View from "
"representation: '{} ({})'".format(
repre_view,
repre_display
)
)
# if representation doesn't have display and view then use
# oiiotool_defaults
elif self.oiiotool_defaults:
oiio_default_type = self.oiiotool_defaults["type"]
if "colorspace" in oiio_default_type:
oiio_default_colorspace = self.oiiotool_defaults["colorspace"]
else:
oiio_default_display = self.oiiotool_defaults["display"]
oiio_default_view = self.oiiotool_defaults["view"]
try:
run_subprocess(oiio_cmd, logger=self.log)
return True
convert_colorspace(
src_path,
dst_path,
colorspace_data["config"]["path"],
colorspace_data["colorspace"],
display=repre_display or oiio_default_display,
view=repre_view or oiio_default_view,
target_colorspace=oiio_default_colorspace,
additional_input_args=resolution_arg,
logger=self.log,
)
except Exception:
self.log.warning(
"Failed to create thumbnail using oiiotool",
@ -191,9 +296,11 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
return False
def create_thumbnail_ffmpeg(self, src_path, dst_path):
self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path))
return True
def _create_thumbnail_ffmpeg(self, src_path, dst_path):
self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path))
resolution_arg = self._get_resolution_arg("ffmpeg", src_path)
ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg")
ffmpeg_args = self.ffmpeg_args or {}
@ -215,6 +322,10 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
jpeg_items.extend(ffmpeg_args.get("output") or [])
# we just want one frame from movie files
jpeg_items.extend(["-vframes", "1"])
if resolution_arg:
jpeg_items.extend(resolution_arg)
# output file
jpeg_items.append(path_to_subprocess_arg(dst_path))
subprocess_command = " ".join(jpeg_items)
@ -229,3 +340,69 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
exc_info=True
)
return False
def _create_frame_from_video(self, video_file_path, output_dir):
"""Convert video file to one frame image via ffmpeg"""
# create output file path
base_name = os.path.basename(video_file_path)
filename = os.path.splitext(base_name)[0]
output_thumb_file_path = os.path.join(
output_dir, "{}.png".format(filename))
# Set video input attributes
max_int = str(2147483647)
video_data = get_ffprobe_data(video_file_path, logger=self.log)
duration = float(video_data["format"]["duration"])
cmd_args = [
"-y",
"-ss", str(duration * self.duration_split),
"-i", video_file_path,
"-analyzeduration", max_int,
"-probesize", max_int,
"-vframes", "1"
]
# add output file path
cmd_args.append(output_thumb_file_path)
# create ffmpeg command
cmd = get_ffmpeg_tool_args(
"ffmpeg",
*cmd_args
)
try:
# run subprocess
self.log.debug("Executing: {}".format(" ".join(cmd)))
run_subprocess(cmd, logger=self.log)
self.log.debug(
"Thumbnail created: {}".format(output_thumb_file_path))
return output_thumb_file_path
except RuntimeError as error:
self.log.warning(
"Failed intermediate thumb source using ffmpeg: {}".format(
error)
)
return None
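With the default duration_split of 0.5 this grabs the middle frame; for a 10 second review the assembled command is roughly as follows (paths are illustrative, and "ffmpeg" stands for the executable resolved by get_ffmpeg_tool_args):
ffmpeg -y -ss 5.0 -i /staging/review.mp4 \
    -analyzeduration 2147483647 -probesize 2147483647 \
    -vframes 1 /staging/review.png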
def _get_resolution_arg(
self,
application,
input_path,
):
# get settings
if self.target_size.get("type") == "source":
return []
target_width = self.target_size["width"]
target_height = self.target_size["height"]
# form arg string per application
return get_rescaled_command_arguments(
application,
input_path,
target_width,
target_height,
bg_color=self.background_color,
log=self.log
)

View file

@ -137,7 +137,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
"mvUsd",
"mvUsdComposition",
"mvUsdOverride",
"simpleUnrealTexture",
"online",
"uasset",
"blendScene",

View file

@ -92,8 +92,10 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
continue
# Find thumbnail path on instance
thumbnail_path = self._get_instance_thumbnail_path(
published_repres)
thumbnail_source = instance.data.get("thumbnailSource")
thumbnail_path = (thumbnail_source or
self._get_instance_thumbnail_path(
published_repres))
if thumbnail_path:
self.log.debug((
"Found thumbnail path for instance \"{}\"."
@ -157,8 +159,8 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
):
from openpype.client.server.operations import create_thumbnail
op_session = OperationsSession()
# Make sure each entity id has only one thumbnail id defined
thumbnail_info_by_entity_id = {}
for instance_item in filtered_instance_items:
instance, thumbnail_path, version_id = instance_item
instance_label = self._get_instance_label(instance)
@ -172,12 +174,10 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
thumbnail_id = create_thumbnail(project_name, thumbnail_path)
# Set thumbnail id for version
op_session.update_entity(
project_name,
version_doc["type"],
version_doc["_id"],
{"data.thumbnail_id": thumbnail_id}
)
thumbnail_info_by_entity_id[version_id] = {
"thumbnail_id": thumbnail_id,
"entity_type": version_doc["type"],
}
if version_doc["type"] == "hero_version":
version_name = "Hero"
else:
@ -187,16 +187,23 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
))
asset_entity = instance.data["assetEntity"]
op_session.update_entity(
project_name,
asset_entity["type"],
asset_entity["_id"],
{"data.thumbnail_id": thumbnail_id}
)
thumbnail_info_by_entity_id[asset_entity["_id"]] = {
"thumbnail_id": thumbnail_id,
"entity_type": "asset",
}
self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
asset_entity["name"], version_id
))
op_session = OperationsSession()
for entity_id, thumbnail_info in thumbnail_info_by_entity_id.items():
thumbnail_id = thumbnail_info["thumbnail_id"]
op_session.update_entity(
project_name,
thumbnail_info["entity_type"],
entity_id,
{"data.thumbnail_id": thumbnail_id}
)
op_session.commit()
def _get_instance_label(self, instance):

View file

@ -185,7 +185,7 @@ class PypeCommands:
task,
app,
env_group=env_group,
launch_type=LaunchTypes.farm_render,
launch_type=LaunchTypes.farm_render
)
else:
env = os.environ.copy()
@ -214,7 +214,7 @@ class PypeCommands:
def run_tests(self, folder, mark, pyargs,
test_data_folder, persist, app_variant, timeout, setup_only,
mongo_url, app_group):
mongo_url, app_group, dump_databases):
"""
Runs tests from 'folder'
@ -275,6 +275,13 @@ class PypeCommands:
if mongo_url:
args.extend(["--mongo_url", mongo_url])
if dump_databases:
msg = "dump_databases format is not recognized: {}".format(
dump_databases
)
assert dump_databases in ["bson", "json"], msg
args.extend(["--dump_databases", dump_databases])
print("run_tests args: {}".format(args))
import pytest
pytest.main(args)
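Only the two dump formats pass the guard above; anything else fails fast (a sketch, with the other arguments elided):
PypeCommands().run_tests(..., dump_databases="json")
# ok: forwards "--dump_databases json" to pytest
PypeCommands().run_tests(..., dump_databases="yaml")
# AssertionError: dump_databases format is not recognized: yaml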

Binary file not shown (image, 1 KiB).

View file

@ -572,6 +572,27 @@ def _convert_maya_project_settings(ayon_settings, output):
for item in viewport_options["pluginObjects"]
}
ayon_playblast_settings = ayon_publish["ExtractPlayblast"]["profiles"]
if ayon_playblast_settings:
for setting in ayon_playblast_settings:
capture_preset = setting["capture_preset"]
display_options = capture_preset["DisplayOptions"]
for key in ("background", "backgroundBottom", "backgroundTop"):
display_options[key] = _convert_color(display_options[key])
for src_key, dst_key in (
("DisplayOptions", "Display Options"),
("ViewportOptions", "Viewport Options"),
("CameraOptions", "Camera Options"),
):
capture_preset[dst_key] = capture_preset.pop(src_key)
viewport_options = capture_preset["Viewport Options"]
viewport_options["pluginObjects"] = {
item["name"]: item["value"]
for item in viewport_options["pluginObjects"]
}
# Extract Camera Alembic bake attributes
try:
bake_attributes = json.loads(
@ -1241,6 +1262,26 @@ def _convert_global_project_settings(ayon_settings, output, default_settings):
profile["outputs"] = new_outputs
# ExtractThumbnail plugin
ayon_extract_thumbnail = ayon_publish["ExtractThumbnail"]
# fix display and view in oiiotool defaults
ayon_default_oiio = copy.deepcopy(
ayon_extract_thumbnail["oiiotool_defaults"])
display_and_view = ayon_default_oiio.pop("display_and_view")
ayon_default_oiio["display"] = display_and_view["display"]
ayon_default_oiio["view"] = display_and_view["view"]
ayon_extract_thumbnail["oiiotool_defaults"] = ayon_default_oiio
# fix target size
ayon_default_resize = copy.deepcopy(ayon_extract_thumbnail["target_size"])
resize = ayon_default_resize.pop("resize")
ayon_default_resize["width"] = resize["width"]
ayon_default_resize["height"] = resize["height"]
ayon_extract_thumbnail["target_size"] = ayon_default_resize
# fix background color
ayon_extract_thumbnail["background_color"] = _convert_color(
ayon_extract_thumbnail["background_color"]
)
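In effect the nested AYON payloads are flattened to match the OpenPype plugin attributes; shapes inferred from the conversion above:
# AYON: {"type": "resize", "resize": {"width": 1920, "height": 1080}}
# ->    {"type": "resize", "width": 1920, "height": 1080}
# and similarly "display_and_view": {"display": "default", "view": "sRGB"}
# is hoisted to top-level "display" and "view" keys.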
# ExtractOIIOTranscode plugin
extract_oiio_transcode = ayon_publish["ExtractOIIOTranscode"]
extract_oiio_transcode_profiles = extract_oiio_transcode["profiles"]

View file

@ -38,16 +38,6 @@
"file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}",
"path": "{@folder}/{@file}"
},
"simpleUnrealTextureHero": {
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/hero",
"file": "{originalBasename}.{ext}",
"path": "{@folder}/{@file}"
},
"simpleUnrealTexture": {
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{@version}",
"file": "{originalBasename}_{@version}.{ext}",
"path": "{@folder}/{@file}"
},
"online": {
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}",
"file": "{originalBasename}<.{@frame}><_{udim}>.{ext}",
@ -68,8 +58,6 @@
},
"__dynamic_keys_labels__": {
"maya2unreal": "Maya to Unreal",
"simpleUnrealTextureHero": "Simple Unreal Texture - Hero",
"simpleUnrealTexture": "Simple Unreal Texture",
"online": "online",
"tycache": "tycache",
"source": "source",

View file

@ -107,7 +107,8 @@
"use_published": true,
"priority": 50,
"chunk_size": 10,
"group": "none"
"group": "none",
"job_delay": "00:00:00:00"
},
"ProcessSubmittedCacheJobOnFarm": {
"enabled": true,

View file

@ -70,6 +70,25 @@
},
"ExtractThumbnail": {
"enabled": true,
"integrate_thumbnail": false,
"background_color": [
0,
0,
0,
255
],
"duration_split": 0.5,
"target_size": {
"type": "resize",
"width": 1920,
"height": 1080
},
"oiiotool_defaults": {
"type": "colorspace",
"colorspace": "color_picking",
"view": "sRGB",
"display": "default"
},
"ffmpeg_args": {
"input": [
"-apply_trc gamma22"
@ -316,22 +335,9 @@
"animation",
"setdress",
"layout",
"mayaScene",
"simpleUnrealTexture"
"mayaScene"
],
"template_name_profiles": [
{
"families": [
"simpleUnrealTexture"
],
"hosts": [
"standalonepublisher"
],
"task_types": [],
"task_names": [],
"template_name": "simpleUnrealTextureHero"
}
]
"template_name_profiles": []
},
"CleanUp": {
"paterns": [],
@ -513,17 +519,6 @@
"task_names": [],
"template_name": "render"
},
{
"families": [
"simpleUnrealTexture"
],
"hosts": [
"standalonepublisher"
],
"task_types": [],
"task_names": [],
"template_name": "simpleUnrealTexture"
},
{
"families": [
"staticMesh",
@ -559,19 +554,7 @@
"template_name": "tycache"
}
],
"hero_template_name_profiles": [
{
"families": [
"simpleUnrealTexture"
],
"hosts": [
"standalonepublisher"
],
"task_types": [],
"task_names": [],
"template_name": "simpleUnrealTextureHero"
}
],
"hero_template_name_profiles": [],
"custom_staging_dir_profiles": []
}
},

View file

@ -133,14 +133,6 @@
],
"help": "Texture files with UDIM together with worfile"
},
"create_simple_unreal_texture": {
"name": "simple_unreal_texture",
"label": "Simple Unreal Texture",
"family": "simpleUnrealTexture",
"icon": "Image",
"defaults": [],
"help": "Texture files with Unreal naming convention"
},
"create_vdb": {
"name": "vdb",
"label": "VDB Volumetric Data",

View file

@ -244,19 +244,6 @@
".hda"
]
},
{
"family": "simpleUnrealTexture",
"identifier": "",
"label": "Simple UE texture",
"icon": "fa.image",
"default_variants": [],
"description": "Simple Unreal Engine texture",
"detailed_description": "Texture files with Unreal Engine naming conventions",
"allow_sequences": false,
"allow_multiple_items": true,
"allow_version_control": false,
"extensions": []
},
{
"family": "audio",
"identifier": "",

View file

@ -581,6 +581,11 @@
"type": "text",
"key": "group",
"label": "Group Name"
},
{
"type": "text",
"key": "job_delay",
"label": "Delay job (timecode dd:hh:mm:ss)"
}
]
},

View file

@ -202,6 +202,104 @@
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "integrate_thumbnail",
"label": "Integrate thumbnail as representation"
},
{
"type": "dict-conditional",
"use_label_wrap": false,
"collapsible": false,
"key": "target_size",
"label": "Target size",
"enum_key": "type",
"enum_label": "Type",
"enum_children": [
{
"key": "source",
"label": "Image source",
"children": [
{
"type": "label",
"label": "Image size will be inherited from source image."
}
]
},
{
"key": "resize",
"label": "Resize",
"children": [
{
"type": "label",
"label": "Image will be resized to specified size."
},
{
"type": "number",
"key": "width",
"label": "Width",
"decimal": 0,
"minimum": 0,
"maximum": 99999
},
{
"type": "number",
"key": "height",
"label": "Height",
"decimal": 0,
"minimum": 0,
"maximum": 99999
}
]
}
]
},
{
"type": "color",
"label": "Background color",
"key": "background_color"
},
{
"key": "duration_split",
"label": "Duration split ratio",
"type": "number",
"decimal": 1,
"default": 0.5,
"minimum": 0,
"maximum": 1
},
{
"type": "dict",
"collapsible": true,
"key": "oiiotool_defaults",
"label": "OIIOtool defaults",
"children": [
{
"type": "enum",
"key": "type",
"label": "Target type",
"enum_items": [
{ "colorspace": "Colorspace" },
{ "display_and_view": "Display & View" }
]
},
{
"type": "text",
"key": "colorspace",
"label": "Colorspace"
},
{
"type": "text",
"key": "view",
"label": "View"
},
{
"type": "text",
"key": "display",
"label": "Display"
}
]
},
{
"type": "dict",
"key": "ffmpeg_args",

View file

@ -608,7 +608,7 @@ class UnknownAttrWidget(_BaseAttrDefWidget):
class HiddenAttrWidget(_BaseAttrDefWidget):
def _ui_init(self):
self.setVisible(False)
self._value = None
self._value = self.attr_def.default
self._multivalue = False
def setVisible(self, visible):

View file

@ -137,7 +137,7 @@ class VersionItem:
handles,
step,
comment,
source
source,
):
self.version_id = version_id
self.product_id = product_id
@ -215,7 +215,7 @@ class RepreItem:
representation_name,
representation_icon,
product_name,
folder_label,
folder_label
):
self.representation_id = representation_id
self.representation_name = representation_name
@ -590,6 +590,22 @@ class FrontendLoaderController(_BaseLoaderController):
pass
@abstractmethod
def get_versions_representation_count(
self, project_name, version_ids, sender=None
):
"""
Args:
project_name (str): Project name.
version_ids (Iterable[str]): Version ids.
sender (Optional[str]): Sender who requested the items.
Returns:
dict[str, int]: Representation count by version id.
"""
pass
@abstractmethod
def get_thumbnail_path(self, project_name, thumbnail_id):
"""Get thumbnail path for thumbnail id.
@ -849,3 +865,80 @@ class FrontendLoaderController(_BaseLoaderController):
"""
pass
# Site sync functions
@abstractmethod
def is_site_sync_enabled(self, project_name=None):
"""Is site sync enabled.
Site sync addon can be enabled but can be disabled per project.
When asked for enabled state without project name, it should return
True if site sync addon is available and enabled.
Args:
project_name (Optional[str]): Project name.
Returns:
bool: True if site sync is enabled.
"""
pass
@abstractmethod
def get_active_site_icon_def(self, project_name):
"""Active site icon definition.
Args:
project_name (Union[str, None]): Project name.
Returns:
Union[dict[str, Any], None]: Icon definition or None if site sync
is not enabled for the project.
"""
pass
@abstractmethod
def get_remote_site_icon_def(self, project_name):
"""Remote site icon definition.
Args:
project_name (Union[str, None]): Project name.
Returns:
Union[dict[str, Any], None]: Icon definition or None if site sync
is not enabled for the project.
"""
pass
@abstractmethod
def get_version_sync_availability(self, project_name, version_ids):
"""Version sync availability.
Args:
project_name (str): Project name.
version_ids (Iterable[str]): Version ids.
Returns:
dict[str, tuple[int, int]]: Sync availability by version id.
"""
pass
@abstractmethod
def get_representations_sync_status(
self, project_name, representation_ids
):
"""Representations sync status.
Args:
project_name (str): Project name.
representation_ids (Iterable[str]): Representation ids.
Returns:
dict[str, tuple[int, int]]: Sync status by representation id.
"""
pass

View file

@ -15,7 +15,12 @@ from openpype.tools.ayon_utils.models import (
)
from .abstract import BackendLoaderController, FrontendLoaderController
from .models import SelectionModel, ProductsModel, LoaderActionsModel
from .models import (
SelectionModel,
ProductsModel,
LoaderActionsModel,
SiteSyncModel
)
class ExpectedSelection:
@ -108,6 +113,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
self._products_model = ProductsModel(self)
self._loader_actions_model = LoaderActionsModel(self)
self._thumbnails_model = ThumbnailsModel()
self._site_sync_model = SiteSyncModel(self)
@property
def log(self):
@ -143,6 +149,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
self._loader_actions_model.reset()
self._projects_model.reset()
self._thumbnails_model.reset()
self._site_sync_model.reset()
self._projects_model.refresh()
@ -195,13 +202,22 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
project_name, version_ids, sender
)
def get_versions_representation_count(
self, project_name, version_ids, sender=None
):
return self._products_model.get_versions_repre_count(
project_name, version_ids, sender
)
def get_folder_thumbnail_ids(self, project_name, folder_ids):
return self._thumbnails_model.get_folder_thumbnail_ids(
project_name, folder_ids)
project_name, folder_ids
)
def get_version_thumbnail_ids(self, project_name, version_ids):
return self._thumbnails_model.get_version_thumbnail_ids(
project_name, version_ids)
project_name, version_ids
)
def get_thumbnail_path(self, project_name, thumbnail_id):
return self._thumbnails_model.get_thumbnail_path(
@ -219,8 +235,16 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
def get_representations_action_items(
self, project_name, representation_ids):
return self._loader_actions_model.get_representations_action_items(
action_items = (
self._loader_actions_model.get_representations_action_items(
project_name, representation_ids)
)
action_items.extend(self._site_sync_model.get_site_sync_action_items(
project_name, representation_ids)
)
return action_items
def trigger_action_item(
self,
@ -230,6 +254,14 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
version_ids,
representation_ids
):
if self._site_sync_model.is_site_sync_action(identifier):
self._site_sync_model.trigger_action_item(
identifier,
project_name,
representation_ids
)
return
self._loader_actions_model.trigger_action_item(
identifier,
options,
@ -336,6 +368,27 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
self._loaded_products_cache.update_data(product_ids)
return self._loaded_products_cache.get_data()
def is_site_sync_enabled(self, project_name=None):
return self._site_sync_model.is_site_sync_enabled(project_name)
def get_active_site_icon_def(self, project_name):
return self._site_sync_model.get_active_site_icon_def(project_name)
def get_remote_site_icon_def(self, project_name):
return self._site_sync_model.get_remote_site_icon_def(project_name)
def get_version_sync_availability(self, project_name, version_ids):
return self._site_sync_model.get_version_sync_availability(
project_name, version_ids
)
def get_representations_sync_status(
self, project_name, representation_ids
):
return self._site_sync_model.get_representations_sync_status(
project_name, representation_ids
)
def is_loaded_products_supported(self):
return self._host is not None

View file

@ -1,10 +1,12 @@
from .selection import SelectionModel
from .products import ProductsModel
from .actions import LoaderActionsModel
from .site_sync import SiteSyncModel
__all__ = (
"SelectionModel",
"ProductsModel",
"LoaderActionsModel",
"SiteSyncModel",
)

View file

@ -317,6 +317,42 @@ class ProductsModel:
return output
def get_versions_repre_count(self, project_name, version_ids, sender):
"""Get representation count for passed version ids.
Args:
project_name (str): Project name.
version_ids (Iterable[str]): Version ids.
sender (Union[str, None]): Who triggered the method.
Returns:
dict[str, int]: Number of representations by version id.
"""
output = {}
if not any((project_name, version_ids)):
return output
invalid_version_ids = set()
project_cache = self._repre_items_cache[project_name]
for version_id in version_ids:
version_cache = project_cache[version_id]
if version_cache.is_valid:
output[version_id] = len(version_cache.get_data())
else:
invalid_version_ids.add(version_id)
if invalid_version_ids:
self.refresh_representation_items(
project_name, invalid_version_ids, sender
)
for version_id in invalid_version_ids:
version_cache = project_cache[version_id]
output[version_id] = len(version_cache.get_data())
return output
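Typical use from the controller (project name and ids are hypothetical):
counts = products_model.get_versions_repre_count(
    "demo_Commercial", {"ver-1", "ver-2"}, sender=None)
# -> {"ver-1": 3, "ver-2": 1}; invalid cache entries are refreshed first.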
def change_products_group(self, project_name, product_ids, group_name):
"""Change group name for passed product ids.

View file

@ -0,0 +1,509 @@
import collections
from openpype.lib import Logger
from openpype.client.entities import get_representations
from openpype.client import get_linked_representation_id
from openpype.modules import ModulesManager
from openpype.tools.ayon_utils.models import NestedCacheItem
from openpype.tools.ayon_loader.abstract import ActionItem
DOWNLOAD_IDENTIFIER = "sitesync.download"
UPLOAD_IDENTIFIER = "sitesync.upload"
REMOVE_IDENTIFIER = "sitesync.remove"
log = Logger.get_logger(__name__)
def _default_version_availability():
return 0, 0
def _default_repre_status():
return 0.0, 0.0
class SiteSyncModel:
"""Model handling site sync logic.
The model takes care of site sync functionality. All public
functions should be safe to call even if site sync is not available.
"""
lifetime = 60 # In seconds (minute by default)
status_lifetime = 20
def __init__(self, controller):
self._controller = controller
self._site_icons = None
self._site_sync_enabled_cache = NestedCacheItem(
levels=1, lifetime=self.lifetime
)
self._active_site_cache = NestedCacheItem(
levels=1, lifetime=self.lifetime
)
self._remote_site_cache = NestedCacheItem(
levels=1, lifetime=self.lifetime
)
self._version_availability_cache = NestedCacheItem(
levels=2,
default_factory=_default_version_availability,
lifetime=self.status_lifetime
)
self._repre_status_cache = NestedCacheItem(
levels=2,
default_factory=_default_repre_status,
lifetime=self.status_lifetime
)
manager = ModulesManager()
self._site_sync_addon = manager.get("sync_server")
def reset(self):
self._site_icons = None
self._site_sync_enabled_cache.reset()
self._active_site_cache.reset()
self._remote_site_cache.reset()
self._version_availability_cache.reset()
self._repre_status_cache.reset()
def is_site_sync_enabled(self, project_name=None):
"""Site sync is enabled for a project.
Returns False if the site sync addon is not available or not
enabled, or if the project has disabled it.
Args:
project_name (Union[str, None]): Project name. If project name
is 'None', True is returned if site sync addon
is available and enabled.
Returns:
bool: Site sync is enabled.
"""
if not self._is_site_sync_addon_enabled():
return False
cache = self._site_sync_enabled_cache[project_name]
if not cache.is_valid:
enabled = True
if project_name:
enabled = self._site_sync_addon.is_project_enabled(
project_name, single=True
)
cache.update_data(enabled)
return cache.get_data()
def get_active_site(self, project_name):
"""Active site name for a project.
Args:
project_name (str): Project name.
Returns:
Union[str, None]: Active site name.
"""
cache = self._active_site_cache[project_name]
if not cache.is_valid:
site_name = None
if project_name and self._is_site_sync_addon_enabled():
site_name = self._site_sync_addon.get_active_site(project_name)
cache.update_data(site_name)
return cache.get_data()
def get_remote_site(self, project_name):
"""Remote site name for a project.
Args:
project_name (str): Project name.
Returns:
Union[str, None]: Remote site name.
"""
cache = self._remote_site_cache[project_name]
if not cache.is_valid:
site_name = None
if project_name and self._is_site_sync_addon_enabled():
site_name = self._site_sync_addon.get_remote_site(project_name)
cache.update_data(site_name)
return cache.get_data()
def get_active_site_icon_def(self, project_name):
"""Active site icon definition.
Args:
project_name (Union[str, None]): Name of project.
Returns:
Union[dict[str, Any], None]: Site icon definition.
"""
if not project_name:
return None
active_site = self.get_active_site(project_name)
provider = self._get_provider_for_site(project_name, active_site)
return self._get_provider_icon(provider)
def get_remote_site_icon_def(self, project_name):
"""Remote site icon definition.
Args:
project_name (Union[str, None]): Name of project.
Returns:
Union[dict[str, Any], None]: Site icon definition.
"""
if not project_name or not self.is_site_sync_enabled(project_name):
return None
remote_site = self.get_remote_site(project_name)
provider = self._get_provider_for_site(project_name, remote_site)
return self._get_provider_icon(provider)
def get_version_sync_availability(self, project_name, version_ids):
"""Returns how many representations are available on sites.
Returned value `{version_id: (4, 6)}` means that 4 representations
are available locally and 6 remotely.
NOTE: Available means they were synced to the site.
Returns:
dict[str, tuple[int, int]]
"""
if not self.is_site_sync_enabled(project_name):
return {
version_id: _default_version_availability()
for version_id in version_ids
}
output = {}
project_cache = self._version_availability_cache[project_name]
invalid_ids = set()
for version_id in version_ids:
version_cache = project_cache[version_id]
if version_cache.is_valid:
    output[version_id] = version_cache.get_data()
else:
invalid_ids.add(version_id)
if invalid_ids:
self._refresh_version_availability(
project_name, invalid_ids
)
for version_id in invalid_ids:
version_cache = project_cache[version_id]
output[version_id] = version_cache.get_data()
return output
def get_representations_sync_status(
self, project_name, representation_ids
):
"""
Sync status of representations on active and remote sites.

Args:
project_name (str): Project name.
representation_ids (Iterable[str]): Representation ids.
Returns:
dict[str, tuple[float, float]]
"""
if not self.is_site_sync_enabled(project_name):
return {
repre_id: _default_repre_status()
for repre_id in representation_ids
}
output = {}
project_cache = self._repre_status_cache[project_name]
invalid_ids = set()
for repre_id in representation_ids:
repre_cache = project_cache[repre_id]
if repre_cache.is_valid:
output[repre_id] = repre_cache.get_data()
else:
invalid_ids.add(repre_id)
if invalid_ids:
self._refresh_representations_sync_status(
project_name, invalid_ids
)
for repre_id in invalid_ids:
repre_cache = project_cache[repre_id]
output[repre_id] = repre_cache.get_data()
return output
def get_site_sync_action_items(self, project_name, representation_ids):
"""
Action items for site sync operations on representations.

Args:
project_name (str): Project name.
representation_ids (Iterable[str]): Representation ids.
Returns:
list[ActionItem]: Actions that can be shown in loader.
"""
if not self.is_site_sync_enabled(project_name):
return []
repres_status = self.get_representations_sync_status(
project_name, representation_ids
)
repre_ids_per_identifier = collections.defaultdict(set)
for repre_id in representation_ids:
repre_status = repres_status[repre_id]
local_status, remote_status = repre_status
if local_status:
repre_ids_per_identifier[UPLOAD_IDENTIFIER].add(repre_id)
repre_ids_per_identifier[REMOVE_IDENTIFIER].add(repre_id)
if remote_status:
repre_ids_per_identifier[DOWNLOAD_IDENTIFIER].add(repre_id)
action_items = []
for identifier, repre_ids in repre_ids_per_identifier.items():
if identifier == DOWNLOAD_IDENTIFIER:
action_items.append(self._create_download_action_item(
project_name, repre_ids
))
elif identifier == UPLOAD_IDENTIFIER:
action_items.append(self._create_upload_action_item(
project_name, repre_ids
))
elif identifier == REMOVE_IDENTIFIER:
action_items.append(self._create_delete_action_item(
project_name, repre_ids
))
return action_items
def is_site_sync_action(self, identifier):
    """Check if the `identifier` should be handled by SiteSync.
Args:
identifier (str): Action identifier.
Returns:
bool: Should action be handled by SiteSync.
"""
return identifier in {
UPLOAD_IDENTIFIER,
DOWNLOAD_IDENTIFIER,
REMOVE_IDENTIFIER,
}
def trigger_action_item(
self,
identifier,
project_name,
representation_ids
):
    """Reset sync status for a site or remove local files.
Args:
identifier (str): Action identifier.
project_name (str): Project name.
representation_ids (Iterable[str]): Representation ids.
"""
active_site = self.get_active_site(project_name)
remote_site = self.get_remote_site(project_name)
repre_docs = list(get_representations(
project_name, representation_ids=representation_ids
))
families_per_repre_id = {
item["_id"]: item["context"]["family"]
for item in repre_docs
}
for repre_id in representation_ids:
family = families_per_repre_id[repre_id]
if identifier == DOWNLOAD_IDENTIFIER:
self._add_site(
project_name, repre_id, active_site, family
)
elif identifier == UPLOAD_IDENTIFIER:
self._add_site(
project_name, repre_id, remote_site, family
)
elif identifier == REMOVE_IDENTIFIER:
self._site_sync_addon.remove_site(
project_name,
repre_id,
active_site,
remove_local_files=True
)
def _is_site_sync_addon_enabled(self):
"""
Check if the site sync addon is available and enabled.

Returns:
bool: Site sync addon is enabled.
"""
if self._site_sync_addon is None:
return False
return self._site_sync_addon.enabled
def _get_provider_for_site(self, project_name, site_name):
"""Provider for a site.
Args:
project_name (str): Project name.
site_name (str): Site name.
Returns:
Union[str, None]: Provider name.
"""
if not self._is_site_sync_addon_enabled():
return None
return self._site_sync_addon.get_provider_for_site(
project_name, site_name
)
def _get_provider_icon(self, provider):
    """Icon definition for a site provider.
Returns:
Union[dict[str, Any], None]: Icon of site provider.
"""
if not provider:
return None
if self._site_icons is None:
self._site_icons = self._site_sync_addon.get_site_icons()
return self._site_icons.get(provider)
def _refresh_version_availability(self, project_name, version_ids):
if not project_name or not version_ids:
return
project_cache = self._version_availability_cache[project_name]
avail_by_id = self._site_sync_addon.get_version_availability(
project_name,
version_ids,
self.get_active_site(project_name),
self.get_remote_site(project_name),
)
for version_id in version_ids:
status = avail_by_id.get(version_id)
if status is None:
status = _default_version_availability()
project_cache[version_id].update_data(status)
def _refresh_representations_sync_status(
self, project_name, representation_ids
):
if not project_name or not representation_ids:
return
project_cache = self._repre_status_cache[project_name]
status_by_repre_id = (
self._site_sync_addon.get_representations_sync_state(
project_name,
representation_ids,
self.get_active_site(project_name),
self.get_remote_site(project_name),
)
)
for repre_id in representation_ids:
status = status_by_repre_id.get(repre_id)
if status is None:
status = _default_repre_status()
project_cache[repre_id].update_data(status)
def _create_download_action_item(self, project_name, representation_ids):
return self._create_action_item(
project_name,
representation_ids,
DOWNLOAD_IDENTIFIER,
"Download",
"Mark representation for download locally",
"fa.download"
)
def _create_upload_action_item(self, project_name, representation_ids):
return self._create_action_item(
project_name,
representation_ids,
UPLOAD_IDENTIFIER,
"Upload",
"Mark representation for upload remotely",
"fa.upload"
)
def _create_delete_action_item(self, project_name, representation_ids):
return self._create_action_item(
project_name,
representation_ids,
REMOVE_IDENTIFIER,
"Remove from local",
"Remove local synchronization",
"fa.trash"
)
def _create_action_item(
self,
project_name,
representation_ids,
identifier,
label,
tooltip,
icon_name
):
return ActionItem(
identifier,
label,
icon={
"type": "awesome-font",
"name": icon_name,
"color": "#999999"
},
tooltip=tooltip,
options={},
order=1,
project_name=project_name,
folder_ids=[],
product_ids=[],
version_ids=[],
representation_ids=representation_ids,
)
def _add_site(self, project_name, repre_id, site_name, family):
self._site_sync_addon.add_site(
project_name, repre_id, site_name, force=True
)
# TODO this should happen in site sync addon
if family != "workfile":
return
links = get_linked_representation_id(
project_name,
repre_id=repre_id,
link_type="reference"
)
for link_repre_id in links:
try:
print("Adding {} to linked representation: {}".format(
site_name, link_repre_id))
self._site_sync_addon.add_site(
project_name,
link_repre_id,
site_name,
force=False
)
except Exception:
# do not add/reset working site for references
log.debug("Site present", exc_info=True)
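Taken together, the public surface above is small. A hedged usage sketch, assuming a loader controller object and placeholder representation ids:

model = SiteSyncModel(controller)

project_name = "demo_project"
repre_ids = {repre_id_a, repre_id_b}  # ids from the loader selection
if model.is_site_sync_enabled(project_name):
    status_by_id = model.get_representations_sync_status(project_name, repre_ids)
    for action in model.get_site_sync_action_items(project_name, repre_ids):
        # Only identifiers owned by SiteSync are routed back to the model
        if model.is_site_sync_action(action.identifier):
            model.trigger_action_item(
                action.identifier, project_name, action.representation_ids
            )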

View file

@ -8,6 +8,11 @@ from .products_model import (
VERSION_NAME_EDIT_ROLE,
VERSION_ID_ROLE,
PRODUCT_IN_SCENE_ROLE,
ACTIVE_SITE_ICON_ROLE,
REMOTE_SITE_ICON_ROLE,
REPRESENTATIONS_COUNT_ROLE,
SYNC_ACTIVE_SITE_AVAILABILITY,
SYNC_REMOTE_SITE_AVAILABILITY,
)
@ -189,3 +194,78 @@ class LoadedInSceneDelegate(QtWidgets.QStyledItemDelegate):
value = index.data(PRODUCT_IN_SCENE_ROLE)
color = self._colors.get(value, self._default_color)
option.palette.setBrush(QtGui.QPalette.Text, color)
class SiteSyncDelegate(QtWidgets.QStyledItemDelegate):
    """Paints icons and the downloaded representation ratio for both sites."""
def paint(self, painter, option, index):
super(SiteSyncDelegate, self).paint(painter, option, index)
option = QtWidgets.QStyleOptionViewItem(option)
option.showDecorationSelected = True
active_icon = index.data(ACTIVE_SITE_ICON_ROLE)
remote_icon = index.data(REMOTE_SITE_ICON_ROLE)
active_availability = index.data(SYNC_ACTIVE_SITE_AVAILABILITY)
remote_availability = index.data(SYNC_REMOTE_SITE_AVAILABILITY)
# Check the raw role data; the formatted strings could never be None
if active_availability is None or remote_availability is None:
    return
repre_count = index.data(REPRESENTATIONS_COUNT_ROLE)
availability_active = "{}/{}".format(active_availability, repre_count)
availability_remote = "{}/{}".format(remote_availability, repre_count)
items_to_draw = [
(value, icon)
for value, icon in (
(availability_active, active_icon),
(availability_remote, remote_icon),
)
if icon
]
if not items_to_draw:
return
icon_size = QtCore.QSize(24, 24)
padding = 10
pos_x = option.rect.x()
item_width = int(option.rect.width() / len(items_to_draw))
if item_width < 1:
item_width = 0
for value, icon in items_to_draw:
item_rect = QtCore.QRect(
pos_x,
option.rect.y(),
item_width,
option.rect.height()
)
# Prepare pos_x for next item
pos_x = item_rect.x() + item_rect.width()
pixmap = icon.pixmap(icon.actualSize(icon_size))
point = QtCore.QPoint(
    item_rect.x() + padding,
    # QPoint requires ints; center the icon vertically
    item_rect.y() + int((item_rect.height() - pixmap.height()) * 0.5)
)
painter.drawPixmap(point, pixmap)
icon_offset = icon_size.width() + (padding * 2)
text_rect = QtCore.QRect(item_rect)
text_rect.setLeft(text_rect.left() + icon_offset)
if text_rect.width() < 1:
continue
painter.drawText(
text_rect,
option.displayAlignment,
value
)
def displayText(self, value, locale):
    # Intentionally returns None so the base class draws no text;
    # all drawing happens in 'paint'
    pass

View file

@ -29,6 +29,11 @@ VERSION_HANDLES_ROLE = QtCore.Qt.UserRole + 18
VERSION_STEP_ROLE = QtCore.Qt.UserRole + 19
VERSION_AVAILABLE_ROLE = QtCore.Qt.UserRole + 20
VERSION_THUMBNAIL_ID_ROLE = QtCore.Qt.UserRole + 21
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 22
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 23
REPRESENTATIONS_COUNT_ROLE = QtCore.Qt.UserRole + 24
SYNC_ACTIVE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 25
SYNC_REMOTE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 26
class ProductsModel(QtGui.QStandardItemModel):
@ -68,6 +73,7 @@ class ProductsModel(QtGui.QStandardItemModel):
published_time_col = column_labels.index("Time")
folders_label_col = column_labels.index("Folder")
in_scene_col = column_labels.index("In scene")
site_sync_avail_col = column_labels.index("Availability")
def __init__(self, controller):
super(ProductsModel, self).__init__()
@ -303,7 +309,26 @@ class ProductsModel(QtGui.QStandardItemModel):
model_item.setData(
version_item.thumbnail_id, VERSION_THUMBNAIL_ID_ROLE)
def _get_product_model_item(self, product_item):
# TODO call site sync methods for all versions at once
project_name = self._last_project_name
version_id = version_item.version_id
repre_count = self._controller.get_versions_representation_count(
project_name, [version_id]
)[version_id]
active, remote = self._controller.get_version_sync_availability(
project_name, [version_id]
)[version_id]
model_item.setData(repre_count, REPRESENTATIONS_COUNT_ROLE)
model_item.setData(active, SYNC_ACTIVE_SITE_AVAILABILITY)
model_item.setData(remote, SYNC_REMOTE_SITE_AVAILABILITY)
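The TODO above could be resolved without new controller API, since both controller methods already accept multiple version ids. A hedged sketch of the batched variant, where `model_items_by_version_id` is a hypothetical mapping built by the caller:

def _set_sync_data_for_items(self, model_items_by_version_id):
    # Two controller calls for all versions instead of two per version
    project_name = self._last_project_name
    version_ids = list(model_items_by_version_id.keys())
    repre_counts = self._controller.get_versions_representation_count(
        project_name, version_ids
    )
    availability = self._controller.get_version_sync_availability(
        project_name, version_ids
    )
    for version_id, model_item in model_items_by_version_id.items():
        active, remote = availability[version_id]
        model_item.setData(repre_counts[version_id], REPRESENTATIONS_COUNT_ROLE)
        model_item.setData(active, SYNC_ACTIVE_SITE_AVAILABILITY)
        model_item.setData(remote, SYNC_REMOTE_SITE_AVAILABILITY)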
def _get_product_model_item(
self,
product_item,
active_site_icon,
remote_site_icon
):
model_item = self._items_by_id.get(product_item.product_id)
versions = list(product_item.version_items.values())
versions.sort()
@ -329,6 +354,9 @@ class ProductsModel(QtGui.QStandardItemModel):
in_scene = 1 if product_item.product_in_scene else 0
model_item.setData(in_scene, PRODUCT_IN_SCENE_ROLE)
model_item.setData(active_site_icon, ACTIVE_SITE_ICON_ROLE)
model_item.setData(remote_site_icon, REMOTE_SITE_ICON_ROLE)
self._set_version_data_to_product_item(model_item, last_version)
return model_item
@ -341,6 +369,15 @@ class ProductsModel(QtGui.QStandardItemModel):
self._last_project_name = project_name
self._last_folder_ids = folder_ids
active_site_icon_def = self._controller.get_active_site_icon_def(
project_name
)
remote_site_icon_def = self._controller.get_remote_site_icon_def(
project_name
)
active_site_icon = get_qt_icon(active_site_icon_def)
remote_site_icon = get_qt_icon(remote_site_icon_def)
product_items = self._controller.get_product_items(
project_name,
folder_ids,
@ -402,7 +439,11 @@ class ProductsModel(QtGui.QStandardItemModel):
new_root_items.append(parent_item)
for product_item in top_items:
item = self._get_product_model_item(product_item)
item = self._get_product_model_item(
product_item,
active_site_icon,
remote_site_icon,
)
new_items.append(item)
for path_info in merged_product_items.values():
@ -418,7 +459,11 @@ class ProductsModel(QtGui.QStandardItemModel):
merged_product_types = set()
new_merged_items = []
for product_item in product_items:
item = self._get_product_model_item(product_item)
item = self._get_product_model_item(
product_item,
active_site_icon,
remote_site_icon,
)
new_merged_items.append(item)
merged_product_types.add(product_item.product_type)

View file

@ -19,7 +19,11 @@ from .products_model import (
VERSION_ID_ROLE,
VERSION_THUMBNAIL_ID_ROLE,
)
from .products_delegates import VersionDelegate, LoadedInSceneDelegate
from .products_delegates import (
VersionDelegate,
LoadedInSceneDelegate,
SiteSyncDelegate
)
from .actions_utils import show_actions_menu
@ -92,7 +96,7 @@ class ProductsWidget(QtWidgets.QWidget):
55, # Handles
10, # Step
25, # Loaded in scene
65, # Site info (maybe?)
65, # Site sync info
)
def __init__(self, controller, parent):
@ -135,6 +139,10 @@ class ProductsWidget(QtWidgets.QWidget):
products_view.setItemDelegateForColumn(
products_model.in_scene_col, in_scene_delegate)
site_sync_delegate = SiteSyncDelegate()
products_view.setItemDelegateForColumn(
products_model.site_sync_avail_col, site_sync_delegate)
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(products_view, 1)
@ -167,6 +175,8 @@ class ProductsWidget(QtWidgets.QWidget):
self._version_delegate = version_delegate
self._time_delegate = time_delegate
self._in_scene_delegate = in_scene_delegate
self._site_sync_delegate = site_sync_delegate
self._selected_project_name = None
self._selected_folder_ids = set()
@ -182,6 +192,9 @@ class ProductsWidget(QtWidgets.QWidget):
products_model.in_scene_col,
not controller.is_loaded_products_supported()
)
self._set_site_sync_visibility(
self._controller.is_site_sync_enabled()
)
def set_name_filter(self, name):
"""Set filter of product name.
@ -216,6 +229,12 @@ class ProductsWidget(QtWidgets.QWidget):
def refresh(self):
self._refresh_model()
def _set_site_sync_visibility(self, site_sync_enabled):
self._products_view.setColumnHidden(
self._products_model.site_sync_avail_col,
not site_sync_enabled
)
def _fill_version_editor(self):
model = self._products_proxy_model
index_queue = collections.deque()
@ -375,7 +394,12 @@ class ProductsWidget(QtWidgets.QWidget):
self._on_selection_change()
def _on_folders_selection_change(self, event):
self._selected_project_name = event["project_name"]
project_name = event["project_name"]
site_sync_enabled = self._controller.is_site_sync_enabled(
project_name
)
self._set_site_sync_visibility(site_sync_enabled)
self._selected_project_name = project_name
self._selected_folder_ids = event["folder_ids"]
self._refresh_model()
self._update_folders_label_visible()

View file

@ -14,6 +14,10 @@ REPRESENTATION_ID_ROLE = QtCore.Qt.UserRole + 2
PRODUCT_NAME_ROLE = QtCore.Qt.UserRole + 3
FOLDER_LABEL_ROLE = QtCore.Qt.UserRole + 4
GROUP_TYPE_ROLE = QtCore.Qt.UserRole + 5
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 6
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 7
SYNC_ACTIVE_SITE_PROGRESS = QtCore.Qt.UserRole + 8
SYNC_REMOTE_SITE_PROGRESS = QtCore.Qt.UserRole + 9
class RepresentationsModel(QtGui.QStandardItemModel):
@ -22,12 +26,14 @@ class RepresentationsModel(QtGui.QStandardItemModel):
("Name", 120),
("Product name", 125),
("Folder", 125),
# ("Active site", 85),
# ("Remote site", 85)
("Active site", 85),
("Remote site", 85)
]
column_labels = [label for label, _ in colums_info]
column_widths = [width for _, width in colums_info]
folder_column = column_labels.index("Product name")
active_site_column = column_labels.index("Active site")
remote_site_column = column_labels.index("Remote site")
def __init__(self, controller):
super(RepresentationsModel, self).__init__()
@ -59,7 +65,7 @@ class RepresentationsModel(QtGui.QStandardItemModel):
repre_items = self._controller.get_representation_items(
self._selected_project_name, self._selected_version_ids
)
self._fill_items(repre_items)
self._fill_items(repre_items, self._selected_project_name)
self.refreshed.emit()
def data(self, index, role=None):
@ -69,13 +75,23 @@ class RepresentationsModel(QtGui.QStandardItemModel):
col = index.column()
if col != 0:
if role == QtCore.Qt.DecorationRole:
return None
if col == 3:
role = ACTIVE_SITE_ICON_ROLE
elif col == 4:
role = REMOTE_SITE_ICON_ROLE
else:
return None
if role == QtCore.Qt.DisplayRole:
if col == 1:
role = PRODUCT_NAME_ROLE
elif col == 2:
role = FOLDER_LABEL_ROLE
elif col == 3:
role = SYNC_ACTIVE_SITE_PROGRESS
elif col == 4:
role = SYNC_REMOTE_SITE_PROGRESS
index = self.index(index.row(), 0, index.parent())
return super(RepresentationsModel, self).data(index, role)
@ -89,7 +105,13 @@ class RepresentationsModel(QtGui.QStandardItemModel):
root_item = self.invisibleRootItem()
root_item.removeRows(0, root_item.rowCount())
def _get_repre_item(self, repre_item):
def _get_repre_item(
self,
repre_item,
active_site_icon,
remote_site_icon,
repres_sync_status
):
repre_id = repre_item.representation_id
repre_name = repre_item.representation_name
repre_icon = repre_item.representation_icon
@ -102,6 +124,12 @@ class RepresentationsModel(QtGui.QStandardItemModel):
item.setColumnCount(self.columnCount())
item.setEditable(False)
sync_status = repres_sync_status[repre_id]
active_progress, remote_progress = sync_status
active_site_progress = "{}%".format(int(active_progress * 100))
remote_site_progress = "{}%".format(int(remote_progress * 100))
icon = get_qt_icon(repre_icon)
item.setData(repre_name, QtCore.Qt.DisplayRole)
item.setData(icon, QtCore.Qt.DecorationRole)
@ -109,6 +137,10 @@ class RepresentationsModel(QtGui.QStandardItemModel):
item.setData(repre_id, REPRESENTATION_ID_ROLE)
item.setData(repre_item.product_name, PRODUCT_NAME_ROLE)
item.setData(repre_item.folder_label, FOLDER_LABEL_ROLE)
item.setData(active_site_icon, ACTIVE_SITE_ICON_ROLE)
item.setData(remote_site_icon, REMOTE_SITE_ICON_ROLE)
item.setData(active_site_progress, SYNC_ACTIVE_SITE_PROGRESS)
item.setData(remote_site_progress, SYNC_REMOTE_SITE_PROGRESS)
return is_new_item, item
def _get_group_icon(self):
@ -134,14 +166,29 @@ class RepresentationsModel(QtGui.QStandardItemModel):
self._groups_items_by_name[repre_name] = item
return True, item
def _fill_items(self, repre_items):
def _fill_items(self, repre_items, project_name):
active_site_icon_def = self._controller.get_active_site_icon_def(
project_name
)
remote_site_icon_def = self._controller.get_remote_site_icon_def(
project_name
)
active_site_icon = get_qt_icon(active_site_icon_def)
remote_site_icon = get_qt_icon(remote_site_icon_def)
items_to_remove = set(self._items_by_id.keys())
repre_items_by_name = collections.defaultdict(list)
repre_ids = set()
for repre_item in repre_items:
repre_ids.add(repre_item.representation_id)
items_to_remove.discard(repre_item.representation_id)
repre_name = repre_item.representation_name
repre_items_by_name[repre_name].append(repre_item)
repres_sync_status = self._controller.get_representations_sync_status(
project_name, repre_ids
)
root_item = self.invisibleRootItem()
for repre_id in items_to_remove:
item = self._items_by_id.pop(repre_id)
@ -164,7 +211,12 @@ class RepresentationsModel(QtGui.QStandardItemModel):
new_group_items = []
for repre_item in repre_name_items:
is_new_item, item = self._get_repre_item(repre_item)
is_new_item, item = self._get_repre_item(
repre_item,
active_site_icon,
remote_site_icon,
repres_sync_status
)
item_parent = item.parent()
if item_parent is None:
item_parent = root_item
@ -255,6 +307,9 @@ class RepresentationsWidget(QtWidgets.QWidget):
self._repre_model = repre_model
self._repre_proxy_model = repre_proxy_model
self._set_site_sync_visibility(
self._controller.is_site_sync_enabled()
)
self._set_multiple_folders_selected(False)
def refresh(self):
@ -265,6 +320,20 @@ class RepresentationsWidget(QtWidgets.QWidget):
def _on_project_change(self, event):
self._selected_project_name = event["project_name"]
site_sync_enabled = self._controller.is_site_sync_enabled(
self._selected_project_name
)
self._set_site_sync_visibility(site_sync_enabled)
def _set_site_sync_visibility(self, site_sync_enabled):
self._repre_view.setColumnHidden(
self._repre_model.active_site_column,
not site_sync_enabled
)
self._repre_view.setColumnHidden(
self._repre_model.remote_site_column,
not site_sync_enabled
)
def _set_multiple_folders_selected(self, selected_multiple_folders):
if selected_multiple_folders == self._selected_multiple_folders:

View file

@ -15,6 +15,7 @@ class RefreshThread(QtCore.QThread):
self._callback = partial(func, *args, **kwargs)
self._exception = None
self._result = None
self.finished.connect(self._on_finish_callback)
@property
def id(self):
@ -29,11 +30,19 @@ class RefreshThread(QtCore.QThread):
self._result = self._callback()
except Exception as exc:
self._exception = exc
self.refresh_finished.emit(self.id)
def get_result(self):
return self._result
def _on_finish_callback(self):
"""Trigger custom signal with thread id.
By listening to the 'finished' signal we make sure the thread's
execution has finished and the QThread object can be safely deleted.
"""
self.refresh_finished.emit(self.id)
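With the custom signal emitted from the 'finished' handler, a caller can safely consume the result and dispose of the thread. A usage sketch; the constructor signature is not shown in this hunk and is assumed here as (identifier, func, *args, **kwargs), and fetch_product_items is a hypothetical callable:

thread = RefreshThread("products", fetch_product_items, project_name)

def _on_refresh_finished(thread_id):
    # run() has already returned at this point, so reading the result
    # and scheduling deletion are both safe
    result = thread.get_result()
    thread.deleteLater()

thread.refresh_finished.connect(_on_refresh_finished)
thread.start()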
class _IconsCache:
"""Cache for icons."""

View file

@ -1,5 +1,6 @@
from qtpy import QtWidgets, QtCore, QtGui
from openpype import AYON_SERVER_ENABLED
from openpype.style import (
load_stylesheet,
app_icon_path
@ -26,7 +27,8 @@ class ExperimentalToolsDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(ExperimentalToolsDialog, self).__init__(parent)
self.setWindowTitle("OpenPype Experimental tools")
app_label = "AYON" if AYON_SERVER_ENABLED else "OpenPype"
self.setWindowTitle("{} Experimental tools".format(app_label))
icon = QtGui.QIcon(app_icon_path())
self.setWindowIcon(icon)
self.setStyleSheet(load_stylesheet())
@ -68,8 +70,8 @@ class ExperimentalToolsDialog(QtWidgets.QDialog):
tool_btns_label = QtWidgets.QLabel(
(
"You can enable these features in"
"<br><b>OpenPype tray -> Settings -> Experimental tools</b>"
),
"<br><b>{} tray -> Settings -> Experimental tools</b>"
).format(app_label),
tool_btns_widget
)
tool_btns_label.setAlignment(QtCore.Qt.AlignCenter)
@ -113,6 +115,7 @@ class ExperimentalToolsDialog(QtWidgets.QDialog):
self._window_is_active = False
def refresh(self):
app_label = "AYON" if AYON_SERVER_ENABLED else "OpenPype"
self._experimental_tools.refresh_availability()
buttons_to_remove = set(self._buttons_by_tool_identifier.keys())
@ -139,8 +142,8 @@ class ExperimentalToolsDialog(QtWidgets.QDialog):
elif is_new or button.isEnabled():
button.setToolTip((
"You can enable this tool in local settings."
"\n\nOpenPype Tray > Settings > Experimental Tools"
))
"\n\n{} Tray > Settings > Experimental Tools"
).format(app_label))
if tool.enabled != button.isEnabled():
button.setEnabled(tool.enabled)

View file

@ -2517,7 +2517,7 @@ class PublisherController(BasePublisherController):
else:
msg = (
"Something went wrong. Send report"
" to your supervisor or OpenPype."
" to your supervisor or Ynput team."
)
self.publish_error_msg = msg
self.publish_has_crashed = True

View file

@ -9,6 +9,7 @@ from openpype import (
resources,
style
)
from openpype import AYON_SERVER_ENABLED
from openpype.tools.utils import (
ErrorMessageBox,
PlaceholderLineEdit,
@ -53,7 +54,9 @@ class PublisherWindow(QtWidgets.QDialog):
self.setObjectName("PublishWindow")
self.setWindowTitle("OpenPype publisher")
self.setWindowTitle("{} publisher".format(
"AYON" if AYON_SERVER_ENABLED else "OpenPype"
))
icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
self.setWindowIcon(icon)

View file

@ -219,7 +219,9 @@ class PypeInfoWidget(QtWidgets.QWidget):
icon = QtGui.QIcon(resources.get_openpype_icon_filepath())
self.setWindowIcon(icon)
self.setWindowTitle("OpenPype info")
self.setWindowTitle(
"{} info".format("AYON" if AYON_SERVER_ENABLED else "OpenPype")
)
scroll_area = QtWidgets.QScrollArea(self)
info_widget = PypeInfoSubWidget(scroll_area)
@ -441,16 +443,19 @@ class PypeInfoSubWidget(QtWidgets.QWidget):
info_values = {
"executable": executable_args[-1],
"server_url": os.environ["AYON_SERVER_URL"],
"bundle_name": os.environ["AYON_BUNDLE_NAME"],
"username": username
}
key_label_mapping = {
"executable": "AYON Executable:",
"server_url": "AYON Server:",
"bundle_name": "AYON Bundle:",
"username": "AYON Username:"
}
# Prepare keys order
keys_order = [
"server_url",
"bundle_name",
"username",
"executable",
]

View file

@ -632,6 +632,14 @@ class TrayManager:
self.exit()
elif result.restart or result.token_changed:
# Remove environment variables from current connection
# - keep develop, staging, headless values
for key in {
"AYON_SERVER_URL",
"AYON_API_KEY",
"AYON_BUNDLE_NAME",
}:
os.environ.pop(key, None)
self.restart()
def _on_restart_action(self):

View file

@ -374,7 +374,7 @@ def get_default_settings_variant():
"""
con = get_server_api_connection()
return con.get_client_version()
return con.get_default_settings_variant()
def set_default_settings_variant(variant):

View file

@ -3,6 +3,10 @@ SERVER_URL_ENV_KEY = "AYON_SERVER_URL"
SERVER_API_ENV_KEY = "AYON_API_KEY"
SERVER_TIMEOUT_ENV_KEY = "AYON_SERVER_TIMEOUT"
SERVER_RETRIES_ENV_KEY = "AYON_SERVER_RETRIES"
# Default variant used for settings
DEFAULT_VARIANT_ENV_KEY = "AYON_DEFAULT_SETTINGS_VARIANT"
# Default site id used for connection
SITE_ID_ENV_KEY = "AYON_SITE_ID"
# Backwards compatibility
SERVER_TOKEN_ENV_KEY = SERVER_API_ENV_KEY
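The two new keys follow the same pattern as the existing ones: optional overrides read from the environment when the connection is set up. A hedged sketch of how a client might resolve them; the module path and the "production" fallback are assumptions, not confirmed by this diff:

import os

from ayon_api.constants import DEFAULT_VARIANT_ENV_KEY, SITE_ID_ENV_KEY

# Fall back to the "production" variant when the variable is not set (assumed)
settings_variant = os.environ.get(DEFAULT_VARIANT_ENV_KEY) or "production"
site_id = os.environ.get(SITE_ID_ENV_KEY)  # may be None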
@ -40,6 +44,7 @@ DEFAULT_PROJECT_FIELDS = {
"code",
"config",
"createdAt",
"data",
}
# --- Folders ---
@ -52,6 +57,7 @@ DEFAULT_FOLDER_FIELDS = {
"parentId",
"active",
"thumbnailId",
"data",
}
# --- Tasks ---
@ -63,6 +69,7 @@ DEFAULT_TASK_FIELDS = {
"folderId",
"active",
"assignees",
"data",
}
# --- Products ---
@ -72,6 +79,7 @@ DEFAULT_PRODUCT_FIELDS = {
"folderId",
"active",
"productType",
"data",
}
# --- Versions ---
@ -86,6 +94,7 @@ DEFAULT_VERSION_FIELDS = {
"thumbnailId",
"createdAt",
"updatedAt",
"data",
}
# --- Representations ---
@ -96,6 +105,7 @@ DEFAULT_REPRESENTATION_FIELDS = {
"createdAt",
"active",
"versionId",
"data",
}
REPRESENTATION_FILES_FIELDS = {
@ -119,6 +129,7 @@ DEFAULT_WORKFILE_INFO_FIELDS = {
"thumbnailId",
"updatedAt",
"updatedBy",
"data",
}
DEFAULT_EVENT_FIELDS = {

View file

@ -36,11 +36,20 @@ class EntityHub(object):
"""
def __init__(
self, project_name, connection=None, allow_data_changes=False
self, project_name, connection=None, allow_data_changes=None
):
if not connection:
connection = get_server_api_connection()
major, minor, patch, _, _ = connection.server_version_tuple
path_start_with_slash = True
if (major, minor) < (0, 6):
path_start_with_slash = False
if allow_data_changes is None:
allow_data_changes = connection.graphql_allows_data_in_query
self._connection = connection
self._path_start_with_slash = path_start_with_slash
self._project_name = project_name
self._entities_by_id = {}
@ -65,6 +74,18 @@ class EntityHub(object):
return self._allow_data_changes
@property
def path_start_with_slash(self):
    """Whether the folder path should start with a slash.

    This behavior changed in server version 0.6.x.
Returns:
bool: Path starts with slash.
"""
return self._path_start_with_slash
@property
def project_name(self):
"""Project name which is maintained by hub.
@ -2419,10 +2440,13 @@ class FolderEntity(BaseEntity):
if self._path is None:
parent = self.parent
path = self.name
if parent.entity_type == "folder":
parent_path = parent.path
path = "/".join([parent_path, path])
path = "/".join([parent_path, self.name])
elif self._entity_hub.path_start_with_slash:
path = "/{}".format(self.name)
else:
path = self.name
self._path = path
return self._path
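A quick illustration of the branches above, using hypothetical folder names:

# Top-level folder "shot010":
#   server >= 0.6 (path_start_with_slash is True)  -> "/shot010"
#   server <  0.6 (path_start_with_slash is False) -> "shot010"
# Nested folder "shot010" under "seq01" inherits the parent's prefix:
#   "/seq01/shot010" on new servers, "seq01/shot010" on old ones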
@ -2525,7 +2549,10 @@ class FolderEntity(BaseEntity):
if self.thumbnail_id is not UNKNOWN_VALUE:
output["thumbnailId"] = self.thumbnail_id
if self._entity_hub.allow_data_changes:
if (
self._entity_hub.allow_data_changes
and self._data is not UNKNOWN_VALUE
):
output["data"] = self._data
return output

View file

@ -202,6 +202,15 @@ class GraphQlQuery:
self._variables[key]["value"] = value
def get_variable_keys(self):
"""Get all variable keys.
Returns:
set[str]: Variable keys.
"""
return set(self._variables.keys())
def get_variables_values(self):
    """Calculate variable values that should be used in the query.

Some files were not shown because too many files have changed in this diff.