Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-25 05:14:40 +01:00

commit aaeb6ccae0
Merge branch 'develop' into bugfix/OP-7438_3dsmax-preview-resolution-with-burnins

157 changed files with 4644 additions and 2341 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 8 changes)

@@ -35,6 +35,10 @@ body:
       label: Version
       description: What version are you running? Look to OpenPype Tray
       options:
+        - 3.17.7-nightly.7
+        - 3.17.7-nightly.6
+        - 3.17.7-nightly.5
+        - 3.17.7-nightly.4
         - 3.17.7-nightly.3
         - 3.17.7-nightly.2
         - 3.17.7-nightly.1
@@ -131,10 +135,6 @@ body:
         - 3.15.3-nightly.3
         - 3.15.3-nightly.2
         - 3.15.3-nightly.1
-        - 3.15.2
-        - 3.15.2-nightly.6
-        - 3.15.2-nightly.5
-        - 3.15.2-nightly.4
   validations:
     required: true
   - type: dropdown
@@ -296,12 +296,15 @@ def run(script):
 @click.option("--mongo_url",
               help="MongoDB for testing.",
               default=None)
+@click.option("--dump_databases",
+              help="Dump all databases to data folder.",
+              default=None)
 def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
-             timeout, setup_only, mongo_url, app_group):
+             timeout, setup_only, mongo_url, app_group, dump_databases):
     """Run all automatic tests after proper initialization via start.py"""
     PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
                              persist, app_variant, timeout, setup_only,
-                             mongo_url, app_group)
+                             mongo_url, app_group, dump_databases)


 @main.command(help="DEPRECATED - run sync server")
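The hunk above threads one new Click option end to end. As a minimal sketch of the pattern (only the option and parameter names come from the diff; the body and entry point are illustrative):

    import click


    @click.command()
    @click.option("--mongo_url", help="MongoDB for testing.", default=None)
    @click.option("--dump_databases",
                  help="Dump all databases to data folder.", default=None)
    def runtests(mongo_url, dump_databases):
        # A new CLI option must be added in three places: the decorator,
        # the function signature, and the downstream call. Missing any one
        # of them only fails at invocation time with a TypeError.
        print("mongo_url={}, dump_databases={}".format(
            mongo_url, dump_databases))


    if __name__ == "__main__":
        runtests()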
@@ -80,8 +80,8 @@ def _get_subsets(

     for subset in con.get_products(
         project_name,
-        subset_ids,
-        subset_names,
+        product_ids=subset_ids,
+        product_names=subset_names,
         folder_ids=folder_ids,
         names_by_folder_ids=names_by_folder_ids,
         active=active,
@@ -113,23 +113,23 @@ def _get_versions(

     queried_versions = con.get_versions(
         project_name,
-        version_ids,
-        subset_ids,
-        versions,
-        hero,
-        standard,
-        latest,
+        version_ids=version_ids,
+        product_ids=subset_ids,
+        versions=versions,
+        hero=hero,
+        standard=standard,
+        latest=latest,
         active=active,
         fields=fields
     )

-    versions = []
+    version_entities = []
     hero_versions = []
     for version in queried_versions:
         if version["version"] < 0:
             hero_versions.append(version)
         else:
-            versions.append(convert_v4_version_to_v3(version))
+            version_entities.append(convert_v4_version_to_v3(version))

     if hero_versions:
         subset_ids = set()
@@ -159,9 +159,9 @@ def _get_versions(
                 break
         conv_hero = convert_v4_version_to_v3(hero_version)
         conv_hero["version_id"] = version_id
-        versions.append(conv_hero)
+        version_entities.append(conv_hero)

-    return versions
+    return version_entities


 def get_asset_by_id(project_name, asset_id, fields=None):
@@ -539,11 +539,11 @@ def get_representations(

     representations = con.get_representations(
         project_name,
-        representation_ids,
-        representation_names,
-        version_ids,
-        names_by_version_ids,
-        active,
+        representation_ids=representation_ids,
+        representation_names=representation_names,
+        version_ids=version_ids,
+        names_by_version_ids=names_by_version_ids,
+        active=active,
         fields=fields
     )
     for representation in representations:
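All of these wrappers switch from positional to keyword arguments when calling into the client, and the local `versions` list is renamed so it no longer shadows the `versions` parameter. A small sketch of why the keyword form is the safer contract (the function here is a stand-in, not the project's client):

    def get_products(project_name, product_ids=None, product_names=None,
                     folder_ids=None, active=True, fields=None):
        """Stand-in for the client call; only the signature shape matters."""
        return []


    subset_ids, subset_names = ["id1"], ["main"]

    # Positional style silently breaks if the provider inserts or reorders
    # parameters; subset_names could end up bound to folder_ids.
    get_products("my_project", subset_ids, subset_names)

    # Keyword style keeps each value pinned to its intended parameter.
    get_products("my_project",
                 product_ids=subset_ids,
                 product_names=subset_names)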
@@ -27,6 +27,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
         "tvpaint",
         "substancepainter",
         "aftereffects",
+        "wrap"
     }
     launch_types = {LaunchTypes.local}
@@ -19,7 +19,8 @@ class CopyTemplateWorkfile(PreLaunchHook):

     # Before `AddLastWorkfileToLaunchArgs`
     order = 0
-    app_groups = {"blender", "photoshop", "tvpaint", "aftereffects"}
+    app_groups = {"blender", "photoshop", "tvpaint", "aftereffects",
+                  "wrap"}
     launch_types = {LaunchTypes.local}

     def execute(self):
@@ -56,16 +56,15 @@ class RenderCreator(Creator):
         use_composition_name = (pre_create_data.get("use_composition_name") or
                                 len(comps) > 1)
         for comp in comps:
+            composition_name = re.sub(
+                "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
+                "",
+                comp.name
+            )
             if use_composition_name:
                 if "{composition}" not in subset_name_from_ui.lower():
                     subset_name_from_ui += "{Composition}"

-                composition_name = re.sub(
-                    "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
-                    "",
-                    comp.name
-                )
-
                 dynamic_fill = prepare_template_data({"composition":
                                                       composition_name})
                 subset_name = subset_name_from_ui.format(**dynamic_fill)
@@ -81,6 +80,8 @@ class RenderCreator(Creator):
                                 inst.subset_name))

             data["members"] = [comp.id]
+            data["orig_comp_name"] = composition_name

             new_instance = CreatedInstance(self.family, subset_name, data,
                                            self)
             if "farm" in pre_create_data:
@@ -88,7 +89,7 @@ class RenderCreator(Creator):
                 new_instance.creator_attributes["farm"] = use_farm

             review = pre_create_data["mark_for_review"]
-            new_instance.creator_attributes["mark_for_review"] = review
+            new_instance. creator_attributes["mark_for_review"] = review

             api.get_stub().imprint(new_instance.id,
                                    new_instance.data_to_store())
@@ -150,16 +151,18 @@ class RenderCreator(Creator):
                                      subset_change.new_value)

     def remove_instances(self, instances):
+        """Removes metadata and renames to original comp name if available."""
         for instance in instances:
             self._remove_instance_from_context(instance)
             self.host.remove_instance(instance)

             subset = instance.data["subset"]
             comp_id = instance.data["members"][0]
             comp = api.get_stub().get_item(comp_id)
+            orig_comp_name = instance.data.get("orig_comp_name")
             if comp:
                 new_comp_name = comp.name.replace(subset, '')
                 if not new_comp_name:
-                    new_comp_name = "dummyCompName"
+                    if orig_comp_name:
+                        new_comp_name = orig_comp_name
+                    else:
+                        new_comp_name = "dummyCompName"
                 api.get_stub().rename_item(comp_id,
                                            new_comp_name)
@@ -1,4 +1,4 @@
-import os
+from pathlib import Path

 import bpy

@@ -59,7 +59,7 @@ def get_render_product(output_path, name, aov_sep):
         instance (pyblish.api.Instance): The instance to publish.
         ext (str): The image format to render.
     """
-    filepath = os.path.join(output_path, name)
+    filepath = output_path / name.lstrip("/")
     render_product = f"{filepath}{aov_sep}beauty.####"
     render_product = render_product.replace("\\", "/")

@@ -180,7 +180,7 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
         return []

     output.file_slots.clear()
-    output.base_path = output_path
+    output.base_path = str(output_path)

     aov_file_products = []

@@ -191,8 +191,9 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):

         output.file_slots.new(filepath)

-        aov_file_products.append(
-            (render_pass.name, os.path.join(output_path, filepath)))
+        filename = str(output_path / filepath.lstrip("/"))
+
+        aov_file_products.append((render_pass.name, filename))

         node_input = output.inputs[-1]

@@ -214,12 +215,11 @@ def imprint_render_settings(node, data):
 def prepare_rendering(asset_group):
     name = asset_group.name

-    filepath = bpy.data.filepath
+    filepath = Path(bpy.data.filepath)
     assert filepath, "Workfile not saved. Please save the file first."

-    file_path = os.path.dirname(filepath)
-    file_name = os.path.basename(filepath)
-    file_name, _ = os.path.splitext(file_name)
+    dirpath = filepath.parent
+    file_name = Path(filepath.name).stem

     project = get_current_project_name()
     settings = get_project_settings(project)

@@ -232,7 +232,7 @@ def prepare_rendering(asset_group):
     set_render_format(ext, multilayer)
     aov_list, custom_passes = set_render_passes(settings)

-    output_path = os.path.join(file_path, render_folder, file_name)
+    output_path = Path.joinpath(dirpath, render_folder, file_name)

     render_product = get_render_product(output_path, name, aov_sep)
     aov_file_product = set_node_tree(
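The `.lstrip("/")` calls in this migration are load-bearing: joining a `Path` with a segment that starts with a slash discards the base path entirely. A quick, self-contained demonstration:

    from pathlib import PurePosixPath

    base = PurePosixPath("/project/renders/shot010")

    print(base / "beauty")               # /project/renders/shot010/beauty
    print(base / "/beauty")              # /beauty  -- the base is thrown away
    print(base / "/beauty".lstrip("/"))  # /project/renders/shot010/beauty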
@@ -11,12 +11,12 @@ import pyblish.api


 class CollectBlenderRender(pyblish.api.InstancePlugin):
-    """Gather all publishable render layers from renderSetup."""
+    """Gather all publishable render instances."""

     order = pyblish.api.CollectorOrder + 0.01
     hosts = ["blender"]
     families = ["render"]
-    label = "Collect Render Layers"
+    label = "Collect Render"
     sync_workfile_version = False

     @staticmethod
@@ -78,8 +78,6 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):

         assert render_data, "No render data found."

-        self.log.debug(f"render_data: {dict(render_data)}")
-
         render_product = render_data.get("render_product")
         aov_file_product = render_data.get("aov_file_product")
         ext = render_data.get("image_format")
@@ -101,7 +99,7 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
         expected_files = expected_beauty | expected_aovs

         instance.data.update({
-            "family": "render.farm",
+            "families": ["render", "render.farm"],
             "frameStart": frame_start,
             "frameEnd": frame_end,
             "frameStartHandle": frame_handle_start,
@@ -120,5 +118,3 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
             "colorspaceView": "ACES 1.0 SDR-video",
             "renderProducts": colorspace.ARenderProduct(),
         })
-
-        self.log.debug(f"data: {instance.data}")
@@ -14,7 +14,7 @@ from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
 class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
     """Extract a layout."""

-    label = "Extract Layout"
+    label = "Extract Layout (JSON)"
     hosts = ["blender"]
     families = ["layout"]
     optional = True

@@ -26,6 +26,10 @@ class ExtractThumbnail(publish.Extractor):
     def process(self, instance):
         self.log.debug("Extracting capture..")

+        if instance.data.get("thumbnailSource"):
+            self.log.debug("Thumbnail source found, skipping...")
+            return
+
         stagingdir = self.staging_dir(instance)
         asset_name = instance.data["assetEntity"]["name"]
         subset = instance.data["subset"]

@@ -14,7 +14,7 @@ class IncrementWorkfileVersion(
     optional = True
     hosts = ["blender"]
     families = ["animation", "model", "rig", "action", "layout", "blendScene",
-                "pointcache", "render"]
+                "pointcache", "render.farm"]

     def process(self, context):
         if not self.is_active(context.data):

@@ -19,7 +19,7 @@ class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
     """

     order = ValidateContentsOrder
-    families = ["render.farm"]
+    families = ["render"]
     hosts = ["blender"]
     label = "Validate Render Output for Deadline"
     optional = True
@@ -1,3 +1,4 @@
+import os
 import sys

 from qtpy import QtWidgets, QtCore, QtGui

@@ -18,6 +19,10 @@ from openpype.resources import get_openpype_icon_filepath
 from .pipeline import FusionEventHandler
 from .pulse import FusionPulse

+
+MENU_LABEL = os.environ["AVALON_LABEL"]
+

 self = sys.modules[__name__]
 self.menu = None

@@ -26,7 +31,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
     def __init__(self, *args, **kwargs):
         super(OpenPypeMenu, self).__init__(*args, **kwargs)

-        self.setObjectName("OpenPypeMenu")
+        self.setObjectName(f"{MENU_LABEL}Menu")

         icon_path = get_openpype_icon_filepath()
         icon = QtGui.QIcon(icon_path)

@@ -41,7 +46,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
             | QtCore.Qt.WindowStaysOnTopHint
         )
         self.render_mode_widget = None
-        self.setWindowTitle("OpenPype")
+        self.setWindowTitle(MENU_LABEL)

         asset_label = QtWidgets.QLabel("Context", self)
         asset_label.setStyleSheet(
openpype/hosts/fusion/deploy/ayon/Config/menu.fu (new file, 60 lines)

@@ -0,0 +1,60 @@
+{
+    Action
+    {
+        ID = "AYON_Menu",
+        Category = "AYON",
+        Name = "AYON Menu",
+
+        Targets =
+        {
+            Composition =
+            {
+                Execute = _Lua [=[
+                    local scriptPath = app:MapPath("AYON:../MenuScripts/launch_menu.py")
+                    if bmd.fileexists(scriptPath) == false then
+                        print("[AYON Error] Can't run file: " .. scriptPath)
+                    else
+                        target:RunScript(scriptPath)
+                    end
+                ]=],
+            },
+        },
+    },
+    Action
+    {
+        ID = "AYON_Install_PySide2",
+        Category = "AYON",
+        Name = "Install PySide2",
+
+        Targets =
+        {
+            Composition =
+            {
+                Execute = _Lua [=[
+                    local scriptPath = app:MapPath("AYON:../MenuScripts/install_pyside2.py")
+                    if bmd.fileexists(scriptPath) == false then
+                        print("[AYON Error] Can't run file: " .. scriptPath)
+                    else
+                        target:RunScript(scriptPath)
+                    end
+                ]=],
+            },
+        },
+    },
+    Menus
+    {
+        Target = "ChildFrame",
+
+        Before "Help"
+        {
+            Sub "AYON"
+            {
+                "AYON_Menu{}",
+                "_",
+                Sub "Admin" {
+                    "AYON_Install_PySide2{}"
+                }
+            }
+        },
+    },
+}
openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs (new file, 19 lines)

@@ -0,0 +1,19 @@
+{
+Locked = true,
+Global = {
+    Paths = {
+        Map = {
+            ["AYON:"] = "$(OPENPYPE_FUSION)/deploy/ayon",
+            ["Config:"] = "UserPaths:Config;AYON:Config",
+            ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
+        },
+    },
+    Script = {
+        PythonVersion = 3,
+        Python3Forced = true
+    },
+    UserInterface = {
+        Language = "en_US"
+    },
+},
+}
@@ -10,7 +10,7 @@
         Composition =
         {
             Execute = _Lua [=[
-                local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py")
+                local scriptPath = app:MapPath("OpenPype:../MenuScripts/launch_menu.py")
                 if bmd.fileexists(scriptPath) == false then
                     print("[OpenPype Error] Can't run file: " .. scriptPath)
                 else
@@ -31,7 +31,7 @@
         Composition =
        {
             Execute = _Lua [=[
-                local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py")
+                local scriptPath = app:MapPath("OpenPype:../MenuScripts/install_pyside2.py")
                 if bmd.fileexists(scriptPath) == false then
                     print("[OpenPype Error] Can't run file: " .. scriptPath)
                 else

@@ -3,7 +3,7 @@ Locked = true,
 Global = {
     Paths = {
         Map = {
-            ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
+            ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy/openpype",
             ["Config:"] = "UserPaths:Config;OpenPype:Config",
             ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
         },
@@ -2,6 +2,7 @@ import os
 import shutil
 import platform
 from pathlib import Path
+from openpype import AYON_SERVER_ENABLED
 from openpype.hosts.fusion import (
     FUSION_HOST_DIR,
     FUSION_VERSIONS_DICT,

@@ -161,6 +162,13 @@ class FusionCopyPrefsPrelaunch(PreLaunchHook):
         # profile directory variables to customize Fusion
         # to define where it can read custom scripts and tools from
         master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
-        master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
+
+        if AYON_SERVER_ENABLED:
+            master_prefs = Path(
+                FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs")
+        else:
+            master_prefs = Path(
+                FUSION_HOST_DIR, "deploy", "openpype", "fusion_shared.prefs")
+
         self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
         self.launch_context.env[master_prefs_variable] = str(master_prefs)
@@ -25,20 +25,24 @@ def enabled_savers(comp, savers):
     """
     passthrough_key = "TOOLB_PassThrough"
     original_states = {}
-    enabled_save_names = {saver.Name for saver in savers}
+    enabled_saver_names = {saver.Name for saver in savers}
+
+    all_savers = comp.GetToolList(False, "Saver").values()
+    savers_by_name = {saver.Name: saver for saver in all_savers}
+
     try:
-        all_savers = comp.GetToolList(False, "Saver").values()
         for saver in all_savers:
             original_state = saver.GetAttrs()[passthrough_key]
-            original_states[saver] = original_state
+            original_states[saver.Name] = original_state

             # The passthrough state we want to set (passthrough != enabled)
-            state = saver.Name not in enabled_save_names
+            state = saver.Name not in enabled_saver_names
             if state != original_state:
                 saver.SetAttrs({passthrough_key: state})
         yield
     finally:
-        for saver, original_state in original_states.items():
+        for saver_name, original_state in original_states.items():
+            saver = savers_by_name[saver_name]
             saver.SetAttrs({"TOOLB_PassThrough": original_state})
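The fix keys `original_states` by saver name and resolves tools through `savers_by_name`, instead of using Fusion tool handles as dictionary keys across the `yield`. The same restore-state shape, reduced to a plain-Python sketch (the `Saver` class and `passthrough` flag here are illustrative stand-ins for Fusion's API):

    from contextlib import contextmanager


    @contextmanager
    def enabled_only(items, enabled_names):
        """Disable every item not in enabled_names; restore states on exit."""
        by_name = {item.name: item for item in items}
        original = {item.name: item.passthrough for item in items}
        try:
            for item in items:
                # passthrough=True means "disabled" in the Fusion sense
                item.passthrough = item.name not in enabled_names
            yield
        finally:
            # Restore by name so nothing depends on object identity
            for name, state in original.items():
                by_name[name].passthrough = state


    class Saver:
        def __init__(self, name):
            self.name = name
            self.passthrough = False


    savers = [Saver("beauty"), Saver("aov")]
    with enabled_only(savers, {"beauty"}):
        assert savers[1].passthrough       # "aov" disabled inside the block
    assert not savers[1].passthrough       # restored afterwards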
@@ -13,7 +13,7 @@ var LD_OPENHARMONY_PATH = System.getenv('LIB_OPENHARMONY_PATH');
 LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH + '/openHarmony.js';
 LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH.replace(/\\/g, "/");
 include(LD_OPENHARMONY_PATH);
-this.__proto__['$'] = $;
+//this.__proto__['$'] = $;

 function Client() {
   var self = this;
@@ -59,8 +59,8 @@ class ExtractRender(pyblish.api.InstancePlugin):

         args = [application_path, "-batch",
                 "-frames", str(frame_start), str(frame_end),
-                "-scene", scene_path]
-        self.log.info(f"running [ {application_path} {' '.join(args)}")
+                scene_path]
+        self.log.info(f"running: {' '.join(args)}")
         proc = subprocess.Popen(
             args,
             stdout=subprocess.PIPE,
@@ -95,18 +95,18 @@ def menu_install():

     menu.addSeparator()

-    publish_action = menu.addAction("Publish...")
-    publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
-    publish_action.triggered.connect(
-        lambda *args: publish(hiero.ui.mainWindow())
-    )
-
     creator_action = menu.addAction("Create...")
     creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
     creator_action.triggered.connect(
         lambda: host_tools.show_creator(parent=main_window)
     )

+    publish_action = menu.addAction("Publish...")
+    publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
+    publish_action.triggered.connect(
+        lambda *args: publish(hiero.ui.mainWindow())
+    )
+
     loader_action = menu.addAction("Load...")
     loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
     loader_action.triggered.connect(
@@ -121,8 +121,8 @@ def get_id_required_nodes():
     return list(nodes)


-def get_output_parameter(node):
-    """Return the render output parameter name of the given node
+def get_export_parameter(node):
+    """Return the export output parameter of the given node

     Example:
         root = hou.node("/obj")
@@ -137,13 +137,70 @@ def get_output_parameter(node):
         hou.Parm

     """
-    node_type = node.type().name()
-    if node_type == "geometry":
+    node_type = node.type().description()
+
+    # Ensures the proper Take is selected for each ROP to retrieve the correct
+    # ifd
+    try:
+        rop_take = hou.takes.findTake(node.parm("take").eval())
+        if rop_take is not None:
+            hou.takes.setCurrentTake(rop_take)
+    except AttributeError:
+        # hou object doesn't always have the 'takes' attribute
+        pass
+
+    if node_type == "Mantra" and node.parm("soho_outputmode").eval():
+        return node.parm("soho_diskfile")
+    elif node_type == "Alfred":
+        return node.parm("alf_diskfile")
+    elif (node_type == "RenderMan" or node_type == "RenderMan RIS"):
+        pre_ris22 = node.parm("rib_outputmode") and \
+            node.parm("rib_outputmode").eval()
+        ris22 = node.parm("diskfile") and node.parm("diskfile").eval()
+        if pre_ris22 or ris22:
+            return node.parm("soho_diskfile")
+    elif node_type == "Redshift" and node.parm("RS_archive_enable").eval():
+        return node.parm("RS_archive_file")
+    elif node_type == "Wedge" and node.parm("driver").eval():
+        return get_export_parameter(node.node(node.parm("driver").eval()))
+    elif node_type == "Arnold":
+        return node.parm("ar_ass_file")
+    elif node_type == "Alembic" and node.parm("use_sop_path").eval():
+        return node.parm("sop_path")
+    elif node_type == "Shotgun Mantra" and node.parm("soho_outputmode").eval():
+        return node.parm("sgtk_soho_diskfile")
+    elif node_type == "Shotgun Alembic" and node.parm("use_sop_path").eval():
+        return node.parm("sop_path")
+    elif node.type().nameWithCategory() == "Driver/vray_renderer":
+        return node.parm("render_export_filepath")
+
+    raise TypeError("Node type '%s' not supported" % node_type)
+
+
+def get_output_parameter(node):
+    """Return the render output parameter of the given node
+
+    Example:
+        root = hou.node("/obj")
+        my_alembic_node = root.createNode("alembic")
+        get_output_parameter(my_alembic_node)
+        # Result: "output"
+
+    Args:
+        node(hou.Node): node instance
+
+    Returns:
+        hou.Parm
+
+    """
+    node_type = node.type().description()
+    category = node.type().category().name()
+
+    # Figure out which type of node is being rendered
+    if node_type == "Geometry" or node_type == "Filmbox FBX" or \
+            (node_type == "ROP Output Driver" and category == "Sop"):
         return node.parm("sopoutput")
     elif node_type == "alembic":
         return node.parm("filename")
-    elif node_type == "comp":
+    elif node_type == "Composite":
         return node.parm("copoutput")
     elif node_type == "opengl":
         return node.parm("picture")
@@ -155,6 +212,15 @@ def get_output_parameter(node):
     elif node_type == "ifd":
         if node.evalParm("soho_outputmode"):
             return node.parm("soho_diskfile")
+    elif node_type == "Octane":
+        return node.parm("HO_img_fileName")
+    elif node_type == "Fetch":
+        inner_node = node.node(node.parm("source").eval())
+        if inner_node:
+            return get_output_parameter(inner_node)
+    elif node.type().nameWithCategory() == "Driver/vray_renderer":
+        return node.parm("SettingsOutput_img_file_path")

     raise TypeError("Node type '%s' not supported" % node_type)
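Both helpers only run inside a live Houdini session, where the `hou` module exists. A usage sketch; the node path, node type, and printed result are illustrative assumptions, not taken from the diff:

    # Only meaningful inside Houdini; `hou` is not pip-installable.
    import hou

    out = hou.node("/out")
    rop = out.createNode("geometry")   # description() reads "Geometry"

    parm = get_output_parameter(rop)   # resolves to the "sopoutput" parm
    print(parm.name(), parm.eval())

    # Fetch ROPs are resolved recursively through their "source" parameter,
    # so the returned parm belongs to the fetched node, not the Fetch itself.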
@@ -13,6 +13,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
     # Default extension
     ext = "exr"

+    # Default to split export and render jobs
+    export_job = True
+
     def create(self, subset_name, instance_data, pre_create_data):
         import hou

@@ -48,6 +51,15 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             "ar_exr_half_precision": 1  # half precision
         }

+        if pre_create_data.get("export_job"):
+            ass_filepath = \
+                "{export_dir}{subset_name}/{subset_name}.$F4.ass".format(
+                    export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
+                    subset_name=subset_name,
+                )
+            parms["ar_ass_export_enable"] = 1
+            parms["ar_ass_file"] = ass_filepath
+
         instance_node.setParms(parms)

         # Lock any parameters in this list
@@ -66,6 +78,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             BoolDef("farm",
                     label="Submitting to Farm",
                     default=True),
+            BoolDef("export_job",
+                    label="Split export and render jobs",
+                    default=self.export_job),
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,

@@ -12,6 +12,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
     family = "mantra_rop"
     icon = "magic"

+    # Default to split export and render jobs
+    export_job = True
+
     def create(self, subset_name, instance_data, pre_create_data):
         import hou  # noqa

@@ -44,6 +47,15 @@ class CreateMantraROP(plugin.HoudiniCreator):
             "vm_picture": filepath,
         }

+        if pre_create_data.get("export_job"):
+            ifd_filepath = \
+                "{export_dir}{subset_name}/{subset_name}.$F4.ifd".format(
+                    export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
+                    subset_name=subset_name,
+                )
+            parms["soho_outputmode"] = 1
+            parms["soho_diskfile"] = ifd_filepath
+
         if self.selected_nodes:
             # If camera found in selection
             # we will use as render camera

@@ -78,6 +90,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
             BoolDef("farm",
                     label="Submitting to Farm",
                     default=True),
+            BoolDef("export_job",
+                    label="Split export and render jobs",
+                    default=self.export_job),
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",
@@ -16,6 +16,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
     icon = "magic"
     ext = "exr"

+    # Default to split export and render jobs
+    export_job = True
+
     def create(self, subset_name, instance_data, pre_create_data):

         instance_data.pop("active", None)

@@ -52,6 +55,17 @@ class CreateVrayROP(plugin.HoudiniCreator):
             "SettingsEXR_bits_per_channel": "16"  # half precision
         }

+        if pre_create_data.get("export_job"):
+            scene_filepath = \
+                "{export_dir}{subset_name}/{subset_name}.$F4.vrscene".format(
+                    export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
+                    subset_name=subset_name,
+                )
+            # Setting render_export_mode to "2" because that's for
+            # "Export only" ("1" is for "Export & Render")
+            parms["render_export_mode"] = "2"
+            parms["render_export_filepath"] = scene_filepath
+
         if self.selected_nodes:
             # set up the render camera from the selected node
             camera = None

@@ -140,6 +154,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
             BoolDef("farm",
                     label="Submitting to Farm",
                     default=True),
+            BoolDef("export_job",
+                    label="Split export and render jobs",
+                    default=self.export_job),
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
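All three creators follow one recipe: expose a `BoolDef` in the pre-create attributes, read it back from `pre_create_data` in `create()`, and translate it into renderer-specific export parms. Reduced to a skeleton (parm names come from the diff; the function shape is illustrative, not the plugin API):

    def build_export_parms(subset_name, pre_create_data):
        """Skeleton of the creators' split-export handling."""
        parms = {}
        if pre_create_data.get("export_job"):
            # Each renderer pairs an "enable export" toggle with a path:
            #   Arnold: ar_ass_export_enable / ar_ass_file        (.ass)
            #   Mantra: soho_outputmode      / soho_diskfile      (.ifd)
            #   V-Ray:  render_export_mode="2" / render_export_filepath
            parms["ar_ass_export_enable"] = 1
            parms["ar_ass_file"] = (
                "$HIP/pyblish/ass/{0}/{0}.$F4.ass".format(subset_name))
        return parms


    print(build_export_parms("renderMain", {"export_job": True}))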
@@ -40,6 +40,25 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "ar_picture")
         render_products = []

+        # Store whether we are splitting the render job (export + render)
+        export_job = bool(rop.parm("ar_ass_export_enable").eval())
+        instance.data["exportJob"] = export_job
+        export_prefix = None
+        export_products = []
+        if export_job:
+            export_prefix = evalParmNoFrame(
+                rop, "ar_ass_file", pad_character="0"
+            )
+            beauty_export_product = self.get_render_product_name(
+                prefix=export_prefix,
+                suffix=None)
+            export_products.append(beauty_export_product)
+            self.log.debug(
+                "Found export product: {}".format(beauty_export_product)
+            )
+            instance.data["ifdFile"] = beauty_export_product
+            instance.data["exportFiles"] = list(export_products)
+
         # Default beauty AOV
         beauty_product = self.get_render_product_name(prefix=default_prefix,
                                                       suffix=None)

@@ -44,6 +44,25 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "vm_picture")
         render_products = []

+        # Store whether we are splitting the render job (export + render)
+        export_job = bool(rop.parm("soho_outputmode").eval())
+        instance.data["exportJob"] = export_job
+        export_prefix = None
+        export_products = []
+        if export_job:
+            export_prefix = evalParmNoFrame(
+                rop, "soho_diskfile", pad_character="0"
+            )
+            beauty_export_product = self.get_render_product_name(
+                prefix=export_prefix,
+                suffix=None)
+            export_products.append(beauty_export_product)
+            self.log.debug(
+                "Found export product: {}".format(beauty_export_product)
+            )
+            instance.data["ifdFile"] = beauty_export_product
+            instance.data["exportFiles"] = list(export_products)
+
         # Default beauty AOV
         beauty_product = self.get_render_product_name(
             prefix=default_prefix, suffix=None

@@ -45,7 +45,26 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         render_products = []
         # TODO: add render elements if render element

-        beauty_product = self.get_beauty_render_product(default_prefix)
+        # Store whether we are splitting the render job in an export + render
+        export_job = rop.parm("render_export_mode").eval() == "2"
+        instance.data["exportJob"] = export_job
+        export_prefix = None
+        export_products = []
+        if export_job:
+            export_prefix = evalParmNoFrame(
+                rop, "render_export_filepath", pad_character="0"
+            )
+            beauty_export_product = self.get_render_product_name(
+                prefix=export_prefix,
+                suffix=None)
+            export_products.append(beauty_export_product)
+            self.log.debug(
+                "Found export product: {}".format(beauty_export_product)
+            )
+            instance.data["ifdFile"] = beauty_export_product
+            instance.data["exportFiles"] = list(export_products)
+
+        beauty_product = self.get_render_product_name(default_prefix)
         render_products.append(beauty_product)
         files_by_aov = {
             "RGB Color": self.generate_expected_files(instance,
@@ -79,7 +98,7 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         instance.data["colorspaceDisplay"] = colorspace_data["display"]
         instance.data["colorspaceView"] = colorspace_data["view"]

-    def get_beauty_render_product(self, prefix, suffix="<reName>"):
+    def get_render_product_name(self, prefix, suffix="<reName>"):
         """Return the beauty output filename if render element enabled
         """
         # Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix`
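Each collector mirrors its creator: it inspects the ROP's export parms and stashes the result on the pyblish instance, where later plugins (farm submission, not shown in this diff) can presumably pick it up. The keys written are the same in all three collectors; values here are illustrative:

    # Shape of the data the collectors add to instance.data:
    instance_data = {
        "exportJob": True,                          # split export + render?
        "ifdFile": "renderMain/renderMain.0000.ass",  # scene file to render
        "exportFiles": ["renderMain/renderMain.0000.ass"],
    }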
@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())

@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())

@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())

@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
-"""3dsmax menu definition of OpenPype."""
+"""3dsmax menu definition of AYON."""
+import os
 from qtpy import QtWidgets, QtCore
 from pymxs import runtime as rt

@@ -8,7 +9,7 @@ from openpype.hosts.max.api import lib


 class OpenPypeMenu(object):
-    """Object representing OpenPype menu.
+    """Object representing OpenPype/AYON menu.

     This is using "hack" to inject itself before "Help" menu of 3dsmax.
     For some reason `postLoadingMenus` event doesn't fire, and main menu

@@ -50,17 +51,17 @@ class OpenPypeMenu(object):
         return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0]

     def get_or_create_openpype_menu(
-            self, name: str = "&OpenPype",
+            self, name: str = "&Openpype",
             before: str = "&Help") -> QtWidgets.QAction:
-        """Create OpenPype menu.
+        """Create AYON menu.

         Args:
-            name (str, Optional): OpenPypep menu name.
+            name (str, Optional): AYON menu name.
             before (str, Optional): Name of the 3dsmax main menu item to
-                add OpenPype menu before.
+                add AYON menu before.

         Returns:
-            QtWidgets.QAction: OpenPype menu action.
+            QtWidgets.QAction: AYON menu action.

         """
         if self.menu is not None:

@@ -77,15 +78,15 @@ class OpenPypeMenu(object):

         if before in item.title():
             help_action = item.menuAction()

-        op_menu = QtWidgets.QMenu("&OpenPype")
+        tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON"
+        op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label))
         menu_bar.insertMenu(help_action, op_menu)

         self.menu = op_menu
         return op_menu

     def build_openpype_menu(self) -> QtWidgets.QAction:
-        """Build items in OpenPype menu."""
+        """Build items in AYON menu."""
         openpype_menu = self.get_or_create_openpype_menu()
         load_action = QtWidgets.QAction("Load...", openpype_menu)
         load_action.triggered.connect(self.load_callback)
@@ -175,7 +175,7 @@ def containerise(name: str, nodes: list, context,


 def load_custom_attribute_data():
-    """Re-loading the Openpype/AYON custom parameter built by the creator
+    """Re-loading the AYON custom parameter built by the creator

     Returns:
         attribute: re-loading the custom OP attributes set in Maxscript
@@ -213,7 +213,7 @@ def import_custom_attribute_data(container: str, selections: list):


 def update_custom_attribute_data(container: str, selections: list):
-    """Updating the Openpype/AYON custom parameter built by the creator
+    """Updating the AYON custom parameter built by the creator

     Args:
         container (str): target container which adds custom attributes
@@ -33,7 +33,7 @@ class ImportModelRender(InventoryAction):
     )

     def process(self, containers):
-        from maya import cmds
+        from maya import cmds  # noqa: F401

         project_name = get_current_project_name()
         for container in containers:
@@ -66,7 +66,7 @@ class ImportModelRender(InventoryAction):
         None
         """

-        from maya import cmds
+        from maya import cmds  # noqa: F401

         project_name = get_current_project_name()
         repre_docs = get_representations(
@@ -85,12 +85,7 @@ class ImportModelRender(InventoryAction):
             if scene_type_regex.fullmatch(repre_name):
                 look_repres.append(repre_doc)

-        # QUESTION should we care if there is more then one look
-        #   representation? (since it's based on regex match)
-        look_repre = None
-        if look_repres:
-            look_repre = look_repres[0]
-
+        look_repre = look_repres[0] if look_repres else None
         # QUESTION shouldn't be json representation validated too?
         if not look_repre:
             print("No model render sets for this model version..")
@@ -9,7 +9,7 @@ from openpype.pipeline import (
 )
 from openpype.pipeline.load.utils import get_representation_path_from_context
 from openpype.pipeline.colorspace import (
-    get_imageio_colorspace_from_filepath,
+    get_imageio_file_rules_colorspace_from_filepath,
     get_imageio_config,
     get_imageio_file_rules
 )
@@ -285,10 +285,10 @@ class FileNodeLoader(load.LoaderPlugin):
         )

         path = get_representation_path_from_context(context)
-        colorspace = get_imageio_colorspace_from_filepath(
-            path=path,
-            host_name=host_name,
-            project_name=project_name,
+        colorspace = get_imageio_file_rules_colorspace_from_filepath(
+            path,
+            host_name,
+            project_name,
             config_data=config_data,
             file_rules=file_rules,
             project_settings=project_settings
@@ -265,6 +265,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
 class MayaUSDReferenceLoader(ReferenceLoader):
     """Reference USD file to native Maya nodes using MayaUSDImport reference"""

     label = "Reference Maya USD"
     families = ["usd"]
     representations = ["usd"]
+    extensions = {"usd", "usda", "usdc"}
@@ -45,11 +45,23 @@ FILE_NODES = {
     "PxrTexture": "filename"
 }

+RENDER_SET_TYPES = [
+    "VRayDisplacement",
+    "VRayLightMesh",
+    "VRayObjectProperties",
+    "RedshiftObjectId",
+    "RedshiftMeshParameters",
+]
+
 # Keep only node types that actually exist
 all_node_types = set(cmds.allNodeTypes())
 for node_type in list(FILE_NODES.keys()):
     if node_type not in all_node_types:
         FILE_NODES.pop(node_type)
+
+for node_type in RENDER_SET_TYPES:
+    if node_type not in all_node_types:
+        RENDER_SET_TYPES.remove(node_type)
 del all_node_types

 # Cache pixar dependency node types so we can perform a type lookup against it
@@ -69,9 +81,7 @@ def get_attributes(dictionary, attr, node=None):
     else:
         val = dictionary.get(attr, [])

-    if not isinstance(val, list):
-        return [val]
-    return val
+    return val if isinstance(val, list) else [val]


 def get_look_attrs(node):
@@ -106,7 +116,7 @@ def get_look_attrs(node):


 def node_uses_image_sequence(node, node_path):
-    # type: (str) -> bool
+    # type: (str, str) -> bool
     """Return whether file node uses an image sequence or single image.

     Determine if a node uses an image sequence or just a single image,
@@ -114,6 +124,7 @@ def node_uses_image_sequence(node, node_path):

     Args:
         node (str): Name of the Maya node
+        node_path (str): The file path of the node

     Returns:
         bool: True if node uses an image sequence
@@ -247,7 +258,7 @@ def get_file_node_files(node):

     # For sequences get all files and filter to only existing files
     result = []
-    for index, path in enumerate(paths):
+    for path in paths:
         if node_uses_image_sequence(node, path):
             glob_pattern = seq_to_glob(path)
             result.extend(glob.glob(glob_pattern))
@@ -358,6 +369,7 @@ class CollectLook(pyblish.api.InstancePlugin):
         for attr in shader_attrs:
             if cmds.attributeQuery(attr, node=look, exists=True):
                 existing_attrs.append("{}.{}".format(look, attr))
+
         materials = cmds.listConnections(existing_attrs,
                                          source=True,
                                          destination=False) or []
@@ -367,30 +379,32 @@ class CollectLook(pyblish.api.InstancePlugin):
         self.log.debug("Found the following sets:\n{}".format(look_sets))
         # Get the entire node chain of the look sets
         # history = cmds.listHistory(look_sets, allConnections=True)
-        history = cmds.listHistory(materials, allConnections=True)
+        # if materials list is empty, listHistory() will crash with
+        # RuntimeError
+        history = set()
+        if materials:
+            history = set(
+                cmds.listHistory(materials, allConnections=True))

         # Since we retrieved history only of the connected materials
         # connected to the look sets above we now add direct history
         # for some of the look sets directly
         # handling render attribute sets
-        render_set_types = [
-            "VRayDisplacement",
-            "VRayLightMesh",
-            "VRayObjectProperties",
-            "RedshiftObjectId",
-            "RedshiftMeshParameters",
-        ]
-        render_sets = cmds.ls(look_sets, type=render_set_types)
-        if render_sets:
-            history.extend(
-                cmds.listHistory(render_sets,
-                                 future=False,
-                                 pruneDagObjects=True)
-                or []
-            )
+        # Maya (at least 2024) crashes with Warning when render set type
+        # isn't available. cmds.ls() will return empty list
+        if RENDER_SET_TYPES:
+            render_sets = cmds.ls(look_sets, type=RENDER_SET_TYPES)
+            if render_sets:
+                history.update(
+                    cmds.listHistory(render_sets,
+                                     future=False,
+                                     pruneDagObjects=True)
+                    or []
+                )

         # Ensure unique entries only
-        history = list(set(history))
+        history = list(history)

         files = cmds.ls(history,
                         # It's important only node types are passed that
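One caveat with the module-level filtering added above: removing items from a list while iterating over it skips the element after each removal, so two unavailable render set types in a row would leave the second one in place. Rebinding to a filtered copy never skips entries; a sketch of the safer form (same names as the diff, with a stand-in for `cmds.allNodeTypes()`):

    all_node_types = {"VRayDisplacement", "RedshiftObjectId"}

    RENDER_SET_TYPES = [
        "VRayDisplacement",
        "VRayLightMesh",
        "VRayObjectProperties",   # consecutive missing entries trip the
        "RedshiftObjectId",       # remove-while-iterating loop
        "RedshiftMeshParameters",
    ]

    # A comprehension builds a new list instead of mutating mid-iteration:
    RENDER_SET_TYPES = [t for t in RENDER_SET_TYPES if t in all_node_types]
    print(RENDER_SET_TYPES)  # ['VRayDisplacement', 'RedshiftObjectId']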
@@ -371,7 +371,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                 continue
             for node in data["nodes"]:
                 lib.set_attribute(data["attribute"], data["values"][0], node)
-
         with lib.renderlayer(layer_node):

             # Repair animation must be enabled
@@ -392,13 +391,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
             if renderer != "renderman":
                 prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
                 fname_prefix = default_prefix
-                cmds.setAttr("{}.{}".format(node, prefix_attr),
-                             fname_prefix, type="string")
+                cmds.setAttr(prefix_attr, fname_prefix, type="string")

                 # Repair padding
                 padding_attr = RenderSettings.get_padding_attr(renderer)
-                cmds.setAttr("{}.{}".format(node, padding_attr),
-                             cls.DEFAULT_PADDING)
+                cmds.setAttr(padding_attr, cls.DEFAULT_PADDING)
             else:
                 # renderman handles stuff differently
                 cmds.setAttr("rmanGlobals.imageFileFormat",
@@ -21,6 +21,11 @@ from openpype.pipeline import (
     CreatedInstance,
     get_current_task_name
 )
+from openpype.pipeline.colorspace import (
+    get_display_view_colorspace_name,
+    get_colorspace_settings_from_publish_context,
+    set_colorspace_data_to_representation
+)
 from openpype.lib.transcoding import (
     VIDEO_EXTENSIONS
 )
@@ -612,7 +617,7 @@ class ExporterReview(object):

     def get_representation_data(
         self, tags=None, range=False,
-        custom_tags=None
+        custom_tags=None, colorspace=None
     ):
         """ Add representation data to self.data

@@ -652,6 +657,14 @@ class ExporterReview(object):
         if self.publish_on_farm:
             repre["tags"].append("publish_on_farm")

+        # add colorspace data to representation
+        if colorspace:
+            set_colorspace_data_to_representation(
+                repre,
+                self.instance.context.data,
+                colorspace=colorspace,
+                log=self.log
+            )
         self.data["representations"].append(repre)

     def get_imageio_baking_profile(self):
@@ -866,6 +879,13 @@ class ExporterReviewMov(ExporterReview):
         return path

     def generate_mov(self, farm=False, **kwargs):
+        # colorspace data
+        colorspace = None
+
+        # get colorspace data from context
+        config_data, _ = get_colorspace_settings_from_publish_context(
+            self.instance.context.data)
+
         add_tags = []
         self.publish_on_farm = farm
         read_raw = kwargs["read_raw"]
@@ -951,6 +971,14 @@ class ExporterReviewMov(ExporterReview):
             # assign viewer
             dag_node["view"].setValue(viewer)

+            if config_data:
+                # convert display and view to colorspace
+                colorspace = get_display_view_colorspace_name(
+                    config_path=config_data["path"],
+                    display=display,
+                    view=viewer
+                )
+
             self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`")
         # Write node
         write_node = nuke.createNode("Write")
@@ -996,9 +1024,10 @@ class ExporterReviewMov(ExporterReview):

         # ---------- generate representation data
         self.get_representation_data(
-            tags=["review", "delete"] + add_tags,
+            tags=["review", "need_thumbnail", "delete"] + add_tags,
             custom_tags=add_custom_tags,
-            range=True
+            range=True,
+            colorspace=colorspace
         )

         self.log.debug("Representation... `{}`".format(self.data))

@@ -276,7 +276,7 @@ class ExtractSlateFrame(publish.Extractor):

         if not matching_repre:
             self.log.info(
-                "Matching reresentation was not found."
+                "Matching representation was not found."
                 " Representation files were not filled with slate."
             )
             return
@@ -294,7 +294,7 @@ class ExtractSlateFrame(publish.Extractor):
         self.log.debug(
             "__ matching_repre: {}".format(pformat(matching_repre)))

-        self.log.warning("Added slate frame to representation files")
+        self.log.info("Added slate frame to representation files")

     def add_comment_slate_node(self, instance, node):
|
|||
import sys
|
||||
import os
|
||||
import nuke
|
||||
import pyblish.api
|
||||
|
||||
from openpype.pipeline import publish
|
||||
from openpype.hosts.nuke import api as napi
|
||||
from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings
|
||||
|
||||
|
||||
# Python 2/3 compatibility
|
||||
if sys.version_info[0] >= 3:
|
||||
unicode = str
|
||||
|
||||
|
||||
class ExtractThumbnail(publish.Extractor):
|
||||
"""Extracts movie and thumbnail with baked in luts
|
||||
|
||||
must be run after extract_render_local.py
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.ExtractorOrder + 0.011
|
||||
label = "Extract Thumbnail"
|
||||
|
||||
families = ["review"]
|
||||
hosts = ["nuke"]
|
||||
|
||||
# settings
|
||||
use_rendered = False
|
||||
bake_viewer_process = True
|
||||
bake_viewer_input_process = True
|
||||
nodes = {}
|
||||
reposition_nodes = None
|
||||
|
||||
def process(self, instance):
|
||||
if instance.data.get("farm"):
|
||||
return
|
||||
|
||||
with napi.maintained_selection():
|
||||
self.log.debug("instance: {}".format(instance))
|
||||
self.log.debug("instance.data[families]: {}".format(
|
||||
instance.data["families"]))
|
||||
|
||||
if instance.data.get("bakePresets"):
|
||||
for o_name, o_data in instance.data["bakePresets"].items():
|
||||
self.render_thumbnail(instance, o_name, **o_data)
|
||||
else:
|
||||
viewer_process_switches = {
|
||||
"bake_viewer_process": True,
|
||||
"bake_viewer_input_process": True
|
||||
}
|
||||
self.render_thumbnail(
|
||||
instance, None, **viewer_process_switches)
|
||||
|
||||
def render_thumbnail(self, instance, output_name=None, **kwargs):
|
||||
first_frame = instance.data["frameStartHandle"]
|
||||
last_frame = instance.data["frameEndHandle"]
|
||||
colorspace = instance.data["colorspace"]
|
||||
|
||||
# find frame range and define middle thumb frame
|
||||
mid_frame = int((last_frame - first_frame) / 2)
|
||||
|
||||
# solve output name if any is set
|
||||
output_name = output_name or ""
|
||||
|
||||
bake_viewer_process = kwargs["bake_viewer_process"]
|
||||
bake_viewer_input_process_node = kwargs[
|
||||
"bake_viewer_input_process"]
|
||||
|
||||
node = instance.data["transientData"]["node"] # group node
|
||||
self.log.debug("Creating staging dir...")
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
staging_dir = os.path.normpath(
|
||||
os.path.dirname(instance.data['path']))
|
||||
|
||||
instance.data["stagingDir"] = staging_dir
|
||||
|
||||
self.log.debug(
|
||||
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
|
||||
|
||||
temporary_nodes = []
|
||||
|
||||
# try to connect already rendered images
|
||||
previous_node = node
|
||||
collection = instance.data.get("collection", None)
|
||||
self.log.debug("__ collection: `{}`".format(collection))
|
||||
|
||||
if collection:
|
||||
# get path
|
||||
fhead = collection.format("{head}")
|
||||
|
||||
thumb_fname = list(collection)[mid_frame]
|
||||
else:
|
||||
fname = thumb_fname = os.path.basename(
|
||||
instance.data.get("path", None))
|
||||
fhead = os.path.splitext(fname)[0] + "."
|
||||
|
||||
self.log.debug("__ fhead: `{}`".format(fhead))
|
||||
|
||||
if "#" in fhead:
|
||||
fhead = fhead.replace("#", "")[:-1]
|
||||
|
||||
path_render = os.path.join(
|
||||
staging_dir, thumb_fname).replace("\\", "/")
|
||||
self.log.debug("__ path_render: `{}`".format(path_render))
|
||||
|
||||
if self.use_rendered and os.path.isfile(path_render):
|
||||
# check if file exist otherwise connect to write node
|
||||
rnode = nuke.createNode("Read")
|
||||
rnode["file"].setValue(path_render)
|
||||
rnode["colorspace"].setValue(colorspace)
|
||||
|
||||
# turn it raw if none of baking is ON
|
||||
if all([
|
||||
not self.bake_viewer_input_process,
|
||||
not self.bake_viewer_process
|
||||
]):
|
||||
rnode["raw"].setValue(True)
|
||||
|
||||
temporary_nodes.append(rnode)
|
||||
previous_node = rnode
|
||||
|
||||
if self.reposition_nodes is None:
|
||||
# [deprecated] create reformat node old way
|
||||
reformat_node = nuke.createNode("Reformat")
|
||||
ref_node = self.nodes.get("Reformat", None)
|
||||
if ref_node:
|
||||
for k, v in ref_node:
|
||||
self.log.debug("k, v: {0}:{1}".format(k, v))
|
||||
if isinstance(v, unicode):
|
||||
v = str(v)
|
||||
reformat_node[k].setValue(v)
|
||||
|
||||
reformat_node.setInput(0, previous_node)
|
||||
previous_node = reformat_node
|
||||
temporary_nodes.append(reformat_node)
|
||||
else:
|
||||
# create reformat node new way
|
||||
for repo_node in self.reposition_nodes:
|
||||
node_class = repo_node["node_class"]
|
||||
knobs = repo_node["knobs"]
|
||||
node = nuke.createNode(node_class)
|
||||
set_node_knobs_from_settings(node, knobs)
|
||||
|
||||
# connect in order
|
||||
node.setInput(0, previous_node)
|
||||
previous_node = node
|
||||
temporary_nodes.append(node)
|
||||
|
||||
# only create colorspace baking if toggled on
|
||||
if bake_viewer_process:
|
||||
if bake_viewer_input_process_node:
|
||||
# get input process and connect it to baking
|
||||
ipn = napi.get_view_process_node()
|
||||
if ipn is not None:
|
||||
ipn.setInput(0, previous_node)
|
||||
previous_node = ipn
|
||||
temporary_nodes.append(ipn)
|
||||
|
||||
dag_node = nuke.createNode("OCIODisplay")
|
||||
dag_node.setInput(0, previous_node)
|
||||
previous_node = dag_node
|
||||
temporary_nodes.append(dag_node)
|
||||
|
||||
thumb_name = "thumbnail"
|
||||
# only add output name and
|
||||
# if there are more than one bake preset
|
||||
if (
|
||||
output_name
|
||||
and len(instance.data.get("bakePresets", {}).keys()) > 1
|
||||
):
|
||||
thumb_name = "{}_{}".format(output_name, thumb_name)
|
||||
|
||||
# create write node
|
||||
write_node = nuke.createNode("Write")
|
||||
file = fhead[:-1] + thumb_name + ".jpg"
|
||||
thumb_path = os.path.join(staging_dir, file).replace("\\", "/")
|
||||
|
||||
# add thumbnail to cleanup
|
||||
instance.context.data["cleanupFullPaths"].append(thumb_path)
|
||||
|
||||
# make sure only one thumbnail path is set
|
||||
# and it is existing file
|
||||
instance_thumb_path = instance.data.get("thumbnailPath")
|
||||
if not instance_thumb_path or not os.path.isfile(instance_thumb_path):
|
||||
instance.data["thumbnailPath"] = thumb_path
|
||||
|
||||
write_node["file"].setValue(thumb_path)
|
||||
write_node["file_type"].setValue("jpg")
|
||||
write_node["raw"].setValue(1)
|
||||
write_node.setInput(0, previous_node)
|
||||
temporary_nodes.append(write_node)
|
||||
|
||||
repre = {
|
||||
'name': thumb_name,
|
||||
'ext': "jpg",
|
||||
"outputName": thumb_name,
|
||||
'files': file,
|
||||
"stagingDir": staging_dir,
|
||||
"tags": ["thumbnail", "publish_on_farm", "delete"]
|
||||
}
|
||||
instance.data["representations"].append(repre)
|
||||
|
||||
# Render frames
|
||||
nuke.execute(write_node.name(), mid_frame, mid_frame)
|
||||
|
||||
self.log.debug(
|
||||
"representations: {}".format(instance.data["representations"]))
|
||||
|
||||
# Clean up
|
||||
for node in temporary_nodes:
|
||||
nuke.delete(node)
|
||||
|
|
@@ -298,7 +298,7 @@ def create_timeline_item(
     if source_end:
         clip_data["endFrame"] = source_end
     if timecode_in:
-        clip_data["recordFrame"] = timecode_in
+        clip_data["recordFrame"] = timeline_in

     # add to timeline
     media_pool.AppendToTimeline([clip_data])
@@ -7,6 +7,9 @@ from openpype.tools.utils import host_tools
 from openpype.pipeline import registered_host


+MENU_LABEL = os.environ["AVALON_LABEL"]
+
+
 def load_stylesheet():
     path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
     if not os.path.exists(path):
@@ -39,7 +42,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
     def __init__(self, *args, **kwargs):
         super(OpenPypeMenu, self).__init__(*args, **kwargs)

-        self.setObjectName("OpenPypeMenu")
+        self.setObjectName(f"{MENU_LABEL}Menu")

         self.setWindowFlags(
             QtCore.Qt.Window
@@ -49,7 +52,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
             | QtCore.Qt.WindowStaysOnTopHint
         )

-        self.setWindowTitle("OpenPype")
+        self.setWindowTitle(f"{MENU_LABEL}")
         save_current_btn = QtWidgets.QPushButton("Save current file", self)
         workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
         create_btn = QtWidgets.QPushButton("Create ...", self)
@ -406,26 +406,42 @@ class ClipLoader:
|
|||
self.active_bin
|
||||
)
|
||||
_clip_property = media_pool_item.GetClipProperty
|
||||
source_in = int(_clip_property("Start"))
|
||||
source_out = int(_clip_property("End"))
|
||||
source_duration = int(_clip_property("Frames"))
|
||||
|
||||
# get handles
|
||||
handle_start = self.data["versionData"].get("handleStart")
|
||||
handle_end = self.data["versionData"].get("handleEnd")
|
||||
if handle_start is None:
|
||||
handle_start = int(self.data["assetData"]["handleStart"])
|
||||
if handle_end is None:
|
||||
handle_end = int(self.data["assetData"]["handleEnd"])
|
||||
if not self.with_handles:
|
||||
# Load file without the handles of the source media
|
||||
# We remove the handles from the source in and source out
|
||||
# so that the handles are excluded in the timeline
|
||||
handle_start = 0
|
||||
handle_end = 0
|
||||
|
||||
# check frame duration from versionData or assetData
|
||||
frame_start = self.data["versionData"].get("frameStart")
|
||||
if frame_start is None:
|
||||
frame_start = self.data["assetData"]["frameStart"]
|
||||
# get version data frame data from db
|
||||
version_data = self.data["versionData"]
|
||||
frame_start = version_data.get("frameStart")
|
||||
frame_end = version_data.get("frameEnd")
|
||||
|
||||
# check frame duration from versionData or assetData
|
||||
frame_end = self.data["versionData"].get("frameEnd")
|
||||
if frame_end is None:
|
||||
frame_end = self.data["assetData"]["frameEnd"]
|
||||
|
||||
db_frame_duration = int(frame_end) - int(frame_start) + 1
|
||||
# The version data usually stored the frame range + handles of the
|
||||
# media however certain representations may be shorter because they
|
||||
# exclude those handles intentionally. Unfortunately the
|
||||
# representation does not store that in the database currently;
|
||||
# so we should compensate for those cases. If the media is shorter
|
||||
# than the frame range specified in the database we assume it is
|
||||
# without handles and thus we do not need to remove the handles
|
||||
# from source and out
|
||||
if frame_start is not None and frame_end is not None:
|
||||
# Version has frame range data, so we can compare media length
|
||||
handle_start = version_data.get("handleStart", 0)
|
||||
handle_end = version_data.get("handleEnd", 0)
|
||||
frame_start_handle = frame_start - handle_start
|
||||
frame_end_handle = frame_start + handle_end
|
||||
database_frame_duration = int(
|
||||
frame_end_handle - frame_start_handle + 1
|
||||
)
|
||||
if source_duration >= database_frame_duration:
|
||||
source_in += handle_start
|
||||
source_out -= handle_end
|
||||
|
||||
# get timeline in
|
||||
timeline_start = self.active_timeline.GetStartFrame()
|
||||
|
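A worked pass through the duration check above, with assumed example values, to show when the handles get trimmed from the source range. The numbers are illustrative only; the arithmetic mirrors the code as written:

frame_start, frame_end = 1001, 1020    # assumed version frame range
handle_start = handle_end = 10         # assumed handles
source_in, source_out = 0, 40          # assumed clip "Start"/"End"
source_duration = 41                   # assumed clip "Frames"

frame_start_handle = frame_start - handle_start               # 991
frame_end_handle = frame_start + handle_end                   # 1011
database_frame_duration = int(
    frame_end_handle - frame_start_handle + 1)                # 21
if source_duration >= database_frame_duration:
    # media is long enough, so it is assumed to include handles
    source_in += handle_start                                 # 10
    source_out -= handle_end                                  # 30
print(source_in, source_out)  # 10 30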
@@ -437,24 +453,6 @@ class ClipLoader:
        timeline_in = int(
            timeline_start + self.data["assetData"]["clipIn"])

        source_in = int(_clip_property("Start"))
        source_out = int(_clip_property("End"))
        source_duration = int(_clip_property("Frames"))

        # check if source duration is shorter than db frame duration
        source_with_handles = True
        if source_duration < db_frame_duration:
            source_with_handles = False

        # only exclude handles if source has no handles or
        # if user wants to load without handles
        if (
            not self.with_handles
            or not source_with_handles
        ):
            source_in += handle_start
            source_out -= handle_end

        # make track item from source in bin as item
        timeline_item = lib.create_timeline_item(
            media_pool_item,

@@ -868,7 +866,7 @@ class PublishClip:
    def _convert_to_entity(self, key):
        """ Converting input key to key with type. """
        # convert to entity type
        entity_type = self.types.get(key, None)
        entity_type = self.types.get(key)

        assert entity_type, "Missing entity type for `{}`".format(
            key

22
openpype/hosts/resolve/utility_scripts/AYON__Menu.py
Normal file

@@ -0,0 +1,22 @@
import os
import sys

from openpype.pipeline import install_host
from openpype.lib import Logger

log = Logger.get_logger(__name__)


def main(env):
    from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu

    # activate resolve from openpype
    host = ResolveHost()
    install_host(host)

    launch_pype_menu()


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))
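The exit line inverts the truthiness of main()'s return value: a truthy result maps to exit status 0 (success) and a falsy one (including the implicit None returned by main above) maps to 1. A minimal illustration of the mapping, without terminating the interpreter:

def exit_code(result):
    # mirrors sys.exit(not bool(result))
    return int(not bool(result))

print(exit_code(True))   # 0 -> success
print(exit_code(None))   # 1 -> failure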
@@ -2,6 +2,7 @@ import os
import shutil
from openpype.lib import Logger, is_running_from_build

from openpype import AYON_SERVER_ENABLED
RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

@@ -54,6 +55,14 @@ def setup(env):
        src = os.path.join(directory, script)
        dst = os.path.join(util_scripts_dir, script)

        # TODO: remove this once we have a proper solution
        if AYON_SERVER_ENABLED:
            if "OpenPype__Menu.py" == script:
                continue
        else:
            if "AYON__Menu.py" == script:
                continue

        # TODO: Make this a less hacky workaround
        if script == "openpype_startup.scriptlib":
            # Handle special case for scriptlib that needs to be a folder

@@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect original base name for use in templates."""
from pathlib import Path

import pyblish.api


class CollectOriginalBasename(pyblish.api.InstancePlugin):
    """Collect original file base name."""

    order = pyblish.api.CollectorOrder + 0.498
    label = "Collect Base Name"
    hosts = ["standalonepublisher"]
    families = ["simpleUnrealTexture"]

    def process(self, instance):
        file_name = Path(instance.data["representations"][0]["files"])
        instance.data["originalBasename"] = file_name.stem

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
"""Validator for correct file naming."""
import re
import pyblish.api

from openpype.pipeline.publish import (
    ValidateContentsOrder,
    PublishXmlValidationError,
)


class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin):
    label = "Validate Unreal Texture Names"
    hosts = ["standalonepublisher"]
    families = ["simpleUnrealTexture"]
    order = ValidateContentsOrder
    regex = "^T_{asset}.*"

    def process(self, instance):
        file_name = instance.data.get("originalBasename")
        self.log.info(file_name)
        pattern = self.regex.format(asset=instance.data.get("asset"))
        if not re.match(pattern, file_name):
            msg = f"Invalid file name {file_name}"
            raise PublishXmlValidationError(
                self, msg, formatting_data={
                    "invalid_file": file_name,
                    "asset": instance.data.get("asset")
                })
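The class attribute regex in the removed validator above is a template that gets the asset name substituted before matching. A short sketch of the behaviour, with hypothetical asset and file names:

import re

regex = "^T_{asset}.*"
pattern = regex.format(asset="hero")               # "^T_hero.*"
print(bool(re.match(pattern, "T_hero_diffuse")))   # True
print(bool(re.match(pattern, "hero_diffuse")))     # False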
@@ -583,18 +583,9 @@ def prompt_new_file_with_mesh(mesh_filepath):
    file_dialog.setDirectory(os.path.dirname(mesh_filepath))
    url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
    file_dialog.selectUrl(url)

    # Give the explorer window time to refresh to the folder and select
    # the file
    while not file_dialog.selectedFiles():
        app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
    print(f"Selected: {file_dialog.selectedFiles()}")

    # Set it again now we know the path is refreshed - without this
    # accepting the dialog will often not trigger the correct filepath
    file_dialog.setDirectory(os.path.dirname(mesh_filepath))
    url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
    file_dialog.selectUrl(url)
    # TODO: find a way to improve the process event to
    # load more complicated mesh
    app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)

    file_dialog.done(file_dialog.Accepted)
    app.processEvents(QtCore.QEventLoop.AllEvents)

@@ -628,7 +619,12 @@ def prompt_new_file_with_mesh(mesh_filepath):
    mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel)
    if not mesh_filename_label.text():
        dialog.close()
        raise RuntimeError(f"Failed to set mesh path: {mesh_filepath}")
        substance_painter.logging.warning(
            "Failed to set mesh path with the prompt dialog:"
            f"{mesh_filepath}\n\n"
            "Creating new project directly with the mesh path instead.")
    else:
        dialog.done(dialog.Accepted)

    new_action = _get_new_project_action()
    if not new_action:

@@ -44,14 +44,22 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
        # Get user inputs
        import_cameras = data.get("import_cameras", True)
        preserve_strokes = data.get("preserve_strokes", True)

        sp_settings = substance_painter.project.Settings(
            import_cameras=import_cameras
        )
        if not substance_painter.project.is_open():
            # Allow to 'initialize' a new project
            path = self.filepath_from_context(context)
            # TODO: improve the prompt dialog function to not
            # only works for simple polygon scene
            result = prompt_new_file_with_mesh(mesh_filepath=path)
            if not result:
                self.log.info("User cancelled new project prompt.")
                return
                self.log.info("User cancelled new project prompt."
                              "Creating new project directly from"
                              " Substance Painter API Instead.")
                settings = substance_painter.project.create(
                    mesh_file_path=path, settings=sp_settings
                )

        else:
            # Reload the mesh

@@ -663,7 +663,7 @@ or updating already created. Publishing will create OTIO file.
        variant_name = instance_data["variant"]

        # basic unique asset name
        clip_name = os.path.splitext(otio_clip.name)[0].lower()
        clip_name = os.path.splitext(otio_clip.name)[0]
        project_doc = get_project(self.project_name)

        shot_name, shot_metadata = self._shot_metadata_solver.generate_data(

@@ -73,7 +73,7 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
        render_layer_id = creator_attributes["render_layer_instance_id"]
        for in_data in instance.context.data["workfileInstances"]:
            if (
                in_data["creator_identifier"] == "render.layer"
                in_data.get("creator_identifier") == "render.layer"
                and in_data["instance_id"] == render_layer_id
            ):
                render_layer_data = in_data

@@ -111,6 +111,7 @@ from .transcoding import (
    get_ffmpeg_format_args,
    convert_ffprobe_fps_value,
    convert_ffprobe_fps_to_float,
    get_rescaled_command_arguments,
)

from .local_settings import (

@@ -232,6 +233,7 @@ __all__ = [
    "get_ffmpeg_format_args",
    "convert_ffprobe_fps_value",
    "convert_ffprobe_fps_to_float",
    "get_rescaled_command_arguments",

    "IniSettingRegistry",
    "JSONSettingRegistry",

@@ -536,7 +536,7 @@ def convert_for_ffmpeg(
    input_frame_end=None,
    logger=None
):
    """Contert source file to format supported in ffmpeg.
    """Convert source file to format supported in ffmpeg.

    Currently can convert only exrs.

@@ -592,29 +592,7 @@ def convert_for_ffmpeg(
        oiio_cmd.extend(["--compression", compression])

    # Collect channels to export
    channel_names = input_info["channelnames"]
    review_channels = get_convert_rgb_channels(channel_names)
    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels
    input_channels = [red, green, blue]
    channels_arg = "R={},G={},B={}".format(red, green, blue)
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
        input_channels.append(alpha)
    input_channels_str = ",".join(input_channels)

    subimages = input_info.get("subimages")
    input_arg = "-i"
    if subimages is None or subimages == 1:
        # Tell oiiotool which channels should be loaded
        # - other channels are not loaded to memory so helps to avoid memory
        #   leak issues
        # - this option is crashing if used on multipart/subimages exrs
        input_arg += ":ch={}".format(input_channels_str)
    input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)

    oiio_cmd.extend([
        input_arg, first_input_path,

@@ -635,7 +613,7 @@ def convert_for_ffmpeg(
            continue

        # Remove attributes that have string value longer than allowed length
        #   for ffmpeg or when contain unallowed symbols
        #   for ffmpeg or when contain prohibited symbols
        erase_reason = "Missing reason"
        erase_attribute = False
        if len(attr_value) > MAX_FFMPEG_STRING_LEN:

@@ -695,7 +673,7 @@ def convert_input_paths_for_ffmpeg(

    Args:
        input_paths (str): Paths that should be converted. It is expected that
            contains single file or image sequence of samy type.
            contains single file or image sequence of same type.
        output_dir (str): Path to directory where output will be rendered.
            Must not be same as input's directory.
        logger (logging.Logger): Logger used for logging.

@@ -709,6 +687,7 @@ def convert_input_paths_for_ffmpeg(

    first_input_path = input_paths[0]
    ext = os.path.splitext(first_input_path)[1].lower()

    if ext != ".exr":
        raise ValueError((
            "Function 'convert_for_ffmpeg' currently support only"

@@ -724,30 +703,7 @@ def convert_input_paths_for_ffmpeg(
        compression = "none"

    # Collect channels to export
    channel_names = input_info["channelnames"]
    review_channels = get_convert_rgb_channels(channel_names)
    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels
    input_channels = [red, green, blue]
    # TODO find subimage inder where rgba is available for multipart exrs
    channels_arg = "R={},G={},B={}".format(red, green, blue)
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
        input_channels.append(alpha)
    input_channels_str = ",".join(input_channels)

    subimages = input_info.get("subimages")
    input_arg = "-i"
    if subimages is None or subimages == 1:
        # Tell oiiotool which channels should be loaded
        # - other channels are not loaded to memory so helps to avoid memory
        #   leak issues
        # - this option is crashing if used on multipart exrs
        input_arg += ":ch={}".format(input_channels_str)
    input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)

    for input_path in input_paths:
        # Prepare subprocess arguments

@@ -774,7 +730,7 @@ def convert_input_paths_for_ffmpeg(
            continue

        # Remove attributes that have string value longer than allowed
        #   length for ffmpeg or when containing unallowed symbols
        #   length for ffmpeg or when containing prohibited symbols
        erase_reason = "Missing reason"
        erase_attribute = False
        if len(attr_value) > MAX_FFMPEG_STRING_LEN:

@@ -1021,9 +977,7 @@ def _ffmpeg_h264_codec_args(stream_data, source_ffmpeg_cmd):
    if pix_fmt:
        output.extend(["-pix_fmt", pix_fmt])

    output.extend(["-intra"])
    output.extend(["-g", "1"])

    output.extend(["-intra", "-g", "1"])
    return output
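The merged call above builds the same argument list as the two separate extend calls it replaces; a quick equivalence check:

output_a = []
output_a.extend(["-intra"])
output_a.extend(["-g", "1"])

output_b = []
output_b.extend(["-intra", "-g", "1"])

assert output_a == output_b == ["-intra", "-g", "1"]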
@@ -1150,7 +1104,7 @@ def convert_colorspace(
    view=None,
    display=None,
    additional_command_args=None,
    logger=None
    logger=None,
):
    """Convert source file from one color space to another.

@@ -1169,6 +1123,7 @@ def convert_colorspace(
        view (str): name for viewer space (ocio valid)
            both 'view' and 'display' must be filled (if 'target_colorspace')
        display (str): name for display-referred reference space (ocio valid)
            both 'view' and 'display' must be filled (if 'target_colorspace')
        additional_command_args (list): arguments for oiiotool (like binary
            depth for .dpx)
        logger (logging.Logger): Logger used for logging.

@@ -1178,14 +1133,28 @@ def convert_colorspace(
    if logger is None:
        logger = logging.getLogger(__name__)

    input_info = get_oiio_info_for_input(input_path, logger=logger)

    # Collect channels to export
    input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)

    # Prepare subprocess arguments
    oiio_cmd = get_oiio_tool_args(
        "oiiotool",
        input_path,
        # Don't add any additional attributes
        "--nosoftwareattrib",
        "--colorconfig", config_path
    )

    oiio_cmd.extend([
        input_arg, input_path,
        # Tell oiiotool which channels should be put to top stack
        #   (and output)
        "--ch", channels_arg,
        # Use first subimage
        "--subimage", "0"
    ])

    if all([target_colorspace, view, display]):
        raise ValueError("Colorspace and both screen and display"
                         " cannot be set together."

@@ -1226,3 +1195,221 @@ def split_cmd_args(in_args):
            continue
        splitted_args.extend(arg.split(" "))
    return splitted_args


def get_rescaled_command_arguments(
    application,
    input_path,
    target_width,
    target_height,
    target_par=None,
    bg_color=None,
    log=None
):
    """Get command arguments for rescaling input to target size.

    Args:
        application (str): Application for which command should be created.
            Currently supported are "ffmpeg" and "oiiotool".
        input_path (str): Path to input file.
        target_width (int): Width of target.
        target_height (int): Height of target.
        target_par (Optional[float]): Pixel aspect ratio of target.
        bg_color (Optional[list[int]]): List of 8bit int values for
            background color. Should be in range 0 - 255.
        log (Optional[logging.Logger]): Logger used for logging.

    Returns:
        list[str]: List of command arguments.
    """
    command_args = []
    target_par = target_par or 1.0
    input_par = 1.0

    # ffmpeg command
    input_file_metadata = get_ffprobe_data(input_path, logger=log)
    stream = input_file_metadata["streams"][0]
    input_width = int(stream["width"])
    input_height = int(stream["height"])
    stream_input_par = stream.get("sample_aspect_ratio")
    if stream_input_par:
        input_par = (
            float(stream_input_par.split(":")[0])
            / float(stream_input_par.split(":")[1])
        )
    # recalculating input and target width
    input_width = int(input_width * input_par)
    target_width = int(target_width * target_par)

    # calculate aspect ratios
    target_aspect = float(target_width) / target_height
    input_aspect = float(input_width) / input_height

    # calculate scale size
    scale_size = float(input_width) / target_width
    if input_aspect < target_aspect:
        scale_size = float(input_height) / target_height

    # calculate rescaled width and height
    rescaled_width = int(input_width / scale_size)
    rescaled_height = int(input_height / scale_size)

    # calculate width and height shift
    rescaled_width_shift = int((target_width - rescaled_width) / 2)
    rescaled_height_shift = int((target_height - rescaled_height) / 2)

    if application == "ffmpeg":
        # create scale command
        scale = "scale={0}:{1}".format(input_width, input_height)
        pad = "pad={0}:{1}:({2}-iw)/2:({3}-ih)/2".format(
            target_width,
            target_height,
            target_width,
            target_height
        )
        if input_width > target_width or input_height > target_height:
            scale = "scale={0}:{1}".format(rescaled_width, rescaled_height)
            pad = "pad={0}:{1}:{2}:{3}".format(
                target_width,
                target_height,
                rescaled_width_shift,
                rescaled_height_shift
            )

        if bg_color:
            color = convert_color_values(application, bg_color)
            pad += ":{0}".format(color)
        command_args.extend(["-vf", "{0},{1}".format(scale, pad)])

    elif application == "oiiotool":
        input_info = get_oiio_info_for_input(input_path, logger=log)
        # Collect channels to export
        _, channels_arg = get_oiio_input_and_channel_args(
            input_info, alpha_default=1.0)

        command_args.extend([
            # Tell oiiotool which channels should be put to top stack
            #   (and output)
            "--ch", channels_arg,
            # Use first subimage
            "--subimage", "0"
        ])

        if input_par != 1.0:
            command_args.extend(["--pixelaspect", "1"])

        width_shift = int((target_width - input_width) / 2)
        height_shift = int((target_height - input_height) / 2)

        # default resample is not scaling source image
        resample = [
            "--resize",
            "{0}x{1}".format(input_width, input_height),
            "--origin",
            "+{0}+{1}".format(width_shift, height_shift),
        ]
        # scaled source image to target size
        if input_width > target_width or input_height > target_height:
            # form resample command
            resample = [
                "--resize:filter=lanczos3",
                "{0}x{1}".format(rescaled_width, rescaled_height),
                "--origin",
                "+{0}+{1}".format(rescaled_width_shift, rescaled_height_shift),
            ]
        command_args.extend(resample)

        fullsize = [
            "--fullsize",
            "{0}x{1}".format(target_width, target_height)
        ]
        if bg_color:
            color = convert_color_values(application, bg_color)

            fullsize.extend([
                "--pattern",
                "constant:color={0}".format(color),
                "{0}x{1}".format(target_width, target_height),
                "4",  # 4 channels
                "--over"
            ])
        command_args.extend(fullsize)

    else:
        raise ValueError(
            "\"application\" input argument should "
            "be either \"ffmpeg\" or \"oiiotool\""
        )

    return command_args


def convert_color_values(application, color_value):
    """Get color mapping for ffmpeg and oiiotool.

    Args:
        application (str): Application for which command should be created.
        color_value (list[int]): List of 8bit int values for RGBA.

    Returns:
        str: ffmpeg returns hex string, oiiotool is string with floats.
    """
    red, green, blue, alpha = color_value

    if application == "ffmpeg":
        return "{0:0>2X}{1:0>2X}{2:0>2X}@{3}".format(
            red, green, blue, (alpha / 255.0)
        )
    elif application == "oiiotool":
        red = float(red / 255)
        green = float(green / 255)
        blue = float(blue / 255)
        alpha = float(alpha / 255)

        return "{0:.3f},{1:.3f},{2:.3f},{3:.3f}".format(
            red, green, blue, alpha)
    else:
        raise ValueError(
            "\"application\" input argument should "
            "be either \"ffmpeg\" or \"oiiotool\""
        )


def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
    """Get input and channel arguments for oiiotool.

    Args:
        oiio_input_info (dict): Information about input from oiio tool.
            Should be output of function `get_oiio_info_for_input`.
        alpha_default (float, optional): Default value for alpha channel.

    Returns:
        tuple[str, str]: Tuple of input and channel arguments.
    """
    channel_names = oiio_input_info["channelnames"]
    review_channels = get_convert_rgb_channels(channel_names)

    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels
    input_channels = [red, green, blue]

    channels_arg = "R={0},G={1},B={2}".format(red, green, blue)
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
        input_channels.append(alpha)
    elif alpha_default:
        channels_arg += ",A={}".format(float(alpha_default))
        input_channels.append("A")

    input_channels_str = ",".join(input_channels)

    subimages = oiio_input_info.get("subimages")
    input_arg = "-i"
    if subimages is None or subimages == 1:
        # Tell oiiotool which channels should be loaded
        # - other channels are not loaded to memory so helps to avoid memory
        #   leak issues
        # - this option is crashing if used on multipart exrs
        input_arg += ":ch={}".format(input_channels_str)

    return input_arg, channels_arg
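A usage sketch of convert_color_values as defined above, converting one 8-bit RGBA value for both supported applications (this assumes the function is importable from the transcoding module this hunk belongs to):

bg_color = [255, 128, 0, 255]

print(convert_color_values("ffmpeg", bg_color))
# FF8000@1.0
print(convert_color_values("oiiotool", bg_color))
# 1.000,0.502,0.000,1.000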
@@ -460,7 +460,21 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
        self.plugin_info = self.get_plugin_info()
        self.aux_files = self.get_aux_files()

        self.process_submission()
        job_id = self.process_submission()
        self.log.info("Submitted job to Deadline: {}.".format(job_id))

        # TODO: Find a way that's more generic and not render type specific
        if "exportJob" in instance.data:
            self.log.info("Splitting export and render in two jobs")
            self.log.info("Export job id: %s", job_id)
            render_job_info = self.get_job_info(dependency_job_ids=[job_id])
            render_plugin_info = self.get_plugin_info(job_type="render")
            payload = self.assemble_payload(
                job_info=render_job_info,
                plugin_info=render_plugin_info
            )
            render_job_id = self.submit(payload)
            self.log.info("Render job id: %s", render_job_id)

    def process_submission(self):
        """Process data for submission.

@@ -2,6 +2,8 @@
"""Collect default Deadline server."""
import pyblish.api

from openpype import AYON_SERVER_ENABLED


class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
    """Collect default Deadline Webservice URL.

@@ -30,24 +32,26 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
            self.log.error("Cannot get OpenPype Deadline module.")
            raise AssertionError("OpenPype Deadline module not found.")

        # get default deadline webservice url from deadline module
        self.log.debug(deadline_module.deadline_urls)
        context.data["defaultDeadline"] = deadline_module.deadline_urls["default"]  # noqa: E501
        deadline_settings = context.data["project_settings"]["deadline"]
        deadline_server_name = None
        if AYON_SERVER_ENABLED:
            deadline_server_name = deadline_settings["deadline_server"]
        else:
            deadline_servers = deadline_settings["deadline_servers"]
            if deadline_servers:
                deadline_server_name = deadline_servers[0]

        context.data["deadlinePassMongoUrl"] = self.pass_mongo_url

        deadline_servers = (context.data
                            ["project_settings"]
                            ["deadline"]
                            ["deadline_servers"])
        if deadline_servers:
            deadline_server_name = deadline_servers[0]
        deadline_webservice = None
        if deadline_server_name:
            deadline_webservice = deadline_module.deadline_urls.get(
                deadline_server_name)
            if deadline_webservice:
                context.data["defaultDeadline"] = deadline_webservice
                self.log.debug("Overriding from project settings with {}".format(  # noqa: E501
                    deadline_webservice))

        context.data["defaultDeadline"] = \
            context.data["defaultDeadline"].strip().rstrip("/")
        default_deadline_webservice = deadline_module.deadline_urls["default"]
        deadline_webservice = (
            deadline_webservice
            or default_deadline_webservice
        )

        context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/")  # noqa

@@ -6,8 +6,14 @@ import getpass
import attr
from datetime import datetime

from openpype.lib import is_running_from_build
from openpype.lib import (
    is_running_from_build,
    BoolDef,
    NumberDef,
    TextDef,
)
from openpype.pipeline import legacy_io
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
from openpype.pipeline.farm.tools import iter_expected_files
from openpype.tests.lib import is_in_tests

@@ -22,10 +28,11 @@ class BlenderPluginInfo():
    SaveFile = attr.ib(default=True)


class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
                            OpenPypePyblishPluginMixin):
    label = "Submit Render to Deadline"
    hosts = ["blender"]
    families = ["render.farm"]
    families = ["render"]

    use_published = True
    priority = 50

@@ -33,6 +40,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
    jobInfo = {}
    pluginInfo = {}
    group = None
    job_delay = "00:00:00:00"

    def get_job_info(self):
        job_info = DeadlineJobInfo(Plugin="Blender")

@@ -67,8 +75,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):

        job_info.Pool = instance.data.get("primaryPool")
        job_info.SecondaryPool = instance.data.get("secondaryPool")
        job_info.Comment = context.data.get("comment")
        job_info.Priority = instance.data.get("priority", self.priority)
        job_info.Comment = instance.data.get("comment")

        if self.group != "none" and self.group:
            job_info.Group = self.group

@@ -83,8 +90,10 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
            machine_list_key = "Blacklist"
            render_globals[machine_list_key] = machine_list

        job_info.Priority = attr_values.get("priority")
        job_info.ChunkSize = attr_values.get("chunkSize")
        job_info.ChunkSize = attr_values.get("chunkSize", self.chunk_size)
        job_info.Priority = attr_values.get("priority", self.priority)
        job_info.ScheduledType = "Once"
        job_info.JobDelay = attr_values.get("job_delay", self.job_delay)

        # Add options from RenderGlobals
        render_globals = instance.data.get("renderGlobals", {})

@@ -180,3 +189,39 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
        the metadata and the rendered files are in the same location.
        """
        return super().from_published_scene(False)

    @classmethod
    def get_attribute_defs(cls):
        defs = super(BlenderSubmitDeadline, cls).get_attribute_defs()
        defs.extend([
            BoolDef("use_published",
                    default=cls.use_published,
                    label="Use Published Scene"),

            NumberDef("priority",
                      minimum=1,
                      maximum=250,
                      decimals=0,
                      default=cls.priority,
                      label="Priority"),

            NumberDef("chunkSize",
                      minimum=1,
                      maximum=50,
                      decimals=0,
                      default=cls.chunk_size,
                      label="Frame Per Task"),

            TextDef("group",
                    default=cls.group,
                    label="Group Name"),

            TextDef("job_delay",
                    default=cls.job_delay,
                    label="Job Delay",
                    placeholder="dd:hh:mm:ss",
                    tooltip="Delay the job by the specified amount of time. "
                            "Timecode: dd:hh:mm:ss."),
        ])

        return defs

@@ -2,8 +2,6 @@ import os
import getpass
from datetime import datetime

import hou

import attr
import pyblish.api
from openpype.lib import (

@@ -141,6 +139,9 @@ class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
        return job_info

    def get_plugin_info(self):
        # Not all hosts can import this module.
        import hou

        instance = self._instance
        version = hou.applicationVersionString()
        version = ".".join(version.split(".")[:2])

@@ -167,6 +168,9 @@ class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
        instance.data["toBeRenderedOn"] = "deadline"

    def get_rop_node(self, instance):
        # Not all hosts can import this module.
        import hou

        rop = instance.data.get("instance_node")
        rop_node = hou.node(rop)

@@ -5,12 +5,15 @@ from datetime import datetime

import pyblish.api

from openpype.pipeline import legacy_io
from openpype.pipeline import legacy_io, OpenPypePyblishPluginMixin
from openpype.tests.lib import is_in_tests
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import is_running_from_build

from openpype.lib import (
    is_running_from_build,
    BoolDef,
    NumberDef
)

@attr.s
class DeadlinePluginInfo():

@@ -20,8 +23,29 @@ class DeadlinePluginInfo():
    IgnoreInputs = attr.ib(default=True)


class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
    """Submit Solaris USD Render ROPs to Deadline.
@attr.s
class ArnoldRenderDeadlinePluginInfo():
    InputFile = attr.ib(default=None)
    Verbose = attr.ib(default=4)


@attr.s
class MantraRenderDeadlinePluginInfo():
    SceneFile = attr.ib(default=None)
    Version = attr.ib(default=None)


@attr.s
class VrayRenderPluginInfo():
    InputFilename = attr.ib(default=None)
    SeparateFilesPerFrame = attr.ib(default=True)


class HoudiniSubmitDeadline(
    abstract_submit_deadline.AbstractSubmitDeadline,
    OpenPypePyblishPluginMixin
):
    """Submit Render ROPs to Deadline.

    Renders are submitted to a Deadline Web Service as
    supplied via the environment variable AVALON_DEADLINE.

@@ -45,21 +69,95 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
    targets = ["local"]
    use_published = True

    def get_job_info(self):
        job_info = DeadlineJobInfo(Plugin="Houdini")
    # presets
    priority = 50
    chunk_size = 1
    export_priority = 50
    export_chunk_size = 10
    group = ""
    export_group = ""

    @classmethod
    def get_attribute_defs(cls):
        return [
            NumberDef(
                "priority",
                label="Priority",
                default=cls.priority,
                decimals=0
            ),
            NumberDef(
                "chunk",
                label="Frames Per Task",
                default=cls.chunk_size,
                decimals=0,
                minimum=1,
                maximum=1000
            ),
            NumberDef(
                "export_priority",
                label="Export Priority",
                default=cls.priority,
                decimals=0
            ),
            NumberDef(
                "export_chunk",
                label="Export Frames Per Task",
                default=cls.export_chunk_size,
                decimals=0,
                minimum=1,
                maximum=1000
            ),
            BoolDef(
                "suspend_publish",
                default=False,
                label="Suspend publish"
            )
        ]

    def get_job_info(self, dependency_job_ids=None):

        instance = self._instance
        context = instance.context

        attribute_values = self.get_attr_values_from_data(instance.data)

        # Whether Deadline render submission is being split in two
        # (extract + render)
        split_render_job = instance.data["exportJob"]

        # If there's some dependency job ids we can assume this is a render job
        # and not an export job
        is_export_job = True
        if dependency_job_ids:
            is_export_job = False

        if split_render_job and not is_export_job:
            # Convert from family to Deadline plugin name
            # i.e., arnold_rop -> Arnold
            plugin = instance.data["family"].replace("_rop", "").capitalize()
        else:
            plugin = "Houdini"

        job_info = DeadlineJobInfo(Plugin=plugin)

        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)

        job_info.Name = "{} - {}".format(filename, instance.name)
        job_info.BatchName = filename
        job_info.Plugin = "Houdini"

        job_info.UserName = context.data.get(
            "deadlineUser", getpass.getuser())

        if split_render_job and is_export_job:
            job_info.Priority = attribute_values.get(
                "export_priority", self.export_priority
            )
        else:
            job_info.Priority = attribute_values.get(
                "priority", self.priority
            )

        if is_in_tests():
            job_info.BatchName += datetime.now().strftime("%d%m%Y%H%M%S")

@@ -73,9 +171,23 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
        )
        job_info.Frames = frames

        # Make sure we make job frame dependent so render tasks pick up a soon
        # as export tasks are done
        if split_render_job and not is_export_job:
            job_info.IsFrameDependent = True

        job_info.Pool = instance.data.get("primaryPool")
        job_info.SecondaryPool = instance.data.get("secondaryPool")
        job_info.ChunkSize = instance.data.get("chunkSize", 10)
        job_info.Group = self.group
        if split_render_job and is_export_job:
            job_info.ChunkSize = attribute_values.get(
                "export_chunk", self.export_chunk_size
            )
        else:
            job_info.ChunkSize = attribute_values.get(
                "chunk", self.chunk_size
            )

        job_info.Comment = context.data.get("comment")

        keys = [

@@ -101,6 +213,7 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):

        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **legacy_io.Session)

        for key in keys:
            value = environment.get(key)
            if value:

@@ -115,25 +228,51 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
        job_info.OutputDirectory += dirname.replace("\\", "/")
        job_info.OutputFilename += fname

        # Add dependencies if given
        if dependency_job_ids:
            job_info.JobDependencies = ",".join(dependency_job_ids)

        return job_info

    def get_plugin_info(self):
    def get_plugin_info(self, job_type=None):
        # Not all hosts can import this module.
        import hou

        instance = self._instance
        context = instance.context

        # Output driver to render
        driver = hou.node(instance.data["instance_node"])
        hou_major_minor = hou.applicationVersionString().rsplit(".", 1)[0]

        plugin_info = DeadlinePluginInfo(
            SceneFile=context.data["currentFile"],
            OutputDriver=driver.path(),
            Version=hou_major_minor,
            IgnoreInputs=True
        )
        # Output driver to render
        if job_type == "render":
            family = instance.data.get("family")
            if family == "arnold_rop":
                plugin_info = ArnoldRenderDeadlinePluginInfo(
                    InputFile=instance.data["ifdFile"]
                )
            elif family == "mantra_rop":
                plugin_info = MantraRenderDeadlinePluginInfo(
                    SceneFile=instance.data["ifdFile"],
                    Version=hou_major_minor,
                )
            elif family == "vray_rop":
                plugin_info = VrayRenderPluginInfo(
                    InputFilename=instance.data["ifdFile"],
                )
            else:
                self.log.error(
                    "Family '%s' not supported yet to split render job",
                    family
                )
                return
        else:
            driver = hou.node(instance.data["instance_node"])
            plugin_info = DeadlinePluginInfo(
                SceneFile=context.data["currentFile"],
                OutputDriver=driver.path(),
                Version=hou_major_minor,
                IgnoreInputs=True
            )

        return attr.asdict(plugin_info)
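The family-to-plugin conversion used above for split render jobs is a plain string transform; for example:

for family in ("arnold_rop", "mantra_rop", "vray_rop"):
    print(family.replace("_rop", "").capitalize())
# Arnold
# Mantra
# Vray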
@@ -370,10 +370,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **legacy_io.Session)

        for _path in os.environ:
            if _path.lower().startswith('openpype_'):
                environment[_path] = os.environ[_path]

        # to recognize render jobs
        if AYON_SERVER_ENABLED:
            environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"]

@@ -402,7 +398,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
        self.log.debug("Submitting..")
        self.log.debug(json.dumps(payload, indent=4, sort_keys=True))

        # adding expectied files to instance.data
        # adding expected files to instance.data
        self.expected_files(
            instance,
            render_path,

@@ -458,7 +454,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
    def expected_files(
        self,
        instance,
        path,
        filepath,
        start_frame,
        end_frame
    ):

@@ -467,21 +463,44 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
        if not instance.data.get("expectedFiles"):
            instance.data["expectedFiles"] = []

        dirname = os.path.dirname(path)
        file = os.path.basename(path)
        dirname = os.path.dirname(filepath)
        file = os.path.basename(filepath)

        # since some files might be already tagged as publish_on_farm
        # we need to avoid adding them to expected files since those would be
        # duplicated into metadata.json file
        representations = instance.data.get("representations", [])
        # check if file is not in representations with publish_on_farm tag
        for repre in representations:
            # Skip if 'publish_on_farm' not available
            if "publish_on_farm" not in repre.get("tags", []):
                continue

            # in case where single file (video, image) is already in
            # representation file. Will be added to expected files via
            # submit_publish_job.py
            if file in repre.get("files", []):
                self.log.debug(
                    "Skipping expected file: {}".format(filepath))
                return

        # in case path is hashed sequence expression
        # (e.g. /path/to/file.####.png)
        if "#" in file:
            pparts = file.split("#")
            padding = "%0{}d".format(len(pparts) - 1)
            file = pparts[0] + padding + pparts[-1]

        # in case input path was single file (video or image)
        if "%" not in file:
            instance.data["expectedFiles"].append(path)
            instance.data["expectedFiles"].append(filepath)
            return

        # shift start frame by 1 if slate is present
        if instance.data.get("slate"):
            start_frame -= 1

        # add sequence files to expected files
        for i in range(start_frame, (end_frame + 1)):
            instance.data["expectedFiles"].append(
                os.path.join(dirname, (file % i)).replace("\\", "/"))
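A quick sketch of the hash-padding conversion and sequence expansion above, using a hypothetical render path:

import os

file = "render.####.exr"
if "#" in file:
    pparts = file.split("#")                 # ["render.", "", "", "", ".exr"]
    padding = "%0{}d".format(len(pparts) - 1)
    file = pparts[0] + padding + pparts[-1]
print(file)  # render.%04d.exr

expected = [
    os.path.join("/renders", file % i).replace("\\", "/")
    for i in range(1001, 1004)
]
print(expected)
# ['/renders/render.1001.exr', '/renders/render.1002.exr',
#  '/renders/render.1003.exr']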
@@ -429,7 +429,7 @@ def inject_ayon_environment(deadlinePlugin):
            "separated list \"{}\"."
            "The path to the render executable can be configured"
            " from the Plugin Configuration in the Deadline Monitor."
        ).format(";".join(exe_list)))
        ).format(exe_list))

        print("--- Ayon executable: {}".format(exe))

@@ -127,17 +127,25 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        other_representations = []
        has_movie_review = False
        for repre in instance_repres:
            self.log.debug("Representation {}".format(repre))
            repre_tags = repre.get("tags") or []
            # exclude representations with are going to be published on farm
            if "publish_on_farm" in repre_tags:
                continue

            self.log.debug("Representation {}".format(repre))

            # include only thumbnail representations
            if repre.get("thumbnail") or "thumbnail" in repre_tags:
                thumbnail_representations.append(repre)

            # include only review representations
            elif "ftrackreview" in repre_tags:
                review_representations.append(repre)
                if self._is_repre_video(repre):
                    has_movie_review = True

            else:
                # include all other representations
                other_representations.append(repre)

        # Prepare ftrack locations

@@ -230,6 +238,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        # Create review components
        # Change asset name of each new component for review
        multiple_reviewable = len(review_representations) > 1
        extended_asset_name = None
        for index, repre in enumerate(review_representations):
            if not self._is_repre_video(repre) and has_movie_review:
                self.log.debug("Movie repre has priority "

@@ -8,10 +8,12 @@ import appdirs
from qtpy import QtCore, QtWidgets, QtGui

from openpype import resources
from openpype import AYON_SERVER_ENABLED
from openpype.style import load_stylesheet
from openpype.lib import JSONSettingRegistry


openpype_art = """
 . . .. .  ..
  _oOOP3OPP3Op_. .

@@ -27,6 +29,18 @@ openpype_art = """
    ~P3.OPPPO3OP~ . ..  .
      .  ' '. .  .. . . . ..  .

"""

ayon_art = r"""

             ▄██▄
      ▄███▄ ▀██▄ ▀██▀ ▄██▀ ▄██▀▀▀██▄    ▀███▄      █▄
     ▄▄ ▀██▄  ▀██▄  ▄██▀ ██▀      ▀██▄  ▄  ▀██▄    ███
    ▄██▀  ██▄   ▀ ▄▄ ▀  ██         ▄██  ███  ▀██▄  ███
   ▄██▀    ▀██▄   ██    ▀██▄      ▄██▀  ███    ▀██ ▀█▀
  ▄██▀      ▀██▄  ▀█      ▀██▄▄▄▄██▀    █▀      ▀██▄

     ·  · - =[ by YNPUT ]:[ http://ayon.ynput.io ]= - ·  ·

"""

@@ -41,8 +55,12 @@ class PythonInterpreterRegistry(JSONSettingRegistry):
    """

    def __init__(self):
        self.vendor = "pypeclub"
        self.product = "openpype"
        if AYON_SERVER_ENABLED:
            self.vendor = "ynput"
            self.product = "ayon"
        else:
            self.vendor = "pypeclub"
            self.product = "openpype"
        name = "python_interpreter_tool"
        path = appdirs.user_data_dir(self.product, self.vendor)
        super(PythonInterpreterRegistry, self).__init__(name, path)
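The vendor/product switch above changes where appdirs places the settings registry. A hedged sketch of the typical result; the exact paths depend on platform, appdirs version and user:

import appdirs

# AYON mode; on Windows this typically resolves under the user's AppData,
# e.g. C:\Users\<user>\AppData\Local\ynput\ayon, while on Linux appdirs
# usually omits the vendor, e.g. /home/<user>/.local/share/ayon
print(appdirs.user_data_dir("ayon", "ynput"))

# OpenPype mode keeps the previous location
print(appdirs.user_data_dir("openpype", "pypeclub"))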
@@ -339,7 +357,9 @@ class PythonInterpreterWidget(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(PythonInterpreterWidget, self).__init__(parent)

        self.setWindowTitle("OpenPype Console")
        self.setWindowTitle("{} Console".format(
            "AYON" if AYON_SERVER_ENABLED else "OpenPype"
        ))
        self.setWindowIcon(QtGui.QIcon(resources.get_openpype_icon_filepath()))

        self.ansi_escape = re.compile(

@@ -387,7 +407,10 @@ class PythonInterpreterWidget(QtWidgets.QWidget):
        self._tab_widget = tab_widget
        self._line_check_timer = line_check_timer

        self._append_lines([openpype_art])
        if AYON_SERVER_ENABLED:
            self._append_lines([ayon_art])
        else:
            self._append_lines([openpype_art])

        self._first_show = True
        self._splitter_size_ratio = None

@@ -1,23 +1,28 @@
# -*- coding: utf-8 -*-
"""Submitting render job to RoyalRender."""
import os
import re
import json
import platform
import re
import tempfile
import uuid
from datetime import datetime

import pyblish.api
from openpype.tests.lib import is_in_tests
from openpype.pipeline.publish.lib import get_published_workfile_instance
from openpype.pipeline.publish import KnownPublishError

from openpype.lib import BoolDef, NumberDef, is_running_from_build
from openpype.lib.execute import run_openpype_process
from openpype.modules.royalrender.api import Api as rrApi
from openpype.modules.royalrender.rr_job import (
    RRJob, CustomAttribute, get_rr_platform)
from openpype.lib import (
    is_running_from_build,
    BoolDef,
    NumberDef,
    CustomAttribute,
    RRJob,
    RREnvList,
    get_rr_platform,
)
from openpype.pipeline import OpenPypePyblishPluginMixin
from openpype.pipeline.publish import KnownPublishError
from openpype.pipeline.publish.lib import get_published_workfile_instance
from openpype.tests.lib import is_in_tests


class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,

@@ -302,3 +307,68 @@ class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
            path = path.replace(first_frame, "#" * padding)

        return path

    def inject_environment(self, instance, job):
        # type: (pyblish.api.Instance, RRJob) -> RRJob
        """Inject environment variables for RR submission.

        This function mimics the behaviour of the Deadline
        integration. It is just temporary solution until proper
        runtime environment injection is implemented in RR.

        Args:
            instance (pyblish.api.Instance): Publishing instance
            job (RRJob): RRJob instance to be injected.

        Returns:
            RRJob: Injected RRJob instance.

        Throws:
            RuntimeError: If any of the required env vars is missing.

        """

        temp_file_name = "{}_{}.json".format(
            datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
            str(uuid.uuid1())
        )

        export_url = os.path.join(tempfile.gettempdir(), temp_file_name)
        print(">>> Temporary path: {}".format(export_url))

        args = [
            "--headless",
            "extractenvironments",
            export_url
        ]

        anatomy_data = instance.context.data["anatomyData"]

        add_kwargs = {
            "project": anatomy_data["project"]["name"],
            "asset": instance.context.data["asset"],
            "task": anatomy_data["task"]["name"],
            "app": instance.context.data.get("appName"),
            "envgroup": "farm"
        }

        if os.getenv('IS_TEST'):
            args.append("--automatic-tests")

        if not all(add_kwargs.values()):
            raise RuntimeError((
                "Missing required env vars: AVALON_PROJECT, AVALON_ASSET,"
                " AVALON_TASK, AVALON_APP_NAME"
            ))

        for key, value in add_kwargs.items():
            args.extend([f"--{key}", value])
        self.log.debug("Executing: {}".format(" ".join(args)))
        run_openpype_process(*args, logger=self.log)

        self.log.debug("Loading file ...")
        with open(export_url) as fp:
            contents = json.load(fp)

        job.rrEnvList = RREnvList(contents).serialize()
        return job

@@ -2,7 +2,7 @@
"""Submitting render job to RoyalRender."""
import os

from maya.OpenMaya import MGlobal
from maya.OpenMaya import MGlobal  # noqa: F401

from openpype.modules.royalrender import lib
from openpype.pipeline.farm.tools import iter_expected_files

@@ -38,5 +38,6 @@ class CreateMayaRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
        job = self.get_job(instance, self.scene_path, first_file_path,
                           layer_name)
        job = self.update_job_with_host_specific(instance, job)
        job = self.inject_environment(instance, job)

        instance.data["rrJobs"].append(job)

@@ -25,6 +25,7 @@ class CreateNukeRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
        jobs = self.create_jobs(instance)
        for job in jobs:
            job = self.update_job_with_host_specific(instance, job)
            job = self.inject_environment(instance, job)

            instance.data["rrJobs"].append(job)

@@ -205,6 +205,9 @@ class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin,
        jobs_pre_ids = []
        for job in instance.data["rrJobs"]:  # type: RRJob
            if job.rrEnvList:
                if len(job.rrEnvList) > 2000:
                    self.log.warning(("Job environment is too long "
                                      f"{len(job.rrEnvList)} > 2000"))
                job_environ.update(
                    dict(RREnvList.parse(job.rrEnvList))
                )

@@ -32,7 +32,7 @@ class RREnvList(dict):
        """Parse rrEnvList string and return it as RREnvList object."""
        out = RREnvList()
        for var in data.split("~~~"):
            k, v = var.split("=")
            k, v = var.split("=", maxsplit=1)
            out[k] = v
        return out
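The maxsplit=1 argument matters when a value itself contains "=": only the first "=" separates the key from the value, which the old two-way unpack could not handle. A small demonstration:

var = "OCIO=/configs/config.ocio?version=2"
k, v = var.split("=", maxsplit=1)
print(k)  # OCIO
print(v)  # /configs/config.ocio?version=2

# the old form raises on the same input:
#   k, v = var.split("=")  ->  ValueError: too many values to unpack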
|
|
@ -172,7 +172,7 @@ class RRJob(object):
|
|||
|
||||
# Environment
|
||||
# only used in RR 8.3 and newer
|
||||
rrEnvList = attr.ib(default=None) # type: str
|
||||
rrEnvList = attr.ib(default=None, type=str) # type: str
|
||||
|
||||
|
||||
class SubmitterParameter:
|
||||
|
|
|
|||
|
|
@ -7,6 +7,8 @@ from openpype.pipeline.plugin_discover import (
|
|||
deregister_plugin_path
|
||||
)
|
||||
|
||||
from .load.utils import get_representation_path_from_context
|
||||
|
||||
|
||||
class LauncherAction(object):
|
||||
"""A custom action available"""
|
||||
|
|
@ -100,6 +102,10 @@ class InventoryAction(object):
|
|||
"""
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def filepath_from_context(cls, context):
|
||||
return get_representation_path_from_context(context)
|
||||
|
||||
|
||||
# Launcher action
|
||||
def discover_launcher_actions():
|
||||
|
|
|
|||
|
|
@ -145,6 +145,9 @@ def get_transferable_representations(instance):
|
|||
|
||||
trans_rep = representation.copy()
|
||||
|
||||
# remove publish_on_farm from representations tags
|
||||
trans_rep["tags"].remove("publish_on_farm")
|
||||
|
||||
staging_dir = trans_rep.get("stagingDir")
|
||||
|
||||
if staging_dir:
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ from abc import ABCMeta, abstractmethod
|
|||
|
||||
import six
|
||||
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
from openpype.client import (
|
||||
get_asset_by_name,
|
||||
get_linked_assets,
|
||||
|
|
@ -1272,31 +1273,54 @@ class PlaceholderLoadMixin(object):
|
|||
# Sort for readability
|
||||
families = list(sorted(families))
|
||||
|
||||
return [
|
||||
if AYON_SERVER_ENABLED:
|
||||
builder_type_enum_items = [
|
||||
{"label": "Current folder", "value": "context_folder"},
|
||||
# TODO implement linked folders
|
||||
# {"label": "Linked folders", "value": "linked_folders"},
|
||||
{"label": "All folders", "value": "all_folders"},
|
||||
]
|
||||
build_type_label = "Folder Builder Type"
|
||||
build_type_help = (
|
||||
"Folder Builder Type\n"
|
||||
"\nBuilder type describe what template loader will look"
|
||||
" for."
|
||||
"\nCurrent Folder: Template loader will look for products"
|
||||
" of current context folder (Folder /assets/bob will"
|
||||
" find asset)"
|
||||
"\nAll folders: All folders matching the regex will be"
|
||||
" used."
|
||||
)
|
||||
else:
|
||||
builder_type_enum_items = [
|
||||
{"label": "Current asset", "value": "context_asset"},
|
||||
{"label": "Linked assets", "value": "linked_asset"},
|
||||
{"label": "All assets", "value": "all_assets"},
|
||||
]
|
||||
build_type_label = "Asset Builder Type"
|
||||
build_type_help = (
|
||||
"Asset Builder Type\n"
|
||||
"\nBuilder type describe what template loader will look"
|
||||
" for."
|
||||
"\ncontext_asset : Template loader will look for subsets"
|
||||
" of current context asset (Asset bob will find asset)"
|
||||
"\nlinked_asset : Template loader will look for assets"
|
||||
" linked to current context asset."
|
||||
"\nLinked asset are looked in database under"
|
||||
" field \"inputLinks\""
|
||||
)
|
||||
|
||||
attr_defs = [
|
||||
attribute_definitions.UISeparatorDef(),
|
||||
attribute_definitions.UILabelDef("Main attributes"),
|
||||
attribute_definitions.UISeparatorDef(),
|
||||
|
||||
attribute_definitions.EnumDef(
|
||||
"builder_type",
|
||||
label="Asset Builder Type",
|
||||
label=build_type_label,
|
||||
default=options.get("builder_type"),
|
||||
items=[
|
||||
{"label": "Current asset", "value": "context_asset"},
|
||||
{"label": "Linked assets", "value": "linked_asset"},
|
||||
{"label": "All assets", "value": "all_assets"},
|
||||
],
|
||||
tooltip=(
|
||||
"Asset Builder Type\n"
|
||||
"\nBuilder type describe what template loader will look"
|
||||
" for."
|
||||
"\ncontext_asset : Template loader will look for subsets"
|
||||
" of current context asset (Asset bob will find asset)"
|
||||
"\nlinked_asset : Template loader will look for assets"
|
||||
" linked to current context asset."
|
||||
"\nLinked asset are looked in database under"
|
||||
" field \"inputLinks\""
|
||||
)
|
||||
items=builder_type_enum_items,
|
||||
tooltip=build_type_help
|
||||
),
|
||||
attribute_definitions.EnumDef(
|
||||
"family",
|
||||
|
|
@@ -1352,34 +1376,63 @@ class PlaceholderLoadMixin(object):
             attribute_definitions.UISeparatorDef(),
             attribute_definitions.UILabelDef("Optional attributes"),
             attribute_definitions.UISeparatorDef(),
-            attribute_definitions.TextDef(
-                "asset",
-                label="Asset filter",
-                default=options.get("asset"),
-                placeholder="regex filtering by asset name",
-                tooltip=(
-                    "Filtering assets by matching field regex to asset's name"
-                )
-            ),
-            attribute_definitions.TextDef(
-                "subset",
-                label="Subset filter",
-                default=options.get("subset"),
-                placeholder="regex filtering by subset name",
-                tooltip=(
-                    "Filtering assets by matching field regex to subset's name"
-                )
-            ),
-            attribute_definitions.TextDef(
-                "hierarchy",
-                label="Hierarchy filter",
-                default=options.get("hierarchy"),
-                placeholder="regex filtering by asset's hierarchy",
-                tooltip=(
-                    "Filtering assets by matching field asset's hierarchy"
-                )
-            )
         ]
+        if AYON_SERVER_ENABLED:
+            attr_defs.extend([
+                attribute_definitions.TextDef(
+                    "folder_path",
+                    label="Folder filter",
+                    default=options.get("folder_path"),
+                    placeholder="regex filtering by folder path",
+                    tooltip=(
+                        "Filtering assets by matching"
+                        " field regex to folder path"
+                    )
+                ),
+                attribute_definitions.TextDef(
+                    "product_name",
+                    label="Product filter",
+                    default=options.get("product_name"),
+                    placeholder="regex filtering by product name",
+                    tooltip=(
+                        "Filtering assets by matching"
+                        " field regex to product name"
+                    )
+                ),
+            ])
+        else:
+            attr_defs.extend([
+                attribute_definitions.TextDef(
+                    "asset",
+                    label="Asset filter",
+                    default=options.get("asset"),
+                    placeholder="regex filtering by asset name",
+                    tooltip=(
+                        "Filtering assets by matching"
+                        " field regex to asset's name"
+                    )
+                ),
+                attribute_definitions.TextDef(
+                    "subset",
+                    label="Subset filter",
+                    default=options.get("subset"),
+                    placeholder="regex filtering by subset name",
+                    tooltip=(
+                        "Filtering assets by matching"
+                        " field regex to subset's name"
+                    )
+                ),
+                attribute_definitions.TextDef(
+                    "hierarchy",
+                    label="Hierarchy filter",
+                    default=options.get("hierarchy"),
+                    placeholder="regex filtering by asset's hierarchy",
+                    tooltip=(
+                        "Filtering assets by matching field asset's hierarchy"
+                    )
+                )
+            ])
+        return attr_defs
 
     def parse_loader_args(self, loader_args):
         """Helper function to parse string of loader arugments.
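A quick reading aid (not part of the commit; keys taken from the attribute definitions above): the stored option is the enum item's "value", so a placeholder configured against an AYON server might carry

    options = {
        "builder_type": "all_folders",   # or "context_folder"
        "folder_path": "^/assets/.*",    # regex used when builder_type is "all_folders"
    }
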
@@ -1409,6 +1462,117 @@ class PlaceholderLoadMixin(object):
 
         return {}
 
+    def _query_by_folder_regex(self, project_name, folder_regex):
+        """Query folders by folder path regex.
+
+        WARNING:
+            This method will be removed once the same functionality is
+            available in ayon-python-api.
+
+        Args:
+            project_name (str): Project name.
+            folder_regex (str): Regex for folder path.
+
+        Returns:
+            list[str]: List of folder paths.
+        """
+
+        from ayon_api.graphql_queries import folders_graphql_query
+        from openpype.client import get_ayon_server_api_connection
+
+        query = folders_graphql_query({"id"})
+
+        folders_field = None
+        for child in query._children:
+            if child.path != "project":
+                continue
+
+            for project_child in child._children:
+                if project_child.path == "project/folders":
+                    folders_field = project_child
+                    break
+            if folders_field:
+                break
+
+        if "folderPathRegex" not in query._variables:
+            folder_path_regex_var = query.add_variable(
+                "folderPathRegex", "String!"
+            )
+            folders_field.set_filter("pathEx", folder_path_regex_var)
+
+        query.set_variable_value("projectName", project_name)
+        if folder_regex:
+            query.set_variable_value("folderPathRegex", folder_regex)
+
+        api = get_ayon_server_api_connection()
+        for parsed_data in query.continuous_query(api):
+            for folder in parsed_data["project"]["folders"]:
+                yield folder["id"]
+
+    def _get_representations_ayon(self, placeholder):
+        # An OpenPype placeholder loaded in AYON
+        if "asset" in placeholder.data:
+            return []
+
+        representation_name = placeholder.data["representation"]
+        if not representation_name:
+            return []
+
+        project_name = self.builder.project_name
+        current_asset_doc = self.builder.current_asset_doc
+
+        folder_path_regex = placeholder.data["folder_path"]
+        product_name_regex_value = placeholder.data["product_name"]
+        product_name_regex = None
+        if product_name_regex_value:
+            product_name_regex = re.compile(product_name_regex_value)
+        product_type = placeholder.data["family"]
+
+        builder_type = placeholder.data["builder_type"]
+        folder_ids = []
+        if builder_type == "context_folder":
+            folder_ids = [current_asset_doc["_id"]]
+
+        elif builder_type == "all_folders":
+            folder_ids = list(self._query_by_folder_regex(
+                project_name, folder_path_regex
+            ))
+
+        if not folder_ids:
+            return []
+
+        from ayon_api import get_products, get_last_versions
+
+        products = list(get_products(
+            project_name,
+            folder_ids=folder_ids,
+            product_types=[product_type],
+            fields={"id", "name"}
+        ))
+        filtered_product_ids = set()
+        for product in products:
+            if (
+                product_name_regex is None
+                or product_name_regex.match(product["name"])
+            ):
+                filtered_product_ids.add(product["id"])
+
+        if not filtered_product_ids:
+            return []
+
+        version_ids = set(
+            version["id"]
+            for version in get_last_versions(
+                project_name, filtered_product_ids, fields={"id"}
+            ).values()
+        )
+        return list(get_representations(
+            project_name,
+            representation_names=[representation_name],
+            version_ids=version_ids
+        ))
+
 
     def _get_representations(self, placeholder):
         """Prepared query of representations based on load options.
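A rough usage sketch (not part of the commit): `_query_by_folder_regex` is a generator that yields folder ids page by page, so callers either iterate it lazily or materialize it, as `_get_representations_ayon` does above.

    # hypothetical project name and regex; yields ids such as "f1b2..."
    folder_ids = list(self._query_by_folder_regex("demo_project", "^/assets/char_.*"))
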
@@ -1428,6 +1592,13 @@ class PlaceholderLoadMixin(object):
         from placeholder data.
         """
 
+        if AYON_SERVER_ENABLED:
+            return self._get_representations_ayon(placeholder)
+
+        # An AYON placeholder loaded in OpenPype
+        if "folder_path" in placeholder.data:
+            return []
+
         project_name = self.builder.project_name
         current_asset_doc = self.builder.current_asset_doc
         linked_asset_docs = self.builder.linked_asset_docs
@@ -68,11 +68,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
     ]
 
     def process(self, instance):
-        # editorial would fail since they might not be in database yet
-        new_asset_publishing = instance.data.get("newAssetPublishing")
-        if new_asset_publishing:
-            self.log.debug("Instance is creating new asset. Skipping.")
-            return
 
         anatomy = instance.context.data["anatomy"]
@@ -89,8 +89,8 @@ class ExtractBurnin(publish.Extractor):
 
         self.main_process(instance)
 
-        # Remove any representations tagged for deletion.
-        # QUESTION Is possible to have representation with "delete" tag?
+        # Remove only representation tagged with both
+        # tags `delete` and `burnin`
         for repre in tuple(instance.data["representations"]):
             if all(x in repre.get("tags", []) for x in ['delete', 'burnin']):
                 self.log.debug("Removing representation: {}".format(repre))
@@ -319,6 +319,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
         Returns:
             str: temp fpath
         """
+        name = name.replace("/", "_")
         return os.path.normpath(
             tempfile.mktemp(
                 prefix="pyblish_tmp_{}_".format(name),
@@ -89,8 +89,18 @@ class ExtractReview(pyblish.api.InstancePlugin):
         # Make sure cleanup happens and pop representations with "delete" tag.
         for repre in tuple(instance.data["representations"]):
             tags = repre.get("tags") or []
-            if "delete" in tags and "thumbnail" not in tags:
-                instance.data["representations"].remove(repre)
+            # Representation is not marked to be deleted
+            if "delete" not in tags:
+                continue
+
+            # The representation can be used as thumbnail source
+            if "thumbnail" in tags or "need_thumbnail" in tags:
+                continue
+
+            self.log.debug(
+                "Removing representation: {}".format(repre)
+            )
+            instance.data["representations"].remove(repre)
 
     def _get_outputs_for_instance(self, instance):
         host_name = instance.context.data["hostName"]
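The new cleanup rule above can be read as a single predicate. A minimal sketch (not part of the commit; tag names taken from the hunk):

    def _should_remove_repre(tags):
        # drop only "delete"-tagged representations that are not
        # thumbnail sources ("thumbnail" or "need_thumbnail")
        return (
            "delete" in tags
            and "thumbnail" not in tags
            and "need_thumbnail" not in tags
        )
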
@@ -321,19 +331,26 @@ class ExtractReview(pyblish.api.InstancePlugin):
 
             # Create copy of representation
             new_repre = copy.deepcopy(repre)
+            new_tags = new_repre.get("tags") or []
             # Make sure new representation has origin staging dir
             # - this is because source representation may change
             # it's staging dir because of ffmpeg conversion
             new_repre["stagingDir"] = src_repre_staging_dir
 
             # Remove "delete" tag from new repre if there is
-            if "delete" in new_repre["tags"]:
-                new_repre["tags"].remove("delete")
+            if "delete" in new_tags:
+                new_tags.remove("delete")
+
+            if "need_thumbnail" in new_tags:
+                new_tags.remove("need_thumbnail")
 
             # Add additional tags from output definition to representation
             for tag in output_def["tags"]:
-                if tag not in new_repre["tags"]:
-                    new_repre["tags"].append(tag)
+                if tag not in new_tags:
+                    new_tags.append(tag)
+
+            # Return tags to new representation
+            new_repre["tags"] = new_tags
 
             # Add burnin link from output definition to representation
             for burnin in output_def["burnins"]:
@@ -376,9 +376,13 @@ class ExtractReviewSlate(publish.Extractor):
 
         # Remove any representations tagged for deletion.
         for repre in inst_data.get("representations", []):
-            if "delete" in repre.get("tags", []):
-                self.log.debug("Removing representation: {}".format(repre))
-                inst_data["representations"].remove(repre)
+            tags = repre.get("tags", [])
+            if "delete" not in tags:
+                continue
+            if "need_thumbnail" in tags:
+                continue
+            self.log.debug("Removing representation: {}".format(repre))
+            inst_data["representations"].remove(repre)
 
         self.log.debug(inst_data["representations"])
@@ -1,3 +1,4 @@
+import copy
 import os
 import subprocess
 import tempfile
@@ -5,12 +6,17 @@ import tempfile
 import pyblish.api
 from openpype.lib import (
     get_ffmpeg_tool_args,
     get_oiio_tool_args,
-    is_oiio_supported,
+    get_ffprobe_data,
+    is_oiio_supported,
+    get_rescaled_command_arguments,
-    run_subprocess,
     path_to_subprocess_arg,
+    run_subprocess,
 )
+from openpype.lib.transcoding import convert_colorspace
+from openpype.lib.transcoding import VIDEO_EXTENSIONS
 
 
 class ExtractThumbnail(pyblish.api.InstancePlugin):
@@ -22,13 +28,49 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
         "imagesequence", "render", "render2d", "prerender",
         "source", "clip", "take", "online", "image"
     ]
-    hosts = ["shell", "fusion", "resolve", "traypublisher", "substancepainter"]
+    hosts = [
+        "shell",
+        "fusion",
+        "resolve",
+        "traypublisher",
+        "substancepainter",
+        "nuke",
+    ]
     enabled = False
 
     # presetable attribute
+    integrate_thumbnail = False
+    target_size = {
+        "type": "resize",
+        "width": 1920,
+        "height": 1080
+    }
+    background_color = None
+    duration_split = 0.5
+    # attribute presets from settings
+    oiiotool_defaults = None
     ffmpeg_args = None
 
     def process(self, instance):
+        # run main process
+        self._main_process(instance)
+
+        # Make sure cleanup happens to representations which are having both
+        # tags `delete` and `need_thumbnail`
+        for repre in tuple(instance.data.get("representations", [])):
+            tags = repre.get("tags") or []
+            # skip representations which are going to be published on farm
+            if "publish_on_farm" in tags:
+                continue
+            if (
+                "delete" in tags
+                and "need_thumbnail" in tags
+            ):
+                self.log.debug(
+                    "Removing representation: {}".format(repre)
+                )
+                instance.data["representations"].remove(repre)
+
+    def _main_process(self, instance):
         subset_name = instance.data["subset"]
         instance_repres = instance.data.get("representations")
         if not instance_repres:
@@ -61,7 +103,13 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             self.log.debug("Skipping crypto passes.")
             return
 
-        filtered_repres = self._get_filtered_repres(instance)
+        # first check for any explicitly marked representations for thumbnail
+        explicit_repres = self._get_explicit_repres_for_thumbnail(instance)
+        if explicit_repres:
+            filtered_repres = explicit_repres
+        else:
+            filtered_repres = self._get_filtered_repres(instance)
+
         if not filtered_repres:
             self.log.info(
                 "Instance doesn't have representations that can be used "
@@ -82,29 +130,62 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
         oiio_supported = is_oiio_supported()
         for repre in filtered_repres:
             repre_files = repre["files"]
+            src_staging = os.path.normpath(repre["stagingDir"])
             if not isinstance(repre_files, (list, tuple)):
-                input_file = repre_files
+                # convert any video file to frame so oiio doesn't need to
+                # read video file (it is slow) and also we are having control
+                # over which frame is used for thumbnail
+                # this will also work with ffmpeg fallback conversion in case
+                # oiio is not supported
+                repre_extension = os.path.splitext(repre_files)[1]
+                if repre_extension in VIDEO_EXTENSIONS:
+                    video_file_path = os.path.join(
+                        src_staging, repre_files
+                    )
+                    file_path = self._create_frame_from_video(
+                        video_file_path,
+                        dst_staging
+                    )
+                    if file_path:
+                        src_staging, input_file = os.path.split(file_path)
+                else:
+                    # if it is not video file then just use first file
+                    input_file = repre_files
             else:
-                file_index = int(float(len(repre_files)) * 0.5)
+                repre_files_thumb = copy.deepcopy(repre_files)
+                # exclude first frame if slate in representation tags
+                if "slate-frame" in repre.get("tags", []):
+                    repre_files_thumb = repre_files_thumb[1:]
+                file_index = int(
+                    float(len(repre_files_thumb)) * self.duration_split)
                 input_file = repre_files[file_index]
 
-            src_staging = os.path.normpath(repre["stagingDir"])
             full_input_path = os.path.join(src_staging, input_file)
             self.log.debug("input {}".format(full_input_path))
 
             filename = os.path.splitext(input_file)[0]
             jpeg_file = filename + "_thumb.jpg"
             full_output_path = os.path.join(dst_staging, jpeg_file)
+            colorspace_data = repre.get("colorspaceData")
 
-            if oiio_supported:
-                self.log.debug("Trying to convert with OIIO")
+            # only use OIIO if it is supported and representation has
+            # colorspace data
+            if oiio_supported and colorspace_data:
+                self.log.debug(
+                    "Trying to convert with OIIO "
+                    "with colorspace data: {}".format(colorspace_data)
+                )
                 # If the input can read by OIIO then use OIIO method for
                 # conversion otherwise use ffmpeg
-                thumbnail_created = self.create_thumbnail_oiio(
-                    full_input_path, full_output_path
+                thumbnail_created = self._create_thumbnail_oiio(
+                    full_input_path,
+                    full_output_path,
+                    colorspace_data
                 )
 
             # Try to use FFMPEG if OIIO is not supported or for cases when
-            # oiiotool isn't available
+            # oiiotool isn't available or representation is not having
+            # colorspace data
             if not thumbnail_created:
                 if oiio_supported:
                     self.log.debug(
@@ -112,7 +193,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
                         " can't be read by OIIO."
                     )
 
-                thumbnail_created = self.create_thumbnail_ffmpeg(
+                thumbnail_created = self._create_thumbnail_ffmpeg(
                     full_input_path, full_output_path
                 )
@@ -120,25 +201,58 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             if not thumbnail_created:
                 continue
 
+            if len(explicit_repres) > 1:
+                repre_name = "thumbnail_{}".format(repre["outputName"])
+            else:
+                repre_name = "thumbnail"
+
+            # add thumbnail path to instance data for integrator
+            instance_thumb_path = instance.data.get("thumbnailPath")
+            if (
+                not instance_thumb_path
+                or not os.path.isfile(instance_thumb_path)
+            ):
+                self.log.debug(
+                    "Adding thumbnail path to instance data: {}".format(
+                        full_output_path
+                    )
+                )
+                instance.data["thumbnailPath"] = full_output_path
+
+            new_repre_tags = ["thumbnail"]
+            # for workflows which needs to have thumbnails published as
+            # separate representations `delete` tag should not be added
+            if not self.integrate_thumbnail:
+                new_repre_tags.append("delete")
+
             new_repre = {
-                "name": "thumbnail",
+                "name": repre_name,
                 "ext": "jpg",
                 "files": jpeg_file,
                 "stagingDir": dst_staging,
                 "thumbnail": True,
-                "tags": ["thumbnail"]
+                "tags": new_repre_tags
             }
 
-            # adding representation
-            self.log.debug(
-                "Adding thumbnail representation: {}".format(new_repre)
-            )
             instance.data["representations"].append(new_repre)
-            # There is no need to create more then one thumbnail
-            break
+
+            if explicit_repres:
+                # this key will then align assetVersion ftrack thumbnail sync
+                new_repre["outputName"] = (
+                    repre.get("outputName") or repre["name"])
+                self.log.debug(
+                    "Adding explicit thumbnail representation: {}".format(
+                        new_repre))
+            else:
+                self.log.debug(
+                    "Adding thumbnail representation: {}".format(new_repre)
+                )
+                # There is no need to create more then one thumbnail
+                break
 
         if not thumbnail_created:
-            self.log.warning("Thumbanil has not been created.")
+            self.log.warning("Thumbnail has not been created.")
 
     def _is_review_instance(self, instance):
         # TODO: We should probably handle "not creating" of thumbnail
@@ -154,12 +268,42 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             return True
         return False
 
+    def _get_explicit_repres_for_thumbnail(self, instance):
+        src_repres = instance.data.get("representations") or []
+        # This is mainly for Nuke where we have multiple representations for
+        # one instance and representations are tagged for thumbnail.
+        # First check if any of the representations have
+        # `need_thumbnail` in tags and add them to filtered_repres
+        need_thumb_repres = [
+            repre for repre in src_repres
+            if "need_thumbnail" in repre.get("tags", [])
+            if "publish_on_farm" not in repre.get("tags", [])
+        ]
+        if not need_thumb_repres:
+            return []
+
+        self.log.info(
+            "Instance has representation with tag `need_thumbnail`. "
+            "Using only this representations for thumbnail creation. "
+        )
+        self.log.debug(
+            "Representations: {}".format(need_thumb_repres)
+        )
+        return need_thumb_repres
+
     def _get_filtered_repres(self, instance):
         filtered_repres = []
         src_repres = instance.data.get("representations") or []
         for repre in src_repres:
             self.log.debug(repre)
             tags = repre.get("tags") or []
+
+            if "publish_on_farm" in tags:
+                # only process representations with are going
+                # to be published locally
+                continue
+
             valid = "review" in tags or "thumb-nuke" in tags
             if not valid:
                 continue
@@ -173,17 +317,68 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             filtered_repres.append(repre)
         return filtered_repres
 
-    def create_thumbnail_oiio(self, src_path, dst_path):
-        self.log.debug("Extracting thumbnail with OIIO: {}".format(dst_path))
-        oiio_cmd = get_oiio_tool_args(
-            "oiiotool",
-            "-a", src_path,
-            "-o", dst_path
-        )
-        self.log.debug("running: {}".format(" ".join(oiio_cmd)))
+    def _create_thumbnail_oiio(
+        self,
+        src_path,
+        dst_path,
+        colorspace_data,
+    ):
+        """Create thumbnail using OIIO tool oiiotool
+
+        Args:
+            src_path (str): path to source file
+            dst_path (str): path to destination file
+            colorspace_data (dict): colorspace data from representation
+                keys:
+                    colorspace (str)
+                    config (dict)
+                    display (Optional[str])
+                    view (Optional[str])
+
+        Returns:
+            str: path to created thumbnail
+        """
+        self.log.info("Extracting thumbnail {}".format(dst_path))
+        resolution_arg = self._get_resolution_arg("oiiotool", src_path)
+
+        repre_display = colorspace_data.get("display")
+        repre_view = colorspace_data.get("view")
+        oiio_default_type = None
+        oiio_default_display = None
+        oiio_default_view = None
+        oiio_default_colorspace = None
+        # first look into representation colorspaceData, perhaps it has
+        # display and view
+        if all([repre_display, repre_view]):
+            self.log.info(
+                "Using Display & View from "
+                "representation: '{} ({})'".format(
+                    repre_view,
+                    repre_display
+                )
+            )
+        # if representation doesn't have display and view then use
+        # oiiotool_defaults
+        elif self.oiiotool_defaults:
+            oiio_default_type = self.oiiotool_defaults["type"]
+            if "colorspace" in oiio_default_type:
+                oiio_default_colorspace = self.oiiotool_defaults["colorspace"]
+            else:
+                oiio_default_display = self.oiiotool_defaults["display"]
+                oiio_default_view = self.oiiotool_defaults["view"]
 
         try:
-            run_subprocess(oiio_cmd, logger=self.log)
-            return True
+            convert_colorspace(
+                src_path,
+                dst_path,
+                colorspace_data["config"]["path"],
+                colorspace_data["colorspace"],
+                display=repre_display or oiio_default_display,
+                view=repre_view or oiio_default_view,
+                target_colorspace=oiio_default_colorspace,
+                additional_command_args=resolution_arg,
+                logger=self.log,
+            )
         except Exception:
             self.log.warning(
                 "Failed to create thumbnail using oiiotool",
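The selection above follows a three-step precedence. A simplified sketch (not part of the commit; key names taken from the hunk):

    def _pick_color_target(colorspace_data, oiiotool_defaults):
        display = colorspace_data.get("display")
        view = colorspace_data.get("view")
        if display and view:
            # 1. representation carries its own display & view
            return {"display": display, "view": view}
        if oiiotool_defaults and "colorspace" in oiiotool_defaults["type"]:
            # 2. settings default to a target colorspace
            return {"target_colorspace": oiiotool_defaults["colorspace"]}
        if oiiotool_defaults:
            # 3. settings default to a display & view pair
            return {
                "display": oiiotool_defaults["display"],
                "view": oiiotool_defaults["view"],
            }
        return {}
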
@@ -191,9 +386,11 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
             )
             return False
 
-    def create_thumbnail_ffmpeg(self, src_path, dst_path):
-        self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path))
+        return True
+
+    def _create_thumbnail_ffmpeg(self, src_path, dst_path):
+        self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path))
+        resolution_arg = self._get_resolution_arg("ffmpeg", src_path)
         ffmpeg_path_args = get_ffmpeg_tool_args("ffmpeg")
         ffmpeg_args = self.ffmpeg_args or {}
@@ -215,6 +412,10 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
         jpeg_items.extend(ffmpeg_args.get("output") or [])
         # we just want one frame from movie files
         jpeg_items.extend(["-vframes", "1"])
 
+        if resolution_arg:
+            jpeg_items.extend(resolution_arg)
+
         # output file
         jpeg_items.append(path_to_subprocess_arg(dst_path))
         subprocess_command = " ".join(jpeg_items)
@@ -229,3 +430,69 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
                 exc_info=True
             )
             return False
+
+    def _create_frame_from_video(self, video_file_path, output_dir):
+        """Convert video file to one frame image via ffmpeg"""
+        # create output file path
+        base_name = os.path.basename(video_file_path)
+        filename = os.path.splitext(base_name)[0]
+        output_thumb_file_path = os.path.join(
+            output_dir, "{}.png".format(filename))
+
+        # Set video input attributes
+        max_int = str(2147483647)
+        video_data = get_ffprobe_data(video_file_path, logger=self.log)
+        duration = float(video_data["format"]["duration"])
+
+        cmd_args = [
+            "-y",
+            "-ss", str(duration * self.duration_split),
+            "-i", video_file_path,
+            "-analyzeduration", max_int,
+            "-probesize", max_int,
+            "-vframes", "1"
+        ]
+
+        # add output file path
+        cmd_args.append(output_thumb_file_path)
+
+        # create ffmpeg command
+        cmd = get_ffmpeg_tool_args(
+            "ffmpeg",
+            *cmd_args
+        )
+        try:
+            # run subprocess
+            self.log.debug("Executing: {}".format(" ".join(cmd)))
+            run_subprocess(cmd, logger=self.log)
+            self.log.debug(
+                "Thumbnail created: {}".format(output_thumb_file_path))
+            return output_thumb_file_path
+        except RuntimeError as error:
+            self.log.warning(
+                "Failed intermediate thumb source using ffmpeg: {}".format(
+                    error)
+            )
+            return None
+
+    def _get_resolution_arg(
+        self,
+        application,
+        input_path,
+    ):
+        # get settings
+        if self.target_size.get("type") == "source":
+            return []
+
+        target_width = self.target_size["width"]
+        target_height = self.target_size["height"]
+
+        # form arg string per application
+        return get_rescaled_command_arguments(
+            application,
+            input_path,
+            target_width,
+            target_height,
+            bg_color=self.background_color,
+            log=self.log
+        )
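The seek position in `_create_frame_from_video` is plain arithmetic on the probed duration. A worked example (assumed numbers): for a 20.0 s clip with the default `duration_split` of 0.5, ffmpeg is asked to seek to `-ss 10.0` and grab a single frame (`-vframes 1`); a `duration_split` of 0.25 would grab the frame at 5.0 s instead.
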
@@ -137,7 +137,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
         "mvUsd",
         "mvUsdComposition",
         "mvUsdOverride",
-        "simpleUnrealTexture",
         "online",
         "uasset",
         "blendScene",
@@ -72,7 +72,7 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
         )
 
     def _prepare_instances(self, context):
-        context_thumbnail_path = context.get("thumbnailPath")
+        context_thumbnail_path = context.data.get("thumbnailPath")
         valid_context_thumbnail = bool(
             context_thumbnail_path
             and os.path.exists(context_thumbnail_path)
@@ -92,8 +92,13 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
                 continue
 
             # Find thumbnail path on instance
-            thumbnail_path = self._get_instance_thumbnail_path(
-                published_repres)
+            thumbnail_source = instance.data.get("thumbnailSource")
+            thumbnail_path = instance.data.get("thumbnailPath")
+            thumbnail_path = (
+                thumbnail_source
+                or thumbnail_path
+                or self._get_instance_thumbnail_path(published_repres)
+            )
             if thumbnail_path:
                 self.log.debug((
                     "Found thumbnail path for instance \"{}\"."
@@ -131,7 +136,7 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
         thumb_repre_doc = None
         for repre_info in published_representations.values():
             repre_doc = repre_info["representation"]
-            if repre_doc["name"].lower() == "thumbnail":
+            if "thumbnail" in repre_doc["name"].lower():
                 thumb_repre_doc = repre_doc
                 break
@@ -157,8 +162,8 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
     ):
         from openpype.client.server.operations import create_thumbnail
 
-        op_session = OperationsSession()
-
+        # Make sure each entity id has defined only one thumbnail id
+        thumbnail_info_by_entity_id = {}
         for instance_item in filtered_instance_items:
             instance, thumbnail_path, version_id = instance_item
             instance_label = self._get_instance_label(instance)
@@ -172,12 +177,10 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
             thumbnail_id = create_thumbnail(project_name, thumbnail_path)
 
             # Set thumbnail id for version
-            op_session.update_entity(
-                project_name,
-                version_doc["type"],
-                version_doc["_id"],
-                {"data.thumbnail_id": thumbnail_id}
-            )
+            thumbnail_info_by_entity_id[version_id] = {
+                "thumbnail_id": thumbnail_id,
+                "entity_type": version_doc["type"],
+            }
             if version_doc["type"] == "hero_version":
                 version_name = "Hero"
             else:
@@ -187,16 +190,23 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
             ))
 
             asset_entity = instance.data["assetEntity"]
-            op_session.update_entity(
-                project_name,
-                asset_entity["type"],
-                asset_entity["_id"],
-                {"data.thumbnail_id": thumbnail_id}
-            )
+            thumbnail_info_by_entity_id[asset_entity["_id"]] = {
+                "thumbnail_id": thumbnail_id,
+                "entity_type": "asset",
+            }
             self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
                 asset_entity["name"], version_id
             ))
 
+        op_session = OperationsSession()
+        for entity_id, thumbnail_info in thumbnail_info_by_entity_id.items():
+            thumbnail_id = thumbnail_info["thumbnail_id"]
+            op_session.update_entity(
+                project_name,
+                thumbnail_info["entity_type"],
+                entity_id,
+                {"data.thumbnail_id": thumbnail_id}
+            )
         op_session.commit()
 
     def _get_instance_label(self, instance):
@@ -185,7 +185,7 @@ class PypeCommands:
                 task,
                 app,
                 env_group=env_group,
-                launch_type=LaunchTypes.farm_render,
+                launch_type=LaunchTypes.farm_render
             )
         else:
             env = os.environ.copy()
@@ -214,7 +214,7 @@ class PypeCommands:
 
     def run_tests(self, folder, mark, pyargs,
                   test_data_folder, persist, app_variant, timeout, setup_only,
-                  mongo_url, app_group):
+                  mongo_url, app_group, dump_databases):
         """
         Runs tests from 'folder'
 
@@ -275,6 +275,13 @@ class PypeCommands:
         if mongo_url:
             args.extend(["--mongo_url", mongo_url])
 
+        if dump_databases:
+            msg = "dump_databases format is not recognized: {}".format(
+                dump_databases
+            )
+            assert dump_databases in ["bson", "json"], msg
+            args.extend(["--dump_databases", dump_databases])
+
         print("run_tests args: {}".format(args))
         import pytest
         pytest.main(args)
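Only the two literal formats pass the new assert. A hypothetical invocation (the exact entry point depends on the local setup; the flag name comes from the hunk above):

    python start.py runtests --folder tests/integration --dump_databases json

Any other value fails the assert before pytest is launched.
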
BIN openpype/resources/app_icons/wrap.png (new file; binary file not shown, 1 KiB)
@@ -572,6 +572,27 @@ def _convert_maya_project_settings(ayon_settings, output):
             for item in viewport_options["pluginObjects"]
         }
 
+    ayon_playblast_settings = ayon_publish["ExtractPlayblast"]["profiles"]
+    if ayon_playblast_settings:
+        for setting in ayon_playblast_settings:
+            capture_preset = setting["capture_preset"]
+            display_options = capture_preset["DisplayOptions"]
+            for key in ("background", "backgroundBottom", "backgroundTop"):
+                display_options[key] = _convert_color(display_options[key])
+
+            for src_key, dst_key in (
+                ("DisplayOptions", "Display Options"),
+                ("ViewportOptions", "Viewport Options"),
+                ("CameraOptions", "Camera Options"),
+            ):
+                capture_preset[dst_key] = capture_preset.pop(src_key)
+
+            viewport_options = capture_preset["Viewport Options"]
+            viewport_options["pluginObjects"] = {
+                item["name"]: item["value"]
+                for item in viewport_options["pluginObjects"]
+            }
+
     # Extract Camera Alembic bake attributes
     try:
         bake_attributes = json.loads(
@@ -821,28 +842,6 @@ def _convert_nuke_project_settings(ayon_settings, output):
         collect_instance_data.pop(
             "sync_workfile_version_on_product_types"))
 
-    # TODO 'ExtractThumbnail' does not have ideal schema in v3
-    ayon_extract_thumbnail = ayon_publish["ExtractThumbnail"]
-    new_thumbnail_nodes = {}
-    for item in ayon_extract_thumbnail["nodes"]:
-        name = item["nodeclass"]
-        value = []
-        for knob in _convert_nuke_knobs(item["knobs"]):
-            knob_name = knob["name"]
-            # This may crash
-            if knob["type"] == "expression":
-                knob_value = knob["expression"]
-            else:
-                knob_value = knob["value"]
-            value.append([knob_name, knob_value])
-        new_thumbnail_nodes[name] = value
-
-    ayon_extract_thumbnail["nodes"] = new_thumbnail_nodes
-
-    if "reposition_nodes" in ayon_extract_thumbnail:
-        for item in ayon_extract_thumbnail["reposition_nodes"]:
-            item["knobs"] = _convert_nuke_knobs(item["knobs"])
-
     # --- ImageIO ---
     # NOTE 'monitorOutLut' is maybe not yet in v3 (ut should be)
     _convert_host_imageio(ayon_nuke)
@@ -1241,6 +1240,26 @@ def _convert_global_project_settings(ayon_settings, output, default_settings):
 
         profile["outputs"] = new_outputs
 
+    # ExtractThumbnail plugin
+    ayon_extract_thumbnail = ayon_publish["ExtractThumbnail"]
+    # fix display and view at oiio defaults
+    ayon_default_oiio = copy.deepcopy(
+        ayon_extract_thumbnail["oiiotool_defaults"])
+    display_and_view = ayon_default_oiio.pop("display_and_view")
+    ayon_default_oiio["display"] = display_and_view["display"]
+    ayon_default_oiio["view"] = display_and_view["view"]
+    ayon_extract_thumbnail["oiiotool_defaults"] = ayon_default_oiio
+    # fix target size
+    ayon_default_resize = copy.deepcopy(ayon_extract_thumbnail["target_size"])
+    resize = ayon_default_resize.pop("resize")
+    ayon_default_resize["width"] = resize["width"]
+    ayon_default_resize["height"] = resize["height"]
+    ayon_extract_thumbnail["target_size"] = ayon_default_resize
+    # fix background color
+    ayon_extract_thumbnail["background_color"] = _convert_color(
+        ayon_extract_thumbnail["background_color"]
+    )
+
     # ExtractOIIOTranscode plugin
     extract_oiio_transcode = ayon_publish["ExtractOIIOTranscode"]
     extract_oiio_transcode_profiles = extract_oiio_transcode["profiles"]
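For clarity, the oiiotool_defaults flattening above turns the nested AYON shape into the flat v3 shape, roughly (values assumed):

    in:  {"type": "display_and_view", "display_and_view": {"display": "ACES", "view": "sRGB"}}
    out: {"type": "display_and_view", "display": "ACES", "view": "sRGB"}

and target_size is flattened the same way, from a nested {"resize": {"width": ..., "height": ...}} to top-level "width"/"height" keys.
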
|||
|
|
@ -38,16 +38,6 @@
|
|||
"file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}",
|
||||
"path": "{@folder}/{@file}"
|
||||
},
|
||||
"simpleUnrealTextureHero": {
|
||||
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/hero",
|
||||
"file": "{originalBasename}.{ext}",
|
||||
"path": "{@folder}/{@file}"
|
||||
},
|
||||
"simpleUnrealTexture": {
|
||||
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{@version}",
|
||||
"file": "{originalBasename}_{@version}.{ext}",
|
||||
"path": "{@folder}/{@file}"
|
||||
},
|
||||
"online": {
|
||||
"folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}",
|
||||
"file": "{originalBasename}<.{@frame}><_{udim}>.{ext}",
|
||||
|
|
@@ -68,8 +58,6 @@
     },
     "__dynamic_keys_labels__": {
         "maya2unreal": "Maya to Unreal",
-        "simpleUnrealTextureHero": "Simple Unreal Texture - Hero",
-        "simpleUnrealTexture": "Simple Unreal Texture",
         "online": "online",
         "tycache": "tycache",
         "source": "source",
|||
|
|
@ -107,7 +107,8 @@
|
|||
"use_published": true,
|
||||
"priority": 50,
|
||||
"chunk_size": 10,
|
||||
"group": "none"
|
||||
"group": "none",
|
||||
"job_delay": "00:00:00:00"
|
||||
},
|
||||
"ProcessSubmittedCacheJobOnFarm": {
|
||||
"enabled": true,
|
||||
|
|
|
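The new "job_delay" value is a timecode-style string. Going by the settings label added further below ("Delay job (timecode dd:hh:mm:ss)"), a value of "00:01:30:00" would read as a delay of 1 hour 30 minutes (assumed interpretation; the default "00:00:00:00" means no delay).
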
|||
|
|
@ -70,6 +70,25 @@
|
|||
},
|
||||
"ExtractThumbnail": {
|
||||
"enabled": true,
|
||||
"integrate_thumbnail": false,
|
||||
"background_color": [
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
255
|
||||
],
|
||||
"duration_split": 0.5,
|
||||
"target_size": {
|
||||
"type": "resize",
|
||||
"width": 1920,
|
||||
"height": 1080
|
||||
},
|
||||
"oiiotool_defaults": {
|
||||
"type": "colorspace",
|
||||
"colorspace": "color_picking",
|
||||
"view": "sRGB",
|
||||
"display": "default"
|
||||
},
|
||||
"ffmpeg_args": {
|
||||
"input": [
|
||||
"-apply_trc gamma22"
|
||||
|
|
@@ -316,22 +335,9 @@
                 "animation",
                 "setdress",
                 "layout",
-                "mayaScene",
-                "simpleUnrealTexture"
+                "mayaScene"
             ],
-            "template_name_profiles": [
-                {
-                    "families": [
-                        "simpleUnrealTexture"
-                    ],
-                    "hosts": [
-                        "standalonepublisher"
-                    ],
-                    "task_types": [],
-                    "task_names": [],
-                    "template_name": "simpleUnrealTextureHero"
-                }
-            ]
+            "template_name_profiles": []
         },
         "CleanUp": {
             "paterns": [],
@@ -513,17 +519,6 @@
             "task_names": [],
             "template_name": "render"
         },
-        {
-            "families": [
-                "simpleUnrealTexture"
-            ],
-            "hosts": [
-                "standalonepublisher"
-            ],
-            "task_types": [],
-            "task_names": [],
-            "template_name": "simpleUnrealTexture"
-        },
         {
             "families": [
                 "staticMesh",
|
@ -559,19 +554,7 @@
|
|||
"template_name": "tycache"
|
||||
}
|
||||
],
|
||||
"hero_template_name_profiles": [
|
||||
{
|
||||
"families": [
|
||||
"simpleUnrealTexture"
|
||||
],
|
||||
"hosts": [
|
||||
"standalonepublisher"
|
||||
],
|
||||
"task_types": [],
|
||||
"task_names": [],
|
||||
"template_name": "simpleUnrealTextureHero"
|
||||
}
|
||||
],
|
||||
"hero_template_name_profiles": [],
|
||||
"custom_staging_dir_profiles": []
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -56,6 +56,31 @@
|
|||
"enabled": false,
|
||||
"optional": true,
|
||||
"family_plugins_mapping": []
|
||||
},
|
||||
"ExtractModelObj": {
|
||||
"enabled": true,
|
||||
"optional": true,
|
||||
"active": false
|
||||
},
|
||||
"ExtractModelFbx": {
|
||||
"enabled": true,
|
||||
"optional": true,
|
||||
"active": false
|
||||
},
|
||||
"ExtractModelUSD": {
|
||||
"enabled": true,
|
||||
"optional": true,
|
||||
"active": false
|
||||
},
|
||||
"ExtractModel": {
|
||||
"enabled": true,
|
||||
"optional": true,
|
||||
"active": true
|
||||
},
|
||||
"ExtractMaxSceneRaw": {
|
||||
"enabled": true,
|
||||
"optional": true,
|
||||
"active": true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -379,68 +379,6 @@
|
|||
"optional": true,
|
||||
"active": true
|
||||
},
|
||||
"ExtractThumbnail": {
|
||||
"enabled": true,
|
||||
"use_rendered": true,
|
||||
"bake_viewer_process": true,
|
||||
"bake_viewer_input_process": true,
|
||||
"nodes": {
|
||||
"Reformat": [
|
||||
[
|
||||
"type",
|
||||
"to format"
|
||||
],
|
||||
[
|
||||
"format",
|
||||
"HD_1080"
|
||||
],
|
||||
[
|
||||
"filter",
|
||||
"Lanczos6"
|
||||
],
|
||||
[
|
||||
"black_outside",
|
||||
true
|
||||
],
|
||||
[
|
||||
"pbb",
|
||||
false
|
||||
]
|
||||
]
|
||||
},
|
||||
"reposition_nodes": [
|
||||
{
|
||||
"node_class": "Reformat",
|
||||
"knobs": [
|
||||
{
|
||||
"type": "text",
|
||||
"name": "type",
|
||||
"value": "to format"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "format",
|
||||
"value": "HD_1080"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"name": "filter",
|
||||
"value": "Lanczos6"
|
||||
},
|
||||
{
|
||||
"type": "bool",
|
||||
"name": "black_outside",
|
||||
"value": true
|
||||
},
|
||||
{
|
||||
"type": "bool",
|
||||
"name": "pbb",
|
||||
"value": false
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"ExtractReviewData": {
|
||||
"enabled": false
|
||||
},
|
||||
|
|
|
|||
|
|
@ -133,14 +133,6 @@
|
|||
],
|
||||
"help": "Texture files with UDIM together with worfile"
|
||||
},
|
||||
"create_simple_unreal_texture": {
|
||||
"name": "simple_unreal_texture",
|
||||
"label": "Simple Unreal Texture",
|
||||
"family": "simpleUnrealTexture",
|
||||
"icon": "Image",
|
||||
"defaults": [],
|
||||
"help": "Texture files with Unreal naming convention"
|
||||
},
|
||||
"create_vdb": {
|
||||
"name": "vdb",
|
||||
"label": "VDB Volumetric Data",
|
||||
|
|
|
|||
|
|
@ -244,19 +244,6 @@
|
|||
".hda"
|
||||
]
|
||||
},
|
||||
{
|
||||
"family": "simpleUnrealTexture",
|
||||
"identifier": "",
|
||||
"label": "Simple UE texture",
|
||||
"icon": "fa.image",
|
||||
"default_variants": [],
|
||||
"description": "Simple Unreal Engine texture",
|
||||
"detailed_description": "Texture files with Unreal Engine naming conventions",
|
||||
"allow_sequences": false,
|
||||
"allow_multiple_items": true,
|
||||
"allow_version_control": false,
|
||||
"extensions": []
|
||||
},
|
||||
{
|
||||
"family": "audio",
|
||||
"identifier": "",
|
||||
|
|
|
|||
|
|
@ -581,6 +581,11 @@
|
|||
"type": "text",
|
||||
"key": "group",
|
||||
"label": "Group Name"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "job_delay",
|
||||
"label": "Delay job (timecode dd:hh:mm:ss)"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
|
|||
|
|
@ -202,6 +202,104 @@
|
|||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "integrate_thumbnail",
|
||||
"label": "Integrate thumbnail as representation"
|
||||
},
|
||||
{
|
||||
"type": "dict-conditional",
|
||||
"use_label_wrap": false,
|
||||
"collapsible": false,
|
||||
"key": "target_size",
|
||||
"label": "Target size",
|
||||
"enum_key": "type",
|
||||
"enum_label": "Type",
|
||||
"enum_children": [
|
||||
{
|
||||
"key": "source",
|
||||
"label": "Image source",
|
||||
"children": [
|
||||
{
|
||||
"type": "label",
|
||||
"label": "Image size will be inherited from source image."
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"key": "resize",
|
||||
"label": "Resize",
|
||||
"children": [
|
||||
{
|
||||
"type": "label",
|
||||
"label": "Image will be resized to specified size."
|
||||
},
|
||||
{
|
||||
"type": "number",
|
||||
"key": "width",
|
||||
"label": "Width",
|
||||
"decimal": 0,
|
||||
"minimum": 0,
|
||||
"maximum": 99999
|
||||
},
|
||||
{
|
||||
"type": "number",
|
||||
"key": "height",
|
||||
"label": "Height",
|
||||
"decimal": 0,
|
||||
"minimum": 0,
|
||||
"maximum": 99999
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "color",
|
||||
"label": "Background color",
|
||||
"key": "background_color"
|
||||
},
|
||||
{
|
||||
"key": "duration_split",
|
||||
"label": "Duration split ratio",
|
||||
"type": "number",
|
||||
"decimal": 1,
|
||||
"default": 0.5,
|
||||
"minimum": 0,
|
||||
"maximum": 1
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"key": "oiiotool_defaults",
|
||||
"label": "OIIOtool defaults",
|
||||
"children": [
|
||||
{
|
||||
"type": "enum",
|
||||
"key": "type",
|
||||
"label": "Target type",
|
||||
"enum_items": [
|
||||
{ "colorspace": "Colorspace" },
|
||||
{ "display_and_view": "Display & View" }
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "colorspace",
|
||||
"label": "Colorspace"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "view",
|
||||
"label": "View"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "display",
|
||||
"label": "Display"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"key": "ffmpeg_args",
|
||||
|
|
|
|||
|
|
@ -90,6 +90,131 @@
|
|||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"checkbox_key": "enabled",
|
||||
"key": "ExtractModelObj",
|
||||
"label": "Extract Obj",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "optional",
|
||||
"label": "Optional"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active",
|
||||
"label": "Active"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"checkbox_key": "enabled",
|
||||
"key": "ExtractModelFbx",
|
||||
"label": "Extract FBX",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "optional",
|
||||
"label": "Optional"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active",
|
||||
"label": "Active"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"checkbox_key": "enabled",
|
||||
"key": "ExtractModelUSD",
|
||||
"label": "Extract Geometry (USD)",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "optional",
|
||||
"label": "Optional"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active",
|
||||
"label": "Active"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"checkbox_key": "enabled",
|
||||
"key": "ExtractModel",
|
||||
"label": "Extract Geometry (Alembic)",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "optional",
|
||||
"label": "Optional"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active",
|
||||
"label": "Active"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"checkbox_key": "enabled",
|
||||
"key": "ExtractMaxSceneRaw",
|
||||
"label": "Extract Max Scene (Raw)",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "optional",
|
||||
"label": "Optional"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "active",
|
||||
"label": "Active"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -125,81 +125,6 @@
|
|||
"type": "label",
|
||||
"label": "Extractors"
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
"checkbox_key": "enabled",
|
||||
"key": "ExtractThumbnail",
|
||||
"label": "ExtractThumbnail",
|
||||
"is_group": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "enabled",
|
||||
"label": "Enabled"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "use_rendered",
|
||||
"label": "Use rendered images"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "bake_viewer_process",
|
||||
"label": "Bake viewer process"
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"key": "bake_viewer_input_process",
|
||||
"label": "Bake viewer input process"
|
||||
},
|
||||
{
|
||||
"type": "collapsible-wrap",
|
||||
"label": "Nodes",
|
||||
"collapsible": true,
|
||||
"children": [
|
||||
{
|
||||
"type": "label",
|
||||
"label": "Nodes attribute will be deprecated in future releases. Use reposition_nodes instead."
|
||||
},
|
||||
{
|
||||
"type": "raw-json",
|
||||
"key": "nodes",
|
||||
"label": "Nodes [depricated]"
|
||||
},
|
||||
{
|
||||
"type": "label",
|
||||
"label": "Reposition knobs supported only. You can add multiple reformat nodes <br/>and set their knobs. Order of reformat nodes is important. First reformat node <br/>will be applied first and last reformat node will be applied last."
|
||||
},
|
||||
{
|
||||
"key": "reposition_nodes",
|
||||
"type": "list",
|
||||
"label": "Reposition nodes",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
{
|
||||
"key": "node_class",
|
||||
"label": "Node class",
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"type": "schema_template",
|
||||
"name": "template_nuke_knob_inputs",
|
||||
"template_data": [
|
||||
{
|
||||
"label": "Node knobs",
|
||||
"key": "knobs"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "dict",
|
||||
"collapsible": true,
|
||||
|
|
|
|||
|
|
@ -608,7 +608,7 @@ class UnknownAttrWidget(_BaseAttrDefWidget):
|
|||
class HiddenAttrWidget(_BaseAttrDefWidget):
|
||||
def _ui_init(self):
|
||||
self.setVisible(False)
|
||||
self._value = None
|
||||
self._value = self.attr_def.default
|
||||
self._multivalue = False
|
||||
|
||||
def setVisible(self, visible):
|
||||
|
|
|
|||
|
|
@ -137,7 +137,7 @@ class VersionItem:
|
|||
handles,
|
||||
step,
|
||||
comment,
|
||||
source
|
||||
source,
|
||||
):
|
||||
self.version_id = version_id
|
||||
self.product_id = product_id
|
||||
|
|
@ -215,7 +215,7 @@ class RepreItem:
|
|||
representation_name,
|
||||
representation_icon,
|
||||
product_name,
|
||||
folder_label,
|
||||
folder_label
|
||||
):
|
||||
self.representation_id = representation_id
|
||||
self.representation_name = representation_name
|
||||
|
|
@@ -590,6 +590,22 @@ class FrontendLoaderController(_BaseLoaderController):
 
         pass
 
+    @abstractmethod
+    def get_versions_representation_count(
+        self, project_name, version_ids, sender=None
+    ):
+        """
+        Args:
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
+            sender (Optional[str]): Sender who requested the items.
+
+        Returns:
+            dict[str, int]: Representation count by version id.
+        """
+
+        pass
+
     @abstractmethod
     def get_thumbnail_path(self, project_name, thumbnail_id):
         """Get thumbnail path for thumbnail id.
@@ -849,3 +865,80 @@ class FrontendLoaderController(_BaseLoaderController):
         """
 
         pass
+
+    # Site sync functions
+    @abstractmethod
+    def is_site_sync_enabled(self, project_name=None):
+        """Is site sync enabled.
+
+        Site sync addon can be enabled but can be disabled per project.
+
+        When asked for enabled state without project name, it should return
+        True if site sync addon is available and enabled.
+
+        Args:
+            project_name (Optional[str]): Project name.
+
+        Returns:
+            bool: True if site sync is enabled.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_active_site_icon_def(self, project_name):
+        """Active site icon definition.
+
+        Args:
+            project_name (Union[str, None]): Project name.
+
+        Returns:
+            Union[dict[str, Any], None]: Icon definition or None if site sync
+                is not enabled for the project.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_remote_site_icon_def(self, project_name):
+        """Remote site icon definition.
+
+        Args:
+            project_name (Union[str, None]): Project name.
+
+        Returns:
+            Union[dict[str, Any], None]: Icon definition or None if site sync
+                is not enabled for the project.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_version_sync_availability(self, project_name, version_ids):
+        """Version sync availability.
+
+        Args:
+            project_name (str): Project name.
+            version_ids (Iterable[str]): Version ids.
+
+        Returns:
+            dict[str, tuple[int, int]]: Sync availability by version id.
+        """
+
+        pass
+
+    @abstractmethod
+    def get_representations_sync_status(
+        self, project_name, representation_ids
+    ):
+        """Representations sync status.
+
+        Args:
+            project_name (str): Project name.
+            representation_ids (Iterable[str]): Representation ids.
+
+        Returns:
+            dict[str, tuple[int, int]]: Sync status by representation id.
+        """
+
+        pass
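A reading aid (not part of the commit): the two tuple-valued queries return per-id pairs, e.g.

    # assumed meaning of the pair: (availability on active site, availability on remote site)
    {"<version_id>": (2, 3)}

while the icon-def getters return either None or a dict describing an icon; the concrete shapes are left to implementations.
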
@@ -15,7 +15,12 @@ from openpype.tools.ayon_utils.models import (
 )
 
 from .abstract import BackendLoaderController, FrontendLoaderController
-from .models import SelectionModel, ProductsModel, LoaderActionsModel
+from .models import (
+    SelectionModel,
+    ProductsModel,
+    LoaderActionsModel,
+    SiteSyncModel
+)
 
 
 class ExpectedSelection:
@@ -108,6 +113,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
         self._products_model = ProductsModel(self)
         self._loader_actions_model = LoaderActionsModel(self)
         self._thumbnails_model = ThumbnailsModel()
+        self._site_sync_model = SiteSyncModel(self)
 
     @property
     def log(self):
@@ -143,6 +149,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
         self._loader_actions_model.reset()
         self._projects_model.reset()
         self._thumbnails_model.reset()
+        self._site_sync_model.reset()
 
         self._projects_model.refresh()
@@ -195,13 +202,22 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
             project_name, version_ids, sender
         )
 
+    def get_versions_representation_count(
+        self, project_name, version_ids, sender=None
+    ):
+        return self._products_model.get_versions_repre_count(
+            project_name, version_ids, sender
+        )
+
     def get_folder_thumbnail_ids(self, project_name, folder_ids):
         return self._thumbnails_model.get_folder_thumbnail_ids(
-            project_name, folder_ids)
+            project_name, folder_ids
+        )
 
     def get_version_thumbnail_ids(self, project_name, version_ids):
         return self._thumbnails_model.get_version_thumbnail_ids(
-            project_name, version_ids)
+            project_name, version_ids
+        )
 
     def get_thumbnail_path(self, project_name, thumbnail_id):
         return self._thumbnails_model.get_thumbnail_path(
@@ -219,8 +235,16 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
 
     def get_representations_action_items(
             self, project_name, representation_ids):
-        return self._loader_actions_model.get_representations_action_items(
-            project_name, representation_ids)
+        action_items = (
+            self._loader_actions_model.get_representations_action_items(
+                project_name, representation_ids)
+        )
+
+        action_items.extend(self._site_sync_model.get_site_sync_action_items(
+            project_name, representation_ids)
+        )
+
+        return action_items
 
     def trigger_action_item(
         self,
@@ -230,6 +254,14 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
         version_ids,
         representation_ids
     ):
+        if self._site_sync_model.is_site_sync_action(identifier):
+            self._site_sync_model.trigger_action_item(
+                identifier,
+                project_name,
+                representation_ids
+            )
+            return
+
         self._loader_actions_model.trigger_action_item(
             identifier,
             options,
@@ -336,6 +368,27 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
         self._loaded_products_cache.update_data(product_ids)
         return self._loaded_products_cache.get_data()
 
+    def is_site_sync_enabled(self, project_name=None):
+        return self._site_sync_model.is_site_sync_enabled(project_name)
+
+    def get_active_site_icon_def(self, project_name):
+        return self._site_sync_model.get_active_site_icon_def(project_name)
+
+    def get_remote_site_icon_def(self, project_name):
+        return self._site_sync_model.get_remote_site_icon_def(project_name)
+
+    def get_version_sync_availability(self, project_name, version_ids):
+        return self._site_sync_model.get_version_sync_availability(
+            project_name, version_ids
+        )
+
+    def get_representations_sync_status(
+        self, project_name, representation_ids
+    ):
+        return self._site_sync_model.get_representations_sync_status(
+            project_name, representation_ids
+        )
+
     def is_loaded_products_supported(self):
         return self._host is not None
@@ -1,10 +1,12 @@
 from .selection import SelectionModel
 from .products import ProductsModel
 from .actions import LoaderActionsModel
+from .site_sync import SiteSyncModel
 
 
 __all__ = (
     "SelectionModel",
     "ProductsModel",
    "LoaderActionsModel",
+    "SiteSyncModel",
 )
Some files were not shown because too many files have changed in this diff.