Merge branch 'develop' into enhancement/add-environments-to-royalrender-job

This commit is contained in:
Ondřej Samohel 2023-12-04 17:55:28 +01:00 committed by GitHub
commit eacb97dc50
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
60 changed files with 1906 additions and 920 deletions

View file

@ -1,4 +1,4 @@
import os
from pathlib import Path
import bpy
@ -59,7 +59,7 @@ def get_render_product(output_path, name, aov_sep):
instance (pyblish.api.Instance): The instance to publish.
ext (str): The image format to render.
"""
filepath = os.path.join(output_path, name)
filepath = output_path / name.lstrip("/")
render_product = f"{filepath}{aov_sep}beauty.####"
render_product = render_product.replace("\\", "/")
@ -180,7 +180,7 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
return []
output.file_slots.clear()
output.base_path = output_path
output.base_path = str(output_path)
aov_file_products = []
@ -191,8 +191,9 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
output.file_slots.new(filepath)
aov_file_products.append(
(render_pass.name, os.path.join(output_path, filepath)))
filename = str(output_path / filepath.lstrip("/"))
aov_file_products.append((render_pass.name, filename))
node_input = output.inputs[-1]
@ -214,12 +215,11 @@ def imprint_render_settings(node, data):
def prepare_rendering(asset_group):
name = asset_group.name
filepath = bpy.data.filepath
filepath = Path(bpy.data.filepath)
assert filepath, "Workfile not saved. Please save the file first."
file_path = os.path.dirname(filepath)
file_name = os.path.basename(filepath)
file_name, _ = os.path.splitext(file_name)
dirpath = filepath.parent
file_name = Path(filepath.name).stem
project = get_current_project_name()
settings = get_project_settings(project)
@ -232,7 +232,7 @@ def prepare_rendering(asset_group):
set_render_format(ext, multilayer)
aov_list, custom_passes = set_render_passes(settings)
output_path = os.path.join(file_path, render_folder, file_name)
output_path = Path.joinpath(dirpath, render_folder, file_name)
render_product = get_render_product(output_path, name, aov_sep)
aov_file_product = set_node_tree(

View file

@ -11,12 +11,12 @@ import pyblish.api
class CollectBlenderRender(pyblish.api.InstancePlugin):
"""Gather all publishable render layers from renderSetup."""
"""Gather all publishable render instances."""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["blender"]
families = ["render"]
label = "Collect Render Layers"
label = "Collect Render"
sync_workfile_version = False
@staticmethod
@ -78,8 +78,6 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
assert render_data, "No render data found."
self.log.debug(f"render_data: {dict(render_data)}")
render_product = render_data.get("render_product")
aov_file_product = render_data.get("aov_file_product")
ext = render_data.get("image_format")
@ -101,7 +99,7 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
expected_files = expected_beauty | expected_aovs
instance.data.update({
"family": "render.farm",
"families": ["render", "render.farm"],
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartHandle": frame_handle_start,
@ -120,5 +118,3 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
"colorspaceView": "ACES 1.0 SDR-video",
"renderProducts": colorspace.ARenderProduct(),
})
self.log.debug(f"data: {instance.data}")

View file

@ -14,7 +14,7 @@ from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
"""Extract a layout."""
label = "Extract Layout"
label = "Extract Layout (JSON)"
hosts = ["blender"]
families = ["layout"]
optional = True

View file

@ -14,7 +14,7 @@ class IncrementWorkfileVersion(
optional = True
hosts = ["blender"]
families = ["animation", "model", "rig", "action", "layout", "blendScene",
"pointcache", "render"]
"pointcache", "render.farm"]
def process(self, context):
if not self.is_active(context.data):

View file

@ -19,7 +19,7 @@ class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
"""
order = ValidateContentsOrder
families = ["render.farm"]
families = ["render"]
hosts = ["blender"]
label = "Validate Render Output for Deadline"
optional = True

View file

@ -25,20 +25,24 @@ def enabled_savers(comp, savers):
"""
passthrough_key = "TOOLB_PassThrough"
original_states = {}
enabled_save_names = {saver.Name for saver in savers}
enabled_saver_names = {saver.Name for saver in savers}
all_savers = comp.GetToolList(False, "Saver").values()
savers_by_name = {saver.Name: saver for saver in all_savers}
try:
all_savers = comp.GetToolList(False, "Saver").values()
for saver in all_savers:
original_state = saver.GetAttrs()[passthrough_key]
original_states[saver] = original_state
original_states[saver.Name] = original_state
# The passthrough state we want to set (passthrough != enabled)
state = saver.Name not in enabled_save_names
state = saver.Name not in enabled_saver_names
if state != original_state:
saver.SetAttrs({passthrough_key: state})
yield
finally:
for saver, original_state in original_states.items():
for saver_name, original_state in original_states.items():
saver = savers_by_name[saver_name]
saver.SetAttrs({"TOOLB_PassThrough": original_state})

View file

@ -13,7 +13,7 @@ var LD_OPENHARMONY_PATH = System.getenv('LIB_OPENHARMONY_PATH');
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH + '/openHarmony.js';
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH.replace(/\\/g, "/");
include(LD_OPENHARMONY_PATH);
this.__proto__['$'] = $;
//this.__proto__['$'] = $;
function Client() {
var self = this;

View file

@ -59,8 +59,8 @@ class ExtractRender(pyblish.api.InstancePlugin):
args = [application_path, "-batch",
"-frames", str(frame_start), str(frame_end),
"-scene", scene_path]
self.log.info(f"running [ {application_path} {' '.join(args)}")
scene_path]
self.log.info(f"running: {' '.join(args)}")
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,

View file

@ -95,18 +95,18 @@ def menu_install():
menu.addSeparator()
publish_action = menu.addAction("Publish...")
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
publish_action.triggered.connect(
lambda *args: publish(hiero.ui.mainWindow())
)
creator_action = menu.addAction("Create...")
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
creator_action.triggered.connect(
lambda: host_tools.show_creator(parent=main_window)
)
publish_action = menu.addAction("Publish...")
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
publish_action.triggered.connect(
lambda *args: publish(hiero.ui.mainWindow())
)
loader_action = menu.addAction("Load...")
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
loader_action.triggered.connect(

View file

@ -66,10 +66,6 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_event_callback("new", on_new)
self._has_been_setup = True
# add houdini vendor packages
hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor")
sys.path.append(hou_pythonpath)
# Set asset settings for the empty scene directly after launch of
# Houdini so it initializes into the correct scene FPS,

View file

@ -1 +0,0 @@
__path__ = __import__('pkgutil').extend_path(__path__, __name__)

View file

@ -1,152 +0,0 @@
import os
import hou
import husdoutputprocessors.base as base
import colorbleed.usdlib as usdlib
from openpype.client import get_asset_by_name
from openpype.pipeline import Anatomy, get_current_project_name
class AvalonURIOutputProcessor(base.OutputProcessorBase):
    """Process Avalon URIs into their full path equivalents.

    Registered as a Houdini USD output processor: during a USD save it
    rewrites any asset path that parses as an Avalon URI into either a
    relative "{asset}_{subset}.{ext}" filename or, when enabled, the
    published master file path resolved through the project Anatomy.
    """

    # Lazily-built dialog script describing this processor's parameters.
    _parameters = None
    # Prefix namespacing our parm names so they don't clash with other
    # output processors on the same config node.
    _param_prefix = 'avalonurioutputprocessor_'
    # Logical parm name -> actual (prefixed) Houdini parm name.
    _parms = {
        "use_publish_paths": _param_prefix + "use_publish_paths"
    }

    def __init__(self):
        """ There is only one object of each output processor class that is
            ever created in a Houdini session. Therefore be very careful
            about what data gets put in this object.
        """
        # Toggle read from the config node at save begin; see beginSave().
        self._use_publish_paths = False
        # Per-save memo of processAsset() results, keyed by the query tuple.
        self._cache = dict()

    def displayName(self):
        # Human-readable label shown in Houdini's output processor UI.
        return 'Avalon URI Output Processor'

    def parameters(self):
        """Return the dialog script for this processor's parameters.

        Built once and cached in ``_parameters``; Houdini calls this to
        present the toggle on the configuring node.
        """
        if not self._parameters:
            parameters = hou.ParmTemplateGroup()
            use_publish_path = hou.ToggleParmTemplate(
                name=self._parms["use_publish_paths"],
                label='Resolve Reference paths to publish paths',
                default_value=False,
                help=("When enabled any paths for Layers, References or "
                      "Payloads are resolved to published master versions.\n"
                      "This is usually only used by the publishing pipeline, "
                      "but can be used for testing too."))
            parameters.append(use_publish_path)
            self._parameters = parameters.asDialogScript()
        return self._parameters

    def beginSave(self, config_node, t):
        # Read the toggle off the configuring node at the save's eval time
        # and reset the cache so results never leak between saves.
        parm = self._parms["use_publish_paths"]
        self._use_publish_paths = config_node.parm(parm).evalAtTime(t)
        self._cache.clear()

    def endSave(self):
        # Clear per-save state; None marks "no save in progress".
        self._use_publish_paths = None
        self._cache.clear()

    def processAsset(self,
                     asset_path,
                     asset_path_for_save,
                     referencing_layer_path,
                     asset_is_layer,
                     for_save):
        """
        Args:
            asset_path (str): The incoming file path you want to alter or not.
            asset_path_for_save (bool): Whether the current path is a
                referenced path in the USD file. When True, return the path
                you want inside USD file.
            referencing_layer_path (str): ???
            asset_is_layer (bool): Whether this asset is a USD layer file.
                If this is False, the asset is something else (for example,
                a texture or volume file).
            for_save (bool): Whether the asset path is for a file to be saved
                out. If so, then return actual written filepath.

        Returns:
            The refactored asset path.
        """
        # Retrieve from cache if this query occurred before (optimization)
        cache_key = (asset_path, asset_path_for_save, asset_is_layer, for_save)
        if cache_key in self._cache:
            return self._cache[cache_key]

        relative_template = "{asset}_{subset}.{ext}"
        # None when the path is not an Avalon URI; then it passes through.
        uri_data = usdlib.parse_avalon_uri(asset_path)
        if uri_data:
            if for_save:
                # Set save output path to a relative path so other
                # processors can potentially manage it easily?
                path = relative_template.format(**uri_data)
                print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
                self._cache[cache_key] = path
                return path

            if self._use_publish_paths:
                # Resolve to an Avalon published asset for embedded paths
                path = self._get_usd_master_path(**uri_data)
            else:
                path = relative_template.format(**uri_data)
            print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
            self._cache[cache_key] = path
            return path

        # Not an Avalon URI: return (and cache) the path unchanged.
        self._cache[cache_key] = asset_path
        return asset_path

    def _get_usd_master_path(self,
                             asset,
                             subset,
                             ext):
        """Get the filepath for a .usd file of a subset.

        This will return the path to an unversioned master file generated by
        `usd_master_file.py`.

        Raises:
            RuntimeError: When no asset document exists for `asset`.
        """
        PROJECT = get_current_project_name()
        anatomy = Anatomy(PROJECT)
        asset_doc = get_asset_by_name(PROJECT, asset)
        if not asset_doc:
            raise RuntimeError("Invalid asset name: '%s'" % asset)

        # Format the publish path template with a stub version, then strip
        # the version folder and point into a sibling "master" folder.
        template_obj = anatomy.templates_obj["publish"]["path"]
        path = template_obj.format_strict({
            "project": PROJECT,
            "asset": asset_doc["name"],
            "subset": subset,
            "representation": ext,
            "version": 0  # stub version zero
        })

        # Remove the version folder
        subset_folder = os.path.dirname(os.path.dirname(path))
        master_folder = os.path.join(subset_folder, "master")
        fname = "{0}.{1}".format(subset, ext)

        # Forward slashes for USD-friendly, platform-neutral paths.
        return os.path.join(master_folder, fname).replace("\\", "/")
# Module-level singleton; Houdini creates only one processor object per
# class for the whole session (see AvalonURIOutputProcessor.__init__ docs).
output_processor = AvalonURIOutputProcessor()


def usdOutputProcessor():
    # Factory hook looked up by Houdini's husdoutputprocessors machinery.
    # NOTE(review): the camelCase name appears dictated by that API — do not
    # rename without confirming against the husdoutputprocessors contract.
    return output_processor

View file

@ -1,90 +0,0 @@
import hou
import husdoutputprocessors.base as base
import os
class StagingDirOutputProcessor(base.OutputProcessorBase):
    """Output all USD Rop file nodes into the Staging Directory

    Ignore any folders and paths set in the Configured Layers
    and USD Rop node, just take the filename and save into a
    single directory.
    """

    # Lazily-built dialog script describing this processor's parameters.
    theParameters = None
    # Prefix namespacing the parm name on the configuring node.
    parameter_prefix = "stagingdiroutputprocessor_"
    stagingdir_parm_name = parameter_prefix + "stagingDir"

    def __init__(self):
        # Resolved per save in beginSave(); None means "no redirection".
        self.staging_dir = None

    def displayName(self):
        # Human-readable label shown in Houdini's output processor UI.
        return 'StagingDir Output Processor'

    def parameters(self):
        """Return the dialog script exposing the Staging Directory parm."""
        if not self.theParameters:
            parameters = hou.ParmTemplateGroup()
            rootdirparm = hou.StringParmTemplate(
                self.stagingdir_parm_name,
                'Staging Directory', 1,
                string_type=hou.stringParmType.FileReference,
                file_type=hou.fileType.Directory
            )
            parameters.append(rootdirparm)
            self.theParameters = parameters.asDialogScript()
        return self.theParameters

    def beginSave(self, config_node, t):
        # Use the Root Directory parameter if it is set.
        root_dir_parm = config_node.parm(self.stagingdir_parm_name)
        if root_dir_parm:
            self.staging_dir = root_dir_parm.evalAtTime(t)

        if not self.staging_dir:
            # Fall back to the directory of the node's 'lopoutput' path.
            out_file_parm = config_node.parm('lopoutput')
            if out_file_parm:
                self.staging_dir = out_file_parm.evalAtTime(t)
            if self.staging_dir:
                # Keep only the directory part; the filename is discarded.
                (self.staging_dir, filename) = os.path.split(self.staging_dir)

    def endSave(self):
        # Reset per-save state.
        self.staging_dir = None

    def processAsset(self, asset_path,
                     asset_path_for_save,
                     referencing_layer_path,
                     asset_is_layer,
                     for_save):
        """
        Args:
            asset_path (str): The incoming file path you want to alter or not.
            asset_path_for_save (bool): Whether the current path is a
                referenced path in the USD file. When True, return the path
                you want inside USD file.
            referencing_layer_path (str): ???
            asset_is_layer (bool): Whether this asset is a USD layer file.
                If this is False, the asset is something else (for example,
                a texture or volume file).
            for_save (bool): Whether the asset path is for a file to be saved
                out. If so, then return actual written filepath.

        Returns:
            The refactored asset path.
        """
        # Treat save paths as being relative to the output path.
        if for_save and self.staging_dir:
            # Whenever we're processing a Save Path make sure to
            # resolve it to the Staging Directory
            filename = os.path.basename(asset_path)
            return os.path.join(self.staging_dir, filename)

        return asset_path
# Module-level singleton consumed by the factory hook below.
output_processor = StagingDirOutputProcessor()


def usdOutputProcessor():
    # Factory hook looked up by Houdini's husdoutputprocessors machinery.
    # NOTE(review): camelCase name appears dictated by that API — confirm
    # before renaming.
    return output_processor

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""3dsmax menu definition of OpenPype."""
"""3dsmax menu definition of AYON."""
import os
from qtpy import QtWidgets, QtCore
from pymxs import runtime as rt
@ -8,7 +9,7 @@ from openpype.hosts.max.api import lib
class OpenPypeMenu(object):
"""Object representing OpenPype menu.
"""Object representing OpenPype/AYON menu.
This is using "hack" to inject itself before "Help" menu of 3dsmax.
For some reason `postLoadingMenus` event doesn't fire, and main menu
@ -50,17 +51,17 @@ class OpenPypeMenu(object):
return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0]
def get_or_create_openpype_menu(
self, name: str = "&OpenPype",
self, name: str = "&Openpype",
before: str = "&Help") -> QtWidgets.QAction:
"""Create OpenPype menu.
"""Create AYON menu.
Args:
name (str, Optional): OpenPypep menu name.
name (str, Optional): AYON menu name.
before (str, Optional): Name of the 3dsmax main menu item to
add OpenPype menu before.
add AYON menu before.
Returns:
QtWidgets.QAction: OpenPype menu action.
QtWidgets.QAction: AYON menu action.
"""
if self.menu is not None:
@ -77,15 +78,15 @@ class OpenPypeMenu(object):
if before in item.title():
help_action = item.menuAction()
op_menu = QtWidgets.QMenu("&OpenPype")
tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON"
op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label))
menu_bar.insertMenu(help_action, op_menu)
self.menu = op_menu
return op_menu
def build_openpype_menu(self) -> QtWidgets.QAction:
"""Build items in OpenPype menu."""
"""Build items in AYON menu."""
openpype_menu = self.get_or_create_openpype_menu()
load_action = QtWidgets.QAction("Load...", openpype_menu)
load_action.triggered.connect(self.load_callback)

View file

@ -175,7 +175,7 @@ def containerise(name: str, nodes: list, context,
def load_custom_attribute_data():
"""Re-loading the Openpype/AYON custom parameter built by the creator
"""Re-loading the AYON custom parameter built by the creator
Returns:
attribute: re-loading the custom OP attributes set in Maxscript
@ -213,7 +213,7 @@ def import_custom_attribute_data(container: str, selections: list):
def update_custom_attribute_data(container: str, selections: list):
"""Updating the Openpype/AYON custom parameter built by the creator
"""Updating the AYON custom parameter built by the creator
Args:
container (str): target container which adds custom attributes

View file

@ -102,8 +102,6 @@ _alembic_options = {
INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"]
DISPLAY_LIGHTS_ENUM = [
{"label": "Use Project Settings", "value": "project_settings"},
@ -3021,194 +3019,6 @@ class shelf():
cmds.shelfLayout(self.name, p="ShelfLayout")
def _get_render_instances():
    """Return all 'render-like' instances.

    This returns list of instance sets that needs to receive information
    about render layer changes.

    Returns:
        list: list of instances
    """
    # All objectSets carrying an ".id" attribute are publish candidates.
    objectset = cmds.ls("*.id", long=True, exactType="objectSet",
                        recursive=True, objectsOnly=True)

    instances = []
    for objset in objectset:
        # Guard: ls() matched on the attribute, but re-check existence
        # before reading (wildcard matches can be indirect).
        if not cmds.attributeQuery("id", node=objset, exists=True):
            continue

        # Only pyblish instance sets are relevant.
        id_attr = "{}.id".format(objset)
        if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
            continue

        has_family = cmds.attributeQuery("family",
                                         node=objset,
                                         exists=True)
        if not has_family:
            continue

        # Keep only families listed in RENDERLIKE_INSTANCE_FAMILIES
        # (module-level constant, e.g. "rendering"/"vrayscene").
        if cmds.getAttr(
                "{}.family".format(objset)) in RENDERLIKE_INSTANCE_FAMILIES:
            instances.append(objset)

    return instances
renderItemObserverList = []
class RenderSetupListObserver:
    """Observer to catch changes in render setup layers.

    Registered on Maya's renderSetup instance; mirrors layer add/remove
    events into per-layer objectSets inside each render-like instance set.
    """

    def listItemAdded(self, item):
        # renderSetup callback: a render layer was added.
        print("--- adding ...")
        self._add_render_layer(item)

    def listItemRemoved(self, item):
        # renderSetup callback: a render layer was removed.
        print("--- removing ...")
        self._remove_render_layer(item.name())

    def _add_render_layer(self, item):
        """Create a namespaced set for the new layer in every render instance
        and attach a rename observer to the layer item."""
        render_sets = _get_render_instances()
        layer_name = item.name()

        for render_set in render_sets:
            members = cmds.sets(render_set, query=True) or []
            # Namespace derived from the instance set name, e.g. "_renderMain".
            namespace_name = "_{}".format(render_set)
            if not cmds.namespace(exists=namespace_name):
                index = 1
                namespace_name = "_{}".format(render_set)
                try:
                    cmds.namespace(rm=namespace_name)
                except RuntimeError:
                    # namespace is not empty, so we leave it untouched
                    pass
                # Find a free namespace by appending an increasing index.
                # NOTE(review): "orignal" is a pre-existing misspelling kept
                # as-is to preserve the code byte-for-byte.
                orignal_namespace_name = namespace_name
                while(cmds.namespace(exists=namespace_name)):
                    namespace_name = "{}{}".format(
                        orignal_namespace_name, index)
                    index += 1

                namespace = cmds.namespace(add=namespace_name)

            if members:
                # if set already have namespaced members, use the same
                # namespace as others.
                namespace = members[0].rpartition(":")[0]
            else:
                namespace = namespace_name

            render_layer_set_name = "{}:{}".format(namespace, layer_name)
            if render_layer_set_name in members:
                # Layer set already mirrored for this instance.
                continue
            print(" - creating set for {}".format(layer_name))

            maya_set = cmds.sets(n=render_layer_set_name, empty=True)
            cmds.sets(maya_set, forceElement=render_set)
            # Track renames of this layer; keep a module-level reference so
            # the observer isn't garbage collected.
            rio = RenderSetupItemObserver(item)
            print("- adding observer for {}".format(item.name()))
            item.addItemObserver(rio.itemChanged)
            renderItemObserverList.append(rio)

    def _remove_render_layer(self, layer_name):
        """Delete the mirrored layer set from every render instance."""
        render_sets = _get_render_instances()

        for render_set in render_sets:
            members = cmds.sets(render_set, query=True)
            if not members:
                continue
            # all sets under set should have the same namespace
            namespace = members[0].rpartition(":")[0]
            render_layer_set_name = "{}:{}".format(namespace, layer_name)
            if render_layer_set_name in members:
                print(" - removing set for {}".format(layer_name))
                cmds.delete(render_layer_set_name)
class RenderSetupItemObserver:
    """Handle changes in render setup items.

    One instance is attached per render layer; it renames the mirrored
    objectSets when the layer itself is renamed.
    """

    def __init__(self, item):
        # The observed renderSetup layer item and its last-known name,
        # used to detect and apply renames.
        self.item = item
        self.original_name = item.name()

    def itemChanged(self, *args, **kwargs):
        """Item changed callback."""
        # Only renames matter here; bail out when the name is unchanged.
        if self.item.name() == self.original_name:
            return
        render_sets = _get_render_instances()
        for render_set in render_sets:
            members = cmds.sets(render_set, query=True)
            if not members:
                continue
            # all sets under set should have the same namespace
            namespace = members[0].rpartition(":")[0]
            render_layer_set_name = "{}:{}".format(
                namespace, self.original_name)
            if render_layer_set_name in members:
                print(" <> renaming {} to {}".format(self.original_name,
                                                     self.item.name()))
                cmds.rename(render_layer_set_name,
                            "{}:{}".format(
                                namespace, self.item.name()))
        # Remember the new name for subsequent change callbacks.
        self.original_name = self.item.name()
# Module-level list observer registered on the renderSetup instance by
# add_render_layer_observer() and removed by remove_render_layer_observer().
renderListObserver = RenderSetupListObserver()


def add_render_layer_change_observer():
    """Attach a rename observer to every existing mirrored render layer.

    For each render-like instance set, finds the layer sets it already
    mirrors and hooks a RenderSetupItemObserver on the matching layer.
    """
    # Imported lazily: renderSetup is only available inside Maya.
    import maya.app.renderSetup.model.renderSetup as renderSetup

    rs = renderSetup.instance()
    render_sets = _get_render_instances()
    layers = rs.getRenderLayers()
    for render_set in render_sets:
        members = cmds.sets(render_set, query=True)
        if not members:
            continue
        # all sets under set should have the same namespace
        namespace = members[0].rpartition(":")[0]
        for layer in layers:
            render_layer_set_name = "{}:{}".format(namespace, layer.name())
            if render_layer_set_name not in members:
                continue
            # Keep a module-level reference so the observer isn't
            # garbage collected while the layer lives.
            rio = RenderSetupItemObserver(layer)
            print("- adding observer for {}".format(layer.name()))
            layer.addItemObserver(rio.itemChanged)
            renderItemObserverList.append(rio)
def add_render_layer_observer():
    """Register the module's list observer on the renderSetup instance."""
    # Imported lazily: renderSetup is only available inside Maya.
    import maya.app.renderSetup.model.renderSetup as renderSetup

    print("> adding renderSetup observer ...")
    rs = renderSetup.instance()
    rs.addListObserver(renderListObserver)
    # NOTE(review): trailing `pass` is redundant; kept to preserve the code
    # byte-for-byte.
    pass
def remove_render_layer_observer():
    """Unregister the module's list observer from the renderSetup instance.

    Safe to call when the observer was never registered.
    """
    # Imported lazily: renderSetup is only available inside Maya.
    import maya.app.renderSetup.model.renderSetup as renderSetup

    print("< removing renderSetup observer ...")
    rs = renderSetup.instance()
    try:
        rs.removeListObserver(renderListObserver)
    except ValueError:
        # no observer set yet
        pass
def update_content_on_context_change():
"""
This will update scene content to match new asset on context change

View file

@ -580,20 +580,11 @@ def on_save():
lib.set_id(node, new_id, overwrite=False)
def _update_render_layer_observers():
# Helper to trigger update for all renderlayer observer logic
lib.remove_render_layer_observer()
lib.add_render_layer_observer()
lib.add_render_layer_change_observer()
def on_open():
"""On scene open let's assume the containers have changed."""
from openpype.widgets import popup
utils.executeDeferred(_update_render_layer_observers)
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
lib.validate_fps()
@ -630,7 +621,6 @@ def on_new():
with lib.suspended_refresh():
lib.set_context_settings()
utils.executeDeferred(_update_render_layer_observers)
_remove_workfile_lock()

View file

@ -33,7 +33,7 @@ class ImportModelRender(InventoryAction):
)
def process(self, containers):
from maya import cmds
from maya import cmds # noqa: F401
project_name = get_current_project_name()
for container in containers:
@ -66,7 +66,7 @@ class ImportModelRender(InventoryAction):
None
"""
from maya import cmds
from maya import cmds # noqa: F401
project_name = get_current_project_name()
repre_docs = get_representations(
@ -85,12 +85,7 @@ class ImportModelRender(InventoryAction):
if scene_type_regex.fullmatch(repre_name):
look_repres.append(repre_doc)
# QUESTION should we care if there is more then one look
# representation? (since it's based on regex match)
look_repre = None
if look_repres:
look_repre = look_repres[0]
look_repre = look_repres[0] if look_repres else None
# QUESTION shouldn't be json representation validated too?
if not look_repre:
print("No model render sets for this model version..")

View file

@ -265,6 +265,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
class MayaUSDReferenceLoader(ReferenceLoader):
"""Reference USD file to native Maya nodes using MayaUSDImport reference"""
label = "Reference Maya USD"
families = ["usd"]
representations = ["usd"]
extensions = {"usd", "usda", "usdc"}

View file

@ -45,11 +45,23 @@ FILE_NODES = {
"PxrTexture": "filename"
}
RENDER_SET_TYPES = [
"VRayDisplacement",
"VRayLightMesh",
"VRayObjectProperties",
"RedshiftObjectId",
"RedshiftMeshParameters",
]
# Keep only node types that actually exist
all_node_types = set(cmds.allNodeTypes())
for node_type in list(FILE_NODES.keys()):
if node_type not in all_node_types:
FILE_NODES.pop(node_type)
for node_type in RENDER_SET_TYPES:
if node_type not in all_node_types:
RENDER_SET_TYPES.remove(node_type)
del all_node_types
# Cache pixar dependency node types so we can perform a type lookup against it
@ -69,9 +81,7 @@ def get_attributes(dictionary, attr, node=None):
else:
val = dictionary.get(attr, [])
if not isinstance(val, list):
return [val]
return val
return val if isinstance(val, list) else [val]
def get_look_attrs(node):
@ -106,7 +116,7 @@ def get_look_attrs(node):
def node_uses_image_sequence(node, node_path):
# type: (str) -> bool
# type: (str, str) -> bool
"""Return whether file node uses an image sequence or single image.
Determine if a node uses an image sequence or just a single image,
@ -114,6 +124,7 @@ def node_uses_image_sequence(node, node_path):
Args:
node (str): Name of the Maya node
node_path (str): The file path of the node
Returns:
bool: True if node uses an image sequence
@ -247,7 +258,7 @@ def get_file_node_files(node):
# For sequences get all files and filter to only existing files
result = []
for index, path in enumerate(paths):
for path in paths:
if node_uses_image_sequence(node, path):
glob_pattern = seq_to_glob(path)
result.extend(glob.glob(glob_pattern))
@ -358,6 +369,7 @@ class CollectLook(pyblish.api.InstancePlugin):
for attr in shader_attrs:
if cmds.attributeQuery(attr, node=look, exists=True):
existing_attrs.append("{}.{}".format(look, attr))
materials = cmds.listConnections(existing_attrs,
source=True,
destination=False) or []
@ -367,30 +379,32 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.debug("Found the following sets:\n{}".format(look_sets))
# Get the entire node chain of the look sets
# history = cmds.listHistory(look_sets, allConnections=True)
history = cmds.listHistory(materials, allConnections=True)
# if materials list is empty, listHistory() will crash with
# RuntimeError
history = set()
if materials:
history = set(
cmds.listHistory(materials, allConnections=True))
# Since we retrieved history only of the connected materials
# connected to the look sets above we now add direct history
# for some of the look sets directly
# handling render attribute sets
render_set_types = [
"VRayDisplacement",
"VRayLightMesh",
"VRayObjectProperties",
"RedshiftObjectId",
"RedshiftMeshParameters",
]
render_sets = cmds.ls(look_sets, type=render_set_types)
if render_sets:
history.extend(
cmds.listHistory(render_sets,
future=False,
pruneDagObjects=True)
or []
)
# Maya (at least 2024) crashes with Warning when render set type
# isn't available. cmds.ls() will return empty list
if RENDER_SET_TYPES:
render_sets = cmds.ls(look_sets, type=RENDER_SET_TYPES)
if render_sets:
history.update(
cmds.listHistory(render_sets,
future=False,
pruneDagObjects=True)
or []
)
# Ensure unique entries only
history = list(set(history))
history = list(history)
files = cmds.ls(history,
# It's important only node types are passed that

View file

@ -50,11 +50,11 @@ class ExtractRedshiftProxy(publish.Extractor):
# Padding is taken from number of digits of the end_frame.
# Not sure where Redshift is taking it.
repr_files = [
"{}.{}{}".format(root, str(frame).rjust(4, "0"), ext) # noqa: E501
"{}.{}{}".format(os.path.basename(root), str(frame).rjust(4, "0"), ext) # noqa: E501
for frame in range(
int(start_frame),
int(end_frame) + 1,
int(instance.data["step"]),
int(instance.data["step"])
)]
# vertex_colors = instance.data.get("vertexColors", False)

View file

@ -3,60 +3,76 @@ from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder,
RepairContextAction,
PublishValidationError
)
class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin):
class ValidateLookDefaultShadersConnections(pyblish.api.ContextPlugin):
"""Validate default shaders in the scene have their default connections.
For example the lambert1 could potentially be disconnected from the
initialShadingGroup. As such it's not lambert1 that will be identified
as the default shader which can have unpredictable results.
For example the standardSurface1 or lambert1 (maya 2023 and before) could
potentially be disconnected from the initialShadingGroup. As such it's not
lambert1 that will be identified as the default shader which can have
unpredictable results.
To fix the default connections need to be made again. See the logs for
more details on which connections are missing.
"""
order = ValidateContentsOrder
order = pyblish.api.ValidatorOrder - 0.4999
families = ['look']
hosts = ['maya']
label = 'Look Default Shader Connections'
actions = [RepairContextAction]
# The default connections to check
DEFAULTS = [("initialShadingGroup.surfaceShader", "lambert1"),
("initialParticleSE.surfaceShader", "lambert1"),
("initialParticleSE.volumeShader", "particleCloud1")
]
DEFAULTS = {
"initialShadingGroup.surfaceShader": ["standardSurface1.outColor",
"lambert1.outColor"],
"initialParticleSE.surfaceShader": ["standardSurface1.outColor",
"lambert1.outColor"],
"initialParticleSE.volumeShader": ["particleCloud1.outColor"]
}
def process(self, instance):
def process(self, context):
# Ensure check is run only once. We don't use ContextPlugin because
# of a bug where the ContextPlugin will always be visible. Even when
# the family is not present in an instance.
key = "__validate_look_default_shaders_connections_checked"
context = instance.context
is_run = context.data.get(key, False)
if is_run:
return
else:
context.data[key] = True
if self.get_invalid():
raise PublishValidationError(
"Default shaders in your scene do not have their "
"default shader connections. Please repair them to continue."
)
@classmethod
def get_invalid(cls):
# Process as usual
invalid = list()
for plug, input_node in self.DEFAULTS:
for plug, valid_inputs in cls.DEFAULTS.items():
inputs = cmds.listConnections(plug,
source=True,
destination=False) or None
if not inputs or inputs[0] != input_node:
self.log.error("{0} is not connected to {1}. "
"This can result in unexpected behavior. "
"Please reconnect to continue.".format(
plug,
input_node))
destination=False,
plugs=True) or None
if not inputs or inputs[0] not in valid_inputs:
cls.log.error(
"{0} is not connected to {1}. This can result in "
"unexpected behavior. Please reconnect to continue."
"".format(plug, " or ".join(valid_inputs))
)
invalid.append(plug)
if invalid:
raise PublishValidationError("Invalid connections.")
return invalid
@classmethod
def repair(cls, context):
invalid = cls.get_invalid()
for plug in invalid:
valid_inputs = cls.DEFAULTS[plug]
for valid_input in valid_inputs:
if cmds.objExists(valid_input):
cls.log.info(
"Connecting {} -> {}".format(valid_input, plug)
)
cmds.connectAttr(valid_input, plug, force=True)
break

View file

@ -111,7 +111,6 @@ class ValidateNukeWriteNode(
for value in values:
if type(node_value) in (int, float):
try:
if isinstance(value, list):
value = color_gui_to_int(value)
else:
@ -130,7 +129,7 @@ class ValidateNukeWriteNode(
and key != "file"
and key != "tile_color"
):
check.append([key, value, write_node[key].value()])
check.append([key, node_value, write_node[key].value()])
if check:
self._make_error(check)

View file

@ -298,7 +298,7 @@ def create_timeline_item(
if source_end:
clip_data["endFrame"] = source_end
if timecode_in:
clip_data["recordFrame"] = timecode_in
clip_data["recordFrame"] = timeline_in
# add to timeline
media_pool.AppendToTimeline([clip_data])

View file

@ -7,6 +7,9 @@ from openpype.tools.utils import host_tools
from openpype.pipeline import registered_host
MENU_LABEL = os.environ["AVALON_LABEL"]
def load_stylesheet():
path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
if not os.path.exists(path):
@ -39,7 +42,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)
self.setObjectName("OpenPypeMenu")
self.setObjectName(f"{MENU_LABEL}Menu")
self.setWindowFlags(
QtCore.Qt.Window
@ -49,7 +52,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
| QtCore.Qt.WindowStaysOnTopHint
)
self.setWindowTitle("OpenPype")
self.setWindowTitle(f"{MENU_LABEL}")
save_current_btn = QtWidgets.QPushButton("Save current file", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)

View file

@ -406,26 +406,42 @@ class ClipLoader:
self.active_bin
)
_clip_property = media_pool_item.GetClipProperty
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
source_duration = int(_clip_property("Frames"))
# get handles
handle_start = self.data["versionData"].get("handleStart")
handle_end = self.data["versionData"].get("handleEnd")
if handle_start is None:
handle_start = int(self.data["assetData"]["handleStart"])
if handle_end is None:
handle_end = int(self.data["assetData"]["handleEnd"])
if not self.with_handles:
# Load file without the handles of the source media
# We remove the handles from the source in and source out
# so that the handles are excluded in the timeline
handle_start = 0
handle_end = 0
# check frame duration from versionData or assetData
frame_start = self.data["versionData"].get("frameStart")
if frame_start is None:
frame_start = self.data["assetData"]["frameStart"]
# get version data frame data from db
version_data = self.data["versionData"]
frame_start = version_data.get("frameStart")
frame_end = version_data.get("frameEnd")
# check frame duration from versionData or assetData
frame_end = self.data["versionData"].get("frameEnd")
if frame_end is None:
frame_end = self.data["assetData"]["frameEnd"]
db_frame_duration = int(frame_end) - int(frame_start) + 1
# The version data usually stored the frame range + handles of the
# media however certain representations may be shorter because they
# exclude those handles intentionally. Unfortunately the
# representation does not store that in the database currently;
# so we should compensate for those cases. If the media is shorter
# than the frame range specified in the database we assume it is
# without handles and thus we do not need to remove the handles
# from source and out
if frame_start is not None and frame_end is not None:
# Version has frame range data, so we can compare media length
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_start + handle_end
database_frame_duration = int(
frame_end_handle - frame_start_handle + 1
)
if source_duration >= database_frame_duration:
source_in += handle_start
source_out -= handle_end
# get timeline in
timeline_start = self.active_timeline.GetStartFrame()
@ -437,24 +453,6 @@ class ClipLoader:
timeline_in = int(
timeline_start + self.data["assetData"]["clipIn"])
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
source_duration = int(_clip_property("Frames"))
# check if source duration is shorter than db frame duration
source_with_handles = True
if source_duration < db_frame_duration:
source_with_handles = False
# only exclude handles if source has no handles or
# if user wants to load without handles
if (
not self.with_handles
or not source_with_handles
):
source_in += handle_start
source_out -= handle_end
# make track item from source in bin as item
timeline_item = lib.create_timeline_item(
media_pool_item,
@ -868,7 +866,7 @@ class PublishClip:
def _convert_to_entity(self, key):
""" Converting input key to key with type. """
# convert to entity type
entity_type = self.types.get(key, None)
entity_type = self.types.get(key)
assert entity_type, "Missing entity type for `{}`".format(
key

View file

@ -0,0 +1,22 @@
import os
import sys

from openpype.pipeline import install_host
from openpype.lib import Logger

log = Logger.get_logger(__name__)


def main(env):
    """Install the Resolve host and open the OpenPype menu.

    Args:
        env (Mapping): Process environment. Currently unused; kept so the
            entry point matches the other host launcher scripts.

    Returns:
        The value returned by ``launch_pype_menu()`` so the module-level
        caller can derive a meaningful process exit code.
    """
    # Imported lazily: the Resolve API modules are only importable from
    # within Resolve's scripting environment.
    from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu

    # activate resolve from openpype
    host = ResolveHost()
    install_host(host)

    # BUG FIX: previously the result of launching the menu was discarded
    # and ``main`` returned None, which made ``sys.exit(not bool(result))``
    # below always exit with status 1 even on success.
    # NOTE(review): assumes launch_pype_menu() returns a truthy value on
    # success — confirm against openpype.hosts.resolve.api.
    return launch_pype_menu()


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))

View file

@ -2,6 +2,7 @@ import os
import shutil
from openpype.lib import Logger, is_running_from_build
from openpype import AYON_SERVER_ENABLED
RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@ -54,6 +55,14 @@ def setup(env):
src = os.path.join(directory, script)
dst = os.path.join(util_scripts_dir, script)
# TODO: remove this once we have a proper solution
if AYON_SERVER_ENABLED:
if "OpenPype__Menu.py" == script:
continue
else:
if "AYON__Menu.py" == script:
continue
# TODO: Make this a less hacky workaround
if script == "openpype_startup.scriptlib":
# Handle special case for scriptlib that needs to be a folder

View file

@ -536,7 +536,7 @@ def convert_for_ffmpeg(
input_frame_end=None,
logger=None
):
"""Contert source file to format supported in ffmpeg.
"""Convert source file to format supported in ffmpeg.
Currently can convert only exrs.
@ -592,29 +592,7 @@ def convert_for_ffmpeg(
oiio_cmd.extend(["--compression", compression])
# Collect channels to export
channel_names = input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)
if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
)
red, green, blue, alpha = review_channels
input_channels = [red, green, blue]
channels_arg = "R={},G={},B={}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)
input_channels_str = ",".join(input_channels)
subimages = input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded to memory so helps to avoid memory
# leak issues
# - this option is crashing if used on multipart/subimages exrs
input_arg += ":ch={}".format(input_channels_str)
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
oiio_cmd.extend([
input_arg, first_input_path,
@ -635,7 +613,7 @@ def convert_for_ffmpeg(
continue
# Remove attributes that have string value longer than allowed length
# for ffmpeg or when contain unallowed symbols
# for ffmpeg or when contain prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
@ -677,6 +655,47 @@ def convert_for_ffmpeg(
run_subprocess(oiio_cmd, logger=logger)
def get_oiio_input_and_channel_args(oiio_input_info):
    """Get input and channel arguments for oiiotool.

    Args:
        oiio_input_info (dict): Information about input from oiio tool.
            Should be output of function `get_oiio_info_for_input`.

    Returns:
        tuple[str, str]: Tuple of input and channel arguments.

    Raises:
        ValueError: When no channels usable for RGB(A) conversion are found.
    """
    review_channels = get_convert_rgb_channels(
        oiio_input_info["channelnames"]
    )
    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels

    # TODO find subimage where rgba is available for multipart exrs
    channels_arg = "R={},G={},B={}".format(red, green, blue)
    load_channels = [red, green, blue]
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
        load_channels.append(alpha)

    input_arg = "-i"
    subimages = oiio_input_info.get("subimages")
    if subimages is None or subimages == 1:
        # Tell oiiotool which channels should be loaded
        # - other channels are not loaded to memory so helps to avoid memory
        #   leak issues
        # - this option is crashing if used on multipart exrs
        input_arg += ":ch={}".format(",".join(load_channels))

    return input_arg, channels_arg
def convert_input_paths_for_ffmpeg(
input_paths,
output_dir,
@ -695,7 +714,7 @@ def convert_input_paths_for_ffmpeg(
Args:
input_paths (str): Paths that should be converted. It is expected that
contains single file or image sequence of samy type.
contains single file or image sequence of same type.
output_dir (str): Path to directory where output will be rendered.
Must not be same as input's directory.
logger (logging.Logger): Logger used for logging.
@ -709,6 +728,7 @@ def convert_input_paths_for_ffmpeg(
first_input_path = input_paths[0]
ext = os.path.splitext(first_input_path)[1].lower()
if ext != ".exr":
raise ValueError((
"Function 'convert_for_ffmpeg' currently support only"
@ -724,30 +744,7 @@ def convert_input_paths_for_ffmpeg(
compression = "none"
# Collect channels to export
channel_names = input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)
if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
)
red, green, blue, alpha = review_channels
input_channels = [red, green, blue]
# TODO find subimage inder where rgba is available for multipart exrs
channels_arg = "R={},G={},B={}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)
input_channels_str = ",".join(input_channels)
subimages = input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded to memory so helps to avoid memory
# leak issues
# - this option is crashing if used on multipart exrs
input_arg += ":ch={}".format(input_channels_str)
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
for input_path in input_paths:
# Prepare subprocess arguments
@ -774,7 +771,7 @@ def convert_input_paths_for_ffmpeg(
continue
# Remove attributes that have string value longer than allowed
# length for ffmpeg or when containing unallowed symbols
# length for ffmpeg or when containing prohibited symbols
erase_reason = "Missing reason"
erase_attribute = False
if len(attr_value) > MAX_FFMPEG_STRING_LEN:
@ -1021,9 +1018,7 @@ def _ffmpeg_h264_codec_args(stream_data, source_ffmpeg_cmd):
if pix_fmt:
output.extend(["-pix_fmt", pix_fmt])
output.extend(["-intra"])
output.extend(["-g", "1"])
output.extend(["-intra", "-g", "1"])
return output
@ -1150,7 +1145,7 @@ def convert_colorspace(
view=None,
display=None,
additional_command_args=None,
logger=None
logger=None,
):
"""Convert source file from one color space to another.
@ -1169,6 +1164,7 @@ def convert_colorspace(
view (str): name for viewer space (ocio valid)
both 'view' and 'display' must be filled (if 'target_colorspace')
display (str): name for display-referred reference space (ocio valid)
both 'view' and 'display' must be filled (if 'target_colorspace')
additional_command_args (list): arguments for oiiotool (like binary
depth for .dpx)
logger (logging.Logger): Logger used for logging.
@ -1178,14 +1174,28 @@ def convert_colorspace(
if logger is None:
logger = logging.getLogger(__name__)
input_info = get_oiio_info_for_input(input_path, logger=logger)
# Collect channels to export
input_arg, channels_arg = get_oiio_input_and_channel_args(input_info)
# Prepare subprocess arguments
oiio_cmd = get_oiio_tool_args(
"oiiotool",
input_path,
# Don't add any additional attributes
"--nosoftwareattrib",
"--colorconfig", config_path
)
oiio_cmd.extend([
input_arg, input_path,
# Tell oiiotool which channels should be put to top stack
# (and output)
"--ch", channels_arg,
# Use first subimage
"--subimage", "0"
])
if all([target_colorspace, view, display]):
raise ValueError("Colorspace and both screen and display"
" cannot be set together."

View file

@ -6,8 +6,14 @@ import getpass
import attr
from datetime import datetime
from openpype.lib import is_running_from_build
from openpype.lib import (
is_running_from_build,
BoolDef,
NumberDef,
TextDef,
)
from openpype.pipeline import legacy_io
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
from openpype.pipeline.farm.tools import iter_expected_files
from openpype.tests.lib import is_in_tests
@ -22,10 +28,11 @@ class BlenderPluginInfo():
SaveFile = attr.ib(default=True)
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
OpenPypePyblishPluginMixin):
label = "Submit Render to Deadline"
hosts = ["blender"]
families = ["render.farm"]
families = ["render"]
use_published = True
priority = 50
@ -33,6 +40,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
jobInfo = {}
pluginInfo = {}
group = None
job_delay = "00:00:00:00"
def get_job_info(self):
job_info = DeadlineJobInfo(Plugin="Blender")
@ -67,8 +75,7 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
job_info.Pool = instance.data.get("primaryPool")
job_info.SecondaryPool = instance.data.get("secondaryPool")
job_info.Comment = context.data.get("comment")
job_info.Priority = instance.data.get("priority", self.priority)
job_info.Comment = instance.data.get("comment")
if self.group != "none" and self.group:
job_info.Group = self.group
@ -83,8 +90,10 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
machine_list_key = "Blacklist"
render_globals[machine_list_key] = machine_list
job_info.Priority = attr_values.get("priority")
job_info.ChunkSize = attr_values.get("chunkSize")
job_info.ChunkSize = attr_values.get("chunkSize", self.chunk_size)
job_info.Priority = attr_values.get("priority", self.priority)
job_info.ScheduledType = "Once"
job_info.JobDelay = attr_values.get("job_delay", self.job_delay)
# Add options from RenderGlobals
render_globals = instance.data.get("renderGlobals", {})
@ -180,3 +189,39 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
the metadata and the rendered files are in the same location.
"""
return super().from_published_scene(False)
@classmethod
def get_attribute_defs(cls):
    """Extend inherited attribute definitions with Blender submit options.

    Adds publisher-facing controls (published scene usage, priority,
    chunk size, group and job delay) whose defaults come from the plugin
    class attributes.
    """
    defs = super(BlenderSubmitDeadline, cls).get_attribute_defs()
    defs.append(
        BoolDef("use_published",
                default=cls.use_published,
                label="Use Published Scene")
    )
    defs.append(
        NumberDef("priority",
                  minimum=1,
                  maximum=250,
                  decimals=0,
                  default=cls.priority,
                  label="Priority")
    )
    defs.append(
        NumberDef("chunkSize",
                  minimum=1,
                  maximum=50,
                  decimals=0,
                  default=cls.chunk_size,
                  label="Frame Per Task")
    )
    defs.append(
        TextDef("group",
                default=cls.group,
                label="Group Name")
    )
    defs.append(
        TextDef("job_delay",
                default=cls.job_delay,
                label="Job Delay",
                placeholder="dd:hh:mm:ss",
                tooltip="Delay the job by the specified amount of time. "
                        "Timecode: dd:hh:mm:ss.")
    )
    return defs

View file

@ -61,6 +61,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
additional_metadata_keys = []
def process(self, instance):
# QUESTION: should this be operating even for `farm` target?
self.log.debug("instance {}".format(instance))
instance_repres = instance.data.get("representations")
@ -143,70 +144,93 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
unmanaged_location_name = "ftrack.unmanaged"
ftrack_server_location_name = "ftrack.server"
# check if any outputName keys are in review_representations
# also check if any outputName keys are in thumbnail_representations
synced_multiple_output_names = []
for review_repre in review_representations:
review_output_name = review_repre.get("outputName")
if not review_output_name:
continue
for thumb_repre in thumbnail_representations:
thumb_output_name = thumb_repre.get("outputName")
if not thumb_output_name:
continue
if (
thumb_output_name == review_output_name
# output name can be added also as tags during intermediate
# files creation
or thumb_output_name in review_repre.get("tags", [])
):
synced_multiple_output_names.append(
thumb_repre["outputName"])
self.log.debug("Multiple output names: {}".format(
synced_multiple_output_names
))
multiple_synced_thumbnails = len(synced_multiple_output_names) > 1
# Components data
component_list = []
# Components that will be duplicated to unmanaged location
src_components_to_add = []
thumbnail_data_items = []
# Create thumbnail components
# TODO what if there is multiple thumbnails?
first_thumbnail_component = None
first_thumbnail_component_repre = None
if not review_representations or has_movie_review:
for repre in thumbnail_representations:
repre_path = get_publish_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
)
continue
# Create copy of base comp item and append it
thumbnail_item = copy.deepcopy(base_component_item)
thumbnail_item["component_path"] = repre_path
thumbnail_item["component_data"] = {
"name": "thumbnail"
}
thumbnail_item["thumbnail"] = True
# Create copy of item before setting location
if "delete" not in repre.get("tags", []):
src_components_to_add.append(copy.deepcopy(thumbnail_item))
# Create copy of first thumbnail
if first_thumbnail_component is None:
first_thumbnail_component_repre = repre
first_thumbnail_component = thumbnail_item
# Set location
thumbnail_item["component_location_name"] = (
ftrack_server_location_name
)
# Add item to component list
component_list.append(thumbnail_item)
if first_thumbnail_component is not None:
metadata = self._prepare_image_component_metadata(
first_thumbnail_component_repre,
first_thumbnail_component["component_path"]
for repre in thumbnail_representations:
# get repre path from representation
# and return published_path if available
# the path is validated and if it does not exists it returns None
repre_path = get_publish_repre_path(
instance,
repre,
only_published=False
)
if not repre_path:
self.log.warning(
"Published path is not set or source was removed."
)
continue
if metadata:
component_data = first_thumbnail_component["component_data"]
component_data["metadata"] = metadata
# Create copy of base comp item and append it
thumbnail_item = copy.deepcopy(base_component_item)
thumbnail_item.update({
"component_path": repre_path,
"component_data": {
"name": (
"thumbnail" if review_representations
else "ftrackreview-image"
),
"metadata": self._prepare_image_component_metadata(
repre,
repre_path
)
},
"thumbnail": True,
"component_location_name": ftrack_server_location_name
})
if review_representations:
component_data["name"] = "thumbnail"
else:
component_data["name"] = "ftrackreview-image"
# add thumbnail data to items for future synchronization
current_item_data = {
"sync_key": repre.get("outputName"),
"representation": repre,
"item": thumbnail_item
}
# Create copy of item before setting location
if "delete" not in repre.get("tags", []):
src_comp = self._create_src_component(
instance,
repre,
copy.deepcopy(thumbnail_item),
unmanaged_location_name
)
component_list.append(src_comp)
current_item_data["src_component"] = src_comp
# Add item to component list
thumbnail_data_items.append(current_item_data)
# Create review components
# Change asset name of each new component for review
is_first_review_repre = True
not_first_components = []
extended_asset_name = ""
multiple_reviewable = len(review_representations) > 1
for repre in review_representations:
for index, repre in enumerate(review_representations):
if not self._is_repre_video(repre) and has_movie_review:
self.log.debug("Movie repre has priority "
"from {}".format(repre))
@ -222,45 +246,50 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
# Create copy of base comp item and append it
review_item = copy.deepcopy(base_component_item)
# get asset name and define extended name variant
asset_name = review_item["asset_data"]["name"]
extended_asset_name = "_".join(
(asset_name, repre["name"])
# get first or synchronize thumbnail item
sync_thumbnail_item = None
sync_thumbnail_item_src = None
sync_thumbnail_data = self._get_matching_thumbnail_item(
repre,
thumbnail_data_items,
multiple_synced_thumbnails
)
if sync_thumbnail_data:
sync_thumbnail_item = sync_thumbnail_data.get("item")
sync_thumbnail_item_src = sync_thumbnail_data.get(
"src_component")
# reset extended if no need for extended asset name
if (
self.keep_first_subset_name_for_review
and is_first_review_repre
):
extended_asset_name = ""
else:
# only rename if multiple reviewable
if multiple_reviewable:
review_item["asset_data"]["name"] = extended_asset_name
else:
extended_asset_name = ""
"""
Renaming asset name only to those components which are explicitly
allowed in settings. Usually clients wanted to keep first component
as untouched product name with version and any other assetVersion
to be named with extended form. The renaming will only happen if
there is more than one reviewable component and extended name is
not empty.
"""
extended_asset_name = self._make_extended_component_name(
base_component_item, repre, index)
# rename all already created components
# only if first repre and extended name available
if is_first_review_repre and extended_asset_name:
# and rename all already created components
for _ci in component_list:
_ci["asset_data"]["name"] = extended_asset_name
if multiple_reviewable and extended_asset_name:
review_item["asset_data"]["name"] = extended_asset_name
# rename also thumbnail
if sync_thumbnail_item:
sync_thumbnail_item["asset_data"]["name"] = (
extended_asset_name
)
# rename also src_thumbnail
if sync_thumbnail_item_src:
sync_thumbnail_item_src["asset_data"]["name"] = (
extended_asset_name
)
# and rename all already created src components
for _sci in src_components_to_add:
_sci["asset_data"]["name"] = extended_asset_name
# rename also first thumbnail component if any
if first_thumbnail_component is not None:
first_thumbnail_component[
"asset_data"]["name"] = extended_asset_name
# Change location
review_item["component_path"] = repre_path
# Change component data
# adding thumbnail component to component list
if sync_thumbnail_item:
component_list.append(copy.deepcopy(sync_thumbnail_item))
if sync_thumbnail_item_src:
component_list.append(copy.deepcopy(sync_thumbnail_item_src))
# add metadata to review component
if self._is_repre_video(repre):
component_name = "ftrackreview-mp4"
metadata = self._prepare_video_component_metadata(
@ -273,28 +302,29 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
review_item["thumbnail"] = True
review_item["component_data"] = {
# Default component name is "main".
"name": component_name,
"metadata": metadata
}
if is_first_review_repre:
is_first_review_repre = False
else:
# later detection for thumbnail duplication
not_first_components.append(review_item)
review_item.update({
"component_path": repre_path,
"component_data": {
"name": component_name,
"metadata": metadata
},
"component_location_name": ftrack_server_location_name
})
# Create copy of item before setting location
if "delete" not in repre.get("tags", []):
src_components_to_add.append(copy.deepcopy(review_item))
src_comp = self._create_src_component(
instance,
repre,
copy.deepcopy(review_item),
unmanaged_location_name
)
component_list.append(src_comp)
# Set location
review_item["component_location_name"] = (
ftrack_server_location_name
)
# Add item to component list
component_list.append(review_item)
if self.upload_reviewable_with_origin_name:
origin_name_component = copy.deepcopy(review_item)
filename = os.path.basename(repre_path)
@ -303,34 +333,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
component_list.append(origin_name_component)
# Duplicate thumbnail component for all not first reviews
if first_thumbnail_component is not None:
for component_item in not_first_components:
asset_name = component_item["asset_data"]["name"]
new_thumbnail_component = copy.deepcopy(
first_thumbnail_component
)
new_thumbnail_component["asset_data"]["name"] = asset_name
new_thumbnail_component["component_location_name"] = (
ftrack_server_location_name
)
component_list.append(new_thumbnail_component)
# Add source components for review and thubmnail components
for copy_src_item in src_components_to_add:
# Make sure thumbnail is disabled
copy_src_item["thumbnail"] = False
# Set location
copy_src_item["component_location_name"] = unmanaged_location_name
# Modify name of component to have suffix "_src"
component_data = copy_src_item["component_data"]
component_name = component_data["name"]
component_data["name"] = component_name + "_src"
component_data["metadata"] = self._prepare_component_metadata(
instance, repre, copy_src_item["component_path"], False
)
component_list.append(copy_src_item)
# Add others representations as component
for repre in other_representations:
published_path = get_publish_repre_path(instance, repre, True)
@ -346,15 +348,17 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
):
other_item["asset_data"]["name"] = extended_asset_name
component_data = {
"name": repre["name"],
"metadata": self._prepare_component_metadata(
instance, repre, published_path, False
)
}
other_item["component_data"] = component_data
other_item["component_location_name"] = unmanaged_location_name
other_item["component_path"] = published_path
other_item.update({
"component_path": published_path,
"component_data": {
"name": repre["name"],
"metadata": self._prepare_component_metadata(
instance, repre, published_path, False
)
},
"component_location_name": unmanaged_location_name,
})
component_list.append(other_item)
def json_obj_parser(obj):
@ -370,6 +374,124 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
))
instance.data["ftrackComponentsList"] = component_list
def _get_matching_thumbnail_item(
self,
review_representation,
thumbnail_data_items,
are_multiple_synced_thumbnails
):
"""Return matching thumbnail item from list of thumbnail items.
If a thumbnail item already exists, this should return it.
The benefit is that if an `outputName` key is found in
representation and is also used as a `sync_key` in a thumbnail
data item, it can sync with that item.
Args:
review_representation (dict): Review representation
thumbnail_data_items (list): List of thumbnail data items
are_multiple_synced_thumbnails (bool): If there are multiple synced
thumbnails
Returns:
dict: Thumbnail data item or empty dict
"""
output_name = review_representation.get("outputName")
tags = review_representation.get("tags", [])
matching_thumbnail_item = {}
for thumb_item in thumbnail_data_items:
if (
are_multiple_synced_thumbnails
and (
thumb_item["sync_key"] == output_name
# intermediate files can have preset name in tags
# this is usually aligned with `outputName` distributed
# during thumbnail creation in `need_thumbnail` tagging
# workflow
or thumb_item["sync_key"] in tags
)
):
# return only synchronized thumbnail if multiple
matching_thumbnail_item = thumb_item
break
elif not are_multiple_synced_thumbnails:
# return any first found thumbnail since we need thumbnail
# but dont care which one
matching_thumbnail_item = thumb_item
break
if not matching_thumbnail_item:
# WARNING: this can only happen if multiple thumbnails
# workflow is broken, since it found multiple matching outputName
# in representation but they do not align with any thumbnail item
self.log.warning(
"No matching thumbnail item found for output name "
"'{}'".format(output_name)
)
if not thumbnail_data_items:
self.log.warning(
"No thumbnail data items found"
)
return {}
# as fallback return first thumbnail item
return thumbnail_data_items[0]
return matching_thumbnail_item
def _make_extended_component_name(
self, component_item, repre, iteration_index):
""" Returns the extended component name
Name is based on the asset name and representation name.
Args:
component_item (dict): The component item dictionary.
repre (dict): The representation dictionary.
iteration_index (int): The index of the iteration.
Returns:
str: The extended component name.
"""
# reset extended if no need for extended asset name
if self.keep_first_subset_name_for_review and iteration_index == 0:
return
# get asset name and define extended name variant
asset_name = component_item["asset_data"]["name"]
return "_".join(
(asset_name, repre["name"])
)
def _create_src_component(
self, instance, repre, component_item, location):
"""Create src component for thumbnail.
This will replicate the input component and change its name to
have suffix "_src".
Args:
instance (pyblish.api.Instance): Instance
repre (dict): Representation
component_item (dict): Component item
location (str): Location name
Returns:
dict: Component item
"""
# Make sure thumbnail is disabled
component_item["thumbnail"] = False
# Set location
component_item["component_location_name"] = location
# Modify name of component to have suffix "_src"
component_data = component_item["component_data"]
component_name = component_data["name"]
component_data["name"] = component_name + "_src"
component_data["metadata"] = self._prepare_component_metadata(
instance, repre, component_item["component_path"], False
)
return component_item
def _collect_additional_metadata(self, streams):
pass
@ -472,9 +594,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
stream_width = tmp_width
stream_height = tmp_height
self.log.debug("FPS from stream is {} and duration is {}".format(
input_framerate, stream_duration
))
frame_out = float(stream_duration) * stream_fps
break

View file

@ -7,6 +7,8 @@ from openpype.pipeline.plugin_discover import (
deregister_plugin_path
)
from .load.utils import get_representation_path_from_context
class LauncherAction(object):
"""A custom action available"""
@ -100,6 +102,10 @@ class InventoryAction(object):
"""
return True
@classmethod
def filepath_from_context(cls, context):
return get_representation_path_from_context(context)
# Launcher action
def discover_launcher_actions():

View file

@ -19,6 +19,7 @@ from abc import ABCMeta, abstractmethod
import six
from openpype import AYON_SERVER_ENABLED
from openpype.client import (
get_asset_by_name,
get_linked_assets,
@ -1272,31 +1273,54 @@ class PlaceholderLoadMixin(object):
# Sort for readability
families = list(sorted(families))
return [
if AYON_SERVER_ENABLED:
builder_type_enum_items = [
{"label": "Current folder", "value": "context_folder"},
# TODO implement linked folders
# {"label": "Linked folders", "value": "linked_folders"},
{"label": "All folders", "value": "all_folders"},
]
build_type_label = "Folder Builder Type"
build_type_help = (
"Folder Builder Type\n"
"\nBuilder type describe what template loader will look"
" for."
"\nCurrent Folder: Template loader will look for products"
" of current context folder (Folder /assets/bob will"
" find asset)"
"\nAll folders: All folders matching the regex will be"
" used."
)
else:
builder_type_enum_items = [
{"label": "Current asset", "value": "context_asset"},
{"label": "Linked assets", "value": "linked_asset"},
{"label": "All assets", "value": "all_assets"},
]
build_type_label = "Asset Builder Type"
build_type_help = (
"Asset Builder Type\n"
"\nBuilder type describe what template loader will look"
" for."
"\ncontext_asset : Template loader will look for subsets"
" of current context asset (Asset bob will find asset)"
"\nlinked_asset : Template loader will look for assets"
" linked to current context asset."
"\nLinked asset are looked in database under"
" field \"inputLinks\""
)
attr_defs = [
attribute_definitions.UISeparatorDef(),
attribute_definitions.UILabelDef("Main attributes"),
attribute_definitions.UISeparatorDef(),
attribute_definitions.EnumDef(
"builder_type",
label="Asset Builder Type",
label=build_type_label,
default=options.get("builder_type"),
items=[
{"label": "Current asset", "value": "context_asset"},
{"label": "Linked assets", "value": "linked_asset"},
{"label": "All assets", "value": "all_assets"},
],
tooltip=(
"Asset Builder Type\n"
"\nBuilder type describe what template loader will look"
" for."
"\ncontext_asset : Template loader will look for subsets"
" of current context asset (Asset bob will find asset)"
"\nlinked_asset : Template loader will look for assets"
" linked to current context asset."
"\nLinked asset are looked in database under"
" field \"inputLinks\""
)
items=builder_type_enum_items,
tooltip=build_type_help
),
attribute_definitions.EnumDef(
"family",
@ -1352,34 +1376,63 @@ class PlaceholderLoadMixin(object):
attribute_definitions.UISeparatorDef(),
attribute_definitions.UILabelDef("Optional attributes"),
attribute_definitions.UISeparatorDef(),
attribute_definitions.TextDef(
"asset",
label="Asset filter",
default=options.get("asset"),
placeholder="regex filtering by asset name",
tooltip=(
"Filtering assets by matching field regex to asset's name"
)
),
attribute_definitions.TextDef(
"subset",
label="Subset filter",
default=options.get("subset"),
placeholder="regex filtering by subset name",
tooltip=(
"Filtering assets by matching field regex to subset's name"
)
),
attribute_definitions.TextDef(
"hierarchy",
label="Hierarchy filter",
default=options.get("hierarchy"),
placeholder="regex filtering by asset's hierarchy",
tooltip=(
"Filtering assets by matching field asset's hierarchy"
)
)
]
if AYON_SERVER_ENABLED:
attr_defs.extend([
attribute_definitions.TextDef(
"folder_path",
label="Folder filter",
default=options.get("folder_path"),
placeholder="regex filtering by folder path",
tooltip=(
"Filtering assets by matching"
" field regex to folder path"
)
),
attribute_definitions.TextDef(
"product_name",
label="Product filter",
default=options.get("product_name"),
placeholder="regex filtering by product name",
tooltip=(
"Filtering assets by matching"
" field regex to product name"
)
),
])
else:
attr_defs.extend([
attribute_definitions.TextDef(
"asset",
label="Asset filter",
default=options.get("asset"),
placeholder="regex filtering by asset name",
tooltip=(
"Filtering assets by matching"
" field regex to asset's name"
)
),
attribute_definitions.TextDef(
"subset",
label="Subset filter",
default=options.get("subset"),
placeholder="regex filtering by subset name",
tooltip=(
"Filtering assets by matching"
" field regex to subset's name"
)
),
attribute_definitions.TextDef(
"hierarchy",
label="Hierarchy filter",
default=options.get("hierarchy"),
placeholder="regex filtering by asset's hierarchy",
tooltip=(
"Filtering assets by matching field asset's hierarchy"
)
)
])
return attr_defs
def parse_loader_args(self, loader_args):
"""Helper function to parse string of loader arugments.
@ -1409,6 +1462,117 @@ class PlaceholderLoadMixin(object):
return {}
    def _query_by_folder_regex(self, project_name, folder_regex):
        """Query folders by folder path regex.

        WARNING:
            This method will be removed once the same functionality is
            available in ayon-python-api.

        Args:
            project_name (str): Project name.
            folder_regex (str): Regex for folder path.

        Yields:
            str: Folder ids of folders whose path matches the regex.
        """
        # NOTE(review): This builds the GraphQL query through private
        #   ayon_api attributes ('_children', '_variables'), which is
        #   brittle against ayon-python-api updates — hence the removal
        #   warning above.
        from ayon_api.graphql_queries import folders_graphql_query
        from openpype.client import get_ayon_server_api_connection

        # Query only folder ids; filtering happens server-side.
        query = folders_graphql_query({"id"})
        # Locate the 'project/folders' field so a path-regex filter can
        # be attached to it.
        folders_field = None
        for child in query._children:
            if child.path != "project":
                continue
            for project_child in child._children:
                if project_child.path == "project/folders":
                    folders_field = project_child
                    break
            if folders_field:
                break

        # Attach the 'pathEx' filter variable only once per query object.
        if "folderPathRegex" not in query._variables:
            folder_path_regex_var = query.add_variable(
                "folderPathRegex", "String!"
            )
            folders_field.set_filter("pathEx", folder_path_regex_var)

        query.set_variable_value("projectName", project_name)
        # An empty/None regex leaves the filter variable unset -> no
        # path filtering is applied by the server.
        if folder_regex:
            query.set_variable_value("folderPathRegex", folder_regex)

        api = get_ayon_server_api_connection()
        # 'continuous_query' pages through results; yield ids lazily.
        for parsed_data in query.continuous_query(api):
            for folder in parsed_data["project"]["folders"]:
                yield folder["id"]
    def _get_representations_ayon(self, placeholder):
        """Query representations for a placeholder on an AYON server.

        AYON counterpart of '_get_representations': resolves folders by
        the placeholder's builder type, filters products by regex and
        type, takes the last version of each product and returns its
        representations matching the requested representation name.

        Args:
            placeholder: Placeholder item with 'data' dict containing
                'representation', 'folder_path', 'product_name',
                'family' and 'builder_type' keys.

        Returns:
            list[dict]: Representation documents (may be empty).
        """
        # An OpenPype placeholder loaded in AYON
        # (OpenPype placeholders store 'asset' key instead of
        # 'folder_path' -> not resolvable here).
        if "asset" in placeholder.data:
            return []

        representation_name = placeholder.data["representation"]
        if not representation_name:
            return []

        project_name = self.builder.project_name
        current_asset_doc = self.builder.current_asset_doc

        folder_path_regex = placeholder.data["folder_path"]
        product_name_regex_value = placeholder.data["product_name"]
        product_name_regex = None
        if product_name_regex_value:
            product_name_regex = re.compile(product_name_regex_value)
        product_type = placeholder.data["family"]

        # NOTE(review): only 'context_folder' and 'all_folders' builder
        #   types are handled; confirm these literals match the values
        #   offered by the builder-type enum for AYON placeholders.
        builder_type = placeholder.data["builder_type"]
        folder_ids = []
        if builder_type == "context_folder":
            folder_ids = [current_asset_doc["_id"]]

        elif builder_type == "all_folders":
            folder_ids = list(self._query_by_folder_regex(
                project_name, folder_path_regex
            ))

        # No folders resolved (unknown builder type or regex matched
        # nothing) -> nothing to load.
        if not folder_ids:
            return []

        from ayon_api import get_products, get_last_versions

        products = list(get_products(
            project_name,
            folder_ids=folder_ids,
            product_types=[product_type],
            fields={"id", "name"}
        ))

        # Keep only products whose name matches the optional regex.
        filtered_product_ids = set()
        for product in products:
            if (
                product_name_regex is None
                or product_name_regex.match(product["name"])
            ):
                filtered_product_ids.add(product["id"])

        if not filtered_product_ids:
            return []

        # Only the latest version of each product is considered.
        version_ids = set(
            version["id"]
            for version in get_last_versions(
                project_name, filtered_product_ids, fields={"id"}
            ).values()
        )
        return list(get_representations(
            project_name,
            representation_names=[representation_name],
            version_ids=version_ids
        ))
def _get_representations(self, placeholder):
"""Prepared query of representations based on load options.
@ -1428,6 +1592,13 @@ class PlaceholderLoadMixin(object):
from placeholder data.
"""
if AYON_SERVER_ENABLED:
return self._get_representations_ayon(placeholder)
# An AYON placeholder loaded in OpenPype
if "folder_path" in placeholder.data:
return []
project_name = self.builder.project_name
current_asset_doc = self.builder.current_asset_doc
linked_asset_docs = self.builder.linked_asset_docs

View file

@ -5,12 +5,12 @@ import tempfile
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_args,
get_oiio_tool_args,
is_oiio_supported,
run_subprocess,
path_to_subprocess_arg,
)
from openpype.lib.transcoding import convert_colorspace
class ExtractThumbnail(pyblish.api.InstancePlugin):
@ -25,7 +25,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
hosts = ["shell", "fusion", "resolve", "traypublisher", "substancepainter"]
enabled = False
# presetable attribute
# attribute presets from settings
oiiotool_defaults = None
ffmpeg_args = None
def process(self, instance):
@ -94,17 +95,26 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
filename = os.path.splitext(input_file)[0]
jpeg_file = filename + "_thumb.jpg"
full_output_path = os.path.join(dst_staging, jpeg_file)
colorspace_data = repre.get("colorspaceData")
if oiio_supported:
self.log.debug("Trying to convert with OIIO")
# only use OIIO if it is supported and representation has
# colorspace data
if oiio_supported and colorspace_data:
self.log.debug(
"Trying to convert with OIIO "
"with colorspace data: {}".format(colorspace_data)
)
# If the input can read by OIIO then use OIIO method for
# conversion otherwise use ffmpeg
thumbnail_created = self.create_thumbnail_oiio(
full_input_path, full_output_path
full_input_path,
full_output_path,
colorspace_data
)
# Try to use FFMPEG if OIIO is not supported or for cases when
# oiiotool isn't available
# oiiotool isn't available or representation is not having
# colorspace data
if not thumbnail_created:
if oiio_supported:
self.log.debug(
@ -138,7 +148,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
break
if not thumbnail_created:
self.log.warning("Thumbanil has not been created.")
self.log.warning("Thumbnail has not been created.")
def _is_review_instance(self, instance):
# TODO: We should probably handle "not creating" of thumbnail
@ -173,17 +183,66 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
filtered_repres.append(repre)
return filtered_repres
def create_thumbnail_oiio(self, src_path, dst_path):
self.log.debug("Extracting thumbnail with OIIO: {}".format(dst_path))
oiio_cmd = get_oiio_tool_args(
"oiiotool",
"-a", src_path,
"-o", dst_path
)
self.log.debug("running: {}".format(" ".join(oiio_cmd)))
def create_thumbnail_oiio(
self,
src_path,
dst_path,
colorspace_data,
):
"""Create thumbnail using OIIO tool oiiotool
Args:
src_path (str): path to source file
dst_path (str): path to destination file
colorspace_data (dict): colorspace data from representation
keys:
colorspace (str)
config (dict)
display (Optional[str])
view (Optional[str])
Returns:
str: path to created thumbnail
"""
self.log.info("Extracting thumbnail {}".format(dst_path))
repre_display = colorspace_data.get("display")
repre_view = colorspace_data.get("view")
oiio_default_type = None
oiio_default_display = None
oiio_default_view = None
oiio_default_colorspace = None
# first look into representation colorspaceData, perhaps it has
# display and view
if all([repre_display, repre_view]):
self.log.info(
"Using Display & View from "
"representation: '{} ({})'".format(
repre_view,
repre_display
)
)
# if representation doesn't have display and view then use
# oiiotool_defaults
elif self.oiiotool_defaults:
oiio_default_type = self.oiiotool_defaults["type"]
if "colorspace" in oiio_default_type:
oiio_default_colorspace = self.oiiotool_defaults["colorspace"]
else:
oiio_default_display = self.oiiotool_defaults["display"]
oiio_default_view = self.oiiotool_defaults["view"]
try:
run_subprocess(oiio_cmd, logger=self.log)
return True
convert_colorspace(
src_path,
dst_path,
colorspace_data["config"]["path"],
colorspace_data["colorspace"],
display=repre_display or oiio_default_display,
view=repre_view or oiio_default_view,
target_colorspace=oiio_default_colorspace,
logger=self.log,
)
except Exception:
self.log.warning(
"Failed to create thumbnail using oiiotool",
@ -191,6 +250,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
)
return False
return True
def create_thumbnail_ffmpeg(self, src_path, dst_path):
self.log.debug("Extracting thumbnail with FFMPEG: {}".format(dst_path))

View file

@ -157,8 +157,8 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
):
from openpype.client.server.operations import create_thumbnail
op_session = OperationsSession()
# Make sure each entity id has defined only one thumbnail id
thumbnail_info_by_entity_id = {}
for instance_item in filtered_instance_items:
instance, thumbnail_path, version_id = instance_item
instance_label = self._get_instance_label(instance)
@ -172,12 +172,10 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
thumbnail_id = create_thumbnail(project_name, thumbnail_path)
# Set thumbnail id for version
op_session.update_entity(
project_name,
version_doc["type"],
version_doc["_id"],
{"data.thumbnail_id": thumbnail_id}
)
thumbnail_info_by_entity_id[version_id] = {
"thumbnail_id": thumbnail_id,
"entity_type": version_doc["type"],
}
if version_doc["type"] == "hero_version":
version_name = "Hero"
else:
@ -187,16 +185,23 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
))
asset_entity = instance.data["assetEntity"]
op_session.update_entity(
project_name,
asset_entity["type"],
asset_entity["_id"],
{"data.thumbnail_id": thumbnail_id}
)
thumbnail_info_by_entity_id[asset_entity["_id"]] = {
"thumbnail_id": thumbnail_id,
"entity_type": "asset",
}
self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
asset_entity["name"], version_id
))
op_session = OperationsSession()
for entity_id, thumbnail_info in thumbnail_info_by_entity_id.items():
thumbnail_id = thumbnail_info["thumbnail_id"]
op_session.update_entity(
project_name,
thumbnail_info["entity_type"],
entity_id,
{"data.thumbnail_id": thumbnail_id}
)
op_session.commit()
def _get_instance_label(self, instance):

View file

@ -107,7 +107,8 @@
"use_published": true,
"priority": 50,
"chunk_size": 10,
"group": "none"
"group": "none",
"job_delay": "00:00:00:00"
},
"ProcessSubmittedCacheJobOnFarm": {
"enabled": true,

View file

@ -70,6 +70,12 @@
},
"ExtractThumbnail": {
"enabled": true,
"oiiotool_defaults": {
"type": "colorspace",
"colorspace": "color_picking",
"view": "sRGB",
"display": "default"
},
"ffmpeg_args": {
"input": [
"-apply_trc gamma22"

View file

@ -581,6 +581,11 @@
"type": "text",
"key": "group",
"label": "Group Name"
},
{
"type": "text",
"key": "job_delay",
"label": "Delay job (timecode dd:hh:mm:ss)"
}
]
},

View file

@ -202,6 +202,38 @@
"key": "enabled",
"label": "Enabled"
},
{
"type": "dict",
"collapsible": true,
"key": "oiiotool_defaults",
"label": "OIIOtool defaults",
"children": [
{
"type": "enum",
"key": "type",
"label": "Target type",
"enum_items": [
{ "colorspace": "Colorspace" },
{ "display_and_view": "Display & View" }
]
},
{
"type": "text",
"key": "colorspace",
"label": "Colorspace"
},
{
"type": "text",
"key": "view",
"label": "View"
},
{
"type": "text",
"key": "display",
"label": "Display"
}
]
},
{
"type": "dict",
"key": "ffmpeg_args",

View file

@ -608,7 +608,7 @@ class UnknownAttrWidget(_BaseAttrDefWidget):
class HiddenAttrWidget(_BaseAttrDefWidget):
def _ui_init(self):
self.setVisible(False)
self._value = None
self._value = self.attr_def.default
self._multivalue = False
def setVisible(self, visible):

View file

@ -137,7 +137,7 @@ class VersionItem:
handles,
step,
comment,
source
source,
):
self.version_id = version_id
self.product_id = product_id
@ -215,7 +215,7 @@ class RepreItem:
representation_name,
representation_icon,
product_name,
folder_label,
folder_label
):
self.representation_id = representation_id
self.representation_name = representation_name
@ -590,6 +590,22 @@ class FrontendLoaderController(_BaseLoaderController):
pass
@abstractmethod
def get_versions_representation_count(
self, project_name, version_ids, sender=None
):
"""
Args:
project_name (str): Project name.
version_ids (Iterable[str]): Version ids.
sender (Optional[str]): Sender who requested the items.
Returns:
dict[str, int]: Representation count by version id.
"""
pass
@abstractmethod
def get_thumbnail_path(self, project_name, thumbnail_id):
"""Get thumbnail path for thumbnail id.
@ -849,3 +865,80 @@ class FrontendLoaderController(_BaseLoaderController):
"""
pass
# Site sync functions
@abstractmethod
def is_site_sync_enabled(self, project_name=None):
"""Is site sync enabled.
Site sync addon can be enabled but can be disabled per project.
When asked for enabled state without project name, it should return
True if site sync addon is available and enabled.
Args:
project_name (Optional[str]): Project name.
Returns:
bool: True if site sync is enabled.
"""
pass
@abstractmethod
def get_active_site_icon_def(self, project_name):
"""Active site icon definition.
Args:
project_name (Union[str, None]): Project name.
Returns:
Union[dict[str, Any], None]: Icon definition or None if site sync
is not enabled for the project.
"""
pass
@abstractmethod
def get_remote_site_icon_def(self, project_name):
"""Remote site icon definition.
Args:
project_name (Union[str, None]): Project name.
Returns:
Union[dict[str, Any], None]: Icon definition or None if site sync
is not enabled for the project.
"""
pass
@abstractmethod
def get_version_sync_availability(self, project_name, version_ids):
"""Version sync availability.
Args:
project_name (str): Project name.
version_ids (Iterable[str]): Version ids.
Returns:
dict[str, tuple[int, int]]: Sync availability by version id.
"""
pass
@abstractmethod
def get_representations_sync_status(
self, project_name, representation_ids
):
"""Representations sync status.
Args:
project_name (str): Project name.
representation_ids (Iterable[str]): Representation ids.
Returns:
dict[str, tuple[int, int]]: Sync status by representation id.
"""
pass

View file

@ -15,7 +15,12 @@ from openpype.tools.ayon_utils.models import (
)
from .abstract import BackendLoaderController, FrontendLoaderController
from .models import SelectionModel, ProductsModel, LoaderActionsModel
from .models import (
SelectionModel,
ProductsModel,
LoaderActionsModel,
SiteSyncModel
)
class ExpectedSelection:
@ -108,6 +113,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
self._products_model = ProductsModel(self)
self._loader_actions_model = LoaderActionsModel(self)
self._thumbnails_model = ThumbnailsModel()
self._site_sync_model = SiteSyncModel(self)
@property
def log(self):
@ -143,6 +149,7 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
self._loader_actions_model.reset()
self._projects_model.reset()
self._thumbnails_model.reset()
self._site_sync_model.reset()
self._projects_model.refresh()
@ -195,13 +202,22 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
project_name, version_ids, sender
)
def get_versions_representation_count(
self, project_name, version_ids, sender=None
):
return self._products_model.get_versions_repre_count(
project_name, version_ids, sender
)
def get_folder_thumbnail_ids(self, project_name, folder_ids):
return self._thumbnails_model.get_folder_thumbnail_ids(
project_name, folder_ids)
project_name, folder_ids
)
def get_version_thumbnail_ids(self, project_name, version_ids):
return self._thumbnails_model.get_version_thumbnail_ids(
project_name, version_ids)
project_name, version_ids
)
def get_thumbnail_path(self, project_name, thumbnail_id):
return self._thumbnails_model.get_thumbnail_path(
@ -219,8 +235,16 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
def get_representations_action_items(
self, project_name, representation_ids):
return self._loader_actions_model.get_representations_action_items(
action_items = (
self._loader_actions_model.get_representations_action_items(
project_name, representation_ids)
)
action_items.extend(self._site_sync_model.get_site_sync_action_items(
project_name, representation_ids)
)
return action_items
def trigger_action_item(
self,
@ -230,6 +254,14 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
version_ids,
representation_ids
):
if self._site_sync_model.is_site_sync_action(identifier):
self._site_sync_model.trigger_action_item(
identifier,
project_name,
representation_ids
)
return
self._loader_actions_model.trigger_action_item(
identifier,
options,
@ -336,6 +368,27 @@ class LoaderController(BackendLoaderController, FrontendLoaderController):
self._loaded_products_cache.update_data(product_ids)
return self._loaded_products_cache.get_data()
def is_site_sync_enabled(self, project_name=None):
return self._site_sync_model.is_site_sync_enabled(project_name)
def get_active_site_icon_def(self, project_name):
return self._site_sync_model.get_active_site_icon_def(project_name)
def get_remote_site_icon_def(self, project_name):
return self._site_sync_model.get_remote_site_icon_def(project_name)
def get_version_sync_availability(self, project_name, version_ids):
return self._site_sync_model.get_version_sync_availability(
project_name, version_ids
)
def get_representations_sync_status(
self, project_name, representation_ids
):
return self._site_sync_model.get_representations_sync_status(
project_name, representation_ids
)
def is_loaded_products_supported(self):
return self._host is not None

View file

@ -1,10 +1,12 @@
from .selection import SelectionModel
from .products import ProductsModel
from .actions import LoaderActionsModel
from .site_sync import SiteSyncModel
__all__ = (
"SelectionModel",
"ProductsModel",
"LoaderActionsModel",
"SiteSyncModel",
)

View file

@ -317,6 +317,42 @@ class ProductsModel:
return output
def get_versions_repre_count(self, project_name, version_ids, sender):
"""Get representation count for passed version ids.
Args:
project_name (str): Project name.
version_ids (Iterable[str]): Version ids.
sender (Union[str, None]): Who triggered the method.
Returns:
dict[str, int]: Number of representations by version id.
"""
output = {}
if not any((project_name, version_ids)):
return output
invalid_version_ids = set()
project_cache = self._repre_items_cache[project_name]
for version_id in version_ids:
version_cache = project_cache[version_id]
if version_cache.is_valid:
output[version_id] = len(version_cache.get_data())
else:
invalid_version_ids.add(version_id)
if invalid_version_ids:
self.refresh_representation_items(
project_name, invalid_version_ids, sender
)
for version_id in invalid_version_ids:
version_cache = project_cache[version_id]
output[version_id] = len(version_cache.get_data())
return output
def change_products_group(self, project_name, product_ids, group_name):
"""Change group name for passed product ids.

View file

@ -0,0 +1,509 @@
import collections
from openpype.lib import Logger
from openpype.client.entities import get_representations
from openpype.client import get_linked_representation_id
from openpype.modules import ModulesManager
from openpype.tools.ayon_utils.models import NestedCacheItem
from openpype.tools.ayon_loader.abstract import ActionItem
# Identifiers of the loader actions this model injects into the loader UI.
DOWNLOAD_IDENTIFIER = "sitesync.download"
UPLOAD_IDENTIFIER = "sitesync.upload"
REMOVE_IDENTIFIER = "sitesync.remove"

log = Logger.get_logger(__name__)
def _default_version_availability():
return 0, 0
def _default_repre_status():
return 0.0, 0.0
class SiteSyncModel:
    """Model handling site sync logic.

    Model cares about handling of site sync functionality. All public
    functions should be possible to call even if site sync is not available.
    """

    lifetime = 60  # In seconds (minute by default)
    # Availability/status values change more often -> shorter lifetime
    status_lifetime = 20

    def __init__(self, controller):
        self._controller = controller

        self._site_icons = None
        self._site_sync_enabled_cache = NestedCacheItem(
            levels=1, lifetime=self.lifetime
        )
        self._active_site_cache = NestedCacheItem(
            levels=1, lifetime=self.lifetime
        )
        self._remote_site_cache = NestedCacheItem(
            levels=1, lifetime=self.lifetime
        )
        self._version_availability_cache = NestedCacheItem(
            levels=2,
            default_factory=_default_version_availability,
            lifetime=self.status_lifetime
        )
        self._repre_status_cache = NestedCacheItem(
            levels=2,
            default_factory=_default_repre_status,
            lifetime=self.status_lifetime
        )

        # 'None' when the site sync addon is not available at all
        manager = ModulesManager()
        self._site_sync_addon = manager.get("sync_server")

    def reset(self):
        """Drop all cached values so the next access re-queries the addon."""
        self._site_icons = None
        self._site_sync_enabled_cache.reset()
        self._active_site_cache.reset()
        self._remote_site_cache.reset()
        self._version_availability_cache.reset()
        self._repre_status_cache.reset()

    def is_site_sync_enabled(self, project_name=None):
        """Site sync is enabled for a project.

        Returns false if site sync addon is not available or enabled
        or project has disabled it.

        Args:
            project_name (Union[str, None]): Project name. If project name
                is 'None', True is returned if site sync addon
                is available and enabled.

        Returns:
            bool: Site sync is enabled.
        """
        if not self._is_site_sync_addon_enabled():
            return False
        cache = self._site_sync_enabled_cache[project_name]
        if not cache.is_valid:
            enabled = True
            if project_name:
                enabled = self._site_sync_addon.is_project_enabled(
                    project_name, single=True
                )
            cache.update_data(enabled)
        return cache.get_data()

    def get_active_site(self, project_name):
        """Active site name for a project.

        Args:
            project_name (str): Project name.

        Returns:
            Union[str, None]: Active site name.
        """
        cache = self._active_site_cache[project_name]
        if not cache.is_valid:
            site_name = None
            if project_name and self._is_site_sync_addon_enabled():
                site_name = self._site_sync_addon.get_active_site(project_name)
            cache.update_data(site_name)
        return cache.get_data()

    def get_remote_site(self, project_name):
        """Remote site name for a project.

        Args:
            project_name (str): Project name.

        Returns:
            Union[str, None]: Remote site name.
        """
        cache = self._remote_site_cache[project_name]
        if not cache.is_valid:
            site_name = None
            if project_name and self._is_site_sync_addon_enabled():
                site_name = self._site_sync_addon.get_remote_site(project_name)
            cache.update_data(site_name)
        return cache.get_data()

    def get_active_site_icon_def(self, project_name):
        """Active site icon definition.

        Args:
            project_name (Union[str, None]): Name of project.

        Returns:
            Union[dict[str, Any], None]: Site icon definition.
        """
        if not project_name:
            return None
        active_site = self.get_active_site(project_name)
        provider = self._get_provider_for_site(project_name, active_site)
        return self._get_provider_icon(provider)

    def get_remote_site_icon_def(self, project_name):
        """Remote site icon definition.

        Args:
            project_name (Union[str, None]): Name of project.

        Returns:
            Union[dict[str, Any], None]: Site icon definition.
        """
        if not project_name or not self.is_site_sync_enabled(project_name):
            return None
        remote_site = self.get_remote_site(project_name)
        provider = self._get_provider_for_site(project_name, remote_site)
        return self._get_provider_icon(provider)

    def get_version_sync_availability(self, project_name, version_ids):
        """Returns how many representations are available on sites.

        Returned value `{version_id: (4, 6)}` denotes that locally are
        available 4 and remotely 6 representations.

        NOTE: Available means they were synced to site.

        Returns:
            dict[str, tuple[int, int]]
        """
        if not self.is_site_sync_enabled(project_name):
            return {
                version_id: _default_version_availability()
                for version_id in version_ids
            }

        output = {}
        project_cache = self._version_availability_cache[project_name]
        invalid_ids = set()
        for version_id in version_ids:
            version_cache = project_cache[version_id]
            if version_cache.is_valid:
                output[version_id] = version_cache.get_data()
            else:
                invalid_ids.add(version_id)

        # Refresh stale entries in a single addon query, then read them.
        if invalid_ids:
            self._refresh_version_availability(
                project_name, invalid_ids
            )
            for version_id in invalid_ids:
                version_cache = project_cache[version_id]
                output[version_id] = version_cache.get_data()
        return output

    def get_representations_sync_status(
        self, project_name, representation_ids
    ):
        """Sync status (progress) per representation on both sites.

        Args:
            project_name (str): Project name.
            representation_ids (Iterable[str]): Representation ids.

        Returns:
            dict[str, tuple[float, float]]
        """
        if not self.is_site_sync_enabled(project_name):
            return {
                repre_id: _default_repre_status()
                for repre_id in representation_ids
            }

        output = {}
        project_cache = self._repre_status_cache[project_name]
        invalid_ids = set()
        for repre_id in representation_ids:
            repre_cache = project_cache[repre_id]
            if repre_cache.is_valid:
                output[repre_id] = repre_cache.get_data()
            else:
                invalid_ids.add(repre_id)

        if invalid_ids:
            self._refresh_representations_sync_status(
                project_name, invalid_ids
            )
            for repre_id in invalid_ids:
                repre_cache = project_cache[repre_id]
                output[repre_id] = repre_cache.get_data()
        return output

    def get_site_sync_action_items(self, project_name, representation_ids):
        """Site sync actions for passed representations.

        Args:
            project_name (str): Project name.
            representation_ids (Iterable[str]): Representation ids.

        Returns:
            list[ActionItem]: Actions that can be shown in loader.
        """
        if not self.is_site_sync_enabled(project_name):
            return []

        repres_status = self.get_representations_sync_status(
            project_name, representation_ids
        )

        # Group representations by the actions that make sense for them:
        # anything (partially) local can be uploaded or removed, anything
        # (partially) remote can be downloaded.
        repre_ids_per_identifier = collections.defaultdict(set)
        for repre_id in representation_ids:
            repre_status = repres_status[repre_id]
            local_status, remote_status = repre_status
            if local_status:
                repre_ids_per_identifier[UPLOAD_IDENTIFIER].add(repre_id)
                repre_ids_per_identifier[REMOVE_IDENTIFIER].add(repre_id)
            if remote_status:
                repre_ids_per_identifier[DOWNLOAD_IDENTIFIER].add(repre_id)

        action_items = []
        for identifier, repre_ids in repre_ids_per_identifier.items():
            if identifier == DOWNLOAD_IDENTIFIER:
                action_items.append(self._create_download_action_item(
                    project_name, repre_ids
                ))
            elif identifier == UPLOAD_IDENTIFIER:
                action_items.append(self._create_upload_action_item(
                    project_name, repre_ids
                ))
            elif identifier == REMOVE_IDENTIFIER:
                action_items.append(self._create_delete_action_item(
                    project_name, repre_ids
                ))

        return action_items

    def is_site_sync_action(self, identifier):
        """Should be `identifier` handled by SiteSync.

        Args:
            identifier (str): Action identifier.

        Returns:
            bool: Should action be handled by SiteSync.
        """
        return identifier in {
            UPLOAD_IDENTIFIER,
            DOWNLOAD_IDENTIFIER,
            REMOVE_IDENTIFIER,
        }

    def trigger_action_item(
        self,
        identifier,
        project_name,
        representation_ids
    ):
        """Resets status for site_name or remove local files.

        Args:
            identifier (str): Action identifier.
            project_name (str): Project name.
            representation_ids (Iterable[str]): Representation ids.
        """
        active_site = self.get_active_site(project_name)
        remote_site = self.get_remote_site(project_name)
        repre_docs = list(get_representations(
            project_name, representation_ids=representation_ids
        ))
        # Family is needed by '_add_site' to decide whether linked
        # (referenced) representations should be synced as well.
        families_per_repre_id = {
            item["_id"]: item["context"]["family"]
            for item in repre_docs
        }
        for repre_id in representation_ids:
            family = families_per_repre_id[repre_id]
            if identifier == DOWNLOAD_IDENTIFIER:
                self._add_site(
                    project_name, repre_id, active_site, family
                )
            elif identifier == UPLOAD_IDENTIFIER:
                self._add_site(
                    project_name, repre_id, remote_site, family
                )
            elif identifier == REMOVE_IDENTIFIER:
                self._site_sync_addon.remove_site(
                    project_name,
                    repre_id,
                    active_site,
                    remove_local_files=True
                )

    def _is_site_sync_addon_enabled(self):
        """
        Returns:
            bool: Site sync addon is enabled.
        """
        if self._site_sync_addon is None:
            return False
        return self._site_sync_addon.enabled

    def _get_provider_for_site(self, project_name, site_name):
        """Provider for a site.

        Args:
            project_name (str): Project name.
            site_name (str): Site name.

        Returns:
            Union[str, None]: Provider name.
        """
        if not self._is_site_sync_addon_enabled():
            return None

        return self._site_sync_addon.get_provider_for_site(
            project_name, site_name
        )

    def _get_provider_icon(self, provider):
        """Site provider icon.

        Returns:
            Union[dict[str, Any], None]: Icon of site provider.
        """
        if not provider:
            return None

        # Icons are queried lazily once and kept until 'reset'
        if self._site_icons is None:
            self._site_icons = self._site_sync_addon.get_site_icons()
        return self._site_icons.get(provider)

    def _refresh_version_availability(self, project_name, version_ids):
        """Re-query availability for versions and fill the cache."""
        if not project_name or not version_ids:
            return

        project_cache = self._version_availability_cache[project_name]

        avail_by_id = self._site_sync_addon.get_version_availability(
            project_name,
            version_ids,
            self.get_active_site(project_name),
            self.get_remote_site(project_name),
        )
        for version_id in version_ids:
            status = avail_by_id.get(version_id)
            if status is None:
                status = _default_version_availability()
            project_cache[version_id].update_data(status)

    def _refresh_representations_sync_status(
        self, project_name, representation_ids
    ):
        """Re-query sync state for representations and fill the cache."""
        if not project_name or not representation_ids:
            return
        project_cache = self._repre_status_cache[project_name]
        status_by_repre_id = (
            self._site_sync_addon.get_representations_sync_state(
                project_name,
                representation_ids,
                self.get_active_site(project_name),
                self.get_remote_site(project_name),
            )
        )
        for repre_id in representation_ids:
            status = status_by_repre_id.get(repre_id)
            if status is None:
                status = _default_repre_status()
            project_cache[repre_id].update_data(status)

    def _create_download_action_item(self, project_name, representation_ids):
        return self._create_action_item(
            project_name,
            representation_ids,
            DOWNLOAD_IDENTIFIER,
            "Download",
            "Mark representation for download locally",
            "fa.download"
        )

    def _create_upload_action_item(self, project_name, representation_ids):
        return self._create_action_item(
            project_name,
            representation_ids,
            UPLOAD_IDENTIFIER,
            "Upload",
            "Mark representation for upload remotely",
            "fa.upload"
        )

    def _create_delete_action_item(self, project_name, representation_ids):
        return self._create_action_item(
            project_name,
            representation_ids,
            REMOVE_IDENTIFIER,
            "Remove from local",
            "Remove local synchronization",
            "fa.trash"
        )

    def _create_action_item(
        self,
        project_name,
        representation_ids,
        identifier,
        label,
        tooltip,
        icon_name
    ):
        return ActionItem(
            identifier,
            label,
            icon={
                "type": "awesome-font",
                "name": icon_name,
                "color": "#999999"
            },
            tooltip=tooltip,
            options={},
            order=1,
            project_name=project_name,
            folder_ids=[],
            product_ids=[],
            version_ids=[],
            representation_ids=representation_ids,
        )

    def _add_site(self, project_name, repre_id, site_name, family):
        """Mark representation (and workfile references) for sync to site."""
        self._site_sync_addon.add_site(
            project_name, repre_id, site_name, force=True
        )

        # TODO this should happen in site sync addon
        if family != "workfile":
            return

        links = get_linked_representation_id(
            project_name,
            repre_id=repre_id,
            link_type="reference"
        )
        for link_repre_id in links:
            try:
                # Use module logger instead of bare 'print' (consistency
                # with the rest of this module).
                log.debug("Adding {} to linked representation: {}".format(
                    site_name, link_repre_id))
                self._site_sync_addon.add_site(
                    project_name,
                    link_repre_id,
                    site_name,
                    force=False
                )
            except Exception:
                # do not add/reset working site for references
                log.debug("Site present", exc_info=True)

View file

@ -8,6 +8,11 @@ from .products_model import (
VERSION_NAME_EDIT_ROLE,
VERSION_ID_ROLE,
PRODUCT_IN_SCENE_ROLE,
ACTIVE_SITE_ICON_ROLE,
REMOTE_SITE_ICON_ROLE,
REPRESENTATIONS_COUNT_ROLE,
SYNC_ACTIVE_SITE_AVAILABILITY,
SYNC_REMOTE_SITE_AVAILABILITY,
)
@ -189,3 +194,78 @@ class LoadedInSceneDelegate(QtWidgets.QStyledItemDelegate):
value = index.data(PRODUCT_IN_SCENE_ROLE)
color = self._colors.get(value, self._default_color)
option.palette.setBrush(QtGui.QPalette.Text, color)
class SiteSyncDelegate(QtWidgets.QStyledItemDelegate):
    """Paints icons and downloaded representation ratio for both sites.

    For each of the active and remote sites the delegate draws the site
    icon followed by an "available/total" representation count, splitting
    the cell horizontally between the sites that have an icon.
    """

    def paint(self, painter, option, index):
        super(SiteSyncDelegate, self).paint(painter, option, index)
        option = QtWidgets.QStyleOptionViewItem(option)
        option.showDecorationSelected = True

        active_icon = index.data(ACTIVE_SITE_ICON_ROLE)
        remote_icon = index.data(REMOTE_SITE_ICON_ROLE)

        # Check the raw role data before formatting. The previous check
        # tested the formatted strings, which 'str.format' can never
        # return as None, so missing data was painted as "None/None".
        active_count = index.data(SYNC_ACTIVE_SITE_AVAILABILITY)
        remote_count = index.data(SYNC_REMOTE_SITE_AVAILABILITY)
        if active_count is None or remote_count is None:
            return

        repre_count = index.data(REPRESENTATIONS_COUNT_ROLE)
        availability_active = "{}/{}".format(active_count, repre_count)
        availability_remote = "{}/{}".format(remote_count, repre_count)

        items_to_draw = [
            (value, icon)
            for value, icon in (
                (availability_active, active_icon),
                (availability_remote, remote_icon),
            )
            if icon
        ]
        if not items_to_draw:
            return

        icon_size = QtCore.QSize(24, 24)
        padding = 10
        pos_x = option.rect.x()

        # Split the cell evenly between the sites that will be drawn.
        item_width = int(option.rect.width() / len(items_to_draw))
        if item_width < 1:
            item_width = 0

        for value, icon in items_to_draw:
            item_rect = QtCore.QRect(
                pos_x,
                option.rect.y(),
                item_width,
                option.rect.height()
            )

            # Prepare pos_x for next item
            pos_x = item_rect.x() + item_rect.width()

            pixmap = icon.pixmap(icon.actualSize(icon_size))
            # QPoint requires integer coordinates; the vertical centering
            # math produces a float, so cast explicitly.
            point = QtCore.QPoint(
                item_rect.x() + padding,
                item_rect.y() + int(
                    (item_rect.height() - pixmap.height()) * 0.5
                )
            )
            painter.drawPixmap(point, pixmap)

            icon_offset = icon_size.width() + (padding * 2)
            text_rect = QtCore.QRect(item_rect)
            text_rect.setLeft(text_rect.left() + icon_offset)
            if text_rect.width() < 1:
                continue
            painter.drawText(
                text_rect,
                option.displayAlignment,
                value
            )

    def displayText(self, value, locale):
        # Suppress the default text rendering; 'paint' draws everything.
        pass

View file

@ -29,6 +29,11 @@ VERSION_HANDLES_ROLE = QtCore.Qt.UserRole + 18
VERSION_STEP_ROLE = QtCore.Qt.UserRole + 19
VERSION_AVAILABLE_ROLE = QtCore.Qt.UserRole + 20
VERSION_THUMBNAIL_ID_ROLE = QtCore.Qt.UserRole + 21
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 22
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 23
REPRESENTATIONS_COUNT_ROLE = QtCore.Qt.UserRole + 24
SYNC_ACTIVE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 25
SYNC_REMOTE_SITE_AVAILABILITY = QtCore.Qt.UserRole + 26
class ProductsModel(QtGui.QStandardItemModel):
@ -68,6 +73,7 @@ class ProductsModel(QtGui.QStandardItemModel):
published_time_col = column_labels.index("Time")
folders_label_col = column_labels.index("Folder")
in_scene_col = column_labels.index("In scene")
site_sync_avail_col = column_labels.index("Availability")
def __init__(self, controller):
super(ProductsModel, self).__init__()
@ -303,7 +309,26 @@ class ProductsModel(QtGui.QStandardItemModel):
model_item.setData(
version_item.thumbnail_id, VERSION_THUMBNAIL_ID_ROLE)
def _get_product_model_item(self, product_item):
# TODO call site sync methods for all versions at once
project_name = self._last_project_name
version_id = version_item.version_id
repre_count = self._controller.get_versions_representation_count(
project_name, [version_id]
)[version_id]
active, remote = self._controller.get_version_sync_availability(
project_name, [version_id]
)[version_id]
model_item.setData(repre_count, REPRESENTATIONS_COUNT_ROLE)
model_item.setData(active, SYNC_ACTIVE_SITE_AVAILABILITY)
model_item.setData(remote, SYNC_REMOTE_SITE_AVAILABILITY)
def _get_product_model_item(
self,
product_item,
active_site_icon,
remote_site_icon
):
model_item = self._items_by_id.get(product_item.product_id)
versions = list(product_item.version_items.values())
versions.sort()
@ -329,6 +354,9 @@ class ProductsModel(QtGui.QStandardItemModel):
in_scene = 1 if product_item.product_in_scene else 0
model_item.setData(in_scene, PRODUCT_IN_SCENE_ROLE)
model_item.setData(active_site_icon, ACTIVE_SITE_ICON_ROLE)
model_item.setData(remote_site_icon, REMOTE_SITE_ICON_ROLE)
self._set_version_data_to_product_item(model_item, last_version)
return model_item
@ -341,6 +369,15 @@ class ProductsModel(QtGui.QStandardItemModel):
self._last_project_name = project_name
self._last_folder_ids = folder_ids
active_site_icon_def = self._controller.get_active_site_icon_def(
project_name
)
remote_site_icon_def = self._controller.get_remote_site_icon_def(
project_name
)
active_site_icon = get_qt_icon(active_site_icon_def)
remote_site_icon = get_qt_icon(remote_site_icon_def)
product_items = self._controller.get_product_items(
project_name,
folder_ids,
@ -402,7 +439,11 @@ class ProductsModel(QtGui.QStandardItemModel):
new_root_items.append(parent_item)
for product_item in top_items:
item = self._get_product_model_item(product_item)
item = self._get_product_model_item(
product_item,
active_site_icon,
remote_site_icon,
)
new_items.append(item)
for path_info in merged_product_items.values():
@ -418,7 +459,11 @@ class ProductsModel(QtGui.QStandardItemModel):
merged_product_types = set()
new_merged_items = []
for product_item in product_items:
item = self._get_product_model_item(product_item)
item = self._get_product_model_item(
product_item,
active_site_icon,
remote_site_icon,
)
new_merged_items.append(item)
merged_product_types.add(product_item.product_type)

View file

@ -19,7 +19,11 @@ from .products_model import (
VERSION_ID_ROLE,
VERSION_THUMBNAIL_ID_ROLE,
)
from .products_delegates import VersionDelegate, LoadedInSceneDelegate
from .products_delegates import (
VersionDelegate,
LoadedInSceneDelegate,
SiteSyncDelegate
)
from .actions_utils import show_actions_menu
@ -92,7 +96,7 @@ class ProductsWidget(QtWidgets.QWidget):
55, # Handles
10, # Step
25, # Loaded in scene
65, # Site info (maybe?)
65, # Site sync info
)
def __init__(self, controller, parent):
@ -135,6 +139,10 @@ class ProductsWidget(QtWidgets.QWidget):
products_view.setItemDelegateForColumn(
products_model.in_scene_col, in_scene_delegate)
site_sync_delegate = SiteSyncDelegate()
products_view.setItemDelegateForColumn(
products_model.site_sync_avail_col, site_sync_delegate)
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(products_view, 1)
@ -167,6 +175,8 @@ class ProductsWidget(QtWidgets.QWidget):
self._version_delegate = version_delegate
self._time_delegate = time_delegate
self._in_scene_delegate = in_scene_delegate
self._site_sync_delegate = site_sync_delegate
self._selected_project_name = None
self._selected_folder_ids = set()
@ -182,6 +192,9 @@ class ProductsWidget(QtWidgets.QWidget):
products_model.in_scene_col,
not controller.is_loaded_products_supported()
)
self._set_site_sync_visibility(
self._controller.is_site_sync_enabled()
)
def set_name_filter(self, name):
"""Set filter of product name.
@ -216,6 +229,12 @@ class ProductsWidget(QtWidgets.QWidget):
def refresh(self):
self._refresh_model()
def _set_site_sync_visibility(self, site_sync_enabled):
self._products_view.setColumnHidden(
self._products_model.site_sync_avail_col,
not site_sync_enabled
)
def _fill_version_editor(self):
model = self._products_proxy_model
index_queue = collections.deque()
@ -375,7 +394,12 @@ class ProductsWidget(QtWidgets.QWidget):
self._on_selection_change()
def _on_folders_selection_change(self, event):
self._selected_project_name = event["project_name"]
project_name = event["project_name"]
site_sync_enabled = self._controller.is_site_sync_enabled(
project_name
)
self._set_site_sync_visibility(site_sync_enabled)
self._selected_project_name = project_name
self._selected_folder_ids = event["folder_ids"]
self._refresh_model()
self._update_folders_label_visible()

View file

@ -14,6 +14,10 @@ REPRESENTATION_ID_ROLE = QtCore.Qt.UserRole + 2
PRODUCT_NAME_ROLE = QtCore.Qt.UserRole + 3
FOLDER_LABEL_ROLE = QtCore.Qt.UserRole + 4
GROUP_TYPE_ROLE = QtCore.Qt.UserRole + 5
ACTIVE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 6
REMOTE_SITE_ICON_ROLE = QtCore.Qt.UserRole + 7
SYNC_ACTIVE_SITE_PROGRESS = QtCore.Qt.UserRole + 8
SYNC_REMOTE_SITE_PROGRESS = QtCore.Qt.UserRole + 9
class RepresentationsModel(QtGui.QStandardItemModel):
@ -22,12 +26,14 @@ class RepresentationsModel(QtGui.QStandardItemModel):
("Name", 120),
("Product name", 125),
("Folder", 125),
# ("Active site", 85),
# ("Remote site", 85)
("Active site", 85),
("Remote site", 85)
]
column_labels = [label for label, _ in colums_info]
column_widths = [width for _, width in colums_info]
folder_column = column_labels.index("Product name")
active_site_column = column_labels.index("Active site")
remote_site_column = column_labels.index("Remote site")
def __init__(self, controller):
super(RepresentationsModel, self).__init__()
@ -59,7 +65,7 @@ class RepresentationsModel(QtGui.QStandardItemModel):
repre_items = self._controller.get_representation_items(
self._selected_project_name, self._selected_version_ids
)
self._fill_items(repre_items)
self._fill_items(repre_items, self._selected_project_name)
self.refreshed.emit()
def data(self, index, role=None):
@ -69,13 +75,23 @@ class RepresentationsModel(QtGui.QStandardItemModel):
col = index.column()
if col != 0:
if role == QtCore.Qt.DecorationRole:
return None
if col == 3:
role = ACTIVE_SITE_ICON_ROLE
elif col == 4:
role = REMOTE_SITE_ICON_ROLE
else:
return None
if role == QtCore.Qt.DisplayRole:
if col == 1:
role = PRODUCT_NAME_ROLE
elif col == 2:
role = FOLDER_LABEL_ROLE
elif col == 3:
role = SYNC_ACTIVE_SITE_PROGRESS
elif col == 4:
role = SYNC_REMOTE_SITE_PROGRESS
index = self.index(index.row(), 0, index.parent())
return super(RepresentationsModel, self).data(index, role)
@ -89,7 +105,13 @@ class RepresentationsModel(QtGui.QStandardItemModel):
root_item = self.invisibleRootItem()
root_item.removeRows(0, root_item.rowCount())
def _get_repre_item(self, repre_item):
def _get_repre_item(
self,
repre_item,
active_site_icon,
remote_site_icon,
repres_sync_status
):
repre_id = repre_item.representation_id
repre_name = repre_item.representation_name
repre_icon = repre_item.representation_icon
@ -102,6 +124,12 @@ class RepresentationsModel(QtGui.QStandardItemModel):
item.setColumnCount(self.columnCount())
item.setEditable(False)
sync_status = repres_sync_status[repre_id]
active_progress, remote_progress = sync_status
active_site_progress = "{}%".format(int(active_progress * 100))
remote_site_progress = "{}%".format(int(remote_progress * 100))
icon = get_qt_icon(repre_icon)
item.setData(repre_name, QtCore.Qt.DisplayRole)
item.setData(icon, QtCore.Qt.DecorationRole)
@ -109,6 +137,10 @@ class RepresentationsModel(QtGui.QStandardItemModel):
item.setData(repre_id, REPRESENTATION_ID_ROLE)
item.setData(repre_item.product_name, PRODUCT_NAME_ROLE)
item.setData(repre_item.folder_label, FOLDER_LABEL_ROLE)
item.setData(active_site_icon, ACTIVE_SITE_ICON_ROLE)
item.setData(remote_site_icon, REMOTE_SITE_ICON_ROLE)
item.setData(active_site_progress, SYNC_ACTIVE_SITE_PROGRESS)
item.setData(remote_site_progress, SYNC_REMOTE_SITE_PROGRESS)
return is_new_item, item
def _get_group_icon(self):
@ -134,14 +166,29 @@ class RepresentationsModel(QtGui.QStandardItemModel):
self._groups_items_by_name[repre_name] = item
return True, item
def _fill_items(self, repre_items):
def _fill_items(self, repre_items, project_name):
active_site_icon_def = self._controller.get_active_site_icon_def(
project_name
)
remote_site_icon_def = self._controller.get_remote_site_icon_def(
project_name
)
active_site_icon = get_qt_icon(active_site_icon_def)
remote_site_icon = get_qt_icon(remote_site_icon_def)
items_to_remove = set(self._items_by_id.keys())
repre_items_by_name = collections.defaultdict(list)
repre_ids = set()
for repre_item in repre_items:
repre_ids.add(repre_item.representation_id)
items_to_remove.discard(repre_item.representation_id)
repre_name = repre_item.representation_name
repre_items_by_name[repre_name].append(repre_item)
repres_sync_status = self._controller.get_representations_sync_status(
project_name, repre_ids
)
root_item = self.invisibleRootItem()
for repre_id in items_to_remove:
item = self._items_by_id.pop(repre_id)
@ -164,7 +211,12 @@ class RepresentationsModel(QtGui.QStandardItemModel):
new_group_items = []
for repre_item in repre_name_items:
is_new_item, item = self._get_repre_item(repre_item)
is_new_item, item = self._get_repre_item(
repre_item,
active_site_icon,
remote_site_icon,
repres_sync_status
)
item_parent = item.parent()
if item_parent is None:
item_parent = root_item
@ -255,6 +307,9 @@ class RepresentationsWidget(QtWidgets.QWidget):
self._repre_model = repre_model
self._repre_proxy_model = repre_proxy_model
self._set_site_sync_visibility(
self._controller.is_site_sync_enabled()
)
self._set_multiple_folders_selected(False)
def refresh(self):
@ -265,6 +320,20 @@ class RepresentationsWidget(QtWidgets.QWidget):
def _on_project_change(self, event):
self._selected_project_name = event["project_name"]
site_sync_enabled = self._controller.is_site_sync_enabled(
self._selected_project_name
)
self._set_site_sync_visibility(site_sync_enabled)
def _set_site_sync_visibility(self, site_sync_enabled):
self._repre_view.setColumnHidden(
self._repre_model.active_site_column,
not site_sync_enabled
)
self._repre_view.setColumnHidden(
self._repre_model.remote_site_column,
not site_sync_enabled
)
def _set_multiple_folders_selected(self, selected_multiple_folders):
if selected_multiple_folders == self._selected_multiple_folders:

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.17.7-nightly.2"
__version__ = "3.17.7-nightly.5"