mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00

Merge branch 'develop' of https://github.com/ynput/ayon-core into enhancement/maya_load_reference_usd_time

Commit 13448626b0
876 changed files with 9582 additions and 5335 deletions
@@ -51,8 +51,18 @@ IGNORED_MODULES_IN_AYON = set()
 # - this is used to log the missing addon
+MOVED_ADDON_MILESTONE_VERSIONS = {
+    "applications": VersionInfo(0, 2, 0),
+    "celaction": VersionInfo(0, 2, 0),
+    "clockify": VersionInfo(0, 2, 0),
+    "flame": VersionInfo(0, 2, 0),
+    "max": VersionInfo(0, 2, 0),
+    "traypublisher": VersionInfo(0, 2, 0),
+    "tvpaint": VersionInfo(0, 2, 0),
+    "maya": VersionInfo(0, 2, 0),
+    "nuke": VersionInfo(0, 2, 0),
+    "substancepainter": VersionInfo(0, 2, 0),
+}


 # Inherit from `object` for Python 2 hosts
 class _ModuleClass(object):
     """Fake module class for storing AYON addons.
@@ -1321,7 +1331,7 @@ class TrayAddonsManager(AddonsManager):
         self.doubleclick_callback = None

     def add_doubleclick_callback(self, addon, callback):
-        """Register doubleclick callbacks on tray icon.
+        """Register double-click callbacks on tray icon.

         Currently, there is no way how to determine which is launched. Name of
         callback can be defined with `doubleclick_callback` attribute.
@@ -1,7 +1,7 @@
 from ayon_applications import PreLaunchHook

-from ayon_core.pipeline.colorspace import get_imageio_config
-from ayon_core.pipeline.template_data import get_template_data_with_names
+from ayon_core.pipeline.colorspace import get_imageio_config_preset
+from ayon_core.pipeline.template_data import get_template_data


 class OCIOEnvHook(PreLaunchHook):
@@ -26,32 +26,38 @@ class OCIOEnvHook(PreLaunchHook):
     def execute(self):
         """Hook entry method."""

-        template_data = get_template_data_with_names(
-            project_name=self.data["project_name"],
-            folder_path=self.data["folder_path"],
-            task_name=self.data["task_name"],
+        folder_entity = self.data["folder_entity"]
+
+        template_data = get_template_data(
+            self.data["project_entity"],
+            folder_entity=folder_entity,
+            task_entity=self.data["task_entity"],
             host_name=self.host_name,
-            settings=self.data["project_settings"]
+            settings=self.data["project_settings"],
         )

-        config_data = get_imageio_config(
-            project_name=self.data["project_name"],
-            host_name=self.host_name,
-            project_settings=self.data["project_settings"],
-            anatomy_data=template_data,
+        config_data = get_imageio_config_preset(
+            self.data["project_name"],
+            self.data["folder_path"],
+            self.data["task_name"],
+            self.host_name,
+            anatomy=self.data["anatomy"],
+            project_settings=self.data["project_settings"],
+            template_data=template_data,
+            env=self.launch_context.env,
+            folder_id=folder_entity["id"],
         )

-        if config_data:
-            ocio_path = config_data["path"]
-
-            if self.host_name in ["nuke", "hiero"]:
-                ocio_path = ocio_path.replace("\\", "/")
-
-            self.log.info(
-                f"Setting OCIO environment to config path: {ocio_path}")
-
-            self.launch_context.env["OCIO"] = ocio_path
-        else:
+        if not config_data:
             self.log.debug("OCIO not set or enabled")
+            return
+
+        ocio_path = config_data["path"]
+
+        if self.host_name in ["nuke", "hiero"]:
+            ocio_path = ocio_path.replace("\\", "/")
+
+        self.log.info(
+            f"Setting OCIO environment to config path: {ocio_path}")
+
+        self.launch_context.env["OCIO"] = ocio_path
@@ -60,7 +60,7 @@ def main(*subprocess_args):
             )
         )

-    elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True):
+    elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
         save = False
         if os.getenv("WORKFILES_SAVE_AS"):
             save = True
@@ -24,7 +24,7 @@ class AERenderInstance(RenderInstance):

 class CollectAERender(publish.AbstractCollectRender):

-    order = pyblish.api.CollectorOrder + 0.405
+    order = pyblish.api.CollectorOrder + 0.100
     label = "Collect After Effects Render Layers"
     hosts = ["aftereffects"]

@@ -145,6 +145,7 @@ class CollectAERender(publish.AbstractCollectRender):
             if "review" in instance.families:
                 # to skip ExtractReview locally
                 instance.families.remove("review")
+            instance.deadline = inst.data.get("deadline")

             instances.append(instance)
@@ -33,7 +33,7 @@ def load_scripts(paths):
         if register:
             try:
                 register()
-            except:
+            except: # noqa E722
                 traceback.print_exc()
         else:
             print("\nWarning! '%s' has no register function, "

@@ -45,7 +45,7 @@ def load_scripts(paths):
         if unregister:
             try:
                 unregister()
-            except:
+            except: # noqa E722
                 traceback.print_exc()

     def test_reload(mod):

@@ -57,7 +57,7 @@ def load_scripts(paths):

         try:
             return importlib.reload(mod)
-        except:
+        except: # noqa E722
            traceback.print_exc()

     def test_register(mod):
@@ -365,3 +365,62 @@ def maintained_time():
         yield
     finally:
         bpy.context.scene.frame_current = current_time
+
+
+def get_all_parents(obj):
+    """Get all recursive parents of object.
+
+    Arguments:
+        obj (bpy.types.Object): Object to get all parents for.
+
+    Returns:
+        List[bpy.types.Object]: All parents of object
+
+    """
+    result = []
+    while True:
+        obj = obj.parent
+        if not obj:
+            break
+        result.append(obj)
+    return result
+
+
+def get_highest_root(objects):
+    """Get the highest object (the least parents) among the objects.
+
+    If multiple objects have the same amount of parents (or no parents) the
+    first object found in the input iterable will be returned.
+
+    Note that this will *not* return objects outside of the input list, as
+    such it will not return the root of node from a child node. It is purely
+    intended to find the highest object among a list of objects. To instead
+    get the root from one object use, e.g. `get_all_parents(obj)[-1]`
+
+    Arguments:
+        objects (List[bpy.types.Object]): Objects to find the highest root in.
+
+    Returns:
+        Optional[bpy.types.Object]: First highest root found or None if no
+            `bpy.types.Object` found in input list.
+
+    """
+    included_objects = {obj.name_full for obj in objects}
+    num_parents_to_obj = {}
+    for obj in objects:
+        if isinstance(obj, bpy.types.Object):
+            parents = get_all_parents(obj)
+            # included parents
+            parents = [parent for parent in parents if
+                       parent.name_full in included_objects]
+            if not parents:
+                # A node without parents must be a highest root
+                return obj
+
+            num_parents_to_obj.setdefault(len(parents), obj)
+
+    if not num_parents_to_obj:
+        return
+
+    minimum_parent = min(num_parents_to_obj)
+    return num_parents_to_obj[minimum_parent]
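The `get_all_parents`/`get_highest_root` helpers added above depend only on `parent` and `name_full`, so the selection logic can be sanity-checked outside Blender. A minimal standalone sketch with stand-in objects (the `FakeObject` class is illustrative, not part of ayon-core; inside Blender the inputs would be `bpy.types.Object`):

class FakeObject:
    """Illustrative stand-in for bpy.types.Object (parent/name_full only)."""
    def __init__(self, name, parent=None):
        self.name_full = name
        self.parent = parent

def get_all_parents(obj):
    # Same parent walk as the Blender helper above.
    result = []
    while True:
        obj = obj.parent
        if not obj:
            break
        result.append(obj)
    return result

root = FakeObject("root")
child = FakeObject("child", parent=root)
grandchild = FakeObject("grandchild", parent=child)

# "root" is not in the queried list, so its child wins: parents outside
# the input list are ignored, exactly as the docstring above states.
objects = [grandchild, child]
included = {o.name_full for o in objects}
print(min(objects, key=lambda o: len(
    [p for p in get_all_parents(o) if p.name_full in included])).name_full)
# -> child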
@@ -26,7 +26,8 @@ from .ops import (
 )
 from .lib import imprint

-VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx"]
+VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx",
+                    ".usd", ".usdc", ".usda"]


 def prepare_scene_name(

@@ -143,13 +144,19 @@ def deselect_all():
         if obj.mode != 'OBJECT':
             modes.append((obj, obj.mode))
-            bpy.context.view_layer.objects.active = obj
-            bpy.ops.object.mode_set(mode='OBJECT')
+            context_override = create_blender_context(active=obj)
+            with bpy.context.temp_override(**context_override):
+                bpy.ops.object.mode_set(mode='OBJECT')

-    bpy.ops.object.select_all(action='DESELECT')
+    context_override = create_blender_context()
+    with bpy.context.temp_override(**context_override):
+        bpy.ops.object.select_all(action='DESELECT')

     for p in modes:
-        bpy.context.view_layer.objects.active = p[0]
-        bpy.ops.object.mode_set(mode=p[1])
+        context_override = create_blender_context(active=p[0])
+        with bpy.context.temp_override(**context_override):
+            bpy.ops.object.mode_set(mode=p[1])

     bpy.context.view_layer.objects.active = active
client/ayon_core/hosts/blender/plugins/create/create_usd.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+"""Create a USD Export."""
+
+from ayon_core.hosts.blender.api import plugin, lib
+
+
+class CreateUSD(plugin.BaseCreator):
+    """Create USD Export"""
+
+    identifier = "io.openpype.creators.blender.usd"
+    name = "usdMain"
+    label = "USD"
+    product_type = "usd"
+    icon = "gears"
+
+    def create(
+        self, product_name: str, instance_data: dict, pre_create_data: dict
+    ):
+        # Run parent create method
+        collection = super().create(
+            product_name, instance_data, pre_create_data
+        )
+
+        if pre_create_data.get("use_selection"):
+            objects = lib.get_selection()
+            for obj in objects:
+                collection.objects.link(obj)
+                if obj.type == 'EMPTY':
+                    objects.extend(obj.children)
+
+        return collection
|
@ -26,10 +26,10 @@ class CacheModelLoader(plugin.AssetLoader):
|
|||
Note:
|
||||
At least for now it only supports Alembic files.
|
||||
"""
|
||||
product_types = {"model", "pointcache", "animation"}
|
||||
representations = {"abc"}
|
||||
product_types = {"model", "pointcache", "animation", "usd"}
|
||||
representations = {"abc", "usd"}
|
||||
|
||||
label = "Load Alembic"
|
||||
label = "Load Cache"
|
||||
icon = "code-fork"
|
||||
color = "orange"
|
||||
|
||||
|
|
@ -53,10 +53,21 @@ class CacheModelLoader(plugin.AssetLoader):
|
|||
plugin.deselect_all()
|
||||
|
||||
relative = bpy.context.preferences.filepaths.use_relative_paths
|
||||
bpy.ops.wm.alembic_import(
|
||||
filepath=libpath,
|
||||
relative_path=relative
|
||||
)
|
||||
|
||||
if any(libpath.lower().endswith(ext)
|
||||
for ext in [".usd", ".usda", ".usdc"]):
|
||||
# USD
|
||||
bpy.ops.wm.usd_import(
|
||||
filepath=libpath,
|
||||
relative_path=relative
|
||||
)
|
||||
|
||||
else:
|
||||
# Alembic
|
||||
bpy.ops.wm.alembic_import(
|
||||
filepath=libpath,
|
||||
relative_path=relative
|
||||
)
|
||||
|
||||
imported = lib.get_selection()
|
||||
|
||||
|
|
@@ -43,7 +43,10 @@ class AbcCameraLoader(plugin.AssetLoader):
     def _process(self, libpath, asset_group, group_name):
         plugin.deselect_all()

-        bpy.ops.wm.alembic_import(filepath=libpath)
+        # Force the creation of the transform cache even if the camera
+        # doesn't have an animation. We use the cache to update the camera.
+        bpy.ops.wm.alembic_import(
+            filepath=libpath, always_add_cache_reader=True)

         objects = lib.get_selection()

@@ -178,12 +181,33 @@ class AbcCameraLoader(plugin.AssetLoader):
             self.log.info("Library already loaded, not updating...")
             return

         mat = asset_group.matrix_basis.copy()
+        for obj in asset_group.children:
+            found = False
+            for constraint in obj.constraints:
+                if constraint.type == "TRANSFORM_CACHE":
+                    constraint.cache_file.filepath = libpath.as_posix()
+                    found = True
+                    break
+            if not found:
+                # This is to keep compatibility with cameras loaded with
+                # the old loader
+                # Create a new constraint for the cache file
+                constraint = obj.constraints.new("TRANSFORM_CACHE")
+                bpy.ops.cachefile.open(filepath=libpath.as_posix())
+                constraint.cache_file = bpy.data.cache_files[-1]
+                constraint.cache_file.scale = 1.0

-        self._remove(asset_group)
-        self._process(str(libpath), asset_group, object_name)
+                # This is a workaround to set the object path. Blender doesn't
+                # load the list of object paths until the object is evaluated.
+                # This is a hack to force the object to be evaluated.
+                # The modifier doesn't need to be removed because camera
+                # objects don't have modifiers.
+                obj.modifiers.new(
+                    name='MeshSequenceCache', type='MESH_SEQUENCE_CACHE')
+                bpy.context.evaluated_depsgraph_get()

-        asset_group.matrix_basis = mat
+                constraint.object_path = (
+                    constraint.cache_file.object_paths[0].path)

         metadata["libpath"] = str(libpath)
         metadata["representation"] = repre_entity["id"]
@@ -12,7 +12,7 @@ class CollectBlenderInstanceData(pyblish.api.InstancePlugin):
     order = pyblish.api.CollectorOrder
     hosts = ["blender"]
     families = ["model", "pointcache", "animation", "rig", "camera", "layout",
-                "blendScene"]
+                "blendScene", "usd"]
     label = "Collect Instance"

     def process(self, instance):
@@ -0,0 +1,90 @@
+import os
+
+import bpy
+
+from ayon_core.pipeline import publish
+from ayon_core.hosts.blender.api import plugin, lib
+
+
+class ExtractUSD(publish.Extractor):
+    """Extract as USD."""
+
+    label = "Extract USD"
+    hosts = ["blender"]
+    families = ["usd"]
+
+    def process(self, instance):
+
+        # Ignore runtime instances (e.g. USD layers)
+        # TODO: This is better done via more specific `families`
+        if not instance.data.get("transientData", {}).get("instance_node"):
+            return
+
+        # Define extract output file path
+        stagingdir = self.staging_dir(instance)
+        filename = f"{instance.name}.usd"
+        filepath = os.path.join(stagingdir, filename)
+
+        # Perform extraction
+        self.log.debug("Performing extraction..")
+
+        # Select all members to "export selected"
+        plugin.deselect_all()
+
+        selected = []
+        for obj in instance:
+            if isinstance(obj, bpy.types.Object):
+                obj.select_set(True)
+                selected.append(obj)
+
+        root = lib.get_highest_root(objects=instance[:])
+        if not root:
+            instance_node = instance.data["transientData"]["instance_node"]
+            raise publish.KnownPublishError(
+                f"No root object found in instance: {instance_node.name}"
+            )
+        self.log.debug(f"Exporting using active root: {root.name}")
+
+        context = plugin.create_blender_context(
+            active=root, selected=selected)
+
+        # Export USD
+        with bpy.context.temp_override(**context):
+            bpy.ops.wm.usd_export(
+                filepath=filepath,
+                selected_objects_only=True,
+                export_textures=False,
+                relative_paths=False,
+                export_animation=False,
+                export_hair=False,
+                export_uvmaps=True,
+                # TODO: add for new version of Blender (4+?)
+                # export_mesh_colors=True,
+                export_normals=True,
+                export_materials=True,
+                use_instancing=True
+            )
+
+        plugin.deselect_all()
+
+        # Add representation
+        representation = {
+            'name': 'usd',
+            'ext': 'usd',
+            'files': filename,
+            "stagingDir": stagingdir,
+        }
+        instance.data.setdefault("representations", []).append(representation)
+        self.log.debug("Extracted instance '%s' to: %s",
+                       instance.name, representation)
+
+
+class ExtractModelUSD(ExtractUSD):
+    """Extract model as USD."""
+
+    label = "Extract USD (Model)"
+    hosts = ["blender"]
+    families = ["model"]
+
+    # Driven by settings
+    optional = True
@@ -1,10 +0,0 @@
-from .addon import (
-    HOST_DIR,
-    FlameAddon,
-)
-
-
-__all__ = (
-    "HOST_DIR",
-    "FlameAddon",
-)
@@ -58,3 +58,55 @@ class SelectInvalidAction(pyblish.api.Action):
         self.log.info(
             "Selecting invalid tools: %s" % ", ".join(sorted(names))
         )
+
+
+class SelectToolAction(pyblish.api.Action):
+    """Select invalid output tool in Fusion when plug-in failed.
+
+    """
+
+    label = "Select saver"
+    on = "failed" # This action is only available on a failed plug-in
+    icon = "search" # Icon from Awesome Icon
+
+    def process(self, context, plugin):
+        errored_instances = get_errored_instances_from_context(
+            context,
+            plugin=plugin,
+        )
+
+        # Get the invalid nodes for the plug-ins
+        self.log.info("Finding invalid nodes..")
+        tools = []
+        for instance in errored_instances:
+
+            tool = instance.data.get("tool")
+            if tool is not None:
+                tools.append(tool)
+            else:
+                self.log.warning(
+                    "Plug-in returned to be invalid, "
+                    f"but has no saver for instance {instance.name}."
+                )
+
+        if not tools:
+            # Assume relevant comp is current comp and clear selection
+            self.log.info("No invalid tools found.")
+            comp = get_current_comp()
+            flow = comp.CurrentFrame.FlowView
+            flow.Select() # No args equals clearing selection
+            return
+
+        # Assume a single comp
+        first_tool = tools[0]
+        comp = first_tool.Comp()
+        flow = comp.CurrentFrame.FlowView
+        flow.Select() # No args equals clearing selection
+        names = set()
+        for tool in tools:
+            flow.Select(tool, True)
+            comp.SetActiveTool(tool)
+            names.add(tool.Name)
+        self.log.info(
+            "Selecting invalid tools: %s" % ", ".join(sorted(names))
+        )
@@ -169,7 +169,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
     def _on_repair():
         attributes = dict()
         for key, comp_key, _label in validations:
-            value = folder_value[key]
+            value = folder_attributes[key]
             comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
             attributes[comp_key_full] = value
         comp.SetPrefs(attributes)
@@ -52,7 +52,7 @@ class CollectFusionRender(
             if product_type not in ["render", "image"]:
                 continue

-            task_name = context.data["task"]
+            task_name = inst.data["task"]
             tool = inst.data["transientData"]["tool"]

             instance_families = inst.data.get("families", [])

@@ -115,6 +115,7 @@ class CollectFusionRender(
             if "review" in instance.families:
                 # to skip ExtractReview locally
                 instance.families.remove("review")
+            instance.deadline = inst.data.get("deadline")

             instances.append(instance)
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+"""Validate if instance context is the same as publish context."""
+
+import pyblish.api
+from ayon_core.hosts.fusion.api.action import SelectToolAction
+from ayon_core.pipeline.publish import (
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    OptionalPyblishPluginMixin
+)
+
+
+class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin,
+                                      OptionalPyblishPluginMixin):
+    """Validator to check if instance context matches context of publish.
+
+    When working in per-shot style you always publish data in context of
+    current asset (shot). This validator checks if this is so. It is optional
+    so it can be disabled when needed.
+    """
+    # Similar to maya and houdini-equivalent `ValidateInstanceInContext`
+
+    order = ValidateContentsOrder
+    label = "Instance in same Context"
+    optional = True
+    hosts = ["fusion"]
+    actions = [SelectToolAction, RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
+        instance_context = self.get_context(instance.data)
+        context = self.get_context(instance.context.data)
+        if instance_context != context:
+            context_label = "{} > {}".format(*context)
+            instance_label = "{} > {}".format(*instance_context)
+
+            raise PublishValidationError(
+                message=(
+                    "Instance '{}' publishes to different asset than current "
+                    "context: {}. Current context: {}".format(
+                        instance.name, instance_label, context_label
+                    )
+                ),
+                description=(
+                    "## Publishing to a different asset\n"
+                    "There are publish instances present which are publishing "
+                    "into a different asset than your current context.\n\n"
+                    "Usually this is not what you want but there can be cases "
+                    "where you might want to publish into another asset or "
+                    "shot. If that's the case you can disable the validation "
+                    "on the instance to ignore it."
+                )
+            )
+
+    @classmethod
+    def repair(cls, instance):
+
+        create_context = instance.context.data["create_context"]
+        instance_id = instance.data.get("instance_id")
+        created_instance = create_context.get_instance_by_id(
+            instance_id
+        )
+        if created_instance is None:
+            raise RuntimeError(
+                f"No CreatedInstances found with id '{instance_id} "
+                f"in {create_context.instances_by_id}"
+            )
+
+        context_asset, context_task = cls.get_context(instance.context.data)
+        created_instance["folderPath"] = context_asset
+        created_instance["task"] = context_task
+        create_context.save_changes()
+
+    @staticmethod
+    def get_context(data):
+        """Return asset, task from publishing context data"""
+        return data["folderPath"], data["task"]
@@ -177,7 +177,10 @@ class CollectFarmRender(publish.AbstractCollectRender):
                 outputFormat=info[1],
                 outputStartFrame=info[3],
                 leadingZeros=info[2],
-                ignoreFrameHandleCheck=True
+                ignoreFrameHandleCheck=True,
+                #todo: inst is not available, must be determined, fix when
+                #reworking to Publisher
+                # deadline=inst.data.get("deadline")
             )
             render_instance.context = context
@@ -1110,10 +1110,7 @@ def apply_colorspace_project():
     '''
     # backward compatibility layer
     # TODO: remove this after some time
-    config_data = get_imageio_config(
-        project_name=get_current_project_name(),
-        host_name="hiero"
-    )
+    config_data = get_current_context_imageio_config_preset()

     if config_data:
         presets.update({
@@ -13,11 +13,17 @@ class CreateArnoldRop(plugin.HoudiniCreator):
     # Default extension
     ext = "exr"

-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
         import hou
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

         # Remove the active, we are checking the bypass flag of the nodes
         instance_data.pop("active", None)

@@ -25,8 +31,6 @@ class CreateArnoldRop(plugin.HoudiniCreator):

         # Add chunk size attribute
         instance_data["chunkSize"] = 1
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateArnoldRop, self).create(
             product_name,

@@ -51,7 +55,7 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             "ar_exr_half_precision": 1 # half precision
         }

-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             ass_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.ass".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/ass/"),

@@ -66,23 +70,41 @@ class CreateArnoldRop(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target),
+        ]
+
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
-                    label="Image Format Options")
+                    label="Image Format Options"),
         ]
+        return attrs + self.get_instance_attr_defs()
@@ -11,15 +11,23 @@ class CreateKarmaROP(plugin.HoudiniCreator):
     product_type = "karma_rop"
     icon = "magic"

+    # Default render target
+    render_target = "farm"
+
     def create(self, product_name, instance_data, pre_create_data):
         import hou # noqa
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]
+
         instance_data.pop("active", None)
         instance_data.update({"node_type": "karma"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateKarmaROP, self).create(
             product_name,

@@ -86,18 +94,40 @@ class CreateKarmaROP(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
+        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",

@@ -112,5 +142,6 @@ class CreateKarmaROP(plugin.HoudiniCreator):
                     decimals=0),
             BoolDef("cam_res",
                     label="Camera Resolution",
-                    default=False)
+                    default=False),
         ]
+        return attrs + self.get_instance_attr_defs()
@@ -11,18 +11,22 @@ class CreateMantraROP(plugin.HoudiniCreator):
     product_type = "mantra_rop"
     icon = "magic"

-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
         import hou # noqa
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

         instance_data.pop("active", None)
         instance_data.update({"node_type": "ifd"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateMantraROP, self).create(
             product_name,

@@ -46,7 +50,7 @@ class CreateMantraROP(plugin.HoudiniCreator):
             "vm_picture": filepath,
         }

-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             ifd_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.ifd".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),

@@ -77,21 +81,40 @@ class CreateMantraROP(plugin.HoudiniCreator):
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

-    def get_pre_create_attr_defs(self):
-        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
+    def get_pre_create_attr_defs(self):
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = super(CreateMantraROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",

@@ -100,5 +123,6 @@ class CreateMantraROP(plugin.HoudiniCreator):
                     label="Override Camera Resolution",
                     tooltip="Override the current camera "
                             "resolution, recommended for IPR.",
-                    default=False)
+                    default=False),
         ]
+        return attrs + self.get_instance_attr_defs()
client/ayon_core/hosts/houdini/plugins/create/create_model.py (new file, 141 lines)
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating Model product type.
+
+Note:
+    Currently, This creator plugin is the same as 'create_pointcache.py'
+    But renaming the product type to 'model'.
+
+    It's purpose to support
+    Maya (load/publish model from maya to/from houdini).
+
+    It's considered to support multiple representations in the future.
+"""
+
+from ayon_core.hosts.houdini.api import plugin
+from ayon_core.lib import BoolDef
+
+import hou
+
+
+class CreateModel(plugin.HoudiniCreator):
+    """Create Model"""
+    identifier = "io.openpype.creators.houdini.model"
+    label = "Model"
+    product_type = "model"
+    icon = "cube"
+
+    def create(self, product_name, instance_data, pre_create_data):
+        instance_data.pop("active", None)
+        instance_data.update({"node_type": "alembic"})
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        creator_attributes["farm"] = pre_create_data["farm"]
+
+        instance = super(CreateModel, self).create(
+            product_name,
+            instance_data,
+            pre_create_data)
+
+        instance_node = hou.node(instance.get("instance_node"))
+        parms = {
+            "use_sop_path": True,
+            "build_from_path": True,
+            "path_attrib": "path",
+            "prim_to_detail_pattern": "cbId",
+            "format": 2,
+            "facesets": 0,
+            "filename": hou.text.expandString(
+                "$HIP/pyblish/{}.abc".format(product_name))
+        }
+
+        if self.selected_nodes:
+            selected_node = self.selected_nodes[0]
+
+            # Although Houdini allows ObjNode path on `sop_path` for the
+            # the ROP node we prefer it set to the SopNode path explicitly
+
+            # Allow sop level paths (e.g. /obj/geo1/box1)
+            if isinstance(selected_node, hou.SopNode):
+                parms["sop_path"] = selected_node.path()
+                self.log.debug(
+                    "Valid SopNode selection, 'SOP Path' in ROP will be set to '%s'."
+                    % selected_node.path()
+                )
+
+            # Allow object level paths to Geometry nodes (e.g. /obj/geo1)
+            # but do not allow other object level nodes types like cameras, etc.
+            elif isinstance(selected_node, hou.ObjNode) and \
+                    selected_node.type().name() in ["geo"]:
+
+                # get the output node with the minimum
+                # 'outputidx' or the node with display flag
+                sop_path = self.get_obj_output(selected_node)
+
+                if sop_path:
+                    parms["sop_path"] = sop_path.path()
+                    self.log.debug(
+                        "Valid ObjNode selection, 'SOP Path' in ROP will be set to "
+                        "the child path '%s'."
+                        % sop_path.path()
+                    )
+
+            if not parms.get("sop_path", None):
+                self.log.debug(
+                    "Selection isn't valid. 'SOP Path' in ROP will be empty."
+                )
+        else:
+            self.log.debug(
+                "No Selection. 'SOP Path' in ROP will be empty."
+            )
+
+        instance_node.setParms(parms)
+        instance_node.parm("trange").set(1)
+
+        # Explicitly set f1 and f2 to frame start.
+        # Which forces the rop node to export one frame.
+        instance_node.parmTuple('f').deleteAllKeyframes()
+        fstart = int(hou.hscriptExpression("$FSTART"))
+        instance_node.parmTuple('f').set((fstart, fstart, 1))
+
+        # Lock any parameters in this list
+        to_lock = ["prim_to_detail_pattern"]
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_network_categories(self):
+        return [
+            hou.ropNodeTypeCategory(),
+            hou.sopNodeTypeCategory()
+        ]
+
+    def get_obj_output(self, obj_node):
+        """Find output node with the smallest 'outputidx'."""
+
+        outputs = obj_node.subnetOutputs()
+
+        # if obj_node is empty
+        if not outputs:
+            return
+
+        # if obj_node has one output child whether its
+        # sop output node or a node with the render flag
+        elif len(outputs) == 1:
+            return outputs[0]
+
+        # if there are more than one, then it have multiple output nodes
+        # return the one with the minimum 'outputidx'
+        else:
+            return min(outputs,
+                       key=lambda node: node.evalParm('outputidx'))
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef("farm",
+                    label="Submitting to Farm",
+                    default=False)
+        ]
+
+    def get_pre_create_attr_defs(self):
+        attrs = super().get_pre_create_attr_defs()
+        # Use same attributes as for instance attributes
+        return attrs + self.get_instance_attr_defs()
@@ -17,17 +17,21 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
     ext = "exr"
     multi_layered_mode = "No Multi-Layered EXR File"

-    # Default to split export and render jobs
-    split_render = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

         instance_data.pop("active", None)
         instance_data.update({"node_type": "Redshift_ROP"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateRedshiftROP, self).create(
             product_name,

@@ -99,7 +103,7 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
         rs_filepath = f"{export_dir}{product_name}/{product_name}.$F4.rs"
         parms["RS_archive_file"] = rs_filepath

-        if pre_create_data.get("split_render", self.split_render):
+        if pre_create_data.get("render_target") == "farm_split":
             parms["RS_archive_enable"] = 1

         instance_node.setParms(parms)

@@ -118,24 +122,44 @@ class CreateRedshiftROP(plugin.HoudiniCreator):

         return super(CreateRedshiftROP, self).remove_instances(instances)

+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
     def get_pre_create_attr_defs(self):
-        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()

         image_format_enum = [
             "exr", "tif", "jpg", "png",
         ]

         multi_layered_mode = [
             "No Multi-Layered EXR File",
             "Full Multi-Layered EXR File"
         ]


-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("split_render",
-                    label="Split export and render jobs",
-                    default=self.split_render),
+        attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs()
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,

@@ -143,5 +167,6 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
             EnumDef("multi_layered_mode",
                     multi_layered_mode,
                     default=self.multi_layered_mode,
-                    label="Multi-Layered EXR")
+                    label="Multi-Layered EXR"),
         ]
+        return attrs + self.get_instance_attr_defs()
@@ -16,17 +16,21 @@ class CreateVrayROP(plugin.HoudiniCreator):
     icon = "magic"
     ext = "exr"

-    # Default to split export and render jobs
-    export_job = True
+    # Default render target
+    render_target = "farm_split"

     def create(self, product_name, instance_data, pre_create_data):
+        # Transfer settings from pre create to instance
+        creator_attributes = instance_data.setdefault(
+            "creator_attributes", dict())
+        for key in ["render_target", "review"]:
+            if key in pre_create_data:
+                creator_attributes[key] = pre_create_data[key]

         instance_data.pop("active", None)
         instance_data.update({"node_type": "vray_renderer"})
         # Add chunk size attribute
         instance_data["chunkSize"] = 10
-        # Submit for job publishing
-        instance_data["farm"] = pre_create_data.get("farm")

         instance = super(CreateVrayROP, self).create(
             product_name,

@@ -55,7 +59,7 @@ class CreateVrayROP(plugin.HoudiniCreator):
             "SettingsEXR_bits_per_channel": "16" # half precision
         }

-        if pre_create_data.get("export_job"):
+        if pre_create_data.get("render_target") == "farm_split":
             scene_filepath = \
                 "{export_dir}{product_name}/{product_name}.$F4.vrscene".format(
                     export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),

@@ -143,20 +147,41 @@ class CreateVrayROP(plugin.HoudiniCreator):

         return super(CreateVrayROP, self).remove_instances(instances)

+    def get_instance_attr_defs(self):
+        """get instance attribute definitions.
+
+        Attributes defined in this method are exposed in
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
     def get_pre_create_attr_defs(self):
-        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
         image_format_enum = [
             "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
             "rad", "rat", "rta", "sgi", "tga", "tif",
         ]

-        return attrs + [
-            BoolDef("farm",
-                    label="Submitting to Farm",
-                    default=True),
-            BoolDef("export_job",
-                    label="Split export and render jobs",
-                    default=self.export_job),
+        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
+
+        attrs += [
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,

@@ -172,3 +197,4 @@ class CreateVrayROP(plugin.HoudiniCreator):
                     "if enabled",
                     default=False)
         ]
+        return attrs + self.get_instance_attr_defs()
@@ -95,7 +95,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
         # write workfile information to context container.
         op_ctx = hou.node(CONTEXT_CONTAINER)
         if not op_ctx:
-            op_ctx = self.create_context_node()
+            op_ctx = self.host.create_context_node()

         workfile_data = {"workfile": current_instance.data_to_store()}
         imprint(op_ctx, workfile_data)
@@ -40,12 +40,9 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "ar_picture")
         render_products = []

-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("ar_ass_export_enable").eval())
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "ar_ass_file", pad_character="0"
             )

@@ -68,7 +65,12 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             "": self.generate_expected_files(instance, beauty_product)
         }

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
         num_aovs = rop.evalParm("ar_aovs")
+        # TODO: Check the following logic.
+        # as it always assumes that all AOV are not merged.
         for index in range(1, num_aovs + 1):
             # Skip disabled AOVs
             if not rop.evalParm("ar_enable_aov{}".format(index)):

@@ -85,6 +87,14 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
             files_by_aov[label] = self.generate_expected_files(instance,
                                                                aov_product)

+            # Set to False as soon as we have a separated aov.
+            multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: {}".format(product))
@@ -7,10 +7,11 @@ from ayon_core.hosts.houdini.api import lib
 class CollectDataforCache(pyblish.api.InstancePlugin):
     """Collect data for caching to Deadline."""

-    order = pyblish.api.CollectorOrder + 0.04
+    # Run after Collect Frames
+    order = pyblish.api.CollectorOrder + 0.11
     families = ["ass", "pointcache",
                 "mantraifd", "redshiftproxy",
-                "vdbcache"]
+                "vdbcache", "model"]
     hosts = ["houdini"]
     targets = ["local", "remote"]
     label = "Collect Data for Cache"

@@ -42,10 +43,7 @@ class CollectDataforCache(pyblish.api.InstancePlugin):
         cache_files = {"_": instance.data["files"]}
-        # Convert instance family to pointcache if it is bgeo or abc
-        # because ???
-        for family in instance.data["families"]:
-            if family == "bgeo" or "abc":
-                instance.data["productType"] = "pointcache"
-                break
+        self.log.debug(instance.data["families"])
         instance.data.update({
             "plugin": "Houdini",
             "publish": True
@@ -10,7 +10,7 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
     order = pyblish.api.CollectorOrder + 0.05
     families = ["ass", "pointcache",
                 "vdbcache", "mantraifd",
-                "redshiftproxy"]
+                "redshiftproxy", "model"]
     hosts = ["houdini"]
     targets = ["local", "remote"]
     label = "Collect Chunk Size"
@@ -0,0 +1,35 @@
+import pyblish.api
+
+
+class CollectFarmInstances(pyblish.api.InstancePlugin):
+    """Collect instances for farm render."""
+
+    order = pyblish.api.CollectorOrder
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    hosts = ["houdini"]
+    targets = ["local", "remote"]
+    label = "Collect farm instances"
+
+    def process(self, instance):
+
+        creator_attribute = instance.data["creator_attributes"]
+
+        # Collect Render Target
+        if creator_attribute.get("render_target") not in {
+                "farm_split", "farm"
+        }:
+            instance.data["farm"] = False
+            instance.data["splitRender"] = False
+            self.log.debug("Render on farm is disabled. "
+                           "Skipping farm collecting.")
+            return
+
+        instance.data["farm"] = True
+        instance.data["splitRender"] = (
+            creator_attribute.get("render_target") == "farm_split"
+        )
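The collector above boils down to a small mapping from the creator's `render_target` choice to the two flags that later plugins read. A standalone distillation of that mapping (the `farm_flags` helper is illustrative only, not part of the plugin):

def farm_flags(render_target):
    # "farm" and "farm_split" both submit to the farm; only "farm_split"
    # also splits the job into separate export and render tasks.
    farm = render_target in {"farm", "farm_split"}
    return {"farm": farm, "splitRender": render_target == "farm_split"}

assert farm_flags("local") == {"farm": False, "splitRender": False}
assert farm_flags("farm") == {"farm": True, "splitRender": False}
assert farm_flags("farm_split") == {"farm": True, "splitRender": True}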
@@ -17,7 +17,7 @@ class CollectFrames(pyblish.api.InstancePlugin):
     label = "Collect Frames"
     families = ["vdbcache", "imagesequence", "ass",
                 "mantraifd", "redshiftproxy", "review",
-                "bgeo"]
+                "pointcache"]

     def process(self, instance):
@@ -1,21 +1,24 @@
-"""Collector for pointcache types.
+"""Collector for different types.

-This will add additional family to pointcache instance based on
+This will add additional families to different instance based on
 the creator_identifier parameter.
 """
 import pyblish.api


 class CollectPointcacheType(pyblish.api.InstancePlugin):
-    """Collect data type for pointcache instance."""
+    """Collect data type for different instances."""

     order = pyblish.api.CollectorOrder
     hosts = ["houdini"]
-    families = ["pointcache"]
-    label = "Collect type of pointcache"
+    families = ["pointcache", "model"]
+    label = "Collect instances types"

     def process(self, instance):
         if instance.data["creator_identifier"] == "io.openpype.creators.houdini.bgeo": # noqa: E501
             instance.data["families"] += ["bgeo"]
-        elif instance.data["creator_identifier"] == "io.openpype.creators.houdini.pointcache": # noqa: E501
+        elif instance.data["creator_identifier"] in {
+            "io.openpype.creators.houdini.pointcache",
+            "io.openpype.creators.houdini.model"
+        }:
             instance.data["families"] += ["abc"]
@@ -55,6 +55,12 @@ class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin):
                                                       beauty_product)
         }

+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        # By default karma render is a multipart Exr.
+        instance.data["multipartExr"] = True
+
         filenames = list(render_products)
         instance.data["files"] = filenames
         instance.data["renderProducts"] = colorspace.ARenderProduct()
@@ -0,0 +1,137 @@
+import os
+import pyblish.api
+from ayon_core.pipeline.create import get_product_name
+from ayon_core.pipeline.farm.patterning import match_aov_pattern
+from ayon_core.pipeline.publish import (
+    get_plugin_settings,
+    apply_plugin_settings_automatically
+)
+
+
+class CollectLocalRenderInstances(pyblish.api.InstancePlugin):
+    """Collect instances for local render.
+
+    Agnostic Local Render Collector.
+    """
+
+    # this plugin runs after Collect Render Products
+    order = pyblish.api.CollectorOrder + 0.12
+    families = ["mantra_rop",
+                "karma_rop",
+                "redshift_rop",
+                "arnold_rop",
+                "vray_rop"]
+
+    hosts = ["houdini"]
+    label = "Collect local render instances"
+
+    use_deadline_aov_filter = False
+    aov_filter = {"host_name": "houdini",
+                  "value": [".*([Bb]eauty).*"]}
+
+    @classmethod
+    def apply_settings(cls, project_settings):
+        # Preserve automatic settings applying logic
+        settings = get_plugin_settings(plugin=cls,
+                                       project_settings=project_settings,
+                                       log=cls.log,
+                                       category="houdini")
+        apply_plugin_settings_automatically(cls, settings, logger=cls.log)
+
+        if not cls.use_deadline_aov_filter:
+            # get aov_filter from collector settings
+            # and restructure it as match_aov_pattern requires.
+            cls.aov_filter = {
+                cls.aov_filter["host_name"]: cls.aov_filter["value"]
+            }
+        else:
+            # get aov_filter from deadline settings
+            cls.aov_filter = project_settings["deadline"]["publish"]["ProcessSubmittedJobOnFarm"]["aov_filter"]
+            cls.aov_filter = {
+                item["name"]: item["value"]
+                for item in cls.aov_filter
+            }
+
+    def process(self, instance):
+
+        if instance.data["farm"]:
+            self.log.debug("Render on farm is enabled. "
+                           "Skipping local render collecting.")
+            return
+
+        # Create Instance for each AOV.
+        context = instance.context
+        expectedFiles = next(iter(instance.data["expectedFiles"]), {})
+
+        product_type = "render"  # is always render
+        product_group = get_product_name(
+            context.data["projectName"],
+            context.data["taskEntity"]["name"],
+            context.data["taskEntity"]["taskType"],
+            context.data["hostName"],
+            product_type,
+            instance.data["productName"]
+        )
+
+        for aov_name, aov_filepaths in expectedFiles.items():
+            product_name = product_group
+
+            if aov_name:
+                product_name = "{}_{}".format(product_name, aov_name)
+
+            # Create instance for each AOV
+            aov_instance = context.create_instance(product_name)
+
+            # Prepare Representation for each AOV
+            aov_filenames = [os.path.basename(path) for path in aov_filepaths]
+            staging_dir = os.path.dirname(aov_filepaths[0])
+            ext = aov_filepaths[0].split(".")[-1]
+
+            # Decide if instance is reviewable
+            preview = False
+            if instance.data.get("multipartExr", False):
+                # Add preview tag because its multipartExr.
+                preview = True
+            else:
+                # Add Preview tag if the AOV matches the filter.
+                preview = match_aov_pattern(
+                    "houdini", self.aov_filter, aov_filenames[0]
+                )
+
+            preview = preview and instance.data.get("review", False)
+
+            # Support Single frame.
+            # The integrator wants single files to be a single
+            # filename instead of a list.
+            # More info: https://github.com/ynput/ayon-core/issues/238
+            if len(aov_filenames) == 1:
+                aov_filenames = aov_filenames[0]
+
+            aov_instance.data.update({
+                # 'label': label,
+                "task": instance.data["task"],
+                "folderPath": instance.data["folderPath"],
+                "frameStart": instance.data["frameStartHandle"],
+                "frameEnd": instance.data["frameEndHandle"],
+                "productType": product_type,
+                "family": product_type,
+                "productName": product_name,
+                "productGroup": product_group,
+                "families": ["render.local.hou", "review"],
+                "instance_node": instance.data["instance_node"],
+                "representations": [
+                    {
+                        "stagingDir": staging_dir,
+                        "ext": ext,
+                        "name": ext,
+                        "tags": ["review"] if preview else [],
+                        "files": aov_filenames,
+                        "frameStart": instance.data["frameStartHandle"],
+                        "frameEnd": instance.data["frameEndHandle"]
+                    }
+                ]
+            })
+
+        # Skip integrating original render instance.
+        # We are not removing it because it's used to trigger the render.
+        instance.data["integrate"] = False
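For non-multipart renders, the review decision above hinges on a per-host regex filter. A sketch of that check using the plugin's default pattern; `is_preview` is an illustrative stand-in for how `match_aov_pattern` is used here, not ayon-core's actual implementation:

import re

aov_filter = {"houdini": [".*([Bb]eauty).*"]}

def is_preview(host_name, aov_filter, filename):
    # An AOV is flagged reviewable when any pattern registered for the
    # host matches the first expected file name.
    return any(re.match(pattern, filename)
               for pattern in aov_filter.get(host_name, []))

print(is_preview("houdini", aov_filter, "shot010_beauty.0001.exr"))   # True
print(is_preview("houdini", aov_filter, "shot010_diffuse.0001.exr"))  # False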
@@ -44,12 +44,9 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "vm_picture")
         render_products = []

-        # Store whether we are splitting the render job (export + render)
-        split_render = bool(rop.parm("soho_outputmode").eval())
-        instance.data["splitRender"] = split_render
         export_prefix = None
         export_products = []
-        if split_render:
+        if instance.data["splitRender"]:
             export_prefix = evalParmNoFrame(
                 rop, "soho_diskfile", pad_character="0"
             )

@@ -74,6 +71,11 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
                                                       beauty_product)
         }

+        # Assume it's a multipartExr Render.
+        multipartExr = True
+
+        # TODO: This logic doesn't take into considerations
+        # cryptomatte defined in 'Images > Cryptomatte'
         aov_numbers = rop.evalParm("vm_numaux")
         if aov_numbers > 0:
             # get the filenames of the AOVs

@@ -93,6 +95,14 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):

                 files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa

+                # Set to False as soon as we have a separated aov.
+                multipartExr = False
+
+        # Review Logic expects this key to exist and be True
+        # if render is a multipart Exr.
+        # As long as we have one AOV then multipartExr should be True.
+        instance.data["multipartExr"] = multipartExr
+
         for product in render_products:
             self.log.debug("Found render product: %s" % product)
@@ -15,7 +15,8 @@ class CollectOutputSOPPath(pyblish.api.InstancePlugin):
         "usd",
         "usdrender",
         "redshiftproxy",
-        "staticMesh"
+        "staticMesh",
+        "model"
     ]

     hosts = ["houdini"]
@ -42,11 +42,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):

        default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
        beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
        # Store whether we are splitting the render job (export + render)
        split_render = bool(rop.parm("RS_archive_enable").eval())
        instance.data["splitRender"] = split_render

        export_products = []
        if split_render:
        if instance.data["splitRender"]:
            export_prefix = evalParmNoFrame(
                rop, "RS_archive_file", pad_character="0"
            )
@ -63,9 +61,12 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
        full_exr_mode = (rop.evalParm("RS_outputMultilayerMode") == "2")
        if full_exr_mode:
            # Ignore beauty suffix if full mode is enabled
            # as this is what the rop does.
            beauty_suffix = ""

        # Assume it's a multipartExr render.
        multipartExr = True

        # Default beauty/main layer AOV
        beauty_product = self.get_render_product_name(
            prefix=default_prefix, suffix=beauty_suffix
@ -75,7 +76,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
            beauty_suffix: self.generate_expected_files(instance,
                                                        beauty_product)
        }

        aovs_rop = rop.parm("RS_aovGetFromNode").evalAsNode()
        if aovs_rop:
            rop = aovs_rop
@ -98,13 +99,21 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):

            if rop.parm(f"RS_aovID_{i}").evalAsString() == "CRYPTOMATTE" or \
                    not full_exr_mode:

                aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
                render_products.append(aov_product)

                files_by_aov[aov_suffix] = self.generate_expected_files(instance,
                                                                        aov_product)  # noqa

                # Set to False as soon as we have a separate AOV.
                multipartExr = False

        # Review logic expects this key to exist and be True
        # if the render is a multipart EXR.
        # As long as we have one AOV, multipartExr should be True.
        instance.data["multipartExr"] = multipartExr

        for product in render_products:
            self.log.debug("Found render product: %s" % product)
@ -8,7 +8,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
    label = "Collect Review Data"
    # This specific order value is used so that
    # this plugin runs after CollectRopFrameRange
    order = pyblish.api.CollectorOrder + 0.1
    # Also after CollectLocalRenderInstances
    order = pyblish.api.CollectorOrder + 0.13
    hosts = ["houdini"]
    families = ["review"]
@ -28,7 +29,8 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
        ropnode_path = instance.data["instance_node"]
        ropnode = hou.node(ropnode_path)

        camera_path = ropnode.parm("camera").eval()
        # Get camera based on the instance_node type.
        camera_path = self._get_camera_path(ropnode)
        camera_node = hou.node(camera_path)
        if not camera_node:
            self.log.warning("No valid camera node found on review node: "
@ -55,3 +57,29 @@ class CollectHoudiniReviewData(pyblish.api.InstancePlugin):
        # Store focal length in `burninDataMembers`
        burnin_members = instance.data.setdefault("burninDataMembers", {})
        burnin_members["focalLength"] = focal_length

    def _get_camera_path(self, ropnode):
        """Get the camera path associated with the given rop node.

        This function evaluates the camera parameter according to the
        type of the given rop node.

        Returns:
            Union[str, None]: Camera path or None.

        This function can return an empty string if the camera
        path is empty, i.e. there is no camera path.
        """

        if ropnode.type().name() in {
            "opengl", "karma", "ifd", "arnold"
        }:
            return ropnode.parm("camera").eval()

        elif ropnode.type().name() == "Redshift_ROP":
            return ropnode.parm("RS_renderCamera").eval()

        elif ropnode.type().name() == "vray_renderer":
            return ropnode.parm("render_camera").eval()

        return None
@ -0,0 +1,22 @@
import pyblish.api


class CollectReviewableInstances(pyblish.api.InstancePlugin):
    """Collect Reviewable Instances.

    Basically, all instances of the specified families
    with creator_attributes["review"] set.
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Reviewable Instances"
    families = ["mantra_rop",
                "karma_rop",
                "redshift_rop",
                "arnold_rop",
                "vray_rop"]

    def process(self, instance):
        creator_attribute = instance.data["creator_attributes"]

        instance.data["review"] = creator_attribute.get("review", False)
@ -45,12 +45,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
        render_products = []
        # TODO: add render elements if render element

        # Store whether we are splitting the render job in an export + render
        split_render = rop.parm("render_export_mode").eval() == "2"
        instance.data["splitRender"] = split_render
        export_prefix = None
        export_products = []
        if split_render:
        if instance.data["splitRender"]:
            export_prefix = evalParmNoFrame(
                rop, "render_export_filepath", pad_character="0"
            )
@ -70,6 +67,9 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
            "": self.generate_expected_files(instance,
                                             beauty_product)}

        # Assume it's a multipartExr render.
        multipartExr = True

        if instance.data.get("RenderElement", True):
            render_element = self.get_render_element_name(rop, default_prefix)
            if render_element:
@ -77,7 +77,13 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
                render_products.append(renderpass)
                files_by_aov[aov] = self.generate_expected_files(
                    instance, renderpass)
                # Set to False as soon as we have a separate AOV.
                multipartExr = False

        # Review logic expects this key to exist and be True
        # if the render is a multipart EXR.
        # As long as we have one AOV, multipartExr should be True.
        instance.data["multipartExr"] = multipartExr

        for product in render_products:
            self.log.debug("Found render product: %s" % product)
@ -28,10 +28,15 @@ class ExtractAlembic(publish.Extractor):
        staging_dir = os.path.dirname(output)
        instance.data["stagingDir"] = staging_dir

        file_name = os.path.basename(output)
        if instance.data.get("frames"):
            # list of files
            files = instance.data["frames"]
        else:
            # single file
            files = os.path.basename(output)

        # We run the render
        self.log.info("Writing alembic '%s' to '%s'" % (file_name,
        self.log.info("Writing alembic '%s' to '%s'" % (files,
                                                        staging_dir))

        render_rop(ropnode)
@ -42,7 +47,7 @@ class ExtractAlembic(publish.Extractor):
        representation = {
            'name': 'abc',
            'ext': 'abc',
            'files': file_name,
            'files': files,
            "stagingDir": staging_dir,
        }
        instance.data["representations"].append(representation)
@ -19,6 +19,16 @@ class ExtractOpenGL(publish.Extractor,
    def process(self, instance):
        ropnode = hou.node(instance.data.get("instance_node"))

        # This plugin is triggered when marking a render as reviewable.
        # Therefore, this plugin can run on the wrong instances.
        # TODO: Don't run this plugin on the wrong instances.
        # This plugin should run only on the review product type
        # with an instance node of opengl type.
        if ropnode.type().name() != "opengl":
            self.log.debug("Skipping OpenGL extraction. Rop node {} "
                           "is not an OpenGL node.".format(ropnode.path()))
            return

        output = ropnode.evalParm("picture")
        staging_dir = os.path.normpath(os.path.dirname(output))
        instance.data["stagingDir"] = staging_dir
@ -0,0 +1,74 @@
import pyblish.api

from ayon_core.pipeline import publish
from ayon_core.hosts.houdini.api.lib import render_rop
import hou
import os


class ExtractRender(publish.Extractor):

    order = pyblish.api.ExtractorOrder
    label = "Extract Render"
    hosts = ["houdini"]
    families = ["mantra_rop",
                "karma_rop",
                "redshift_rop",
                "arnold_rop",
                "vray_rop"]

    def process(self, instance):
        creator_attribute = instance.data["creator_attributes"]
        product_type = instance.data["productType"]
        rop_node = hou.node(instance.data.get("instance_node"))

        # Align the split parameter value on the rop node to the render target.
        if instance.data["splitRender"]:
            if product_type == "arnold_rop":
                rop_node.setParms({"ar_ass_export_enable": 1})
            elif product_type == "mantra_rop":
                rop_node.setParms({"soho_outputmode": 1})
            elif product_type == "redshift_rop":
                rop_node.setParms({"RS_archive_enable": 1})
            elif product_type == "vray_rop":
                rop_node.setParms({"render_export_mode": "2"})
        else:
            if product_type == "arnold_rop":
                rop_node.setParms({"ar_ass_export_enable": 0})
            elif product_type == "mantra_rop":
                rop_node.setParms({"soho_outputmode": 0})
            elif product_type == "redshift_rop":
                rop_node.setParms({"RS_archive_enable": 0})
            elif product_type == "vray_rop":
                rop_node.setParms({"render_export_mode": "1"})

        if instance.data.get("farm"):
            self.log.debug(
                "Render should be processed on farm, skipping local render.")
            return

        if creator_attribute.get("render_target") == "local":
            ropnode = hou.node(instance.data.get("instance_node"))
            render_rop(ropnode)

        # `expectedFiles` is a list that includes one dict.
        expected_files = instance.data["expectedFiles"][0]
        # Each key in that dict is a list of files.
        # Combine the lists of files into one big list.
        all_frames = []
        for value in expected_files.values():
            if isinstance(value, str):
                all_frames.append(value)
            elif isinstance(value, list):
                all_frames.extend(value)
        # Check for missing frames.
        # Frames won't exist if the user cancels the render.
        missing_frames = [
            frame
            for frame in all_frames
            if not os.path.exists(frame)
        ]
        if missing_frames:
            # TODO: Use user-friendly error reporting.
            raise RuntimeError("Failed to complete render extraction. "
                               "Missing output files: {}".format(
                                   missing_frames))
@ -17,11 +17,13 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["houdini"]
    families = ["workfile",
                "redshift_rop",
                "arnold_rop",
                "usdrender",
                "mantra_rop",
                "karma_rop",
                "usdrender",
                "redshift_rop",
                "arnold_rop",
                "vray_rop",
                "render.local.hou",
                "publish.hou"]
    optional = True
@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
"""Validator for checking that the export is a single frame."""
import hou

import pyblish.api
from ayon_core.pipeline import (
    PublishValidationError,
    OptionalPyblishPluginMixin
)
from ayon_core.pipeline.publish import ValidateContentsOrder
from ayon_core.hosts.houdini.api.action import SelectInvalidAction


class ValidateSingleFrame(pyblish.api.InstancePlugin,
                          OptionalPyblishPluginMixin):
    """Validate Export is a Single Frame.

    It checks if the rop node is exporting one frame.
    This is mainly for the Model product type.
    """

    families = ["model"]
    hosts = ["houdini"]
    label = "Validate Single Frame"
    order = ValidateContentsOrder + 0.1
    actions = [SelectInvalidAction]

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            nodes = [n.path() for n in invalid]
            raise PublishValidationError(
                "See log for details. "
                "Invalid nodes: {0}".format(nodes)
            )

    @classmethod
    def get_invalid(cls, instance):

        invalid = []

        frame_start = instance.data.get("frameStartHandle")
        frame_end = instance.data.get("frameEndHandle")

        # This happens if the instance node has no 'trange' parameter.
        if frame_start is None or frame_end is None:
            cls.log.debug(
                "No frame data, skipping check..."
            )
            return

        if frame_start != frame_end:
            # Store the node itself so `n.path()` in process() and the
            # SelectInvalidAction can work with it.
            invalid.append(hou.node(instance.data["instance_node"]))
            cls.log.error(
                "Invalid frame range on '%s'. "
                "You should use the same frame number for 'f1' "
                "and 'f2' parameters.",
                instance.data["instance_node"]
            )

        return invalid
@ -16,9 +16,13 @@ class ValidateMeshIsStatic(pyblish.api.InstancePlugin,
    """Validate mesh is static.

    It checks if the output node is time dependent.
    This avoids getting a different output from the ROP node when extracting
    from a different frame than the first frame.
    (Might be overly restrictive though.)
    """

    families = ["staticMesh"]
    families = ["staticMesh",
                "model"]
    hosts = ["houdini"]
    label = "Validate Mesh is Static"
    order = ValidateContentsOrder + 0.1
@ -7,7 +7,7 @@ class ValidateIntermediateDirectoriesChecked(pyblish.api.InstancePlugin):
    """Validate Create Intermediate Directories is enabled on ROP node."""

    order = pyblish.api.ValidatorOrder
    families = ["pointcache", "camera", "vdbcache"]
    families = ["pointcache", "camera", "vdbcache", "model"]
    hosts = ["houdini"]
    label = "Create Intermediate Directories Checked"
@ -56,6 +56,18 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,

    def process(self, instance):

        rop_node = hou.node(instance.data["instance_node"])

        # This plugin is triggered when marking a render as reviewable.
        # Therefore, this plugin can run on the wrong instances.
        # TODO: Don't run this plugin on the wrong instances.
        # This plugin should run only on the review product type
        # with an instance node of opengl type.
        if rop_node.type().name() != "opengl":
            self.log.debug("Skipping validation. Rop node {} "
                           "is not an OpenGL node.".format(rop_node.path()))
            return

        if not self.is_active(instance.data):
            return
@ -66,7 +78,6 @@ class ValidateReviewColorspace(pyblish.api.InstancePlugin,
            )
            return

        rop_node = hou.node(instance.data["instance_node"])
        if rop_node.evalParm("colorcorrect") != 2:
            # any colorspace setting other than the default requires
            # the 'Color Correct' parm to be set to 'OpenColorIO'
@ -20,6 +20,16 @@ class ValidateSceneReview(pyblish.api.InstancePlugin):
        report = []
        instance_node = hou.node(instance.data.get("instance_node"))

        # This plugin is triggered when marking a render as reviewable.
        # Therefore, this plugin can run on the wrong instances.
        # TODO: Don't run this plugin on the wrong instances.
        # This plugin should run only on the review product type
        # with an instance node of opengl type.
        if instance_node.type().name() != "opengl":
            self.log.debug("Skipping validation. Rop node {} "
                           "is not an OpenGL node.".format(instance_node.path()))
            return

        invalid = self.get_invalid_scene_path(instance_node)
        if invalid:
            report.append(invalid)
@ -22,7 +22,7 @@ class ValidateSopOutputNode(pyblish.api.InstancePlugin):
    """

    order = pyblish.api.ValidatorOrder
    families = ["pointcache", "vdbcache"]
    families = ["pointcache", "vdbcache", "model"]
    hosts = ["houdini"]
    label = "Validate Output Node (SOP)"
    actions = [SelectROPAction, SelectInvalidAction]
client/ayon_core/hosts/houdini/startup/OPmenu.xml (new file, 29 lines)

@ -0,0 +1,29 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- OPMenu Stencil.
It's used to extend the OPMenu.
-->

<menuDocument>
  <menu>
    <!-- Operator type and asset options. -->
    <subMenu id="opmenu.vhda_options_create">
      <insertBefore>opmenu.unsynchronize</insertBefore>
      <scriptItem id="opmenu.vhda_create_ayon">
        <insertAfter>opmenu.vhda_create</insertAfter>
        <label>Create New (AYON)...</label>
        <context>
        </context>
        <scriptCode>
          <![CDATA[
from ayon_core.hosts.houdini.api.creator_node_shelves import create_interactive

node = kwargs["node"]
if node not in hou.selectedNodes():
    node.setSelected(True)
create_interactive("io.openpype.creators.houdini.hda", **kwargs)
          ]]>
        </scriptCode>
      </scriptItem>
    </subMenu>
  </menu>
</menuDocument>
@ -1,98 +0,0 @@
import pyblish.api

from ayon_core.pipeline import OptionalPyblishPluginMixin
from ayon_core.pipeline.publish import RepairAction, PublishValidationError


class ValidateAlembicDefaultsPointcache(
    pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
    """Validate the attributes on the instance are defaults.

    The defaults are defined in the project settings.
    """

    order = pyblish.api.ValidatorOrder
    families = ["pointcache"]
    hosts = ["maya"]
    label = "Validate Alembic Options Defaults"
    actions = [RepairAction]
    optional = True

    plugin_name = "ExtractAlembic"

    @classmethod
    def _get_settings(cls, context):
        maya_settings = context.data["project_settings"]["maya"]
        settings = maya_settings["publish"]["ExtractAlembic"]
        return settings

    @classmethod
    def _get_publish_attributes(cls, instance):
        attributes = instance.data["publish_attributes"][
            cls.plugin_name(
                instance.data["publish_attributes"]
            )
        ]

        return attributes

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        settings = self._get_settings(instance.context)

        attributes = self._get_publish_attributes(instance)

        msg = (
            "Alembic Extract setting \"{}\" is not the default value:"
            "\nCurrent: {}"
            "\nDefault Value: {}\n"
        )
        errors = []
        for key, value in attributes.items():
            default_value = settings[key]

            # Lists are best compared sorted since we can't rely on the
            # order of the items.
            if isinstance(value, list):
                value = sorted(value)
                default_value = sorted(default_value)

            if value != default_value:
                errors.append(msg.format(key, value, default_value))

        if errors:
            raise PublishValidationError("\n".join(errors))

    @classmethod
    def repair(cls, instance):
        # Find create instance twin.
        create_context = instance.context.data["create_context"]
        create_instance = create_context.get_instance_by_id(
            instance.data["instance_id"]
        )

        # Set the settings values on the create context then save to workfile.
        publish_attributes = instance.data["publish_attributes"]
        plugin_name = cls.plugin_name(publish_attributes)
        attributes = cls._get_publish_attributes(instance)
        settings = cls._get_settings(instance.context)
        create_publish_attributes = create_instance.data["publish_attributes"]
        for key in attributes:
            create_publish_attributes[plugin_name][key] = settings[key]

        create_context.save_changes()


class ValidateAlembicDefaultsAnimation(
    ValidateAlembicDefaultsPointcache
):
    """Validate the attributes on the instance are defaults.

    The defaults are defined in the project settings.
    """
    label = "Validate Alembic Options Defaults"
    families = ["animation"]
    plugin_name = "ExtractAnimation"
@ -1,71 +0,0 @@
import pyblish.api
import ayon_core.hosts.maya.api.action
from ayon_core.pipeline.publish import (
    PublishValidationError,
    ValidateContentsOrder,
    OptionalPyblishPluginMixin
)
from maya import cmds


class ValidateAnimatedReferenceRig(pyblish.api.InstancePlugin,
                                   OptionalPyblishPluginMixin):
    """Validate all nodes in skeletonAnim_SET are referenced."""

    order = ValidateContentsOrder
    hosts = ["maya"]
    families = ["animation.fbx"]
    label = "Animated Reference Rig"
    accepted_controllers = ["transform", "locator"]
    actions = [ayon_core.hosts.maya.api.action.SelectInvalidAction]
    optional = False

    def process(self, instance):
        if not self.is_active(instance.data):
            return
        animated_sets = instance.data.get("animated_skeleton", [])
        if not animated_sets:
            self.log.debug(
                "No nodes found in skeletonAnim_SET. "
                "Skipping validation of animated reference rig..."
            )
            return

        for animated_reference in animated_sets:
            is_referenced = cmds.referenceQuery(
                animated_reference, isNodeReferenced=True)
            if not bool(is_referenced):
                raise PublishValidationError(
                    "All the content in skeletonAnim_SET"
                    " should be referenced nodes"
                )
        invalid_controls = self.validate_controls(animated_sets)
        if invalid_controls:
            raise PublishValidationError(
                "All the content in skeletonAnim_SET"
                " should be transforms"
            )

    @classmethod
    def validate_controls(self, set_members):
        """Check if the controller set contains only accepted node types.

        Checks if all its set members are within the hierarchy of the root.
        Checks if the node types of the set members are valid.

        Args:
            set_members: list of nodes of the skeleton_anim_set
            hierarchy: list of nodes which reside under the root node

        Returns:
            errors (list)
        """

        # Validate control types
        invalid = []
        set_members = cmds.ls(set_members, long=True)
        for node in set_members:
            if cmds.nodeType(node) not in self.accepted_controllers:
                invalid.append(node)

        return invalid
@ -35,8 +35,12 @@ class ImageCreator(Creator):
        create_empty_group = False

        stub = api.stub()  # only after PS is up
        top_level_selected_items = stub.get_selected_layers()
        if pre_create_data.get("use_selection"):
            try:
                top_level_selected_items = stub.get_selected_layers()
            except ValueError:
                raise CreatorError("Cannot group locked Background layer!")

            only_single_item_selected = len(top_level_selected_items) == 1
            if (
                only_single_item_selected or

@ -50,11 +54,12 @@ class ImageCreator(Creator):
                group = stub.group_selected_layers(product_name_from_ui)
                groups_to_create.append(group)
        else:
            stub.select_layers(stub.get_layers())
            try:
                stub.select_layers(stub.get_layers())
                group = stub.group_selected_layers(product_name_from_ui)
            except:
            except ValueError:
                raise CreatorError("Cannot group locked Background layer!")

            groups_to_create.append(group)

        # create empty group if nothing selected
@ -1,26 +1,23 @@
Updated as of 26 May 2023
Last Updated: 1 April 2024
----------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import
modules for scripting access (DaVinciResolve.py) and some representative examples.

From v16.2.0 onwards, the nodeIndex parameters accepted by SetLUT() and SetCDL() are 1-based instead of 0-based, i.e. 1 <= nodeIndex <= total number of nodes.


Overview
--------
As with Blackmagic Design Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page,
As with Blackmagic Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page,
or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when
allowing scripting access from outside of the Resolve application.


Prerequisites
-------------
DaVinci Resolve scripting requires one of the following to be installed (for all users):

Lua 5.1
Python 2.7 64-bit
Python >= 3.6 64-bit

Python 2.7 64-bit

Using a script
--------------
@ -64,6 +61,7 @@ The interactive Console window allows for an easy way to execute simple scriptin
and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual.

This example Python script creates a simple project:

#!/usr/bin/env python
import DaVinciResolveScript as dvr_script
resolve = dvr_script.scriptapp("Resolve")
@ -80,9 +78,8 @@ Running DaVinci Resolve in headless mode
DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled.
However, the various scripting APIs will continue to work as expected.


Basic Resolve API
-----------------
DaVinci Resolve API
-------------------
Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions.

Resolve
@ -101,6 +98,12 @@ Resolve
  SaveLayoutPreset(presetName) --> Bool  # Saves current UI layout as a preset named 'presetName'.
  ImportLayoutPreset(presetFilePath, presetName) --> Bool  # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename.
  Quit() --> None  # Quits the Resolve App.
  ImportRenderPreset(presetPath) --> Bool  # Import a preset from presetPath (string) and set it as current preset for rendering.
  ExportRenderPreset(presetName, exportPath) --> Bool  # Export a preset to a given path (string) if presetName (string) exists.
  ImportBurnInPreset(presetPath) --> Bool  # Import a data burn in preset from a given presetPath (string)
  ExportBurnInPreset(presetName, exportPath) --> Bool  # Export a data burn in preset to a given path (string) if presetName (string) exists.
  GetKeyframeMode() --> keyframeMode  # Returns the currently set keyframe mode (int). Refer to section 'Keyframe Mode information' below for details.
  SetKeyframeMode(keyframeMode) --> Bool  # Returns True when 'keyframeMode' (enum) is successfully set. Refer to section 'Keyframe Mode information' below for details.

ProjectManager
  ArchiveProject(projectName,
@ -131,6 +134,14 @@ ProjectManager
      # 'DbType': 'Disk' or 'PostgreSQL' (string)
      # 'DbName': database name (string)
      # 'IpAddress': IP address of the PostgreSQL server (string, optional key - defaults to '127.0.0.1')
  CreateCloudProject({cloudSettings}) --> Project  # Creates and returns a cloud project.
      # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
  ImportCloudProject(filePath, {cloudSettings}) --> Bool  # Returns True if import cloud project is successful; False otherwise
      # 'filePath': String; filePath of file to import
      # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
  RestoreCloudProject(folderPath, {cloudSettings}) --> Bool  # Returns True if restore cloud project is successful; False otherwise
      # 'folderPath': String; path of folder to restore
      # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.

Project
  GetMediaPool() --> MediaPool  # Returns the Media Pool object.
@ -175,6 +186,9 @@ Project
                                 startOffsetInSamples, durationInSamples)
  LoadBurnInPreset(presetName) --> Bool  # Loads user defined data burn in preset for project when supplied presetName (string). Returns true if successful.
  ExportCurrentFrameAsStill(filePath) --> Bool  # Exports current frame as still to supplied filePath. filePath must end in valid export file format. Returns True if successful, False otherwise.
  GetColorGroupsList() --> [ColorGroups...]  # Returns a list of all group objects in the timeline.
  AddColorGroup(groupName) --> ColorGroup  # Creates a new ColorGroup. groupName must be a unique string.
  DeleteColorGroup(colorGroup) --> Bool  # Deletes the given color group and sets clips to ungrouped.

MediaStorage
  GetMountedVolumeList() --> [paths...]  # Returns list of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage.
@ -198,7 +212,7 @@ MediaPool
  CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline  # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
  CreateTimelineFromClips(name, [clips]) --> Timeline  # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
  CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline  # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), "recordFrame" (int).
  ImportTimelineFromFile(filePath, {importOptions}) --> Timeline  # Creates timeline based on parameters within given file (AAF/EDL/XML/FCPXML/DRT/ADL) and optional importOptions dict, with support for the keys:
  ImportTimelineFromFile(filePath, {importOptions}) --> Timeline  # Creates timeline based on parameters within given file (AAF/EDL/XML/FCPXML/DRT/ADL/OTIO) and optional importOptions dict, with support for the keys:
      # "timelineName": string, specifies the name of the timeline to be created. Not valid for DRT import
      # "importSourceClips": Bool, specifies whether source clips should be imported, True by default. Not valid for DRT import
      # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True
@ -225,6 +239,8 @@ MediaPool
  ExportMetadata(fileName, [clips]) --> Bool  # Exports metadata of specified clips to 'fileName' in CSV format.
      # If no clips are specified, all clips from media pool will be used.
  GetUniqueId() --> string  # Returns a unique ID for the media pool
  CreateStereoClip(LeftMediaPoolItem,
                   RightMediaPoolItem) --> MediaPoolItem  # Takes in two existing media pool items and creates a new 3D stereoscopic media pool entry replacing the input media in the media pool.

Folder
  GetClipList() --> [clips...]  # Returns a list of clips (items) within the folder.
@ -233,6 +249,8 @@ Folder
  GetIsFolderStale() --> bool  # Returns true if folder is stale in collaboration mode, false otherwise
  GetUniqueId() --> string  # Returns a unique ID for the media pool folder
  Export(filePath) --> bool  # Returns true if export of DRB folder to filePath is successful, false otherwise
  TranscribeAudio() --> Bool  # Transcribes audio of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise
  ClearTranscription() --> Bool  # Clears audio transcription of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise.

MediaPoolItem
  GetName() --> string  # Returns the clip name.
@ -340,8 +358,12 @@ Timeline
  GrabStill() --> galleryStill  # Grabs still from the current video clip. Returns a GalleryStill object.
  GrabAllStills(stillFrameSource) --> [galleryStill]  # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects.
  GetUniqueId() --> string  # Returns a unique ID for the timeline
  CreateSubtitlesFromAudio() --> Bool  # Creates subtitles from audio for the timeline. Returns True on success, False otherwise.
  CreateSubtitlesFromAudio({autoCaptionSettings}) --> Bool  # Creates subtitles from audio for the timeline.
      # Takes in optional dictionary {autoCaptionSettings}. Check 'Auto Caption Settings' subsection below for more information.
      # Returns True on success, False otherwise.
  DetectSceneCuts() --> Bool  # Detects and makes scene cuts along the timeline. Returns True if successful, False otherwise.
  ConvertTimelineToStereo() --> Bool  # Converts timeline to stereo. Returns True if successful; False otherwise.
  GetNodeGraph() --> Graph  # Returns the timeline's node graph object.

TimelineItem
  GetName() --> string  # Returns the item name.
@ -390,12 +412,7 @@ TimelineItem
  GetStereoConvergenceValues() --> {keyframes...}  # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
  GetStereoLeftFloatingWindowParams() --> {keyframes...}  # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
  GetStereoRightFloatingWindowParams() --> {keyframes...}  # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
  GetNumNodes() --> int  # Returns the number of nodes in the current graph for the timeline item
  ApplyArriCdlLut() --> Bool  # Applies ARRI CDL and LUT. Returns True if successful, False otherwise.
  SetLUT(nodeIndex, lutPath) --> Bool  # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
      # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
      # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
  GetLUT(nodeIndex) --> String  # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
  SetCDL([CDL map]) --> Bool  # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
      # Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
  AddTake(mediaPoolItem, startFrame, endFrame) --> Bool  # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents is added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
@ -411,11 +428,17 @@ TimelineItem
  UpdateSidecar() --> Bool  # Updates sidecar file for BRAW clips or RMD file for R3D clips.
  GetUniqueId() --> string  # Returns a unique ID for the timeline item
  LoadBurnInPreset(presetName) --> Bool  # Loads user defined data burn in preset for clip when supplied presetName (string). Returns true if successful.
  GetNodeLabel(nodeIndex) --> string  # Returns the label of the node at nodeIndex.
  CreateMagicMask(mode) --> Bool  # Returns True if magic mask was created successfully, False otherwise. mode can be "F" (forward), "B" (backward), or "BI" (bidirectional)
  RegenerateMagicMask() --> Bool  # Returns True if magic mask was regenerated successfully, False otherwise.
  Stabilize() --> Bool  # Returns True if stabilization was successful, False otherwise
  SmartReframe() --> Bool  # Performs Smart Reframe. Returns True if successful, False otherwise.
  GetNodeGraph() --> Graph  # Returns the clip's node graph object.
  GetColorGroup() --> ColorGroup  # Returns the clip's color group if one exists.
  AssignToColorGroup(ColorGroup) --> Bool  # Returns True if TiItem is successfully assigned to the given ColorGroup. ColorGroup must be an existing group in the current project.
  RemoveFromColorGroup() --> Bool  # Returns True if the TiItem is successfully removed from the ColorGroup it is in.
  ExportLUT(exportType, path) --> Bool  # Exports LUTs from tiItem referring to value passed in 'exportType' (enum) for LUT size. Refer to the 'ExportLUT notes' section for possible values.
      # Saves generated LUT in the provided 'path' (string). 'path' should include the intended file name.
      # If an empty or incorrect extension is provided, the appropriate extension (.cube/.vlt) will be appended at the end of the path.

Gallery
  GetAlbumName(galleryStillAlbum) --> string  # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
@ -428,17 +451,63 @@ GalleryStillAlbum
  GetStills() --> [galleryStill]  # Returns the list of GalleryStill objects in the album.
  GetLabel(galleryStill) --> string  # Returns the label of the galleryStill.
  SetLabel(galleryStill, label) --> Bool  # Sets the new 'label' to GalleryStill object 'galleryStill'.
  ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool  # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm).
  ImportStills([filePaths]) --> Bool  # Imports GalleryStill from each filePath in [filePaths] list. True if at least one still is imported successfully. False otherwise.
  ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool  # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm, drx).
  DeleteStills([galleryStill]) --> Bool  # Deletes specified list of GalleryStill objects '[galleryStill]'.

GalleryStill  # This class does not provide any API functions but the object type is used by functions in other classes.

Graph
  GetNumNodes() --> int  # Returns the number of nodes in the graph
  SetLUT(nodeIndex, lutPath) --> Bool  # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= self.GetNumNodes().
      # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
      # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
  GetLUT(nodeIndex) --> String  # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
  GetNodeLabel(nodeIndex) --> string  # Returns the label of the node at nodeIndex.
  GetToolsInNode(nodeIndex) --> [toolsList]  # Returns toolsList (list of strings) of the tools used in the node indicated by given nodeIndex (int).
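
A minimal Python sketch of driving the Graph API above (an illustration, not part of the original README); it assumes a running Resolve session, an open project with at least one clip on the first video track, and a placeholder LUT path:

  import DaVinciResolveScript as dvr_script

  resolve = dvr_script.scriptapp("Resolve")
  project = resolve.GetProjectManager().GetCurrentProject()
  timeline = project.GetCurrentTimeline()
  item = timeline.GetItemListInTrack("video", 1)[0]

  graph = item.GetNodeGraph()  # clip-level node graph
  for index in range(1, graph.GetNumNodes() + 1):
      print(index, graph.GetNodeLabel(index), graph.GetToolsInNode(index))

  # "Some LUT.cube" is a placeholder; the path must already be discovered
  # by Resolve (see Project.RefreshLUTList).
  graph.SetLUT(1, "Some LUT.cube")
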
ColorGroup
  GetName() --> String  # Returns the name (string) of the ColorGroup.
  SetName(groupName) --> Bool  # Renames ColorGroup to groupName (string).
  GetClipsInTimeline(Timeline=CurrTimeline) --> [TimelineItem]  # Returns a list of TimelineItem that are in colorGroup in the given Timeline. Timeline is Current Timeline by default.
  GetPreClipNodeGraph() --> Graph  # Returns the ColorGroup Pre-clip graph.
  GetPostClipNodeGraph() --> Graph  # Returns the ColorGroup Post-clip graph.

List and Dict Data Structures
-----------------------------
Besides primitive data types, Resolve's Python API mainly uses list and dict data structures. Lists are denoted by [ ... ] and dicts are denoted by { ... } above.
As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
Similarly, the Lua API implements "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.

Keyframe Mode information
-------------------------
This section covers additional notes for the functions Resolve.GetKeyframeMode() and Resolve.SetKeyframeMode(keyframeMode).

'keyframeMode' can be one of the following enums:
- resolve.KEYFRAME_MODE_ALL == 0
- resolve.KEYFRAME_MODE_COLOR == 1
- resolve.KEYFRAME_MODE_SIZING == 2

Integer values returned by Resolve.GetKeyframeMode() will correspond to the enums above.
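
For example, a short sketch (not part of the original README) that switches to color-only keyframing, assuming `resolve` was obtained through DaVinciResolveScript as in the console example earlier:

  import DaVinciResolveScript as dvr_script

  resolve = dvr_script.scriptapp("Resolve")
  current = resolve.GetKeyframeMode()  # returns 0, 1 or 2
  if current != resolve.KEYFRAME_MODE_COLOR:
      resolve.SetKeyframeMode(resolve.KEYFRAME_MODE_COLOR)
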
Cloud Projects Settings
--------------------------------------
This section covers additional notes for the functions "ProjectManager:CreateCloudProject," "ProjectManager:ImportCloudProject," and "ProjectManager:RestoreCloudProject"

All three functions take in a {cloudSettings} dict that has the following keys:
* resolve.CLOUD_SETTING_PROJECT_NAME: String, ["" by default]
* resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: String, ["" by default]
* resolve.CLOUD_SETTING_IS_COLLAB: Bool, [False by default]
* resolve.CLOUD_SETTING_SYNC_MODE: syncMode (see below), [resolve.CLOUD_SYNC_PROXY_ONLY by default]
* resolve.CLOUD_SETTING_IS_CAMERA_ACCESS: Bool [False by default]

Where syncMode is one of the following values:
* resolve.CLOUD_SYNC_NONE,
* resolve.CLOUD_SYNC_PROXY_ONLY,
* resolve.CLOUD_SYNC_PROXY_AND_ORIG

All three "ProjectManager:CreateCloudProject," "ProjectManager:ImportCloudProject," and "ProjectManager:RestoreCloudProject" require resolve.PROJECT_MEDIA_PATH to be defined. "ProjectManager:CreateCloudProject" also requires resolve.PROJECT_NAME to be defined.
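
As an illustration (not part of the original README), a minimal sketch of creating a cloud project with the keys above; the project name and media path are placeholders:

  import DaVinciResolveScript as dvr_script

  resolve = dvr_script.scriptapp("Resolve")
  project_manager = resolve.GetProjectManager()

  cloud_settings = {
      resolve.CLOUD_SETTING_PROJECT_NAME: "Example Cloud Project",
      resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: "/path/to/media",
      resolve.CLOUD_SETTING_IS_COLLAB: False,
      resolve.CLOUD_SETTING_SYNC_MODE: resolve.CLOUD_SYNC_PROXY_ONLY,
  }
  project = project_manager.CreateCloudProject(cloud_settings)
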
Looking up Project and Clip properties
--------------------------------------
This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and
@ -478,6 +547,49 @@ Affects:
• x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x)
• for '2x Enhanced' --> MediaPoolItem:SetClipProperty('Super Scale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue is a float in the range [0.0, 1.0] and noiseReductionValue is a float in the range [0.0, 1.0]

Auto Caption Settings
----------------------
This section covers the supported settings for the method Timeline.CreateSubtitlesFromAudio({autoCaptionSettings})

The parameter setting is a dictionary containing the following keys:
* resolve.SUBTITLE_LANGUAGE: languageID (see below), [resolve.AUTO_CAPTION_AUTO by default]
* resolve.SUBTITLE_CAPTION_PRESET: presetType (see below), [resolve.AUTO_CAPTION_SUBTITLE_DEFAULT by default]
* resolve.SUBTITLE_CHARS_PER_LINE: Number between 1 and 60 inclusive [42 by default]
* resolve.SUBTITLE_LINE_BREAK: lineBreakType (see below), [resolve.AUTO_CAPTION_LINE_SINGLE by default]
* resolve.SUBTITLE_GAP: Number between 0 and 10 inclusive [0 by default]

Note that the default values for some keys may change based on values defined for other keys, as per the UI.
For example, if the following dictionary is supplied,
  CreateSubtitlesFromAudio( { resolve.SUBTITLE_LANGUAGE: resolve.AUTO_CAPTION_KOREAN,
                              resolve.SUBTITLE_CAPTION_PRESET: resolve.AUTO_CAPTION_NETFLIX } )
the default value for resolve.SUBTITLE_CHARS_PER_LINE will be 16 instead of 42

languageIDs:
* resolve.AUTO_CAPTION_AUTO
* resolve.AUTO_CAPTION_DANISH
* resolve.AUTO_CAPTION_DUTCH
* resolve.AUTO_CAPTION_ENGLISH
* resolve.AUTO_CAPTION_FRENCH
* resolve.AUTO_CAPTION_GERMAN
* resolve.AUTO_CAPTION_ITALIAN
* resolve.AUTO_CAPTION_JAPANESE
* resolve.AUTO_CAPTION_KOREAN
* resolve.AUTO_CAPTION_MANDARIN_SIMPLIFIED
* resolve.AUTO_CAPTION_MANDARIN_TRADITIONAL
* resolve.AUTO_CAPTION_NORWEGIAN
* resolve.AUTO_CAPTION_PORTUGUESE
* resolve.AUTO_CAPTION_RUSSIAN
* resolve.AUTO_CAPTION_SPANISH
* resolve.AUTO_CAPTION_SWEDISH

presetTypes:
* resolve.AUTO_CAPTION_SUBTITLE_DEFAULT
* resolve.AUTO_CAPTION_TELETEXT
* resolve.AUTO_CAPTION_NETFLIX

lineBreakTypes:
* resolve.AUTO_CAPTION_LINE_SINGLE
* resolve.AUTO_CAPTION_LINE_DOUBLE
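
Putting the pieces together, a short sketch (an illustration, not part of the original README) that auto-captions the current timeline, assuming a project with a current timeline is open:

  import DaVinciResolveScript as dvr_script

  resolve = dvr_script.scriptapp("Resolve")
  timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

  timeline.CreateSubtitlesFromAudio({
      resolve.SUBTITLE_LANGUAGE: resolve.AUTO_CAPTION_ENGLISH,
      resolve.SUBTITLE_CAPTION_PRESET: resolve.AUTO_CAPTION_SUBTITLE_DEFAULT,
      resolve.SUBTITLE_CHARS_PER_LINE: 42,
  })
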
Looking up Render Settings
--------------------------
@ -531,6 +643,8 @@ exportType can be one of the following constants:
- resolve.EXPORT_DOLBY_VISION_VER_4_0
- resolve.EXPORT_DOLBY_VISION_VER_5_1
- resolve.EXPORT_OTIO
- resolve.EXPORT_ALE
- resolve.EXPORT_ALE_CDL
exportSubtype can be one of the following enums:
- resolve.EXPORT_NONE
- resolve.EXPORT_AAF_NEW
@ -627,7 +741,8 @@ The supported keys with their accepted values are:
    - MOTION_EST_STANDARD_BETTER
    - MOTION_EST_ENHANCED_FASTER
    - MOTION_EST_ENHANCED_BETTER
    - MOTION_EST_SPEED_WRAP
    - MOTION_EST_SPEED_WARP_BETTER
    - MOTION_EST_SPEED_WARP_FASTER
"Scaling" : A value from the following constants
    - SCALE_USE_PROJECT = 0
    - SCALE_CROP
@ -659,6 +774,16 @@ as a single argument.

Getting the values for the keys that use constants will return the number which is in the constant

ExportLUT notes
---------------
The following section covers additional notes for TimelineItem.ExportLUT(exportType, path).

Supported values for 'exportType' (enum) are:
- resolve.EXPORT_LUT_17PTCUBE
- resolve.EXPORT_LUT_33PTCUBE
- resolve.EXPORT_LUT_65PTCUBE
- resolve.EXPORT_LUT_PANASONICVLUT
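
For example, a minimal sketch (an illustration, not part of the original README) exporting the grade of the first clip on the first video track as a 33-point cube LUT; the output path is a placeholder:

  import DaVinciResolveScript as dvr_script

  resolve = dvr_script.scriptapp("Resolve")
  timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()
  item = timeline.GetItemListInTrack("video", 1)[0]

  item.ExportLUT(resolve.EXPORT_LUT_33PTCUBE, "/tmp/clip_grade.cube")
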
Deprecated Resolve API Functions
--------------------------------
The following API functions are deprecated.
@ -693,7 +818,12 @@ TimelineItem
  GetFusionCompNames() --> {names...}  # Returns a dict of Fusion composition names associated with the timeline item.
  GetFlags() --> {colors...}  # Returns a dict of flag colors assigned to the item.
  GetVersionNames(versionType) --> {names...}  # Returns a dict of version names by provided versionType: 0 - local, 1 - remote.

  GetNumNodes() --> int  # Returns the number of nodes in the current graph for the timeline item
  SetLUT(nodeIndex, lutPath) --> Bool  # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
      # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
      # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
  GetLUT(nodeIndex) --> String  # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
  GetNodeLabel(nodeIndex) --> string  # Returns the label of the node at nodeIndex.

Unsupported Resolve API Functions
---------------------------------
@ -0,0 +1,52 @@
from pathlib import Path

from ayon_core.pipeline import (
    load,
    get_representation_path,
)

from ayon_core.hosts.resolve.api import lib


class LoadEditorialPackage(load.LoaderPlugin):
    """Load editorial package to timeline.

    Loads a timeline from an OTIO file, including media sources
    and timeline structure.
    """

    product_types = {"editorial_pkg"}

    representations = {"*"}
    extensions = {"otio"}

    label = "Load as Timeline"
    order = -10
    icon = "ei.align-left"
    color = "orange"

    def load(self, context, name, namespace, data):
        files = get_representation_path(context["representation"])

        search_folder_path = Path(files).parent / "resources"

        project = lib.get_current_project()
        media_pool = project.GetMediaPool()

        # create versioned bin for editorial package
        version_name = context["version"]["name"]
        bin_name = f"{name}_{version_name}"
        lib.create_bin(bin_name)

        import_options = {
            "timelineName": "Editorial Package Timeline",
            "importSourceClips": True,
            "sourceClipsPath": search_folder_path.as_posix(),
        }

        timeline = media_pool.ImportTimelineFromFile(files, import_options)
        print("Timeline imported: ", timeline)

    def update(self, container, context):
        # TODO: implement update method in the future
        pass
@ -80,17 +80,21 @@ def get_engine_versions(env=None):
def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path:
    """Get UE Editor executable path."""
    ue_path = engine_path / "Engine/Binaries"

    ue_name = "UnrealEditor"

    # handle older versions of Unreal Engine
    if engine_version.split(".")[0] == "4":
        ue_name = "UE4Editor"

    if platform.system().lower() == "windows":
        if engine_version.split(".")[0] == "4":
            ue_path /= "Win64/UE4Editor.exe"
        elif engine_version.split(".")[0] == "5":
            ue_path /= "Win64/UnrealEditor.exe"
        ue_path /= f"Win64/{ue_name}.exe"

    elif platform.system().lower() == "linux":
        ue_path /= "Linux/UE4Editor"
        ue_path /= f"Linux/{ue_name}"

    elif platform.system().lower() == "darwin":
        ue_path /= "Mac/UE4Editor"
        ue_path /= f"Mac/{ue_name}"

    return ue_path
@ -139,6 +139,7 @@ from .path_tools import (
)

from .ayon_info import (
    is_in_ayon_launcher_process,
    is_running_from_build,
    is_using_ayon_console,
    is_staging_enabled,

@ -248,6 +249,7 @@ __all__ = [

    "Logger",

    "is_in_ayon_launcher_process",
    "is_running_from_build",
    "is_using_ayon_console",
    "is_staging_enabled",
@ -1,4 +1,5 @@
import os
import sys
import json
import datetime
import platform

@ -25,6 +26,18 @@ def get_ayon_launcher_version():
    return content["__version__"]


def is_in_ayon_launcher_process():
    """Determine if current process is running from AYON launcher.

    Returns:
        bool: True if running from AYON launcher.

    """
    ayon_executable_path = os.path.normpath(os.environ["AYON_EXECUTABLE"])
    executable_path = os.path.normpath(sys.executable)
    return ayon_executable_path == executable_path


def is_running_from_build():
    """Determine if current process is running from build or code.
@ -1,5 +0,0 @@
from .clockify_module import ClockifyModule

__all__ = (
    "ClockifyModule",
)

@ -1,6 +1,8 @@
from .deadline_module import DeadlineModule
from .version import __version__


__all__ = (
    "DeadlineModule",
    "__version__"
)
@ -29,15 +29,11 @@ from ayon_core.pipeline.publish.lib import (
JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError)


# TODO both 'requests_post' and 'requests_get' should not set 'verify' based
# on environment variable. This should be done in a more controlled way,
# e.g. each deadline url could have a checkbox to enable/disable
# ssl verification.
def requests_post(*args, **kwargs):
    """Wrap request post method.

    Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
    variable is found. This is useful when Deadline server is
    Disabling SSL certificate validation if ``verify`` kwarg is set to False.
    This is useful when Deadline server is
    running with self-signed certificates and its certificate is not
    added to trusted certificates on client machines.
@ -46,9 +42,9 @@ def requests_post(*args, **kwargs):
    of defense SSL is providing, and it is not recommended.

    """
    if 'verify' not in kwargs:
        kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL",
                                              True) else True  # noqa
    auth = kwargs.get("auth")
    if auth:
        kwargs["auth"] = tuple(auth)  # explicit cast to tuple
    # add 10sec timeout before bailing out
    kwargs['timeout'] = 10
    return requests.post(*args, **kwargs)
@ -57,8 +53,8 @@ def requests_post(*args, **kwargs):
def requests_get(*args, **kwargs):
    """Wrap request get method.

    Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment
    variable is found. This is useful when Deadline server is
    Disabling SSL certificate validation if ``verify`` kwarg is set to False.
    This is useful when Deadline server is
    running with self-signed certificates and its certificate is not
    added to trusted certificates on client machines.
@ -67,9 +63,9 @@ def requests_get(*args, **kwargs):
    of defense SSL is providing, and it is not recommended.

    """
    if 'verify' not in kwargs:
        kwargs['verify'] = False if os.getenv("OPENPYPE_DONT_VERIFY_SSL",
                                              True) else True  # noqa
    auth = kwargs.get("auth")
    if auth:
        kwargs["auth"] = tuple(auth)
    # add 10sec timeout before bailing out
    kwargs['timeout'] = 10
    return requests.get(*args, **kwargs)
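
A hypothetical usage sketch of the wrappers after this change (not part of the diff): credentials and SSL verification now travel with the call instead of coming from the ``OPENPYPE_DONT_VERIFY_SSL`` environment variable. The URL and credentials below are placeholders.

    url = "http://localhost:8082/api/pools"
    auth = ("deadline_user", "deadline_password")  # or None when not required
    response = requests_get(url, auth=auth, verify=False)
    response.raise_for_status()
    print(response.json())
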
@ -434,9 +430,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
"""Plugin entry point."""
|
||||
self._instance = instance
|
||||
context = instance.context
|
||||
self._deadline_url = context.data.get("defaultDeadline")
|
||||
self._deadline_url = instance.data.get(
|
||||
"deadlineUrl", self._deadline_url)
|
||||
self._deadline_url = instance.data["deadline"]["url"]
|
||||
|
||||
assert self._deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
|
|
@ -460,7 +454,9 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
self.plugin_info = self.get_plugin_info()
|
||||
self.aux_files = self.get_aux_files()
|
||||
|
||||
job_id = self.process_submission()
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
job_id = self.process_submission(auth, verify)
|
||||
self.log.info("Submitted job to Deadline: {}.".format(job_id))
|
||||
|
||||
# TODO: Find a way that's more generic and not render type specific
|
||||
|
|
@ -473,10 +469,10 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
job_info=render_job_info,
|
||||
plugin_info=render_plugin_info
|
||||
)
|
||||
render_job_id = self.submit(payload)
|
||||
render_job_id = self.submit(payload, auth, verify)
|
||||
self.log.info("Render job id: %s", render_job_id)
|
||||
|
||||
def process_submission(self):
|
||||
def process_submission(self, auth=None, verify=True):
|
||||
"""Process data for submission.
|
||||
|
||||
This takes Deadline JobInfo, PluginInfo, AuxFile, creates payload
|
||||
|
|
@ -487,7 +483,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
|
||||
"""
|
||||
payload = self.assemble_payload()
|
||||
return self.submit(payload)
|
||||
return self.submit(payload, auth, verify)
|
||||
|
||||
@abstractmethod
|
||||
def get_job_info(self):
|
||||
|
|
@ -577,7 +573,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
"AuxFiles": aux_files or self.aux_files
|
||||
}
|
||||
|
||||
def submit(self, payload):
|
||||
def submit(self, payload, auth, verify):
|
||||
"""Submit payload to Deadline API end-point.
|
||||
|
||||
This takes payload in the form of JSON file and POST it to
|
||||
|
|
@ -585,6 +581,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
|
||||
Args:
|
||||
payload (dict): dict to become json in deadline submission.
|
||||
auth (tuple): (username, password)
|
||||
verify (bool): verify SSL certificate if present
|
||||
|
||||
Returns:
|
||||
str: resulting Deadline job id.
|
||||
|
|
@ -594,7 +592,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
|
||||
"""
|
||||
url = "{}/api/jobs".format(self._deadline_url)
|
||||
response = requests_post(url, json=payload)
|
||||
response = requests_post(
|
||||
url, json=payload, auth=auth, verify=verify)
|
||||
if not response.ok:
|
||||
self.log.error("Submission failed!")
|
||||
self.log.error(response.status_code)
|
||||
|
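To make the new 'submit(payload, auth, verify)' contract concrete, a self-contained sketch; the "_id" key is an assumption about the job document Deadline returns:

import requests

def submit(deadline_url, payload, auth=None, verify=True):
    url = "{}/api/jobs".format(deadline_url)
    response = requests.post(
        url, json=payload, auth=auth, verify=verify, timeout=10)
    if not response.ok:
        raise RuntimeError("Submission failed: {} {}".format(
            response.status_code, response.text))
    # "_id" is an assumption about Deadline's reply JSON.
    return response.json()["_id"]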
|
|
|||
|
|
@ -19,23 +19,23 @@ class DeadlineModule(AYONAddon, IPluginPaths):
|
|||
|
||||
def initialize(self, studio_settings):
|
||||
# This module is always enabled
|
||||
deadline_urls = {}
|
||||
deadline_servers_info = {}
|
||||
enabled = self.name in studio_settings
|
||||
if enabled:
|
||||
deadline_settings = studio_settings[self.name]
|
||||
deadline_urls = {
|
||||
url_item["name"]: url_item["value"]
|
||||
deadline_servers_info = {
|
||||
url_item["name"]: url_item
|
||||
for url_item in deadline_settings["deadline_urls"]
|
||||
}
|
||||
|
||||
if enabled and not deadline_urls:
|
||||
if enabled and not deadline_servers_info:
|
||||
enabled = False
|
||||
self.log.warning((
|
||||
"Deadline Webservice URLs are not specified. Disabling addon."
|
||||
))
|
||||
|
||||
self.enabled = enabled
|
||||
self.deadline_urls = deadline_urls
|
||||
self.deadline_servers_info = deadline_servers_info
|
||||
|
||||
def get_plugin_paths(self):
|
||||
"""Deadline plugin paths."""
|
||||
|
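The shape change in 'initialize' is that the whole settings item is kept per server, not just its URL. A small sketch with made-up settings content:

deadline_settings = {
    "deadline_urls": [
        {"name": "default", "value": "http://dl1:8082",
         "require_authentication": False, "not_verify_ssl": False},
        {"name": "gpu_farm", "value": "http://dl2:8082",
         "require_authentication": True, "not_verify_ssl": True},
    ]
}
# Name -> full info item, so auth/SSL flags stay available later.
deadline_servers_info = {
    url_item["name"]: url_item
    for url_item in deadline_settings["deadline_urls"]
}
assert deadline_servers_info["gpu_farm"]["value"] == "http://dl2:8082"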
|
@ -45,13 +45,15 @@ class DeadlineModule(AYONAddon, IPluginPaths):
|
|||
}
|
||||
|
||||
@staticmethod
|
||||
def get_deadline_pools(webservice, log=None):
|
||||
def get_deadline_pools(webservice, auth=None, log=None):
|
||||
"""Get pools from Deadline.
|
||||
Args:
|
||||
webservice (str): Server url.
|
||||
log (Logger)
|
||||
auth (Optional[Tuple[str, str]]): Tuple containing username,
|
||||
password
|
||||
log (Optional[Logger]): Logger to log errors to, if provided.
|
||||
Returns:
|
||||
list: Pools.
|
||||
List[str]: Pools.
|
||||
Throws:
|
||||
RuntimeError: If deadline webservice is unreachable.
|
||||
|
||||
|
|
@ -63,7 +65,10 @@ class DeadlineModule(AYONAddon, IPluginPaths):
|
|||
|
||||
argument = "{}/api/pools?NamesOnly=true".format(webservice)
|
||||
try:
|
||||
response = requests_get(argument)
|
||||
kwargs = {}
|
||||
if auth:
|
||||
kwargs["auth"] = auth
|
||||
response = requests_get(argument, **kwargs)
|
||||
except requests.exceptions.ConnectionError as exc:
|
||||
msg = 'Cannot connect to DL web service {}'.format(webservice)
|
||||
log.error(msg)
|
||||
|
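A hedged, self-contained sketch of the pool query above; the 'NamesOnly=true' endpoint is assumed to return a plain JSON list of pool names:

import requests

def get_deadline_pools(webservice, auth=None):
    argument = "{}/api/pools?NamesOnly=true".format(webservice)
    kwargs = {"timeout": 10}
    if auth:
        kwargs["auth"] = tuple(auth)
    response = requests.get(argument, **kwargs)
    response.raise_for_status()
    return response.json()  # assumed: list of pool name strings

# pools = get_deadline_pools("http://dl1:8082", auth=("user", "pass"))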
|
|
|||
|
|
@ -13,17 +13,45 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
|
|||
"""Collect Deadline Webservice URL from instance."""
|
||||
|
||||
# Run before collect_render.
|
||||
order = pyblish.api.CollectorOrder + 0.005
|
||||
order = pyblish.api.CollectorOrder + 0.225
|
||||
label = "Deadline Webservice from the Instance"
|
||||
families = ["rendering", "renderlayer"]
|
||||
hosts = ["maya"]
|
||||
targets = ["local"]
|
||||
families = ["render",
|
||||
"rendering",
|
||||
"render.farm",
|
||||
"renderFarm",
|
||||
"renderlayer",
|
||||
"maxrender",
|
||||
"usdrender",
|
||||
"redshift_rop",
|
||||
"arnold_rop",
|
||||
"mantra_rop",
|
||||
"karma_rop",
|
||||
"vray_rop",
|
||||
"publish.hou",
|
||||
"image"] # for Fusion
|
||||
|
||||
def process(self, instance):
|
||||
instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
|
||||
instance.data["deadlineUrl"] = \
|
||||
instance.data["deadlineUrl"].strip().rstrip("/")
|
||||
if not instance.data.get("farm"):
|
||||
self.log.debug("Should not be processed on farm, skipping.")
|
||||
return
|
||||
|
||||
if not instance.data.get("deadline"):
|
||||
instance.data["deadline"] = {}
|
||||
|
||||
# todo: separate logic should be removed, all hosts should have same
|
||||
host_name = instance.context.data["hostName"]
|
||||
if host_name == "maya":
|
||||
deadline_url = self._collect_deadline_url(instance)
|
||||
else:
|
||||
deadline_url = (instance.data.get("deadlineUrl") or # backwards
|
||||
instance.data.get("deadline", {}).get("url"))
|
||||
if deadline_url:
|
||||
instance.data["deadline"]["url"] = deadline_url.strip().rstrip("/")
|
||||
else:
|
||||
instance.data["deadline"]["url"] = instance.context.data["deadline"]["defaultUrl"] # noqa
|
||||
self.log.debug(
|
||||
"Using {} for submission.".format(instance.data["deadlineUrl"]))
|
||||
"Using {} for submission".format(instance.data["deadline"]["url"]))
|
||||
|
||||
def _collect_deadline_url(self, render_instance):
|
||||
# type: (pyblish.api.Instance) -> str
|
||||
|
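The fallback chain in the new 'process' can be summarized in a few lines; a sketch with illustrative names:

def resolve_deadline_url(instance_data, context_data):
    deadline_url = (
        instance_data.get("deadlineUrl")                 # backwards compat
        or instance_data.get("deadline", {}).get("url")  # new location
        or context_data["deadline"]["defaultUrl"]        # context default
    )
    # Normalize so comparisons against Studio Settings entries match.
    return deadline_url.strip().rstrip("/")

assert resolve_deadline_url(
    {"deadlineUrl": " http://dl1:8082/ "}, {}) == "http://dl1:8082"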
|
@ -49,13 +77,13 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
|
|||
["project_settings"]
|
||||
["deadline"]
|
||||
)
|
||||
|
||||
default_server = render_instance.context.data["defaultDeadline"]
|
||||
default_server_url = (render_instance.context.data["deadline"]
|
||||
["defaultUrl"])
|
||||
# QUESTION How and where is this set? Should it be removed?
|
||||
instance_server = render_instance.data.get("deadlineServers")
|
||||
if not instance_server:
|
||||
self.log.debug("Using default server.")
|
||||
return default_server
|
||||
return default_server_url
|
||||
|
||||
# Get instance server as string.
|
||||
if isinstance(instance_server, int):
|
||||
|
|
@ -66,7 +94,7 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
|
|||
|
||||
default_servers = {
|
||||
url_item["name"]: url_item["value"]
|
||||
for url_item in deadline_settings["deadline_urls"]
|
||||
for url_item in deadline_settings["deadline_servers_info"]
|
||||
}
|
||||
project_servers = (
|
||||
render_instance.context.data
|
||||
|
|
|
|||
|
|
@ -18,10 +18,9 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
|
|||
"""
|
||||
|
||||
# Run before collect_deadline_server_instance.
|
||||
order = pyblish.api.CollectorOrder + 0.0025
|
||||
order = pyblish.api.CollectorOrder + 0.200
|
||||
label = "Default Deadline Webservice"
|
||||
|
||||
pass_mongo_url = False
|
||||
targets = ["local"]
|
||||
|
||||
def process(self, context):
|
||||
try:
|
||||
|
|
@ -33,15 +32,17 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
|
|||
deadline_settings = context.data["project_settings"]["deadline"]
|
||||
deadline_server_name = deadline_settings["deadline_server"]
|
||||
|
||||
deadline_webservice = None
|
||||
dl_server_info = None
|
||||
if deadline_server_name:
|
||||
deadline_webservice = deadline_module.deadline_urls.get(
|
||||
dl_server_info = deadline_module.deadline_servers_info.get(
|
||||
deadline_server_name)
|
||||
|
||||
default_deadline_webservice = deadline_module.deadline_urls["default"]
|
||||
deadline_webservice = (
|
||||
deadline_webservice
|
||||
or default_deadline_webservice
|
||||
)
|
||||
if dl_server_info:
|
||||
deadline_url = dl_server_info["value"]
|
||||
else:
|
||||
default_dl_server_info = deadline_module.deadline_servers_info[0]
|
||||
deadline_url = default_dl_server_info["value"]
|
||||
|
||||
context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/") # noqa
|
||||
context.data["deadline"] = {}
|
||||
context.data["deadline"]["defaultUrl"] = (
|
||||
deadline_url.strip().rstrip("/"))
|
||||
|
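One detail worth noting: 'initialize' above builds 'deadline_servers_info' as a dict keyed by server name, so the '[0]' fallback here would only work on a list. A sketch of the presumably intended resolution:

def pick_default_url(servers_info, server_name=None):
    info = servers_info.get(server_name) if server_name else None
    if info is None:
        info = next(iter(servers_info.values()))  # first configured entry
    return info["value"].strip().rstrip("/")

servers = {"default": {"name": "default", "value": "http://dl1:8082/"}}
assert pick_default_url(servers) == "http://dl1:8082"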
|
|
|||
|
|
@ -26,27 +26,32 @@ class CollectDeadlinePools(pyblish.api.InstancePlugin,
|
|||
|
||||
order = pyblish.api.CollectorOrder + 0.420
|
||||
label = "Collect Deadline Pools"
|
||||
hosts = ["aftereffects",
|
||||
"fusion",
|
||||
"harmony"
|
||||
"nuke",
|
||||
"maya",
|
||||
"max",
|
||||
"houdini"]
|
||||
hosts = [
|
||||
"aftereffects",
|
||||
"fusion",
|
||||
"harmony",
|
||||
"maya",
|
||||
"max",
|
||||
"houdini",
|
||||
"nuke",
|
||||
]
|
||||
|
||||
families = ["render",
|
||||
"rendering",
|
||||
"render.farm",
|
||||
"renderFarm",
|
||||
"renderlayer",
|
||||
"maxrender",
|
||||
"usdrender",
|
||||
"redshift_rop",
|
||||
"arnold_rop",
|
||||
"mantra_rop",
|
||||
"karma_rop",
|
||||
"vray_rop",
|
||||
"publish.hou"]
|
||||
families = [
|
||||
"render",
|
||||
"prerender",
|
||||
"rendering",
|
||||
"render.farm",
|
||||
"renderFarm",
|
||||
"renderlayer",
|
||||
"maxrender",
|
||||
"usdrender",
|
||||
"redshift_rop",
|
||||
"arnold_rop",
|
||||
"mantra_rop",
|
||||
"karma_rop",
|
||||
"vray_rop",
|
||||
"publish.hou",
|
||||
]
|
||||
|
||||
primary_pool = None
|
||||
secondary_pool = None
|
||||
|
|
|
|||
|
|
@ -0,0 +1,92 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Collect user credentials
|
||||
|
||||
Requires:
|
||||
context -> project_settings
|
||||
instance.data["deadline"]["url"]
|
||||
|
||||
Provides:
|
||||
instance.data["deadline"] -> require_authentication (bool)
|
||||
instance.data["deadline"] -> auth (tuple (str, str)) -
|
||||
(username, password) or None
|
||||
"""
|
||||
import pyblish.api
|
||||
|
||||
from ayon_api import get_server_api_connection
|
||||
from ayon_core.modules.deadline.deadline_module import DeadlineModule
|
||||
from ayon_core.modules.deadline import __version__
|
||||
|
||||
|
||||
class CollectDeadlineUserCredentials(pyblish.api.InstancePlugin):
|
||||
"""Collects user name and password for artist if DL requires authentication
|
||||
"""
|
||||
order = pyblish.api.CollectorOrder + 0.250
|
||||
label = "Collect Deadline User Credentials"
|
||||
|
||||
targets = ["local"]
|
||||
hosts = ["aftereffects",
|
||||
"blender",
|
||||
"fusion",
|
||||
"harmony",
|
||||
"nuke",
|
||||
"maya",
|
||||
"max",
|
||||
"houdini"]
|
||||
|
||||
families = ["render",
|
||||
"rendering",
|
||||
"render.farm",
|
||||
"renderFarm",
|
||||
"renderlayer",
|
||||
"maxrender",
|
||||
"usdrender",
|
||||
"redshift_rop",
|
||||
"arnold_rop",
|
||||
"mantra_rop",
|
||||
"karma_rop",
|
||||
"vray_rop",
|
||||
"publish.hou"]
|
||||
|
||||
def process(self, instance):
|
||||
if not instance.data.get("farm"):
|
||||
self.log.debug("Should not be processed on farm, skipping.")
|
||||
return
|
||||
|
||||
collected_deadline_url = instance.data["deadline"]["url"]
|
||||
if not collected_deadline_url:
|
||||
raise ValueError("Instance doesn't have '[deadline][url]'.")
|
||||
context_data = instance.context.data
|
||||
deadline_settings = context_data["project_settings"]["deadline"]
|
||||
|
||||
deadline_server_name = None
|
||||
# deadline url might be set directly from instance, need to find
|
||||
# metadata for it
|
||||
for deadline_info in deadline_settings["deadline_urls"]:
|
||||
dl_settings_url = deadline_info["value"].strip().rstrip("/")
|
||||
if dl_settings_url == collected_deadline_url:
|
||||
deadline_server_name = deadline_info["name"]
|
||||
break
|
||||
|
||||
if not deadline_server_name:
|
||||
raise ValueError(f"Collected {collected_deadline_url} doesn't "
|
||||
"match any site configured in Studio Settings")
|
||||
|
||||
instance.data["deadline"]["require_authentication"] = (
|
||||
deadline_info["require_authentication"]
|
||||
)
|
||||
instance.data["deadline"]["auth"] = None
|
||||
|
||||
instance.data["deadline"]["verify"] = (
|
||||
not deadline_info["not_verify_ssl"])
|
||||
|
||||
if not deadline_info["require_authentication"]:
|
||||
return
|
||||
# TODO import 'get_addon_site_settings' when available
|
||||
# in public 'ayon_api'
|
||||
local_settings = get_server_api_connection().get_addon_site_settings(
|
||||
DeadlineModule.name, __version__)
|
||||
local_settings = local_settings["local_settings"]
|
||||
for server_info in local_settings:
|
||||
if deadline_server_name == server_info["server_name"]:
|
||||
instance.data["deadline"]["auth"] = (server_info["username"],
|
||||
server_info["password"])
|
||||
|
|
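The credential lookup above does two matches: the collected URL back to a Studio Settings entry, then the server name to the artist's site settings. A self-contained sketch with hypothetical data shapes:

def find_auth(deadline_settings, site_settings, collected_url):
    server_name = None
    for info in deadline_settings["deadline_urls"]:
        if info["value"].strip().rstrip("/") == collected_url:
            server_name = info["name"]
            break
    if server_name is None:
        raise ValueError(
            "{} does not match any configured site".format(collected_url))
    for entry in site_settings["local_settings"]:
        if entry["server_name"] == server_name:
            return entry["username"], entry["password"]
    return None  # authentication not configured for this artist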
@ -0,0 +1,17 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root>
|
||||
<error id="main">
|
||||
<title>Deadline Authentication</title>
|
||||
<description>
|
||||
## Deadline authentication is required
|
||||
|
||||
This project has set in Settings that Deadline requires authentication.
|
||||
|
||||
### How to repair?
|
||||
|
||||
Please go to AYON Server > Site Settings and provide your Deadline username and password.
|
||||
In some cases the password may be empty if Deadline is configured to allow that. Ask your administrator.
|
||||
|
||||
</description>
|
||||
</error>
|
||||
</root>
|
||||
|
|
@ -174,7 +174,9 @@ class BlenderSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
instance.data["toBeRenderedOn"] = "deadline"
|
||||
|
||||
payload = self.assemble_payload()
|
||||
return self.submit(payload)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
return self.submit(payload, auth=auth, verify=verify)
|
||||
|
||||
def from_published_scene(self):
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -2,9 +2,10 @@ import os
|
|||
import re
|
||||
import json
|
||||
import getpass
|
||||
import requests
|
||||
import pyblish.api
|
||||
|
||||
from openpype_modules.deadline.abstract_submit_deadline import requests_post
|
||||
|
||||
|
||||
class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
|
||||
"""Submit CelAction2D scene to Deadline
|
||||
|
|
@ -30,11 +31,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
|
||||
context = instance.context
|
||||
|
||||
# get default deadline webservice url from deadline module
|
||||
deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
deadline_url = instance.data.get("deadlineUrl")
|
||||
deadline_url = instance.data["deadline"]["url"]
|
||||
assert deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
self.deadline_url = "{}/api/jobs".format(deadline_url)
|
||||
|
|
@ -196,8 +193,11 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
self.expected_files(instance, render_path)
|
||||
self.log.debug("__ expectedFiles: `{}`".format(
|
||||
instance.data["expectedFiles"]))
|
||||
|
||||
response = requests.post(self.deadline_url, json=payload)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
response = requests_post(self.deadline_url, json=payload,
|
||||
auth=auth,
|
||||
verify=verify)
|
||||
|
||||
if not response.ok:
|
||||
self.log.error(
|
||||
|
|
|
|||
|
|
@ -2,17 +2,13 @@ import os
|
|||
import json
|
||||
import getpass
|
||||
|
||||
import requests
|
||||
|
||||
import pyblish.api
|
||||
|
||||
from openpype_modules.deadline.abstract_submit_deadline import requests_post
|
||||
from ayon_core.pipeline.publish import (
|
||||
AYONPyblishPluginMixin
|
||||
)
|
||||
from ayon_core.lib import (
|
||||
BoolDef,
|
||||
NumberDef,
|
||||
)
|
||||
from ayon_core.lib import NumberDef
|
||||
|
||||
|
||||
class FusionSubmitDeadline(
|
||||
|
|
@ -64,11 +60,6 @@ class FusionSubmitDeadline(
|
|||
decimals=0,
|
||||
minimum=1,
|
||||
maximum=10
|
||||
),
|
||||
BoolDef(
|
||||
"suspend_publish",
|
||||
default=False,
|
||||
label="Suspend publish"
|
||||
)
|
||||
]
|
||||
|
||||
|
|
@ -80,10 +71,6 @@ class FusionSubmitDeadline(
|
|||
attribute_values = self.get_attr_values_from_data(
|
||||
instance.data)
|
||||
|
||||
# add suspend_publish attributeValue to instance data
|
||||
instance.data["suspend_publish"] = attribute_values[
|
||||
"suspend_publish"]
|
||||
|
||||
context = instance.context
|
||||
|
||||
key = "__hasRun{}".format(self.__class__.__name__)
|
||||
|
|
@ -94,11 +81,7 @@ class FusionSubmitDeadline(
|
|||
|
||||
from ayon_core.hosts.fusion.api.lib import get_frame_path
|
||||
|
||||
# get default deadline webservice url from deadline module
|
||||
deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
deadline_url = instance.data.get("deadlineUrl")
|
||||
deadline_url = instance.data["deadline"]["url"]
|
||||
assert deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
# Collect all saver instances in context that are to be rendered
|
||||
|
|
@ -258,7 +241,9 @@ class FusionSubmitDeadline(
|
|||
|
||||
# E.g. http://192.168.0.1:8082/api/jobs
|
||||
url = "{}/api/jobs".format(deadline_url)
|
||||
response = requests.post(url, json=payload)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
response = requests_post(url, json=payload, auth=auth, verify=verify)
|
||||
if not response.ok:
|
||||
raise Exception(response.text)
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ from openpype_modules.deadline import abstract_submit_deadline
|
|||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
from ayon_core.lib import (
|
||||
is_in_tests,
|
||||
BoolDef,
|
||||
TextDef,
|
||||
NumberDef
|
||||
)
|
||||
|
|
@ -86,15 +85,10 @@ class HoudiniSubmitDeadline(
|
|||
priority = 50
|
||||
chunk_size = 1
|
||||
group = ""
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_attribute_defs(cls):
|
||||
return [
|
||||
BoolDef(
|
||||
"suspend_publish",
|
||||
default=False,
|
||||
label="Suspend publish"
|
||||
),
|
||||
NumberDef(
|
||||
"priority",
|
||||
label="Priority",
|
||||
|
|
@ -194,7 +188,7 @@ class HoudiniSubmitDeadline(
|
|||
|
||||
job_info.Pool = instance.data.get("primaryPool")
|
||||
job_info.SecondaryPool = instance.data.get("secondaryPool")
|
||||
|
||||
|
||||
if split_render_job and is_export_job:
|
||||
job_info.Priority = attribute_values.get(
|
||||
"export_priority", self.export_priority
|
||||
|
|
@ -315,6 +309,11 @@ class HoudiniSubmitDeadline(
|
|||
return attr.asdict(plugin_info)
|
||||
|
||||
def process(self, instance):
|
||||
if not instance.data["farm"]:
|
||||
self.log.debug("Render on farm is disabled. "
|
||||
"Skipping deadline submission.")
|
||||
return
|
||||
|
||||
super(HoudiniSubmitDeadline, self).process(instance)
|
||||
|
||||
# TODO: Avoid the need for this logic here, needed for submit publish
|
||||
|
|
|
|||
|
|
@ -15,11 +15,11 @@ from ayon_core.pipeline.publish.lib import (
|
|||
replace_with_published_scene_path
|
||||
)
|
||||
from ayon_core.pipeline.publish import KnownPublishError
|
||||
from ayon_core.hosts.max.api.lib import (
|
||||
from ayon_max.api.lib import (
|
||||
get_current_renderer,
|
||||
get_multipass_setting
|
||||
)
|
||||
from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings
|
||||
from ayon_max.api.lib_rendersettings import RenderSettings
|
||||
from openpype_modules.deadline import abstract_submit_deadline
|
||||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
|
||||
|
|
@ -181,25 +181,35 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
|
||||
self.log.debug("Submitting 3dsMax render..")
|
||||
project_settings = instance.context.data["project_settings"]
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
if instance.data.get("multiCamera"):
|
||||
self.log.debug("Submitting jobs for multiple cameras..")
|
||||
payload = self._use_published_name_for_multiples(
|
||||
payload_data, project_settings)
|
||||
job_infos, plugin_infos = payload
|
||||
for job_info, plugin_info in zip(job_infos, plugin_infos):
|
||||
self.submit(self.assemble_payload(job_info, plugin_info))
|
||||
self.submit(
|
||||
self.assemble_payload(job_info, plugin_info),
|
||||
auth=auth,
|
||||
verify=verify
|
||||
)
|
||||
else:
|
||||
payload = self._use_published_name(payload_data, project_settings)
|
||||
job_info, plugin_info = payload
|
||||
self.submit(self.assemble_payload(job_info, plugin_info))
|
||||
self.submit(
|
||||
self.assemble_payload(job_info, plugin_info),
|
||||
auth=auth,
|
||||
verify=verify
|
||||
)
|
||||
|
||||
def _use_published_name(self, data, project_settings):
|
||||
# Not all hosts can import these modules.
|
||||
from ayon_core.hosts.max.api.lib import (
|
||||
from ayon_max.api.lib import (
|
||||
get_current_renderer,
|
||||
get_multipass_setting
|
||||
)
|
||||
from ayon_core.hosts.max.api.lib_rendersettings import RenderSettings
|
||||
from ayon_max.api.lib_rendersettings import RenderSettings
|
||||
|
||||
instance = self._instance
|
||||
job_info = copy.deepcopy(self.job_info)
|
||||
|
|
|
|||
|
|
@ -39,8 +39,8 @@ from ayon_core.lib import (
|
|||
EnumDef,
|
||||
is_in_tests,
|
||||
)
|
||||
from ayon_core.hosts.maya.api.lib_rendersettings import RenderSettings
|
||||
from ayon_core.hosts.maya.api.lib import get_attr_in_layer
|
||||
from ayon_maya.api.lib_rendersettings import RenderSettings
|
||||
from ayon_maya.api.lib import get_attr_in_layer
|
||||
|
||||
from openpype_modules.deadline import abstract_submit_deadline
|
||||
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
|
||||
|
|
@ -292,7 +292,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
|
||||
return plugin_payload
|
||||
|
||||
def process_submission(self):
|
||||
def process_submission(self, auth=None, verify=True):
|
||||
from maya import cmds
|
||||
instance = self._instance
|
||||
|
||||
|
|
@ -332,7 +332,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
if "vrayscene" in instance.data["families"]:
|
||||
self.log.debug("Submitting V-Ray scene render..")
|
||||
vray_export_payload = self._get_vray_export_payload(payload_data)
|
||||
export_job = self.submit(vray_export_payload)
|
||||
|
||||
export_job = self.submit(vray_export_payload,
|
||||
auth=auth,
|
||||
verify=verify)
|
||||
|
||||
payload = self._get_vray_render_payload(payload_data)
|
||||
|
||||
|
|
@ -351,7 +354,9 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
else:
|
||||
# Submit main render job
|
||||
job_info, plugin_info = payload
|
||||
self.submit(self.assemble_payload(job_info, plugin_info))
|
||||
self.submit(self.assemble_payload(job_info, plugin_info),
|
||||
auth=auth,
|
||||
verify=verify)
|
||||
|
||||
def _tile_render(self, payload):
|
||||
"""Submit as tile render per frame with dependent assembly jobs."""
|
||||
|
|
@ -451,7 +456,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
# Submit frame tile jobs
|
||||
frame_tile_job_id = {}
|
||||
for frame, tile_job_payload in frame_payloads.items():
|
||||
job_id = self.submit(tile_job_payload)
|
||||
job_id = self.submit(tile_job_payload,
|
||||
instance.data["deadline"]["auth"])
|
||||
frame_tile_job_id[frame] = job_id
|
||||
|
||||
# Define assembly payloads
|
||||
|
|
@ -554,12 +560,18 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
|||
# Submit assembly jobs
|
||||
assembly_job_ids = []
|
||||
num_assemblies = len(assembly_payloads)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
for i, payload in enumerate(assembly_payloads):
|
||||
self.log.debug(
|
||||
"submitting assembly job {} of {}".format(i + 1,
|
||||
num_assemblies)
|
||||
)
|
||||
assembly_job_id = self.submit(payload)
|
||||
assembly_job_id = self.submit(
|
||||
payload,
|
||||
auth=auth,
|
||||
verify=verify
|
||||
)
|
||||
assembly_job_ids.append(assembly_job_id)
|
||||
|
||||
instance.data["assemblySubmissionJobs"] = assembly_job_ids
|
||||
|
|
|
|||
|
|
@ -4,9 +4,9 @@ import json
|
|||
import getpass
|
||||
from datetime import datetime
|
||||
|
||||
import requests
|
||||
import pyblish.api
|
||||
|
||||
from openpype_modules.deadline.abstract_submit_deadline import requests_post
|
||||
from ayon_core.pipeline.publish import (
|
||||
AYONPyblishPluginMixin
|
||||
)
|
||||
|
|
@ -76,11 +76,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
default=cls.use_gpu,
|
||||
label="Use GPU"
|
||||
),
|
||||
BoolDef(
|
||||
"suspend_publish",
|
||||
default=False,
|
||||
label="Suspend publish"
|
||||
),
|
||||
BoolDef(
|
||||
"workfile_dependency",
|
||||
default=cls.workfile_dependency,
|
||||
|
|
@ -100,20 +95,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
instance.data["attributeValues"] = self.get_attr_values_from_data(
|
||||
instance.data)
|
||||
|
||||
# add suspend_publish attributeValue to instance data
|
||||
instance.data["suspend_publish"] = instance.data["attributeValues"][
|
||||
"suspend_publish"]
|
||||
|
||||
families = instance.data["families"]
|
||||
|
||||
node = instance.data["transientData"]["node"]
|
||||
context = instance.context
|
||||
|
||||
# get default deadline webservice url from deadline module
|
||||
deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
deadline_url = instance.data.get("deadlineUrl")
|
||||
deadline_url = instance.data["deadline"]["url"]
|
||||
assert deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
self.deadline_url = "{}/api/jobs".format(deadline_url)
|
||||
|
|
@ -436,7 +423,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
|
|||
|
||||
self.log.debug("__ expectedFiles: `{}`".format(
|
||||
instance.data["expectedFiles"]))
|
||||
response = requests.post(self.deadline_url, json=payload, timeout=10)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
response = requests_post(self.deadline_url,
|
||||
json=payload,
|
||||
timeout=10,
|
||||
auth=auth,
|
||||
verify=verify)
|
||||
|
||||
if not response.ok:
|
||||
raise Exception(response.text)
|
||||
|
|
|
|||
|
|
@ -5,10 +5,10 @@ import json
|
|||
import re
|
||||
from copy import deepcopy
|
||||
|
||||
import requests
|
||||
import ayon_api
|
||||
import pyblish.api
|
||||
|
||||
from openpype_modules.deadline.abstract_submit_deadline import requests_post
|
||||
from ayon_core.pipeline import publish
|
||||
from ayon_core.lib import EnumDef, is_in_tests
|
||||
from ayon_core.pipeline.version_start import get_versioning_start
|
||||
|
|
@ -147,9 +147,6 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
|
|||
|
||||
instance_settings = self.get_attr_values_from_data(instance.data)
|
||||
initial_status = instance_settings.get("publishJobState", "Active")
|
||||
# TODO: Remove this backwards compatibility of `suspend_publish`
|
||||
if instance.data.get("suspend_publish"):
|
||||
initial_status = "Suspended"
|
||||
|
||||
args = [
|
||||
"--headless",
|
||||
|
|
@ -212,7 +209,10 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
|
|||
self.log.debug("Submitting Deadline publish job ...")
|
||||
|
||||
url = "{}/api/jobs".format(self.deadline_url)
|
||||
response = requests.post(url, json=payload, timeout=10)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
response = requests_post(
|
||||
url, json=payload, timeout=10, auth=auth, verify=verify)
|
||||
if not response.ok:
|
||||
raise Exception(response.text)
|
||||
|
||||
|
|
@ -344,11 +344,7 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
|
|||
|
||||
deadline_publish_job_id = None
|
||||
if submission_type == "deadline":
|
||||
# get default deadline webservice url from deadline module
|
||||
self.deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
self.deadline_url = instance.data.get("deadlineUrl")
|
||||
self.deadline_url = instance.data["deadline"]["url"]
|
||||
assert self.deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
deadline_publish_job_id = \
|
||||
|
|
@ -356,7 +352,9 @@ class ProcessSubmittedCacheJobOnFarm(pyblish.api.InstancePlugin,
|
|||
|
||||
# Inject deadline url to instances.
|
||||
for inst in instances:
|
||||
inst["deadlineUrl"] = self.deadline_url
|
||||
if "deadline" not in inst:
|
||||
inst["deadline"] = {}
|
||||
inst["deadline"] = instance.data["deadline"]
|
||||
|
||||
# publish job file
|
||||
publish_job = {
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@ import json
|
|||
import re
|
||||
from copy import deepcopy
|
||||
|
||||
import requests
|
||||
import clique
|
||||
import ayon_api
|
||||
import pyblish.api
|
||||
|
||||
from openpype_modules.deadline.abstract_submit_deadline import requests_post
|
||||
from ayon_core.pipeline import publish
|
||||
from ayon_core.lib import EnumDef, is_in_tests
|
||||
from ayon_core.pipeline.version_start import get_versioning_start
|
||||
|
|
@ -88,9 +88,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
|
|||
hosts = ["fusion", "max", "maya", "nuke", "houdini",
|
||||
"celaction", "aftereffects", "harmony", "blender"]
|
||||
|
||||
families = ["render.farm", "render.frames_farm",
|
||||
"prerender.farm", "prerender.frames_farm",
|
||||
"renderlayer", "imagesequence",
|
||||
families = ["render", "render.farm", "render.frames_farm",
|
||||
"prerender", "prerender.farm", "prerender.frames_farm",
|
||||
"renderlayer", "imagesequence", "image",
|
||||
"vrayscene", "maxrender",
|
||||
"arnold_rop", "mantra_rop",
|
||||
"karma_rop", "vray_rop",
|
||||
|
|
@ -224,9 +224,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
|
|||
|
||||
instance_settings = self.get_attr_values_from_data(instance.data)
|
||||
initial_status = instance_settings.get("publishJobState", "Active")
|
||||
# TODO: Remove this backwards compatibility of `suspend_publish`
|
||||
if instance.data.get("suspend_publish"):
|
||||
initial_status = "Suspended"
|
||||
|
||||
args = [
|
||||
"--headless",
|
||||
|
|
@ -306,7 +303,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
|
|||
self.log.debug("Submitting Deadline publish job ...")
|
||||
|
||||
url = "{}/api/jobs".format(self.deadline_url)
|
||||
response = requests.post(url, json=payload, timeout=10)
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
verify = instance.data["deadline"]["verify"]
|
||||
response = requests_post(
|
||||
url, json=payload, timeout=10, auth=auth, verify=verify)
|
||||
if not response.ok:
|
||||
raise Exception(response.text)
|
||||
|
||||
|
|
@ -314,7 +314,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
|
|||
|
||||
return deadline_publish_job_id
|
||||
|
||||
|
||||
def process(self, instance):
|
||||
# type: (pyblish.api.Instance) -> None
|
||||
"""Process plugin.
|
||||
|
|
@ -461,18 +460,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
|
|||
}
|
||||
|
||||
# get default deadline webservice url from deadline module
|
||||
self.deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
self.deadline_url = instance.data.get("deadlineUrl")
|
||||
self.deadline_url = instance.data["deadline"]["url"]
|
||||
assert self.deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
deadline_publish_job_id = \
|
||||
self._submit_deadline_post_job(instance, render_job, instances)
|
||||
|
||||
# Inject deadline url to instances.
|
||||
# Inject deadline url to instances to query DL for job id for overrides
|
||||
for inst in instances:
|
||||
inst["deadlineUrl"] = self.deadline_url
|
||||
inst["deadline"] = instance.data["deadline"]
|
||||
|
||||
# publish job file
|
||||
publish_job = {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import PublishXmlValidationError
|
||||
|
||||
from openpype_modules.deadline.abstract_submit_deadline import requests_get
|
||||
|
||||
|
||||
|
|
@ -8,27 +10,42 @@ class ValidateDeadlineConnection(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Validate Deadline Web Service"
|
||||
order = pyblish.api.ValidatorOrder
|
||||
hosts = ["maya", "nuke"]
|
||||
families = ["renderlayer", "render"]
|
||||
hosts = ["maya", "nuke", "aftereffects", "harmony", "fusion"]
|
||||
families = ["renderlayer", "render", "render.farm"]
|
||||
|
||||
# cache
|
||||
responses = {}
|
||||
|
||||
def process(self, instance):
|
||||
# get default deadline webservice url from deadline module
|
||||
deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
deadline_url = instance.data.get("deadlineUrl")
|
||||
self.log.debug(
|
||||
"We have deadline URL on instance {}".format(deadline_url)
|
||||
)
|
||||
if not instance.data.get("farm"):
|
||||
self.log.debug("Should not be processed on farm, skipping.")
|
||||
return
|
||||
|
||||
deadline_url = instance.data["deadline"]["url"]
|
||||
assert deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
kwargs = {}
|
||||
if instance.data["deadline"]["require_authentication"]:
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
kwargs["auth"] = auth
|
||||
|
||||
if not auth[0]:
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Deadline requires authentication. "
|
||||
"At least username is required to be set in "
|
||||
"Site Settings.")
|
||||
|
||||
if deadline_url not in self.responses:
|
||||
self.responses[deadline_url] = requests_get(deadline_url)
|
||||
self.responses[deadline_url] = requests_get(deadline_url, **kwargs)
|
||||
|
||||
response = self.responses[deadline_url]
|
||||
if response.status_code == 401:
|
||||
raise PublishXmlValidationError(
|
||||
self,
|
||||
"Deadline requires authentication. "
|
||||
"Provided credentials are not working. "
|
||||
"Please change them in Site Settings")
|
||||
assert response.ok, "Response must be ok"
|
||||
assert response.text.startswith("Deadline Web Service "), (
|
||||
"Web service did not respond with 'Deadline Web Service'"
|
||||
|
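A condensed sketch of what the validator now checks, including the 401 branch; plain 'requests' stands in for the 'requests_get' wrapper:

import requests

def check_webservice(deadline_url, auth=None):
    kwargs = {"timeout": 10}
    if auth:
        kwargs["auth"] = auth
    response = requests.get(deadline_url, **kwargs)
    if response.status_code == 401:
        raise RuntimeError(
            "Deadline credentials rejected; update Site Settings")
    if not response.ok:
        raise RuntimeError(
            "Unexpected status: {}".format(response.status_code))
    if not response.text.startswith("Deadline Web Service "):
        raise RuntimeError("Endpoint is not a Deadline Web Service")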
|
|
|||
|
|
@ -37,8 +37,9 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
|
|||
self.log.debug("Skipping local instance.")
|
||||
return
|
||||
|
||||
deadline_url = self.get_deadline_url(instance)
|
||||
pools = self.get_pools(deadline_url)
|
||||
deadline_url = instance.data["deadline"]["url"]
|
||||
pools = self.get_pools(deadline_url,
|
||||
instance.data["deadline"].get("auth"))
|
||||
|
||||
invalid_pools = {}
|
||||
primary_pool = instance.data.get("primaryPool")
|
||||
|
|
@ -61,22 +62,18 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin,
|
|||
formatting_data={"pools_str": ", ".join(pools)}
|
||||
)
|
||||
|
||||
def get_deadline_url(self, instance):
|
||||
# get default deadline webservice url from deadline module
|
||||
deadline_url = instance.context.data["defaultDeadline"]
|
||||
if instance.data.get("deadlineUrl"):
|
||||
# if custom one is set in instance, use that
|
||||
deadline_url = instance.data.get("deadlineUrl")
|
||||
return deadline_url
|
||||
|
||||
def get_pools(self, deadline_url):
|
||||
def get_pools(self, deadline_url, auth):
|
||||
if deadline_url not in self.pools_per_url:
|
||||
self.log.debug(
|
||||
"Querying available pools for Deadline url: {}".format(
|
||||
deadline_url)
|
||||
)
|
||||
pools = DeadlineModule.get_deadline_pools(deadline_url,
|
||||
auth=auth,
|
||||
log=self.log)
|
||||
# some DL return "none" as a pool name
|
||||
if "none" not in pools:
|
||||
pools.append("none")
|
||||
self.log.info("Available pools: {}".format(pools))
|
||||
self.pools_per_url[deadline_url] = pools
|
||||
|
||||
|
|
|
|||
|
|
@ -199,16 +199,16 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):
|
|||
(dict): Job info from Deadline
|
||||
|
||||
"""
|
||||
# get default deadline webservice url from deadline module
|
||||
deadline_url = instance.context.data["defaultDeadline"]
|
||||
# if custom one is set in instance, use that
|
||||
if instance.data.get("deadlineUrl"):
|
||||
deadline_url = instance.data.get("deadlineUrl")
|
||||
deadline_url = instance.data["deadline"]["url"]
|
||||
assert deadline_url, "Requires Deadline Webservice URL"
|
||||
|
||||
url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
|
||||
try:
|
||||
response = requests_get(url)
|
||||
kwargs = {}
|
||||
auth = instance.data["deadline"]["auth"]
|
||||
if auth:
|
||||
kwargs["auth"] = auth
|
||||
response = requests_get(url, **kwargs)
|
||||
except requests.exceptions.ConnectionError:
|
||||
self.log.error("Deadline is not accessible at "
|
||||
"{}".format(deadline_url))
|
||||
|
|
|
|||
1
client/ayon_core/modules/deadline/version.py
Normal file
|
|
@ -0,0 +1 @@
|
|||
__version__ = "0.1.12"
|
||||
|
|
@ -7,7 +7,7 @@ from ayon_core.lib import Logger, run_subprocess, AYONSettingsRegistry
|
|||
from ayon_core.lib.vendor_bin_utils import find_tool_in_custom_paths
|
||||
|
||||
from .rr_job import SubmitFile
|
||||
from .rr_job import RRjob, SubmitterParameter # noqa F401
|
||||
from .rr_job import RRJob, SubmitterParameter # noqa F401
|
||||
|
||||
|
||||
class Api:
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -459,36 +459,6 @@ def is_representation_from_latest(representation):
|
|||
)
|
||||
|
||||
|
||||
def get_template_data_from_session(session=None, settings=None):
|
||||
"""Template data for template fill from session keys.
|
||||
|
||||
Args:
|
||||
session (Union[Dict[str, str], None]): The Session to use. If not
|
||||
provided use the currently active global Session.
|
||||
settings (Optional[Dict[str, Any]]): Prepared studio or project
|
||||
settings.
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: All available data from session.
|
||||
"""
|
||||
|
||||
if session is not None:
|
||||
project_name = session["AYON_PROJECT_NAME"]
|
||||
folder_path = session["AYON_FOLDER_PATH"]
|
||||
task_name = session["AYON_TASK_NAME"]
|
||||
host_name = session["AYON_HOST_NAME"]
|
||||
else:
|
||||
context = get_current_context()
|
||||
project_name = context["project_name"]
|
||||
folder_path = context["folder_path"]
|
||||
task_name = context["task_name"]
|
||||
host_name = get_current_host_name()
|
||||
|
||||
return get_template_data_with_names(
|
||||
project_name, folder_path, task_name, host_name, settings
|
||||
)
|
||||
|
||||
|
||||
def get_current_context_template_data(settings=None):
|
||||
"""Prepare template data for current context.
|
||||
|
||||
|
|
|
|||
|
|
@ -681,7 +681,7 @@ class PublishAttributeValues(AttributeValues):
|
|||
|
||||
@property
|
||||
def parent(self):
|
||||
self.publish_attributes.parent
|
||||
return self.publish_attributes.parent
|
||||
|
||||
|
||||
class PublishAttributes:
|
||||
|
|
@ -1987,12 +1987,12 @@ class CreateContext:
|
|||
"Folder '{}' was not found".format(folder_path)
|
||||
)
|
||||
|
||||
task_name = None
|
||||
if task_entity is None:
|
||||
task_name = self.get_current_task_name()
|
||||
task_entity = ayon_api.get_task_by_name(
|
||||
project_name, folder_entity["id"], task_name
|
||||
)
|
||||
current_task_name = self.get_current_task_name()
|
||||
if current_task_name:
|
||||
task_entity = ayon_api.get_task_by_name(
|
||||
project_name, folder_entity["id"], current_task_name
|
||||
)
|
||||
|
||||
if pre_create_data is None:
|
||||
pre_create_data = {}
|
||||
|
|
@ -2018,7 +2018,7 @@ class CreateContext:
|
|||
|
||||
instance_data = {
|
||||
"folderPath": folder_entity["path"],
|
||||
"task": task_name,
|
||||
"task": task_entity["name"] if task_entity else None,
|
||||
"productType": creator.product_type,
|
||||
"variant": variant
|
||||
}
|
||||
|
|
@ -2053,7 +2053,7 @@ class CreateContext:
|
|||
exc_info = sys.exc_info()
|
||||
self.log.warning(error_message.format(identifier, exc_info[1]))
|
||||
|
||||
except:
|
||||
except: # noqa: E722
|
||||
add_traceback = True
|
||||
exc_info = sys.exc_info()
|
||||
self.log.warning(
|
||||
|
|
@ -2163,7 +2163,7 @@ class CreateContext:
|
|||
exc_info = sys.exc_info()
|
||||
self.log.warning(error_message.format(identifier, exc_info[1]))
|
||||
|
||||
except:
|
||||
except: # noqa: E722
|
||||
failed = True
|
||||
add_traceback = True
|
||||
exc_info = sys.exc_info()
|
||||
|
|
@ -2197,7 +2197,7 @@ class CreateContext:
|
|||
try:
|
||||
convertor.find_instances()
|
||||
|
||||
except:
|
||||
except: # noqa: E722
|
||||
failed_info.append(
|
||||
prepare_failed_convertor_operation_info(
|
||||
convertor.identifier, sys.exc_info()
|
||||
|
|
@ -2373,7 +2373,7 @@ class CreateContext:
|
|||
exc_info = sys.exc_info()
|
||||
self.log.warning(error_message.format(identifier, exc_info[1]))
|
||||
|
||||
except:
|
||||
except: # noqa: E722
|
||||
failed = True
|
||||
add_traceback = True
|
||||
exc_info = sys.exc_info()
|
||||
|
|
@ -2440,7 +2440,7 @@ class CreateContext:
|
|||
error_message.format(identifier, exc_info[1])
|
||||
)
|
||||
|
||||
except:
|
||||
except: # noqa: E722
|
||||
failed = True
|
||||
add_traceback = True
|
||||
exc_info = sys.exc_info()
|
||||
|
|
@ -2546,7 +2546,7 @@ class CreateContext:
|
|||
try:
|
||||
self.run_convertor(convertor_identifier)
|
||||
|
||||
except:
|
||||
except: # noqa: E722
|
||||
failed_info.append(
|
||||
prepare_failed_convertor_operation_info(
|
||||
convertor_identifier, sys.exc_info()
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ class RenderInstance(object):
|
|||
anatomyData = attr.ib(default=None)
|
||||
outputDir = attr.ib(default=None)
|
||||
context = attr.ib(default=None)
|
||||
deadline = attr.ib(default=None)
|
||||
|
||||
# The source instance the data of this render instance should merge into
|
||||
source_instance = attr.ib(default=None, type=pyblish.api.Instance)
|
||||
|
|
@ -215,13 +216,12 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
|
|||
|
||||
# add additional data
|
||||
data = self.add_additional_data(data)
|
||||
render_instance_dict = attr.asdict(render_instance)
|
||||
|
||||
# Merge into source instance if provided, otherwise create instance
|
||||
instance = render_instance_dict.pop("source_instance", None)
|
||||
instance = render_instance.source_instance
|
||||
if instance is None:
|
||||
instance = context.create_instance(render_instance.name)
|
||||
|
||||
render_instance_dict = attr.asdict(render_instance)
|
||||
instance.data.update(render_instance_dict)
|
||||
instance.data.update(data)
|
||||
|
||||
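The merge step relies on 'attr.asdict' flattening the attrs class into a plain dict before updating the pyblish instance; a minimal model of that:

import attr

@attr.s
class RenderInstance(object):
    name = attr.ib(default=None)
    deadline = attr.ib(default=None)
    source_instance = attr.ib(default=None)

ri = RenderInstance(name="renderMain", deadline={"url": "http://dl:8082"})
instance_data = {}
instance_data.update(attr.asdict(ri))  # nested dicts survive as-is
assert instance_data["deadline"]["url"] == "http://dl:8082"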
|
|
|
|||
|
|
@ -73,8 +73,8 @@ def get_folder_template_data(folder_entity, project_name):
|
|||
- 'parent' - direct parent name, project name is used if folder is under
|
||||
project
|
||||
|
||||
Required document fields:
|
||||
Folder: 'path' -> Plan to require: 'folderType'
|
||||
Required entity fields:
|
||||
Folder: 'path', 'folderType'
|
||||
|
||||
Args:
|
||||
folder_entity (Dict[str, Any]): Folder entity.
|
||||
|
|
@ -101,6 +101,8 @@ def get_folder_template_data(folder_entity, project_name):
|
|||
return {
|
||||
"folder": {
|
||||
"name": folder_name,
|
||||
"type": folder_entity["folderType"],
|
||||
"path": path,
|
||||
},
|
||||
"asset": folder_name,
|
||||
"hierarchy": hierarchy,
|
||||
|
|
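A sketch of how those keys derive from a folder path; the 'parent' handling follows the docstring above and is otherwise an assumption:

def folder_template_data(path, folder_type, project_name):
    parts = path.lstrip("/").split("/")
    folder_name = parts[-1]
    hierarchy_parts = parts[:-1]
    # Direct parent name; project name when folder sits under project.
    parent = hierarchy_parts[-1] if hierarchy_parts else project_name
    return {
        "folder": {"name": folder_name, "type": folder_type, "path": path},
        "asset": folder_name,
        "hierarchy": "/".join(hierarchy_parts),
        "parent": parent,
    }

assert folder_template_data("/seq/sh010", "Shot", "demo")["hierarchy"] == "seq"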
|
|||
263
client/ayon_core/pipeline/thumbnails.py
Normal file
|
|
@ -0,0 +1,263 @@
|
|||
import os
|
||||
import time
|
||||
import collections
|
||||
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.lib.local_settings import get_ayon_appdirs
|
||||
|
||||
|
||||
FileInfo = collections.namedtuple(
|
||||
"FileInfo",
|
||||
("path", "size", "modification_time")
|
||||
)
|
||||
|
||||
|
||||
class ThumbnailsCache:
|
||||
"""Cache of thumbnails on local storage.
|
||||
|
||||
Thumbnails are cached in a predefined directory under appdirs. Each project
|
||||
has its own subfolder with thumbnails, because each project has its own
|
||||
thumbnail id validation; file names are thumbnail ids with a matching
|
||||
extension. Extensions are predefined (.png and .jpeg).
|
||||
|
||||
The cache has a cleanup mechanism which is triggered on initialization by default.
|
||||
|
||||
The cleanup has 2 levels:
|
||||
1. soft cleanup which removes all files that are older than 'days_alive'
|
||||
2. max size cleanup which removes files until the thumbnails folder
|
||||
contains less than 'max_filesize'
|
||||
- this is time consuming so it's not triggered automatically
|
||||
|
||||
Args:
|
||||
cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
|
||||
"""
|
||||
|
||||
# Lifetime of thumbnails (in seconds)
|
||||
# - default 3 days
|
||||
days_alive = 3
|
||||
# Max size of thumbnail directory (in bytes)
|
||||
# - default 2 GiB
|
||||
max_filesize = 2 * 1024 * 1024 * 1024
|
||||
|
||||
def __init__(self, cleanup=True):
|
||||
self._thumbnails_dir = None
|
||||
self._days_alive_secs = self.days_alive * 24 * 60 * 60
|
||||
if cleanup:
|
||||
self.cleanup()
|
||||
|
||||
def get_thumbnails_dir(self):
|
||||
"""Root directory where thumbnails are stored.
|
||||
|
||||
Returns:
|
||||
str: Path to thumbnails root.
|
||||
"""
|
||||
|
||||
if self._thumbnails_dir is None:
|
||||
self._thumbnails_dir = get_ayon_appdirs("thumbnails")
|
||||
return self._thumbnails_dir
|
||||
|
||||
thumbnails_dir = property(get_thumbnails_dir)
|
||||
|
||||
def get_thumbnails_dir_file_info(self):
|
||||
"""Get information about all files in thumbnails directory.
|
||||
|
||||
Returns:
|
||||
List[FileInfo]: List of file information about all files.
|
||||
"""
|
||||
|
||||
thumbnails_dir = self.thumbnails_dir
|
||||
files_info = []
|
||||
if not os.path.exists(thumbnails_dir):
|
||||
return files_info
|
||||
|
||||
for root, _, filenames in os.walk(thumbnails_dir):
|
||||
for filename in filenames:
|
||||
path = os.path.join(root, filename)
|
||||
files_info.append(FileInfo(
|
||||
path, os.path.getsize(path), os.path.getmtime(path)
|
||||
))
|
||||
return files_info
|
||||
|
||||
def get_thumbnails_dir_size(self, files_info=None):
|
||||
"""Got full size of thumbnail directory.
|
||||
|
||||
Args:
|
||||
files_info (List[FileInfo]): Prepared file information about
|
||||
files in thumbnail directory.
|
||||
|
||||
Returns:
|
||||
int: File size of all files in thumbnail directory.
|
||||
"""
|
||||
|
||||
if files_info is None:
|
||||
files_info = self.get_thumbnails_dir_file_info()
|
||||
|
||||
if not files_info:
|
||||
return 0
|
||||
|
||||
return sum(
|
||||
file_info.size
|
||||
for file_info in files_info
|
||||
)
|
||||
|
||||
def cleanup(self, check_max_size=False):
|
||||
"""Cleanup thumbnails directory.
|
||||
|
||||
Args:
|
||||
check_max_size (bool): Also cleanup files to match max size of
|
||||
thumbnails directory.
|
||||
"""
|
||||
|
||||
thumbnails_dir = self.get_thumbnails_dir()
|
||||
# Skip if thumbnails dir does not exist yet
|
||||
if not os.path.exists(thumbnails_dir):
|
||||
return
|
||||
|
||||
self._soft_cleanup(thumbnails_dir)
|
||||
if check_max_size:
|
||||
self._max_size_cleanup(thumbnails_dir)
|
||||
|
||||
def _soft_cleanup(self, thumbnails_dir):
|
||||
current_time = time.time()
|
||||
for root, _, filenames in os.walk(thumbnails_dir):
|
||||
for filename in filenames:
|
||||
path = os.path.join(root, filename)
|
||||
modification_time = os.path.getmtime(path)
|
||||
if current_time - modification_time > self._days_alive_secs:
|
||||
os.remove(path)
|
||||
|
||||
def _max_size_cleanup(self, thumbnails_dir):
|
||||
files_info = self.get_thumbnails_dir_file_info()
|
||||
size = self.get_thumbnails_dir_size(files_info)
|
||||
if size < self.max_filesize:
|
||||
return
|
||||
|
||||
sorted_file_info = collections.deque(
|
||||
sorted(files_info, key=lambda item: item.modification_time)
|
||||
)
|
||||
diff = size - self.max_filesize
|
||||
while diff > 0:
|
||||
if not sorted_file_info:
|
||||
break
|
||||
|
||||
file_info = sorted_file_info.popleft()
|
||||
diff -= file_info.size
|
||||
os.remove(file_info.path)
|
||||
|
||||
def get_thumbnail_filepath(self, project_name, thumbnail_id):
|
||||
"""Get thumbnail by thumbnail id.
|
||||
|
||||
Args:
|
||||
project_name (str): Name of project.
|
||||
thumbnail_id (str): Thumbnail id.
|
||||
|
||||
Returns:
|
||||
Union[str, None]: Path to thumbnail image or None if thumbnail
|
||||
is not cached yet.
|
||||
"""
|
||||
|
||||
if not thumbnail_id:
|
||||
return None
|
||||
|
||||
for ext in (
|
||||
".png",
|
||||
".jpeg",
|
||||
):
|
||||
filepath = os.path.join(
|
||||
self.thumbnails_dir, project_name, thumbnail_id + ext
|
||||
)
|
||||
if os.path.exists(filepath):
|
||||
return filepath
|
||||
return None
|
||||
|
||||
def get_project_dir(self, project_name):
|
||||
"""Path to root directory for specific project.
|
||||
|
||||
Args:
|
||||
project_name (str): Name of project for which root directory path
|
||||
should be returned.
|
||||
|
||||
Returns:
|
||||
str: Path to root of project's thumbnails.
|
||||
"""
|
||||
|
||||
return os.path.join(self.thumbnails_dir, project_name)
|
||||
|
||||
def make_sure_project_dir_exists(self, project_name):
|
||||
project_dir = self.get_project_dir(project_name)
|
||||
if not os.path.exists(project_dir):
|
||||
os.makedirs(project_dir)
|
||||
return project_dir
|
||||
|
||||
def store_thumbnail(self, project_name, thumbnail_id, content, mime_type):
|
||||
"""Store thumbnail to cache folder.
|
||||
|
||||
Args:
|
||||
project_name (str): Project the thumbnail belongs to.
|
||||
thumbnail_id (str): Thumbnail id.
|
||||
content (bytes): Byte content of thumbnail file.
|
||||
mime_type (str): Type of content.
|
||||
|
||||
Returns:
|
||||
str: Path to cached thumbnail image file.
|
||||
"""
|
||||
|
||||
if mime_type == "image/png":
|
||||
ext = ".png"
|
||||
elif mime_type == "image/jpeg":
|
||||
ext = ".jpeg"
|
||||
else:
|
||||
raise ValueError(
|
||||
"Unknown mime type for thumbnail \"{}\"".format(mime_type))
|
||||
|
||||
project_dir = self.make_sure_project_dir_exists(project_name)
|
||||
thumbnail_path = os.path.join(project_dir, thumbnail_id + ext)
|
||||
with open(thumbnail_path, "wb") as stream:
|
||||
stream.write(content)
|
||||
|
||||
current_time = time.time()
|
||||
os.utime(thumbnail_path, (current_time, current_time))
|
||||
|
||||
return thumbnail_path
|
||||
|
||||
|
||||
class _CacheItems:
|
||||
thumbnails_cache = ThumbnailsCache()
|
||||
|
||||
|
||||
def get_thumbnail_path(project_name, thumbnail_id):
|
||||
"""Get path to thumbnail image.
|
||||
|
||||
Args:
|
||||
project_name (str): Project the thumbnail belongs to.
|
||||
thumbnail_id (Union[str, None]): Thumbnail id.
|
||||
|
||||
Returns:
|
||||
Union[str, None]: Path to thumbnail image or None if thumbnail
|
||||
id is not valid or the thumbnail could not be retrieved.
|
||||
|
||||
"""
|
||||
if not thumbnail_id:
|
||||
return None
|
||||
|
||||
filepath = _CacheItems.thumbnails_cache.get_thumbnail_filepath(
|
||||
project_name, thumbnail_id
|
||||
)
|
||||
if filepath is not None:
|
||||
return filepath
|
||||
|
||||
# 'ayon_api' had a bug, public function
|
||||
# 'get_thumbnail_by_id' did not return output of
|
||||
# 'ServerAPI' method.
|
||||
con = ayon_api.get_server_api_connection()
|
||||
result = con.get_thumbnail_by_id(project_name, thumbnail_id)
|
||||
|
||||
if result is not None and result.is_valid:
|
||||
return _CacheItems.thumbnails_cache.store_thumbnail(
|
||||
project_name,
|
||||
thumbnail_id,
|
||||
result.content,
|
||||
result.content_type
|
||||
)
|
||||
return None
|
||||
|
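A hedged usage sketch of the public helper; the project name and thumbnail id are placeholders:

# from ayon_core.pipeline.thumbnails import get_thumbnail_path
#
# path = get_thumbnail_path("demo_project", "0123456789abcdef")
# if path:
#     print("cached thumbnail:", path)  # .png or .jpeg under appdirs
# else:
#     print("no thumbnail")  # invalid id, or server fetch failed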
|
@ -33,6 +33,7 @@ import collections
|
|||
import pyblish.api
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.pipeline.template_data import get_folder_template_data
|
||||
from ayon_core.pipeline.version_start import get_versioning_start
|
||||
|
||||
|
||||
|
|
@ -383,24 +384,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
|
|||
# - 'folder', 'hierarchy', 'parent', 'folder'
|
||||
folder_entity = instance.data.get("folderEntity")
|
||||
if folder_entity:
|
||||
folder_name = folder_entity["name"]
|
||||
folder_path = folder_entity["path"]
|
||||
hierarchy_parts = folder_path.split("/")
|
||||
hierarchy_parts.pop(0)
|
||||
hierarchy_parts.pop(-1)
|
||||
parent_name = project_entity["name"]
|
||||
if hierarchy_parts:
|
||||
parent_name = hierarchy_parts[-1]
|
||||
|
||||
hierarchy = "/".join(hierarchy_parts)
|
||||
anatomy_data.update({
|
||||
"asset": folder_name,
|
||||
"hierarchy": hierarchy,
|
||||
"parent": parent_name,
|
||||
"folder": {
|
||||
"name": folder_name,
|
||||
},
|
||||
})
|
||||
folder_data = get_folder_template_data(
|
||||
folder_entity,
|
||||
project_entity["name"]
|
||||
)
|
||||
anatomy_data.update(folder_data)
|
||||
return
|
||||
|
||||
if instance.data.get("newAssetPublishing"):
|
||||
|
|
@ -418,6 +406,11 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
|
|||
"parent": parent_name,
|
||||
"folder": {
|
||||
"name": folder_name,
|
||||
"path": instance.data["folderPath"],
|
||||
# TODO get folder type from hierarchy
|
||||
# Using 'Shot' is current default behavior of editorial
|
||||
# (or 'newAssetPublishing') publishing.
|
||||
"type": "Shot",
|
||||
},
|
||||
})
|
||||
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ def prepare_changes(old_entity, new_entity):
|
|||
|
||||
Returns:
|
||||
dict[str, Any]: Changes that have new entity.
|
||||
|
||||
|
||||
"""
|
||||
changes = {}
|
||||
for key in set(new_entity.keys()):
|
||||
|
|
@ -108,68 +108,6 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
|
||||
label = "Integrate Asset"
|
||||
order = pyblish.api.IntegratorOrder
|
||||
families = ["workfile",
|
||||
"pointcache",
|
||||
"pointcloud",
|
||||
"proxyAbc",
|
||||
"camera",
|
||||
"animation",
|
||||
"model",
|
||||
"maxScene",
|
||||
"mayaAscii",
|
||||
"mayaScene",
|
||||
"setdress",
|
||||
"layout",
|
||||
"ass",
|
||||
"vdbcache",
|
||||
"scene",
|
||||
"vrayproxy",
|
||||
"vrayscene_layer",
|
||||
"render",
|
||||
"prerender",
|
||||
"imagesequence",
|
||||
"review",
|
||||
"rendersetup",
|
||||
"rig",
|
||||
"plate",
|
||||
"look",
|
||||
"ociolook",
|
||||
"audio",
|
||||
"yetiRig",
|
||||
"yeticache",
|
||||
"nukenodes",
|
||||
"gizmo",
|
||||
"source",
|
||||
"matchmove",
|
||||
"image",
|
||||
"assembly",
|
||||
"fbx",
|
||||
"gltf",
|
||||
"textures",
|
||||
"action",
|
||||
"harmony.template",
|
||||
"harmony.palette",
|
||||
"editorial",
|
||||
"background",
|
||||
"camerarig",
|
||||
"redshiftproxy",
|
||||
"effect",
|
||||
"xgen",
|
||||
"hda",
|
||||
"usd",
|
||||
"staticMesh",
|
||||
"skeletalMesh",
|
||||
"mvLook",
|
||||
"mvUsd",
|
||||
"mvUsdComposition",
|
||||
"mvUsdOverride",
|
||||
"online",
|
||||
"uasset",
|
||||
"blendScene",
|
||||
"yeticacheUE",
|
||||
"tycache",
|
||||
"csv_ingest_file",
|
||||
]
|
||||
|
||||
default_template_name = "publish"
|
||||
|
||||
|
|
@ -359,7 +297,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
|
|||
|
||||
# Compute the resource file infos once (files belonging to the
|
||||
# version instance instead of an individual representation) so
|
||||
# we can re-use those file infos per representation
|
||||
# we can reuse those file infos per representation
|
||||
resource_file_infos = self.get_files_info(
|
||||
resource_destinations, anatomy
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,11 @@
|
|||
import pyblish.api
|
||||
|
||||
from ayon_core.lib import filter_profiles
|
||||
from ayon_core.host import ILoadHost
|
||||
from ayon_core.pipeline.load import any_outdated_containers
|
||||
from ayon_core.pipeline import (
|
||||
get_current_host_name,
|
||||
registered_host,
|
||||
PublishXmlValidationError,
|
||||
OptionalPyblishPluginMixin
|
||||
)
|
||||
|
|
@ -18,17 +23,50 @@ class ShowInventory(pyblish.api.Action):
|
|||
host_tools.show_scene_inventory()
|
||||
|
||||
|
||||
class ValidateContainers(OptionalPyblishPluginMixin,
|
||||
pyblish.api.ContextPlugin):
|
||||
|
||||
class ValidateOutdatedContainers(
|
||||
OptionalPyblishPluginMixin,
|
||||
pyblish.api.ContextPlugin
|
||||
):
|
||||
"""Containers are must be updated to latest version on publish."""
|
||||
|
||||
label = "Validate Outdated Containers"
|
||||
order = pyblish.api.ValidatorOrder
|
||||
hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"]
|
||||
|
||||
optional = True
|
||||
actions = [ShowInventory]
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, settings):
|
||||
# Disable plugin if host does not inherit from 'ILoadHost'
|
||||
# - not a host that can load containers
|
||||
host = registered_host()
|
||||
if not isinstance(host, ILoadHost):
|
||||
cls.enabled = False
|
||||
return
|
||||
|
||||
# Disable if no profile is found for the current host
|
||||
profiles = (
|
||||
settings
|
||||
["core"]
|
||||
["publish"]
|
||||
["ValidateOutdatedContainers"]
|
||||
["plugin_state_profiles"]
|
||||
)
|
||||
profile = filter_profiles(
|
||||
profiles, {"host_names": get_current_host_name()}
|
||||
)
|
||||
if not profile:
|
||||
cls.enabled = False
|
||||
return
|
||||
|
||||
# Apply settings from profile
|
||||
for attr_name in {
|
||||
"enabled",
|
||||
"optional",
|
||||
"active",
|
||||
}:
|
||||
setattr(cls, attr_name, profile[attr_name])
|
||||
|
||||
def process(self, context):
|
||||
if not self.is_active(context.data):
|
||||
return
|
||||
|
|
|
|||
|
|
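The 'apply_settings' classmethod above is what lets studio settings switch the validator on or off per host. A hedged sketch of the settings shape it expects and of the 'filter_profiles' call (the profile values are made-up examples; 'filter_profiles' and the nested settings path mirror the diff):

from ayon_core.lib import filter_profiles

# Illustrative settings payload matching the nested lookup in 'apply_settings'.
settings = {
    "core": {"publish": {"ValidateOutdatedContainers": {
        "plugin_state_profiles": [{
            "host_names": ["maya", "nuke"],
            "enabled": True,
            "optional": True,
            "active": False,
        }],
    }}}
}

profiles = (
    settings["core"]["publish"]["ValidateOutdatedContainers"]
    ["plugin_state_profiles"]
)
# First profile whose filters match the current host, or None,
# in which case the plugin disables itself.
profile = filter_profiles(profiles, {"host_names": "maya"})
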
@@ -1,8 +1,14 @@
import pyblish.api
from ayon_core.pipeline.publish import PublishValidationError

from ayon_core.lib import filter_profiles
from ayon_core.pipeline.publish import (
    PublishValidationError,
    OptionalPyblishPluginMixin,
    get_current_host_name,
)


class ValidateVersion(pyblish.api.InstancePlugin):
class ValidateVersion(pyblish.api.InstancePlugin, OptionalPyblishPluginMixin):
    """Validate instance version.

    AYON does not allow overwriting previously published versions.

@@ -11,13 +17,39 @@ class ValidateVersion(pyblish.api.InstancePlugin):
    order = pyblish.api.ValidatorOrder

    label = "Validate Version"
    hosts = ["nuke", "maya", "houdini", "blender",
             "photoshop", "aftereffects"]

    optional = False
    active = True

    @classmethod
    def apply_settings(cls, settings):
        # Disable if no profile is found for the current host
        profiles = (
            settings
            ["core"]
            ["publish"]
            ["ValidateVersion"]
            ["plugin_state_profiles"]
        )
        profile = filter_profiles(
            profiles, {"host_names": get_current_host_name()}
        )
        if not profile:
            cls.enabled = False
            return

        # Apply settings from profile
        for attr_name in {
            "enabled",
            "optional",
            "active",
        }:
            setattr(cls, attr_name, profile[attr_name])

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        version = instance.data.get("version")
        latest_version = instance.data.get("latestVersion")

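The hunk ends just as 'process' reads 'version' and 'latestVersion' from the instance data; the rest of the method is not part of this diff. Based on the class docstring ("AYON does not allow overwriting previously published versions"), a plausible continuation compares the two values. This is sketched here as a standalone function, not the repository's actual code (the real plugin would raise 'PublishValidationError'):

def validate_version(version, latest_version):
    # Sketch only: refuse to publish a version that already exists.
    if version is None or latest_version is None:
        return
    if int(version) <= int(latest_version):
        raise ValueError(
            f"Version {version} was already published"
            f" (latest on server is {latest_version})."
        )
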
@@ -1,28 +1,31 @@
"""OpenColorIO Wrapper.

Only to be interpreted by Python 3. It is run in subprocess in case
Python 2 hosts needs to use it. Or it is used as module for Python 3
processing.

Providing functionality:
- get_colorspace - console command - python 2
    - returning all available color spaces
      found in input config path.
- _get_colorspace_data - python 3 - module function
    - returning all available colorspaces
      found in input config path.
- get_views - console command - python 2
    - returning all available viewers
      found in input config path.
- _get_views_data - python 3 - module function
    - returning all available viewers
      found in input config path.
Receive OpenColorIO information and store it in JSON format for processes
that don't have access to OpenColorIO or their version of OpenColorIO is
not compatible.
"""

import click
import json
from pathlib import Path
import PyOpenColorIO as ocio

import click

from ayon_core.pipeline.colorspace import (
    has_compatible_ocio_package,
    get_display_view_colorspace_name,
    get_config_file_rules_colorspace_from_filepath,
    get_config_version_data,
    get_ocio_config_views,
    get_ocio_config_colorspaces,
)


def _save_output_to_json_file(output, output_path):
    json_path = Path(output_path)
    with open(json_path, "w") as stream:
        json.dump(output, stream)

    print(f"Data are saved to '{json_path}'")


@click.group()

@@ -30,404 +33,185 @@ def main():
    pass  # noqa: WPS100


@main.group()
def config():
    """Config related commands group

    Example of use:
    > pyton.exe ./ocio_wrapper.py config <command> *args
    """
    pass  # noqa: WPS100


@main.group()
def colorspace():
    """Colorspace related commands group

    Example of use:
    > pyton.exe ./ocio_wrapper.py config <command> *args
    """
    pass  # noqa: WPS100


@config.command(
    name="get_colorspace",
    help=(
        "return all colorspaces from config file "
        "--path input arg is required"
    )
)
@click.option("--in_path", required=True,
              help="path where to read ocio config file",
              type=click.Path(exists=True))
@click.option("--out_path", required=True,
              help="path where to write output json file",
              type=click.Path())
def get_colorspace(in_path, out_path):
@main.command(
    name="get_ocio_config_colorspaces",
    help="return all colorspaces from config file")
@click.option(
    "--config_path",
    required=True,
    help="OCIO config path to read ocio config file.",
    type=click.Path(exists=True))
@click.option(
    "--output_path",
    required=True,
    help="path where to write output json file",
    type=click.Path())
def _get_ocio_config_colorspaces(config_path, output_path):
    """Aggregate all colorspace to file.

    Python 2 wrapped console command

    Args:
        in_path (str): config file path string
        out_path (str): temp json file path string
        config_path (str): config file path string
        output_path (str): temp json file path string

    Example of use:
    > pyton.exe ./ocio_wrapper.py config get_colorspace
        --in_path=<path> --out_path=<path>
        --config_path <path> --output_path <path>
    """
    json_path = Path(out_path)

    out_data = _get_colorspace_data(in_path)

    with open(json_path, "w") as f_:
        json.dump(out_data, f_)

    print(f"Colorspace data are saved to '{json_path}'")


def _get_colorspace_data(config_path):
    """Return all found colorspace data.

    Args:
        config_path (str): path string leading to config.ocio

    Raises:
        IOError: Input config does not exist.

    Returns:
        dict: aggregated available colorspaces
    """
    config_path = Path(config_path)

    if not config_path.is_file():
        raise IOError(
            f"Input path `{config_path}` should be `config.ocio` file")

    config = ocio.Config().CreateFromFile(str(config_path))

    colorspace_data = {
        "roles": {},
        "colorspaces": {
            color.getName(): {
                "family": color.getFamily(),
                "categories": list(color.getCategories()),
                "aliases": list(color.getAliases()),
                "equalitygroup": color.getEqualityGroup(),
            }
            for color in config.getColorSpaces()
        },
        "displays_views": {
            f"{view} ({display})": {
                "display": display,
                "view": view
            }
            for display in config.getDisplays()
            for view in config.getViews(display)
        },
        "looks": {}
    }

    # add looks
    looks = config.getLooks()
    if looks:
        colorspace_data["looks"] = {
            look.getName(): {"process_space": look.getProcessSpace()}
            for look in looks
        }

    # add roles
    roles = config.getRoles()
    if roles:
        colorspace_data["roles"] = {
            role: {"colorspace": colorspace}
            for (role, colorspace) in roles
        }

    return colorspace_data


@config.command(
    name="get_views",
    help=(
        "return all viewers from config file "
        "--path input arg is required"
    _save_output_to_json_file(
        get_ocio_config_colorspaces(config_path),
        output_path
    )
)
@click.option("--in_path", required=True,
              help="path where to read ocio config file",
              type=click.Path(exists=True))
@click.option("--out_path", required=True,
              help="path where to write output json file",
              type=click.Path())
def get_views(in_path, out_path):


@main.command(
    name="get_ocio_config_views",
    help="All viewers from config file")
@click.option(
    "--config_path",
    required=True,
    help="OCIO config path to read ocio config file.",
    type=click.Path(exists=True))
@click.option(
    "--output_path",
    required=True,
    help="path where to write output json file",
    type=click.Path())
def _get_ocio_config_views(config_path, output_path):
    """Aggregate all viewers to file.

    Python 2 wrapped console command

    Args:
        in_path (str): config file path string
        out_path (str): temp json file path string
        config_path (str): config file path string
        output_path (str): temp json file path string

    Example of use:
    > pyton.exe ./ocio_wrapper.py config get_views \
        --in_path=<path> --out_path=<path>
        --config_path <path> --output <path>
    """
    json_path = Path(out_path)

    out_data = _get_views_data(in_path)

    with open(json_path, "w") as f_:
        json.dump(out_data, f_)

    print(f"Viewer data are saved to '{json_path}'")


def _get_views_data(config_path):
    """Return all found viewer data.

    Args:
        config_path (str): path string leading to config.ocio

    Raises:
        IOError: Input config does not exist.

    Returns:
        dict: aggregated available viewers
    """
    config_path = Path(config_path)

    if not config_path.is_file():
        raise IOError("Input path should be `config.ocio` file")

    config = ocio.Config().CreateFromFile(str(config_path))

    data_ = {}
    for display in config.getDisplays():
        for view in config.getViews(display):
            colorspace = config.getDisplayViewColorSpaceName(display, view)
            # Special token. See https://opencolorio.readthedocs.io/en/latest/guides/authoring/authoring.html#shared-views  # noqa
            if colorspace == "<USE_DISPLAY_NAME>":
                colorspace = display

            data_[f"{display}/{view}"] = {
                "display": display,
                "view": view,
                "colorspace": colorspace
            }

    return data_


@config.command(
    name="get_version",
    help=(
        "return major and minor version from config file "
        "--config_path input arg is required"
        "--out_path input arg is required"
    _save_output_to_json_file(
        get_ocio_config_views(config_path),
        output_path
    )
)
@click.option("--config_path", required=True,
              help="path where to read ocio config file",
              type=click.Path(exists=True))
@click.option("--out_path", required=True,
              help="path where to write output json file",
              type=click.Path())
def get_version(config_path, out_path):
    """Get version of config.

    Python 2 wrapped console command


@main.command(
    name="get_config_version_data",
    help="Get major and minor version from config file")
@click.option(
    "--config_path",
    required=True,
    help="OCIO config path to read ocio config file.",
    type=click.Path(exists=True))
@click.option(
    "--output_path",
    required=True,
    help="path where to write output json file",
    type=click.Path())
def _get_config_version_data(config_path, output_path):
    """Get version of config.

    Args:
        config_path (str): ocio config file path string
        out_path (str): temp json file path string
        output_path (str): temp json file path string

    Example of use:
    > pyton.exe ./ocio_wrapper.py config get_version \
        --config_path=<path> --out_path=<path>
        --config_path <path> --output_path <path>
    """
    json_path = Path(out_path)

    out_data = _get_version_data(config_path)

    with open(json_path, "w") as f_:
        json.dump(out_data, f_)

    print(f"Config version data are saved to '{json_path}'")


def _get_version_data(config_path):
    """Return major and minor version info.

    Args:
        config_path (str): path string leading to config.ocio

    Raises:
        IOError: Input config does not exist.

    Returns:
        dict: minor and major keys with values
    """
    config_path = Path(config_path)

    if not config_path.is_file():
        raise IOError("Input path should be `config.ocio` file")

    config = ocio.Config().CreateFromFile(str(config_path))

    return {
        "major": config.getMajorVersion(),
        "minor": config.getMinorVersion()
    }


@colorspace.command(
    name="get_config_file_rules_colorspace_from_filepath",
    help=(
        "return colorspace from filepath "
        "--config_path - ocio config file path (input arg is required) "
        "--filepath - any file path (input arg is required) "
        "--out_path - temp json file path (input arg is required)"
    _save_output_to_json_file(
        get_config_version_data(config_path),
        output_path
    )
)
@click.option("--config_path", required=True,
              help="path where to read ocio config file",
              type=click.Path(exists=True))
@click.option("--filepath", required=True,
              help="path to file to get colorspace from",
              type=click.Path())
@click.option("--out_path", required=True,
              help="path where to write output json file",
              type=click.Path())
def get_config_file_rules_colorspace_from_filepath(
    config_path, filepath, out_path


@main.command(
    name="get_config_file_rules_colorspace_from_filepath",
    help="Colorspace file rules from filepath")
@click.option(
    "--config_path",
    required=True,
    help="OCIO config path to read ocio config file.",
    type=click.Path(exists=True))
@click.option(
    "--filepath",
    required=True,
    help="Path to file to get colorspace from.",
    type=click.Path())
@click.option(
    "--output_path",
    required=True,
    help="Path where to write output json file.",
    type=click.Path())
def _get_config_file_rules_colorspace_from_filepath(
    config_path, filepath, output_path
):
    """Get colorspace from file path wrapper.

    Python 2 wrapped console command

    Args:
        config_path (str): config file path string
        filepath (str): path string leading to file
        out_path (str): temp json file path string
        output_path (str): temp json file path string

    Example of use:
    > pyton.exe ./ocio_wrapper.py \
    > python.exe ./ocio_wrapper.py \
        colorspace get_config_file_rules_colorspace_from_filepath \
        --config_path=<path> --filepath=<path> --out_path=<path>
        --config_path <path> --filepath <path> --output_path <path>
    """
    json_path = Path(out_path)

    colorspace = _get_config_file_rules_colorspace_from_filepath(
        config_path, filepath)

    with open(json_path, "w") as f_:
        json.dump(colorspace, f_)

    print(f"Colorspace name is saved to '{json_path}'")
    _save_output_to_json_file(
        get_config_file_rules_colorspace_from_filepath(config_path, filepath),
        output_path
    )


def _get_config_file_rules_colorspace_from_filepath(config_path, filepath):
    """Return found colorspace data found in v2 file rules.

    Args:
        config_path (str): path string leading to config.ocio
        filepath (str): path string leading to v2 file rules

    Raises:
        IOError: Input config does not exist.

    Returns:
        dict: aggregated available colorspaces
    """
    config_path = Path(config_path)

    if not config_path.is_file():
        raise IOError(
            f"Input path `{config_path}` should be `config.ocio` file")

    config = ocio.Config().CreateFromFile(str(config_path))

    # TODO: use `parseColorSpaceFromString` instead if ocio v1
    colorspace = config.getColorSpaceFromFilepath(str(filepath))

    return colorspace


def _get_display_view_colorspace_name(config_path, display, view):
    """Returns the colorspace attribute of the (display, view) pair.

    Args:
        config_path (str): path string leading to config.ocio
        display (str): display name e.g. "ACES"
        view (str): view name e.g. "sRGB"

    Raises:
        IOError: Input config does not exist.

    Returns:
        view color space name (str) e.g. "Output - sRGB"
    """
    config_path = Path(config_path)

    if not config_path.is_file():
        raise IOError("Input path should be `config.ocio` file")

    config = ocio.Config.CreateFromFile(str(config_path))
    colorspace = config.getDisplayViewColorSpaceName(display, view)

    return colorspace


@config.command(
@main.command(
    name="get_display_view_colorspace_name",
    help=(
        "return default view colorspace name "
        "for the given display and view "
        "--path input arg is required"
    )
)
@click.option("--in_path", required=True,
              help="path where to read ocio config file",
              type=click.Path(exists=True))
@click.option("--out_path", required=True,
              help="path where to write output json file",
              type=click.Path())
@click.option("--display", required=True,
              help="display name",
              type=click.STRING)
@click.option("--view", required=True,
              help="view name",
              type=click.STRING)
def get_display_view_colorspace_name(in_path, out_path,
                                     display, view):
        "Default view colorspace name for the given display and view"
    ))
@click.option(
    "--config_path",
    required=True,
    help="path where to read ocio config file",
    type=click.Path(exists=True))
@click.option(
    "--display",
    required=True,
    help="Display name",
    type=click.STRING)
@click.option(
    "--view",
    required=True,
    help="view name",
    type=click.STRING)
@click.option(
    "--output_path",
    required=True,
    help="path where to write output json file",
    type=click.Path())
def _get_display_view_colorspace_name(
    config_path, display, view, output_path
):
    """Aggregate view colorspace name to file.

    Wrapper command for processes without access to OpenColorIO

    Args:
        in_path (str): config file path string
        out_path (str): temp json file path string
        config_path (str): config file path string
        output_path (str): temp json file path string
        display (str): display name e.g. "ACES"
        view (str): view name e.g. "sRGB"

    Example of use:
    > pyton.exe ./ocio_wrapper.py config \
        get_display_view_colorspace_name --in_path=<path> \
        --out_path=<path> --display=<display> --view=<view>
        get_display_view_colorspace_name --config_path <path> \
        --output_path <path> --display <display> --view <view>
    """
    _save_output_to_json_file(
        get_display_view_colorspace_name(config_path, display, view),
        output_path
    )

    out_data = _get_display_view_colorspace_name(in_path,
                                                 display,
                                                 view)

    with open(out_path, "w") as f:
        json.dump(out_data, f)

    print(f"Display view colorspace saved to '{out_path}'")


if __name__ == '__main__':
if __name__ == "__main__":
    if not has_compatible_ocio_package():
        raise RuntimeError("OpenColorIO is not available.")
    main()

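Taken together, the rewrite flattens the old 'config'/'colorspace' click groups into plain 'main' commands, moves the OCIO work into 'ayon_core.pipeline.colorspace', and funnels every result through '_save_output_to_json_file'. A hedged sketch of how a process without a compatible OpenColorIO package might call the wrapper and read the result back (the interpreter and both paths are placeholders, not values from the repository):

import json
import subprocess
import tempfile

with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmp:
    output_path = tmp.name

# "python3" and both paths are placeholders; a real caller would use the
# pipeline's resolved Python 3 executable and OCIO config path.
subprocess.run(
    [
        "python3", "./ocio_wrapper.py",
        "get_ocio_config_colorspaces",
        "--config_path", "/path/to/config.ocio",
        "--output_path", output_path,
    ],
    check=True,
)
with open(output_path) as stream:
    colorspaces = json.load(stream)
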
@@ -104,14 +104,11 @@ class WebServerTool:
        again. In that case, use existing running webserver.
        Check here is easier than capturing exception from thread.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = True
        try:
            sock.bind((host_name, port))
            result = False
        except:
            print("Port is in use")
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
            result = con.connect_ex((host_name, port)) == 0

        if result:
            print(f"Port {port} is already in use")
        return result

    def call(self, func):

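The replacement is a behavior fix as much as a cleanup: 'bind()' can fail for reasons other than a busy port, the old socket was never closed, and the bare 'except' swallowed everything. 'connect_ex()' returns 0 exactly when something is already accepting connections, and the 'with' block closes the probe socket. The new check in isolation:

import socket


def port_in_use(host_name, port):
    # connect_ex() returns 0 when a listener accepted the connection,
    # i.e. the port is taken; the context manager closes the socket.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
        return con.connect_ex((host_name, port)) == 0


print(port_in_use("localhost", 8080))
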
@@ -14,6 +14,7 @@ from .hierarchy import (
)
from .thumbnails import ThumbnailsModel
from .selection import HierarchyExpectedSelection
from .users import UsersModel


__all__ = (

@@ -32,4 +33,6 @@ __all__ = (
    "ThumbnailsModel",

    "HierarchyExpectedSelection",

    "UsersModel",
)

@@ -5,7 +5,7 @@ import ayon_api
import six

from ayon_core.style import get_default_entity_icon_color
from ayon_core.lib import CacheItem
from ayon_core.lib import CacheItem, NestedCacheItem

PROJECTS_MODEL_SENDER = "projects.model"

@@ -17,6 +17,49 @@ class AbstractHierarchyController:
    pass


class StatusItem:
    """Item representing status of project.

    Args:
        name (str): Status name ("Not ready").
        color (str): Status color in hex ("#434a56").
        short (str): Short status name ("NRD").
        icon (str): Icon name in MaterialIcons ("fiber_new").
        state (Literal["not_started", "in_progress", "done", "blocked"]):
            Status state.

    """
    def __init__(self, name, color, short, icon, state):
        self.name = name
        self.color = color
        self.short = short
        self.icon = icon
        self.state = state

    def to_data(self):
        return {
            "name": self.name,
            "color": self.color,
            "short": self.short,
            "icon": self.icon,
            "state": self.state,
        }

    @classmethod
    def from_data(cls, data):
        return cls(**data)

    @classmethod
    def from_project_item(cls, status_data):
        return cls(
            name=status_data["name"],
            color=status_data["color"],
            short=status_data["shortName"],
            icon=status_data["icon"],
            state=status_data["state"],
        )


class ProjectItem:
    """Item representing project entity on a server.

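'StatusItem.from_project_item' maps the server's status fields onto the item, including the 'shortName' to 'short' rename, while 'to_data'/'from_data' give a plain-dict round trip for crossing controller boundaries. A small sketch, assuming the 'StatusItem' class above is in scope and using made-up data shaped like one entry of a project entity's "statuses" list:

status_data = {
    "name": "Not ready",
    "color": "#434a56",
    "shortName": "NRD",
    "icon": "fiber_new",
    "state": "not_started",
}
item = StatusItem.from_project_item(status_data)
# Round trip through plain data, e.g. for passing between models.
assert StatusItem.from_data(item.to_data()).name == "Not ready"
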
@@ -40,6 +83,23 @@ class ProjectItem:
        }
        self.icon = icon

    @classmethod
    def from_entity(cls, project_entity):
        """Creates project item from entity.

        Args:
            project_entity (dict[str, Any]): Project entity.

        Returns:
            ProjectItem: Project item.

        """
        return cls(
            project_entity["name"],
            project_entity["active"],
            project_entity["library"],
        )

    def to_data(self):
        """Converts project item to data.

@@ -79,7 +139,7 @@ def _get_project_items_from_entitiy(projects):
    """

    return [
        ProjectItem(project["name"], project["active"], project["library"])
        ProjectItem.from_entity(project)
        for project in projects
    ]

@@ -87,18 +147,29 @@ def _get_project_items_from_entitiy(projects):
class ProjectsModel(object):
    def __init__(self, controller):
        self._projects_cache = CacheItem(default_factory=list)
        self._project_items_by_name = {}
        self._projects_by_name = {}
        self._project_statuses_cache = NestedCacheItem(
            levels=1, default_factory=list
        )
        self._projects_by_name = NestedCacheItem(
            levels=1, default_factory=list
        )

        self._is_refreshing = False
        self._controller = controller

    def reset(self):
        self._projects_cache.reset()
        self._project_items_by_name = {}
        self._projects_by_name = {}
        self._project_statuses_cache.reset()
        self._projects_by_name.reset()

    def refresh(self):
        """Refresh project items.

        This method will requery list of ProjectItem returned by
        'get_project_items'.

        To reset all cached items use 'reset' method.
        """
        self._refresh_projects_cache()

    def get_project_items(self, sender):

@@ -117,12 +188,51 @@ class ProjectsModel(object):
        return self._projects_cache.get_data()

    def get_project_entity(self, project_name):
        if project_name not in self._projects_by_name:
        """Get project entity.

        Args:
            project_name (str): Project name.

        Returns:
            Union[dict[str, Any], None]: Project entity or None if project
                was not found by name.

        """
        project_cache = self._projects_by_name[project_name]
        if not project_cache.is_valid:
            entity = None
            if project_name:
                entity = ayon_api.get_project(project_name)
            self._projects_by_name[project_name] = entity
        return self._projects_by_name[project_name]
            project_cache.update_data(entity)
        return project_cache.get_data()

    def get_project_status_items(self, project_name, sender):
        """Get project status items.

        Args:
            project_name (str): Project name.
            sender (Union[str, None]): Name of sender who asked for items.

        Returns:
            list[StatusItem]: Status items for project.

        """
        statuses_cache = self._project_statuses_cache[project_name]
        if not statuses_cache.is_valid:
            with self._project_statuses_refresh_event_manager(
                sender, project_name
            ):
                project_entity = None
                if project_name:
                    project_entity = self.get_project_entity(project_name)
                statuses = []
                if project_entity:
                    statuses = [
                        StatusItem.from_project_item(status)
                        for status in project_entity["statuses"]
                    ]
                statuses_cache.update_data(statuses)
        return statuses_cache.get_data()

    @contextlib.contextmanager
    def _project_refresh_event_manager(self, sender):

@@ -143,6 +253,23 @@ class ProjectsModel(object):
        )
        self._is_refreshing = False

    @contextlib.contextmanager
    def _project_statuses_refresh_event_manager(self, sender, project_name):
        self._controller.emit_event(
            "projects.statuses.refresh.started",
            {"sender": sender, "project_name": project_name},
            PROJECTS_MODEL_SENDER
        )
        try:
            yield

        finally:
            self._controller.emit_event(
                "projects.statuses.refresh.finished",
                {"sender": sender, "project_name": project_name},
                PROJECTS_MODEL_SENDER
            )

    def _refresh_projects_cache(self, sender=None):
        if self._is_refreshing:
            return None

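The status cache follows the same recipe as the project entity cache above: index a 'NestedCacheItem' by key, check 'is_valid', fill with 'update_data()', read with 'get_data()', and wrap the fill in a context manager so the paired started/finished events fire even when the refresh raises. A condensed sketch of that recipe (the cache API mirrors its use in this diff; the event names and data here are illustrative):

import contextlib

from ayon_core.lib import NestedCacheItem


@contextlib.contextmanager
def refresh_events(emit):
    emit("refresh.started")
    try:
        yield
    finally:
        # 'finished' fires even if the refresh raised an exception.
        emit("refresh.finished")


cache = NestedCacheItem(levels=1, default_factory=list)
entry = cache["demo_project"]
if not entry.is_valid:
    with refresh_events(print):
        entry.update_data(["Not ready", "In progress"])
print(entry.get_data())
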
@@ -1,234 +1,15 @@
import os
import time
import collections

import ayon_api
import appdirs

from ayon_core.lib import NestedCacheItem

FileInfo = collections.namedtuple(
    "FileInfo",
    ("path", "size", "modification_time")
)


class ThumbnailsCache:
    """Cache of thumbnails on local storage.

    Thumbnails are cached to appdirs to predefined directory. Each project has
    own subfolder with thumbnails -> that's because each project has own
    thumbnail id validation and file names are thumbnail ids with matching
    extension. Extensions are predefined (.png and .jpeg).

    Cache has cleanup mechanism which is triggered on initialized by default.

    The cleanup has 2 levels:
    1. soft cleanup which remove all files that are older then 'days_alive'
    2. max size cleanup which remove all files until the thumbnails folder
        contains less then 'max_filesize'
        - this is time consuming so it's not triggered automatically

    Args:
        cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
    """

    # Lifetime of thumbnails (in seconds)
    # - default 3 days
    days_alive = 3
    # Max size of thumbnail directory (in bytes)
    # - default 2 Gb
    max_filesize = 2 * 1024 * 1024 * 1024

    def __init__(self, cleanup=True):
        self._thumbnails_dir = None
        self._days_alive_secs = self.days_alive * 24 * 60 * 60
        if cleanup:
            self.cleanup()

    def get_thumbnails_dir(self):
        """Root directory where thumbnails are stored.

        Returns:
            str: Path to thumbnails root.
        """

        if self._thumbnails_dir is None:
            # TODO use generic function
            directory = appdirs.user_data_dir("AYON", "Ynput")
            self._thumbnails_dir = os.path.join(directory, "thumbnails")
        return self._thumbnails_dir

    thumbnails_dir = property(get_thumbnails_dir)

    def get_thumbnails_dir_file_info(self):
        """Get information about all files in thumbnails directory.

        Returns:
            List[FileInfo]: List of file information about all files.
        """

        thumbnails_dir = self.thumbnails_dir
        files_info = []
        if not os.path.exists(thumbnails_dir):
            return files_info

        for root, _, filenames in os.walk(thumbnails_dir):
            for filename in filenames:
                path = os.path.join(root, filename)
                files_info.append(FileInfo(
                    path, os.path.getsize(path), os.path.getmtime(path)
                ))
        return files_info

    def get_thumbnails_dir_size(self, files_info=None):
        """Got full size of thumbnail directory.

        Args:
            files_info (List[FileInfo]): Prepared file information about
                files in thumbnail directory.

        Returns:
            int: File size of all files in thumbnail directory.
        """

        if files_info is None:
            files_info = self.get_thumbnails_dir_file_info()

        if not files_info:
            return 0

        return sum(
            file_info.size
            for file_info in files_info
        )

    def cleanup(self, check_max_size=False):
        """Cleanup thumbnails directory.

        Args:
            check_max_size (bool): Also cleanup files to match max size of
                thumbnails directory.
        """

        thumbnails_dir = self.get_thumbnails_dir()
        # Skip if thumbnails dir does not exist yet
        if not os.path.exists(thumbnails_dir):
            return

        self._soft_cleanup(thumbnails_dir)
        if check_max_size:
            self._max_size_cleanup(thumbnails_dir)

    def _soft_cleanup(self, thumbnails_dir):
        current_time = time.time()
        for root, _, filenames in os.walk(thumbnails_dir):
            for filename in filenames:
                path = os.path.join(root, filename)
                modification_time = os.path.getmtime(path)
                if current_time - modification_time > self._days_alive_secs:
                    os.remove(path)

    def _max_size_cleanup(self, thumbnails_dir):
        files_info = self.get_thumbnails_dir_file_info()
        size = self.get_thumbnails_dir_size(files_info)
        if size < self.max_filesize:
            return

        sorted_file_info = collections.deque(
            sorted(files_info, key=lambda item: item.modification_time)
        )
        diff = size - self.max_filesize
        while diff > 0:
            if not sorted_file_info:
                break

            file_info = sorted_file_info.popleft()
            diff -= file_info.size
            os.remove(file_info.path)

    def get_thumbnail_filepath(self, project_name, thumbnail_id):
        """Get thumbnail by thumbnail id.

        Args:
            project_name (str): Name of project.
            thumbnail_id (str): Thumbnail id.

        Returns:
            Union[str, None]: Path to thumbnail image or None if thumbnail
                is not cached yet.
        """

        if not thumbnail_id:
            return None

        for ext in (
            ".png",
            ".jpeg",
        ):
            filepath = os.path.join(
                self.thumbnails_dir, project_name, thumbnail_id + ext
            )
            if os.path.exists(filepath):
                return filepath
        return None

    def get_project_dir(self, project_name):
        """Path to root directory for specific project.

        Args:
            project_name (str): Name of project for which root directory path
                should be returned.

        Returns:
            str: Path to root of project's thumbnails.
        """

        return os.path.join(self.thumbnails_dir, project_name)

    def make_sure_project_dir_exists(self, project_name):
        project_dir = self.get_project_dir(project_name)
        if not os.path.exists(project_dir):
            os.makedirs(project_dir)
        return project_dir

    def store_thumbnail(self, project_name, thumbnail_id, content, mime_type):
        """Store thumbnail to cache folder.

        Args:
            project_name (str): Project where the thumbnail belong to.
            thumbnail_id (str): Id of thumbnail.
            content (bytes): Byte content of thumbnail file.
            mime_data (str): Type of content.

        Returns:
            str: Path to cached thumbnail image file.
        """

        if mime_type == "image/png":
            ext = ".png"
        elif mime_type == "image/jpeg":
            ext = ".jpeg"
        else:
            raise ValueError(
                "Unknown mime type for thumbnail \"{}\"".format(mime_type))

        project_dir = self.make_sure_project_dir_exists(project_name)
        thumbnail_path = os.path.join(project_dir, thumbnail_id + ext)
        with open(thumbnail_path, "wb") as stream:
            stream.write(content)

        current_time = time.time()
        os.utime(thumbnail_path, (current_time, current_time))

        return thumbnail_path
from ayon_core.pipeline.thumbnails import get_thumbnail_path


class ThumbnailsModel:
    entity_cache_lifetime = 240  # In seconds

    def __init__(self):
        self._thumbnail_cache = ThumbnailsCache()
        self._paths_cache = collections.defaultdict(dict)
        self._folders_cache = NestedCacheItem(
            levels=2, lifetime=self.entity_cache_lifetime)

@@ -283,28 +64,7 @@ class ThumbnailsModel:
        if thumbnail_id in project_cache:
            return project_cache[thumbnail_id]

        filepath = self._thumbnail_cache.get_thumbnail_filepath(
            project_name, thumbnail_id
        )
        if filepath is not None:
            project_cache[thumbnail_id] = filepath
            return filepath

        # 'ayon_api' had a bug, public function
        # 'get_thumbnail_by_id' did not return output of
        # 'ServerAPI' method.
        con = ayon_api.get_server_api_connection()
        result = con.get_thumbnail_by_id(project_name, thumbnail_id)
        if result is None:
            pass

        elif result.is_valid:
            filepath = self._thumbnail_cache.store_thumbnail(
                project_name,
                thumbnail_id,
                result.content,
                result.content_type
            )
        filepath = get_thumbnail_path(project_name, thumbnail_id)
        project_cache[thumbnail_id] = filepath
        return filepath

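After this removal the appdirs bookkeeping, expiry, and size-capped cleanup all live behind one pipeline call, and the model only memoizes the resolved path per project. A hedged sketch of the reduced lookup, mirroring the new code above (the wrapper function is illustrative):

import collections

from ayon_core.pipeline.thumbnails import get_thumbnail_path

paths_cache = collections.defaultdict(dict)


def thumbnail_path(project_name, thumbnail_id):
    project_cache = paths_cache[project_name]
    if thumbnail_id not in project_cache:
        # Download and disk caching happen inside the pipeline helper now.
        project_cache[thumbnail_id] = get_thumbnail_path(
            project_name, thumbnail_id
        )
    return project_cache[thumbnail_id]
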
client/ayon_core/tools/common_models/users.py (new file, 84 lines)

@@ -0,0 +1,84 @@
import ayon_api

from ayon_core.lib import CacheItem


class UserItem:
    def __init__(
        self,
        username,
        full_name,
        email,
        avatar_url,
        active,
    ):
        self.username = username
        self.full_name = full_name
        self.email = email
        self.avatar_url = avatar_url
        self.active = active

    @classmethod
    def from_entity_data(cls, user_data):
        return cls(
            user_data["name"],
            user_data["attrib"]["fullName"],
            user_data["attrib"]["email"],
            user_data["attrib"]["avatarUrl"],
            user_data["active"],
        )


class UsersModel:
    def __init__(self, controller):
        self._controller = controller
        self._users_cache = CacheItem(default_factory=list)

    def get_user_items(self):
        """Get user items.

        Returns:
            List[UserItem]: List of user items.

        """
        self._invalidate_cache()
        return self._users_cache.get_data()

    def get_user_items_by_name(self):
        """Get user items by name.

        Implemented because most cases using this model need to find
        user information by username.

        Returns:
            Dict[str, UserItem]: Dictionary of user items by name.

        """
        return {
            user_item.username: user_item
            for user_item in self.get_user_items()
        }

    def get_user_item_by_username(self, username):
        """Get user item by username.

        Args:
            username (str): Username.

        Returns:
            Union[UserItem, None]: User item or None if not found.

        """
        self._invalidate_cache()
        for user_item in self.get_user_items():
            if user_item.username == username:
                return user_item
        return None

    def _invalidate_cache(self):
        if self._users_cache.is_valid:
            return
        self._users_cache.update_data([
            UserItem.from_entity_data(user)
            for user in ayon_api.get_users()
        ])

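Usage-wise the model is a thin cached view over 'ayon_api.get_users()'. A short sketch, assuming a connected 'ayon_api' session (the usernames are illustrative, and the controller argument is unused by this class):

users_model = UsersModel(controller=None)

# Resolve task assignees to full names with a single server query.
items_by_name = users_model.get_user_items_by_name()
for username in ("jane.doe", "john.roe"):
    item = items_by_name.get(username)
    print(username, "->", item.full_name if item else "<unknown>")
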
@@ -290,6 +290,34 @@ class ActionDelegate(QtWidgets.QStyledItemDelegate):
        painter.drawPixmap(extender_x, extender_y, pix)


class ActionsProxyModel(QtCore.QSortFilterProxyModel):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)

    def lessThan(self, left, right):
        # Sort by action order and then by label
        left_value = left.data(ACTION_SORT_ROLE)
        right_value = right.data(ACTION_SORT_ROLE)

        # Values are same -> use super sorting
        if left_value == right_value:
            # Default behavior is using DisplayRole
            return super().lessThan(left, right)

        # Validate 'None' values
        if right_value is None:
            return True
        if left_value is None:
            return False
        # Sort values and handle incompatible types
        try:
            return left_value < right_value
        except TypeError:
            return True


class ActionsWidget(QtWidgets.QWidget):
    def __init__(self, controller, parent):
        super(ActionsWidget, self).__init__(parent)

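The 'lessThan' override encodes a specific ordering: sort by ACTION_SORT_ROLE, fall back to the display label on ties, push rows without a sort value to the end, and never crash on mixed types. The same ordering expressed on plain (sort_value, label) pairs, as a runnable sketch:

import functools


def action_less_than(left, right):
    # Mirrors ActionsProxyModel.lessThan on plain (sort_value, label) pairs.
    (left_value, left_label), (right_value, right_label) = left, right
    if left_value == right_value:
        # Default behavior falls back to the label (DisplayRole).
        return left_label.lower() < right_label.lower()
    if right_value is None:
        return True   # any sort value beats a missing one
    if left_value is None:
        return False
    try:
        return left_value < right_value
    except TypeError:
        return True   # incompatible types: keep a crash-free order


actions = [(2, "Publish"), (None, "Legacy"), (1, "Load"), (1, "Explore")]
actions.sort(key=functools.cmp_to_key(
    lambda a, b: -1 if action_less_than(a, b) else 1
))
print(actions)  # [(1, 'Explore'), (1, 'Load'), (2, 'Publish'), (None, 'Legacy')]
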
@@ -316,10 +344,7 @@ class ActionsWidget(QtWidgets.QWidget):

        model = ActionsQtModel(controller)

        proxy_model = QtCore.QSortFilterProxyModel()
        proxy_model.setSortCaseSensitivity(QtCore.Qt.CaseInsensitive)
        proxy_model.setSortRole(ACTION_SORT_ROLE)

        proxy_model = ActionsProxyModel()
        proxy_model.setSourceModel(model)
        view.setModel(proxy_model)

@@ -359,7 +384,8 @@ class ActionsWidget(QtWidgets.QWidget):
    def _on_model_refresh(self):
        self._proxy_model.sort(0)
        # Force repaint all items
        self._view.update()
        viewport = self._view.viewport()
        viewport.update()

    def _on_animation(self):
        time_now = time.time()

@@ -114,6 +114,7 @@ class VersionItem:
        thumbnail_id (Union[str, None]): Thumbnail id.
        published_time (Union[str, None]): Published time in format
            '%Y%m%dT%H%M%SZ'.
        status (Union[str, None]): Status name.
        author (Union[str, None]): Author.
        frame_range (Union[str, None]): Frame range.
        duration (Union[int, None]): Duration.

@@ -132,6 +133,7 @@ class VersionItem:
        thumbnail_id,
        published_time,
        author,
        status,
        frame_range,
        duration,
        handles,

@@ -146,6 +148,7 @@ class VersionItem:
        self.is_hero = is_hero
        self.published_time = published_time
        self.author = author
        self.status = status
        self.frame_range = frame_range
        self.duration = duration
        self.handles = handles

@@ -185,6 +188,7 @@ class VersionItem:
            "is_hero": self.is_hero,
            "published_time": self.published_time,
            "author": self.author,
            "status": self.status,
            "frame_range": self.frame_range,
            "duration": self.duration,
            "handles": self.handles,

@@ -488,6 +492,27 @@ class FrontendLoaderController(_BaseLoaderController):

        pass

    @abstractmethod
    def get_project_status_items(self, project_name, sender=None):
        """Status items for a project available on server.

        Triggers event topics "projects.statuses.refresh.started" and
        "projects.statuses.refresh.finished" with data:
            {
                "sender": sender,
                "project_name": project_name
            }

        Args:
            project_name (Union[str, None]): Project name.
            sender (Optional[str]): Sender who requested the items.

        Returns:
            list[StatusItem]: List of status items.
        """

        pass

    @abstractmethod
    def get_product_items(self, project_name, folder_ids, sender=None):
        """Product items for folder ids.

|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue