Merge branch 'develop' into feature/remove-maya-addon

# Conflicts:
#	server_addon/maya/client/ayon_maya/plugins/publish/collect_yeti_rig.py

commit 3afb29ff59
98 changed files with 4497 additions and 7952 deletions
client/ayon_core/pipeline/entity_uri.py (new file, 91 lines)
@@ -0,0 +1,91 @@
from typing import Optional, Union
from urllib.parse import urlparse, parse_qs


def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
    """Parse AYON entity URI into individual components.

    URI specification:
        ayon+entity://{project}/{folder}?product={product}
            &version={version}
            &representation={representation}
    URI example:
        ayon+entity://test/hero?product=modelMain&version=2&representation=usd

    However - if the netloc is `ayon://` it will by default also resolve as
    `ayon+entity://` on AYON server, thus we need to support both. The shorter
    `ayon://` is preferred for user readability.

    Example:
    >>> parse_ayon_entity_uri(
    >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"  # noqa: E501
    >>> )
    {'project': 'test', 'folderPath': '/char/villain',
     'product': 'modelMain', 'version': 2,
     'representation': 'usd'}
    >>> parse_ayon_entity_uri(
    >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"  # noqa: E501
    >>> )
    {'project': 'project', 'folderPath': '/folder',
     'product': 'renderMain', 'version': 3,
     'representation': 'exr'}

    Returns:
        dict[str, Union[str, int]]: The individual keys with their values as
            found in the AYON entity URI.

    """

    if not (uri.startswith("ayon+entity://") or uri.startswith("ayon://")):
        return {}

    parsed = urlparse(uri)
    if parsed.scheme not in {"ayon+entity", "ayon"}:
        return {}

    result = {
        "project": parsed.netloc,
        "folderPath": "/" + parsed.path.strip("/")
    }
    query = parse_qs(parsed.query)
    for key in ["product", "version", "representation"]:
        if key in query:
            result[key] = query[key][0]

    # Convert version to integer if it is a digit
    version = result.get("version")
    if version is not None and version.isdigit():
        result["version"] = int(version)

    return result


def construct_ayon_entity_uri(
        project_name: str,
        folder_path: str,
        product: str,
        version: Union[int, str],
        representation_name: str
) -> str:
    """Construct AYON entity URI from its components.

    Returns:
        str: AYON Entity URI to query entity path.
    """
    if isinstance(version, int) and version < 0:
        version = "hero"
    if not (isinstance(version, int) or version in {"latest", "hero"}):
        raise ValueError(
            "Version must either be integer, 'latest' or 'hero'. "
            "Got: {}".format(version)
        )
    return (
        "ayon://{project}/{folder_path}?product={product}&version={version}"
        "&representation={representation}".format(
            project=project_name,
            folder_path=folder_path,
            product=product,
            version=version,
            representation=representation_name
        )
    )
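
As a quick illustration of how the two helpers above compose, here is a minimal round-trip sketch; the project, folder and product names are invented for the example:

from ayon_core.pipeline.entity_uri import (
    construct_ayon_entity_uri,
    parse_ayon_entity_uri,
)

# Hypothetical entity values, purely for illustration.
uri = construct_ayon_entity_uri(
    project_name="test",
    folder_path="/char/villain",
    product="modelMain",
    version=2,
    representation_name="usd",
)

# Parsing the constructed URI recovers the individual components again.
assert parse_ayon_entity_uri(uri) == {
    "project": "test",
    "folderPath": "/char/villain",
    "product": "modelMain",
    "version": 2,
    "representation": "usd",
}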

@@ -5,7 +5,7 @@ import logging
import inspect
import collections
import numbers
from typing import Any
from typing import Optional, Union, Any

import ayon_api
@@ -739,6 +739,91 @@ def get_representation_path(representation, root=None):
    )


def get_representation_path_by_names(
        project_name: str,
        folder_path: str,
        product_name: str,
        version_name: str,
        representation_name: str,
        anatomy: Optional[Anatomy] = None) -> Optional[str]:
    """Get (latest) filepath for representation for folder and product.

    See `get_representation_by_names` for more details.

    Returns:
        str: The representation path if the representation exists.

    """
    representation = get_representation_by_names(
        project_name,
        folder_path,
        product_name,
        version_name,
        representation_name
    )
    if not representation:
        return

    if not anatomy:
        anatomy = Anatomy(project_name)

    path = get_representation_path_with_anatomy(representation, anatomy)
    return str(path).replace("\\", "/")


def get_representation_by_names(
        project_name: str,
        folder_path: str,
        product_name: str,
        version_name: Union[int, str],
        representation_name: str,
) -> Optional[dict]:
    """Get representation entity for folder and product.

    If version_name is "hero" then return the hero version.
    If version_name is "latest" then return the latest version.
    Otherwise use version_name as the exact integer version name.

    """
    if isinstance(folder_path, dict) and "name" in folder_path:
        # Allow explicitly passing folder entity
        folder_entity = folder_path
    else:
        folder_entity = ayon_api.get_folder_by_path(
            project_name, folder_path, fields=["id"])
        if not folder_entity:
            return

    if isinstance(product_name, dict) and "name" in product_name:
        # Allow explicitly passing product entity
        product_entity = product_name
    else:
        product_entity = ayon_api.get_product_by_name(
            project_name,
            product_name,
            folder_id=folder_entity["id"],
            fields=["id"])
        if not product_entity:
            return

    if version_name == "hero":
        version_entity = ayon_api.get_hero_version_by_product_id(
            project_name, product_id=product_entity["id"])
    elif version_name == "latest":
        version_entity = ayon_api.get_last_version_by_product_id(
            project_name, product_id=product_entity["id"])
    else:
        version_entity = ayon_api.get_version_by_name(
            project_name, version_name, product_id=product_entity["id"])
    if not version_entity:
        return

    return ayon_api.get_representation_by_name(
        project_name, representation_name, version_id=version_entity["id"])
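
As a usage sketch of the helpers above (all names here are placeholders, not real entities), a representation's published filepath can be resolved in one call:

from ayon_core.pipeline.load.utils import get_representation_path_by_names

# Hypothetical names, purely for illustration.
path = get_representation_path_by_names(
    project_name="test",
    folder_path="/char/villain",
    product_name="modelMain",
    version_name="latest",  # or "hero", or an exact integer version
    representation_name="usd",
)
if path is None:
    print("No such representation was published yet.")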


def is_compatible_loader(Loader, context):
    """Return whether a loader is compatible with a context.
@@ -4,6 +4,7 @@ import inspect
import copy
import tempfile
import xml.etree.ElementTree
from typing import Optional, Union

import pyblish.util
import pyblish.plugin

@@ -20,7 +21,6 @@ from ayon_core.pipeline import (
    Anatomy
)
from ayon_core.pipeline.plugin_discover import DiscoverResult

from .constants import (
    DEFAULT_PUBLISH_TEMPLATE,
    DEFAULT_HERO_PUBLISH_TEMPLATE,
@@ -933,3 +933,48 @@ def get_publish_instance_families(instance):
        families.discard(family)
    output.extend(families)
    return output


def get_instance_expected_output_path(
    instance: pyblish.api.Instance,
    representation_name: str,
    ext: Union[str, None],
    version: Optional[int] = None
):
    """Return the expected publish filepath for a representation in instance.

    This does not validate whether the instance has any representation by the
    given name, extension and/or version.

    Arguments:
        instance (pyblish.api.Instance): Publish instance.
        representation_name (str): Representation name.
        ext (Union[str, None]): Extension for the file.
            When None, `ext` will be set to the representation name.
        version (Optional[int]): If provided, force the path to format to
            this particular version.

    Returns:
        str: Resolved path.

    """
    if ext is None:
        ext = representation_name
    if version is None:
        version = instance.data["version"]

    context = instance.context
    anatomy = context.data["anatomy"]

    template_data = copy.deepcopy(instance.data["anatomyData"])
    template_data.update({
        "ext": ext,
        "representation": representation_name,
        "variant": instance.data.get("variant"),
        "version": version
    })

    path_template_obj = anatomy.get_template_item("publish", "default")["path"]
    template_filled = path_template_obj.format_strict(template_data)
    return os.path.normpath(template_filled)
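
A minimal sketch of how a publish plugin might use this helper; the plugin and representation name are hypothetical, not part of the codebase:

import pyblish.api

from ayon_core.pipeline.publish.lib import get_instance_expected_output_path


class LogExpectedUsdPath(pyblish.api.InstancePlugin):
    """Hypothetical plugin: log where a 'usd' representation would publish."""

    order = pyblish.api.IntegratorOrder - 0.1
    families = ["usd"]

    def process(self, instance):
        # ext=None falls back to the representation name ("usd")
        path = get_instance_expected_output_path(
            instance, representation_name="usd", ext=None)
        self.log.info("Expected output path: %s", path)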
client/ayon_core/pipeline/usdlib.py
@@ -1,92 +1,241 @@
import dataclasses
import os
import re
import logging

import ayon_api
try:
    from pxr import Usd, UsdGeom, Sdf, Kind
    from pxr import UsdGeom, Sdf, Kind
except ImportError:
    # Allow to fall back on Multiverse 6.3.0+ pxr usd library
    from mvpxr import Usd, UsdGeom, Sdf, Kind

from ayon_core.pipeline import Anatomy, get_current_project_name
from ayon_core.pipeline.template_data import get_template_data
    from mvpxr import UsdGeom, Sdf, Kind

log = logging.getLogger(__name__)


# The predefined steps order used for bootstrapping USD Shots and Assets.
# These are ordered from strongest to weakest opinions, like in USD.
PIPELINE = {
    "shot": [
        "usdLighting",
        "usdFx",
        "usdSimulation",
        "usdAnimation",
        "usdLayout",
    ],
    "asset": ["usdShade", "usdModel"],
}


@dataclasses.dataclass
class Layer:
    layer: Sdf.Layer
    path: str
    # Allow to anchor a layer to another so that when the layer would be
    # exported it'd write itself out relative to its anchor
    anchor: 'Layer' = None

    @property
    def identifier(self):
        return self.layer.identifier

    def get_full_path(self):
        """Return full path relative to the anchor layer"""
        if not os.path.isabs(self.path) and self.anchor:
            anchor_path = self.anchor.get_full_path()
            root = os.path.dirname(anchor_path)
            return os.path.normpath(os.path.join(root, self.path))
        return self.path

    def export(self, path=None, args=None):
        """Save the layer"""
        if path is None:
            path = self.get_full_path()

        if args is None:
            args = self.layer.GetFileFormatArguments()

        self.layer.Export(path, args=args)

    @classmethod
    def create_anonymous(cls, path, tag="LOP", anchor=None):
        """Create an anonymous layer instance.

        Arguments:
            path (str): The layer's filepath.
            tag (Optional[str]): The tag to give to the anonymous layer.
                This defaults to 'LOP' because Houdini requires that tag for
                its in-memory layers that it will be able to manage. In other
                integrations no similar requirements have been found so it
                was deemed a 'safe' default for now.
            anchor (Optional[Layer]): Another layer to relatively anchor to.
        """
        sdf_layer = Sdf.Layer.CreateAnonymous(tag)
        return cls(layer=sdf_layer, path=path, anchor=anchor)
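
A small sketch of how this Layer dataclass can anchor an anonymous layer to a root layer; the paths are invented for the example:

from pxr import Sdf

root = Layer(layer=Sdf.Layer.CreateAnonymous(), path="/publish/asset.usd")
payload = Layer.create_anonymous("./payload.usd", anchor=root)

# Resolves relative to the anchor's directory: /publish/payload.usd
print(payload.get_full_path())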

def create_asset(
    filepath, asset_name, reference_layers, kind=Kind.Tokens.component
def setup_asset_layer(
        layer,
        asset_name,
        reference_layers=None,
        kind=Kind.Tokens.component,
        define_class=True,
        force_add_payload=False,
        set_payload_path=False
):
    """
    Creates an asset file that consists of a top level layer and sublayers
    for shading and geometry.
    Adds an asset prim to the layer with the `reference_layers` added as
    references for e.g. geometry and shading.

    The referenced layers will be moved into a separate `./payload.usd` file
    that the asset file uses to allow deferred loading of the heavier
    geometrical data. An example would be:

    asset.usd   <-- out filepath
      payload.usd   <-- always automatically added in-between
        look.usd    <-- reference layer 0 from `reference_layers` argument
        model.usd   <-- reference layer 1 from `reference_layers` argument

    If `define_class` is enabled then a `/__class__/{asset_name}` class
    definition will be created that the root asset inherits from.

    Examples:
        >>> create_asset("/path/to/asset.usd",
        >>>     asset_name="test",
        >>>     reference_layers=["./model.usd", "./look.usd"])

    Returns:
        List[Tuple[Sdf.Layer, str]]: List of created layers with their
            preferred output save paths.

    Args:
        filepath (str): Filepath where the asset.usd file will be saved.
        layer (Sdf.Layer): Layer to set up the asset structure for.
        asset_name (str): The name for the Asset identifier and default prim.
        reference_layers (list): USD Files to reference in the asset.
            Note that the bottom layer (first file, like a model) would
            be last in the list. The strongest layer will be the first
            index.
        kind (pxr.Kind): A USD Kind for the root asset.
        define_class (bool): Define a `/__class__/{asset_name}` class which
            the root asset prim will inherit from.
        force_add_payload (bool): Generate payload layer even if no
            reference paths are set - thus generating an empty layer.
        set_payload_path (bool): Whether to directly set the payload asset
            path to `./payload.usd` or not. Defaults to False.

    """
    # Define root prim for the asset and make it the default for the stage.
    prim_name = asset_name

    if define_class:
        class_prim = Sdf.PrimSpec(
            layer.pseudoRoot,
            "__class__",
            Sdf.SpecifierClass,
        )
        Sdf.PrimSpec(
            class_prim,
            prim_name,
            Sdf.SpecifierClass,
        )

    asset_prim = Sdf.PrimSpec(
        layer.pseudoRoot,
        prim_name,
        Sdf.SpecifierDef,
        "Xform"
    )

    if define_class:
        asset_prim.inheritPathList.prependedItems[:] = [
            "/__class__/{}".format(prim_name)
        ]

    # Define Kind
    # Usually we will "loft up" the kind authored into the exported geometry
    # layer rather than re-stamping here; we'll leave that for a later
    # tutorial, and just be explicit here.
    asset_prim.kind = kind

    # Set asset info
    asset_prim.assetInfo["name"] = asset_name
    asset_prim.assetInfo["identifier"] = "%s/%s.usd" % (asset_name,
                                                        asset_name)

    # asset.assetInfo["version"] = asset_version
    set_layer_defaults(layer, default_prim=asset_name)

    created_layers = []

    # Add references to the asset prim
    if force_add_payload or reference_layers:
        # Create a relative payload file to filepath through which we
        # sublayer the heavier payloads
        # Prefix with `LOP` just so that if a Houdini ROP were to save
        # the nodes it's capable of exporting with explicit save path
        payload_layer = Sdf.Layer.CreateAnonymous("LOP",
                                                  args={"format": "usda"})
        set_layer_defaults(payload_layer, default_prim=asset_name)
        created_layers.append(Layer(layer=payload_layer,
                                    path="./payload.usd"))

        # Add payload
        if set_payload_path:
            payload_identifier = "./payload.usd"
        else:
            payload_identifier = payload_layer.identifier

        asset_prim.payloadList.prependedItems[:] = [
            Sdf.Payload(assetPath=payload_identifier)
        ]

        # Add sublayers to the payload layer
        # Note: Sublayering is tricky because it requires that the sublayers
        #   actually define the path at defaultPrim otherwise the payload
        #   reference will not find the defaultPrim and turn up empty.
        if reference_layers:
            for ref_layer in reference_layers:
                payload_layer.subLayerPaths.append(ref_layer)

    return created_layers
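
A brief hedged sketch of calling setup_asset_layer directly on an in-memory layer; the asset name and reference paths are invented:

from pxr import Sdf

layer = Sdf.Layer.CreateAnonymous()
created = setup_asset_layer(
    layer,
    asset_name="hero",
    # Strongest layer first, per the docstring note above
    reference_layers=["./look.usd", "./model.usd"],
    set_payload_path=True,
)
for extra_layer in created:
    print(extra_layer.path)  # -> ./payload.usd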


def create_asset(
        filepath,
        asset_name,
        reference_layers=None,
        kind=Kind.Tokens.component,
        define_class=True
):
    """Creates and saves a prepared asset stage layer.

    Creates an asset file that consists of a top level asset prim, asset
    info and references in the provided `reference_layers`.

    Returns:
        list: Created layers

    """
    # Also see create_asset.py in PixarAnimationStudios/USD endToEnd example

    log.info("Creating asset at %s", filepath)
    sdf_layer = Sdf.Layer.CreateAnonymous()
    layer = Layer(layer=sdf_layer, path=filepath)

    created_layers = setup_asset_layer(
        layer=sdf_layer,
        asset_name=asset_name,
        reference_layers=reference_layers,
        kind=kind,
        define_class=define_class,
        set_payload_path=True
    )
    for created_layer in created_layers:
        created_layer.anchor = layer
        created_layer.export()

    # Make the layer ascii - good for readability, plus the file is small
    root_layer = Sdf.Layer.CreateNew(filepath, args={"format": "usda"})
    stage = Usd.Stage.Open(root_layer)
    log.debug("Creating asset at %s", filepath)
    layer.export(args={"format": "usda"})

    # Define a prim for the asset and make it the default for the stage.
    asset_prim = UsdGeom.Xform.Define(stage, "/%s" % asset_name).GetPrim()
    stage.SetDefaultPrim(asset_prim)

    # Let viewing applications know how to orient a free camera properly
    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)

    # Usually we will "loft up" the kind authored into the exported geometry
    # layer rather than re-stamping here; we'll leave that for a later
    # tutorial, and just be explicit here.
    model = Usd.ModelAPI(asset_prim)
    if kind:
        model.SetKind(kind)

    model.SetAssetName(asset_name)
    model.SetAssetIdentifier("%s/%s.usd" % (asset_name, asset_name))

    # Add references to the asset prim
    references = asset_prim.GetReferences()
    for reference_filepath in reference_layers:
        references.AddReference(reference_filepath)

    stage.GetRootLayer().Save()
    return [layer] + created_layers


def create_shot(filepath, layers, create_layers=False):
    """Create a shot with separate layers for departments.

    Examples:
        >>> create_shot("/path/to/shot.usd",
        >>>     layers=["lighting.usd", "fx.usd", "animation.usd"])
        "/path/to/shot.usd"

    Args:
        filepath (str): Filepath where the shot.usd file will be saved.
        layers (str): When provided this will be added verbatim in the
        layers (list): When provided this will be added verbatim in the
            subLayerPaths layers. When the provided layer paths do not exist
            they are generated using Sdf.Layer.CreateNew
        create_layers (bool): Whether to create the stub layers on disk if
            they do not exist yet.

@@ -95,10 +244,9 @@ def create_shot(filepath, layers, create_layers=False):

    """
    # Also see create_shot.py in PixarAnimationStudios/USD endToEnd example
    root_layer = Sdf.Layer.CreateAnonymous()

    stage = Usd.Stage.CreateNew(filepath)
    log.info("Creating shot at %s" % filepath)

    created_layers = [root_layer]
    for layer_path in layers:
        if create_layers and not os.path.exists(layer_path):
            # We use the Sdf API here to quickly create layers. Also, we're

@@ -108,143 +256,114 @@ def create_shot(filepath, layers, create_layers=False):
            if not os.path.exists(layer_folder):
                os.makedirs(layer_folder)

            Sdf.Layer.CreateNew(layer_path)
            new_layer = Sdf.Layer.CreateNew(layer_path)
            created_layers.append(new_layer)

        stage.GetRootLayer().subLayerPaths.append(layer_path)
        root_layer.subLayerPaths.append(layer_path)

    # Let viewing applications know how to orient a free camera properly
    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
    stage.GetRootLayer().Save()
    set_layer_defaults(root_layer)
    log.debug("Creating shot at %s" % filepath)
    root_layer.Export(filepath, args={"format": "usda"})

    return filepath
    return created_layers


def create_model(filename, folder_path, variant_product_names):
    """Create a USD Model file.

    For each of the variation paths it will payload the path and set its
    relevant variation name.

    """
    project_name = get_current_project_name()
    folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
    assert folder_entity, "Folder not found: %s" % folder_path

    variants = []
    for product_name in variant_product_names:
        prefix = "usdModel"
        if product_name.startswith(prefix):
            # Strip off `usdModel_`
            variant = product_name[len(prefix):]
        else:
            raise ValueError(
                "Model products must start with usdModel: %s" % product_name
            )

        path = get_usd_master_path(
            folder_entity=folder_entity,
            product_name=product_name,
            representation="usd"
        )
        variants.append((variant, path))

    stage = _create_variants_file(
        filename,
        variants=variants,
        variantset="model",
        variant_prim="/root",
        reference_prim="/root/geo",
        as_payload=True,
    )

    UsdGeom.SetStageMetersPerUnit(stage, 1)
    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)

    # modelAPI = Usd.ModelAPI(root_prim)
    # modelAPI.SetKind(Kind.Tokens.component)

    # See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
    # for more on assetInfo
    # modelAPI.SetAssetName(asset)
    # modelAPI.SetAssetIdentifier(asset)

    stage.GetRootLayer().Save()


def add_ordered_sublayer(layer, contribution_path, layer_id, order=None,
                         add_sdf_arguments_metadata=True):
    """Add sublayer paths in the Sdf.Layer at given "orders".

    USD does not provide a way to set metadata per sublayer entry, but we can
    'sneak it in' by adding it as part of the file url after :SDF_FORMAT_ARGS:
    There they will then just be unused args that we can parse later again
    to access our data.

    A higher order will appear earlier in the subLayerPaths as a stronger
    opinion. An unordered layer (`order=None`) will be stronger than any
    ordered opinion and thus will be inserted at the start of the list.

    Args:
        layer (Sdf.Layer): Layer to add sublayers in.
        contribution_path (str): Path/URI to add.
        layer_id (str): Token that if found for an existing layer it will
            replace that layer.
        order (Any[int, None]): Order to place the contribution in
            the sublayers. When `None` no ordering is considered nor will
            ordering metadata be written if `add_sdf_arguments_metadata` is
            False.
        add_sdf_arguments_metadata (bool): Add metadata into the filepath
            to store the `layer_id` and `order` so ordering can be maintained
            in the future as intended.

    Returns:
        str: The resulting contribution path (which may include the
            sdf format args metadata if enabled)

    """
    # Add the order with the contribution path so that for future
    # contributions we can again use it to magically fit into the
    # ordering. We put this in the path because sublayer paths do
    # not allow customData to be stored.
    def _format_path(path, layer_id, order):
        # TODO: Avoid this hack to store 'order' and 'layer' metadata
        #   for sublayers; in USD sublayers can't hold customdata
        if not add_sdf_arguments_metadata:
            return path
        data = {"layer_id": str(layer_id)}
        if order is not None:
            data["order"] = str(order)
        return Sdf.Layer.CreateIdentifier(path, data)

    # If the layer was already in the layers, then replace it
    for index, existing_path in enumerate(layer.subLayerPaths):
        args = get_sdf_format_args(existing_path)
        existing_layer = args.get("layer_id")
        if existing_layer == layer_id:
            # Put it in the same position where it was before when swapping
            # it with the original, also take over its order metadata
            order = args.get("order")
            if order is not None:
                order = int(order)
            else:
                order = None
            contribution_path = _format_path(contribution_path,
                                             order=order,
                                             layer_id=layer_id)
            log.debug(
                f"Replacing existing layer: {layer.subLayerPaths[index]} "
                f"-> {contribution_path}"
            )
            layer.subLayerPaths[index] = contribution_path
            return contribution_path

    contribution_path = _format_path(contribution_path,
                                     order=order,
                                     layer_id=layer_id)

    # If an order is defined and other layers are ordered, then place it
    # before the first entry whose existing order is lower
    if order is not None:
        for index, existing_path in enumerate(layer.subLayerPaths):
            args = get_sdf_format_args(existing_path)
            existing_order = args.get("order")
            if existing_order is not None and int(existing_order) < order:
                log.debug(
                    f"Inserting new layer at {index}: {contribution_path}"
                )
                layer.subLayerPaths.insert(index, contribution_path)
                return contribution_path

        # Weakest ordered opinion
        layer.subLayerPaths.append(contribution_path)
        return contribution_path

    # If no order was specified, put the sublayer at the start
    # as the strongest opinion
    log.debug(f"Appending new layer: {contribution_path}")
    layer.subLayerPaths.insert(0, contribution_path)
    return contribution_path
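
To illustrate the ordering behavior, a hedged sketch with invented layer ids and orders:

from pxr import Sdf

layer = Sdf.Layer.CreateAnonymous()

# Hypothetical department contributions; a higher order is stronger.
add_ordered_sublayer(layer, "model.usd", layer_id="model", order=100)
add_ordered_sublayer(layer, "look.usd", layer_id="look", order=200)

# "look" ends up first (strongest), e.g.:
# ['look.usd:SDF_FORMAT_ARGS:layer_id=look&order=200',
#  'model.usd:SDF_FORMAT_ARGS:layer_id=model&order=100']
print(list(layer.subLayerPaths))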


def create_shade(filename, folder_path, variant_product_names):
    """Create a master USD shade file for an asset.

    For each available model variation this should generate a reference
    to a `usdShade_{modelVariant}` product.

    """
    project_name = get_current_project_name()
    folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
    assert folder_entity, "Folder not found: %s" % folder_path

    variants = []

    for product_name in variant_product_names:
        prefix = "usdModel"
        if product_name.startswith(prefix):
            # Strip off `usdModel_`
            variant = product_name[len(prefix):]
        else:
            raise ValueError(
                "Model products must start with usdModel: %s" % product_name
            )

        shade_product_name = re.sub(
            "^usdModel", "usdShade", product_name
        )
        path = get_usd_master_path(
            folder_entity=folder_entity,
            product_name=shade_product_name,
            representation="usd"
        )
        variants.append((variant, path))

    stage = _create_variants_file(
        filename, variants=variants, variantset="model", variant_prim="/root"
    )

    stage.GetRootLayer().Save()


def create_shade_variation(filename, folder_path, model_variant,
                           shade_variants):
    """Create the master Shade file for a specific model variant.

    This should reference all shade variants for the specific model variant.

    """
    project_name = get_current_project_name()
    folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
    assert folder_entity, "Folder not found: %s" % folder_path

    variants = []
    for variant in shade_variants:
        product_name = "usdShade_{model}_{shade}".format(
            model=model_variant, shade=variant
        )
        path = get_usd_master_path(
            folder_entity=folder_entity,
            product_name=product_name,
            representation="usd"
        )
        variants.append((variant, path))

    stage = _create_variants_file(
        filename, variants=variants, variantset="shade", variant_prim="/root"
    )

    stage.GetRootLayer().Save()

def _create_variants_file(
    filename,
def add_variant_references_to_layer(
    variants,
    variantset,
    default_variant=None,

@@ -252,112 +371,316 @@ def _create_variants_file(
    reference_prim=None,
    set_default_variant=True,
    as_payload=False,
    skip_variant_on_single_file=True,
    skip_variant_on_single_file=False,
    layer=None
):
    """Add or set a prim's variants to reference specified paths in the
    layer.

    Note:
        This does not clear any of the other opinions than replacing
        `prim.referenceList.prependedItems` with the new reference.
        If `as_payload=True` then this only does it for payloads and leaves
        references as they were, intact.

    Note:
        If `skip_variant_on_single_file=True` it does *not* check whether
        any other variants exist; it only checks whether you are currently
        adding more than one, since it'd be hard to find out whether this
        was previously also skipped and, now that you're adding a new one,
        should suddenly also become its original 'variant'. As such it's
        recommended to keep this disabled unless you know you're not
        updating the file later into the same variant set.

    Examples:
        >>> layer = add_variant_references_to_layer("model.usd",
        >>>     variants=[
        >>>         ("main", "main.usd"),
        >>>         ("damaged", "damaged.usd"),
        >>>         ("twisted", "twisted.usd")
        >>>     ],
        >>>     variantset="model")
        >>> layer.Export("model.usd", args={"format": "usda"})

    Arguments:
        variants (List[List[str, str]]): List of two-tuples of variant name
            to the filepath that should be referenced in for that variant.
        variantset (str): Name of the variant set.
        default_variant (str): Default variant to set. If not provided
            the first variant will be used.
        variant_prim (str): The prim path to add the variant set to.
        reference_prim (str): Path to the reference prim where to add the
            references and variant sets.
        set_default_variant (bool): Whether to set the default variant.
            When False no default variant will be set, even if a value
            was provided to `default_variant`.
        as_payload (bool): When enabled, instead of referencing use payloads.
        skip_variant_on_single_file (bool): If this is enabled and only
            a single variant is provided then do not create the variant set
            but just reference that single file.
        layer (Optional[Sdf.Layer]): When provided operate on this layer,
            otherwise create an anonymous layer in memory.

    Returns:
        Sdf.Layer: The layer with the added references inside the variants.

    """
    root_layer = Sdf.Layer.CreateNew(filename, args={"format": "usda"})
    stage = Usd.Stage.Open(root_layer)

    root_prim = stage.DefinePrim(variant_prim)
    stage.SetDefaultPrim(root_prim)

    def _reference(path):
        """Reference/Payload path depending on function arguments"""
        if reference_prim:
            prim = stage.DefinePrim(reference_prim)
        else:
            prim = root_prim

        if as_payload:
            # Payload
            prim.GetPayloads().AddPayload(Sdf.Payload(path))
        else:
            # Reference
            prim.GetReferences().AddReference(Sdf.Reference(path))

    log.info(filename)

    if layer is None:
        layer = Sdf.Layer.CreateAnonymous()
        set_layer_defaults(layer, default_prim=variant_prim.strip("/"))

    prim_path_to_get_variants = Sdf.Path(variant_prim)
    root_prim = get_or_define_prim_spec(layer, variant_prim, "Xform")

    # TODO: Define why there's a need for separate variant_prim and
    #   reference_prim attribute. When should they differ? Does it even work?
    if not reference_prim:
        reference_prim = root_prim
    else:
        reference_prim = get_or_define_prim_spec(layer, reference_prim,
                                                 "Xform")

    assert variants, "Must have variants, got: %s" % variants

    if skip_variant_on_single_file and len(variants) == 1:
        # Reference directly, no variants
        variant_path = variants[0][1]
        _reference(variant_path)
        if as_payload:
            # Payload
            reference_prim.payloadList.prependedItems.append(
                Sdf.Payload(variant_path)
            )
        else:
            # Reference
            reference_prim.referenceList.prependedItems.append(
                Sdf.Reference(variant_path)
            )

        log.info("Non-variants..")
        log.info("Path: %s" % variant_path)
        log.debug("Creating without variants due to single file only.")
        log.debug("Path: %s", variant_path)

    else:
        # Variants
        append = Usd.ListPositionBackOfAppendList
        variant_set = root_prim.GetVariantSets().AddVariantSet(
            variantset, append
        )

        for variant, variant_path in variants:

        for variant, variant_filepath in variants:
            if default_variant is None:
                default_variant = variant

            variant_set.AddVariant(variant, append)
            variant_set.SetVariantSelection(variant)
            with variant_set.GetVariantEditContext():
                _reference(variant_path)
            set_variant_reference(layer,
                                  prim_path=prim_path_to_get_variants,
                                  variant_selections=[[variantset, variant]],
                                  path=variant_filepath,
                                  as_payload=as_payload)

            log.info("Variants..")
            log.info("Variant: %s" % variant)
            log.info("Path: %s" % variant_path)
        if set_default_variant and default_variant is not None:
            # Set default variant selection
            root_prim.variantSelections[variantset] = default_variant

    if set_default_variant:
        variant_set.SetVariantSelection(default_variant)

    return stage
    return layer
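
A hedged usage sketch of the new variant workflow, assuming the variant_prim keyword documented above; variant names and filepaths are invented:

layer = add_variant_references_to_layer(
    variants=[
        ("main", "main.usd"),
        ("damaged", "damaged.usd"),
    ],
    variantset="model",
    variant_prim="/asset",
)
layer.Export("asset.usd", args={"format": "usda"})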


def get_usd_master_path(folder_entity, product_name, representation):
    """Get the filepath for a .usd file of a product.

    This will return the path to an unversioned master file generated by
    `usd_master_file.py`.

    Args:
        folder_entity (Union[str, dict]): Folder entity.
        product_name (str): Product name.
        representation (str): Representation name.

    """
    project_name = get_current_project_name()
    project_entity = ayon_api.get_project(project_name)
    anatomy = Anatomy(project_name, project_entity=project_entity)

    template_data = get_template_data(project_entity, folder_entity)
    template_data.update({
        "product": {
            "name": product_name
        },
        "subset": product_name,
        "representation": representation,
        "version": 0,  # stub version zero
    })

    template_obj = anatomy.get_template_item(
        "publish", "default", "path"
    )
    path = template_obj.format_strict(template_data)

    # Remove the version folder
    product_folder = os.path.dirname(os.path.dirname(path))
    master_folder = os.path.join(product_folder, "master")
    fname = "{0}.{1}".format(product_name, representation)

    return os.path.join(master_folder, fname).replace("\\", "/")


def set_layer_defaults(layer,
                       up_axis=UsdGeom.Tokens.y,
                       meters_per_unit=1.0,
                       default_prim=None):
    """Set some default metadata for the Sdf.Layer.

    Arguments:
        layer (Sdf.Layer): The layer to set defaults for via the Sdf API.
        up_axis (UsdGeom.Token): Which axis is the up-axis.
        meters_per_unit (float): Meters per unit.
        default_prim (Optional[str]): Default prim name.

    """
    # Set default prim
    if default_prim is not None:
        layer.defaultPrim = default_prim

    # Let viewing applications know how to orient a free camera properly
    # Similar to: UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
    layer.pseudoRoot.SetInfo(UsdGeom.Tokens.upAxis, up_axis)

    # Set meters per unit
    layer.pseudoRoot.SetInfo(UsdGeom.Tokens.metersPerUnit,
                             float(meters_per_unit))


def get_or_define_prim_spec(layer, prim_path, type_name):
    """Get or create a PrimSpec in the layer.

    Note:
        This creates a Sdf.PrimSpec with Sdf.SpecifierDef but if the PrimSpec
        already exists this will not force it to be a Sdf.SpecifierDef and
        it may remain what it was, e.g. Sdf.SpecifierOver

    Args:
        layer (Sdf.Layer): The layer to create it in.
        prim_path (Any[str, Sdf.Path]): Prim path to create.
        type_name (str): Type name for the PrimSpec.
            This will only be set if the prim does not exist in the layer
            yet. It does not update type for an existing prim.

    Returns:
        Sdf.PrimSpec: The PrimSpec in the layer for the given prim path.

    """
    prim_spec = layer.GetPrimAtPath(prim_path)
    if prim_spec:
        return prim_spec

    prim_spec = Sdf.CreatePrimInLayer(layer, prim_path)
    prim_spec.specifier = Sdf.SpecifierDef
    prim_spec.typeName = type_name
    return prim_spec


def variant_nested_prim_path(prim_path, variant_selections):
    """Return the Sdf.Path for a nested variant selection at prim path.

    Examples:
        >>> prim_path = Sdf.Path("/asset")
        >>> variant_spec = variant_nested_prim_path(
        >>>     prim_path,
        >>>     variant_selections=[["model", "main"], ["look", "main"]]
        >>> )
        >>> variant_spec.path

    Args:
        prim_path (Sdf.PrimPath): The prim path to create the spec in.
        variant_selections (List[List[str, str]]): A list of variant set
            names and variant names to get the prim spec in.

    Returns:
        Sdf.Path: The variant prim path

    """
    variant_prim_path = Sdf.Path(prim_path)
    for variant_set_name, variant_name in variant_selections:
        variant_prim_path = variant_prim_path.AppendVariantSelection(
            variant_set_name, variant_name)
    return variant_prim_path


def add_ordered_reference(
        layer,
        prim_path,
        reference,
        order
):
    """Add reference alongside other ordered references.

    Args:
        layer (Sdf.Layer): Layer to operate in.
        prim_path (Sdf.Path): Prim path to reference into.
            This may include variant selections to reference into a prim
            inside the variant selection.
        reference (Sdf.Reference): Reference to add.
        order (int): Order.

    Returns:
        Sdf.PrimSpec: The prim spec for the prim path.

    """
    assert isinstance(order, int), "order must be integer"

    # Sdf.Reference is immutable, see: `pxr/usd/sdf/wrapReference.cpp`
    # A Sdf.Reference can't be edited in Python so we create a new entry
    # matching the original with the extra data entry added.
    custom_data = reference.customData
    custom_data["ayon_order"] = order
    reference = Sdf.Reference(
        assetPath=reference.assetPath,
        primPath=reference.primPath,
        layerOffset=reference.layerOffset,
        customData=custom_data
    )

    # TODO: inherit type from outside of variants if it has it
    prim_spec = get_or_define_prim_spec(layer, prim_path, "Xform")

    # Insert new entry at correct order
    entries = list(prim_spec.referenceList.prependedItems)

    if not entries:
        prim_spec.referenceList.prependedItems.append(reference)
        return prim_spec

    for index, existing_ref in enumerate(entries):
        existing_order = existing_ref.customData.get("order")
        if existing_order is not None and existing_order < order:
            log.debug(
                f"Inserting new reference at {index}: {reference}"
            )
            entries.insert(index, reference)
            break
    else:
        prim_spec.referenceList.prependedItems.append(reference)
        return prim_spec

    prim_spec.referenceList.prependedItems[:] = entries
    return prim_spec


def parse_avalon_uri(uri):
    # URI Pattern: avalon://{folder}/{product}.{ext}
    pattern = r"avalon://(?P<folder>[^/.]*)/(?P<product>[^/]*)\.(?P<ext>.*)"
    if uri.startswith("avalon://"):
        match = re.match(pattern, uri)
        if match:
            return match.groupdict()


def set_variant_reference(
        sdf_layer,
        prim_path,
        variant_selections,
        path,
        as_payload=False,
        append=True
):
    """Get or define variant selection at prim path and add a reference.

    If the variant prim already exists the prepended references are replaced
    with a reference to `path`; it is overridden.

    Args:
        sdf_layer (Sdf.Layer): Layer to operate in.
        prim_path (Any[str, Sdf.Path]): Prim path to add variant to.
        variant_selections (List[List[str, str]]): A list of variant set
            names and variant names to get the prim spec in.
        path (str): Path to reference or payload.
        as_payload (bool): When enabled it will generate a payload instead
            of a reference. Defaults to False.
        append (bool): When enabled it will append the reference or payload
            to the prepended items, otherwise it will replace them.

    Returns:
        Sdf.PrimSpec: The prim spec for the prim path at the given
            variant selection.

    """
    prim_path = Sdf.Path(prim_path)
    # TODO: inherit type from outside of variants if it has it
    get_or_define_prim_spec(sdf_layer, prim_path, "Xform")
    variant_prim_path = variant_nested_prim_path(prim_path,
                                                 variant_selections)
    variant_prim = get_or_define_prim_spec(sdf_layer,
                                           variant_prim_path,
                                           "Xform")
    # Replace the prepended references or payloads
    if as_payload:
        # Payload
        if append:
            variant_prim.payloadList.prependedItems.append(
                Sdf.Payload(assetPath=path)
            )
        else:
            variant_prim.payloadList.prependedItems[:] = [
                Sdf.Payload(assetPath=path)
            ]
    else:
        # Reference
        if append:
            variant_prim.referenceList.prependedItems.append(
                Sdf.Reference(assetPath=path)
            )
        else:
            variant_prim.referenceList.prependedItems[:] = [
                Sdf.Reference(assetPath=path)
            ]

    return variant_prim


def get_sdf_format_args(path):
    """Return SDF_FORMAT_ARGS parsed to `dict`"""
    _raw_path, data = Sdf.Layer.SplitIdentifier(path)
    return data
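
Finally, a small sketch of the SDF_FORMAT_ARGS metadata round trip that add_ordered_sublayer relies on; the values are invented:

from pxr import Sdf

identifier = Sdf.Layer.CreateIdentifier(
    "look.usd", {"layer_id": "look", "order": "200"})
# e.g. look.usd:SDF_FORMAT_ARGS:layer_id=look&order=200
print(identifier)

# Recovers {'layer_id': 'look', 'order': '200'}
print(get_sdf_format_args(identifier))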

@@ -64,7 +64,8 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
            "skeletalMesh",
            "xgen",
            "yeticacheUE",
            "tycache"
            "tycache",
            "usd"
        ]

    def process(self, instance):
@@ -0,0 +1,879 @@
from operator import attrgetter
import dataclasses
import os
from typing import Dict

import pyblish.api
from pxr import Sdf

from ayon_core.lib import (
    TextDef,
    BoolDef,
    UISeparatorDef,
    UILabelDef,
    EnumDef
)
from ayon_core.pipeline.usdlib import (
    get_or_define_prim_spec,
    add_ordered_reference,
    variant_nested_prim_path,
    setup_asset_layer,
    add_ordered_sublayer,
    set_layer_defaults
)
from ayon_core.pipeline.entity_uri import (
    construct_ayon_entity_uri,
    parse_ayon_entity_uri
)
from ayon_core.pipeline.load.utils import get_representation_path_by_names
from ayon_core.pipeline.publish.lib import get_instance_expected_output_path
from ayon_core.pipeline import publish


# This global toggle is here mostly for debugging purposes and should usually
# be True so that new publishes merge and extend on previous contributions.
# With this enabled a new variant model layer publish would e.g. merge with
# the model layer's other variants nicely, so you can build up an asset by
# individual publishes instead of having to republish every contribution at
# the same time.
BUILD_INTO_LAST_VERSIONS = True


@dataclasses.dataclass
class _BaseContribution:
    # What are we contributing?
    instance: pyblish.api.Instance  # instance that contributes it

    # Where are we contributing to?
    layer_id: str  # usually the department or task name
    target_product: str  # target product the layer should merge to

    order: int


class SublayerContribution(_BaseContribution):
    """Sublayer contribution"""


@dataclasses.dataclass
class VariantContribution(_BaseContribution):
    """Reference contribution within a Variant Set"""

    # Variant
    variant_set_name: str
    variant_name: str
    variant_is_default: bool  # Whether to author variant selection opinion


def get_representation_path_in_publish_context(
        context: pyblish.api.Context,
        project_name,
        folder_path,
        product_name,
        version_name,
        representation_name,
):
    """Return resolved path for product if present in publishing context.

    Allow resolving 'latest' paths from a publishing context's instances
    as if they will exist after publishing without them being integrated yet.

    Uses the first instance that has the same folder path and product name,
    and contains a representation with the passed name.

    Args:
        context (pyblish.api.Context): Publishing context.
        project_name (str): Project name.
        folder_path (str): Folder path.
        product_name (str): Product name.
        version_name (str): Version name.
        representation_name (str): Representation name.

    Returns:
        Union[str, None]: Returns the path if it could be resolved.

    """
    # The AYON publishing logic is set up in such a way that you can not
    # publish to another project. As such, we know if the project name we're
    # looking for doesn't match the publishing context it'll not be in there.
    if context.data["projectName"] != project_name:
        return

    if version_name == "hero":
        raise NotImplementedError(
            "Hero version resolving not implemented from context"
        )

    # Search first in publish context to allow resolving latest versions
    # from e.g. the current publish session if the context is provided
    specific_version = isinstance(version_name, int)
    for instance in context:
        if instance.data.get("folderPath") != folder_path:
            continue

        if instance.data.get("productName") != product_name:
            continue

        # Only consider if the instance has a representation by
        # that name
        representations = instance.data.get("representations", [])
        if not any(representation.get("name") == representation_name
                   for representation in representations):
            continue

        return get_instance_expected_output_path(
            instance,
            representation_name=representation_name,
            ext=None,
            version=version_name if specific_version else None
        )


def get_instance_uri_path(
        instance,
        resolve=True
):
    """Return path for instance's USD representation"""
    context = instance.context
    folder_path = instance.data["folderPath"]
    product_name = instance.data["productName"]
    project_name = context.data["projectName"]

    # Get the layer's published path
    path = construct_ayon_entity_uri(
        project_name=project_name,
        folder_path=folder_path,
        product=product_name,
        version="latest",
        representation_name="usd"
    )

    # Resolve contribution path
    # TODO: Remove this when Asset Resolver is used
    if resolve:
        query = parse_ayon_entity_uri(path)
        names = {
            "project_name": query["project"],
            "folder_path": query["folderPath"],
            "product_name": query["product"],
            "version_name": query["version"],
            "representation_name": query["representation"],
        }

        # We want to resolve the paths live from the publishing context
        path = get_representation_path_in_publish_context(context, **names)
        if path:
            return path

        # If for whatever reason we were unable to retrieve from the context
        # then get the path from an existing database entry
        path = get_representation_path_by_names(**names)

        # Ensure `None` for now is also a string
        path = str(path)

    return path


def get_last_publish(instance, representation="usd"):
    """Wrapper to quickly get last representation publish path"""
    return get_representation_path_by_names(
        project_name=instance.context.data["projectName"],
        folder_path=instance.data["folderPath"],
        product_name=instance.data["productName"],
        version_name="latest",
        representation_name=representation
    )


def add_representation(instance, name,
                       files, staging_dir, ext=None,
                       output_name=None):
    """Add a representation to publish and integrate.

    A representation must exist of either a single file or a
    single file sequence. It can *not* contain multiple files.

    For the integration to succeed the instance must provide the context
    for asset, frame range, etc. even though the representation can
    override some parts of it.

    Arguments:
        instance (pyblish.api.Instance): Publish instance.
        name (str): The representation name.
        files (str | List[str]): List of files or single file of the
            representation. This should be the filename only.
        staging_dir (str): The directory containing the files.
        ext (Optional[str]): Explicit extension for the output.
        output_name (Optional[str]): Output name suffix for the
            destination file to ensure the file is unique if
            multiple representations share the same extension.

    Returns:
        dict: Representation data for integration.

    """
    if ext is None:
        # TODO: Use filename
        ext = name

    representation = {
        "name": name,
        "ext": ext,
        "stagingDir": staging_dir,
        "files": files
    }
    if output_name:
        representation["outputName"] = output_name

    instance.data.setdefault("representations", []).append(representation)
    return representation
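
A hedged sketch of calling add_representation from within a plugin's process method; the file and staging directory are invented:

# Inside a publish plugin's process(self, instance):
representation = add_representation(
    instance,
    name="usd",
    files="asset.usd",                    # hypothetical staged file
    staging_dir="/tmp/publish_staging",   # hypothetical staging directory
)
# The integrator later picks this up from
# instance.data["representations"].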
|
||||
|
||||
|
||||
class CollectUSDLayerContributions(pyblish.api.InstancePlugin,
|
||||
publish.OpenPypePyblishPluginMixin):
|
||||
"""Collect the USD Layer Contributions and create dependent instances.
|
||||
|
||||
Our contributions go to the layer
|
||||
|
||||
Instance representation -> Department Layer -> Asset
|
||||
|
||||
So that for example:
|
||||
modelMain --> variant 'main' in model.usd -> asset.usd
|
||||
modelDamaged --> variant 'damaged' in model.usd -> asset.usd
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.35
|
||||
label = "Collect USD Layer Contributions (Asset/Shot)"
|
||||
families = ["usd"]
|
||||
enabled = True
|
||||
|
||||
# A contribution defines a contribution into a (department) layer which
|
||||
# will get layered into the target product, usually the asset or shot.
|
||||
# We need to at least know what it targets (e.g. where does it go into) and
|
||||
# in what order (which contribution is stronger?)
|
||||
# Preferably the bootstrapped data (e.g. the Shot) preserves metadata about
|
||||
# the contributions so that we can design a system where custom
|
||||
# contributions outside the predefined orders are possible to be
|
||||
# managed. So that if a particular asset requires an extra contribution
|
||||
# level, you can add itdirectly from the publisher at that particular
|
||||
# order. Future publishes will then see the existing contribution and will
|
||||
# persist adding it to future bootstraps at that order
|
||||
contribution_layers: Dict[str, int] = {
|
||||
# asset layers
|
||||
"model": 100,
|
||||
"assembly": 150,
|
||||
"groom": 175,
|
||||
"look": 200,
|
||||
"rig": 300,
|
||||
# shot layers
|
||||
"layout": 200,
|
||||
"animation": 300,
|
||||
"simulation": 400,
|
||||
"fx": 500,
|
||||
"lighting": 600,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, project_settings):
|
||||
# Override contribution_layers logic to turn data into Dict[str, int]
|
||||
plugin_settings = project_settings["core"]["publish"].get(
|
||||
"CollectUSDLayerContributions", {}
|
||||
)
|
||||
|
||||
cls.enabled = plugin_settings.get("enabled", cls.enabled)
|
||||
|
||||
# Define contribution layers via settings
|
||||
contribution_layers = {}
|
||||
for entry in plugin_settings.get("contribution_layers", []):
|
||||
contribution_layers[entry["name"]] = int(entry["order"])
|
||||
if contribution_layers:
|
||||
cls.contribution_layers = contribution_layers
|
||||
|
||||
    def process(self, instance):

        attr_values = self.get_attr_values_from_data(instance.data)
        if not attr_values.get("contribution_enabled"):
            return

        instance.data["productGroup"] = (
            instance.data.get("productGroup") or "USD Layer"
        )

        # Allow formatting in variant set name and variant name
        data = instance.data.copy()
        data["layer"] = attr_values["contribution_layer"]
        for key in [
            "contribution_variant_set_name",
            "contribution_variant"
        ]:
            attr_values[key] = attr_values[key].format(**data)

        # Define contribution
        order = self.contribution_layers.get(
            attr_values["contribution_layer"], 0
        )

        if attr_values["contribution_apply_as_variant"]:
            contribution = VariantContribution(
                instance=instance,
                layer_id=attr_values["contribution_layer"],
                target_product=attr_values["contribution_target_product"],
                variant_set_name=attr_values["contribution_variant_set_name"],
                variant_name=attr_values["contribution_variant"],
                variant_is_default=attr_values["contribution_variant_is_default"],  # noqa: E501
                order=order
            )
        else:
            contribution = SublayerContribution(
                instance=instance,
                layer_id=attr_values["contribution_layer"],
                target_product=attr_values["contribution_target_product"],
                order=order
            )

        asset_product = contribution.target_product
        layer_product = "{}_{}".format(asset_product, contribution.layer_id)

        # Layer contribution instance
        layer_instance = self.get_or_create_instance(
            product_name=layer_product,
            variant=contribution.layer_id,
            source_instance=instance,
            families=["usd", "usdLayer"],
        )
        layer_instance.data.setdefault("usd_contributions", []).append(
            contribution
        )
        layer_instance.data["usd_layer_id"] = contribution.layer_id
        layer_instance.data["usd_layer_order"] = contribution.order

        layer_instance.data["productGroup"] = (
            instance.data.get("productGroup") or "USD Layer"
        )

        # Asset/Shot contribution instance
        target_instance = self.get_or_create_instance(
            product_name=asset_product,
            variant=asset_product,
            source_instance=layer_instance,
            families=["usd", "usdAsset"],
        )
        target_instance.data["contribution_target_product_init"] = attr_values[
            "contribution_target_product_init"
        ]

        self.log.info(
            f"Contributing {instance.data['productName']} to "
            f"{layer_product} -> {asset_product}"
        )

    def find_instance(self, context, data, ignore_instance):
        """Return instance in context that has matching `instance.data`.

        If no matching instance is found, then `None` is returned.
        """
        for instance in context:
            if instance is ignore_instance:
                continue

            if all(instance.data.get(key) == value
                   for key, value in data.items()):
                return instance

    def get_or_create_instance(self,
                               product_name,
                               variant,
                               source_instance,
                               families):
        """Get or create the instance matching the product/variant.

        The source instance will be used to do additional matching, like
        ensuring it's a product for the same asset and task. If the instance
        already exists in the `context` then the existing one is returned.

        For each source instance this is called with, the source will be
        appended to an `instance.data["source_instances"]` list on the
        returned instance.

        Arguments:
            product_name (str): Product name.
            variant (str): Variant name.
            source_instance (pyblish.api.Instance): Source instance to
                be related to for asset, task.
            families (list): The families required to be set on the instance.

        Returns:
            pyblish.api.Instance: The resulting instance.

        """

        # Potentially the instance already exists due to multiple instances
        # contributing to the same layer or asset - so we first check for
        # existence
        context = source_instance.context

        # Required matching vars
        data = {
            "folderPath": source_instance.data["folderPath"],
            "task": source_instance.data.get("task"),
            "productName": product_name,
            "variant": variant,
            "families": families
        }
        existing_instance = self.find_instance(context, data,
                                               ignore_instance=source_instance)
        if existing_instance:
            existing_instance.append(source_instance.id)
            existing_instance.data["source_instances"].append(source_instance)
            return existing_instance

        # Otherwise create the instance
        new_instance = context.create_instance(name=product_name)
        new_instance.data.update(data)

        new_instance.data["label"] = (
            "{0} ({1})".format(product_name, new_instance.data["folderPath"])
        )
        new_instance.data["family"] = "usd"
        new_instance.data["productType"] = "usd"
        new_instance.data["icon"] = "link"
        new_instance.data["comment"] = "Automated bootstrap USD file."
        new_instance.append(source_instance.id)
        new_instance.data["source_instances"] = [source_instance]

        # The contribution target publishes should never match the versioning
        # of the workfile but should just always increment from their last
        # version so that there will never be conflicts between contributions
        # from different departments and scenes.
        new_instance.data["followWorkfileVersion"] = False

        return new_instance

    @classmethod
    def get_attribute_defs(cls):

        return [
            UISeparatorDef("usd_container_settings1"),
            UILabelDef(label="<b>USD Contribution</b>"),
            BoolDef("contribution_enabled",
                    label="Enable",
                    tooltip=(
                        "When enabled this publish instance will be added "
                        "into a department layer into a target product, "
                        "usually an asset or shot.\n"
                        "When disabled this publish instance will not be "
                        "added into another USD file and remain as is.\n"
                        "In both cases the USD data itself is free to have "
                        "references and sublayers of its own."
                    ),
                    default=True),
            TextDef("contribution_target_product",
                    label="Target product",
                    tooltip=(
                        "The target product the contribution should be added "
                        "to. Usually this is the asset or shot product.\nThe "
                        "department layer will be added to this product, and "
                        "the contribution itself will be added to the "
                        "department layer."
                    ),
                    default="usdAsset"),
            EnumDef("contribution_target_product_init",
                    label="Initialize as",
                    tooltip=(
                        "The target product's USD file will be initialized "
                        "based on this type if there's no existing USD of "
                        "that product yet.\nIf there's already an existing "
                        "product with the name of the 'target product' this "
                        "setting will do nothing."
                    ),
                    items=["asset", "shot"],
                    default="asset"),

            # Asset layer, e.g. model.usd, look.usd, rig.usd
            EnumDef("contribution_layer",
                    label="Add to department layer",
                    tooltip=(
                        "The layer the contribution should be made to in the "
                        "target product.\nThe layers have their own "
                        "predefined ordering.\nA higher order (further down "
                        "the list) will contribute as a stronger opinion."
                    ),
                    items=list(cls.contribution_layers.keys()),
                    default="model"),
            BoolDef("contribution_apply_as_variant",
                    label="Add as variant",
                    tooltip=(
                        "When enabled the contribution to the department "
                        "layer will be added as a variant where the variant "
                        "on the default root prim will be added as a "
                        "reference.\nWhen disabled the contribution will be "
                        "appended as a sublayer to the department layer "
                        "instead."
                    ),
                    default=True),
            TextDef("contribution_variant_set_name",
                    label="Variant Set Name",
                    default="{layer}"),
            TextDef("contribution_variant",
                    label="Variant Name",
                    default="{variant}"),
            BoolDef("contribution_variant_is_default",
                    label="Set as default variant selection",
                    tooltip=(
                        "Whether to set this instance's variant name as the "
                        "default selected variant name for the variant set.\n"
                        "It is always expected to be enabled for only one "
                        "variant name in the variant set.\n"
                        "The behavior is unpredictable if multiple instances "
                        "for the same variant set have this enabled."
                    ),
                    default=False),
            UISeparatorDef("usd_container_settings3"),
        ]

class CollectUSDLayerContributionsHoudiniLook(CollectUSDLayerContributions):
    """
    This is solely here to expose the attribute definitions for the
    Houdini "look" family.
    """
    # TODO: Improve how this is built for the look family
    hosts = ["houdini"]
    families = ["look"]
    label = CollectUSDLayerContributions.label + " (Look)"

    @classmethod
    def get_attribute_defs(cls):
        defs = super(CollectUSDLayerContributionsHoudiniLook,
                     cls).get_attribute_defs()

        # Update default for department layer to look
        layer_def = next(d for d in defs if d.key == "contribution_layer")
        layer_def.default = "look"

        return defs

class ExtractUSDLayerContribution(publish.Extractor):

    families = ["usdLayer"]
    label = "Extract USD Layer Contributions (Asset/Shot)"
    order = pyblish.api.ExtractorOrder + 0.45

    def process(self, instance):

        folder_path = instance.data["folderPath"]
        product_name = instance.data["productName"]
        self.log.debug(f"Building layer: {folder_path} > {product_name}")

        path = get_last_publish(instance)
        if path and BUILD_INTO_LAST_VERSIONS:
            sdf_layer = Sdf.Layer.OpenAsAnonymous(path)
            default_prim = sdf_layer.defaultPrim
        else:
            default_prim = folder_path.rsplit("/", 1)[-1]  # use folder name
            sdf_layer = Sdf.Layer.CreateAnonymous()
            set_layer_defaults(sdf_layer, default_prim=default_prim)

        contributions = instance.data.get("usd_contributions", [])
        for contribution in sorted(contributions, key=attrgetter("order")):
            path = get_instance_uri_path(contribution.instance)
            if isinstance(contribution, VariantContribution):
                # Add contribution as a reference inside a variant
                self.log.debug(f"Adding variant: {contribution}")

                # Make sure at least the prim exists outside the variant
                # selection, so it can house the variant selection and the
                # variants themselves
                prim_path = Sdf.Path(f"/{default_prim}")
                prim_spec = get_or_define_prim_spec(sdf_layer,
                                                    prim_path,
                                                    "Xform")

                variant_prim_path = variant_nested_prim_path(
                    prim_path=prim_path,
                    variant_selections=[
                        (contribution.variant_set_name,
                         contribution.variant_name)
                    ]
                )

                # Remove any existing matching entry of same product
                variant_prim_spec = sdf_layer.GetPrimAtPath(variant_prim_path)
                if variant_prim_spec:
                    self.remove_previous_reference_contribution(
                        prim_spec=variant_prim_spec,
                        instance=contribution.instance
                    )

                # Add the contribution at the indicated order
                self.add_reference_contribution(sdf_layer,
                                                variant_prim_path,
                                                path,
                                                contribution)

                # Set default variant selection
                variant_set_name = contribution.variant_set_name
                variant_name = contribution.variant_name
                if contribution.variant_is_default or \
                        variant_set_name not in prim_spec.variantSelections:
                    prim_spec.variantSelections[variant_set_name] = variant_name  # noqa: E501

            elif isinstance(contribution, SublayerContribution):
                # Sublayer source file
                self.log.debug(f"Adding sublayer: {contribution}")

                # This replaces existing versions of itself so that
                # republishing does not continuously add more versions of the
                # same product
                product_name = contribution.instance.data["productName"]
                add_ordered_sublayer(
                    layer=sdf_layer,
                    contribution_path=path,
                    layer_id=product_name,
                    order=None,  # unordered
                    add_sdf_arguments_metadata=True
                )
            else:
                raise TypeError(f"Unsupported contribution: {contribution}")

        # Save the file
        staging_dir = self.staging_dir(instance)
        filename = f"{instance.name}.usd"
        filepath = os.path.join(staging_dir, filename)
        sdf_layer.Export(filepath, args={"format": "usda"})

        add_representation(
            instance,
            name="usd",
            files=filename,
            staging_dir=staging_dir
        )

    def remove_previous_reference_contribution(self,
                                               prim_spec: Sdf.PrimSpec,
                                               instance: pyblish.api.Instance):
        # Remove existing contributions of the same product - ignoring
        # the picked version and representation. We assume there's only ever
        # one version of a product you want to have referenced into a Prim.
        remove_indices = set()
        for index, ref in enumerate(prim_spec.referenceList.prependedItems):
            ref: Sdf.Reference  # type hint

            uri = ref.customData.get("ayon_uri")
            if uri and self.instance_match_ayon_uri(instance, uri):
                self.log.debug("Removing existing reference: %s", ref)
                remove_indices.add(index)

        if remove_indices:
            prim_spec.referenceList.prependedItems[:] = [
                ref for index, ref
                in enumerate(prim_spec.referenceList.prependedItems)
                if index not in remove_indices
            ]

    def add_reference_contribution(self,
                                   layer: Sdf.Layer,
                                   prim_path: Sdf.Path,
                                   filepath: str,
                                   contribution: VariantContribution):
        instance = contribution.instance
        uri = construct_ayon_entity_uri(
            project_name=instance.data["projectEntity"]["name"],
            folder_path=instance.data["folderPath"],
            product=instance.data["productName"],
            version=instance.data["version"],
            representation_name="usd"
        )
        reference = Sdf.Reference(assetPath=filepath,
                                  customData={"ayon_uri": uri})
        add_ordered_reference(
            layer=layer,
            prim_path=prim_path,
            reference=reference,
            order=contribution.order
        )

    def instance_match_ayon_uri(self, instance, ayon_uri):

        uri_data = parse_ayon_entity_uri(ayon_uri)
        if not uri_data:
            return False

        # Check if project, asset and product match
        if instance.data["projectEntity"]["name"] != uri_data.get("project"):
            return False

        if instance.data["folderPath"] != uri_data.get("folderPath"):
            return False

        if instance.data["productName"] != uri_data.get("product"):
            return False

        return True

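# A small sketch of the matching performed by `instance_match_ayon_uri`
# above, using a hypothetical entity URI and instance data:
#
#   data = parse_ayon_entity_uri(
#       "ayon://test/char/hero?product=modelMain&version=2&representation=usd"
#   )
#   # data["project"] == "test", data["folderPath"] == "/char/hero" and
#   # data["product"] == "modelMain", so an instance publishing product
#   # "modelMain" for folder "/char/hero" in project "test" matches.
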
class ExtractUSDAssetContribution(publish.Extractor):

    families = ["usdAsset"]
    label = "Extract USD Asset/Shot Contributions"
    order = ExtractUSDLayerContribution.order + 0.01

    def process(self, instance):

        folder_path = instance.data["folderPath"]
        product_name = instance.data["productName"]
        self.log.debug(f"Building asset: {folder_path} > {product_name}")
        folder_name = folder_path.rsplit("/", 1)[-1]

        # Contribute layers to the asset.
        # Use an existing asset and add to it, or initialize a new asset layer
        path = get_last_publish(instance)
        payload_layer = None
        if path and BUILD_INTO_LAST_VERSIONS:
            # If there's a payload file, put it in the payload instead
            folder = os.path.dirname(path)
            payload_path = os.path.join(folder, "payload.usd")
            if os.path.exists(payload_path):
                payload_layer = Sdf.Layer.OpenAsAnonymous(payload_path)

            asset_layer = Sdf.Layer.OpenAsAnonymous(path)
        else:
            # If no existing publish of this product exists then we initialize
            # the layer as either a default asset or shot structure.
            init_type = instance.data["contribution_target_product_init"]
            asset_layer, payload_layer = self.init_layer(
                asset_name=folder_name, init_type=init_type
            )

        # Author timeCodesPerSecond and framesPerSecond if the asset layer
        # currently lacks them but our current context does specify an FPS
        fps = instance.data.get("fps", instance.context.data.get("fps"))
        if fps is not None:
            if (
                not asset_layer.HasTimeCodesPerSecond()
                and not asset_layer.HasFramesPerSecond()
            ):
                # Author FPS on the asset layer since there is no opinion yet
                self.log.info("Authoring FPS on Asset Layer: %s FPS", fps)
                asset_layer.timeCodesPerSecond = fps
                asset_layer.framesPerSecond = fps

            if fps != asset_layer.timeCodesPerSecond:
                self.log.warning(
                    "Current instance FPS '%s' does not match asset layer "
                    "timecodes per second '%s'",
                    fps, asset_layer.timeCodesPerSecond
                )
            if fps != asset_layer.framesPerSecond:
                self.log.warning(
                    "Current instance FPS '%s' does not match asset layer "
                    "frames per second '%s'",
                    fps, asset_layer.framesPerSecond
                )

        target_layer = payload_layer if payload_layer else asset_layer

        # Get unique layer instances (remove duplicate entries)
        processed_ids = set()
        layer_instances = []
        for layer_inst in instance.data["source_instances"]:
            if layer_inst.id in processed_ids:
                continue
            layer_instances.append(layer_inst)
            processed_ids.add(layer_inst.id)

        # Insert the layers in contribution order
        def sort_by_order(instance):
            return instance.data["usd_layer_order"]

        for layer_instance in sorted(layer_instances,
                                     key=sort_by_order,
                                     reverse=True):

            layer_id = layer_instance.data["usd_layer_id"]
            order = layer_instance.data["usd_layer_order"]

            path = get_instance_uri_path(instance=layer_instance)
            add_ordered_sublayer(target_layer,
                                 contribution_path=path,
                                 layer_id=layer_id,
                                 order=order,
                                 # Add the sdf argument metadata which allows
                                 # us to later detect whether another path
                                 # has the same layer id, so we can replace
                                 # it.
                                 add_sdf_arguments_metadata=True)

        # Save the file
        staging_dir = self.staging_dir(instance)
        filename = f"{instance.name}.usd"
        filepath = os.path.join(staging_dir, filename)
        asset_layer.Export(filepath, args={"format": "usda"})

        add_representation(
            instance,
            name="usd",
            files=filename,
            staging_dir=staging_dir
        )

        if payload_layer:
            payload_path = os.path.join(staging_dir, "payload.usd")
            payload_layer.Export(payload_path, args={"format": "usda"})
            self.add_relative_file(instance, payload_path)

    def init_layer(self, asset_name, init_type):
        """Initialize layer if no previous version exists"""

        if init_type == "asset":
            asset_layer = Sdf.Layer.CreateAnonymous()
            created_layers = setup_asset_layer(asset_layer, asset_name,
                                               force_add_payload=True,
                                               set_payload_path=True)
            payload_layer = created_layers[0].layer
            return asset_layer, payload_layer

        elif init_type == "shot":
            shot_layer = Sdf.Layer.CreateAnonymous()
            set_layer_defaults(shot_layer, default_prim=None)
            return shot_layer, None

        else:
            raise ValueError(
                "USD Target Product contribution can only initialize "
                "as 'asset' or 'shot', got: '{}'".format(init_type)
            )

    def add_relative_file(self, instance, source, staging_dir=None):
        """Add transfer for a relative path from staging to publish dir.

        Unlike files in representations, the file will not be renamed and
        will be ingested one-to-one into the publish directory.

        Note: This file does not get registered as a representation, because
            representation files always get renamed by the publish template
            system. These files get included in the `representation["files"]`
            info with all the representations of the version - and thus will
            appear multiple times per version.

        """
        # TODO: It can be nice to force a particular representation no matter
        #  what to adhere to a certain filename on integration because e.g. a
        #  particular file format relies on that file being named like that,
        #  and still allow regular registering with the database as a file of
        #  the version. As such we might want to tweak integrator logic?
        if staging_dir is None:
            staging_dir = self.staging_dir(instance)

        assert isinstance(staging_dir, str), "Staging dir must be string"
        publish_dir: str = instance.data["publishDir"]

        relative_path = os.path.relpath(source, staging_dir)
        destination = os.path.join(publish_dir, relative_path)
        destination = os.path.normpath(destination)

        transfers = instance.data.setdefault("transfers", [])
        self.log.debug(f"Adding relative file {source} -> {relative_path}")
        transfers.append((source, destination))

@@ -1,6 +1,13 @@
import inspect

import os
from collections import defaultdict

import pyblish.api
from ayon_core.pipeline.publish import ValidateContentsOrder
from ayon_core.pipeline.publish import (
    ValidateContentsOrder,
    PublishValidationError
)


class ValidateResources(pyblish.api.InstancePlugin):
@@ -10,19 +17,95 @@ class ValidateResources(pyblish.api.InstancePlugin):
    these could be textures, image planes, cache files or other linked
    media.

    A single resource entry MUST contain `source` and `files`:
        {
            "source": "/path/to/file.<UDIM>.exr",
            "files": ['/path/to/file.1001.exr', '/path/to/file.1002.exr']
        }

    It may contain additional metadata like `attribute` or `node` so other
    publishing plug-ins can detect where the resource was used. The
    `color_space` data is also frequently used (e.g. in Maya and Houdini)

    This validates:
        - The resources are existing files.
        - The resources have correctly collected the data.
        - The resources must be unique per source filepath so that multiple
          source filepaths do not write to the same publish filepath.

    """

    order = ValidateContentsOrder
    label = "Validate Resources"
    label = "Resources"

    def process(self, instance):

        for resource in instance.data.get('resources', []):
        resources = instance.data.get("resources", [])
        if not resources:
            self.log.debug("No resources to validate..")
            return

        # Validate the `resources` data structure is valid
        invalid_data = False
        for resource in resources:
            # Required data
            assert "source" in resource, "No source found"
            assert "files" in resource, "No files from source"
            assert all(os.path.exists(f) for f in resource['files'])
            if "source" not in resource:
                invalid_data = True
                self.log.error("Missing 'source' in resource: %s", resource)
            if "files" not in resource or not resource["files"]:
                invalid_data = True
                self.log.error("Missing 'files' in resource: %s", resource)
            if not all(os.path.exists(f) for f in resource.get("files", [])):
                invalid_data = True
                self.log.error(
                    "Resource contains files that do not exist "
                    "on disk: %s", resource
                )

        # Ensure unique resource names
        basenames = defaultdict(set)
        for resource in resources:
            files = resource.get("files", [])
            for filename in files:

                # Use normalized paths in comparison and ignore case
                # sensitivity
                filename = os.path.normpath(filename).lower()

                basename = os.path.splitext(os.path.basename(filename))[0]
                basenames[basename].add(filename)

        invalid_resources = list()
        for basename, sources in basenames.items():
            if len(sources) > 1:
                invalid_resources.extend(sources)
                self.log.error(
                    "Non-unique resource filename: {0}\n- {1}".format(
                        basename,
                        "\n- ".join(sources)
                    )
                )

        if invalid_data or invalid_resources:
            raise PublishValidationError(
                "Invalid resources in instance.",
                description=self.get_description()
            )

    def get_description(self):
        return inspect.cleandoc(
            """### Invalid resources

            Used resources, like textures, must exist on disk and must have
            unique filenames.

            #### Filenames must be unique

            In most cases this invalidates due to using the same filename
            from different folders, so that the file to be transferred is
            unique but has a clashing filename. Either rename the source
            files or make sure to use the same source file if they are
            intended to be the same file.

            """
        )

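# A minimal sketch of the basename-uniqueness check above, with
# hypothetical texture paths that clash on their basename:
#
#   basenames = defaultdict(set)
#   for f in ("/assets/hero/tex/diffuse.exr",
#             "/assets/tree/tex/diffuse.exr"):
#       f = os.path.normpath(f).lower()
#       basenames[os.path.splitext(os.path.basename(f))[0]].add(f)
#   # basenames["diffuse"] now holds two paths -> validation would fail
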
@@ -57,6 +57,31 @@ class CollectFramesFixDefModel(BaseSettingsModel):
        True,
        title="Show 'Rewrite latest version' toggle"
    )


class ContributionLayersModel(BaseSettingsModel):
    _layout = "compact"
    name: str = SettingsField(title="Name")
    order: str = SettingsField(
        title="Order",
        description="Higher order means a higher strength and stacks the "
                    "layer on top.")


class CollectUSDLayerContributionsModel(BaseSettingsModel):
    enabled: bool = SettingsField(True, title="Enabled")
    contribution_layers: list[ContributionLayersModel] = SettingsField(
        title="Department Layer Orders",
        description=(
            "Define available department layers and their strength "
            "ordering inside the USD contribution workflow."
        )
    )

    @validator("contribution_layers")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value


class PluginStateByHostModelProfile(BaseSettingsModel):
@@ -792,6 +817,10 @@ class PublishPuginsModel(BaseSettingsModel):
        default_factory=CollectFramesFixDefModel,
        title="Collect Frames to Fix",
    )
    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = SettingsField(
        default_factory=CollectUSDLayerContributionsModel,
        title="Collect USD Layer Contributions",
    )
    ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
        default_factory=ValidateBaseModel,
        title="Validate Editorial Asset Name"
@@ -884,6 +913,23 @@ DEFAULT_PUBLISH_VALUES = {
        "enabled": True,
        "rewrite_version_enable": True
    },
    "CollectUSDLayerContributions": {
        "enabled": True,
        "contribution_layers": [
            # Asset layers
            {"name": "model", "order": 100},
            {"name": "assembly", "order": 150},
            {"name": "groom", "order": 175},
            {"name": "look", "order": 300},
            {"name": "rig", "order": 100},
            # Shot layers
            {"name": "layout", "order": 200},
            {"name": "animation", "order": 300},
            {"name": "simulation", "order": 400},
            {"name": "fx", "order": 500},
            {"name": "lighting", "order": 600},
        ],
    },
    "ValidateEditorialAssetName": {
        "enabled": True,
        "optional": False,
@@ -50,6 +50,30 @@ class RedshiftRenderPluginInfo():
    Version = attr.ib(default="1")


@attr.s
class HuskStandalonePluginInfo():
    """Requires Deadline Husk Standalone Plugin.
    See Deadline Plug-in:
        https://github.com/BigRoy/HuskStandaloneSubmitter
    Also see Husk options here:
        https://www.sidefx.com/docs/houdini/ref/utils/husk.html
    """
    SceneFile = attr.ib()
    # TODO: The parameters below are only supported by a custom version of
    #   the plugin
    Renderer = attr.ib(default=None)
    RenderSettings = attr.ib(default="/Render/rendersettings")
    Purpose = attr.ib(default="geometry,render")
    Complexity = attr.ib(default="veryhigh")
    Snapshot = attr.ib(default=-1)
    LogLevel = attr.ib(default="2")
    PreRender = attr.ib(default="")
    PreFrame = attr.ib(default="")
    PostFrame = attr.ib(default="")
    PostRender = attr.ib(default="")
    RestartDelegate = attr.ib(default="")
    Version = attr.ib(default="")


class HoudiniSubmitDeadline(
    abstract_submit_deadline.AbstractSubmitDeadline,
    AYONPyblishPluginMixin
@@ -69,8 +93,7 @@ class HoudiniSubmitDeadline(
    label = "Submit Render to Deadline"
    order = pyblish.api.IntegratorOrder
    hosts = ["houdini"]
    families = ["usdrender",
                "redshift_rop",
    families = ["redshift_rop",
                "arnold_rop",
                "mantra_rop",
                "karma_rop",
@@ -149,11 +172,14 @@ class HoudiniSubmitDeadline(

        job_type = "[RENDER]"
        if split_render_job and not is_export_job:
            # Convert from family to Deadline plugin name
            # i.e., arnold_rop -> Arnold
            plugin = (
                instance.data["productType"].replace("_rop", "").capitalize()
            )
            product_type = instance.data["productType"]
            plugin = {
                "usdrender": "HuskStandalone",
            }.get(product_type)
            if not plugin:
                # Convert from product type to Deadline plugin name
                # i.e., arnold_rop -> Arnold
                plugin = product_type.replace("_rop", "").capitalize()
        else:
            plugin = "Houdini"
            if split_render_job:
@@ -185,7 +211,8 @@ class HoudiniSubmitDeadline(
        # Make sure we make the job frame dependent so render tasks pick up
        # as soon as export tasks are done
        if split_render_job and not is_export_job:
            job_info.IsFrameDependent = True
            job_info.IsFrameDependent = bool(instance.data.get(
                "splitRenderFrameDependent", True))

        job_info.Pool = instance.data.get("primaryPool")
        job_info.SecondaryPool = instance.data.get("secondaryPool")
@@ -207,6 +234,13 @@ class HoudiniSubmitDeadline(
        )
        job_info.Group = self.group

        # Apply render globals, like e.g. data from collect machine list
        render_globals = instance.data.get("renderGlobals", {})
        if render_globals:
            self.log.debug("Applying 'renderGlobals' to job info: %s",
                           render_globals)
            job_info.update(render_globals)

        job_info.Comment = context.data.get("comment")

        keys = [
@@ -292,6 +326,10 @@ class HoudiniSubmitDeadline(
                " - using version configured in Deadline"
            ))

        elif product_type == "usdrender":
            plugin_info = self._get_husk_standalone_plugin_info(
                instance, hou_major_minor)

        else:
            self.log.error(
                "Product type '%s' not supported yet to split render job",
@@ -321,3 +359,45 @@ class HoudiniSubmitDeadline(
        # Store output dir for unified publisher (filesequence)
        output_dir = os.path.dirname(instance.data["files"][0])
        instance.data["outputDir"] = output_dir

    def _get_husk_standalone_plugin_info(self, instance, hou_major_minor):
        # Not all hosts can import this module.
        import hou

        # Supply additional parameters from the USD Render ROP
        # to the Husk Standalone Render Plug-in
        rop_node = hou.node(instance.data["instance_node"])
        snapshot_interval = -1
        if rop_node.evalParm("dosnapshot"):
            snapshot_interval = rop_node.evalParm("snapshotinterval")

        restart_delegate = 0
        if rop_node.evalParm("husk_restartdelegate"):
            restart_delegate = rop_node.evalParm("husk_restartdelegateframes")

        rendersettings = (
            rop_node.evalParm("rendersettings")
            or "/Render/rendersettings"
        )
        return HuskStandalonePluginInfo(
            SceneFile=instance.data["ifdFile"],
            Renderer=rop_node.evalParm("renderer"),
            RenderSettings=rendersettings,
            Purpose=rop_node.evalParm("husk_purpose"),
            Complexity=rop_node.evalParm("husk_complexity"),
            Snapshot=snapshot_interval,
            PreRender=rop_node.evalParm("husk_prerender"),
            PreFrame=rop_node.evalParm("husk_preframe"),
            PostFrame=rop_node.evalParm("husk_postframe"),
            PostRender=rop_node.evalParm("husk_postrender"),
            RestartDelegate=restart_delegate,
            Version=hou_major_minor
        )


class HoudiniSubmitDeadlineUsdRender(HoudiniSubmitDeadline):
    # Do not use published workfile paths for USD Render ROP because the
    # Export Job doesn't seem to occur using the published path either, so
    # output paths then do not match the actual rendered paths
    use_published = False
    families = ["usdrender"]

@@ -94,7 +94,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
                "vrayscene", "maxrender",
                "arnold_rop", "mantra_rop",
                "karma_rop", "vray_rop",
                "redshift_rop"]
                "redshift_rop", "usdrender"]
    settings_category = "deadline"

    aov_filter = [
@@ -367,6 +367,28 @@ def maintained_selection():
            node.setSelected(on=True)


@contextmanager
def parm_values(overrides):
    """Override parameter values during the context.

    Arguments:
        overrides (List[Tuple[hou.Parm, Any]]): The overrides per parm
            that should be applied during the context.
    """

    originals = []
    try:
        for parm, value in overrides:
            originals.append((parm, parm.eval()))
            parm.set(value)
        yield
    finally:
        for parm, value in originals:
            # The parameter might not exist anymore so first
            # check whether it's still valid
            if hou.parm(parm.path()):
                parm.set(value)

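# Usage sketch for `parm_values`, assuming a hypothetical node path and
# a standard translate parm:
#
#   node = hou.node("/obj/geo1")
#   with parm_values([(node.parm("tx"), 5.0)]):
#       print(node.evalParm("tx"))  # -> 5.0
#   # the original value is restored when the context exits
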
def reset_framerange(fps=True, frame_range=True):
    """Set frame range and FPS to current folder."""

@@ -134,6 +134,7 @@ class HoudiniCreator(Creator, HoudiniCreatorBase):

        instance_data["instance_node"] = instance_node.path()
        instance_data["instance_id"] = instance_node.path()
        instance_data["families"] = self.get_publish_families()
        instance = CreatedInstance(
            self.product_type,
            product_name,

@@ -182,6 +183,7 @@ class HoudiniCreator(Creator, HoudiniCreatorBase):
            node_path = instance.path()
            node_data["instance_id"] = node_path
            node_data["instance_node"] = node_path
            node_data["families"] = self.get_publish_families()
            if "AYON_productName" in node_data:
                node_data["productName"] = node_data.pop("AYON_productName")

@@ -211,6 +213,7 @@ class HoudiniCreator(Creator, HoudiniCreatorBase):
            values["AYON_productName"] = values.pop("productName")
        values.pop("instance_node", None)
        values.pop("instance_id", None)
        values.pop("families", None)
        imprint(node, values, update=update)

    def remove_instances(self, instances):

@@ -252,6 +255,21 @@ class HoudiniCreator(Creator, HoudiniCreatorBase):
            node.setUserData('nodeshape', shape)
            node.setColor(color)

    def get_publish_families(self):
        """Return families for the instances of this creator.

        Allow a Creator to define multiple families so that a creator can
        e.g. specify `usd` and `usdrop`.

        There is no need to override this method if you only have the
        primary family defined by the `product_type` property as that will
        always be set.

        Returns:
            List[str]: Families for instances of this creator.
        """
        return []

    def get_network_categories(self):
        """Return in which network view type this creator should show.

@@ -2,9 +2,12 @@

import contextlib
import logging
import json
import itertools
from typing import List

from pxr import Sdf

import hou
from pxr import Usd, Sdf, Tf, Vt, UsdRender

log = logging.getLogger(__name__)

@@ -119,11 +122,13 @@ def get_usd_rop_loppath(node):
    return node.parm("loppath").evalAsNode()


def get_layer_save_path(layer):
def get_layer_save_path(layer, expand_string=True):
    """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer.

    Args:
        layer (pxr.Sdf.Layer): The layer to retrieve the save path data from.
        expand_string (bool): Whether to expand any Houdini vars in the save
            path before computing the absolute path.

    Returns:
        str or None: Path to save to when data exists.
@@ -136,6 +141,8 @@ def get_layer_save_path(layer):
    save_path = hou_layer_info.customData.get("HoudiniSavePath", None)
    if save_path:
        # Unfortunately this doesn't actually resolve the full absolute path
        if expand_string:
            save_path = hou.text.expandString(save_path)
        return layer.ComputeAbsolutePath(save_path)

@@ -181,7 +188,18 @@ def iter_layer_recursive(layer):
    yield layer


def get_configured_save_layers(usd_rop):
def get_configured_save_layers(usd_rop, strip_above_layer_break=True):
    """Retrieve the layer save paths from a USD ROP.

    Arguments:
        usd_rop (hou.RopNode): USD ROP node.
        strip_above_layer_break (Optional[bool]): Whether to exclude any
            layers that are above layer breaks. This defaults to True.

    Returns:
        List[Sdf.Layer]: The layers with configured save paths.

    """

    lop_node = get_usd_rop_loppath(usd_rop)
    stage = lop_node.stage(apply_viewport_overrides=False)
@@ -192,10 +210,170 @@ def get_configured_save_layers(usd_rop):

    root_layer = stage.GetRootLayer()

    if strip_above_layer_break:
        layers_above_layer_break = set(lop_node.layersAboveLayerBreak())
    else:
        layers_above_layer_break = set()

    save_layers = []
    for layer in iter_layer_recursive(root_layer):
        if (
            strip_above_layer_break and
            layer.identifier in layers_above_layer_break
        ):
            continue

        save_path = get_layer_save_path(layer)
        if save_path is not None:
            save_layers.append(layer)

    return save_layers

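# Usage sketch, assuming a hypothetical USD ROP at "/out/usd1":
#
#   rop = hou.node("/out/usd1")
#   for layer in get_configured_save_layers(rop):
#       print(layer.identifier, "->", get_layer_save_path(layer))
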
def setup_lop_python_layer(layer, node, savepath=None,
                           apply_file_format_args=True):
    """Set up Sdf.Layer with HoudiniLayerInfo prim for metadata.

    This is the same as `loputils.createPythonLayer` but can be run on top
    of `pxr.Sdf.Layer` instances that are already created in a Python LOP
    node. That's useful if your layer creation itself is built to be DCC
    agnostic; then we just need to run this afterwards per layer to make it
    explicitly stored for Houdini.

    By default, Houdini doesn't apply the FileFormatArguments supplied to
    the created layer; however it does support USD's file save suffix
    of `:SDF_FORMAT_ARGS:` to supply them. With `apply_file_format_args` any
    file format args set on the layer's creation will be added to the
    save path through that.

    Note: The `node.addHeldLayer` call will only work from a LOP Python node
        whenever `node.editableStage()` or `node.editableLayer()` was called.

    Arguments:
        layer (Sdf.Layer): An existing layer (most likely just created
            in the current runtime)
        node (hou.LopNode): The Python LOP node to attach the layer to so
            it does not get garbage collected/mangled downstream.
        savepath (Optional[str]): When provided the HoudiniSaveControl
            will be set to Explicit with HoudiniSavePath to this path.
        apply_file_format_args (Optional[bool]): When enabled any
            FileFormatArgs defined for the layer on creation will be set
            in the HoudiniSavePath so the Houdini USD ROP will use them too.

    Returns:
        Sdf.PrimSpec: The created HoudiniLayerInfo prim spec.

    """
    # Add a Houdini Layer Info prim where we can put the save path.
    p = Sdf.CreatePrimInLayer(layer, '/HoudiniLayerInfo')
    p.specifier = Sdf.SpecifierDef
    p.typeName = 'HoudiniLayerInfo'
    if savepath:
        if apply_file_format_args:
            args = layer.GetFileFormatArguments()
            savepath = Sdf.Layer.CreateIdentifier(savepath, args)

        p.customData['HoudiniSavePath'] = savepath
        p.customData['HoudiniSaveControl'] = 'Explicit'
    # Let everyone know what node created this layer.
    p.customData['HoudiniCreatorNode'] = node.sessionId()
    p.customData['HoudiniEditorNodes'] = Vt.IntArray([node.sessionId()])
    node.addHeldLayer(layer.identifier)

    return p

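# Usage sketch from inside a LOP Python node, with a hypothetical save
# path. `node.editableStage()` is called first so that the
# `node.addHeldLayer` call inside `setup_lop_python_layer` is allowed:
#
#   node = hou.pwd()
#   node.editableStage()
#   layer = Sdf.Layer.CreateAnonymous()
#   setup_lop_python_layer(layer, node, savepath="$HIP/usd/extra_layer.usd")
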
@contextlib.contextmanager
def remap_paths(rop_node, mapping):
    """Enable the AyonRemapPaths output processor with provided `mapping`"""
    from ayon_houdini.api.lib import parm_values

    if not mapping:
        # Do nothing
        yield
        return

    # Houdini string parms need to escape backslashes due to the support
    # of expressions - as such we do so on the json data
    value = json.dumps(mapping).replace("\\", "\\\\")
    with outputprocessors(
        rop_node,
        processors=["ayon_remap_paths"],
        disable_all_others=True,
    ):
        with parm_values([
            (rop_node.parm("ayon_remap_paths_remap_json"), value)
        ]):
            yield

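# Usage sketch, with a hypothetical ROP path and mapping:
#
#   rop = hou.node("/out/usd1")
#   mapping = {"C:/local/textures": "/server/share/textures"}
#   with remap_paths(rop, mapping):
#       rop.render()
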
def get_usd_render_rop_rendersettings(rop_node, stage=None, logger=None):
    """Return the chosen UsdRender.Settings from the stage (if any).

    Args:
        rop_node (hou.Node): The Houdini USD Render ROP node.
        stage (pxr.Usd.Stage): The USD stage to find the render settings
            in. This is usually the stage from the LOP path the USD Render
            ROP node refers to.
        logger (logging.Logger): Logger to log warnings to if no render
            settings are found in the stage.

    Returns:
        Optional[UsdRender.Settings]: Render Settings.

    """
    if logger is None:
        logger = log

    if stage is None:
        lop_node = get_usd_rop_loppath(rop_node)
        stage = lop_node.stage()

    path = rop_node.evalParm("rendersettings")
    if not path:
        # Default behavior
        path = "/Render/rendersettings"

    prim = stage.GetPrimAtPath(path)
    if not prim:
        logger.warning("No render settings primitive found at: %s", path)
        return

    render_settings = UsdRender.Settings(prim)
    if not render_settings:
        logger.warning("Prim at %s is not a valid RenderSettings prim.", path)
        return

    return render_settings

def get_schema_type_names(type_name: str) -> List[str]:
    """Return the schema type name for a type name and its derived types.

    This can be useful for checking whether a `Sdf.PrimSpec`'s type name is
    of a given type or any of its derived types.

    Args:
        type_name (str): The type name, like e.g. 'UsdGeomMesh'

    Returns:
        List[str]: List of schema type names and their derived types.

    """
    schema_registry = Usd.SchemaRegistry
    type_ = Tf.Type.FindByName(type_name)

    if type_ == Tf.Type.Unknown:
        type_ = schema_registry.GetTypeFromSchemaTypeName(type_name)
        if type_ == Tf.Type.Unknown:
            # Type not found
            return []

    results = []
    derived = type_.GetAllDerivedTypes()
    for derived_type in itertools.chain([type_], derived):
        schema_type_name = schema_registry.GetSchemaTypeName(derived_type)
        if schema_type_name:
            results.append(schema_type_name)

    return results

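# Usage sketch; the exact result depends on the USD build and loaded
# schema plugins:
#
#   get_schema_type_names("UsdGeomBoundable")
#   # -> ["Boundable", "Mesh", "Capsule", ...] (derived types included)
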
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating publishable Houdini Digital Assets."""
import hou
from assettools import setToolSubmenu

import ayon_api
from ayon_core.pipeline import (
@@ -16,6 +15,132 @@ from ayon_core.lib import (
from ayon_houdini.api import plugin


# region assettools
# Logic based on Houdini 19.5.752 `assettools.py` because
# this logic was removed in Houdini 20+
def get_tool_submenus(hda_def):
    """Return the tab submenu entries of this node.

    Note: A node could be placed in multiple entries at once.

    Arguments:
        hda_def: The HDA definition from hou.node.type().definition()

    Returns:
        Optional[list[str]]: A list of submenus
    """

    import xml.etree.ElementTree as ET
    if hda_def.hasSection('Tools.shelf'):
        sections = hda_def.sections()
        ts_section = sections['Tools.shelf'].contents()
        try:
            root = ET.fromstring(ts_section)
        except ET.ParseError:
            return None
        tool = root[0]
        submenus = tool.findall('toolSubmenu')
        if submenus:
            tool_submenus = []
            for submenu in submenus:
                if submenu is not None:
                    text = submenu.text
                    if text:
                        tool_submenus.append(submenu.text)
            if tool_submenus:
                return tool_submenus
            else:
                return None
        else:
            return None
    else:
        return None


def set_tool_submenu(hda_def,
                     new_submenu='Digital Assets'):
    """Set the tab menu entry for a node.

    Arguments:
        hda_def: The HDA definition from hou.node.type().definition()
        new_submenu (Optional[str]): This will be the new submenu, replacing
            the old submenu entry
    """

    context_dict = {
        'Shop': 'SHOP',
        'Cop2': 'COP2',
        'Object': 'OBJ',
        'Chop': 'CHOP',
        'Sop': 'SOP',
        'Vop': 'VOP',
        'VopNet': 'VOPNET',
        'Driver': 'ROP',
        'TOP': 'TOP',
        'Top': 'TOP',
        'Lop': 'LOP',
        'Dop': 'DOP'}

    utils_dict = {
        'Shop': 'shoptoolutils',
        'Cop2': 'cop2toolutils',
        'Object': 'objecttoolutils',
        'Chop': 'choptoolutils',
        'Sop': 'soptoolutils',
        'Vop': 'voptoolutils',
        'VopNet': 'vopnettoolutils',
        'Driver': 'drivertoolutils',
        'TOP': 'toptoolutils',
        'Top': 'toptoolutils',
        'Lop': 'loptoolutils',
        'Dop': 'doptoolutils'}

    if hda_def.hasSection('Tools.shelf'):
        old_submenu = get_tool_submenus(hda_def)[0]
    else:
        # Add default tools shelf section
        content = """<?xml version="1.0" encoding="UTF-8"?>
<shelfDocument>
<!-- This file contains definitions of shelves, toolbars, and tools.
It should not be hand-edited when it is being used by the application.
Note, that two definitions of the same element are not allowed in
a single file. -->
<tool name="$HDA_DEFAULT_TOOL" label="$HDA_LABEL" icon="$HDA_ICON">
<toolMenuContext name="viewer">
<contextNetType>SOP</contextNetType>
</toolMenuContext>
<toolMenuContext name="network">
<contextOpType>$HDA_TABLE_AND_NAME</contextOpType>
</toolMenuContext>
<toolSubmenu>Digital Assets</toolSubmenu>
<script scriptType="python"><![CDATA[import soptoolutils
soptoolutils.genericTool(kwargs, \'$HDA_NAME\')]]></script>
</tool>
</shelfDocument>
"""

        nodetype_category_name = hda_def.nodeType().category().name()
        context = context_dict[nodetype_category_name]
        util = utils_dict[nodetype_category_name]
        content = content.replace(
            "<contextNetType>SOP</contextNetType>",
            f"<contextNetType>{context}</contextNetType>")
        content = content.replace('soptoolutils', util)
        hda_def.addSection('Tools.shelf', content)
        old_submenu = 'Digital Assets'

    # Replace submenu
    tools = hda_def.sections()["Tools.shelf"]
    content = tools.contents()
    content = content.replace(
        f"<toolSubmenu>{old_submenu}</toolSubmenu>",
        f"<toolSubmenu>{new_submenu}</toolSubmenu>"
    )

    hda_def.addSection('Tools.shelf', content)
# endregion


class CreateHDA(plugin.HoudiniCreator):
    """Publish Houdini Digital Asset file."""

@@ -121,7 +246,7 @@ class CreateHDA(plugin.HoudiniCreator):
            hda_def.setUserInfo(get_ayon_username())

        if pre_create_data.get("use_project"):
            setToolSubmenu(hda_def, "AYON/{}".format(self.project_name))
            set_tool_submenu(hda_def, "AYON/{}".format(self.project_name))

        return hda_node

@@ -8,10 +8,11 @@ import hou
class CreateUSD(plugin.HoudiniCreator):
    """Universal Scene Description"""
    identifier = "io.openpype.creators.houdini.usd"
    label = "USD (experimental)"
    label = "USD"
    product_type = "usd"
    icon = "gears"
    icon = "cubes"
    enabled = False
    description = "Create USD"

    def create(self, product_name, instance_data, pre_create_data):

@@ -49,3 +50,6 @@ class CreateUSD(plugin.HoudiniCreator):
            hou.ropNodeTypeCategory(),
            hou.lopNodeTypeCategory()
        ]

    def get_publish_families(self):
        return ["usd", "usdrop"]

@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating USD looks with textures."""
import inspect

from ayon_houdini.api import plugin

import hou


class CreateUSDLook(plugin.HoudiniCreator):
    """Universal Scene Description Look"""

    identifier = "io.openpype.creators.houdini.usd.look"
    label = "Look"
    product_type = "look"
    icon = "paint-brush"
    enabled = True
    description = "Create USD Look"

    def create(self, product_name, instance_data, pre_create_data):

        instance_data.pop("active", None)
        instance_data.update({"node_type": "usd"})

        instance = super(CreateUSDLook, self).create(
            product_name,
            instance_data,
            pre_create_data)

        instance_node = hou.node(instance.get("instance_node"))

        parms = {
            "lopoutput": "$HIP/pyblish/{}.usd".format(product_name),
            "enableoutputprocessor_simplerelativepaths": False,

            # Set the 'default prim' by default to the folder name being
            # published to
            "defaultprim": '/`strsplit(chs("folderPath"), "/", -1)`',
        }

        if self.selected_nodes:
            parms["loppath"] = self.selected_nodes[0].path()

        instance_node.setParms(parms)

        # Lock any parameters in this list
        to_lock = [
            "fileperframe",
            # Lock some Avalon attributes
            "family",
            "id",
        ]
        self.lock_parameters(instance_node, to_lock)

    def get_detail_description(self):
        return inspect.cleandoc("""Publish looks in USD data.

        From the Houdini Solaris context (LOPs) this will publish the look
        for an asset as a USD file with the used textures.

        Any assets used by the look will be relatively remapped to the USD
        file and integrated into the publish as `resources`.

        """)

    def get_network_categories(self):
        return [
            hou.ropNodeTypeCategory(),
            hou.lopNodeTypeCategory()
        ]

    def get_publish_families(self):
        return ["usd", "look", "usdrop"]

@@ -1,24 +1,66 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating USD renders."""
from ayon_houdini.api import plugin
from ayon_core.lib import BoolDef, EnumDef

import hou


def get_usd_rop_renderers():
    """Return all available renderers supported by the USD Render ROP.

    Note that the USD Render ROP does not include all Hydra renderers,
    because it excludes the GL ones like Houdini GL and Storm. The USD
    Render ROP only lists the renderers that have `aovsupport` enabled.
    Also see:
    https://www.sidefx.com/docs/houdini/nodes/out/usdrender.html#list

    Returns:
        dict[str, str]: Plug-in name to display name mapping.
    """
    return {
        info["name"]: info["displayname"] for info
        in hou.lop.availableRendererInfo() if info.get('aovsupport')
    }

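# Usage sketch; the returned names depend on the installed Hydra render
# delegates, e.g. a Karma entry could map its plug-in name to the
# "Karma CPU" display name:
#
#   for plugin_name, display_name in get_usd_rop_renderers().items():
#       print(plugin_name, "->", display_name)
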
class CreateUSDRender(plugin.HoudiniCreator):
    """USD Render ROP in /stage"""
    identifier = "io.openpype.creators.houdini.usdrender"
    label = "USD Render (experimental)"
    label = "USD Render"
    product_type = "usdrender"
    icon = "magic"
    description = "Create USD Render"

    default_renderer = "Karma CPU"
    # Default render target
    render_target = "farm_split"

    def create(self, product_name, instance_data, pre_create_data):
        import hou  # noqa

        instance_data["parent"] = hou.node("/stage")
        # Transfer settings from pre create to instance
        creator_attributes = instance_data.setdefault(
            "creator_attributes", dict())

        for key in ["render_target", "review"]:
            if key in pre_create_data:
                creator_attributes[key] = pre_create_data[key]

        # TODO: Support creation in /stage if wanted by user
        # pre_create_data["parent"] = "/stage"

        # Remove the active flag; we are checking the bypass flag of the
        # nodes instead
        instance_data.pop("active", None)
        instance_data.update({"node_type": "usdrender"})

        # Override the default value for the Export Chunk Size because if a
        # single USD file is written, as opposed to one per frame, we want
        # to ensure only one machine picks up that sequence
        # TODO: Probably better to change the default somehow for just this
        #  Creator on the HoudiniSubmitDeadline plug-in, if possible?
        (
            instance_data
            .setdefault("publish_attributes", {})
            .setdefault("HoudiniSubmitDeadlineUsdRender", {})["export_chunk"]
        ) = 1000

        instance = super(CreateUSDRender, self).create(
            product_name,
            instance_data,
@@ -26,15 +68,98 @@ class CreateUSDRender(plugin.HoudiniCreator):

         instance_node = hou.node(instance.get("instance_node"))

         parms = {
             # Render frame range
             "trange": 1
         }
-        if self.selected_nodes:
-            parms["loppath"] = self.selected_nodes[0].path()
+        if pre_create_data.get("render_target") == "farm_split":
+            # Do not trigger the husk render, only trigger the USD export
+            parms["runcommand"] = False
+            # By default, the render ROP writes out the render file to a
+            # temporary directory. But if we want to render the USD file on
+            # the farm we instead want it in the project, available to all
+            # machines. So we ensure all USD files are written to a folder
+            # of our choice. The `__render__.usd` file (default name,
+            # defined by the `lopoutput` parm) in that folder will then be
+            # the file to render.
+            parms["savetodirectory_directory"] = "$HIP/render/usd/$HIPNAME/$OS"
+            parms["lopoutput"] = "__render__.usd"
+            parms["allframesatonce"] = True
+
+        # By default strip any Houdini custom data from the output file
+        # since the renderer doesn't care about it
+        parms["clearhoudinicustomdata"] = True
+
+        # Use the first selected LOP node if "Use Selection" is enabled
+        # and the user had any nodes selected
+        if self.selected_nodes:
+            for node in self.selected_nodes:
+                if node.type().category() == hou.lopNodeTypeCategory():
+                    parms["loppath"] = node.path()
+                    break
+
+        # Set default renderer if defined in settings
+        if pre_create_data.get("renderer"):
+            parms["renderer"] = pre_create_data.get("renderer")

         instance_node.setParms(parms)

-        # Lock some Avalon attributes
+        # Lock some AYON attributes
         to_lock = ["productType", "id"]
         self.lock_parameters(instance_node, to_lock)

+    def get_instance_attr_defs(self):
+        """Get instance attribute definitions.
+
+        Attributes defined in this method are exposed in the
+        publish tab in the publisher UI.
+        """
+
+        render_target_items = {
+            "local": "Local machine rendering",
+            "local_no_render": "Use existing frames (local)",
+            "farm": "Farm Rendering",
+            "farm_split": "Farm Rendering - Split export & render jobs",
+        }
+
+        return [
+            BoolDef("review",
+                    label="Review",
+                    tooltip="Mark as reviewable",
+                    default=True),
+            EnumDef("render_target",
+                    items=render_target_items,
+                    label="Render target",
+                    default=self.render_target)
+        ]
+
+    def get_pre_create_attr_defs(self):
+
+        # Retrieve available renderers and convert the default renderer to
+        # its plug-in name if settings provided the display name
+        renderer_plugin_to_display_name = get_usd_rop_renderers()
+        default_renderer = self.default_renderer or None
+        if (
+            default_renderer
+            and default_renderer not in renderer_plugin_to_display_name
+        ):
+            # Map default renderer display name to plugin name
+            for name, display_name in renderer_plugin_to_display_name.items():
+                if default_renderer == display_name:
+                    default_renderer = name
+                    break
+            else:
+                # Default renderer not found in available renderers
+                default_renderer = None
+
+        attrs = super(CreateUSDRender, self).get_pre_create_attr_defs()
+        attrs += [
+            EnumDef("renderer",
+                    label="Renderer",
+                    default=default_renderer,
+                    items=renderer_plugin_to_display_name),
+        ]
+
+        return attrs + self.get_instance_attr_defs()

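Note on the `farm_split` target configured above: the export job writes `$HIP/render/usd/$HIPNAME/$OS/__render__.usd` and a second farm job renders that file with Houdini's standalone `husk`. A minimal sketch of that second step, assuming `husk` is on PATH; the flag spellings are assumptions, not taken from this commit (check `husk --help` for your Houdini build):

    import subprocess

    def render_exported_usd(usd_file, output, start_frame, frame_count):
        # Hypothetical husk invocation for the split render job
        subprocess.check_call([
            "husk",
            "-f", str(start_frame),   # first frame to render
            "-n", str(frame_count),   # number of frames
            "-o", output,             # e.g. "render/beauty.$F4.exr"
            usd_file,                 # the exported __render__.usd
        ])
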
@@ -10,7 +10,8 @@ class CollectFarmInstances(plugin.HoudiniInstancePlugin):
                 "karma_rop",
                 "redshift_rop",
                 "arnold_rop",
-                "vray_rop"]
+                "vray_rop",
+                "usdrender"]

     targets = ["local", "remote"]
     label = "Collect farm instances"

@@ -4,9 +4,10 @@ This will add additional families to different instance based on
 the creator_identifier parameter.
 """
 import pyblish.api
+from ayon_houdini.api import plugin


-class CollectPointcacheType(pyblish.api.InstancePlugin):
+class CollectPointcacheType(plugin.HoudiniInstancePlugin):
     """Collect data type for different instances."""

     order = pyblish.api.CollectorOrder

@@ -1,152 +0,0 @@
-import hou
-import pyblish.api
-from ayon_core.pipeline import usdlib
-from ayon_houdini.api import lib, plugin
-import ayon_houdini.api.usd as hou_usdlib
-
-
-class CollectInstancesUsdLayered(plugin.HoudiniContextPlugin):
-    """Collect Instances from a ROP Network and its configured layer paths.
-
-    The output nodes of the ROP node will only be published when *any* of the
-    layers remain set to 'publish' by the user.
-
-    This works differently from most of our Avalon instances in the pipeline.
-    As opposed to storing `ayon.create.instance` as id on the node we store
-    `pyblish.avalon.usdlayered`.
-
-    Additionally this instance has no need for storing folder, product type,
-    product name or name on the nodes. Instead all information is retrieved
-    solely from the output filepath, which is an Avalon URI:
-        avalon://{folder}/{product}.{representation}
-
-    Each final ROP node is considered a dependency for any of the Configured
-    Save Path layers it sets along the way. As such, the instances shown in
-    the Pyblish UI are solely the configured layers. The encapsulating usd
-    files are generated whenever *any* of the dependencies is published.
-
-    These dependency instances are stored in:
-        instance.data["publishDependencies"]
-
-    """
-
-    order = pyblish.api.CollectorOrder - 0.01
-    label = "Collect Instances (USD Configured Layers)"
-
-    def process(self, context):
-
-        stage = hou.node("/stage")
-        if not stage:
-            # Likely Houdini version <18
-            return
-
-        nodes = stage.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop)
-        for node in nodes:
-
-            if not node.parm("id"):
-                continue
-
-            if node.evalParm("id") != "pyblish.avalon.usdlayered":
-                continue
-
-            has_product_type = node.evalParm("productType")
-            assert has_product_type, (
-                "'%s' is missing 'productType'" % node.name()
-            )
-
-            self.process_node(node, context)
-
-        def sort_by_family(instance):
-            """Sort by family"""
-            return instance.data.get(
-                "families",
-                instance.data.get("productType")
-            )
-
-        # Sort/grouped by family (preserving local index)
-        context[:] = sorted(context, key=sort_by_family)
-
-        return context
-
-    def process_node(self, node, context):
-
-        # Allow a single ROP node or a full ROP network of USD ROP nodes
-        # to be processed as a single entry that should "live together" on
-        # a publish.
-        if node.type().name() == "ropnet":
-            # All rop nodes inside ROP Network
-            ropnodes = node.recursiveGlob("*", filter=hou.nodeTypeFilter.Rop)
-        else:
-            # A single node
-            ropnodes = [node]
-
-        data = lib.read(node)
-
-        # Don't use the explicit "colorbleed.usd.layered" family for publishing
-        # instead use the "colorbleed.usd" family to integrate.
-        data["publishFamilies"] = ["colorbleed.usd"]
-
-        # For now group ALL of them into USD Layer product group
-        # Allow this product to be grouped into a USD Layer on creation
-        data["productGroup"] = "USD Layer"
-
-        instances = list()
-        dependencies = []
-        for ropnode in ropnodes:
-
-            # Create a dependency instance per ROP Node.
-            lopoutput = ropnode.evalParm("lopoutput")
-            dependency_save_data = self.get_save_data(lopoutput)
-            dependency = context.create_instance(dependency_save_data["name"])
-            dependency.append(ropnode)
-            dependency.data.update(data)
-            dependency.data.update(dependency_save_data)
-            dependency.data["productType"] = "colorbleed.usd.dependency"
-            dependency.data["optional"] = False
-            dependencies.append(dependency)
-
-            # Hide the dependency instance from the context
-            context.pop()
-
-            # Get all configured layers for this USD ROP node
-            # and create a Pyblish instance for each one
-            layers = hou_usdlib.get_configured_save_layers(ropnode)
-            for layer in layers:
-                save_path = hou_usdlib.get_layer_save_path(layer)
-                save_data = self.get_save_data(save_path)
-                if not save_data:
-                    continue
-                self.log.info(save_path)
-
-                instance = context.create_instance(save_data["name"])
-                instance[:] = [node]
-
-                # Set the instance data
-                instance.data.update(data)
-                instance.data.update(save_data)
-                instance.data["usdLayer"] = layer
-
-                instances.append(instance)
-
-        # Store the collected ROP node dependencies
-        self.log.debug("Collected dependencies: %s" % (dependencies,))
-        for instance in instances:
-            instance.data["publishDependencies"] = dependencies
-
-    def get_save_data(self, save_path):
-
-        # Resolve Avalon URI
-        uri_data = usdlib.parse_avalon_uri(save_path)
-        if not uri_data:
-            self.log.warning("Non Avalon URI Layer Path: %s" % save_path)
-            return {}
-
-        # Collect folder + product from URI
-        name = "{product[name]} ({folder[path]})".format(**uri_data)
-        fname = "{folder[path]}_{product[name]}.{ext}".format(**uri_data)
-
-        data = dict(uri_data)
-        data["usdSavePath"] = save_path
-        data["usdFilename"] = fname
-        data["name"] = name
-        return data

@@ -21,7 +21,8 @@ class CollectLocalRenderInstances(plugin.HoudiniInstancePlugin):
                 "karma_rop",
                 "redshift_rop",
                 "arnold_rop",
-                "vray_rop"]
+                "vray_rop",
+                "usdrender"]

     label = "Collect local render instances"

@@ -5,99 +5,70 @@
 import pxr.UsdRender

 import pyblish.api

 from ayon_houdini.api import plugin
-
-
-def get_var_changed(variable=None):
-    """Return changed variables and operators that use it.
-
-    Note: `varchange` hscript states that it forces a recook of the nodes
-        that use Variables. That was tested in Houdini 18.0.391.
-
-    Args:
-        variable (str, Optional): A specific variable to query the operators
-            for. When None is provided it will return all variables that have
-            had recent changes and require a recook. Defaults to None.
-
-    Returns:
-        dict: Variable that changed with the operators that use it.
-
-    """
-    cmd = "varchange -V"
-    if variable:
-        cmd += " {0}".format(variable)
-    output, _ = hou.hscript(cmd)
-
-    changed = {}
-    for line in output.split("Variable: "):
-        if not line.strip():
-            continue
-
-        split = line.split()
-        var = split[0]
-        operators = split[1:]
-        changed[var] = operators
-
-    return changed
+from ayon_houdini.api.usd import (
+    get_usd_render_rop_rendersettings
+)


 class CollectRenderProducts(plugin.HoudiniInstancePlugin):
-    """Collect USD Render Products."""
+    """Collect USD Render Products.
+
+    The render products are collected from the USD Render ROP node by
+    detecting what the selected Render Settings prim path is, then finding
+    those Render Settings in the USD Stage and collecting the targeted
+    Render Products and their expected filenames.
+
+    Note: "Product" here refers to a USD Render Product, not to an AYON
+        product.
+
+    """

     label = "Collect Render Products"
-    order = pyblish.api.CollectorOrder + 0.4
+    # This plugin should run after CollectUsdRender
+    # and before CollectLocalRenderInstances
+    order = pyblish.api.CollectorOrder + 0.04
     families = ["usdrender"]

     def process(self, instance):

+        rop_node = hou.node(instance.data["instance_node"])
         node = instance.data.get("output_node")
         if not node:
-            rop_path = instance.data["instance_node"].path()
-            raise RuntimeError(
-                "No output node found. Make sure to connect an "
+            rop_path = rop_node.path()
+            self.log.error(
+                "No output node found. Make sure to connect a valid "
                 "input to the USD ROP: %s" % rop_path
             )
+            return

-        # Workaround Houdini 18.0.391 bug where $HIPNAME doesn't automatically
-        # update after scene save.
-        if hou.applicationVersion() == (18, 0, 391):
-            self.log.debug(
-                "Checking for recook to workaround $HIPNAME refresh bug..."
-            )
-            changed = get_var_changed("HIPNAME").get("HIPNAME")
-            if changed:
-                self.log.debug("Recooking for $HIPNAME refresh bug...")
-                for operator in changed:
-                    hou.node(operator).cook(force=True)
-
-                # Make sure to recook any 'cache' nodes in the history chain
-                chain = [node]
-                chain.extend(node.inputAncestors())
-                for input_node in chain:
-                    if input_node.type().name() == "cache":
-                        input_node.cook(force=True)
-
-        stage = node.stage()
+        override_output_image = rop_node.evalParm("outputimage")

         filenames = []
-        for prim in stage.Traverse():
-
-            if not prim.IsA(pxr.UsdRender.Product):
+        files_by_product = {}
+        stage = node.stage()
+        for prim_path in self.get_render_products(rop_node, stage):
+            prim = stage.GetPrimAtPath(prim_path)
+            if not prim or not prim.IsA(pxr.UsdRender.Product):
+                self.log.warning("Found invalid render product path "
+                                 "configured in render settings that is not "
+                                 "a Render Product prim: %s", prim_path)
                 continue

+            render_product = pxr.UsdRender.Product(prim)
             # Get Render Product Name
-            product = pxr.UsdRender.Product(prim)
+            if override_output_image:
+                name = override_output_image
+            else:
+                # We force taking it from any random time sample as opposed
+                # to "default" that the USD API falls back to since that
+                # won't return time sampled values if they were set per
+                # time sample.
+                name = render_product.GetProductNameAttr().Get(time=0)

-            # We force taking it from any random time sample as opposed to
-            # "default" that the USD Api falls back to since that won't return
-            # time sampled values if they were set per time sample.
-            name = product.GetProductNameAttr().Get(time=0)
             dirname = os.path.dirname(name)
             basename = os.path.basename(name)

             dollarf_regex = r"(\$F([0-9]?))"
-            frame_regex = r"^(.+\.)([0-9]+)(\.[a-zA-Z]+)$"
             if re.match(dollarf_regex, basename):
                 # TODO: Confirm this is actually allowed in USD stages and HUSK
                 # Substitute $F

@@ -109,11 +80,28 @@ class CollectRenderProducts(plugin.HoudiniInstancePlugin):
                 filename_base = re.sub(dollarf_regex, replace, basename)
                 filename = os.path.join(dirname, filename_base)
             else:
+                # Last group of digits in the filename before the extension.
+                # The frame number must always be prefixed by underscore or
+                # dot. Allow product names like:
+                # - filename.1001.exr
+                # - filename.1001.aov.exr
+                # - filename.aov.1001.exr
+                # - filename_1001.exr
+                frame_regex = r"(.*[._])(\d+)(?!.*\d)(.*\.[A-Za-z0-9]+$)"
+
+                # It may be the case that the current USD stage has stored
+                # product name samples (e.g. when loading a USD file with
+                # time samples) where it does not refer to e.g. $F4. And thus
+                # it refers to the actual path like /path/to/frame.1001.exr
+                # TODO: It would be better to maybe sample the product name
+                #   attribute's `ValueMightBeTimeVarying` and if so get it
+                #   per frame using `attr.Get(time=frame)` to ensure we get
+                #   the actual product name set at that point in time?
                 # Substitute basename.0001.ext
                 def replace(match):
-                    prefix, frame, ext = match.groups()
+                    head, frame, tail = match.groups()
                     padding = "#" * len(frame)
-                    return prefix + padding + ext
+                    return head + padding + tail

                 filename_base = re.sub(frame_regex, replace, basename)
                 filename = os.path.join(dirname, filename_base)

@@ -126,8 +114,135 @@ class CollectRenderProducts(plugin.HoudiniInstancePlugin):

             filenames.append(filename)

             prim_path = str(prim.GetPath())
-            self.log.info("Collected %s name: %s" % (prim_path, filename))
+            # TODO: Improve AOV name detection logic
+            aov_identifier = self.get_aov_identifier(render_product)
+            if aov_identifier in files_by_product:
+                self.log.error(
+                    "Multiple render products are identified as the same AOV "
+                    "which means one of the two will not be ingested during "
+                    "publishing. AOV: '%s'", aov_identifier
+                )
+                self.log.warning("Skipping Render Product: %s", render_product)
+
+            files_by_product[aov_identifier] = self.generate_expected_files(
+                instance,
+                filename
+            )
+
+            aov_label = f"'{aov_identifier}' aov in " if aov_identifier else ""
+            self.log.debug("Render Product %s%s", aov_label, prim_path)
+            self.log.debug("Product name: %s", filename)

         # Filenames for Deadline
         instance.data["files"] = filenames
+        instance.data.setdefault("expectedFiles", []).append(files_by_product)
+
+        # Farm publishing 'add review' logic expects this key to exist and
+        # be True if the render is a multipart EXR.
+        # Otherwise it will most probably fail the AOV filter as multipart
+        # EXR files mostly don't include the AOV name in the file path.
+        # Assume multipartExr is True as long as we have at most one AOV.
+        instance.data["multipartExr"] = len(files_by_product) <= 1
+
+    def get_aov_identifier(self, render_product):
+        """Return the AOV identifier for a Render Product.
+
+        A Render Product does not really define what 'AOV' it is; it
+        defines the product name (output path) and the render vars to
+        include.
+
+        So we need to define what in particular of a `UsdRenderProduct`
+        we use to separate the AOVs (and thus apply sub-grouping with).
+
+        For now, any Render Product that refers to a single rendervar is
+        considered a separate AOV named after that rendervar prim;
+        otherwise the render product is assumed to be a combined
+        multilayer 'main' layer.
+
+        Args:
+            render_product (pxr.UsdRender.Product): The Render Product
+
+        Returns:
+            str: The AOV identifier
+
+        """
+        targets = render_product.GetOrderedVarsRel().GetTargets()
+        if len(targets) > 1:
+            # Cryptomattes usually are combined render vars, for example:
+            # - crypto_asset, crypto_asset01, crypto_asset02, crypto_asset03
+            # - crypto_object, crypto_object01, etc.
+            # These still refer to the same AOV so we take the common prefix,
+            # e.g. `crypto_asset` or `crypto` (if multiple are combined)
+            if all(target.name.startswith("crypto") for target in targets):
+                start = os.path.commonprefix(
+                    [target.name for target in targets])
+                return start.rstrip("_")  # remove any trailing _
+
+            # Main layer
+            return ""
+        elif len(targets) == 1:
+            # AOV for a single var
+            return targets[0].name
+        else:
+            self.log.warning(
+                f"Render product has no rendervars set: {render_product}")
+            return ""
+
+    def get_render_products(self, usdrender_rop, stage):
+        """Return the render products in the defined render settings.
+
+        Args:
+            usdrender_rop (hou.Node): The Houdini USD Render ROP node.
+            stage (pxr.Usd.Stage): The USD stage to find the render settings
+                in. This is usually the stage from the LOP path the USD
+                Render ROP node refers to.
+
+        Returns:
+            List[Sdf.Path]: Render Product paths enabled in the render
+                settings.
+
+        """
+        render_settings = get_usd_render_rop_rendersettings(usdrender_rop,
+                                                            stage,
+                                                            logger=self.log)
+        if not render_settings:
+            return []
+
+        return render_settings.GetProductsRel().GetTargets()
+
+    def generate_expected_files(self, instance, path):
+        """Generate the full sequence of expected files from a filepath.
+
+        The filepath should have a `#` token as placeholder for frame
+        numbers or should have `%04d` or `%d` placeholders. The `#`
+        characters indicate frame number and padding, e.g. `####` becomes
+        `0001` for frame 1.
+
+        Args:
+            instance (pyblish.api.Instance): The publish instance.
+            path (str): The filepath to generate the list of output files
+                for.
+
+        Returns:
+            list: Filepath per frame.
+
+        """
+        folder = os.path.dirname(path)
+        filename = os.path.basename(path)
+
+        if "#" in filename:
+            def replace(match):
+                return "%0{}d".format(len(match.group()))
+
+            filename = re.sub("#+", replace, filename)
+
+        if "%" not in filename:
+            # Not a sequence, single file
+            return path
+
+        expected_files = []
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
+
+        for frame in range(int(start), (int(end) + 1)):
+            expected_files.append(
+                os.path.join(folder, (filename % frame)).replace("\\", "/"))
+
+        return expected_files

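To make the padding handling in `generate_expected_files` above concrete, here is a small standalone sketch of the same expansion (filenames hypothetical):

    import re

    def expand_padding(path, start, end):
        # "####" -> "%04d", then format once per frame
        path = re.sub("#+", lambda m: "%0{}d".format(len(m.group())), path)
        if "%" not in path:
            return [path]  # not a sequence, single file
        return [path % frame for frame in range(start, end + 1)]

    # expand_padding("render/beauty.####.exr", 1001, 1003) returns:
    # ["render/beauty.1001.exr", "render/beauty.1002.exr",
    #  "render/beauty.1003.exr"]
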
@@ -15,7 +15,8 @@ class CollectReviewableInstances(plugin.HoudiniInstancePlugin):
                 "karma_rop",
                 "redshift_rop",
                 "arnold_rop",
-                "vray_rop"]
+                "vray_rop",
+                "usdrender"]

     def process(self, instance):
         creator_attribute = instance.data["creator_attributes"]

@@ -1,122 +0,0 @@
-import pyblish.api
-import ayon_api
-
-from ayon_core.pipeline import usdlib, KnownPublishError
-
-from ayon_houdini.api import plugin
-
-
-class CollectUsdBootstrap(plugin.HoudiniInstancePlugin):
-    """Collect special Asset/Shot bootstrap instances if those are needed.
-
-    Some specific products are intended to be part of the default structure
-    of an "Asset" or "Shot" in our USD pipeline. For example, for an Asset
-    we layer a Model and Shade USD file over each other and expose that in
-    an Asset USD file, ready to use.
-
-    On the first publish of any of the components of an Asset or Shot the
-    missing pieces are bootstrapped and generated in the pipeline too. This
-    means that on the very first publish of your model the Asset USD file
-    will exist too.
-
-    """
-
-    order = pyblish.api.CollectorOrder + 0.35
-    label = "Collect USD Bootstrap"
-    families = ["usd", "usd.layered"]
-
-    def process(self, instance):
-
-        # Detect whether the current product is a product in a pipeline
-        def get_bootstrap(instance):
-            instance_product_name = instance.data["productName"]
-            for name, layers in usdlib.PIPELINE.items():
-                if instance_product_name in set(layers):
-                    return name  # e.g. "asset"
-            else:
-                return
-
-        bootstrap = get_bootstrap(instance)
-        if bootstrap:
-            self.add_bootstrap(instance, bootstrap)
-
-        # Check if any of the dependencies requires a bootstrap
-        for dependency in instance.data.get("publishDependencies", list()):
-            bootstrap = get_bootstrap(dependency)
-            if bootstrap:
-                self.add_bootstrap(dependency, bootstrap)
-
-    def add_bootstrap(self, instance, bootstrap):
-
-        self.log.debug("Add bootstrap for: %s" % bootstrap)
-
-        project_name = instance.context.data["projectName"]
-        folder_path = instance.data["folderPath"]
-        folder_name = folder_path.rsplit("/", 1)[-1]
-        folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
-        if not folder_entity:
-            raise KnownPublishError(
-                "Folder '{}' does not exist".format(folder_path)
-            )
-
-        # Check which are not about to be created and don't exist yet
-        required = {"shot": ["usdShot"], "asset": ["usdAsset"]}.get(bootstrap)
-
-        require_all_layers = instance.data.get("requireAllLayers", False)
-        if require_all_layers:
-            # USD files load fine in usdview and Houdini even when layered or
-            # referenced files do not exist. So by default we don't require
-            # the layers to exist.
-            layers = usdlib.PIPELINE.get(bootstrap)
-            if layers:
-                required += list(layers)
-
-        self.log.debug("Checking required bootstrap: %s" % required)
-        for product_name in required:
-            if self._product_exists(
-                project_name, instance, product_name, folder_entity
-            ):
-                continue
-
-            self.log.debug(
-                "Creating {0} USD bootstrap: {1} {2}".format(
-                    bootstrap, folder_path, product_name
-                )
-            )
-
-            product_type = "usd.bootstrap"
-            new = instance.context.create_instance(product_name)
-            new.data["productName"] = product_name
-            new.data["label"] = "{0} ({1})".format(product_name, folder_name)
-            new.data["productType"] = product_type
-            new.data["family"] = product_type
-            new.data["comment"] = "Automated bootstrap USD file."
-            new.data["publishFamilies"] = ["usd"]
-
-            # Do not allow the user to toggle this instance
-            new.data["optional"] = False
-
-            # Copy some data from the instance for which we bootstrap
-            for key in ["folderPath"]:
-                new.data[key] = instance.data[key]
-
-    def _product_exists(
-        self, project_name, instance, product_name, folder_entity
-    ):
-        """Return whether product exists in current context or in database."""
-        # Allow it to be created during this publish session
-        context = instance.context
-
-        folder_path = folder_entity["path"]
-        for inst in context:
-            if (
-                inst.data["productName"] == product_name
-                and inst.data["folderPath"] == folder_path
-            ):
-                return True
-
-        # Or, if they already exist in the database we can
-        # skip them too.
-        if ayon_api.get_product_by_name(
-            project_name, product_name, folder_entity["id"], fields={"id"}
-        ):
-            return True
-        return False

@@ -1,18 +1,66 @@
+import copy
 import os
-import hou
+import re

 import pyblish.api

+from ayon_core.pipeline.create import get_product_name
 from ayon_houdini.api import plugin
 import ayon_houdini.api.usd as usdlib

+import hou
+
+
+def copy_instance_data(instance_src, instance_dest, attr):
+    """Copy instance data from `src` instance to `dest` instance.
+
+    Examples:
+        >>> copy_instance_data(instance_src, instance_dest,
+        >>>                    attr="publish_attributes.CollectRopFrameRange")
+
+    Arguments:
+        instance_src (pyblish.api.Instance): Source instance to copy from.
+        instance_dest (pyblish.api.Instance): Target instance to copy to.
+        attr (str): Attribute on the source instance to copy. This can be
+            a nested key joined by `.` to only copy sub entries of
+            dictionaries in the source instance's data.
+
+    Raises:
+        KeyError: If the key does not exist on the source instance.
+        AssertionError: If a parent key already exists on the destination
+            instance but is not of the correct type (= is not a dict).
+
+    """
+    src_data = instance_src.data
+    dest_data = instance_dest.data
+    keys = attr.split(".")
+    for i, key in enumerate(keys):
+        if key not in src_data:
+            break
+
+        src_value = src_data[key]
+        if i != len(keys) - 1:
+            dest_data = dest_data.setdefault(key, {})
+            assert isinstance(dest_data, dict), "Destination must be a dict"
+            src_data = src_value
+        else:
+            # Last iteration - assign the value
+            dest_data[key] = copy.deepcopy(src_value)
+
+
 class CollectUsdLayers(plugin.HoudiniInstancePlugin):
     """Collect the USD Layers that have configured save paths."""

-    order = pyblish.api.CollectorOrder + 0.35
+    order = pyblish.api.CollectorOrder + 0.25
     label = "Collect USD Layers"
-    families = ["usd"]
+    families = ["usdrop"]

     def process(self, instance):
+        # TODO: Replace this with a Hidden Creator so we collect these BEFORE
+        #   starting the publish so the user sees them before publishing
+        #   - however the user should not be able to individually
+        #   enable/disable this from the main ROP it's created from?

         output = instance.data.get("output_node")
         if not output:

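A plain-dict sketch of what `copy_instance_data` above does with a nested, dot-joined key (the data values are hypothetical):

    import copy

    def copy_nested(src_data, dest_data, attr):
        # Mirrors copy_instance_data, on bare dicts instead of instances
        keys = attr.split(".")
        for i, key in enumerate(keys):
            if key not in src_data:
                break
            if i != len(keys) - 1:
                dest_data = dest_data.setdefault(key, {})
                src_data = src_data[key]
            else:
                # Last key - deep-copy only the leaf value
                dest_data[key] = copy.deepcopy(src_data[key])

    src = {"publish_attributes":
           {"CollectRopFrameRange": {"use_handles": True}}}
    dest = {}
    copy_nested(src, dest, "publish_attributes.CollectRopFrameRange.use_handles")
    # dest now equals src for that one nested key only
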
@@ -29,13 +77,16 @@ class CollectUsdLayers(plugin.HoudiniInstancePlugin):
             creator = info.customData.get("HoudiniCreatorNode")

             self.log.debug("Found configured save path: "
-                           "%s -> %s" % (layer, save_path))
+                           "%s -> %s", layer, save_path)

             # Log node that configured this save path
-            if creator:
-                self.log.debug("Created by: %s" % creator)
+            creator_node = hou.nodeBySessionId(creator) if creator else None
+            if creator_node:
+                self.log.debug(
+                    "Created by: %s", creator_node.path()
+                )

-            save_layers.append((layer, save_path))
+            save_layers.append((layer, save_path, creator_node))

         # Store on the instance
         instance.data["usdConfiguredSavePaths"] = save_layers

@@ -43,23 +94,65 @@ class CollectUsdLayers(plugin.HoudiniInstancePlugin):
         # Create configured layer instances so the user can disable updating
         # specific configured layers for publishing.
         context = instance.context
-        product_type = "usdlayer"
-        for layer, save_path in save_layers:
+        for layer, save_path, creator_node in save_layers:
             name = os.path.basename(save_path)
-            label = "{0} -> {1}".format(instance.data["name"], name)
             layer_inst = context.create_instance(name)

-            layer_inst.data["productType"] = product_type
-            layer_inst.data["family"] = product_type
-            layer_inst.data["families"] = [product_type]
-            layer_inst.data["productName"] = "__stub__"
-            layer_inst.data["label"] = label
-            layer_inst.data["folderPath"] = instance.data["folderPath"]
-            layer_inst.data["instance_node"] = instance.data["instance_node"]
             # include same USD ROP
             layer_inst.append(rop_node)
             # include layer data
             layer_inst.append((layer, save_path))

-            # Allow this product to be grouped into a USD Layer on creation
-            layer_inst.data["productGroup"] = "USD Layer"
+            staging_dir, fname = os.path.split(save_path)
+            fname_no_ext, ext = os.path.splitext(fname)
+
+            variant = fname_no_ext
+
+            # Strip off any trailing version number in the form of _v[0-9]+
+            variant = re.sub("_v[0-9]+$", "", variant)
+
+            layer_inst.data["usd_layer"] = layer
+            layer_inst.data["usd_layer_save_path"] = save_path
+
+            project_name = context.data["projectName"]
+            variant_base = instance.data["variant"]
+            subset = get_product_name(
+                project_name=project_name,
+                # TODO: This should use task from `instance`
+                task_name=context.data["anatomyData"]["task"]["name"],
+                task_type=context.data["anatomyData"]["task"]["type"],
+                host_name=context.data["hostName"],
+                product_type="usd",
+                variant=variant_base + "_" + variant,
+                project_settings=context.data["project_settings"]
+            )
+
+            label = "{0} -> {1}".format(instance.data["name"], subset)
+            family = "usd"
+            layer_inst.data["family"] = family
+            layer_inst.data["families"] = [family]
+            layer_inst.data["subset"] = subset
+            layer_inst.data["label"] = label
+            layer_inst.data["asset"] = instance.data["asset"]
+            layer_inst.data["task"] = instance.data.get("task")
+            layer_inst.data["instance_node"] = instance.data["instance_node"]
+            layer_inst.data["render"] = False
+            layer_inst.data["output_node"] = creator_node
+
+            # Inherit "use handles" from the source instance
+            # TODO: Do we want to maybe copy full `publish_attributes`
+            #   instead?
+            copy_instance_data(
+                instance, layer_inst,
+                attr="publish_attributes.CollectRopFrameRange.use_handles"
+            )
+
+            # Allow this subset to be grouped into a USD Layer on creation
+            layer_inst.data["subsetGroup"] = "USD Layer"

+            # For now just assume the representation will get published
+            representation = {
+                "name": "usd",
+                "ext": ext.lstrip("."),
+                "stagingDir": staging_dir,
+                "files": fname
+            }
+            layer_inst.data.setdefault("representations", []).append(
+                representation)

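As a quick illustration of the variant naming introduced above (paths hypothetical): a configured save path strips its extension and trailing version suffix before being appended to the source instance's variant:

    import os
    import re

    save_path = "/proj/sh010/usd/asset_geo_v003.usd"  # hypothetical
    fname_no_ext = os.path.splitext(os.path.basename(save_path))[0]
    variant = re.sub("_v[0-9]+$", "", fname_no_ext)
    # variant == "asset_geo"; with a source variant of "Main" the product
    # name is then derived from "Main_asset_geo" via get_product_name()
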
@@ -0,0 +1,243 @@
+import re
+
+import os
+import glob
+from typing import List, Optional
+import dataclasses
+
+import pyblish.api
+import hou
+from pxr import Sdf
+
+from ayon_houdini.api import plugin
+
+
+# Colorspace attributes differ per renderer implementation in the USD data.
+# Some have dedicated input names like Arnold and Redshift, whereas others
+# like MaterialX store `colorSpace` metadata on the asset property itself.
+# See the `get_colorspace` method on the plug-in for more details.
+COLORSPACE_ATTRS = [
+    "inputs:color_space",        # Image Vop (arnold::image)
+    "inputs:tex0_colorSpace",    # RS Texture Vop (redshift::TextureSampler)
+    # TODO: USD UV Texture VOP doesn't seem to use colorspaces from the
+    #   actual OCIO configuration so we skip these for now. Especially since
+    #   the texture is usually used for 'preview' purposes anyway.
+    # "inputs:sourceColorSpace",  # USD UV Texture Vop (usduvtexture::2.0)
+]
+
+
+@dataclasses.dataclass
+class Resource:
+    attribute: str           # property path
+    source: str              # unresolved source path
+    files: List[str]         # resolved list of files, e.g. multiple for <UDIM>
+    color_space: str = None  # colorspace of the resource
+
+
+def get_layer_property_paths(layer: Sdf.Layer) -> List[Sdf.Path]:
+    """Return all property paths from a layer"""
+    paths = []
+
+    def collect_paths(path):
+        if not path.IsPropertyPath():
+            return
+        paths.append(path)
+
+    layer.Traverse("/", collect_paths)
+
+    return paths
+
+
+class CollectUsdLookAssets(plugin.HoudiniInstancePlugin):
+    """Collect all assets introduced by the look.
+
+    We are looking to collect e.g. all texture resources so we can transfer
+    them with the publish and write them to the publish location.
+
+    If possible, we'll also try to identify the colorspace of the asset.
+
+    """
+    # TODO: Implement $F frame support (per frame values)
+    # TODO: If an input image is already a published texture or resource
+    #   then preferably we'd keep the link intact and NOT update it. We can
+    #   just start ignoring AYON URIs
+
+    label = "Collect USD Look Assets"
+    order = pyblish.api.CollectorOrder
+    hosts = ["houdini"]
+    families = ["look"]
+
+    exclude_suffixes = [".usd", ".usda", ".usdc", ".usdz", ".abc", ".vbd"]
+
+    def process(self, instance):
+
+        rop: hou.RopNode = hou.node(instance.data.get("instance_node"))
+        if not rop:
+            return
+
+        lop_node: hou.LopNode = instance.data.get("output_node")
+        if not lop_node:
+            return
+
+        above_break_layers = set(lop_node.layersAboveLayerBreak())
+
+        stage = lop_node.stage()
+        layers = [
+            layer for layer
+            in stage.GetLayerStack(includeSessionLayers=False)
+            if layer.identifier not in above_break_layers
+        ]
+
+        instance_resources = self.get_layer_assets(layers)
+
+        # Define a relative asset remapping for the USD Extractor so that
+        # any textures are remapped to their 'relative' publish path.
+        # All textures will be in a relative `./resources/` folder.
+        remap = {}
+        for resource in instance_resources:
+            source = resource.source
+            name = os.path.basename(source)
+            remap[os.path.normpath(source)] = f"./resources/{name}"
+        instance.data["assetRemap"] = remap
+
+        # Store resources on instance
+        resources = instance.data.setdefault("resources", [])
+        for resource in instance_resources:
+            resources.append(dataclasses.asdict(resource))
+
+        # Log all collected textures
+        # Note: It is fine for a single texture to be included more than
+        # once, where even one of them does not have a color space set but
+        # the other does. For example, there may be a USD UV Texture just
+        # for a GL preview material which does not specify an OCIO color
+        # space.
+        all_files = []
+        for resource in instance_resources:
+            all_files.append(f"{resource.attribute}:")
+
+            for filepath in resource.files:
+                if resource.color_space:
+                    file_label = f"- {filepath} ({resource.color_space})"
+                else:
+                    file_label = f"- {filepath}"
+                all_files.append(file_label)
+
+        self.log.info(
+            "Collected assets:\n{}".format(
+                "\n".join(all_files)
+            )
+        )
+
+    def get_layer_assets(self, layers: List[Sdf.Layer]) -> List[Resource]:
+        # TODO: Correctly resolve paths using the Asset Resolver.
+        #   Preferably this would use one cached resolver context to
+        #   optimize the path resolving.
+        # TODO: Fix for timesamples - if timesamples, then `.default` might
+        #   not be authored on the spec
+
+        resources: List[Resource] = list()
+        for layer in layers:
+            for path in get_layer_property_paths(layer):
+
+                spec = layer.GetAttributeAtPath(path)
+                if not spec:
+                    continue
+
+                if spec.typeName != "asset":
+                    continue
+
+                asset: Sdf.AssetPath = spec.default
+                base, ext = os.path.splitext(asset.path)
+                if ext in self.exclude_suffixes:
+                    continue
+
+                filepath = asset.path.replace("\\", "/")
+
+                # Expand <UDIM> to all matching files on disk
+                # TODO: Add support for `<TILE>`
+                # TODO: Add support for `<ATTR:name INDEX:name DEFAULT:value>`
+                if "<UDIM>" in filepath.upper():
+                    pattern = re.sub(
+                        r"<UDIM>",
+                        # UDIM is always four digits
+                        "[0-9]" * 4,
+                        filepath,
+                        flags=re.IGNORECASE
+                    )
+                    files = glob.glob(pattern)
+                else:
+                    # Single file
+                    files = [filepath]
+
+                # Detect the colorspace of the input asset property
+                colorspace = self.get_colorspace(spec)
+
+                resource = Resource(
+                    attribute=path.pathString,
+                    source=asset.path,
+                    files=files,
+                    color_space=colorspace
+                )
+                resources.append(resource)
+
+        # Sort by filepath
+        resources.sort(key=lambda r: r.source)
+
+        return resources
+
+    def get_colorspace(self, spec: Sdf.AttributeSpec) -> Optional[str]:
+        """Return colorspace for an Asset attribute spec.
+
+        There is currently no USD standard on how colorspaces should be
+        represented for shaders or asset properties - each renderer's
+        material implementations seem to currently use their own way of
+        specifying the colorspace on the shader. As such, this comes with
+        some guesswork.
+
+        Args:
+            spec (Sdf.AttributeSpec): The asset type attribute to retrieve
+                the colorspace for.
+
+        Returns:
+            Optional[str]: The colorspace for the given attribute, if any.
+
+        """
+        # TODO: Support Karma, V-Ray, Renderman texture colorspaces
+        # MaterialX image defines colorspace as custom info on the attribute
+        if spec.HasInfo("colorSpace"):
+            return spec.GetInfo("colorSpace")
+
+        # Arnold materials define the colorspace as a separate primvar
+        # TODO: Fix for timesamples - if timesamples, then `.default` might
+        #   not be authored on the spec
+        prim_path = spec.path.GetPrimPath()
+        layer = spec.layer
+        for name in COLORSPACE_ATTRS:
+            colorspace_property_path = prim_path.AppendProperty(name)
+            colorspace_spec = layer.GetAttributeAtPath(
+                colorspace_property_path
+            )
+            if colorspace_spec and colorspace_spec.default:
+                return colorspace_spec.default
+
+
+class CollectUsdLookResourceTransfers(plugin.HoudiniInstancePlugin):
+    """Define the publish direct file transfers for any found resources.
+
+    This ensures that any source texture will end up in the published look
+    in the `resourcesDir`.
+
+    """
+    label = "Collect USD Look Transfers"
+    order = pyblish.api.CollectorOrder + 0.496
+    hosts = ["houdini"]
+    families = ["look"]
+
+    def process(self, instance):
+
+        resources_dir = instance.data["resourcesDir"]
+        transfers = instance.data.setdefault("transfers", [])
+        for resource in instance.data.get("resources", []):
+            for src in resource["files"]:
+                dest = os.path.join(resources_dir, os.path.basename(src))
+                transfers.append((src, dest))
+                self.log.debug("Registering transfer: %s -> %s", src, dest)

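The `<UDIM>` expansion in `get_layer_assets` above is a plain glob over four digits; standalone (texture names hypothetical):

    import glob
    import re

    filepath = "textures/wall_diffuse.<UDIM>.exr"  # hypothetical
    pattern = re.sub(r"<UDIM>", "[0-9]" * 4, filepath, flags=re.IGNORECASE)
    # pattern == "textures/wall_diffuse.[0-9][0-9][0-9][0-9].exr"
    files = glob.glob(pattern)  # e.g. the 1001, 1002, ... tiles on disk
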
@@ -0,0 +1,86 @@
+import os
+import re
+
+import hou
+import pyblish.api
+
+from ayon_houdini.api import (
+    colorspace,
+    plugin
+)
+from ayon_houdini.api.lib import (
+    evalParmNoFrame,
+    get_color_management_preferences
+)
+
+
+class CollectUsdRender(plugin.HoudiniInstancePlugin):
+    """Collect publishing data for the USD Render ROP.
+
+    If the `rendercommand` parm is disabled (and thus no render is triggered
+    by the USD render ROP) it is assumed to be a "Split Render" job where
+    the farm will get an additional render job after the USD file is
+    extracted.
+
+    Provides:
+        instance    -> ifdFile
+        instance    -> colorspaceConfig
+        instance    -> colorspaceDisplay
+        instance    -> colorspaceView
+
+    """
+
+    label = "Collect USD Render Rop"
+    order = pyblish.api.CollectorOrder
+    hosts = ["houdini"]
+    families = ["usdrender"]
+
+    def process(self, instance):
+
+        rop = hou.node(instance.data.get("instance_node"))
+
+        if instance.data["splitRender"]:
+            # USD file output
+            lop_output = evalParmNoFrame(
+                rop, "lopoutput", pad_character="#"
+            )
+
+            # The file is usually relative to the Output Processor's 'Save
+            # to Directory' which forces all USD files to end up in that
+            # directory.
+            # TODO: It is possible for a user to disable this
+            # TODO: When enabled I think only the basename of the
+            #   `lopoutput` parm is preserved, any parent folders defined
+            #   are likely ignored
+            folder = evalParmNoFrame(
+                rop, "savetodirectory_directory", pad_character="#"
+            )
+
+            export_file = os.path.join(folder, lop_output)
+
+            # Substitute any # characters in the name back to their $F4
+            # equivalent
+            def replace_to_f(match):
+                number = len(match.group(0))
+                if number <= 1:
+                    number = ""  # make it just $F, not $F1 or $F0
+                return "$F{}".format(number)
+
+            export_file = re.sub("#+", replace_to_f, export_file)
+            self.log.debug(
+                "Found export file: {}".format(export_file)
+            )
+            instance.data["ifdFile"] = export_file
+
+            # The render job is not frame dependent but fully dependent on
+            # the export job having been completed, since the extracted
+            # file is a single file.
+            if "$F" not in export_file:
+                instance.data["splitRenderFrameDependent"] = False
+
+        # Update the colorspace data
+        colorspace_data = get_color_management_preferences()
+        instance.data["colorspaceConfig"] = colorspace_data["config"]
+        instance.data["colorspaceDisplay"] = colorspace_data["display"]
+        instance.data["colorspaceView"] = colorspace_data["view"]
+
+        # Stub required data for the Submit Publish Job publish plug-in
+        instance.data["attachTo"] = []
+        instance.data["renderProducts"] = colorspace.ARenderProduct()

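The `#`-to-`$F` substitution in `CollectUsdRender` above maps padded placeholders back to Houdini frame variables, for example:

    import re

    def replace_to_f(match):
        number = len(match.group(0))
        if number <= 1:
            number = ""  # just $F, not $F1
        return "$F{}".format(number)

    # re.sub("#+", replace_to_f, "usd/shot.####.usd") -> "usd/shot.$F4.usd"
    # re.sub("#+", replace_to_f, "usd/shot.#.usd")    -> "usd/shot.$F.usd"
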
@@ -15,13 +15,20 @@ class ExtractRender(plugin.HoudiniExtractorPlugin):
                 "karma_rop",
                 "redshift_rop",
                 "arnold_rop",
-                "vray_rop"]
+                "vray_rop",
+                "usdrender"]

     def process(self, instance):
         creator_attribute = instance.data["creator_attributes"]
         product_type = instance.data["productType"]
         rop_node = hou.node(instance.data.get("instance_node"))

+        # TODO: This section goes against pyblish concepts, where pyblish
+        #   plugins should not change the state of the scene. However, in
+        #   the AYON publisher tool users can have options and these options
+        #   should somehow be synced with the Houdini nodes.
+        #   More info: https://github.com/ynput/ayon-core/issues/417
+
         # Align split parameter value on rop node to the render target.
         if instance.data["splitRender"]:
             if product_type == "arnold_rop":

@@ -32,6 +39,8 @@ class ExtractRender(plugin.HoudiniExtractorPlugin):
                 rop_node.setParms({"RS_archive_enable": 1})
             elif product_type == "vray_rop":
                 rop_node.setParms({"render_export_mode": "2"})
+            elif product_type == "usdrender":
+                rop_node.setParms({"runcommand": 0})
         else:
             if product_type == "arnold_rop":
                 rop_node.setParms({"ar_ass_export_enable": 0})

@@ -41,6 +50,8 @@ class ExtractRender(plugin.HoudiniExtractorPlugin):
                 rop_node.setParms({"RS_archive_enable": 0})
             elif product_type == "vray_rop":
                 rop_node.setParms({"render_export_mode": "1"})
+            elif product_type == "usdrender":
+                rop_node.setParms({"runcommand": 1})

         if instance.data.get("farm"):
             self.log.debug("Render should be processed on farm, skipping local render.")

@@ -1,19 +1,21 @@
 import os
-import hou
+from typing import List, AnyStr

 import pyblish.api

+from ayon_core.pipeline.publish.lib import get_instance_expected_output_path
 from ayon_houdini.api import plugin
 from ayon_houdini.api.lib import render_rop
+from ayon_houdini.api.usd import remap_paths

+import hou


 class ExtractUSD(plugin.HoudiniExtractorPlugin):

     order = pyblish.api.ExtractorOrder
     label = "Extract USD"
-    families = ["usd",
-                "usdModel",
-                "usdSetDress"]
+    families = ["usdrop"]

     def process(self, instance):

@@ -27,7 +29,18 @@ class ExtractUSD(plugin.HoudiniExtractorPlugin):

         self.log.info("Writing USD '%s' to '%s'" % (file_name, staging_dir))

-        render_rop(ropnode)
+        mapping = self.get_source_to_publish_paths(instance.context)
+
+        # Allow instance-specific path remapping overrides, e.g. changing
+        # paths on used resources/textures for looks
+        instance_mapping = instance.data.get("assetRemap", {})
+        if instance_mapping:
+            self.log.debug("Instance-specific asset path remapping:\n"
+                           f"{instance_mapping}")
+            mapping.update(instance_mapping)
+
+        with remap_paths(ropnode, mapping):
+            render_rop(ropnode)

         assert os.path.exists(output), "Output does not exist: %s" % output

@@ -41,3 +54,51 @@ class ExtractUSD(plugin.HoudiniExtractorPlugin):
             "stagingDir": staging_dir,
         }
         instance.data["representations"].append(representation)

+    def get_source_to_publish_paths(self, context):
+        """Define a mapping of all current instances in the context from
+        source file to publish file so this can be used on the USD save to
+        remap asset layer paths on publish via the AyonRemapPaths output
+        processor."""
+
+        mapping = {}
+        for instance in context:
+            if not instance.data.get("active", True):
+                continue
+
+            if not instance.data.get("publish", True):
+                continue
+
+            for repre in instance.data.get("representations", []):
+                name = repre.get("name")
+                ext = repre.get("ext")
+
+                # TODO: The remapping might need to get more involved if the
+                #   asset paths that are set use e.g. $F
+                # TODO: If the representation has multiple files we might
+                #   need to define the path remapping per file of the
+                #   sequence
+                path = get_instance_expected_output_path(
+                    instance, representation_name=name, ext=ext
+                )
+                for source_path in get_source_paths(instance, repre):
+                    source_path = os.path.normpath(source_path)
+                    mapping[source_path] = path
+
+        return mapping
+
+
+def get_source_paths(
+        instance: pyblish.api.Instance,
+        repre: dict
+) -> List[AnyStr]:
+    """Return the full source filepaths for an instance's representations"""
+
+    staging = repre.get("stagingDir", instance.data.get("stagingDir"))
+    files = repre.get("files", [])
+    if isinstance(files, list):
+        return [os.path.join(staging, fname) for fname in files]
+    elif isinstance(files, str):
+        # Single file
+        return [os.path.join(staging, files)]
+
+    raise TypeError(f"Unsupported type for representation files: {files} "
+                    "(supports list or str)")

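The mapping built by `get_source_to_publish_paths` above is plain source-to-publish path pairs handed to `remap_paths`; roughly (paths hypothetical):

    mapping = {
        # normalized staging path -> expected publish path
        "/tmp/staging/modelMain.usd":
            "/proj/publish/modelMain/v001/modelMain.usd",
        "/tmp/staging/lookMain.usd":
            "/proj/publish/lookMain/v001/lookMain.usd",
    }
    # ExtractUSD then saves while the remapping is active:
    #     with remap_paths(ropnode, mapping):
    #         render_rop(ropnode)
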
@ -1,322 +0,0 @@
|
|||
import os
|
||||
import contextlib
|
||||
import sys
|
||||
from collections import deque
|
||||
import hou
|
||||
|
||||
import ayon_api
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import get_representation_path
|
||||
from ayon_houdini.api import plugin
|
||||
import ayon_houdini.api.usd as hou_usdlib
|
||||
from ayon_houdini.api.lib import render_rop
|
||||
|
||||
|
||||
class ExitStack(object):
|
||||
"""Context manager for dynamic management of a stack of exit callbacks.
|
||||
|
||||
For example:
|
||||
|
||||
with ExitStack() as stack:
|
||||
files = [stack.enter_context(open(fname)) for fname in filenames]
|
||||
# All opened files will automatically be closed at the end of
|
||||
# the with statement, even if attempts to open files later
|
||||
# in the list raise an exception
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._exit_callbacks = deque()
|
||||
|
||||
def pop_all(self):
|
||||
"""Preserve the context stack by transferring it to a new instance"""
|
||||
new_stack = type(self)()
|
||||
new_stack._exit_callbacks = self._exit_callbacks
|
||||
self._exit_callbacks = deque()
|
||||
return new_stack
|
||||
|
||||
def _push_cm_exit(self, cm, cm_exit):
|
||||
"""Helper to correctly register callbacks to __exit__ methods"""
|
||||
|
||||
def _exit_wrapper(*exc_details):
|
||||
return cm_exit(cm, *exc_details)
|
||||
|
||||
_exit_wrapper.__self__ = cm
|
||||
self.push(_exit_wrapper)
|
||||
|
||||
def push(self, exit):
|
||||
"""Registers a callback with the standard __exit__ method signature.
|
||||
|
||||
Can suppress exceptions the same way __exit__ methods can.
|
||||
|
||||
Also accepts any object with an __exit__ method (registering a call
|
||||
to the method instead of the object itself)
|
||||
|
||||
"""
|
||||
# We use an unbound method rather than a bound method to follow
|
||||
# the standard lookup behaviour for special methods
|
||||
_cb_type = type(exit)
|
||||
try:
|
||||
exit_method = _cb_type.__exit__
|
||||
except AttributeError:
|
||||
# Not a context manager, so assume its a callable
|
||||
self._exit_callbacks.append(exit)
|
||||
else:
|
||||
self._push_cm_exit(exit, exit_method)
|
||||
return exit # Allow use as a decorator
|
||||
|
||||
def callback(self, callback, *args, **kwds):
|
||||
"""Registers an arbitrary callback and arguments.
|
||||
|
||||
Cannot suppress exceptions.
|
||||
"""
|
||||
|
||||
def _exit_wrapper(exc_type, exc, tb):
|
||||
callback(*args, **kwds)
|
||||
|
||||
# We changed the signature, so using @wraps is not appropriate, but
|
||||
# setting __wrapped__ may still help with introspection
|
||||
_exit_wrapper.__wrapped__ = callback
|
||||
self.push(_exit_wrapper)
|
||||
return callback # Allow use as a decorator
|
||||
|
||||
def enter_context(self, cm):
|
||||
"""Enters the supplied context manager
|
||||
|
||||
If successful, also pushes its __exit__ method as a callback and
|
||||
returns the result of the __enter__ method.
|
||||
"""
|
||||
# We look up the special methods on the type to match the with
|
||||
# statement
|
||||
_cm_type = type(cm)
|
||||
_exit = _cm_type.__exit__
|
||||
result = _cm_type.__enter__(cm)
|
||||
self._push_cm_exit(cm, _exit)
|
||||
return result
|
||||
|
||||
def close(self):
|
||||
"""Immediately unwind the context stack"""
|
||||
self.__exit__(None, None, None)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_details):
|
||||
# We manipulate the exception state so it behaves as though
|
||||
# we were actually nesting multiple with statements
|
||||
frame_exc = sys.exc_info()[1]
|
||||
|
||||
def _fix_exception_context(new_exc, old_exc):
|
||||
while 1:
|
||||
exc_context = new_exc.__context__
|
||||
if exc_context in (None, frame_exc):
|
||||
break
|
||||
new_exc = exc_context
|
||||
new_exc.__context__ = old_exc
|
||||
|
||||
# Callbacks are invoked in LIFO order to match the behaviour of
|
||||
# nested context managers
|
||||
suppressed_exc = False
|
||||
while self._exit_callbacks:
|
||||
cb = self._exit_callbacks.pop()
|
||||
try:
|
||||
if cb(*exc_details):
|
||||
suppressed_exc = True
|
||||
exc_details = (None, None, None)
|
||||
except Exception:
|
||||
new_exc_details = sys.exc_info()
|
||||
# simulate the stack of exceptions by setting the context
|
||||
_fix_exception_context(new_exc_details[1], exc_details[1])
|
||||
if not self._exit_callbacks:
|
||||
raise
|
||||
exc_details = new_exc_details
|
||||
return suppressed_exc
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def parm_values(overrides):
|
||||
"""Override Parameter values during the context."""
|
||||
|
||||
originals = []
|
||||
try:
|
||||
for parm, value in overrides:
|
||||
originals.append((parm, parm.eval()))
|
||||
parm.set(value)
|
||||
yield
|
||||
finally:
|
||||
for parm, value in originals:
|
||||
# Parameter might not exist anymore so first
|
||||
# check whether it's still valid
|
||||
if hou.parm(parm.path()):
|
||||
parm.set(value)
|
||||
|
||||
|
||||
class ExtractUSDLayered(plugin.HoudiniExtractorPlugin):
|
||||
|
||||
order = pyblish.api.ExtractorOrder
|
||||
label = "Extract Layered USD"
|
||||
families = ["usdLayered", "usdShade"]
|
||||
|
||||
# Force Output Processors so it will always save any file
|
||||
# into our unique staging directory with processed Avalon paths
|
||||
output_processors = ["avalon_uri_processor", "stagingdir_processor"]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
self.log.info("Extracting: %s" % instance)
|
||||
|
||||
staging_dir = self.staging_dir(instance)
|
||||
fname = instance.data.get("usdFilename")
|
||||
|
||||
# The individual rop nodes are collected as "publishDependencies"
|
||||
dependencies = instance.data["publishDependencies"]
|
||||
ropnodes = [dependency[0] for dependency in dependencies]
|
||||
assert all(
|
||||
node.type().name() in {"usd", "usd_rop"} for node in ropnodes
|
||||
)
|
||||
|
||||
# Main ROP node, either a USD Rop or ROP network with
|
||||
# multiple USD ROPs
|
||||
node = hou.node(instance.data["instance_node"])
|
||||
|
||||
# Collect any output dependencies that have not been processed yet
|
||||
# during extraction of other instances
|
||||
outputs = [fname]
|
||||
active_dependencies = [
|
||||
dep
|
||||
for dep in dependencies
|
||||
if dep.data.get("publish", True)
|
||||
            and not dep.data.get("_isExtracted", False)
        ]

        for dependency in active_dependencies:
            outputs.append(dependency.data["usdFilename"])

        pattern = r"*[/\]{0} {0}"
        save_pattern = " ".join(pattern.format(fname) for fname in outputs)

        # Run a stack of context managers before we start the render to
        # temporarily adjust USD ROP settings for our publish output.
        rop_overrides = {
            # This sets staging directory on the processor to force our
            # output files to end up in the Staging Directory.
            "stagingdiroutputprocessor_stagingDir": staging_dir,
            # Force the Avalon URI Output Processor to refactor paths for
            # references, payloads and layers to published paths.
            "avalonurioutputprocessor_use_publish_paths": True,
            # Only write out specific USD files based on our outputs
            "savepattern": save_pattern,
        }
        overrides = list()
        with ExitStack() as stack:

            for ropnode in ropnodes:
                manager = hou_usdlib.outputprocessors(
                    ropnode,
                    processors=self.output_processors,
                    disable_all_others=True,
                )
                stack.enter_context(manager)

                # Some of these must be added after we enter the output
                # processor context manager because those parameters only
                # exist when the Output Processor is added to the ROP node.
                for name, value in rop_overrides.items():
                    parm = ropnode.parm(name)
                    assert parm, "Parm not found: %s.%s" % (
                        ropnode.path(),
                        name,
                    )
                    overrides.append((parm, value))

            stack.enter_context(parm_values(overrides))

            # Render the single ROP node or the full ROP network
            render_rop(node)

        # Assert all output files exist in the Staging Directory
        for output_fname in outputs:
            path = os.path.join(staging_dir, output_fname)
            assert os.path.exists(path), "Output file must exist: %s" % path

        # Set up the dependency for publish if they have new content
        # compared to previous publishes
        project_name = instance.context.data["projectName"]
        for dependency in active_dependencies:
            dependency_fname = dependency.data["usdFilename"]

            filepath = os.path.join(staging_dir, dependency_fname)
            similar = self._compare_with_latest_publish(
                project_name, dependency, filepath
            )
            if similar:
                # Deactivate this dependency
                self.log.debug(
                    "Dependency matches previous publish version,"
                    " deactivating %s for publish" % dependency
                )
                dependency.data["publish"] = False
            else:
                self.log.debug("Extracted dependency: %s" % dependency)
                # This dependency should be published
                dependency.data["files"] = [dependency_fname]
                dependency.data["stagingDir"] = staging_dir
                dependency.data["_isExtracted"] = True

        # Store the created files on the instance
        if "files" not in instance.data:
            instance.data["files"] = []
        instance.data["files"].append(fname)

    def _compare_with_latest_publish(self, project_name, dependency, new_file):
        import filecmp

        _, ext = os.path.splitext(new_file)

        # Compare this dependency with the latest published version
        # to detect whether we should make this into a new publish
        # version. If not, skip it.
        folder_entity = ayon_api.get_folder_by_path(
            project_name, dependency.data["folderPath"], fields={"id"}
        )
        product_entity = ayon_api.get_product_by_name(
            project_name,
            dependency.data["productName"],
            folder_entity["id"],
            fields={"id"}
        )
        if not product_entity:
            # Product doesn't exist yet. Definitely a new file
            self.log.debug("No existing product..")
            return False

        version_entity = ayon_api.get_last_version_by_product_id(
            project_name, product_entity["id"], fields={"id"}
        )
        if not version_entity:
            self.log.debug("No existing version..")
            return False

        representation = ayon_api.get_representation_by_name(
            project_name, ext.lstrip("."), version_entity["id"]
        )
        if not representation:
            self.log.debug("No existing representation..")
            return False

        old_file = get_representation_path(representation)
        if not os.path.exists(old_file):
            return False

        return filecmp.cmp(old_file, new_file)

    def staging_dir(self, instance):
        """Provide a temporary directory in which to store extracted files

        Upon calling this method the staging directory is stored inside
        the instance.data['stagingDir']
        """

        from ayon_core.pipeline.publish import get_instance_staging_dir

        return get_instance_staging_dir(instance)
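For context, `parm_values` used above is a parm-restoring context manager pulled in from the integration's lib. A minimal sketch of the pattern (illustrative only, not the actual ayon-core implementation, assuming `hou` parm objects with `eval()`/`set()`):

```python
from contextlib import contextmanager


@contextmanager
def parm_values(overrides):
    """Temporarily set parm values, restoring the originals on exit."""
    originals = [(parm, parm.eval()) for parm, _value in overrides]
    try:
        for parm, value in overrides:
            parm.set(value)
        yield
    finally:
        # Restore the original values, even if the render raised.
        for parm, value in originals:
            parm.set(value)
```

Entering this via `ExitStack` keeps the restore logic tied to the same `with` block as the output processor managers, so all temporary ROP changes unwind together.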
@@ -22,9 +22,12 @@ class ValidateBypassed(plugin.HoudiniInstancePlugin):

     def process(self, instance):

-        if len(instance) == 0:
-            # Ignore instances without any nodes
+        if not instance.data.get("instance_node"):
+            # Ignore instances without an instance node
+            # e.g. in memory bootstrap instances
+            self.log.debug(
+                "Skipping instance without instance node: {}".format(instance)
+            )
             return

         invalid = self.get_invalid(instance)
@@ -1,15 +1,15 @@
 # -*- coding: utf-8 -*-
 """Validator for checking that export is a single frame."""
 import pyblish.api
 from ayon_core.pipeline import (
     PublishValidationError,
     OptionalPyblishPluginMixin
 )
 from ayon_core.pipeline.publish import ValidateContentsOrder
 from ayon_houdini.api.action import SelectInvalidAction
 from ayon_houdini.api import plugin


-class ValidateSingleFrame(pyblish.api.InstancePlugin,
+class ValidateSingleFrame(plugin.HoudiniInstancePlugin,
                           OptionalPyblishPluginMixin):
     """Validate Export is a Single Frame.
@@ -23,7 +23,7 @@ class ValidateHoudiniNotApprenticeLicense(plugin.HoudiniInstancePlugin):
     """

     order = pyblish.api.ValidatorOrder
-    families = ["usd", "abc", "fbx", "camera"]
+    families = ["usdrop", "abc", "fbx", "camera"]
     label = "Houdini Apprentice License"

     def process(self, instance):
@@ -30,6 +30,15 @@ class ValidateInstanceInContextHoudini(plugin.HoudiniInstancePlugin,
         if not self.is_active(instance.data):
             return

+        attr_values = self.get_attr_values_from_data(instance.data)
+        if not attr_values and not instance.data.get("instance_node"):
+            # Skip instances that do not have the attr values because that
+            # hints these are runtime-instances, like e.g. USD layer
+            # contributions. We will confirm that by checking these do not
+            # have an instance node. We do not need to check these because
+            # they 'spawn off' from an original instance that has the check
+            # itself.
+            return
+
         folder_path = instance.data.get("folderPath")
         task = instance.data.get("task")
         context = self.get_context(instance)
@@ -37,6 +37,13 @@ class ValidateNoErrors(plugin.HoudiniInstancePlugin):

     def process(self, instance):

+        if not instance.data.get("instance_node"):
+            self.log.debug(
+                "Skipping 'Validate no errors' because instance "
+                "has no instance node: {}".format(instance)
+            )
+            return
+
         validate_nodes = []

         if len(instance) > 0:
@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
import inspect
import hou
import pyblish.api

from ayon_core.pipeline import PublishValidationError

from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api import plugin


class ValidateUsdRenderProducts(plugin.HoudiniInstancePlugin):
    """Validate at least one render product is present"""

    order = pyblish.api.ValidatorOrder
    families = ["usdrender"]
    hosts = ["houdini"]
    label = "Validate Render Products"
    actions = [SelectROPAction]

    def get_description(self):
        return inspect.cleandoc(
            """### No Render Products

            The render submission specified no Render Product outputs and
            as such would not generate any rendered files.

            This is usually the case if no Render Settings or Render
            Products were created.

            Make sure to create the Render Settings
            relevant to the renderer you want to use.

            """
        )

    def process(self, instance):

        if not instance.data.get("output_node"):
            self.log.warning("No valid LOP node to render found.")
            return

        if not instance.data.get("files", []):
            node_path = instance.data["instance_node"]
            node = hou.node(node_path)
            rendersettings_path = (
                node.evalParm("rendersettings") or "/Render/rendersettings"
            )
            raise PublishValidationError(
                message=(
                    "No Render Products found in Render Settings "
                    "for '{}' at '{}'".format(node_path, rendersettings_path)
                ),
                description=self.get_description(),
                title=self.label
            )
@@ -0,0 +1,102 @@
import inspect

import hou
import pyblish.api

from ayon_core.pipeline import PublishValidationError
from ayon_core.pipeline.publish import RepairAction, OptionalPyblishPluginMixin

from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api import plugin


class ValidateUSDAssetContributionDefaultPrim(plugin.HoudiniInstancePlugin,
                                              OptionalPyblishPluginMixin):
    """Validate the default prim is set when USD contribution is set to asset.

    If USD asset contribution is enabled and the user has it set to
    initialize the target product as "asset", then most likely they are
    looking to publish into an asset structure - which should have a default
    prim that matches the folder's name. To ensure that is the case, we
    require the value to be set on the ROP node.

    Note that another validator, "Validate USD ROP Default Prim", enforces
    that the primitive actually exists (or has modifications) if the ROP
    specifies a default prim - so that does not have to be validated here.

    """

    order = pyblish.api.ValidatorOrder
    families = ["usdrop"]
    hosts = ["houdini"]
    label = "Validate USD Asset Contribution Default Prim"
    actions = [SelectROPAction, RepairAction]

    # TODO: Unfortunately currently this does not show as optional toggle
    #   because the product type is `usd` and not `usdrop` - however we do
    #   not want to run this for ALL `usd` product types?
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        # Check if instance is set to be an asset contribution
        settings = self.get_attr_values_from_data_for_plugin_name(
            "CollectUSDLayerContributions", instance.data
        )
        if (
            not settings.get("contribution_enabled", False)
            or settings.get("contribution_target_product_init") != "asset"
        ):
            return

        rop_node = hou.node(instance.data["instance_node"])
        default_prim = rop_node.evalParm("defaultprim")
        if not default_prim:
            raise PublishValidationError(
                f"No default prim specified on ROP node: {rop_node.path()}",
                description=self.get_description()
            )

        folder_name = instance.data["folderPath"].rsplit("/", 1)[-1]
        if default_prim.lstrip("/") != folder_name:
            raise PublishValidationError(
                f"Default prim specified on ROP node does not match the "
                f"asset's folder name: '{default_prim}' "
                f"(should be: '/{folder_name}')",
                description=self.get_description()
            )

    @classmethod
    def repair(cls, instance):
        rop_node = hou.node(instance.data["instance_node"])
        rop_node.parm("defaultprim").set(
            "/`strsplit(chs(\"folderPath\"), \"/\", -1)`"
        )

    @staticmethod
    def get_attr_values_from_data_for_plugin_name(
            plugin_name: str, data: dict) -> dict:
        return (
            data
            .get("publish_attributes", {})
            .get(plugin_name, {})
        )

    def get_description(self):
        return inspect.cleandoc(
            """### Default primitive not set to current asset

            The USD instance has **USD Contribution** enabled and is set to
            initialize as **asset**. The asset requires a default root
            primitive with the name of the folder it's related to.

            For example, if you're working in `/asset/char_hero`, the
            folder's name is `char_hero`, and hence all prims for the asset
            should live under the `/char_hero` root primitive.

            This validation solely ensures the **default primitive** on the
            ROP node is set to match the folder name.
            """
        )
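For reference, the repair expression above resolves to the last component of the instance's folder path. A plain-Python mirror of that evaluation (illustrative only):

```python
def expected_default_prim(folder_path: str) -> str:
    # Mirrors `strsplit(chs("folderPath"), "/", -1)`: take the last
    # path component and root it at "/".
    return "/" + folder_path.rsplit("/", 1)[-1]


assert expected_default_prim("/asset/char_hero") == "/char_hero"
```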
@@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-
import hou

import pyblish.api
from ayon_core.pipeline import PublishValidationError

from ayon_houdini.api import plugin
import ayon_houdini.api.usd as hou_usdlib


class ValidateUSDLayerPathBackslashes(plugin.HoudiniInstancePlugin):
    """Validate USD loaded paths have no backslashes.

    This is a crucial validation for HUSK USD rendering, as Houdini's
    USD Render ROP will fail to write out a .usd file for rendering that
    correctly preserves the backslashes, e.g. it will incorrectly convert a
    '\t' to a TAB character, preventing HUSK from finding those specific
    files.

    This validation is redundant for usdModel since that flattens the model
    before write. As such it will never have any used layers with a path.

    """

    order = pyblish.api.ValidatorOrder
    families = ["usdSetDress", "usdShade", "usd", "usdrender"]
    label = "USD Layer path backslashes"
    optional = True

    def process(self, instance):

        rop = hou.node(instance.data.get("instance_node"))
        lop_path = hou_usdlib.get_usd_rop_loppath(rop)
        stage = lop_path.stage(apply_viewport_overrides=False)

        invalid = []
        for layer in stage.GetUsedLayers():
            references = layer.externalReferences

            for ref in references:

                # Ignore anonymous layers
                if ref.startswith("anon:"):
                    continue

                # If any backslashes in the path consider it invalid
                if "\\" in ref:
                    self.log.error("Found invalid path: %s" % ref)
                    invalid.append(layer)

        if invalid:
            raise PublishValidationError((
                "Loaded layers have backslashes. "
                "This is invalid for HUSK USD rendering."),
                title=self.label)
@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
import inspect
import hou
from pxr import Usd, UsdShade, UsdGeom

import pyblish.api

from ayon_core.pipeline.publish import (
    PublishValidationError,
    OptionalPyblishPluginMixin
)
from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api import plugin


def has_material(prim: Usd.Prim,
                 include_subsets: bool = True,
                 purpose=UsdShade.Tokens.allPurpose) -> bool:
    """Return whether primitive has any material binding."""
    search_from = [prim]
    if include_subsets:
        subsets = UsdShade.MaterialBindingAPI(prim).GetMaterialBindSubsets()
        for subset in subsets:
            search_from.append(subset.GetPrim())

    bounds = UsdShade.MaterialBindingAPI.ComputeBoundMaterials(search_from,
                                                               purpose)
    for (material, relationship) in zip(*bounds):
        material_prim = material.GetPrim()
        if material_prim.IsValid():
            # Has a material binding
            return True

    return False


class ValidateUsdLookAssignments(plugin.HoudiniInstancePlugin,
                                 OptionalPyblishPluginMixin):
    """Validate all geometry prims have a material binding.

    Note: This does not necessarily validate the material binding is authored
    by the current layers if the input already had material bindings.

    """

    order = pyblish.api.ValidatorOrder
    families = ["look"]
    hosts = ["houdini"]
    label = "Validate All Geometry Has Material Assignment"
    actions = [SelectROPAction]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        lop_node: hou.LopNode = instance.data.get("output_node")
        if not lop_node:
            return

        # We iterate the composed stage for code simplicity; however this
        # means that it does not validate across e.g. multiple model variants
        # but only checks against the current composed stage. Likely this is
        # also what you actually want to validate, because your look might not
        # apply to *all* model variants.
        stage = lop_node.stage()
        invalid = []
        for prim in stage.Traverse():
            if not prim.IsA(UsdGeom.Gprim):
                continue

            if not has_material(prim):
                invalid.append(prim.GetPath())

        for path in sorted(invalid):
            self.log.warning("No material binding on: %s", path.pathString)

        if invalid:
            raise PublishValidationError(
                "Found geometry without material bindings.",
                title="No assigned materials",
                description=self.get_description()
            )

    @staticmethod
    def get_description():
        return inspect.cleandoc(
            """### Geometry has no material assignments.

            A look publish should usually define a material assignment for all
            geometry of a model. As such, this validates whether all geometry
            currently has at least one material binding applied.

            """
        )
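A quick standalone usage sketch of the `has_material()` helper above, outside the publish plug-in (the file name is made up for the example):

```python
from pxr import Usd, UsdGeom

stage = Usd.Stage.Open("char_hero_look.usd")  # hypothetical input file
unbound = [
    prim.GetPath()
    for prim in stage.Traverse()
    if prim.IsA(UsdGeom.Gprim) and not has_material(prim)
]
print("Prims without material bindings:", unbound)
```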
@@ -0,0 +1,148 @@
# -*- coding: utf-8 -*-
import inspect
from typing import List, Union
from functools import partial

import hou
from pxr import Sdf
import pyblish.api

from ayon_core.pipeline.publish import PublishValidationError
from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api.usd import get_schema_type_names
from ayon_houdini.api import plugin


def get_applied_items(list_proxy) -> List[Union[Sdf.Reference, Sdf.Payload]]:
    """Backwards compatible equivalent of `GetAppliedItems()`"""
    return list_proxy.ApplyEditsToList([])


class ValidateUsdLookContents(plugin.HoudiniInstancePlugin):
    """Validate no meshes are defined in the look.

    Usually a published look should not contain generated meshes in the
    output, but only the materials, material bindings and render geometry
    settings.

    To avoid accidentally including a Mesh definition we ensure none of the
    generated output layers for the instance is defining any Mesh type.

    """

    order = pyblish.api.ValidatorOrder
    families = ["look"]
    hosts = ["houdini"]
    label = "Validate Look No Meshes/Lights"
    actions = [SelectROPAction]

    disallowed_types = [
        "UsdGeomBoundable",       # Meshes/Lights/Procedurals
        "UsdRenderSettingsBase",  # Render Settings
        "UsdRenderVar",           # Render Var
        "UsdGeomCamera"           # Cameras
    ]

    def process(self, instance):

        lop_node: hou.LopNode = instance.data.get("output_node")
        if not lop_node:
            return

        # Get layers below the layer break
        above_break_layers = set(
            layer for layer in lop_node.layersAboveLayerBreak()
        )
        stage = lop_node.stage()
        layers = [
            layer for layer
            in stage.GetLayerStack(includeSessionLayers=False)
            if layer.identifier not in above_break_layers
        ]
        if not layers:
            return

        # The Sdf.PrimSpec type name has no knowledge about inherited types.
        # So we pre-collect all invalid types and their child types to
        # ensure we match inherited types as well.
        disallowed_type_names = set()
        for type_name in self.disallowed_types:
            disallowed_type_names.update(get_schema_type_names(type_name))

        # Find invalid prims
        invalid = []

        def collect_invalid(layer: Sdf.Layer, path: Sdf.Path):
            """Collect invalid paths into the `invalid` list"""
            if not path.IsPrimPath():
                return

            prim = layer.GetPrimAtPath(path)
            if prim.typeName in disallowed_type_names:
                self.log.warning(
                    "Disallowed prim type '%s' at %s",
                    prim.typeName, prim.path.pathString
                )
                invalid.append(path)
                return

            # TODO: We should allow referencing or payloads, but if so - we
            #   should still check whether the loaded reference or payload
            #   introduces any geometry. If so, disallow it because that
            #   opinion would 'define' geometry in the output
            references = get_applied_items(prim.referenceList)
            if references:
                self.log.warning(
                    "Disallowed references are added at %s: %s",
                    prim.path.pathString,
                    ", ".join(ref.assetPath for ref in references)
                )
                invalid.append(path)

            payloads = get_applied_items(prim.payloadList)
            if payloads:
                self.log.warning(
                    "Disallowed payloads are added at %s: %s",
                    prim.path.pathString,
                    ", ".join(payload.assetPath for payload in payloads)
                )
                invalid.append(path)

        for layer in layers:
            layer.Traverse("/", partial(collect_invalid, layer))

        if invalid:
            raise PublishValidationError(
                "Invalid look members found.",
                title="Look Invalid Members",
                description=self.get_description()
            )

    @staticmethod
    def get_description():
        return inspect.cleandoc(
            """### Look contains invalid members

            A look publish should usually only contain materials, material
            bindings and render geometry settings.

            This validation invalidates any creation of:
            - Render Settings,
            - Lights,
            - Cameras,
            - Geometry (Meshes, Curves and other geometry types)

            To avoid writing out loaded geometry into the output, make sure to
            add a Layer Break after loading all the content you do **not**
            want to save into the output file. Then your materials, material
            bindings and render geometry settings are overrides applied to the
            loaded content after the **Layer Break LOP** node.

            If you happen to write out additional data for the meshes via
            e.g. a SOP Modify, make sure to import to LOPs only the relevant
            attributes, mark them as static attributes, static topology and
            set the Primitive Definitions to be Overlay instead of Defines.

            Currently, to avoid issues with referencing/payloading geometry
            from external files, any references or payloads are also
            disallowed for looks.

            """
        )
@@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
import inspect
import hou
from pxr import Sdf, UsdShade
import pyblish.api

from ayon_core.pipeline.publish import (
    PublishValidationError,
    OptionalPyblishPluginMixin
)
from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api.usd import get_schema_type_names
from ayon_houdini.api import plugin


class ValidateLookShaderDefs(plugin.HoudiniInstancePlugin,
                             OptionalPyblishPluginMixin):
    """Validate Material primitives are defined types instead of overs"""

    order = pyblish.api.ValidatorOrder
    families = ["look"]
    hosts = ["houdini"]
    label = "Validate Look Shaders Are Defined"
    actions = [SelectROPAction]
    optional = True

    # Types to validate at the low-level Sdf API
    # For the Usd API we validate directly against `UsdShade.Material`
    validate_types = [
        "UsdShadeMaterial"
    ]

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        lop_node: hou.LopNode = instance.data.get("output_node")
        if not lop_node:
            return

        # Get layers below the layer break
        above_break_layers = set(
            layer for layer in lop_node.layersAboveLayerBreak())
        stage = lop_node.stage()
        layers = [
            layer for layer
            in stage.GetLayerStack(includeSessionLayers=False)
            if layer.identifier not in above_break_layers
        ]
        if not layers:
            return

        # The Sdf.PrimSpec type name has no knowledge about inherited types.
        # So we pre-collect all types to validate and their child types to
        # ensure we match inherited types as well.
        validate_type_names = set()
        for type_name in self.validate_types:
            validate_type_names.update(get_schema_type_names(type_name))

        invalid = []
        for layer in layers:
            def log_overs(path: Sdf.Path):
                if not path.IsPrimPath():
                    return
                prim_spec = layer.GetPrimAtPath(path)

                if not prim_spec.typeName:
                    # Typeless may mean Houdini generated the material or
                    # shader as override because upstream the nodes already
                    # existed. So we check the stage instead to identify
                    # the composed type of the prim
                    prim = stage.GetPrimAtPath(path)
                    if not prim:
                        return

                    if not prim.IsA(UsdShade.Material):
                        return

                    self.log.debug("Material Prim has no type defined: %s",
                                   path)

                elif prim_spec.typeName not in validate_type_names:
                    return

                if prim_spec.specifier != Sdf.SpecifierDef:
                    specifier = {
                        Sdf.SpecifierDef: "Def",
                        Sdf.SpecifierOver: "Over",
                        Sdf.SpecifierClass: "Class"
                    }[prim_spec.specifier]

                    self.log.warning(
                        "Material is not defined but specified as "
                        "'%s': %s", specifier, path
                    )
                    invalid.append(path)

            layer.Traverse("/", log_overs)

        if invalid:
            raise PublishValidationError(
                "Found Materials not specifying an authored definition.",
                title="Materials not defined",
                description=self.get_description()
            )

    @staticmethod
    def get_description():
        return inspect.cleandoc(
            """### Materials are not defined types

            There are materials in your current look that do not **define**
            the material primitives, but rather **override** or specify a
            **class**. This is most likely not what you want, since you want
            most looks to define new materials instead of overriding existing
            materials.

            Usually this happens if your current scene loads an input asset
            that already has the materials you're creating in your current
            scene as well. For example, if you are loading the asset that
            contains the previous publish of your look without muting the
            look layer. As such, Houdini sees the materials already exist and
            will not make new definitions, but only write "override changes".
            However, once your look publish replaces the previous one, the
            materials would suddenly be missing and only specified as
            overrides.

            So, in most cases this is solved by Layer Muting the look layers
            of the loaded asset upstream.

            If for a specific case the materials already existing in the
            input is correct, then you can either specify new material names
            for what you're creating in the current scene or disable this
            validation if you are sure you want to write overrides in your
            look publish instead of definitions.
            """
        )
@@ -1,79 +0,0 @@
# -*- coding: utf-8 -*-
import hou
from pxr import UsdShade, UsdRender, UsdLux

import pyblish.api
from ayon_core.pipeline import PublishValidationError

from ayon_houdini.api import plugin
import ayon_houdini.api.usd as hou_usdlib


def fullname(o):
    """Get fully qualified class name"""
    module = o.__module__
    if module is None or module == str.__module__:
        return o.__name__
    return module + "." + o.__name__


class ValidateUsdModel(plugin.HoudiniInstancePlugin):
    """Validate USD Model.

    Disallow Shaders, Render settings, products and vars and Lux lights.

    """

    order = pyblish.api.ValidatorOrder
    families = ["usdModel"]
    label = "Validate USD Model"
    optional = True

    disallowed = [
        UsdShade.Shader,
        UsdRender.Settings,
        UsdRender.Product,
        UsdRender.Var,
        UsdLux.Light,
    ]

    def process(self, instance):

        rop = hou.node(instance.data.get("instance_node"))
        lop_path = hou_usdlib.get_usd_rop_loppath(rop)
        stage = lop_path.stage(apply_viewport_overrides=False)

        invalid = []
        for prim in stage.Traverse():

            for klass in self.disallowed:
                if klass(prim):
                    # Get full class name without pxr. prefix
                    name = fullname(klass).split("pxr.", 1)[-1]
                    path = str(prim.GetPath())
                    self.log.warning("Disallowed %s: %s" % (name, path))

                    invalid.append(prim)

        if invalid:
            prim_paths = sorted([str(prim.GetPath()) for prim in invalid])
            raise PublishValidationError(
                "Found invalid primitives: {}".format(prim_paths))


class ValidateUsdShade(ValidateUsdModel):
    """Validate usdShade.

    Disallow Render settings, products, vars and Lux lights.

    """

    families = ["usdShade"]
    label = "Validate USD Shade"

    disallowed = [
        UsdRender.Settings,
        UsdRender.Product,
        UsdRender.Var,
        UsdLux.Light,
    ]
@@ -1,8 +1,10 @@
 # -*- coding: utf-8 -*-
+import inspect
+
 import pyblish.api

 from ayon_core.pipeline import PublishValidationError

+from ayon_houdini.api.action import SelectROPAction
 from ayon_houdini.api import plugin

@@ -16,18 +18,23 @@ class ValidateUSDOutputNode(plugin.HoudiniInstancePlugin):

     """

-    order = pyblish.api.ValidatorOrder
-    families = ["usd"]
+    # Validate early so that this error reports higher than others to the
+    # user, so that if another invalidation is due to the output node being
+    # invalid, the user will likely focus on this issue first.
+    order = pyblish.api.ValidatorOrder - 0.4
+    families = ["usdrop"]
     label = "Validate Output Node (USD)"
+    actions = [SelectROPAction]

     def process(self, instance):

         invalid = self.get_invalid(instance)
         if invalid:
+            path = invalid[0]
             raise PublishValidationError(
-                ("Output node(s) `{}` are incorrect. "
-                 "See plug-in log for details.").format(invalid),
-                title=self.label
+                "Output node '{}' has no valid LOP path set.".format(path),
+                title=self.label,
+                description=self.get_description()
             )

     @classmethod
@@ -35,12 +42,12 @@ class ValidateUSDOutputNode(plugin.HoudiniInstancePlugin):

         import hou

-        output_node = instance.data["output_node"]
+        output_node = instance.data.get("output_node")

         if output_node is None:
             node = hou.node(instance.data.get("instance_node"))
             cls.log.error(
-                "USD node '%s' LOP path does not exist. "
+                "USD node '%s' configured LOP path does not exist. "
                 "Ensure a valid LOP path is set." % node.path()
             )
@@ -55,3 +62,13 @@ class ValidateUSDOutputNode(plugin.HoudiniInstancePlugin):
             % (output_node.path(), output_node.type().category().name())
         )
         return [output_node.path()]
+
+    def get_description(self):
+        return inspect.cleandoc(
+            """### USD ROP has invalid LOP path
+
+            The USD ROP node has no or an invalid LOP path set to be exported.
+            Make sure to correctly configure what you want to export for the
+            publish.
+            """
+        )
@@ -0,0 +1,311 @@
# -*- coding: utf-8 -*-
import inspect
import hou
import pxr
from pxr import UsdRender
import pyblish.api

from ayon_core.pipeline.publish import PublishValidationError, RepairAction

from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api.usd import get_usd_render_rop_rendersettings
from ayon_houdini.api import plugin


class ValidateUSDRenderSingleFile(plugin.HoudiniInstancePlugin):
    """Validate the writing of a single USD Render Output file.

    When writing to a single file with the USD Render ROP, make sure to write
    the output USD file from a single process to avoid different processes
    overwriting it.
    """

    order = pyblish.api.ValidatorOrder
    families = ["usdrender"]
    hosts = ["houdini"]
    label = "Validate USD Render ROP Settings"
    actions = [SelectROPAction, RepairAction]

    def process(self, instance):

        if instance.data.get("creator_attributes",
                             {}).get("render_target") != "farm_split":
            # Validation is only relevant when submitting a farm job where the
            # export and render are separate jobs.
            return

        # Get configured settings for this instance
        submission_data = (
            instance.data
            .get("publish_attributes", {})
            .get("HoudiniSubmitDeadlineUsdRender", {})
        )
        render_chunk_size = submission_data.get("chunk", 1)
        export_chunk_size = submission_data.get("export_chunk", 1)
        usd_file_per_frame = "$F" in instance.data["ifdFile"]
        frame_start_handle = instance.data["frameStartHandle"]
        frame_end_handle = instance.data["frameEndHandle"]
        num_frames = frame_end_handle - frame_start_handle + 1
        rop_node = hou.node(instance.data["instance_node"])

        # Whether the ROP node is set to render all frames within a single
        # process. When this is disabled then Husk will restart completely
        # per frame no matter the chunk size.
        all_frames_at_once = rop_node.evalParm("allframesatonce")

        invalid = False
        if usd_file_per_frame:
            # USD file per frame
            # If rendering multiple frames per task and the USD file has $F
            # then log a warning that the optimization will be less efficient
            # since husk will still restart per frame.
            if render_chunk_size > 1:
                self.log.debug(
                    "Render chunk size is bigger than one but export file is "
                    "a USD file per frame. Husk does not allow rendering "
                    "separate USD files in one process. As such, Husk will "
                    "restart per frame even within the chunk to render the "
                    "correct file per frame."
                )
        else:
            # Single export USD file
            # Export chunk size must be higher than the amount of frames to
            # ensure the file is written in one go on one machine and thus
            # ends up containing all frames correctly
            if export_chunk_size < num_frames:
                self.log.error(
                    "The export chunk size %s is smaller than the amount of "
                    "frames %s, so multiple tasks will try to export to "
                    "the same file. Make sure to increase the chunk "
                    "size to higher than the amount of frames to render, "
                    "more than >%s",
                    export_chunk_size, num_frames, num_frames
                )
                invalid = True

            if not all_frames_at_once:
                self.log.error(
                    "Please enable 'Render All Frames With A Single Process' "
                    "on the USD Render ROP node or add $F to the USD filename",
                )
                invalid = True

        if invalid:
            raise PublishValidationError(
                "Render USD file being overwritten during export.",
                title="Render USD file overwritten",
                description=self.get_description())

    @classmethod
    def repair(cls, instance):
        # Enable all frames at once and make the frames per task
        # very large
        rop_node = hou.node(instance.data["instance_node"])
        rop_node.parm("allframesatonce").set(True)

        # Override instance setting for export chunk size
        create_context = instance.context.data["create_context"]
        created_instance = create_context.get_instance_by_id(
            instance.data["instance_id"]
        )
        created_instance.publish_attributes["HoudiniSubmitDeadlineUsdRender"]["export_chunk"] = 1000  # noqa
        create_context.save_changes()

    def get_description(self):
        return inspect.cleandoc(
            """### Render USD file configured incorrectly

            The USD render ROP is currently configured to write a single
            USD file to render instead of a file per frame.

            When that is the case, a single machine must produce that file in
            one process to avoid the file being overwritten by the other
            processes.

            We resolve that by enabling _Render All Frames With A Single
            Process_ on the ROP node and ensuring the export job task size
            is larger than the amount of frames of the sequence, so the file
            gets written in one go.

            Run **Repair** to resolve this for you.

            If instead you want to write separate render USD files, please
            include $F in the USD output filename on the `ROP node > Output >
            USD Export > Output File`
            """
        )


class ValidateUSDRenderArnoldSettings(plugin.HoudiniInstancePlugin):
    """Validate Arnold-specific render settings on the USD Render ROP."""

    order = pyblish.api.ValidatorOrder
    families = ["usdrender"]
    hosts = ["houdini"]
    label = "Validate USD Render Arnold Settings"
    actions = [SelectROPAction]

    def process(self, instance):

        rop_node = hou.node(instance.data["instance_node"])
        node = instance.data.get("output_node")
        if not node:
            # No valid output node was set. We ignore it since it will
            # be validated by another plug-in.
            return

        # Check only for Arnold renderer
        renderer = rop_node.evalParm("renderer")
        if renderer != "HdArnoldRendererPlugin":
            self.log.debug("Skipping Arnold Settings validation because "
                           "renderer is set to: %s", renderer)
            return

        # Validate Arnold Product Type is enabled on the Arnold Render
        # Settings. This is confirmed by the `includeAovs` attribute on the
        # RenderProduct.
        stage: pxr.Usd.Stage = node.stage()
        invalid = False
        for prim_path in instance.data.get("usdRenderProducts", []):
            prim = stage.GetPrimAtPath(prim_path)
            include_aovs = prim.GetAttribute("includeAovs")
            if not include_aovs.IsValid() or not include_aovs.Get(0):
                self.log.error(
                    "All Render Products must be set to 'Arnold Product "
                    "Type' on the Arnold Render Settings node to ensure "
                    "correct output of metadata and AOVs."
                )
                invalid = True
                break

        # Ensure 'Delegate Products' is enabled for Husk
        if not rop_node.evalParm("husk_delegateprod"):
            invalid = True
            self.log.error("USD Render ROP has `Husk > Rendering > Delegate "
                           "Products` disabled. Please enable to ensure "
                           "correct output files")

        # TODO: Detect bug of invalid Cryptomatte state?
        #   Detect if any Render Products were set that do not actually exist
        #   (e.g. invalid rendervar targets for a renderproduct) because that
        #   is what originated the Cryptomatte enable->disable bug.

        if invalid:
            raise PublishValidationError(
                "Invalid Render Settings for Arnold render."
            )


class ValidateUSDRenderCamera(plugin.HoudiniInstancePlugin):
    """Validate USD Render Settings refer to a valid render camera.

    The render camera is defined in priority by this order:
    1. ROP Node Override Camera Parm (if set)
    2. Render Product Camera (if set - this may differ PER render product!)
    3. Render Settings Camera (if set)

    If none of these are set, *or* a currently set entry resolves to an
    invalid camera prim path, then we report it as an error.

    """

    order = pyblish.api.ValidatorOrder
    families = ["usdrender"]
    hosts = ["houdini"]
    label = "Validate USD Render Camera"
    actions = [SelectROPAction]

    def process(self, instance):

        rop_node = hou.node(instance.data["instance_node"])
        lop_node = instance.data.get("output_node")
        if not lop_node:
            # No valid output node was set. We ignore it since it will
            # be validated by another plug-in.
            return

        stage = lop_node.stage()

        render_settings = get_usd_render_rop_rendersettings(rop_node, stage,
                                                            logger=self.log)
        if not render_settings:
            # Without render settings we basically have no defined render
            # camera at all.
            self.log.error("No render settings found for %s.", rop_node.path())
            return

        render_settings_camera = self._get_camera(render_settings)
        rop_camera = rop_node.evalParm("override_camera")

        invalid = False
        camera_paths = set()
        for render_product in self.iter_render_products(render_settings,
                                                        stage):
            render_product_camera = self._get_camera(render_product)

            # Get first camera path as per the order in this plug-in's
            # docstring
            camera_path = next(
                (cam_path for cam_path in [rop_camera,
                                           render_product_camera,
                                           render_settings_camera]
                 if cam_path),
                None
            )
            if not camera_path:
                self.log.error(
                    "No render camera defined for render product: '%s'",
                    render_product.GetPath()
                )
                invalid = True
                continue

            camera_paths.add(camera_path)

        # For the camera paths used across the render products detect
        # whether the path is a valid camera in the stage
        for camera_path in sorted(camera_paths):
            camera_prim = stage.GetPrimAtPath(camera_path)
            if not camera_prim or not camera_prim.IsValid():
                self.log.error(
                    "Render camera path '%s' does not exist in stage.",
                    camera_path
                )
                invalid = True
                continue

            if not camera_prim.IsA(pxr.UsdGeom.Camera):
                self.log.error(
                    "Render camera path '%s' is not a camera.",
                    camera_path
                )
                invalid = True

        if invalid:
            raise PublishValidationError(
                f"No render camera found for {instance.name}.",
                title="Invalid Render Camera",
                description=self.get_description()
            )

    def iter_render_products(self, render_settings, stage):
        for product_path in render_settings.GetProductsRel().GetTargets():
            prim = stage.GetPrimAtPath(product_path)
            if prim.IsA(UsdRender.Product):
                yield UsdRender.Product(prim)

    def _get_camera(self, settings: UsdRender.SettingsBase):
        """Return primary camera target from RenderSettings or RenderProduct"""
        camera_targets = settings.GetCameraRel().GetForwardedTargets()
        if camera_targets:
            return camera_targets[0]

    def get_description(self):
        return inspect.cleandoc(
            """### Missing render camera

            No valid render camera was set for the USD Render Settings.

            The configured render camera path must be a valid camera in the
            stage. Make sure it refers to an existing path and that it is
            a camera.

            """
        )
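To make the single-file chunk size rule in `ValidateUSDRenderSingleFile` above concrete, here is a small worked example (the numbers are illustrative):

```python
# Handle-inclusive frame range 1001-1100:
frame_start_handle, frame_end_handle = 1001, 1100
num_frames = frame_end_handle - frame_start_handle + 1  # 100 frames


def single_file_export_is_safe(export_chunk_size: int) -> bool:
    # One export task must write the whole single USD file in one go.
    return export_chunk_size >= num_frames


assert not single_file_export_is_safe(50)  # two tasks would overwrite the file
assert single_file_export_is_safe(1000)    # the Repair action's chunk value
```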
@@ -18,7 +18,7 @@ class ValidateUSDRenderProductNames(plugin.HoudiniInstancePlugin):
     def process(self, instance):

         invalid = []
-        for filepath in instance.data["files"]:
+        for filepath in instance.data.get("files", []):

             if not filepath:
                 invalid.append("Detected empty output filepath.")
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
import os
import hou
import inspect
import pyblish.api

from ayon_core.pipeline import (
    OptionalPyblishPluginMixin,
    PublishValidationError
)

from ayon_houdini.api import plugin


class ValidateUSDRenderProductPaths(plugin.HoudiniInstancePlugin,
                                    OptionalPyblishPluginMixin):
    """Validate USD render product paths are unique per workfile version.

    The publishing logic uses a metadata `.json` in the render output images'
    folder to identify how the files should be published. To ensure multiple
    subsequently submitted versions of a scene do not override the same
    metadata json file we want to ensure the user has the render paths set up
    to contain the $HIPNAME in a parent folder.

    """
    # NOTE(colorbleed): This workflow might be relatively Colorbleed-specific
    # TODO: Preferably we find ways to make what this tries to avoid no issue
    #   itself by e.g. changing how AYON deals with these metadata json files.

    order = pyblish.api.ValidatorOrder
    families = ["usdrender"]
    hosts = ["houdini"]
    label = "Validate USD Render Product Paths"
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        current_file = instance.context.data["currentFile"]

        # Mimic `$HIPNAME:r` because `hou.text.collapseCommonVars` can not
        # collapse it
        hipname_r = os.path.splitext(os.path.basename(current_file))[0]

        invalid = False
        for filepath in instance.data.get("files", []):
            folder = os.path.dirname(filepath)

            if hipname_r not in folder:
                filepath_raw = hou.text.collapseCommonVars(filepath, vars=[
                    "$HIP", "$JOB", "$HIPNAME"
                ])
                filepath_raw = filepath_raw.replace(hipname_r, "$HIPNAME:r")
                self.log.error("Invalid render output path:\n%s", filepath_raw)
                invalid = True

        if invalid:
            raise PublishValidationError(
                "Render path is invalid. Please make sure to include a "
                "folder with '$HIPNAME:r'.",
                title=self.label,
                description=self.get_description()
            )

    def get_description(self):
        return inspect.cleandoc(
            """### Invalid render output path

            The render output path must include the current scene name in
            a parent folder to ensure uniqueness across multiple workfile
            versions. Otherwise subsequent farm publishes could fail because
            newer versions will overwrite the metadata files of older versions.

            The easiest way to do so is to include **`$HIPNAME:r`** somewhere
            in the render product names.

            A recommended output path is for example:
            ```
            $HIP/renders/$HIPNAME:r/$OS/$HIPNAME:r.$OS.$F4.exr
            ```
            """
        )
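The `$HIPNAME:r` uniqueness check boils down to a substring test on the output folder. A plain-Python mirror of it (the paths are made up for the example):

```python
import os

current_file = "/proj/work/shot010_v003.hip"
hipname_r = os.path.splitext(os.path.basename(current_file))[0]
# hipname_r == "shot010_v003"

good = "/proj/renders/shot010_v003/beauty/beauty.1001.exr"
bad = "/proj/renders/beauty/beauty.1001.exr"
assert hipname_r in os.path.dirname(good)     # passes the validation
assert hipname_r not in os.path.dirname(bad)  # would be reported as invalid
```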
@@ -0,0 +1,110 @@
# -*- coding: utf-8 -*-
import inspect
import hou
from pxr import Sdf
import pyblish.api

from ayon_core.pipeline import PublishValidationError

from ayon_houdini.api.action import SelectROPAction
from ayon_houdini.api import plugin


class ValidateUSDRopDefaultPrim(plugin.HoudiniInstancePlugin):
    """Validate the default prim exists if default prim value is set on ROP"""

    order = pyblish.api.ValidatorOrder
    families = ["usdrop"]
    hosts = ["houdini"]
    label = "Validate USD ROP Default Prim"
    actions = [SelectROPAction]

    def process(self, instance):

        rop_node = hou.node(instance.data["instance_node"])

        default_prim = rop_node.evalParm("defaultprim")
        if not default_prim:
            self.log.debug(
                "No default prim specified on ROP node: %s", rop_node.path()
            )
            return

        lop_node: hou.LopNode = instance.data.get("output_node")
        if not lop_node:
            return

        above_break_layers = set(
            layer for layer in lop_node.layersAboveLayerBreak()
        )
        stage = lop_node.stage()
        layers = [
            layer for layer
            in stage.GetLayerStack(includeSessionLayers=False)
            if layer.identifier not in above_break_layers
        ]
        if not layers:
            self.log.error("No USD layers found. This is likely a bug.")
            return

        # TODO: This only would detect any local opinions on that prim and
        #   thus would fail to detect if a sublayer added on the stage root
        #   layer being exported would actually be generating the prim path.
        #   We should maybe consider that if this fails we still check
        #   whether a sublayer doesn't create the default prim path.
        for layer in layers:
            if layer.GetPrimAtPath(default_prim):
                break
        else:
            # No prim found at the given path on any of the generated layers
            raise PublishValidationError(
                "Default prim specified by USD ROP does not exist in "
                f"stage: '{default_prim}'",
                title="Default Prim",
                description=self.get_description()
            )

        # Warn about any paths that are authored that are not a child
        # of the default prim
        outside_paths = set()
        default_prim_path = f"/{default_prim.strip('/')}"
        for layer in layers:

            def collect_outside_paths(path: Sdf.Path):
                """Collect all paths that are no child of the default prim"""

                if not path.IsPrimPath():
                    # Collect only prim paths
                    return

                # Ignore the HoudiniLayerInfo prim
                if path.pathString == "/HoudiniLayerInfo":
                    return

                if not path.pathString.startswith(default_prim_path):
                    outside_paths.add(path)

            layer.Traverse("/", collect_outside_paths)

        if outside_paths:
            self.log.warning(
                "Found paths that are not within the default primitive "
                "path '%s'. When referenced, the following paths will not "
                "be loaded by default:",
                default_prim
            )
            for outside_path in sorted(outside_paths):
                self.log.warning("Outside default prim: %s", outside_path)

    def get_description(self):
        return inspect.cleandoc(
            """### Default Prim not found

            The USD render ROP is currently configured to write the output
            USD file with a default prim. However, the default prim is not
            found in the USD stage.

            Make sure to double check the Default Prim setting on the USD
            Render ROP for typos or make sure the hierarchy and opinions you
            are creating exist in the default prim path.

            """
        )
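One caveat with the string-based `startswith` check above: it also matches sibling prims whose names merely share the prefix. `Sdf.Path.HasPrefix` compares whole path components instead, which would avoid that; a small sketch of the difference:

```python
from pxr import Sdf

default_prim_path = Sdf.Path("/char_hero")

# String prefix: false positive for a sibling prim.
assert "/char_heroB".startswith("/char_hero")

# Component-wise prefix: correctly rejects the sibling...
assert not Sdf.Path("/char_heroB").HasPrefix(default_prim_path)
# ...while still accepting actual children.
assert Sdf.Path("/char_hero/geo").HasPrefix(default_prim_path)
```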
@@ -1,58 +0,0 @@
# -*- coding: utf-8 -*-
import pyblish.api
from ayon_core.pipeline import PublishValidationError

from ayon_houdini.api import plugin
import ayon_houdini.api.usd as hou_usdlib


class ValidateUsdSetDress(plugin.HoudiniInstancePlugin):
    """Validate USD Set Dress.

    Must only have references or payloads. May not generate new mesh or
    flattened meshes.

    """

    order = pyblish.api.ValidatorOrder
    families = ["usdSetDress"]
    label = "Validate USD Set Dress"
    optional = True

    def process(self, instance):

        import hou
        from pxr import UsdGeom

        rop = hou.node(instance.data.get("instance_node"))
        lop_path = hou_usdlib.get_usd_rop_loppath(rop)
        stage = lop_path.stage(apply_viewport_overrides=False)

        invalid = []
        for node in stage.Traverse():

            if UsdGeom.Mesh(node):
                # This solely checks whether there is any USD involved
                # in this Prim's Stack and doesn't accurately tell us
                # whether it was generated locally or not.
                # TODO: More accurately track whether the Prim was created
                #   in the local scene
                stack = node.GetPrimStack()
                for sdf in stack:
                    path = sdf.layer.realPath
                    if path:
                        break
                else:
                    prim_path = node.GetPath()
                    self.log.error(
                        "%s is not referenced geometry." % prim_path
                    )
                    invalid.append(node)

        if invalid:
            raise PublishValidationError((
                "SetDress contains local geometry. "
                "This is not allowed, it must be an assembly "
                "of referenced assets."),
                title=self.label
            )
@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-
import re

import ayon_api
from ayon_core.pipeline.publish import (
    ValidateContentsOrder,
    KnownPublishError,
    PublishValidationError,
)

from ayon_houdini.api import plugin


class ValidateUSDShadeModelExists(plugin.HoudiniInstancePlugin):
    """Validate the USD model product this shade product refers to exists."""

    order = ValidateContentsOrder
    families = ["usdShade"]
    label = "USD Shade model exists"

    def process(self, instance):
        project_name = instance.context.data["projectName"]
        folder_path = instance.data["folderPath"]
        product_name = instance.data["productName"]

        # Assume shading variation starts after a dot separator
        shade_product_name = product_name.split(".", 1)[0]
        model_product_name = re.sub(
            "^usdShade", "usdModel", shade_product_name
        )

        folder_entity = instance.data.get("folderEntity")
        if not folder_entity:
            raise KnownPublishError(
                "Folder entity is not filled on instance."
            )

        product_entity = ayon_api.get_product_by_name(
            project_name,
            model_product_name,
            folder_entity["id"],
            fields={"id"}
        )
        if not product_entity:
            raise PublishValidationError(
                ("USD Model product not found: "
                 "{} ({})").format(model_product_name, folder_path),
                title=self.label
            )
@@ -1,67 +0,0 @@
# -*- coding: utf-8 -*-
import hou

import pyblish.api
from ayon_core.pipeline import PublishValidationError

from ayon_houdini.api import plugin


class ValidateUsdShadeWorkspace(plugin.HoudiniInstancePlugin):
    """Validate USD Shading Workspace is correct version.

    There have been some issues with outdated/erroneous Shading Workspaces
    so this is to confirm everything is set as it should.

    """

    order = pyblish.api.ValidatorOrder
    families = ["usdShade"]
    label = "USD Shade Workspace"

    def process(self, instance):

        rop = hou.node(instance.data.get("instance_node"))
        workspace = rop.parent()

        definition = workspace.type().definition()
        name = definition.nodeType().name()
        library = definition.libraryFilePath()

        all_definitions = hou.hda.definitionsInFile(library)
        node_type, version = name.rsplit(":", 1)
        version = float(version)

        highest = version
        for other_definition in all_definitions:
            other_name = other_definition.nodeType().name()
            other_node_type, other_version = other_name.rsplit(":", 1)
            other_version = float(other_version)

            if node_type != other_node_type:
                continue

            # Get the highest version
            highest = max(highest, other_version)

        if version != highest:
            raise PublishValidationError(
                ("Shading Workspace is not the latest version."
                 " Found {}. Latest is {}.").format(version, highest),
                title=self.label
            )

        # There were some issues with the editable node not having the right
        # configured path. So for now let's assure that is correct too.
        value = (
            'avalon://`chs("../folder_path")`/'
            'usdShade`chs("../model_variantname1")`.usd'
        )
        rop_value = rop.parm("lopoutput").rawValue()
        if rop_value != value:
            raise PublishValidationError(
                ("Shading Workspace has invalid 'lopoutput'"
                 " parameter value. The Shading Workspace"
                 " needs to be reset to its default values."),
                title=self.label
            )
@@ -0,0 +1,135 @@
import logging

from husd.outputprocessor import OutputProcessor

from ayon_core.pipeline import entity_uri
from ayon_core.pipeline.load.utils import get_representation_path_by_names


class AYONURIOutputProcessor(OutputProcessor):
    """Process AYON Entity URIs into their full path equivalents."""

    def __init__(self):
        """There is only one object of each output processor class that is
        ever created in a Houdini session. Therefore, be very careful
        about what data gets put in this object.
        """
        self._save_cache = dict()
        self._ref_cache = dict()
        self._publish_context = None
        self.log = logging.getLogger(__name__)

    @staticmethod
    def name():
        return "ayon_uri_processor"

    @staticmethod
    def displayName():
        return "AYON URI Output Processor"

    def processReferencePath(self,
                             asset_path,
                             referencing_layer_path,
                             asset_is_layer):
        """
        Args:
            asset_path (str): The path to the asset, as specified in Houdini.
                If this asset is being written to disk, this will be the
                final output of the `processSavePath()` calls on all output
                processors.
            referencing_layer_path (str): The absolute file path of the file
                containing the reference to the asset. You can use this to
                make the path pointer relative.
            asset_is_layer (bool): A boolean value indicating whether this
                asset is a USD layer file. If this is `False`, the asset is
                something else (for example, a texture or volume file).

        Returns:
            The refactored reference path.

        """
        cache = self._ref_cache

        # Retrieve from cache if this query occurred before (optimization)
        if asset_path in cache:
            return cache[asset_path]

        uri_data = entity_uri.parse_ayon_entity_uri(asset_path)
        if not uri_data:
            cache[asset_path] = asset_path
            return asset_path

        # Try and find it as an existing publish. Note that the parser
        # returns the folder under the "folderPath" key.
        query = {
            "project_name": uri_data["project"],
            "folder_path": uri_data["folderPath"],
            "product_name": uri_data["product"],
            "version_name": uri_data["version"],
            "representation_name": uri_data["representation"],
        }
        path = get_representation_path_by_names(**query)
        if path:
            self.log.debug(
                "AYON URI Resolver - ref: %s -> %s", asset_path, path
            )
            cache[asset_path] = path
            return path

        elif self._publish_context:
            # Query doesn't resolve to an existing version - it likely
            # points to a version defined in the current publish session.
            # As such, we should resolve it using the current publish
            # context if that was set prior to this publish.
            raise NotImplementedError("TODO")

        self.log.warning(f"Unable to resolve AYON URI: {asset_path}")
        cache[asset_path] = asset_path
        return asset_path

    def processSavePath(self,
                        asset_path,
                        referencing_layer_path,
                        asset_is_layer):
        """
        Args:
            asset_path (str): The path to the asset, as specified in Houdini.
                If this asset is being written to disk, this will be the
                final output of the `processSavePath()` calls on all output
                processors.
            referencing_layer_path (str): The absolute file path of the file
                containing the reference to the asset. You can use this to
                make the path pointer relative.
            asset_is_layer (bool): A boolean value indicating whether this
                asset is a USD layer file. If this is `False`, the asset is
                something else (for example, a texture or volume file).

        Returns:
            The refactored save path.

        """
        cache = self._save_cache

        # Retrieve from cache if this query occurred before (optimization)
        if asset_path in cache:
            return cache[asset_path]

        uri_data = entity_uri.parse_ayon_entity_uri(asset_path)
        if not uri_data:
            cache[asset_path] = asset_path
            return asset_path

        # The parser returns the folder as "folderPath" (e.g. "/char/hero"),
        # so flatten it to keep the relative filename free of separators.
        folder = uri_data["folderPath"].strip("/").replace("/", "_")
        relative_template = "{folder}_{product}_{version}_{representation}.usd"
        # Set save output path to a relative path so other
        # processors can potentially manage it easily?
        path = relative_template.format(
            folder=folder,
            product=uri_data["product"],
            version=uri_data["version"],
            representation=uri_data["representation"],
        )

        self.log.debug("AYON URI Resolver - save: %s -> %s", asset_path, path)
        cache[asset_path] = path
        return path


def usdOutputProcessor():
    return AYONURIOutputProcessor
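
A minimal usage sketch of the processor above, invoked outside Houdini's own save pipeline (the URI and paths are illustrative):

    processor = AYONURIOutputProcessor()
    resolved = processor.processReferencePath(
        asset_path=(
            "ayon://myproject/assets/hero"
            "?product=modelMain&version=2&representation=usd"
        ),
        referencing_layer_path="/tmp/scene.usd",
        asset_is_layer=True,
    )
    # Returns the representation's published file path if it exists;
    # otherwise the URI string is returned unchanged.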
@@ -0,0 +1,66 @@
import os
import json

import hou
from husd.outputprocessor import OutputProcessor


class AYONRemapPaths(OutputProcessor):
    """Remap paths based on a mapping dict on rop node."""

    def __init__(self):
        self._mapping = dict()

    @staticmethod
    def name():
        return "ayon_remap_paths"

    @staticmethod
    def displayName():
        return "AYON Remap Paths"

    @staticmethod
    def hidden():
        return True

    @staticmethod
    def parameters():
        group = hou.ParmTemplateGroup()

        parm_template = hou.StringParmTemplate(
            "ayon_remap_paths_remap_json",
            "Remapping dict (json)",
            default_value="{}",
            num_components=1,
            string_type=hou.stringParmType.Regular,
        )
        group.append(parm_template)

        return group.asDialogScript()

    def beginSave(self, config_node, config_overrides, lop_node, t):
        super(AYONRemapPaths, self).beginSave(config_node,
                                              config_overrides,
                                              lop_node,
                                              t)

        value = config_node.evalParm("ayon_remap_paths_remap_json")
        mapping = json.loads(value)
        assert isinstance(mapping, dict)

        # Ensure all keys are normalized paths so the lookup can be done
        # correctly
        mapping = {
            os.path.normpath(key): value for key, value in mapping.items()
        }
        self._mapping = mapping

    def processReferencePath(self,
                             asset_path,
                             referencing_layer_path,
                             asset_is_layer):
        return self._mapping.get(os.path.normpath(asset_path), asset_path)


def usdOutputProcessor():
    return AYONRemapPaths
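
A short sketch (paths made up) of how the remap mapping is authored and consumed; keys are normalized before lookup, so either slash style matches:

    import json
    import os

    mapping = {"C:/temp/tex/wood.exr": "P:/publish/tex/wood_v003.exr"}
    # This JSON string is what would be set on the
    # "ayon_remap_paths_remap_json" parm:
    parm_value = json.dumps(mapping)

    # The lookup mirrors AYONRemapPaths.processReferencePath():
    normalized = {os.path.normpath(k): v for k, v in mapping.items()}
    print(normalized.get(os.path.normpath("C:\\temp\\tex\\wood.exr")))
    # -> "P:/publish/tex/wood_v003.exr"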
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'houdini' version."""
-__version__ = "0.3.8"
+__version__ = "0.3.9"

@@ -1,6 +1,6 @@
 name = "houdini"
 title = "Houdini"
-version = "0.3.8"
+version = "0.3.9"

 client_dir = "ayon_houdini"
@@ -32,6 +32,16 @@ class CreateStaticMeshModel(BaseSettingsModel):
     )


+class CreateUSDRenderModel(CreatorModel):
+    default_renderer: str = SettingsField(
+        "Karma CPU",
+        title="Default Renderer",
+        description=(
+            "Specify either the Hydra renderer plug-in nice name, like "
+            "'Karma CPU', or the plug-in name, e.g. 'BRAY_HdKarma'"
+        ))
+
+
 class CreatePluginsModel(BaseSettingsModel):
     CreateAlembicCamera: CreatorModel = SettingsField(
         default_factory=CreatorModel,
@@ -78,10 +88,10 @@
         title="Create Static Mesh")
     CreateUSD: CreatorModel = SettingsField(
         default_factory=CreatorModel,
-        title="Create USD (experimental)")
-    CreateUSDRender: CreatorModel = SettingsField(
-        default_factory=CreatorModel,
-        title="Create USD render (experimental)")
+        title="Create USD")
+    CreateUSDRender: CreateUSDRenderModel = SettingsField(
+        default_factory=CreateUSDRenderModel,
+        title="Create USD render")
     CreateVDBCache: CreatorModel = SettingsField(
         default_factory=CreatorModel,
         title="Create VDB Cache")
@@ -158,12 +168,13 @@ DEFAULT_HOUDINI_CREATE_SETTINGS = {
         ]
     },
     "CreateUSD": {
-        "enabled": False,
+        "enabled": True,
         "default_variants": ["Main"]
     },
     "CreateUSDRender": {
-        "enabled": False,
-        "default_variants": ["Main"]
+        "enabled": True,
+        "default_variants": ["Main"],
+        "default_renderer": "Karma CPU"
     },
     "CreateVDBCache": {
         "enabled": True,
@@ -134,6 +134,9 @@ class PublishPluginsModel(BaseSettingsModel):
     ValidateWorkfilePaths: ValidateWorkfilePathsModel = SettingsField(
         default_factory=ValidateWorkfilePathsModel,
         title="Validate workfile paths settings")
+    ValidateUSDRenderProductPaths: BasicEnabledStatesModel = SettingsField(
+        default_factory=BasicEnabledStatesModel,
+        title="Validate USD Render Product Paths")
     ExtractActiveViewThumbnail: BasicEnabledStatesModel = SettingsField(
         default_factory=BasicEnabledStatesModel,
         title="Extract Active View Thumbnail",
@@ -202,6 +205,11 @@ DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
             "$JOB"
         ]
     },
+    "ValidateUSDRenderProductPaths": {
+        "enabled": False,
+        "optional": True,
+        "active": True
+    },
     "ExtractActiveViewThumbnail": {
         "enabled": True,
         "optional": False,
@@ -1,28 +0,0 @@
## Basic setup

- Currently supported versions are up to v18
- Install Python 3.6.2 (latest tested on v17) or up to 3.9.13 (latest tested on v18)
- pip install PySide2:
  - Python 3.9.*: open a terminal, go to the python.exe directory, then `python -m pip install PySide2`
- pip install OpenTimelineIO:
  - Python 3.9.*: open a terminal, go to the python.exe directory, then `python -m pip install OpenTimelineIO`
  - Python 3.6: open a terminal, go to the python.exe directory, then `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move the built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I was building it on a Win10 machine with Visual Studio Community 2019 and  with CMake installed in PATH.
- Make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6
  
- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the Python3 path to `RESOLVE_PYTHON3_HOME` for the relevant platform.

## Editorial setup

This is how it looks on my testing project timeline

Notice I renamed the tracks: `main` holds the metadata markers and `review` is used for generating review data with ffmpeg conversion to a jpg sequence.

1. Start the AYON menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__**
2. Then select any clips in the `main` track and change their color to `Chocolate`
3. In the OpenPype Menu select `Create`
4. In the Creator select `Create Publishable Clip [New]` (temporary name)
5. Set `Rename clips` to True, Master Track to `main` and Use review track to `review` as in the picture
6. After you hit `ok`, all clips are colored `pink` and marked with an openpype metadata tag
7. Hit `Publish` in the openpype menu and check that everything was collected correctly. That is the last step for now, as the rest is work in progress. Next steps will follow.
@@ -1,838 +0,0 @@
|
|||
Last Updated: 1 April 2024
|
||||
----------------------------
|
||||
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import
modules for scripting access (DaVinciResolveScript.py) and some representative examples.
|
||||
|
||||
From v16.2.0 onwards, the nodeIndex parameters accepted by SetLUT() and SetCDL() are 1-based instead of 0-based, i.e. 1 <= nodeIndex <= total number of nodes.
|
||||
|
||||
Overview
|
||||
--------
|
||||
As with Blackmagic Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page,
|
||||
or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when
|
||||
allowing scripting access from outside of the Resolve application.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
DaVinci Resolve scripting requires one of the following to be installed (for all users):
|
||||
|
||||
Lua 5.1
|
||||
Python >= 3.6 64-bit
|
||||
Python 2.7 64-bit
|
||||
|
||||
Using a script
|
||||
--------------
|
||||
DaVinci Resolve needs to be running for a script to be invoked.
|
||||
|
||||
For a Resolve script to be executed from an external folder, the script needs to know of the API location.
|
||||
You may need to set these environment variables to allow your Python installation to pick up the appropriate dependencies, as shown below:
|
||||
|
||||
Mac OS X:
|
||||
RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting"
|
||||
RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so"
|
||||
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
|
||||
|
||||
Windows:
|
||||
RESOLVE_SCRIPT_API="%PROGRAMDATA%\Blackmagic Design\DaVinci Resolve\Support\Developer\Scripting"
|
||||
RESOLVE_SCRIPT_LIB="C:\Program Files\Blackmagic Design\DaVinci Resolve\fusionscript.dll"
|
||||
PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\Modules\"
|
||||
|
||||
Linux:
|
||||
RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting"
|
||||
RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so"
|
||||
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
|
||||
(Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve)
|
||||
|
||||
As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console.
|
||||
|
||||
On startup, DaVinci Resolve scans the subfolders in the directories shown below and enumerates the scripts found in the Workspace application menu under Scripts.
|
||||
Place your script under Utility to be listed in all pages, under Comp or Tool to be available in the Fusion page or under folders for individual pages (Edit, Color or Deliver). Scripts under Deliver are additionally listed under render jobs.
|
||||
Placing your script here and invoking it from the menu is the easiest way to use scripts.
|
||||
Mac OS X:
|
||||
- All users: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts
|
||||
- Specific user: /Users/<UserName>/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts
|
||||
Windows:
|
||||
- All users: %PROGRAMDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts
|
||||
- Specific user: %APPDATA%\Roaming\Blackmagic Design\DaVinci Resolve\Support\Fusion\Scripts
|
||||
Linux:
|
||||
- All users: /opt/resolve/Fusion/Scripts (or /home/resolve/Fusion/Scripts/ depending on installation)
|
||||
- Specific user: $HOME/.local/share/DaVinciResolve/Fusion/Scripts
|
||||
|
||||
The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6
|
||||
and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual.
|
||||
|
||||
This example Python script creates a simple project:
|
||||
|
||||
#!/usr/bin/env python
|
||||
import DaVinciResolveScript as dvr_script
|
||||
resolve = dvr_script.scriptapp("Resolve")
|
||||
fusion = resolve.Fusion()
|
||||
projectManager = resolve.GetProjectManager()
|
||||
projectManager.CreateProject("Hello World")
|
||||
|
||||
The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and "getmetatable"
|
||||
in Lua and dir, help, etc. in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality.
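
As a quick, illustrative sketch (assuming the import module is on PYTHONPATH as described above), that inspection from Python might look like:

    import DaVinciResolveScript as dvr_script
    resolve = dvr_script.scriptapp("Resolve")
    print(dir(resolve))                # list scriptable members
    help(resolve.GetProjectManager)    # built-in help on a member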
|
||||
|
||||
|
||||
Running DaVinci Resolve in headless mode
|
||||
----------------------------------------
|
||||
DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled.
|
||||
However, the various scripting APIs will continue to work as expected.
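
For example (install path assumed; adjust per platform), a headless batch session could be driven like:

    /opt/resolve/bin/resolve -nogui &
    python3 my_batch_script.py    # hypothetical script using the API below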
|
||||
|
||||
DaVinci Resolve API
|
||||
-------------------
|
||||
Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions.
|
||||
|
||||
Resolve
|
||||
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
|
||||
GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations.
|
||||
GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database.
|
||||
OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
|
||||
GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None).
|
||||
GetProductName() --> string # Returns product name.
|
||||
GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format.
|
||||
GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format.
|
||||
LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'.
|
||||
UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout.
|
||||
ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'.
|
||||
DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'.
|
||||
SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'.
|
||||
ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename.
|
||||
Quit() --> None # Quits the Resolve App.
|
||||
ImportRenderPreset(presetPath) --> Bool # Import a preset from presetPath (string) and set it as current preset for rendering.
|
||||
ExportRenderPreset(presetName, exportPath) --> Bool # Export a preset to a given path (string) if presetName(string) exists.
|
||||
ImportBurnInPreset(presetPath) --> Bool # Import a data burn in preset from a given presetPath (string)
|
||||
ExportBurnInPreset(presetName, exportPath) --> Bool # Export a data burn in preset to a given path (string) if presetName (string) exists.
|
||||
GetKeyframeMode() --> keyframeMode # Returns the currently set keyframe mode (int). Refer to section 'Keyframe Mode information' below for details.
|
||||
SetKeyframeMode(keyframeMode) --> Bool # Returns True when 'keyframeMode'(enum) is successfully set. Refer to section 'Keyframe Mode information' below for details.
|
||||
|
||||
ProjectManager
|
||||
ArchiveProject(projectName,
|
||||
filePath,
|
||||
isArchiveSrcMedia=True,
|
||||
isArchiveRenderCache=True,
|
||||
isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments
|
||||
CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not.
|
||||
DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded
|
||||
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project.
|
||||
GetCurrentProject() --> Project # Returns the currently loaded Resolve project.
|
||||
SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful.
|
||||
CloseProject(project) --> Bool # Closes the specified project without saving.
|
||||
CreateFolder(folderName) --> Bool # Creates a folder if folderName (string) is unique.
|
||||
DeleteFolder(folderName) --> Bool # Deletes the specified folder if it exists. Returns True in case of success.
|
||||
GetProjectListInCurrentFolder() --> [project names...] # Returns a list of project names in current folder.
|
||||
GetFolderListInCurrentFolder() --> [folder names...] # Returns a list of folder names in current folder.
|
||||
GotoRootFolder() --> Bool # Opens root folder in database.
|
||||
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
|
||||
GetCurrentFolder() --> string # Returns the current folder name.
|
||||
OpenFolder(folderName) --> Bool # Opens folder under given name.
|
||||
ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful.
|
||||
ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success.
|
||||
RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful.
|
||||
GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection
|
||||
GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve
|
||||
SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project.
|
||||
# 'DbType': 'Disk' or 'PostgreSQL' (string)
|
||||
# 'DbName': database name (string)
|
||||
# 'IpAddress': IP address of the PostgreSQL server (string, optional key - defaults to '127.0.0.1')
|
||||
CreateCloudProject({cloudSettings}) --> Project # Creates and returns a cloud project.
|
||||
# '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
|
||||
ImportCloudProject(filePath, {cloudSettings}) --> Bool # Returns True if import cloud project is successful; False otherwise
|
||||
# 'filePath': String; filePath of file to import
|
||||
# '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
|
||||
RestoreCloudProject(folderPath, {cloudSettings}) --> Bool # Returns True if restore cloud project is successful; False otherwise
|
||||
# 'folderPath': String; path of folder to restore
|
||||
# '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
|
||||
|
||||
Project
|
||||
GetMediaPool() --> MediaPool # Returns the Media Pool object.
|
||||
GetTimelineCount() --> int # Returns the number of timelines currently present in the project.
|
||||
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
|
||||
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
|
||||
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
|
||||
GetGallery() --> Gallery # Returns the Gallery object.
|
||||
GetName() --> string # Returns project name.
|
||||
SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique.
|
||||
GetPresetList() --> [presets...] # Returns a list of presets and their information.
|
||||
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
|
||||
AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job.
|
||||
DeleteRenderJob(jobId) --> Bool # Deletes render job for input job id (string).
|
||||
DeleteAllRenderJobs() --> Bool # Deletes all render jobs in the queue.
|
||||
GetRenderJobList() --> [render jobs...] # Returns a list of render jobs and their information.
|
||||
GetRenderPresetList() --> [presets...] # Returns a list of render presets and their information.
|
||||
StartRendering(jobId1, jobId2, ...) --> Bool # Starts rendering jobs indicated by the input job ids.
|
||||
StartRendering([jobIds...], isInteractiveMode=False) --> Bool # Starts rendering jobs indicated by the input job ids.
|
||||
# The optional "isInteractiveMode", when set, enables error feedback in the UI during rendering.
|
||||
StartRendering(isInteractiveMode=False) --> Bool # Starts rendering all queued render jobs.
|
||||
# The optional "isInteractiveMode", when set, enables error feedback in the UI during rendering.
|
||||
StopRendering() --> None # Stops any current render processes.
|
||||
IsRenderingInProgress() --> Bool # Returns True if rendering is in progress.
|
||||
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists.
|
||||
SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique.
|
||||
SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys:
|
||||
# Refer to "Looking up render settings" section for information for supported settings
|
||||
GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string).
|
||||
GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information.
|
||||
SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information.
|
||||
GetRenderFormats() --> {render formats..} # Returns a dict (format -> file extension) of available render formats.
|
||||
GetRenderCodecs(renderFormat) --> {render codecs...} # Returns a dict (codec description -> codec name) of available codecs for given render format (string).
|
||||
GetCurrentRenderFormatAndCodec() --> {format, codec} # Returns a dict with currently selected format 'format' and render codec 'codec'.
|
||||
SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering.
|
||||
GetCurrentRenderMode() --> int # Returns the render mode: 0 - Individual clips, 1 - Single clip.
|
||||
SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip.
|
||||
GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height".
|
||||
RefreshLUTList() --> Bool # Refreshes LUT List
|
||||
GetUniqueId() --> string # Returns a unique ID for the project item
|
||||
InsertAudioToCurrentTrackAtPlayhead(mediaPath, --> Bool # Inserts the media specified by mediaPath (string) with startOffsetInSamples (int) and durationInSamples (int) at the playhead on a selected track on the Fairlight page. Returns True if successful, otherwise False.
|
||||
startOffsetInSamples, durationInSamples)
|
||||
LoadBurnInPreset(presetName) --> Bool # Loads user defined data burn in preset for project when supplied presetName (string). Returns true if successful.
|
||||
ExportCurrentFrameAsStill(filePath) --> Bool # Exports current frame as still to supplied filePath. filePath must end in valid export file format. Returns True if successful, False otherwise.
|
||||
GetColorGroupsList() --> [ColorGroups...] # Returns a list of all group objects in the timeline.
|
||||
AddColorGroup(groupName) --> ColorGroup # Creates a new ColorGroup. groupName must be a unique string.
|
||||
DeleteColorGroup(colorGroup) --> Bool # Deletes the given color group and sets clips to ungrouped.
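
A short sketch tying the render-queue calls above together (continuing from the connection example earlier; the render setting key and path are illustrative):

    import time
    project = resolve.GetProjectManager().GetCurrentProject()
    project.SetRenderSettings({"TargetDir": "/tmp/renders"})
    job_id = project.AddRenderJob()
    project.StartRendering(job_id)
    while project.IsRenderingInProgress():
        print(project.GetRenderJobStatus(job_id))
        time.sleep(2)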
|
||||
|
||||
MediaStorage
|
||||
GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage.
|
||||
GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path.
|
||||
GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
|
||||
RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve’s Media Storage.
|
||||
AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created.
|
||||
AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
|
||||
AddItemListToMediaPool([{itemInfo}, ...]) --> [clips...] # Adds list of itemInfos specified as dict of "media", "startFrame" (int), "endFrame" (int) from Media Storage into current Media Pool folder. Returns a list of the MediaPoolItems created.
|
||||
AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful.
|
||||
AddTimelineMattesToMediaPool([paths]) --> [MediaPoolItems] # Adds specified media files as timeline mattes in current media pool folder. Returns a list of created MediaPoolItems.
|
||||
|
||||
MediaPool
|
||||
GetRootFolder() --> Folder # Returns root Folder of Media Pool
|
||||
AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name.
|
||||
RefreshFolders() --> Bool # Updates the folders in collaboration mode
|
||||
CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name.
|
||||
AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
|
||||
AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
|
||||
AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only), "trackIndex" (int) and "recordFrame" (int). Returns the list of appended timelineItems.
|
||||
CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
|
||||
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
|
||||
CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), "recordFrame" (int).
|
||||
ImportTimelineFromFile(filePath, {importOptions}) --> Timeline # Creates timeline based on parameters within given file (AAF/EDL/XML/FCPXML/DRT/ADL/OTIO) and optional importOptions dict, with support for the keys:
|
||||
# "timelineName": string, specifies the name of the timeline to be created. Not valid for DRT import
|
||||
# "importSourceClips": Bool, specifies whether source clips should be imported, True by default. Not valid for DRT import
|
||||
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True
|
||||
# "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False. Not valid for DRT import
|
||||
# "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import
|
||||
DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool.
|
||||
GetCurrentFolder() --> Folder # Returns currently selected Folder.
|
||||
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
|
||||
DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool
|
||||
ImportFolderFromFile(filePath, sourceClipsPath="") --> Bool # Returns true if import from given DRB filePath is successful, false otherwise
|
||||
# sourceClipsPath is a string that specifies a filesystem path to search for source clips if the media is inaccessible in their original path, empty by default
|
||||
DeleteFolders([subfolders]) --> Bool # Deletes specified subfolders in the media pool
|
||||
MoveClips([clips], targetFolder) --> Bool # Moves specified clips to target folder.
|
||||
MoveFolders([folders], targetFolder) --> Bool # Moves specified folders to target folder.
|
||||
GetClipMatteList(MediaPoolItem) --> [paths] # Get mattes for specified MediaPoolItem, as a list of paths to the matte files.
|
||||
GetTimelineMatteList(Folder) --> [MediaPoolItems] # Get mattes in specified Folder, as list of MediaPoolItems.
|
||||
DeleteClipMattes(MediaPoolItem, [paths]) --> Bool # Delete mattes based on their file paths, for specified MediaPoolItem. Returns True on success.
|
||||
RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path.
|
||||
UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips.
|
||||
ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
|
||||
ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created.
|
||||
# Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on.
|
||||
# Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx".
|
||||
ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format.
|
||||
# If no clips are specified, all clips from media pool will be used.
|
||||
GetUniqueId() --> string # Returns a unique ID for the media pool
|
||||
CreateStereoClip(LeftMediaPoolItem,
|
||||
RightMediaPoolItem) --> MediaPoolItem # Takes in two existing media pool items and creates a new 3D stereoscopic media pool entry replacing the input media in the media pool.
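
A small sketch combining the MediaPool calls above (the file path and timeline name are made up):

    media_pool = project.GetMediaPool()
    clips = media_pool.ImportMedia(["/mnt/footage/shot010.mov"])
    timeline = media_pool.CreateTimelineFromClips("Edit v001", clips)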
|
||||
|
||||
Folder
|
||||
GetClipList() --> [clips...] # Returns a list of clips (items) within the folder.
|
||||
GetName() --> string # Returns the media folder name.
|
||||
GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder.
|
||||
GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise
|
||||
GetUniqueId() --> string # Returns a unique ID for the media pool folder
|
||||
Export(filePath) --> bool # Returns true if export of DRB folder to filePath is successful, false otherwise
|
||||
TranscribeAudio() --> Bool # Transcribes audio of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise
|
||||
ClearTranscription() --> Bool # Clears audio transcription of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise.
|
||||
|
||||
MediaPoolItem
|
||||
GetName() --> string # Returns the clip name.
|
||||
GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'.
|
||||
# If no argument is specified, a dict of all set metadata properties is returned.
|
||||
SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful.
|
||||
SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful.
|
||||
GetMediaId() --> string # Returns the unique ID for the MediaPoolItem.
|
||||
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
|
||||
customData)
|
||||
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
|
||||
# Example of output format: {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...}
|
||||
# In the above example - there is one 'Green' marker at offset 96 (position of the marker)
|
||||
GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData.
|
||||
UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers.
|
||||
GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position.
|
||||
DeleteMarkersByColor(color) --> Bool # Delete all markers of the specified color from the media pool item. "All" as argument deletes all color markers.
|
||||
DeleteMarkerAtFrame(frameNum) --> Bool # Delete marker at frame number from the media pool item.
|
||||
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
|
||||
AddFlag(color) --> Bool # Adds a flag with given color (string).
|
||||
GetFlagList() --> [colors...] # Returns a list of flag colors assigned to the item.
|
||||
ClearFlags(color) --> Bool # Clears the flag of the given color if one exists. An "All" argument is supported and clears all flags.
|
||||
GetClipColor() --> string # Returns the item color as a string.
|
||||
SetClipColor(colorName) --> Bool # Sets the item color based on the colorName (string).
|
||||
ClearClipColor() --> Bool # Clears the item color.
|
||||
GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'.
|
||||
# If no argument is specified, a dict of all clip properties is returned. Check the section below for more information.
|
||||
SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information.
|
||||
LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path.
|
||||
UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip.
|
||||
ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path.
|
||||
GetUniqueId() --> string # Returns a unique ID for the media pool item
|
||||
TranscribeAudio() --> Bool # Transcribes audio of the MediaPoolItem. Returns True if successful; False otherwise
|
||||
ClearTranscription() --> Bool # Clears audio transcription of the MediaPoolItem. Returns True if successful; False otherwise.
|
||||
|
||||
Timeline
|
||||
GetName() --> string # Returns the timeline name.
|
||||
SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful.
|
||||
GetStartFrame() --> int # Returns the frame number at the start of timeline.
|
||||
GetEndFrame() --> int # Returns the frame number at the end of timeline.
|
||||
SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise.
|
||||
GetStartTimecode() --> string # Returns the start timecode for the timeline.
|
||||
GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle").
|
||||
AddTrack(trackType, optionalSubTrackType) --> Bool # Adds track of trackType ("video", "subtitle", "audio"). Second argument optionalSubTrackType is required for "audio"
|
||||
# optionalSubTrackType can be one of {"mono", "stereo", "5.1", "5.1film", "7.1", "7.1film", "adaptive1", ... , "adaptive24"}
|
||||
DeleteTrack(trackType, trackIndex) --> Bool # Deletes track of trackType ("video", "subtitle", "audio") and given trackIndex. 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
SetTrackEnable(trackType, trackIndex, Bool) --> Bool # Enables/Disables track with given trackType and trackIndex
|
||||
# trackType is one of {"audio", "video", "subtitle"}
|
||||
# 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
GetIsTrackEnabled(trackType, trackIndex) --> Bool # Returns True if track with given trackType and trackIndex is enabled and False otherwise.
|
||||
# trackType is one of {"audio", "video", "subtitle"}
|
||||
# 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
SetTrackLock(trackType, trackIndex, Bool) --> Bool # Locks/Unlocks track with given trackType and trackIndex
|
||||
# trackType is one of {"audio", "video", "subtitle"}
|
||||
# 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
GetIsTrackLocked(trackType, trackIndex) --> Bool # Returns True if track with given trackType and trackIndex is locked and False otherwise.
|
||||
# trackType is one of {"audio", "video", "subtitle"}
|
||||
# 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
DeleteClips([timelineItems], Bool) --> Bool # Deletes specified TimelineItems from the timeline, performing ripple delete if the second argument is True. Second argument is optional (The default for this is False)
|
||||
SetClipsLinked([timelineItems], Bool) --> Bool # Links or unlinks the specified TimelineItems depending on second argument.
|
||||
GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType).
|
||||
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
|
||||
customData)
|
||||
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
|
||||
# Example: a value of {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} indicates a single green marker at timeline offset 96
|
||||
GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData.
|
||||
UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers.
|
||||
GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position.
|
||||
DeleteMarkersByColor(color) --> Bool # Deletes all timeline markers of the specified color. An "All" argument is supported and deletes all timeline markers.
|
||||
DeleteMarkerAtFrame(frameNum) --> Bool # Deletes the timeline marker at the given frame number.
|
||||
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
|
||||
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
|
||||
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
|
||||
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages.
|
||||
SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages.
|
||||
GetCurrentVideoItem() --> item # Returns the current video timeline item.
|
||||
GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page.
|
||||
# An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder.
|
||||
GetTrackName(trackType, trackIndex) --> string # Returns the track name for track indicated by trackType ("audio", "video" or "subtitle") and index. 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
SetTrackName(trackType, trackIndex, name) --> Bool # Sets the track name (string) for track indicated by trackType ("audio", "video" or "subtitle") and index. 1 <= trackIndex <= GetTrackCount(trackType).
|
||||
DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success.
|
||||
CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item.
|
||||
CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item.
|
||||
ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys:
|
||||
# "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default
|
||||
# "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default
|
||||
# "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default
|
||||
# "useSizingInfo": Bool, specifies if sizing information should be used, False by default
|
||||
# "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default
|
||||
# "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default
|
||||
# "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False
|
||||
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True
|
||||
# "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder
|
||||
|
||||
Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format.
|
||||
# Refer to section "Looking up timeline export properties" for information on the parameters.
|
||||
GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information.
|
||||
SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information.
|
||||
InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline.
|
||||
InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline.
|
||||
InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline.
|
||||
InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline.
|
||||
InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline.
|
||||
InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline.
|
||||
GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object.
|
||||
GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects.
|
||||
GetUniqueId() --> string # Returns a unique ID for the timeline
|
||||
CreateSubtitlesFromAudio({autoCaptionSettings}) --> Bool # Creates subtitles from audio for the timeline.
|
||||
# Takes in optional dictionary {autoCaptionSettings}. Check 'Auto Caption Settings' subsection below for more information.
|
||||
# Returns True on success, False otherwise.
|
||||
DetectSceneCuts() --> Bool # Detects and makes scene cuts along the timeline. Returns True if successful, False otherwise.
|
||||
ConvertTimelineToStereo() --> Bool # Converts timeline to stereo. Returns True if successful; False otherwise.
|
||||
GetNodeGraph() --> Graph # Returns the timeline's node graph object.
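
For instance, a marker round-trip using the Timeline calls above (frame offset and customData values are arbitrary):

    timeline = project.GetCurrentTimeline()
    timeline.AddMarker(96, "Green", "Review", "check grade", 1, "note-001")
    print(timeline.GetMarkers())
    print(timeline.GetMarkerByCustomData("note-001"))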
|
||||
|
||||
TimelineItem
|
||||
GetName() --> string # Returns the item name.
|
||||
GetDuration() --> int # Returns the item duration.
|
||||
GetEnd() --> int # Returns the end frame position on the timeline.
|
||||
GetFusionCompCount() --> int # Returns number of Fusion compositions associated with the timeline item.
|
||||
GetFusionCompByIndex(compIndex) --> fusionComp # Returns the Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount()
|
||||
GetFusionCompNameList() --> [names...] # Returns a list of Fusion composition names associated with the timeline item.
|
||||
GetFusionCompByName(compName) --> fusionComp # Returns the Fusion composition object based on given name.
|
||||
GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side.
|
||||
GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side.
|
||||
GetStart() --> int # Returns the start frame position on the timeline.
|
||||
SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue"
|
||||
# Refer to "Looking up Timeline item properties" for more information
|
||||
GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key
|
||||
# if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys
|
||||
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
|
||||
customData)
|
||||
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
|
||||
# Example: a value of {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} indicates a single green marker at clip offset 96
|
||||
GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData.
|
||||
UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers.
|
||||
GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position.
|
||||
DeleteMarkersByColor(color) --> Bool # Delete all markers of the specified color from the timeline item. "All" as argument deletes all color markers.
|
||||
DeleteMarkerAtFrame(frameNum) --> Bool # Delete marker at frame number from the timeline item.
|
||||
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
|
||||
AddFlag(color) --> Bool # Adds a flag with given color (string).
|
||||
GetFlagList() --> [colors...] # Returns a list of flag colors assigned to the item.
|
||||
ClearFlags(color) --> Bool # Clear flags of the specified color. An "All" argument is supported to clear all flags.
|
||||
GetClipColor() --> string # Returns the item color as a string.
|
||||
SetClipColor(colorName) --> Bool # Sets the item color based on the colorName (string).
|
||||
ClearClipColor() --> Bool # Clears the item color.
|
||||
AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item.
|
||||
ImportFusionComp(path) --> fusionComp # Imports a Fusion composition from given file path by creating and adding a new composition for the item.
|
||||
ExportFusionComp(path, compIndex) --> Bool # Exports the Fusion composition based on given index to the path provided.
|
||||
DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition.
|
||||
LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition.
|
||||
RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName.
|
||||
AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote).
|
||||
GetCurrentVersion() --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType(0 - local, 1 - remote).
|
||||
DeleteVersionByName(versionName, versionType) --> Bool # Deletes a color version by name and versionType (0 - local, 1 - remote).
|
||||
LoadVersionByName(versionName, versionType) --> Bool # Loads a named color version as the active version. versionType: 0 - local, 1 - remote.
|
||||
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
|
||||
GetVersionNameList(versionType) --> [names...] # Returns a list of all color versions for the given versionType (0 - local, 1 - remote).
|
||||
GetMediaPoolItem() --> MediaPoolItem # Returns the media pool item corresponding to the timeline item if one exists.
|
||||
GetStereoConvergenceValues() --> {keyframes...} # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
|
||||
GetStereoLeftFloatingWindowParams() --> {keyframes...} # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
|
||||
GetStereoRightFloatingWindowParams() --> {keyframes...} # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
|
||||
ApplyArriCdlLut() --> Bool # Applies ARRI CDL and LUT. Returns True if successful, False otherwise.
|
||||
SetCDL([CDL map]) --> Bool # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
|
||||
# Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
|
||||
AddTake(mediaPoolItem, startFrame, endFrame) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extent is added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
|
||||
GetSelectedTakeIndex() --> int # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
|
||||
GetTakesCount() --> int # Returns the number of takes in take selector, or 0 if the clip is not a take selector.
|
||||
GetTakeByIndex(idx) --> {takeInfo...} # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index.
|
||||
DeleteTakeByIndex(idx) --> Bool # Deletes a take by index, 1 <= idx <= number of takes.
|
||||
SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes.
|
||||
FinalizeTake() --> Bool # Finalizes take selection.
|
||||
CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred.
|
||||
SetClipEnabled(Bool) --> Bool # Sets clip enabled based on argument.
|
||||
GetClipEnabled() --> Bool # Gets clip enabled status.
|
||||
UpdateSidecar() --> Bool # Updates sidecar file for BRAW clips or RMD file for R3D clips.
|
||||
GetUniqueId() --> string # Returns a unique ID for the timeline item
|
||||
LoadBurnInPreset(presetName) --> Bool # Loads user defined data burn in preset for clip when supplied presetName (string). Returns true if successful.
|
||||
CreateMagicMask(mode) --> Bool # Returns True if magic mask was created successfully, False otherwise. mode can be "F" (forward), "B" (backward), or "BI" (bidirectional)
|
||||
RegenerateMagicMask() --> Bool # Returns True if magic mask was regenerated successfully, False otherwise.
|
||||
Stabilize() --> Bool # Returns True if stabilization was successful, False otherwise
|
||||
SmartReframe() --> Bool # Performs Smart Reframe. Returns True if successful, False otherwise.
|
||||
GetNodeGraph() --> Graph # Returns the clip's node graph object.
|
||||
GetColorGroup() --> ColorGroup # Returns the clip's color group if one exists.
|
||||
AssignToColorGroup(ColorGroup) --> Bool # Returns True if TiItem to successfully assigned to given ColorGroup. ColorGroup must be an existing group in the current project.
|
||||
RemoveFromColorGroup() --> Bool # Returns True if the TiItem is successfully removed from the ColorGroup it is in.
|
||||
ExportLUT(exportType, path) --> Bool # Exports LUTs from tiItem referring to value passed in 'exportType' (enum) for LUT size. Refer to. 'ExportLUT notes' section for possible values.
|
||||
# Saves generated LUT in the provided 'path' (string). 'path' should include the intended file name.
|
||||
# If an empty or incorrect extension is provided, the appropriate extension (.cube/.vlt) will be appended at the end of the path.
|
||||

Gallery
GetAlbumName(galleryStillAlbum) --> string # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
SetAlbumName(galleryStillAlbum, albumName) --> Bool # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
GetCurrentStillAlbum() --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object.
SetCurrentStillAlbum(galleryStillAlbum) --> Bool # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'.
GetGalleryStillAlbums() --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects.

GalleryStillAlbum
GetStills() --> [galleryStill] # Returns the list of GalleryStill objects in the album.
GetLabel(galleryStill) --> string # Returns the label of the galleryStill.
SetLabel(galleryStill, label) --> Bool # Sets the new 'label' on the GalleryStill object 'galleryStill'.
ImportStills([filePaths]) --> Bool # Imports a GalleryStill from each filePath in the [filePaths] list. Returns True if at least one still is imported successfully, False otherwise.
ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm, drx).
DeleteStills([galleryStill]) --> Bool # Deletes specified list of GalleryStill objects '[galleryStill]'.

GalleryStill # This class does not provide any API functions but the object type is used by functions in other classes.

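# Example python code - a hedged sketch that relabels and exports the stills of the current album (assumes `project` is the current Project object; the label, output folder, prefix and format are illustrative):
    gallery = project.GetGallery()
    album = gallery.GetCurrentStillAlbum()
    stills = album.GetStills()
    for still in stills:
        album.SetLabel(still, "graded_reference")
    album.ExportStills(stills, "/tmp/stills", "shot010", "dpx")
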
Graph
GetNumNodes() --> int # Returns the number of nodes in the graph.
SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= self.GetNumNodes().
# The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
# The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
GetNodeLabel(nodeIndex) --> string # Returns the label of the node at nodeIndex.
GetToolsInNode(nodeIndex) --> [toolsList] # Returns toolsList (list of strings) of the tools used in the node indicated by the given nodeIndex (int).

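# Example python code - a minimal sketch that inspects a clip's node graph and applies a LUT to its first node (assumes `timeline_item` is a TimelineItem; the LUT path is illustrative and must already be known to Resolve, see Project.RefreshLUTList):
    graph = timeline_item.GetNodeGraph()
    for node_index in range(1, graph.GetNumNodes() + 1):
        print(graph.GetNodeLabel(node_index), graph.GetToolsInNode(node_index))
    if graph.SetLUT(1, "Film Looks/Rec709 Kodak 2383 D65.cube"):
        print(graph.GetLUT(1))  # relative LUT path of node 1
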
ColorGroup
GetName() --> String # Returns the name (string) of the ColorGroup.
SetName(groupName) --> Bool # Renames ColorGroup to groupName (string).
GetClipsInTimeline(Timeline=CurrTimeline) --> [TimelineItem] # Returns a list of TimelineItems that are in the ColorGroup in the given Timeline. Timeline is the current timeline by default.
GetPreClipNodeGraph() --> Graph # Returns the ColorGroup Pre-clip graph.
GetPostClipNodeGraph() --> Graph # Returns the ColorGroup Post-clip graph.

List and Dict Data Structures
-----------------------------
Besides primitive data types, Resolve's Python API mainly uses list and dict data structures. Lists are denoted by [ ... ] and dicts are denoted by { ... } above.
As Lua does not support list and dict data structures, the Lua API implements a "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
Similarly, the Lua API implements a "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.

Keyframe Mode information
-------------------------
This section covers additional notes for the functions Resolve.GetKeyframeMode() and Resolve.SetKeyframeMode(keyframeMode).

'keyframeMode' can be one of the following enums:
- resolve.KEYFRAME_MODE_ALL == 0
- resolve.KEYFRAME_MODE_COLOR == 1
- resolve.KEYFRAME_MODE_SIZING == 2

Integer values returned by Resolve.GetKeyframeMode() will correspond to the enums above.

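# Example python code - switching the keyframe mode and restoring it afterwards (a hedged sketch; assumes `resolve` is the scripting entry-point object):
    previous_mode = resolve.GetKeyframeMode()
    resolve.SetKeyframeMode(resolve.KEYFRAME_MODE_COLOR)  # operate on color keyframes only
    # ... perform keyframe edits here ...
    resolve.SetKeyframeMode(previous_mode)                # restore the original mode
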
Cloud Projects Settings
-----------------------
This section covers additional notes for the functions "ProjectManager:CreateCloudProject", "ProjectManager:ImportCloudProject" and "ProjectManager:RestoreCloudProject".

All three functions take in a {cloudSettings} dict with the following keys:
* resolve.CLOUD_SETTING_PROJECT_NAME: String, ["" by default]
* resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: String, ["" by default]
* resolve.CLOUD_SETTING_IS_COLLAB: Bool, [False by default]
* resolve.CLOUD_SETTING_SYNC_MODE: syncMode (see below), [resolve.CLOUD_SYNC_PROXY_ONLY by default]
* resolve.CLOUD_SETTING_IS_CAMERA_ACCESS: Bool, [False by default]

Where syncMode is one of the following values:
* resolve.CLOUD_SYNC_NONE,
* resolve.CLOUD_SYNC_PROXY_ONLY,
* resolve.CLOUD_SYNC_PROXY_AND_ORIG

All three functions require resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH to be defined. "ProjectManager:CreateCloudProject" also requires resolve.CLOUD_SETTING_PROJECT_NAME to be defined.

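# Example python code - a hedged sketch of creating a collaborative cloud project (assumes `resolve` and `projectManager` objects are available; the project name and media path are illustrative):
    cloud_settings = {
        resolve.CLOUD_SETTING_PROJECT_NAME: "MyCloudProject",
        resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: "/Volumes/media/cloud",
        resolve.CLOUD_SETTING_IS_COLLAB: True,
        resolve.CLOUD_SETTING_SYNC_MODE: resolve.CLOUD_SYNC_PROXY_ONLY,
    }
    project = projectManager.CreateCloudProject(cloud_settings)
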
Looking up Project and Clip properties
--------------------------------------
This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and "MediaPoolItem:SetClipProperty". These functions are used to get and set properties otherwise available to the user through the Project Settings and the Clip Attributes dialogs.

The functions follow a key-value pair format, where each property is identified by a key (the settingName or propertyName parameter) and possesses a value (typically a text value). Keys and values are designed to be easily correlated with parameter names and values in the Resolve UI. Explicitly enumerated values for some parameters are listed below.

Some properties may be read only - these include intrinsic clip properties like date created or sample rate, and properties that can be disabled in specific application contexts (e.g. custom colorspaces in an ACES workflow, or output sizing parameters when behavior is set to match timeline).

Getting values:
Invoke "Project:GetSetting", "Timeline:GetSetting" or "MediaPoolItem:GetClipProperty" with the appropriate property key. To get a snapshot of all queryable properties (keys and values), you can call "Project:GetSetting", "Timeline:GetSetting" or "MediaPoolItem:GetClipProperty" without parameters (or with a NoneType or a blank property key). Using specific keys to query individual properties will be faster. Note that getting a property using an invalid key will return a trivial result.

Setting values:
Invoke "Project:SetSetting", "Timeline:SetSetting" or "MediaPoolItem:SetClipProperty" with the appropriate property key and a valid value. When setting a parameter, please check the return value to ensure the success of the operation. You can troubleshoot the validity of keys and values by setting the desired result from the UI and checking property snapshots before and after the change.

The following Project properties have specifically enumerated values:
"superScale" - the property value is an enumerated integer between 0 and 4 with these meanings: 0=Auto, 1=no scaling, and 2, 3 and 4 represent the Super Scale multipliers 2x, 3x and 4x.
For the Super Scale multiplier '2x Enhanced', exactly 4 arguments must be passed as outlined below. If fewer than 4 arguments are passed, it will default to 2x.
Affects:
• x = Project:GetSetting('superScale') and Project:SetSetting('superScale', x)
• for '2x Enhanced' --> Project:SetSetting('superScale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue and noiseReductionValue are floats in the range [0.0, 1.0]

"timelineFrameRate" - the property value is one of the frame rates available to the user in project settings under the "Timeline frame rate" option. Drop Frame can be configured for supported frame rates by appending the frame rate with "DF", e.g. "29.97 DF" will enable drop frame and "29.97" will disable drop frame.
Affects:
• x = Project:GetSetting('timelineFrameRate') and Project:SetSetting('timelineFrameRate', x)

The following Clip properties have specifically enumerated values:
"Super Scale" - the property value is an enumerated integer between 1 and 4 with these meanings: 1=no scaling, and 2, 3 and 4 represent the Super Scale multipliers 2x, 3x and 4x.
For the Super Scale multiplier '2x Enhanced', exactly 4 arguments must be passed as outlined below. If fewer than 4 arguments are passed, it will default to 2x.
Affects:
• x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x)
• for '2x Enhanced' --> MediaPoolItem:SetClipProperty('Super Scale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue and noiseReductionValue are floats in the range [0.0, 1.0]

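# Example python code - troubleshooting a key/value pair by diffing snapshots, as described above (a hedged sketch; assumes `project` is the current Project; change the desired option in the UI between the two calls):
    before = project.GetSetting()   # snapshot of all queryable settings
    # ... change the option in the Project Settings dialog ...
    after = project.GetSetting()
    changed = {key: (before.get(key), value)
               for key, value in after.items() if before.get(key) != value}
    print(changed)                  # reveals the key and its valid values
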
Auto Caption Settings
---------------------
This section covers the supported settings for the method Timeline.CreateSubtitlesFromAudio({autoCaptionSettings}).

The parameter is a dictionary containing the following keys:
* resolve.SUBTITLE_LANGUAGE: languageID (see below), [resolve.AUTO_CAPTION_AUTO by default]
* resolve.SUBTITLE_CAPTION_PRESET: presetType (see below), [resolve.AUTO_CAPTION_SUBTITLE_DEFAULT by default]
* resolve.SUBTITLE_CHARS_PER_LINE: Number between 1 and 60 inclusive [42 by default]
* resolve.SUBTITLE_LINE_BREAK: lineBreakType (see below), [resolve.AUTO_CAPTION_LINE_SINGLE by default]
* resolve.SUBTITLE_GAP: Number between 0 and 10 inclusive [0 by default]

Note that the default values for some keys may change based on values defined for other keys, as per the UI.
For example, if the following dictionary is supplied,
CreateSubtitlesFromAudio( { resolve.SUBTITLE_LANGUAGE : resolve.AUTO_CAPTION_KOREAN,
                            resolve.SUBTITLE_CAPTION_PRESET : resolve.AUTO_CAPTION_NETFLIX } )
the default value for resolve.SUBTITLE_CHARS_PER_LINE will be 16 instead of 42.

languageIDs:
* resolve.AUTO_CAPTION_AUTO
* resolve.AUTO_CAPTION_DANISH
* resolve.AUTO_CAPTION_DUTCH
* resolve.AUTO_CAPTION_ENGLISH
* resolve.AUTO_CAPTION_FRENCH
* resolve.AUTO_CAPTION_GERMAN
* resolve.AUTO_CAPTION_ITALIAN
* resolve.AUTO_CAPTION_JAPANESE
* resolve.AUTO_CAPTION_KOREAN
* resolve.AUTO_CAPTION_MANDARIN_SIMPLIFIED
* resolve.AUTO_CAPTION_MANDARIN_TRADITIONAL
* resolve.AUTO_CAPTION_NORWEGIAN
* resolve.AUTO_CAPTION_PORTUGUESE
* resolve.AUTO_CAPTION_RUSSIAN
* resolve.AUTO_CAPTION_SPANISH
* resolve.AUTO_CAPTION_SWEDISH

presetTypes:
* resolve.AUTO_CAPTION_SUBTITLE_DEFAULT
* resolve.AUTO_CAPTION_TELETEXT
* resolve.AUTO_CAPTION_NETFLIX

lineBreakTypes:
* resolve.AUTO_CAPTION_LINE_SINGLE
* resolve.AUTO_CAPTION_LINE_DOUBLE

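# Example python code - a minimal sketch calling the auto-caption method with explicit settings (assumes `timeline` is the current Timeline and `resolve` the scripting object; the values are illustrative):
    auto_caption_settings = {
        resolve.SUBTITLE_LANGUAGE: resolve.AUTO_CAPTION_ENGLISH,
        resolve.SUBTITLE_CAPTION_PRESET: resolve.AUTO_CAPTION_SUBTITLE_DEFAULT,
        resolve.SUBTITLE_CHARS_PER_LINE: 42,
        resolve.SUBTITLE_LINE_BREAK: resolve.AUTO_CAPTION_LINE_SINGLE,
        resolve.SUBTITLE_GAP: 0,
    }
    timeline.CreateSubtitlesFromAudio(auto_caption_settings)
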
Looking up Render Settings
--------------------------
This section covers the supported settings for the method SetRenderSettings({settings}).

The parameter is a dictionary containing the following keys:
- "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored)
- "MarkIn": int
- "MarkOut": int
- "TargetDir": string
- "CustomName": string
- "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
- "ExportVideo": Bool
- "ExportAudio": Bool
- "FormatWidth": int
- "FormatHeight": int
- "FrameRate": float (examples: 23.976, 24)
- "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
- "VideoQuality": possible values for current codec (if applicable):
  - 0 (int) - will set quality to automatic
  - [1 -> MAX] (int) - will set input bit rate
  - ["Least", "Low", "Medium", "High", "Best"] (string) - will set input quality level
- "AudioCodec": string (example: "aac")
- "AudioBitDepth": int
- "AudioSampleRate": int
- "ColorSpaceTag": string (examples: "Same as Project", "AstroDesign")
- "GammaTag": string (examples: "Same as Project", "ACEScct")
- "ExportAlpha": Bool
- "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265.
- "MultiPassEncode": Bool. Can only be set for H.264.
- "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true.
- "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats.

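# Example python code - a hedged sketch of queueing and starting a render with explicit settings (assumes `project` is the current Project; the target directory, name and values are illustrative):
    project.SetRenderSettings({
        "SelectAllFrames": True,
        "TargetDir": "/tmp/renders",
        "CustomName": "shot010_comp",
        "ExportVideo": True,
        "ExportAudio": True,
        "FormatWidth": 1920,
        "FormatHeight": 1080,
        "FrameRate": 24,
        "VideoQuality": "High",
    })
    job_id = project.AddRenderJob()
    project.StartRendering(job_id)
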
Looking up timeline export properties
-------------------------------------
This section covers the parameters for the method Export(fileName, exportType, exportSubtype).

exportType can be one of the following constants:
- resolve.EXPORT_AAF
- resolve.EXPORT_DRT
- resolve.EXPORT_EDL
- resolve.EXPORT_FCP_7_XML
- resolve.EXPORT_FCPXML_1_8
- resolve.EXPORT_FCPXML_1_9
- resolve.EXPORT_FCPXML_1_10
- resolve.EXPORT_HDR_10_PROFILE_A
- resolve.EXPORT_HDR_10_PROFILE_B
- resolve.EXPORT_TEXT_CSV
- resolve.EXPORT_TEXT_TAB
- resolve.EXPORT_DOLBY_VISION_VER_2_9
- resolve.EXPORT_DOLBY_VISION_VER_4_0
- resolve.EXPORT_DOLBY_VISION_VER_5_1
- resolve.EXPORT_OTIO
- resolve.EXPORT_ALE
- resolve.EXPORT_ALE_CDL
exportSubtype can be one of the following enums:
- resolve.EXPORT_NONE
- resolve.EXPORT_AAF_NEW
- resolve.EXPORT_AAF_EXISTING
- resolve.EXPORT_CDL
- resolve.EXPORT_SDL
- resolve.EXPORT_MISSING_CLIPS
Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the remaining exportTypes, exportSubtype is ignored.
When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.

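# Example python code - exporting the current timeline as a new AAF and as CSV (a hedged sketch; assumes `timeline` is the current Timeline and `resolve` the scripting object; file paths are illustrative):
    timeline.Export("/tmp/timeline.aaf", resolve.EXPORT_AAF, resolve.EXPORT_AAF_NEW)
    timeline.Export("/tmp/timeline.csv", resolve.EXPORT_TEXT_CSV, resolve.EXPORT_NONE)  # subtype is ignored here
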
Unsupported exportType types
----------------------------
Starting with DaVinci Resolve 18.1, the following export types are not supported:
- resolve.EXPORT_FCPXML_1_3
- resolve.EXPORT_FCPXML_1_4
- resolve.EXPORT_FCPXML_1_5
- resolve.EXPORT_FCPXML_1_6
- resolve.EXPORT_FCPXML_1_7

Looking up Timeline item properties
-----------------------------------
This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set the properties listed below.

The supported keys with their accepted values are:
"Pan" : floating point values from -4.0*width to 4.0*width
"Tilt" : floating point values from -4.0*height to 4.0*height
"ZoomX" : floating point values from 0.0 to 100.0
"ZoomY" : floating point values from 0.0 to 100.0
"ZoomGang" : a boolean value
"RotationAngle" : floating point values from -360.0 to 360.0
"AnchorPointX" : floating point values from -4.0*width to 4.0*width
"AnchorPointY" : floating point values from -4.0*height to 4.0*height
"Pitch" : floating point values from -1.5 to 1.5
"Yaw" : floating point values from -1.5 to 1.5
"FlipX" : boolean value for flipping horizontally
"FlipY" : boolean value for flipping vertically
"CropLeft" : floating point values from 0.0 to width
"CropRight" : floating point values from 0.0 to width
"CropTop" : floating point values from 0.0 to height
"CropBottom" : floating point values from 0.0 to height
"CropSoftness" : floating point values from -100.0 to 100.0
"CropRetain" : boolean value for "Retain Image Position" checkbox
"DynamicZoomEase" : A value from the following constants
- DYNAMIC_ZOOM_EASE_LINEAR = 0
- DYNAMIC_ZOOM_EASE_IN
- DYNAMIC_ZOOM_EASE_OUT
- DYNAMIC_ZOOM_EASE_IN_AND_OUT
"CompositeMode" : A value from the following constants
- COMPOSITE_NORMAL = 0
- COMPOSITE_ADD
- COMPOSITE_SUBTRACT
- COMPOSITE_DIFF
- COMPOSITE_MULTIPLY
- COMPOSITE_SCREEN
- COMPOSITE_OVERLAY
- COMPOSITE_HARDLIGHT
- COMPOSITE_SOFTLIGHT
- COMPOSITE_DARKEN
- COMPOSITE_LIGHTEN
- COMPOSITE_COLOR_DODGE
- COMPOSITE_COLOR_BURN
- COMPOSITE_EXCLUSION
- COMPOSITE_HUE
- COMPOSITE_SATURATE
- COMPOSITE_COLORIZE
- COMPOSITE_LUMA_MASK
- COMPOSITE_DIVIDE
- COMPOSITE_LINEAR_DODGE
- COMPOSITE_LINEAR_BURN
- COMPOSITE_LINEAR_LIGHT
- COMPOSITE_VIVID_LIGHT
- COMPOSITE_PIN_LIGHT
- COMPOSITE_HARD_MIX
- COMPOSITE_LIGHTER_COLOR
- COMPOSITE_DARKER_COLOR
- COMPOSITE_FOREGROUND
- COMPOSITE_ALPHA
- COMPOSITE_INVERTED_ALPHA
- COMPOSITE_LUM
- COMPOSITE_INVERTED_LUM
"Opacity" : floating point value from 0.0 to 100.0
"Distortion" : floating point value from -1.0 to 1.0
"RetimeProcess" : A value from the following constants
- RETIME_USE_PROJECT = 0
- RETIME_NEAREST
- RETIME_FRAME_BLEND
- RETIME_OPTICAL_FLOW
"MotionEstimation" : A value from the following constants
- MOTION_EST_USE_PROJECT = 0
- MOTION_EST_STANDARD_FASTER
- MOTION_EST_STANDARD_BETTER
- MOTION_EST_ENHANCED_FASTER
- MOTION_EST_ENHANCED_BETTER
- MOTION_EST_SPEED_WARP_BETTER
- MOTION_EST_SPEED_WARP_FASTER
"Scaling" : A value from the following constants
- SCALE_USE_PROJECT = 0
- SCALE_CROP
- SCALE_FIT
- SCALE_FILL
- SCALE_STRETCH
"ResizeFilter" : A value from the following constants
- RESIZE_FILTER_USE_PROJECT = 0
- RESIZE_FILTER_SHARPER
- RESIZE_FILTER_SMOOTHER
- RESIZE_FILTER_BICUBIC
- RESIZE_FILTER_BILINEAR
- RESIZE_FILTER_BESSEL
- RESIZE_FILTER_BOX
- RESIZE_FILTER_CATMULL_ROM
- RESIZE_FILTER_CUBIC
- RESIZE_FILTER_GAUSSIAN
- RESIZE_FILTER_LANCZOS
- RESIZE_FILTER_MITCHELL
- RESIZE_FILTER_NEAREST_NEIGHBOR
- RESIZE_FILTER_QUADRATIC
- RESIZE_FILTER_SINC
- RESIZE_FILTER_LINEAR
Values beyond the range will be clipped. width and height are the same as the UI max limits.

The arguments can be passed as a key and value pair, or they can be grouped together into a dictionary (for Python) or table (for Lua) and passed as a single argument.

Getting the value of a key that uses constants will return the corresponding integer.

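# Example python code - setting properties one key at a time or grouped in a dictionary (a hedged sketch; assumes `timeline_item` is a TimelineItem; the values are illustrative):
    timeline_item.SetProperty("ZoomX", 1.5)
    timeline_item.SetProperty("RetimeProcess", 3)  # integer value of RETIME_OPTICAL_FLOW
    timeline_item.SetProperty({"Pan": 10.0, "Tilt": -5.0, "Opacity": 80.0})
    print(timeline_item.GetProperty("ZoomX"))
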
ExportLUT notes
---------------
The following section covers additional notes for TimelineItem.ExportLUT(exportType, path).

Supported values for 'exportType' (enum) are:
- resolve.EXPORT_LUT_17PTCUBE
- resolve.EXPORT_LUT_33PTCUBE
- resolve.EXPORT_LUT_65PTCUBE
- resolve.EXPORT_LUT_PANASONICVLUT

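# Example python code - exporting a clip's grade as a 33-point cube LUT (a hedged sketch; assumes `timeline_item` is a TimelineItem and `resolve` the scripting object; the output path is illustrative):
    timeline_item.ExportLUT(resolve.EXPORT_LUT_33PTCUBE, "/tmp/shot010_grade.cube")
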
Deprecated Resolve API Functions
--------------------------------
The following API functions are deprecated.

ProjectManager
GetProjectsInCurrentFolder() --> {project names...} # Returns a dict of project names in the current folder.
GetFoldersInCurrentFolder() --> {folder names...} # Returns a dict of folder names in the current folder.

Project
GetPresets() --> {presets...} # Returns a dict of presets and their information.
GetRenderJobs() --> {render jobs...} # Returns a dict of render jobs and their information.
GetRenderPresets() --> {presets...} # Returns a dict of render presets and their information.

MediaStorage
GetMountedVolumes() --> {paths...} # Returns a dict of folder paths corresponding to mounted volumes displayed in Resolve's Media Storage.
GetSubFolders(folderPath) --> {paths...} # Returns a dict of folder paths in the given absolute folder path.
GetFiles(folderPath) --> {paths...} # Returns a dict of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
AddItemsToMediaPool(item1, item2, ...) --> {clips...} # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a dict of the MediaPoolItems created.
AddItemsToMediaPool([items...]) --> {clips...} # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a dict of the MediaPoolItems created.

Folder
GetClips() --> {clips...} # Returns a dict of clips (items) within the folder.
GetSubFolders() --> {folders...} # Returns a dict of subfolders in the folder.

MediaPoolItem
GetFlags() --> {colors...} # Returns a dict of flag colors assigned to the item.

Timeline
GetItemsInTrack(trackType, index) --> {items...} # Returns a dict of timeline items on the video or audio track (based on trackType) at the specified index.

TimelineItem
GetFusionCompNames() --> {names...} # Returns a dict of Fusion composition names associated with the timeline item.
GetFlags() --> {colors...} # Returns a dict of flag colors assigned to the item.
GetVersionNames(versionType) --> {names...} # Returns a dict of version names by provided versionType: 0 - local, 1 - remote.
GetNumNodes() --> int # Returns the number of nodes in the current graph for the timeline item.
SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
# The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
# The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
GetNodeLabel(nodeIndex) --> string # Returns the label of the node at nodeIndex.

Unsupported Resolve API Functions
---------------------------------
The following API (functions and parameters) are no longer supported. Use job IDs instead of indices.

Project
StartRendering(index1, index2, ...) --> Bool # Please use unique job ids (string) instead of indices.
StartRendering([idxs...]) --> Bool # Please use unique job ids (string) instead of indices.
DeleteRenderJobByIndex(idx) --> Bool # Please use unique job ids (string) instead of indices.
GetRenderJobStatus(idx) --> {status info} # Please use unique job ids (string) instead of indices.
GetSetting and SetSetting --> {} # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI.
# settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available.

@@ -1,13 +0,0 @@
from .version import __version__
from .addon import (
    RESOLVE_ADDON_ROOT,
    ResolveAddon,
)


__all__ = (
    "__version__",

    "RESOLVE_ADDON_ROOT",
    "ResolveAddon",
)

@@ -1,22 +0,0 @@
import os

from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__
from .utils import RESOLVE_ADDON_ROOT


class ResolveAddon(AYONAddon, IHostAddon):
    name = "resolve"
    version = __version__
    host_name = "resolve"

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(RESOLVE_ADDON_ROOT, "hooks")
        ]

    def get_workfile_extensions(self):
        return [".drp"]

@@ -1,133 +0,0 @@
"""
resolve api
"""
from .utils import (
    get_resolve_module
)

from .pipeline import (
    ResolveHost,
    ls,
    containerise,
    update_container,
    maintained_selection,
    remove_instance,
    list_instances
)

from .lib import (
    maintain_current_timeline,
    publish_clip_color,
    get_project_manager,
    get_current_project,
    get_current_timeline,
    get_any_timeline,
    get_new_timeline,
    create_bin,
    get_media_pool_item,
    create_media_pool_item,
    create_timeline_item,
    get_timeline_item,
    get_video_track_names,
    get_current_timeline_items,
    get_pype_timeline_item_by_name,
    get_timeline_item_pype_tag,
    set_timeline_item_pype_tag,
    imprint,
    set_publish_attribute,
    get_publish_attribute,
    create_compound_clip,
    swap_clips,
    get_pype_clip_metadata,
    set_project_manager_to_folder_name,
    get_otio_clip_instance_data,
    get_reformated_path
)

from .menu import launch_ayon_menu

from .plugin import (
    ClipLoader,
    TimelineItemLoader,
    Creator,
    PublishClip
)

from .workio import (
    open_file,
    save_file,
    current_file,
    has_unsaved_changes,
    file_extensions,
    work_root
)

from .testing_utils import TestGUI


bmdvr = None
bmdvf = None

__all__ = [
    "bmdvr",
    "bmdvf",

    # pipeline
    "ResolveHost",
    "ls",
    "containerise",
    "update_container",
    "maintained_selection",
    "remove_instance",
    "list_instances",

    # utils
    "get_resolve_module",

    # lib
    "maintain_current_timeline",
    "publish_clip_color",
    "get_project_manager",
    "get_current_project",
    "get_current_timeline",
    "get_any_timeline",
    "get_new_timeline",
    "create_bin",
    "get_media_pool_item",
    "create_media_pool_item",
    "create_timeline_item",
    "get_timeline_item",
    "get_video_track_names",
    "get_current_timeline_items",
    "get_pype_timeline_item_by_name",
    "get_timeline_item_pype_tag",
    "set_timeline_item_pype_tag",
    "imprint",
    "set_publish_attribute",
    "get_publish_attribute",
    "create_compound_clip",
    "swap_clips",
    "get_pype_clip_metadata",
    "set_project_manager_to_folder_name",
    "get_otio_clip_instance_data",
    "get_reformated_path",

    # menu
    "launch_ayon_menu",

    # plugin
    "ClipLoader",
    "TimelineItemLoader",
    "Creator",
    "PublishClip",

    # workio
    "open_file",
    "save_file",
    "current_file",
    "has_unsaved_changes",
    "file_extensions",
    "work_root",

    "TestGUI"
]

@@ -1,52 +0,0 @@
# absolute_import is needed to counter the `module has no cmds error` in Maya
from __future__ import absolute_import

import pyblish.api


from ayon_core.pipeline.publish import get_errored_instances_from_context


class SelectInvalidAction(pyblish.api.Action):
    """Select invalid clips in Resolve timeline when plug-in failed.

    To retrieve the invalid nodes this assumes a static `get_invalid()`
    method is available on the plugin.

    """
    label = "Select invalid"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "search"  # Icon from Font Awesome

    def process(self, context, plugin):

        try:
            from .lib import get_project_manager
            pm = get_project_manager()
            self.log.debug(pm)
        except ImportError:
            raise ImportError("Current host is not Resolve")

        errored_instances = get_errored_instances_from_context(context,
                                                               plugin=plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid clips..")
        invalid = list()
        for instance in errored_instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
                    invalid.extend(invalid_nodes)
                else:
                    self.log.warning("Plug-in returned to be invalid, "
                                     "but has no selectable nodes.")

        # Ensure unique (process each node only once)
        invalid = list(set(invalid))

        if invalid:
            self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
            # TODO: select resolve timeline track items in current timeline
        else:
            self.log.info("No invalid nodes found.")

@@ -1,970 +0,0 @@
import sys
import json
import re
import os
import contextlib
from opentimelineio import opentime

from ayon_core.lib import Logger
from ayon_core.pipeline.editorial import (
    is_overlapping_otio_ranges,
    frames_to_timecode
)

from ..otio import davinci_export as otio_export

log = Logger.get_logger(__name__)

self = sys.modules[__name__]
self.project_manager = None
self.media_storage = None

# OpenPype sequential rename variables
self.rename_index = 0
self.rename_add = 0

self.publish_clip_color = "Pink"
self.pype_marker_workflow = True

# OpenPype compound clip workflow variable
self.pype_tag_name = "VFX Notes"

# OpenPype marker workflow variables
self.pype_marker_name = "OpenPypeData"
self.pype_marker_duration = 1
self.pype_marker_color = "Mint"
self.temp_marker_frame = None

# OpenPype default timeline
self.pype_timeline_name = "OpenPypeTimeline"


@contextlib.contextmanager
def maintain_current_timeline(to_timeline: object,
                              from_timeline: object = None):
    """Maintain current timeline selection during context

    Attributes:
        from_timeline (resolve.Timeline)[optional]:
    Example:
        >>> print(from_timeline.GetName())
        timeline1
        >>> print(to_timeline.GetName())
        timeline2

        >>> with maintain_current_timeline(to_timeline):
        ...     print(get_current_timeline().GetName())
        timeline2

        >>> print(get_current_timeline().GetName())
        timeline1
    """
    project = get_current_project()
    working_timeline = from_timeline or project.GetCurrentTimeline()

    # switch to the input timeline
    project.SetCurrentTimeline(to_timeline)

    try:
        # do the work
        yield
    finally:
        # restore the original working timeline
        project.SetCurrentTimeline(working_timeline)

def get_project_manager():
    from . import bmdvr
    if not self.project_manager:
        self.project_manager = bmdvr.GetProjectManager()
    return self.project_manager


def get_media_storage():
    from . import bmdvr
    if not self.media_storage:
        self.media_storage = bmdvr.GetMediaStorage()
    return self.media_storage


def get_current_project():
    """Get current project object.
    """
    return get_project_manager().GetCurrentProject()


def get_current_timeline(new=False):
    """Get current timeline object.

    Args:
        new (bool)[optional]: [DEPRECATED] if True it will create
            new timeline if none exists

    Returns:
        TODO: will need to reflect future `None`
        object: resolve.Timeline
    """
    project = get_current_project()
    timeline = project.GetCurrentTimeline()

    # return current timeline if any
    if timeline:
        return timeline

    # TODO: [deprecated] and will be removed in future
    if new:
        return get_new_timeline()


def get_any_timeline():
    """Get any timeline object.

    Returns:
        object | None: resolve.Timeline
    """
    project = get_current_project()
    timeline_count = project.GetTimelineCount()
    if timeline_count > 0:
        return project.GetTimelineByIndex(1)


def get_new_timeline(timeline_name: str = None):
    """Get new timeline object.

    Arguments:
        timeline_name (str): New timeline name.

    Returns:
        object: resolve.Timeline
    """
    project = get_current_project()
    media_pool = project.GetMediaPool()
    new_timeline = media_pool.CreateEmptyTimeline(
        timeline_name or self.pype_timeline_name)
    project.SetCurrentTimeline(new_timeline)
    return new_timeline

def create_bin(name: str,
               root: object = None,
               set_as_current: bool = True) -> object:
    """
    Create media pool's folder.

    Returns the folder object; if a folder with the given name does not
    exist it will create a new one. If the input name contains forward or
    backward slashes then it will create all parents and return the last
    child bin object.

    Args:
        name (str): name of folder / bin, or hierarchical name "parent/name"
        root (resolve.Folder)[optional]: root folder / bin object
        set_as_current (bool)[optional]: Whether to set the
            resulting bin as current folder or not.

    Returns:
        object: resolve.Folder
    """
    # get all variables
    media_pool = get_current_project().GetMediaPool()
    root_bin = root or media_pool.GetRootFolder()

    # create hierarchy of bins in case there is a slash in the name
    if "/" in name.replace("\\", "/"):
        child_bin = None
        for bname in name.split("/"):
            child_bin = create_bin(bname,
                                   root=child_bin or root_bin,
                                   set_as_current=set_as_current)
        if child_bin:
            return child_bin
    else:
        # Find existing folder or create it
        for subfolder in root_bin.GetSubFolderList():
            if subfolder.GetName() == name:
                created_bin = subfolder
                break
        else:
            created_bin = media_pool.AddSubFolder(root_bin, name)

        if set_as_current:
            media_pool.SetCurrentFolder(created_bin)

        return created_bin


def remove_media_pool_item(media_pool_item: object) -> bool:
    media_pool = get_current_project().GetMediaPool()
    return media_pool.DeleteClips([media_pool_item])

def create_media_pool_item(
    files: list,
    root: object = None,
) -> object:
    """
    Create media pool item.

    Args:
        files (list[str]): list of absolute paths to files
        root (resolve.Folder)[optional]: root folder / bin object

    Returns:
        object: resolve.MediaPoolItem
    """
    # get all variables
    media_pool = get_current_project().GetMediaPool()
    root_bin = root or media_pool.GetRootFolder()

    # make sure files list is not empty and first available file exists
    filepath = next((f for f in files if os.path.isfile(f)), None)
    if not filepath:
        raise FileNotFoundError("No file found in input files list")

    # try to search in bin if the clip does not exist
    existing_mpi = get_media_pool_item(filepath, root_bin)

    if existing_mpi:
        return existing_mpi

    # add all data in folder to media pool
    media_pool_items = media_pool.ImportMedia(files)

    return media_pool_items.pop() if media_pool_items else False


def get_media_pool_item(filepath, root: object = None) -> object:
    """
    Return clip if found in folder with use of input file path.

    Args:
        filepath (str): absolute path to a file
        root (resolve.Folder)[optional]: root folder / bin object

    Returns:
        object: resolve.MediaPoolItem
    """
    media_pool = get_current_project().GetMediaPool()
    root = root or media_pool.GetRootFolder()
    fname = os.path.basename(filepath)

    for _mpi in root.GetClipList():
        _mpi_name = _mpi.GetClipProperty("File Name")
        _mpi_name = get_reformated_path(_mpi_name, first=True)
        if fname in _mpi_name:
            return _mpi
    return None

def create_timeline_item(
    media_pool_item: object,
    timeline: object = None,
    timeline_in: int = None,
    source_start: int = None,
    source_end: int = None,
) -> object:
    """
    Add media pool item to current or defined timeline.

    Args:
        media_pool_item (resolve.MediaPoolItem): resolve's object
        timeline (Optional[resolve.Timeline]): resolve's object
        timeline_in (Optional[int]): timeline input frame (sequence frame)
        source_start (Optional[int]): media source input frame (sequence frame)
        source_end (Optional[int]): media source output frame (sequence frame)

    Returns:
        object: resolve.TimelineItem
    """
    # get all variables
    project = get_current_project()
    media_pool = project.GetMediaPool()
    clip_name = media_pool_item.GetClipProperty("File Name")
    timeline = timeline or get_current_timeline()

    # timing variables
    if all([timeline_in, source_start, source_end]):
        fps = timeline.GetSetting("timelineFrameRate")
        duration = source_end - source_start
        timecode_in = frames_to_timecode(timeline_in, fps)
        timecode_out = frames_to_timecode(timeline_in + duration, fps)
    else:
        timecode_in = None
        timecode_out = None

    # if timeline was used then switch it to current timeline
    with maintain_current_timeline(timeline):
        # Add input mediaPoolItem to clip data
        clip_data = {
            "mediaPoolItem": media_pool_item,
        }

        if source_start:
            clip_data["startFrame"] = source_start
        if source_end:
            clip_data["endFrame"] = source_end
        if timecode_in:
            # Note: specifying a recordFrame will fail to place the timeline
            #   item if there's already an existing clip at that time on the
            #   active track.
            clip_data["recordFrame"] = timeline_in

        # add to timeline
        output_timeline_item = media_pool.AppendToTimeline([clip_data])[0]

        # Adding the item may fail whilst Resolve will still return a
        # TimelineItem instance - however all `Get*` calls return None
        # Hence, we check whether the result is valid
        if output_timeline_item.GetDuration() is None:
            output_timeline_item = None

    assert output_timeline_item, AssertionError((
        "Clip name '{}' wasn't created on the timeline: '{}' \n\n"
        "Please check if correct track position is activated, \n"
        "or if a clip is not already at the timeline in \n"
        "position: '{}' out: '{}'. \n\n"
        "Clip data: {}"
    ).format(
        clip_name, timeline.GetName(), timecode_in, timecode_out, clip_data
    ))
    return output_timeline_item


def get_timeline_item(media_pool_item: object,
                      timeline: object = None) -> object:
    """
    Returns clips related to input mediaPoolItem.

    Args:
        media_pool_item (resolve.MediaPoolItem): resolve's object
        timeline (resolve.Timeline)[optional]: resolve's object

    Returns:
        object: resolve.TimelineItem
    """
    clip_name = media_pool_item.GetClipProperty("File Name")
    output_timeline_item = None
    timeline = timeline or get_current_timeline()

    with maintain_current_timeline(timeline):
        # search the timeline for the added clip
        for ti_data in get_current_timeline_items():
            ti_clip_item = ti_data["clip"]["item"]
            ti_media_pool_item = ti_clip_item.GetMediaPoolItem()

            # Skip items that do not have a media pool item, like for example
            # an "Adjustment Clip" or a "Fusion Composition" from the effects
            # toolbox
            if not ti_media_pool_item:
                continue

            if clip_name in ti_media_pool_item.GetClipProperty("File Name"):
                output_timeline_item = ti_clip_item

    return output_timeline_item

def get_video_track_names() -> list:
    tracks = list()
    track_type = "video"
    timeline = get_current_timeline()

    # get all tracks count filtered by track type
    selected_track_count = timeline.GetTrackCount(track_type)

    # loop all tracks and get items
    track_index: int
    for track_index in range(1, (int(selected_track_count) + 1)):
        track_name = timeline.GetTrackName("video", track_index)
        tracks.append(track_name)

    return tracks


def get_current_timeline_items(
        filter: bool = False,
        track_type: str = None,
        track_name: str = None,
        selecting_color: str = None) -> list:
    """ Gets all available current timeline track items
    """
    track_type = track_type or "video"
    selecting_color = selecting_color or "Chocolate"
    project = get_current_project()

    # get timeline anyhow
    timeline = (
        get_current_timeline() or
        get_any_timeline() or
        get_new_timeline()
    )
    selected_clips = []

    # get all tracks count filtered by track type
    selected_track_count = timeline.GetTrackCount(track_type)

    # loop all tracks and get items
    _clips = {}
    for track_index in range(1, (int(selected_track_count) + 1)):
        _track_name = timeline.GetTrackName(track_type, track_index)

        # filter out all unmatched track names
        if track_name and _track_name not in track_name:
            continue

        timeline_items = timeline.GetItemListInTrack(
            track_type, track_index)
        _clips[track_index] = timeline_items

        _data = {
            "project": project,
            "timeline": timeline,
            "track": {
                "name": _track_name,
                "index": track_index,
                "type": track_type}
        }
        # get track item object and its color
        for clip_index, ti in enumerate(_clips[track_index]):
            data = _data.copy()
            data["clip"] = {
                "item": ti,
                "index": clip_index
            }
            ti_color = ti.GetClipColor()
            if filter and selecting_color in ti_color or not filter:
                selected_clips.append(data)
    return selected_clips

def get_pype_timeline_item_by_name(name: str) -> object:
    """Get timeline item by name.

    Args:
        name (str): name of timeline item

    Returns:
        object: resolve.TimelineItem
    """
    for _ti_data in get_current_timeline_items():
        _ti_clip = _ti_data["clip"]["item"]
        tag_data = get_timeline_item_pype_tag(_ti_clip)
        tag_name = tag_data.get("namespace")
        if not tag_name:
            continue
        if tag_name in name:
            return _ti_clip
    return None


def get_timeline_item_pype_tag(timeline_item):
    """
    Get openpype track item tag created by creator or loader plugin.

    Attributes:
        trackItem (resolve.TimelineItem): resolve object

    Returns:
        dict: openpype tag data
    """
    return_tag = None

    if self.pype_marker_workflow:
        return_tag = get_pype_marker(timeline_item)
    else:
        media_pool_item = timeline_item.GetMediaPoolItem()

        # get all tags from track item
        _tags = media_pool_item.GetMetadata()
        if not _tags:
            return None
        for key, data in _tags.items():
            # return only correct tag defined by global name
            if key in self.pype_tag_name:
                return_tag = json.loads(data)

    return return_tag

def set_timeline_item_pype_tag(timeline_item, data=None):
    """
    Set openpype track item tag to input timeline_item.

    Attributes:
        trackItem (resolve.TimelineItem): resolve api object

    Returns:
        dict: json loaded data
    """
    data = data or dict()

    # get available openpype tag if any
    tag_data = get_timeline_item_pype_tag(timeline_item)

    if self.pype_marker_workflow:
        # delete tag as it is not updatable
        if tag_data:
            delete_pype_marker(timeline_item)

        tag_data.update(data)
        set_pype_marker(timeline_item, tag_data)
    else:
        if tag_data:
            media_pool_item = timeline_item.GetMediaPoolItem()
            # a tag exists, so update it with the input data
            tag_data.update(data)
            media_pool_item.SetMetadata(
                self.pype_tag_name, json.dumps(tag_data))
        else:
            tag_data = data
            # no tag exists yet, so create one on the input track item
            timeline_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))

    return tag_data


def imprint(timeline_item, data=None):
    """
    Adding `Avalon data` into a resolve timeline item tag.

    Also including publish attribute into tag.

    Arguments:
        timeline_item (resolve.TimelineItem): resolve timeline item object
        data (dict): Any data which needs to be imprinted

    Examples:
        data = {
            'folderPath': 'sq020sh0280',
            'productType': 'render',
            'productName': 'productMain'
        }
    """
    data = data or {}

    set_timeline_item_pype_tag(timeline_item, data)

    # add publish attribute
    set_publish_attribute(timeline_item, True)

def set_publish_attribute(timeline_item, value):
    """ Set Publish attribute in input Tag object

    Attribute:
        timeline_item (resolve.TimelineItem): resolve timeline item
        value (bool): True or False
    """
    tag_data = get_timeline_item_pype_tag(timeline_item)
    tag_data["publish"] = value
    # set data to the publish attribute
    set_timeline_item_pype_tag(timeline_item, tag_data)


def get_publish_attribute(timeline_item):
    """ Get Publish attribute from input Tag object

    Attribute:
        timeline_item (resolve.TimelineItem): resolve timeline item
    """
    tag_data = get_timeline_item_pype_tag(timeline_item)
    return tag_data["publish"]


def set_pype_marker(timeline_item, tag_data):
    source_start = timeline_item.GetLeftOffset()
    item_duration = timeline_item.GetDuration()
    frame = int(source_start + (item_duration / 2))

    # marker attributes
    frameId = (frame / 10) * 10
    color = self.pype_marker_color
    name = self.pype_marker_name
    note = json.dumps(tag_data)
    duration = (self.pype_marker_duration / 10) * 10

    timeline_item.AddMarker(
        frameId,
        color,
        name,
        note,
        duration
    )


def get_pype_marker(timeline_item):
    timeline_item_markers = timeline_item.GetMarkers()
    for marker_frame, marker in timeline_item_markers.items():
        color = marker["color"]
        name = marker["name"]
        if name == self.pype_marker_name and color == self.pype_marker_color:
            note = marker["note"]
            self.temp_marker_frame = marker_frame
            return json.loads(note)

    return dict()


def delete_pype_marker(timeline_item):
    timeline_item.DeleteMarkerAtFrame(self.temp_marker_frame)
    self.temp_marker_frame = None

def create_compound_clip(clip_data, name, folder):
    """
    Convert timeline object into nested timeline object

    Args:
        clip_data (dict): timeline item object packed into dict
            with project, timeline (sequence)
        folder (resolve.MediaPool.Folder): media pool folder object,
        name (str): name for compound clip

    Returns:
        resolve.MediaPoolItem: media pool item with compound clip timeline(cct)
    """
    # get basic objects from data
    project = clip_data["project"]
    timeline = clip_data["timeline"]
    clip = clip_data["clip"]

    # get details of objects
    clip_item = clip["item"]

    mp = project.GetMediaPool()

    # get clip attributes
    clip_attributes = get_clip_attributes(clip_item)

    mp_item = clip_item.GetMediaPoolItem()
    _mp_props = mp_item.GetClipProperty

    mp_first_frame = int(_mp_props("Start"))
    mp_last_frame = int(_mp_props("End"))

    # initialize basic source timing for otio
    ci_l_offset = clip_item.GetLeftOffset()
    ci_duration = clip_item.GetDuration()
    rate = float(_mp_props("FPS"))

    # source rational times
    mp_in_rc = opentime.RationalTime((ci_l_offset), rate)
    mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate)

    # get frame in and out for clip swapping
    in_frame = opentime.to_frames(mp_in_rc)
    out_frame = opentime.to_frames(mp_out_rc)

    # keep original sequence
    tl_origin = timeline

    # Set current folder to input media_pool_folder:
    mp.SetCurrentFolder(folder)

    # check if clip doesn't exist already:
    clips = folder.GetClipList()
    cct = next((c for c in clips
                if c.GetName() in name), None)

    if cct:
        print(f"Compound clip exists: {cct}")
    else:
        # Create empty timeline in current folder and give name:
        cct = mp.CreateEmptyTimeline(name)

        # check if clip doesn't exist already:
        clips = folder.GetClipList()
        cct = next((c for c in clips
                    if c.GetName() in name), None)
        print(f"Compound clip created: {cct}")

        with maintain_current_timeline(cct, tl_origin):
            # Add input clip to the current timeline:
            mp.AppendToTimeline([{
                "mediaPoolItem": mp_item,
                "startFrame": mp_first_frame,
                "endFrame": mp_last_frame
            }])

    # Add collected metadata and attributes to the compound clip:
    if mp_item.GetMetadata(self.pype_tag_name):
        clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
            self.pype_tag_name)[self.pype_tag_name]

    # stringify
    clip_attributes = json.dumps(clip_attributes)

    # add attributes to metadata
    for k, v in mp_item.GetMetadata().items():
        cct.SetMetadata(k, v)

    # add metadata to cct
    cct.SetMetadata(self.pype_tag_name, clip_attributes)

    # reset start timecode of the compound clip
    cct.SetClipProperty("Start TC", _mp_props("Start TC"))

    # swap clips on timeline
    swap_clips(clip_item, cct, in_frame, out_frame)

    cct.SetClipColor("Pink")
    return cct

def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame):
    """
    Swap clips on the timeline via the timeline item's take selector.

    It will add a take and activate it for the input frame range.

    Args:
        from_clip (resolve.TimelineItem)
        to_clip (resolve.MediaPoolItem)
        to_in_frame (float): cut in frame, usually `GetLeftOffset()`
        to_out_frame (float): cut out frame, usually left offset plus duration

    Returns:
        bool: True if successfully replaced

    """
    # copy ACES input transform from timeline clip to new media item
    mediapool_item_from_timeline = from_clip.GetMediaPoolItem()
    _idt = mediapool_item_from_timeline.GetClipProperty('IDT')
    to_clip.SetClipProperty('IDT', _idt)

    _clip_prop = to_clip.GetClipProperty
    to_clip_name = _clip_prop("File Name")
    # add clip item as take to timeline
    take = from_clip.AddTake(
        to_clip,
        float(to_in_frame),
        float(to_out_frame)
    )

    if not take:
        return False

    for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)):
        take_item = from_clip.GetTakeByIndex(take_index)
        take_mp_item = take_item["mediaPoolItem"]
        if to_clip_name in take_mp_item.GetName():
            from_clip.SelectTakeByIndex(take_index)
            from_clip.FinalizeTake()
            return True
    return False


def _validate_tc(x):
    # Validate and reformat timecode string

    if len(x) != 11:
        print('Invalid timecode. Try again.')

    c = ':'
    colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:]

    if colonized.replace(':', '').isdigit():
        print(f"_ colonized: {colonized}")
        return colonized
    else:
        print('Invalid timecode. Try again.')

def get_pype_clip_metadata(clip):
    """
    Get openpype metadata created by creator plugin

    Attributes:
        clip (resolve.TimelineItem): resolve's object

    Returns:
        dict: hierarchy, orig clip attributes
    """
    mp_item = clip.GetMediaPoolItem()
    metadata = mp_item.GetMetadata()

    return metadata.get(self.pype_tag_name)


def get_clip_attributes(clip):
    """
    Collect basic attributes from resolve timeline item

    Args:
        clip (resolve.TimelineItem): timeline item object

    Returns:
        dict: all collected attributes as key: values
    """
    mp_item = clip.GetMediaPoolItem()

    return {
        "clipIn": clip.GetStart(),
        "clipOut": clip.GetEnd(),
        "clipLeftOffset": clip.GetLeftOffset(),
        "clipRightOffset": clip.GetRightOffset(),
        "clipMarkers": clip.GetMarkers(),
        "clipFlags": clip.GetFlagList(),
        "sourceId": mp_item.GetMediaId(),
        "sourceProperties": mp_item.GetClipProperty()
    }

def set_project_manager_to_folder_name(folder_name):
    """
    Sets context of Project manager to given folder by name.

    Searches for the folder by name from the root folder down.
    If no folder of that name exists, one is created in the root folder.

    Args:
        folder_name (str): name of the folder to find

    Returns:
        bool: True if success

    Raises:
        Exception: Cannot create folder in root

    """
    # initialize project manager
    project_manager = get_project_manager()

    set_folder = False

    # go back to root folder
    if project_manager.GotoRootFolder():
        log.info(f"Testing existing folder: {folder_name}")
        folders = _convert_resolve_list_type(
            project_manager.GetFoldersInCurrentFolder())
        log.info(f"Testing existing folders: {folders}")
        # open the first available folder with the same name
        # as `folder_name`, otherwise fall through to creation
        if folder_name in folders:
            log.info(f"Found existing folder: {folder_name}")
            set_folder = project_manager.OpenFolder(folder_name)

    if set_folder:
        return True

    # if no folder of that name exists then create one
    # go back to root folder
    log.info(f"Folder `{folder_name}` not found and will be created")
    if project_manager.GotoRootFolder():
        try:
            # create folder by given name
            project_manager.CreateFolder(folder_name)
            project_manager.OpenFolder(folder_name)
            return True
        except NameError as e:
            log.error(f"Folder with name `{folder_name}` cannot be created! "
                      f"Error: {e}")
            return False

def _convert_resolve_list_type(resolve_list):
    """ Resolve uses an indexed dictionary as its list type,
    e.g. `{1.0: 'value'}`.
    This converts it to a normal list.
    """
    assert isinstance(resolve_list, dict), (
        "Input argument should be dict() type")

    return [resolve_list[i] for i in sorted(resolve_list.keys())]

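# Illustrative sketch only: Resolve's indexed-dict "list" is flattened in
# key order, e.g.
#     _convert_resolve_list_type({2.0: "Folder B", 1.0: "Folder A"})
#     # -> ["Folder A", "Folder B"]
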
def create_otio_time_range_from_timeline_item_data(timeline_item_data):
    timeline_item = timeline_item_data["clip"]["item"]
    project = timeline_item_data["project"]
    timeline = timeline_item_data["timeline"]
    timeline_start = timeline.GetStartFrame()

    frame_start = int(timeline_item.GetStart() - timeline_start)
    frame_duration = int(timeline_item.GetDuration())
    fps = project.GetSetting("timelineFrameRate")

    return otio_export.create_otio_time_range(
        frame_start, frame_duration, fps)

def get_otio_clip_instance_data(otio_timeline, timeline_item_data):
    """
    Return otio objects for timeline, track and clip

    Args:
        timeline_item_data (dict): timeline_item_data from list returned by
                                   resolve.get_current_timeline_items()
        otio_timeline (otio.schema.Timeline): otio object

    Returns:
        dict: otio clip object

    """

    timeline_item = timeline_item_data["clip"]["item"]
    track_name = timeline_item_data["track"]["name"]
    timeline_range = create_otio_time_range_from_timeline_item_data(
        timeline_item_data)

    for otio_clip in otio_timeline.each_clip():
        parent_track_name = otio_clip.parent().name
        parent_range = otio_clip.range_in_parent()
        if track_name not in parent_track_name:
            continue
        if otio_clip.name not in timeline_item.GetName():
            continue
        if is_overlapping_otio_ranges(
                parent_range, timeline_range, strict=True):

            # add pypedata marker to otio_clip metadata
            for marker in otio_clip.markers:
                if pype_marker_name in marker.name:
                    otio_clip.metadata.update(marker.metadata)
            return {"otioClip": otio_clip}

    return None

def get_reformated_path(path, padded=False, first=False):
    """
    Return path with the frame token converted to a python expression

    Args:
        path (str): path url or simple file name

    Returns:
        str: string with reformatted path

    Example:
        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr

    """
    first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]")

    if "[" in path:
        padding_pattern = r"(\d+)(?=-)"
        padding = len(re.findall(padding_pattern, path).pop())
        num_pattern = r"(\[\d+\-\d+\])"
        if padded:
            path = re.sub(num_pattern, f"%0{padding}d", path)
        elif first:
            first_frame = re.findall(first_frame_pattern, path, flags=0)
            if len(first_frame) >= 1:
                first_frame = first_frame[0]
                path = re.sub(num_pattern, first_frame, path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path

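# Illustrative sketch only: the three modes of `get_reformated_path`:
#     get_reformated_path("plate.[0001-1008].exr", padded=True)  # plate.%04d.exr
#     get_reformated_path("plate.[0001-1008].exr", first=True)   # plate.0001.exr
#     get_reformated_path("plate.[0001-1008].exr")               # plate.%d.exr
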
def iter_all_media_pool_clips():
    """Recursively iterate all media pool clips in current project"""
    root = get_current_project().GetMediaPool().GetRootFolder()
    queue = [root]
    for folder in queue:
        for clip in folder.GetClipList():
            yield clip
        queue.extend(folder.GetSubFolderList())

@ -1,184 +0,0 @@
import os
import sys

from qtpy import QtWidgets, QtCore, QtGui

from ayon_core.tools.utils import host_tools
from ayon_core.pipeline import registered_host


MENU_LABEL = os.environ["AYON_MENU_LABEL"]


def load_stylesheet():
    path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
    if not os.path.exists(path):
        print("Unable to load stylesheet, file not found in resources")
        return ""

    with open(path, "r") as file_stream:
        stylesheet = file_stream.read()
    return stylesheet


class Spacer(QtWidgets.QWidget):
    def __init__(self, height, *args, **kwargs):
        super(Spacer, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)


class AYONMenu(QtWidgets.QWidget):
    def __init__(self, *args, **kwargs):
        super(AYONMenu, self).__init__(*args, **kwargs)

        self.setObjectName(f"{MENU_LABEL}Menu")

        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowMinimizeButtonHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )

        self.setWindowTitle(f"{MENU_LABEL}")
        save_current_btn = QtWidgets.QPushButton("Save current file", self)
        workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
        create_btn = QtWidgets.QPushButton("Create ...", self)
        publish_btn = QtWidgets.QPushButton("Publish ...", self)
        load_btn = QtWidgets.QPushButton("Load ...", self)
        inventory_btn = QtWidgets.QPushButton("Manager ...", self)
        subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self)
        libload_btn = QtWidgets.QPushButton("Library ...", self)
        experimental_btn = QtWidgets.QPushButton(
            "Experimental tools ...", self
        )
        # rename_btn = QtWidgets.QPushButton("Rename", self)
        # set_colorspace_btn = QtWidgets.QPushButton(
        #     "Set colorspace from presets", self
        # )
        # reset_resolution_btn = QtWidgets.QPushButton(
        #     "Set Resolution from presets", self
        # )

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(10, 20, 10, 20)

        layout.addWidget(save_current_btn)

        layout.addWidget(Spacer(15, self))

        layout.addWidget(workfiles_btn)
        layout.addWidget(create_btn)
        layout.addWidget(publish_btn)
        layout.addWidget(load_btn)
        layout.addWidget(inventory_btn)
        layout.addWidget(subsetm_btn)

        layout.addWidget(Spacer(15, self))

        layout.addWidget(libload_btn)

        # layout.addWidget(Spacer(15, self))

        # layout.addWidget(rename_btn)

        # layout.addWidget(Spacer(15, self))

        # layout.addWidget(set_colorspace_btn)
        # layout.addWidget(reset_resolution_btn)
        layout.addWidget(Spacer(15, self))
        layout.addWidget(experimental_btn)

        self.setLayout(layout)

        save_current_btn.clicked.connect(self.on_save_current_clicked)
        save_current_btn.setShortcut(QtGui.QKeySequence.Save)
        workfiles_btn.clicked.connect(self.on_workfile_clicked)
        create_btn.clicked.connect(self.on_create_clicked)
        publish_btn.clicked.connect(self.on_publish_clicked)
        load_btn.clicked.connect(self.on_load_clicked)
        inventory_btn.clicked.connect(self.on_inventory_clicked)
        subsetm_btn.clicked.connect(self.on_subsetm_clicked)
        libload_btn.clicked.connect(self.on_libload_clicked)
        # rename_btn.clicked.connect(self.on_rename_clicked)
        # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked)
        # reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
        experimental_btn.clicked.connect(self.on_experimental_clicked)

    def on_save_current_clicked(self):
        host = registered_host()
        current_file = host.get_current_workfile()
        if not current_file:
            print("Current project is not saved. "
                  "Please save once first via workfiles tool.")
            host_tools.show_workfiles()
            return

        print(f"Saving current file to: {current_file}")
        host.save_workfile(current_file)

    def on_workfile_clicked(self):
        print("Clicked Workfile")
        host_tools.show_workfiles()

    def on_create_clicked(self):
        print("Clicked Create")
        host_tools.show_creator()

    def on_publish_clicked(self):
        print("Clicked Publish")
        host_tools.show_publish(parent=None)

    def on_load_clicked(self):
        print("Clicked Load")
        host_tools.show_loader(use_context=True)

    def on_inventory_clicked(self):
        print("Clicked Inventory")
        host_tools.show_scene_inventory()

    def on_subsetm_clicked(self):
        print("Clicked Subset Manager")
        host_tools.show_subset_manager()

    def on_libload_clicked(self):
        print("Clicked Library")
        host_tools.show_library_loader()

    def on_rename_clicked(self):
        print("Clicked Rename")

    def on_set_colorspace_clicked(self):
        print("Clicked Set Colorspace")

    def on_set_resolution_clicked(self):
        print("Clicked Set Resolution")

    def on_experimental_clicked(self):
        host_tools.show_experimental_tools_dialog()


def launch_ayon_menu():
    app = QtWidgets.QApplication(sys.argv)

    ayon_menu = AYONMenu()

    stylesheet = load_stylesheet()
    ayon_menu.setStyleSheet(stylesheet)

    ayon_menu.show()

    sys.exit(app.exec_())

@ -1,71 +0,0 @@
QWidget {
    background-color: #282828;
    border-radius: 3;
    font-size: 13px;
}

QComboBox {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
}

QComboBox QAbstractItemView {
    color: white;
}

QPushButton {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
    padding: 5;
}

QPushButton:focus {
    background-color: "#171717";
    color: #d0d0d0;
}

QPushButton:hover {
    background-color: "#171717";
    color: #e64b3d;
}

QSpinBox {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
    padding: 2;
    max-width: 8em;
    qproperty-alignment: AlignCenter;
}

QLineEdit {
    border: 1px solid #090909;
    border-radius: 3px;
    background-color: #201f1f;
    color: #ffffff;
    padding: 2;
    min-width: 10em;
    qproperty-alignment: AlignCenter;
}

#AYONMenu {
    qproperty-alignment: AlignLeft;
    min-width: 10em;
    border: 1px solid #fef9ef;
}

QVBoxLayout {
    background-color: #282828;
}

#Divider {
    border: 1px solid #090909;
    background-color: #585858;
}

QLabel {
    color: #77776b;
}

@ -1,325 +0,0 @@
"""
Basic avalon integration
"""
import os
import json
import contextlib
from collections import OrderedDict

from pyblish import api as pyblish

from ayon_core.lib import Logger
from ayon_core.pipeline import (
    schema,
    register_loader_plugin_path,
    register_creator_plugin_path,
    register_inventory_action_path,
    AVALON_CONTAINER_ID,
)
from ayon_core.host import (
    HostBase,
    IWorkfileHost,
    ILoadHost
)

from . import lib
from .utils import get_resolve_module
from .workio import (
    open_file,
    save_file,
    file_extensions,
    has_unsaved_changes,
    work_root,
    current_file
)

log = Logger.get_logger(__name__)

HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")

AVALON_CONTAINERS = ":AVALON_CONTAINERS"


class ResolveHost(HostBase, IWorkfileHost, ILoadHost):
    name = "resolve"

    def install(self):
        """Install resolve-specific functionality of avalon-core.

        This is where you install menus and register families, data
        and loaders into resolve.

        It is called automatically when installing via `api.install(resolve)`.

        See the Maya equivalent for inspiration on how to implement this.

        """

        log.info("ayon_resolve installed")

        pyblish.register_host(self.name)
        pyblish.register_plugin_path(PUBLISH_PATH)
        print("Registering DaVinci Resolve plug-ins..")

        register_loader_plugin_path(LOAD_PATH)
        register_creator_plugin_path(CREATE_PATH)
        register_inventory_action_path(INVENTORY_PATH)

        # register callback for switching publishable
        pyblish.register_callback("instanceToggled",
                                  on_pyblish_instance_toggled)

        get_resolve_module()

    def open_workfile(self, filepath):
        return open_file(filepath)

    def save_workfile(self, filepath=None):
        return save_file(filepath)

    def work_root(self, session):
        return work_root(session)

    def get_current_workfile(self):
        return current_file()

    def workfile_has_unsaved_changes(self):
        return has_unsaved_changes()

    def get_workfile_extensions(self):
        return file_extensions()

    def get_containers(self):
        return ls()

def containerise(timeline_item,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):
    """Bundle Resolve's timeline item into an assembly and imprint it
    with metadata.

    Containerisation enables tracking of version, author and origin
    for loaded assets.

    Arguments:
        timeline_item (resolve.TimelineItem): object to imprint as container
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of node used to produce this container.

    Returns:
        timeline_item (resolve.TimelineItem): containerised object

    """

    data_imprint = OrderedDict({
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": str(name),
        "namespace": str(namespace),
        "loader": str(loader),
        "representation": context["representation"]["id"],
    })

    if data:
        data_imprint.update(data)

    lib.set_timeline_item_pype_tag(timeline_item, data_imprint)

    return timeline_item

def ls():
    """List available containers.

    This function is used by the container tooling to show all loaded
    containers in the host. It yields one container dict at a time.

    See the `container.json` schema for details on how it should look,
    and the Maya equivalent, which is in `avalon.maya.pipeline`
    """

    # Media Pool instances from Load Media loader
    for clip in lib.iter_all_media_pool_clips():
        data = clip.GetMetadata(lib.pype_tag_name)
        if not data:
            continue
        data = json.loads(data)

        # If not all required data, skip it
        required = ['schema', 'id', 'loader', 'representation']
        if not all(key in data for key in required):
            continue

        container = {key: data[key] for key in required}
        container["objectName"] = clip.GetName()  # Get path in folders
        container["namespace"] = clip.GetName()
        container["name"] = clip.GetUniqueId()
        container["_item"] = clip
        yield container

    # Timeline instances from Load Clip loader
    # get all track items from current timeline
    all_timeline_items = lib.get_current_timeline_items(filter=False)

    for timeline_item_data in all_timeline_items:
        timeline_item = timeline_item_data["clip"]["item"]
        container = parse_container(timeline_item)
        if container:
            yield container

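# Illustrative usage sketch: consuming the generator, e.g. to print all
# loaded containers and their representation ids in the current project:
#
#     for container in ls():
#         print(container["objectName"], container["representation"])
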
def parse_container(timeline_item, validate=True):
    """Return container data from timeline_item's openpype tag.

    Args:
        timeline_item (resolve.TimelineItem): A containerised track item.
        validate (bool, optional): validate against the avalon schema

    Returns:
        dict: The container schema data for input containerized track item.

    """
    # convert tag metadata to normal keys names
    data = lib.get_timeline_item_pype_tag(timeline_item)

    if validate and data and data.get("schema"):
        schema.validate(data)

    if not isinstance(data, dict):
        return

    # If not all required data return the empty container
    required = ['schema', 'id', 'name',
                'namespace', 'loader', 'representation']

    if not all(key in data for key in required):
        return

    container = {key: data[key] for key in required}

    container["objectName"] = timeline_item.GetName()

    # Store reference to the node object
    container["_timeline_item"] = timeline_item

    return container

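# Illustrative sketch only (hypothetical values): the minimal tag payload
# that `parse_container` accepts; anything with missing keys returns None:
#
#     {
#         "schema": "openpype:container-2.0",
#         "id": AVALON_CONTAINER_ID,
#         "name": "modelMain",
#         "namespace": "char_villain_01",
#         "loader": "LoadClip",
#         "representation": "<representation id>",
#     }
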
def update_container(timeline_item, data=None):
    """Update container data in input timeline_item's openpype tag.

    Args:
        timeline_item (resolve.TimelineItem): A containerised track item.
        data (dict, optional): dictionary with data to be updated

    Returns:
        bool: True if container was updated correctly

    """
    data = data or dict()

    container = lib.get_timeline_item_pype_tag(timeline_item)

    for _key, _value in container.items():
        try:
            container[_key] = data[_key]
        except KeyError:
            pass

    log.info("Updating container: `{}`".format(timeline_item))
    return bool(lib.set_timeline_item_pype_tag(timeline_item, container))

@contextlib.contextmanager
def maintained_selection():
    """Maintain selection during context

    Example:
        >>> with maintained_selection():
        ...     node['selected'].setValue(True)
        >>> print(node['selected'].value())
        False
    """
    try:
        # do the operation
        yield
    finally:
        pass


def reset_selection():
    """Deselect all selected nodes
    """
    pass

def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value: {}".format(
        instance, old_value, new_value))

    from ayon_resolve.api import set_publish_attribute

    # Whether instances should be passthrough based on new value
    timeline_item = instance.data["item"]
    set_publish_attribute(timeline_item, new_value)

def remove_instance(instance):
    """Remove instance marker from track item."""
    instance_id = instance.get("uuid")

    selected_timeline_items = lib.get_current_timeline_items(
        filter=True, selecting_color=lib.publish_clip_color)

    found_ti = None
    for timeline_item_data in selected_timeline_items:
        timeline_item = timeline_item_data["clip"]["item"]

        # get openpype tag data
        tag_data = lib.get_timeline_item_pype_tag(timeline_item)
        _ti_id = tag_data.get("uuid")
        if _ti_id == instance_id:
            found_ti = timeline_item
            break

    if found_ti is None:
        return

    # removing instance by marker color
    print(f"Removing instance: {found_ti.GetName()}")
    found_ti.DeleteMarkersByColor(lib.pype_marker_color)

def list_instances():
    """List all created instances from current workfile."""
    listed_instances = []
    selected_timeline_items = lib.get_current_timeline_items(
        filter=True, selecting_color=lib.publish_clip_color)

    for timeline_item_data in selected_timeline_items:
        timeline_item = timeline_item_data["clip"]["item"]
        ti_name = timeline_item.GetName().split(".")[0]

        # get openpype tag data
        tag_data = lib.get_timeline_item_pype_tag(timeline_item)

        if tag_data:
            asset = tag_data.get("asset")
            product_name = tag_data.get("productName")
            tag_data["label"] = f"{ti_name} [{asset}-{product_name}]"
            listed_instances.append(tag_data)

    return listed_instances

@ -1,910 +0,0 @@
import re
import uuid
import copy

import qargparse
from qtpy import QtWidgets, QtCore

from ayon_core.settings import get_current_project_settings
from ayon_core.pipeline import (
    LegacyCreator,
    LoaderPlugin,
    Anatomy
)

from . import lib
from .menu import load_stylesheet

class CreatorWidget(QtWidgets.QDialog):

    # output items
    items = {}

    def __init__(self, name, info, ui_inputs, parent=None):
        super(CreatorWidget, self).__init__(parent)

        self.setObjectName(name)

        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )
        self.setWindowTitle(name or "OpenPype Creator Input")
        self.resize(500, 700)

        # Where inputs and labels are set
        self.content_widget = [QtWidgets.QWidget(self)]
        top_layout = QtWidgets.QFormLayout(self.content_widget[0])
        top_layout.setObjectName("ContentLayout")
        top_layout.addWidget(Spacer(5, self))

        # first add widget tag line
        top_layout.addWidget(QtWidgets.QLabel(info))

        # main dynamic layout
        self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.scroll_area.setWidgetResizable(True)

        self.content_widget.append(self.scroll_area)

        scroll_widget = QtWidgets.QWidget(self)
        in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
        self.content_layout = [in_scroll_area]

        # add preset data into input widget layout
        self.items = self.populate_widgets(ui_inputs)
        self.scroll_area.setWidget(scroll_widget)

        # Confirmation buttons
        btns_widget = QtWidgets.QWidget(self)
        btns_layout = QtWidgets.QHBoxLayout(btns_widget)

        cancel_btn = QtWidgets.QPushButton("Cancel")
        btns_layout.addWidget(cancel_btn)

        ok_btn = QtWidgets.QPushButton("Ok")
        btns_layout.addWidget(ok_btn)

        # Main layout of the dialog
        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.setSpacing(0)

        # adding content widget
        for w in self.content_widget:
            main_layout.addWidget(w)

        main_layout.addWidget(btns_widget)

        ok_btn.clicked.connect(self._on_ok_clicked)
        cancel_btn.clicked.connect(self._on_cancel_clicked)

        stylesheet = load_stylesheet()
        self.setStyleSheet(stylesheet)

    def _on_ok_clicked(self):
        self.result = self.value(self.items)
        self.close()

    def _on_cancel_clicked(self):
        self.result = None
        self.close()

    def value(self, data, new_data=None):
        new_data = new_data or {}
        for k, v in data.items():
            new_data[k] = {
                "target": None,
                "value": None
            }
            if v["type"] == "dict":
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = self.value(v["value"])
            if v["type"] == "section":
                new_data.pop(k)
                new_data = self.value(v["value"], new_data)
            elif getattr(v["value"], "currentText", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].currentText()
            elif getattr(v["value"], "isChecked", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].isChecked()
            elif getattr(v["value"], "value", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].value()
            elif getattr(v["value"], "text", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].text()

        return new_data

    def camel_case_split(self, text):
        matches = re.finditer(
            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
        return " ".join([str(m.group(0)).capitalize() for m in matches])

    def create_row(self, layout, type, text, **kwargs):
        # get type attribute from qwidgets
        attr = getattr(QtWidgets, type)

        # convert label text to normal capitalized text with spaces
        label_text = self.camel_case_split(text)

        # assign the new text to label widget
        label = QtWidgets.QLabel(label_text)
        label.setObjectName("LineLabel")

        # create attribute name text stripped of spaces
        attr_name = text.replace(" ", "")

        # create attribute and assign default values
        setattr(
            self,
            attr_name,
            attr(parent=self))

        # assign the created attribute to variable
        item = getattr(self, attr_name)
        for func, val in kwargs.items():
            if getattr(item, func):
                func_attr = getattr(item, func)
                if isinstance(val, tuple):
                    func_attr(*val)
                else:
                    func_attr(val)

        # add to layout
        layout.addRow(label, item)

        return item

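    # Illustrative sketch only: `camel_case_split` turns camelCase keys into
    # readable labels, e.g.
    #     self.camel_case_split("clipRename")  # -> "Clip Rename"
    #     self.camel_case_split("vSyncTrack")  # -> "V Sync Track"
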
    def populate_widgets(self, data, content_layout=None):
        """
        Populate widget from input dict.

        Each plugin has its own set of widget rows defined in a dictionary;
        each row's value should have the following keys: `type`, `target`,
        `label`, `order`, `value` and optionally also `toolTip`.

        Args:
            data (dict): widget rows or organized groups defined
                         by types `dict` or `section`
            content_layout (QtWidgets.QFormLayout, optional): used when
                                                              nesting

        Returns:
            dict: redefined data dict updated with created widgets

        """

        content_layout = content_layout or self.content_layout[-1]
        # fix order of process by defined order value
        ordered_keys = list(data.keys())
        for k, v in data.items():
            try:
                # try removing a key from index which should
                # be filled with new
                ordered_keys.pop(v["order"])
            except IndexError:
                pass
            # add key into correct order
            ordered_keys.insert(v["order"], k)

        # process ordered
        for k in ordered_keys:
            v = data[k]
            tool_tip = v.get("toolTip", "")
            if v["type"] == "dict":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            if v["type"] == "section":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            elif v["type"] == "QLineEdit":
                data[k]["value"] = self.create_row(
                    content_layout, "QLineEdit", v["label"],
                    setText=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QComboBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QComboBox", v["label"],
                    addItems=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QCheckBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QCheckBox", v["label"],
                    setChecked=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QSpinBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QSpinBox", v["label"],
                    setRange=(0, 99999),
                    setValue=v["value"],
                    setToolTip=tool_tip)
        return data

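# Illustrative sketch only (hypothetical keys): the row schema that
# `populate_widgets` consumes, e.g. a single checkbox row grouped under
# a section:
#
#     ui_inputs = {
#         "renameGroup": {
#             "type": "section", "label": "Rename", "order": 0,
#             "target": "ui",
#             "value": {
#                 "clipRename": {
#                     "type": "QCheckBox", "label": "Rename clips",
#                     "order": 0, "target": "ui", "value": False,
#                 },
#             },
#         },
#     }
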
class Spacer(QtWidgets.QWidget):
    def __init__(self, height, *args, **kwargs):
        super(Spacer, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)

class ClipLoader:

    active_bin = None
    data = {}

    def __init__(self, loader_obj, context, **options):
        """ Initialize object

        Arguments:
            loader_obj (ayon_core.pipeline.load.LoaderPlugin): plugin object
            context (dict): loader plugin context
            options (dict, optional): possible keys:
                projectBinPath: "path/to/binItem"

        """
        self.__dict__.update(loader_obj.__dict__)
        self.context = context
        self.active_project = lib.get_current_project()

        # try to get value from options or evaluate key value for `handles`
        self.with_handles = options.get("handles") is True

        # try to get value from options or evaluate key value for `load_to`
        self.new_timeline = (
            options.get("newTimeline") or
            options.get("load_to") == "New timeline"
        )
        # try to get value from options or evaluate key value for `load_how`
        self.sequential_load = (
            options.get("sequentially") or
            options.get("load_how") == "Sequentially in order"
        )

        assert self._populate_data(), str(
            "Cannot Load selected data, look into database "
            "or call your supervisor")

        # inject folder data to representation dict
        self._get_folder_attributes()

        # add active components to class
        if self.new_timeline:
            loader_cls = loader_obj.__class__
            if loader_cls.timeline:
                # if multiselection is set then use options sequence
                self.active_timeline = loader_cls.timeline
            else:
                # create new sequence
                self.active_timeline = lib.get_new_timeline(
                    "{}_{}".format(
                        self.data["timeline_basename"],
                        str(uuid.uuid4())[:8]
                    )
                )
                loader_cls.timeline = self.active_timeline

        else:
            self.active_timeline = lib.get_current_timeline()

    def _populate_data(self):
        """ Gets context and converts it to self.data

        data structure:
            {
                "name": "assetName_productName_representationName",
                "binPath": "projectBinPath",
            }
        """
        # create name
        folder_entity = self.context["folder"]
        product_name = self.context["product"]["name"]
        repre_entity = self.context["representation"]

        folder_name = folder_entity["name"]
        folder_path = folder_entity["path"]
        representation_name = repre_entity["name"]

        self.data["clip_name"] = "_".join([
            folder_name,
            product_name,
            representation_name
        ])
        self.data["versionAttributes"] = self.context["version"]["attrib"]

        self.data["timeline_basename"] = "timeline_{}_{}".format(
            product_name, representation_name)

        # solve project bin structure path
        hierarchy = "Loader{}".format(folder_path)

        self.data["binPath"] = hierarchy

        return True

    def _get_folder_attributes(self):
        """ Get all available folder attribute data

        Joins the folder's attribute data into `self.data` for use
        alongside the representation.
        """

        self.data["folderAttributes"] = copy.deepcopy(
            self.context["folder"]["attrib"]
        )

    def load(self, files):
        """Load clip into timeline

        Arguments:
            files (list[str]): list of files to load into timeline
        """
        # create project bin for the media to be imported into
        self.active_bin = lib.create_bin(self.data["binPath"])

        # create mediaItem in active project bin
        # create clip media
        media_pool_item = lib.create_media_pool_item(
            files,
            self.active_bin
        )
        _clip_property = media_pool_item.GetClipProperty
        source_in = int(_clip_property("Start"))
        source_out = int(_clip_property("End"))
        source_duration = int(_clip_property("Frames"))

        # Trim clip start if slate is present
        if "slate" in self.data["versionAttributes"]["families"]:
            source_in += 1
            source_duration = source_out - source_in + 1

        if not self.with_handles:
            # Load file without the handles of the source media
            # We remove the handles from the source in and source out
            # so that the handles are excluded in the timeline

            # get version data frame data from db
            version_attributes = self.data["versionAttributes"]
            frame_start = version_attributes.get("frameStart")
            frame_end = version_attributes.get("frameEnd")

            # The version data usually stores the frame range + handles of
            # the media, however certain representations may be shorter
            # because they exclude those handles intentionally.
            # Unfortunately the representation does not store that in the
            # database currently, so we should compensate for those cases.
            # If the media is shorter than the frame range specified in the
            # database we assume it is without handles and thus we do not
            # need to remove the handles from source in and out.
            if frame_start is not None and frame_end is not None:
                # Version has frame range data, so we can compare media length
                handle_start = version_attributes.get("handleStart", 0)
                handle_end = version_attributes.get("handleEnd", 0)
                frame_start_handle = frame_start - handle_start
                frame_end_handle = frame_end + handle_end
                database_frame_duration = int(
                    frame_end_handle - frame_start_handle + 1
                )
                if source_duration >= database_frame_duration:
                    source_in += handle_start
                    source_out -= handle_end

        # get timeline in
        timeline_start = self.active_timeline.GetStartFrame()
        if self.sequential_load:
            # set timeline start frame
            timeline_in = int(timeline_start)
        else:
            # set timeline start frame + original clip in frame
            timeline_in = int(
                timeline_start + self.data["folderAttributes"]["clipIn"])

        # make track item from source in bin as item
        timeline_item = lib.create_timeline_item(
            media_pool_item,
            self.active_timeline,
            timeline_in,
            source_in,
            source_out,
        )

        print("Loading clips: `{}`".format(self.data["clip_name"]))
        return timeline_item

    def update(self, timeline_item, files):
        # create project bin for the media to be imported into
        self.active_bin = lib.create_bin(self.data["binPath"])

        # create mediaItem in active project bin
        # create clip media
        media_pool_item = lib.create_media_pool_item(
            files,
            self.active_bin
        )

        # Read trimming from timeline item
        timeline_item_in = timeline_item.GetLeftOffset()
        timeline_item_len = timeline_item.GetDuration()
        timeline_item_out = timeline_item_in + timeline_item_len

        lib.swap_clips(
            timeline_item,
            media_pool_item,
            timeline_item_in,
            timeline_item_out
        )

        print("Loading clips: `{}`".format(self.data["clip_name"]))
        return timeline_item

class TimelineItemLoader(LoaderPlugin):
    """A basic SequenceLoader for Resolve

    This will implement the basic behavior for a loader to inherit from that
    will containerize the reference and will implement the `remove` and
    `update` logic.

    """

    options = [
        qargparse.Boolean(
            "handles",
            label="Include handles",
            default=0,
            help="Load with handles or without?"
        ),
        qargparse.Choice(
            "load_to",
            label="Where to load clips",
            items=[
                "Current timeline",
                "New timeline"
            ],
            default=0,
            help="Where do you want clips to be loaded?"
        ),
        qargparse.Choice(
            "load_how",
            label="How to load clips",
            items=[
                "Original timing",
                "Sequentially in order"
            ],
            default="Original timing",
            help="Would you like to place it at original timing?"
        )
    ]

    def load(
        self,
        context,
        name=None,
        namespace=None,
        options=None
    ):
        pass

    def update(self, container, context):
        """Update an existing `container`
        """
        pass

    def remove(self, container):
        """Remove an existing `container`
        """
        pass

class Creator(LegacyCreator):
    """Creator class wrapper
    """
    marker_color = "Purple"

    def __init__(self, *args, **kwargs):
        super(Creator, self).__init__(*args, **kwargs)

        resolve_p_settings = get_current_project_settings().get("resolve")
        self.presets = {}
        if resolve_p_settings:
            self.presets = resolve_p_settings["create"].get(
                self.__class__.__name__, {})

        # adding basic current context resolve objects
        self.project = lib.get_current_project()
        self.timeline = lib.get_current_timeline()

        if (self.options or {}).get("useSelection"):
            self.selected = lib.get_current_timeline_items(filter=True)
        else:
            self.selected = lib.get_current_timeline_items(filter=False)

        self.widget = CreatorWidget

class PublishClip:
    """
    Convert a track item to a publishable instance

    Args:
        timeline_item (resolve.TimelineItem): resolve track item object
        kwargs (optional): additional data needed for rename=True (presets)

    Returns:
        resolve.TimelineItem: track item object with openpype tag
    """
    vertical_clip_match = {}
    tag_data = {}
    types = {
        "shot": "shot",
        "folder": "folder",
        "episode": "episode",
        "sequence": "sequence",
        "track": "sequence",
    }

    # parents search pattern
    parents_search_pattern = r"\{([a-z]*?)\}"

    # default templates for non-ui use
    rename_default = False
    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
    base_product_name_default = "<track_name>"
    review_track_default = "< none >"
    product_type_default = "plate"
    count_from_default = 10
    count_steps_default = 10
    vertical_sync_default = False
    driving_layer_default = ""

    def __init__(self, cls, timeline_item_data, **kwargs):
        # populate input cls attribute onto self.[attr]
        self.__dict__.update(cls.__dict__)

        # get main parent objects
        self.timeline_item_data = timeline_item_data
        self.timeline_item = timeline_item_data["clip"]["item"]
        timeline_name = timeline_item_data["timeline"].GetName()
        self.timeline_name = str(timeline_name).replace(" ", "_")

        # track item (clip) main attributes
        self.ti_name = self.timeline_item.GetName()
        self.ti_index = int(timeline_item_data["clip"]["index"])

        # get track name and index
        track_name = timeline_item_data["track"]["name"]
        self.track_name = str(track_name).replace(" ", "_")
        self.track_index = int(timeline_item_data["track"]["index"])

        if kwargs.get("avalon"):
            self.tag_data.update(kwargs["avalon"])

        # adding ui inputs if any
        self.ui_inputs = kwargs.get("ui_inputs", {})

        # adding media pool folder if any
        self.mp_folder = kwargs.get("mp_folder")

        # populate default data before we get other attributes
        self._populate_timeline_item_default_data()

        # use all populated default data to create all important attributes
        self._populate_attributes()

        # create parents with correct types
        self._create_parents()

    def convert(self):
        # solve track item data and add them to tag data
        self._convert_to_tag_data()

        # if track name is in review track name and also if driving track
        # name is not in review track name: skip tag creation
        if (self.track_name in self.review_layer) and (
                self.driving_layer not in self.review_layer):
            return

        # deal with clip name
        new_name = self.tag_data.pop("newClipName")

        if self.rename:
            self.tag_data["asset_name"] = new_name
        else:
            self.tag_data["asset_name"] = self.ti_name

        # AYON unique identifier
        folder_path = "/{}/{}".format(
            self.tag_data["hierarchy"],
            self.tag_data["asset_name"]
        )
        self.tag_data["folder_path"] = folder_path

        # create new name for track item
        if not lib.pype_marker_workflow:
            # create compound clip workflow
            lib.create_compound_clip(
                self.timeline_item_data,
                self.tag_data["asset_name"],
                self.mp_folder
            )

        # add timeline_item_data selection to tag
        self.tag_data.update({
            "track_data": self.timeline_item_data["track"]
        })

        # create openpype tag on timeline_item and add data
        lib.imprint(self.timeline_item, self.tag_data)

        return self.timeline_item

    def _populate_timeline_item_default_data(self):
        """ Populate default formatting data from track item. """

        self.timeline_item_default_data = {
            "_folder_": "shots",
            "_sequence_": self.timeline_name,
            "_track_": self.track_name,
            "_clip_": self.ti_name,
            "_trackIndex_": self.track_index,
            "_clipIndex_": self.ti_index
        }

    def _populate_attributes(self):
        """ Populate main object attributes. """
        # track item frame range and parent track name for vertical sync check
        self.clip_in = int(self.timeline_item.GetStart())
        self.clip_out = int(self.timeline_item.GetEnd())

        # define ui inputs if non gui mode was used
        self.shot_num = self.ti_index

        # ui_inputs data or default values if gui was not used
        self.rename = self.ui_inputs.get(
            "clipRename", {}).get("value") or self.rename_default
        self.clip_name = self.ui_inputs.get(
            "clipName", {}).get("value") or self.clip_name_default
        self.hierarchy = self.ui_inputs.get(
            "hierarchy", {}).get("value") or self.hierarchy_default
        self.hierarchy_data = self.ui_inputs.get(
            "hierarchyData", {}).get("value") or \
            self.timeline_item_default_data.copy()
        self.count_from = self.ui_inputs.get(
            "countFrom", {}).get("value") or self.count_from_default
        self.count_steps = self.ui_inputs.get(
            "countSteps", {}).get("value") or self.count_steps_default
        self.base_product_name = self.ui_inputs.get(
            "productName", {}).get("value") or self.base_product_name_default
        self.product_type = self.ui_inputs.get(
            "productType", {}).get("value") or self.product_type_default
        self.vertical_sync = self.ui_inputs.get(
            "vSyncOn", {}).get("value") or self.vertical_sync_default
        self.driving_layer = self.ui_inputs.get(
            "vSyncTrack", {}).get("value") or self.driving_layer_default
        self.review_track = self.ui_inputs.get(
            "reviewTrack", {}).get("value") or self.review_track_default

        # build product name from layer name
        if self.base_product_name == "<track_name>":
            self.base_product_name = self.track_name

        # create product name for publishing
        self.product_name = (
            self.product_type + self.base_product_name.capitalize()
        )

    def _replace_hash_to_expression(self, name, text):
        """ Replace hash with number in correct padding. """
        _spl = text.split("#")
        _len = (len(_spl) - 1)
        _repl = "{{{0}:0>{1}}}".format(name, _len)
        new_text = text.replace(("#" * _len), _repl)
        return new_text

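    # Illustrative sketch only: a run of hashes becomes a zero-padded
    # Python format expression keyed by `name`, e.g.
    #     self._replace_hash_to_expression("shot", "sh###")
    #     # -> "sh{shot:0>3}"
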
    def _convert_to_tag_data(self):
        """ Convert internal data to tag data.

        Populates the tag data into internal variable self.tag_data
        """
        # define vertical sync attributes
        hero_track = True
        self.review_layer = ""
        if self.vertical_sync:
            # check if track name is not in driving layer
            if self.track_name not in self.driving_layer:
                # if it is not then define vertical sync as None
                hero_track = False

        # increasing steps by index of rename iteration
        self.count_steps *= self.rename_index

        hierarchy_formatting_data = {}
        _data = self.timeline_item_default_data.copy()
        if self.ui_inputs:
            # adding tag metadata from ui
            for _k, _v in self.ui_inputs.items():
                if _v["target"] == "tag":
                    self.tag_data[_k] = _v["value"]

            # driving layer is set as positive match
            if hero_track or self.vertical_sync:
                # mark review layer
                if self.review_track and (
                        self.review_track not in self.review_track_default):
                    # if review layer is defined and not the same as default
                    self.review_layer = self.review_track
                # shot num calculate
                if self.rename_index == 0:
                    self.shot_num = self.count_from
                else:
                    self.shot_num = self.count_from + self.count_steps

            # clip name sequence number
            _data.update({"shot": self.shot_num})

            # solve # in text to pythonic expression
            for _k, _v in self.hierarchy_data.items():
                if "#" not in _v["value"]:
                    continue
                self.hierarchy_data[
                    _k]["value"] = self._replace_hash_to_expression(
                        _k, _v["value"])

            # fill up pythonic expressions in hierarchy data
            for k, _v in self.hierarchy_data.items():
                hierarchy_formatting_data[k] = _v["value"].format(**_data)
        else:
            # if no gui mode then just pass default data
            hierarchy_formatting_data = self.hierarchy_data

        tag_hierarchy_data = self._solve_tag_hierarchy_data(
            hierarchy_formatting_data
        )

        tag_hierarchy_data.update({"heroTrack": True})
        if hero_track and self.vertical_sync:
            self.vertical_clip_match.update({
                (self.clip_in, self.clip_out): tag_hierarchy_data
            })

        if not hero_track and self.vertical_sync:
            # driving layer is set as negative match
            for (_in, _out), hero_data in self.vertical_clip_match.items():
                hero_data.update({"heroTrack": False})
                if _in != self.clip_in or _out != self.clip_out:
                    continue

                data_product_name = hero_data["productName"]
                # add track index in case of duplicate names in hero data
                if self.product_name in data_product_name:
                    hero_data["productName"] = self.product_name + str(
                        self.track_index)
                # in case track name and product name are the same then add
                if self.base_product_name == self.track_name:
                    hero_data["productName"] = self.product_name
                # assign data to return hierarchy data to tag
                tag_hierarchy_data = hero_data

        # add data to return data dict
        self.tag_data.update(tag_hierarchy_data)

        # add uuid to tag data
        self.tag_data["uuid"] = str(uuid.uuid4())

        # add review track only to hero track
        if hero_track and self.review_layer:
            self.tag_data.update({"reviewTrack": self.review_layer})
        else:
            self.tag_data.update({"reviewTrack": None})

    def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
        """ Solve tag data from hierarchy data and templates. """
        # fill up clip name and hierarchy keys
        hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
        clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)

        return {
            "newClipName": clip_name_filled,
            "hierarchy": hierarchy_filled,
            "parents": self.parents,
            "hierarchyData": hierarchy_formatting_data,
            "productName": self.product_name,
            "productType": self.product_type
        }

    def _convert_to_entity(self, key):
        """ Convert input key to key with type. """
        # convert to entity type
        folder_type = self.types.get(key)

        assert folder_type, "Missing folder type for `{}`".format(
            key
        )

        return {
            "folder_type": folder_type,
            "entity_name": self.hierarchy_data[key]["value"].format(
                **self.timeline_item_default_data
            )
        }

    def _create_parents(self):
        """ Create parents and return them in a list. """
        self.parents = []

        pattern = re.compile(self.parents_search_pattern)
        par_split = [pattern.findall(t).pop()
                     for t in self.hierarchy.split("/")]

        for key in par_split:
            parent = self._convert_to_entity(key)
            self.parents.append(parent)

def get_representation_files(representation):
    anatomy = Anatomy()
    files = []
    for file_data in representation["files"]:
        path = anatomy.fill_root(file_data["path"])
        files.append(path)
    return files

@ -1,71 +0,0 @@
#! python3


class TestGUI:
    def __init__(self):
        resolve = bmd.scriptapp("Resolve")  # noqa
        self.fu = resolve.Fusion()
        ui = self.fu.UIManager
        self.disp = bmd.UIDispatcher(self.fu.UIManager)  # noqa
        self.title_font = ui.Font({"PixelSize": 18})
        self._dialogue = self.disp.AddWindow(
            {
                "WindowTitle": "Get Testing folder",
                "ID": "TestingWin",
                "Geometry": [250, 250, 250, 100],
                "Spacing": 0,
                "Margin": 10
            },
            [
                ui.VGroup(
                    {
                        "Spacing": 2
                    },
                    [
                        ui.Button(
                            {
                                "ID": "inputTestSourcesFolder",
                                "Text": "Select folder with testing media",
                                "Weight": 1.25,
                                "ToolTip": (
                                    "Choose folder with videos, sequences, "
                                    "single images, nested folders with "
                                    "media"
                                ),
                                "Flat": False
                            }
                        ),
                        ui.VGap(),
                        ui.Button(
                            {
                                "ID": "openButton",
                                "Text": "Process Test",
                                "Weight": 2,
                                "ToolTip": "Run the test...",
                                "Flat": False
                            }
                        )
                    ]
                )
            ]
        )
        self._widgets = self._dialogue.GetItems()
        self._dialogue.On.TestingWin.Close = self._close_window
        self._dialogue.On.inputTestSourcesFolder.Clicked = self._open_dir_button_pressed  # noqa
        self._dialogue.On.openButton.Clicked = self.process

    def _close_window(self, event):
        self.disp.ExitLoop()

    def process(self, event):
        # placeholder; meant to be overridden in a child class
        pass

    def _open_dir_button_pressed(self, event):
        # placeholder; meant to be overridden in a child class
        pass

    def show_gui(self):
        self._dialogue.Show()
        self.disp.RunLoop()
        self._dialogue.Hide()

@ -1,134 +0,0 @@
#!/usr/bin/env python
# TODO: convert this script to be usable with OpenPype
"""
Example DaVinci Resolve script:
Load a still from a DRX file and apply the still to all clips in all
timelines. Set render format and codec, add render jobs for all timelines,
render to the specified path and wait for rendering completion.
Once rendering is complete, delete all jobs.
"""
# cloned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py  # noqa

from python_get_resolve import GetResolve
import sys
import time


def AddTimelineToRender(project, timeline, presetName,
                        targetDirectory, renderFormat, renderCodec):
    project.SetCurrentTimeline(timeline)
    project.LoadRenderPreset(presetName)

    if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec):
        return False

    project.SetRenderSettings(
        {"SelectAllFrames": 1, "TargetDir": targetDirectory})
    return project.AddRenderJob()


def RenderAllTimelines(resolve, presetName, targetDirectory,
                       renderFormat, renderCodec):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False

    resolve.OpenPage("Deliver")
    timelineCount = project.GetTimelineCount()

    for index in range(0, int(timelineCount)):
        if not AddTimelineToRender(
                project,
                project.GetTimelineByIndex(index + 1),
                presetName,
                targetDirectory,
                renderFormat,
                renderCodec):
            return False
    return project.StartRendering()


def IsRenderingInProgress(resolve):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False

    return project.IsRenderingInProgress()


def WaitForRenderingCompletion(resolve):
    while IsRenderingInProgress(resolve):
        time.sleep(1)
    return


def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0):
    trackCount = timeline.GetTrackCount("video")

    clips = {}
    for index in range(1, int(trackCount) + 1):
        clips.update(timeline.GetItemsInTrack("video", index))
    return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips)


def ApplyDRXToAllTimelines(resolve, path, gradeMode=0):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False
    timelineCount = project.GetTimelineCount()

    for index in range(0, int(timelineCount)):
        timeline = project.GetTimelineByIndex(index + 1)
        project.SetCurrentTimeline(timeline)
        if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode):
            return False
    return True


def DeleteAllRenderJobs(resolve):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    project.DeleteAllRenderJobs()
    return


# Inputs:
# - DRX file to import grade still and apply it for clips
# - grade mode (0, 1 or 2)
# - preset name for rendering
# - render path
# - render format
# - render codec
if len(sys.argv) < 7:
    print(
        "input parameters for scripts are [drx file path] [grade mode] "
        "[render preset name] [render path] [render format] [render codec]")
    sys.exit()

|
||||
drxPath = sys.argv[1]
|
||||
gradeMode = sys.argv[2]
|
||||
renderPresetName = sys.argv[3]
|
||||
renderPath = sys.argv[4]
|
||||
renderFormat = sys.argv[5]
|
||||
renderCodec = sys.argv[6]
|
||||
|
||||
# Get currently open project
|
||||
resolve = GetResolve()
|
||||
|
||||
if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode):
|
||||
print("Unable to apply a still from drx file to all timelines")
|
||||
sys.exit()
|
||||
|
||||
if not RenderAllTimelines(resolve, renderPresetName, renderPath,
|
||||
renderFormat, renderCodec):
|
||||
print("Unable to set all timelines for rendering")
|
||||
sys.exit()
|
||||
|
||||
WaitForRenderingCompletion(resolve)
|
||||
|
||||
DeleteAllRenderJobs(resolve)
|
||||
|
||||
print("Rendering is completed.")
|
||||
|
|
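The script expects six positional arguments. A sketch of an invocation; file names, preset, format and codec values here are placeholders, not values prescribed by the script:

    import subprocess

    subprocess.run([
        "python", "3_grade_and_render_all_timelines.py",
        "grade.drx", "0", "MyRenderPreset",   # DRX file, grade mode, preset
        "/tmp/renders", "mp4", "H.264",       # target dir, format, codec
    ], check=True)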
@@ -1,83 +0,0 @@
#! python3

"""
Resolve's tools for setting up the environment
"""

import os
import sys

from ayon_core.lib import Logger

log = Logger.get_logger(__name__)


def get_resolve_module():
    from ayon_resolve import api
    # don't run if already loaded
    if api.bmdvr:
        log.info(("resolve module is assigned to "
                  f"`ayon_resolve.api.bmdvr`: {api.bmdvr}"))
        return api.bmdvr
    try:
        """
        The PYTHONPATH needs to be set correctly for this import
        statement to work. An alternative is to import the
        DaVinciResolveScript by specifying an absolute path
        (see the ExceptionHandler logic)
        """
        import DaVinciResolveScript as bmd
    except ImportError:
        if sys.platform.startswith("darwin"):
            expected_path = ("/Library/Application Support/Blackmagic Design"
                             "/DaVinci Resolve/Developer/Scripting/Modules")
        elif sys.platform.startswith("win") \
                or sys.platform.startswith("cygwin"):
            expected_path = os.path.normpath(
                os.getenv('PROGRAMDATA') + (
                    "/Blackmagic Design/DaVinci Resolve/Support/Developer"
                    "/Scripting/Modules"
                )
            )
        elif sys.platform.startswith("linux"):
            expected_path = "/opt/resolve/libs/Fusion/Modules"
        else:
            raise NotImplementedError(
                "Unsupported platform: {}".format(sys.platform)
            )

        # check if the default path has it...
        print(("Unable to find module DaVinciResolveScript from "
               "$PYTHONPATH - trying default locations"))

        module_path = os.path.normpath(
            os.path.join(
                expected_path,
                "DaVinciResolveScript.py"
            )
        )

        try:
            import imp
            bmd = imp.load_source('DaVinciResolveScript', module_path)
        except ImportError:
            # No fallbacks ... report error:
            log.error(
                ("Unable to find module DaVinciResolveScript - please "
                 "ensure that the module DaVinciResolveScript is "
                 "discoverable by python")
            )
            log.error(
                ("For a default DaVinci Resolve installation, the "
                 f"module is expected to be located in: {expected_path}")
            )
            sys.exit()
    # assign global var and return
    bmdvr = bmd.scriptapp("Resolve")
    bmdvf = bmd.scriptapp("Fusion")
    api.bmdvr = bmdvr
    api.bmdvf = bmdvf
    log.info(("Assigning resolve module to "
              f"`ayon_resolve.api.bmdvr`: {api.bmdvr}"))
    log.info(("Assigning resolve module to "
              f"`ayon_resolve.api.bmdvf`: {api.bmdvf}"))
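The `imp` module used in the fallback above is deprecated and removed in Python 3.12. A sketch of an equivalent fallback using `importlib.util`; the variable names match the function above, and behavioral equivalence is an assumption:

    import importlib.util

    spec = importlib.util.spec_from_file_location(
        "DaVinciResolveScript", module_path)
    bmd = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(bmd)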
@@ -1,96 +0,0 @@
"""Host API required by the Work Files tool"""

import os
from ayon_core.lib import Logger
from .lib import (
    get_project_manager,
    get_current_project
)


log = Logger.get_logger(__name__)


def file_extensions():
    return [".drp"]


def has_unsaved_changes():
    get_project_manager().SaveProject()
    return False


def save_file(filepath):
    pm = get_project_manager()
    file = os.path.basename(filepath)
    fname, _ = os.path.splitext(file)
    project = get_current_project()
    name = project.GetName()

    response = False
    if name == "Untitled Project":
        response = pm.CreateProject(fname)
        log.info("New project created: {}".format(response))
        pm.SaveProject()
    elif name != fname:
        response = project.SetName(fname)
        log.info("Project renamed: {}".format(response))

    exported = pm.ExportProject(fname, filepath)
    log.info("Project exported: {}".format(exported))


def open_file(filepath):
    """
    Load a project from the given filepath.
    """

    from . import bmdvr

    pm = get_project_manager()
    page = bmdvr.GetCurrentPage()
    if page is not None:
        # Save current project only if Resolve has an active page, otherwise
        # we consider Resolve being in a pre-launch state (no open UI yet)
        project = pm.GetCurrentProject()
        print(f"Saving current project: {project}")
        pm.SaveProject()

    file = os.path.basename(filepath)
    fname, _ = os.path.splitext(file)

    try:
        # load project from input path
        project = pm.LoadProject(fname)
        log.info(f"Project {project.GetName()} opened...")

    except AttributeError:
        log.warning((f"Project with name `{fname}` does not exist! It will "
                     f"be imported from {filepath} and then loaded..."))
        if pm.ImportProject(filepath):
            # load project from input path
            project = pm.LoadProject(fname)
            log.info(f"Project imported/loaded {project.GetName()}...")
            return True
        return False
    return True


def current_file():
    pm = get_project_manager()
    file_ext = file_extensions()[0]
    workdir_path = os.getenv("AYON_WORKDIR")
    project = pm.GetCurrentProject()
    project_name = project.GetName()
    file_name = project_name + file_ext

    # create current file path
    current_file_path = os.path.join(workdir_path, file_name)

    # return current file path if it exists
    if os.path.exists(current_file_path):
        return os.path.normpath(current_file_path)


def work_root(session):
    return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")
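A sketch of how a host might drive this workfiles API end to end; the workdir and filenames are illustrative only:

    import os

    os.environ["AYON_WORKDIR"] = "/projects/demo/work/editorial"  # illustrative
    save_file("/projects/demo/work/editorial/sh010_edit.drp")
    print(current_file())   # the .drp path, if it exists on disk
    open_file("/projects/demo/work/editorial/sh010_edit.drp")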
@@ -1,35 +0,0 @@
import os
from ayon_applications import PreLaunchHook, LaunchTypes


class PreLaunchResolveLastWorkfile(PreLaunchHook):
    """Special hook to open the last workfile for Resolve.

    Checks 'start_last_workfile'; if set to False, it will not open the
    last workfile. This property is set explicitly in the Launcher.
    """
    order = 10
    app_groups = {"resolve"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        if not self.data.get("start_last_workfile"):
            self.log.info("It is set to not start last workfile on start.")
            return

        last_workfile = self.data.get("last_workfile_path")
        if not last_workfile:
            self.log.warning("Last workfile was not collected.")
            return

        if not os.path.exists(last_workfile):
            self.log.info("Current context does not have any workfile yet.")
            return

        # Add path to launch environment for the startup script to pick up
        self.log.info(
            "Setting AYON_RESOLVE_OPEN_ON_LAUNCH to launch "
            f"last workfile: {last_workfile}"
        )
        key = "AYON_RESOLVE_OPEN_ON_LAUNCH"
        self.launch_context.env[key] = last_workfile
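The counterpart that would consume `AYON_RESOLVE_OPEN_ON_LAUNCH` inside the startup script; a sketch, assuming `open_file` from the workfiles API above is available there:

    import os

    last_workfile = os.environ.get("AYON_RESOLVE_OPEN_ON_LAUNCH")
    if last_workfile and os.path.exists(last_workfile):
        open_file(last_workfile)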
@@ -1,138 +0,0 @@
import os
from pathlib import Path
import platform

from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_resolve.utils import setup


class PreLaunchResolveSetup(PreLaunchHook):
    """
    This hook sets up the Resolve scripting environment as described in
    Resolve's documentation found with the installed application at
    {resolve}/Support/Developer/Scripting/README.txt

    Prepares the following environment variables:
    - `RESOLVE_SCRIPT_API`
    - `RESOLVE_SCRIPT_LIB`

    It adds $RESOLVE_SCRIPT_API/Modules to PYTHONPATH.

    Additionally it sets up the Python home for Python 3 based on
    RESOLVE_PYTHON3_HOME in the environment (usually defined in OpenPype's
    Application environment for Resolve by the admin). For this it sets the
    PYTHONHOME and PATH variables.

    It also defines:
    - `RESOLVE_UTILITY_SCRIPTS_DIR`: Destination directory for OpenPype
      Fusion scripts to be copied to for Resolve to pick them up.
    - `AYON_LOG_NO_COLORS` to True to ensure OP doesn't try to
      use logging with terminal colors as it fails in Resolve.

    """

    app_groups = {"resolve"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        current_platform = platform.system().lower()

        programdata = self.launch_context.env.get("PROGRAMDATA", "")
        resolve_script_api_locations = {
            "windows": (
                f"{programdata}/Blackmagic Design/"
                "DaVinci Resolve/Support/Developer/Scripting"
            ),
            "darwin": (
                "/Library/Application Support/Blackmagic Design"
                "/DaVinci Resolve/Developer/Scripting"
            ),
            "linux": "/opt/resolve/Developer/Scripting",
        }
        resolve_script_api = Path(
            resolve_script_api_locations[current_platform]
        )
        self.log.info(
            f"setting RESOLVE_SCRIPT_API variable to {resolve_script_api}"
        )
        self.launch_context.env[
            "RESOLVE_SCRIPT_API"
        ] = resolve_script_api.as_posix()

        resolve_script_lib_dirs = {
            "windows": (
                "C:/Program Files/Blackmagic Design"
                "/DaVinci Resolve/fusionscript.dll"
            ),
            "darwin": (
                "/Applications/DaVinci Resolve/DaVinci Resolve.app"
                "/Contents/Libraries/Fusion/fusionscript.so"
            ),
            "linux": "/opt/resolve/libs/Fusion/fusionscript.so",
        }
        resolve_script_lib = Path(resolve_script_lib_dirs[current_platform])
        self.launch_context.env[
            "RESOLVE_SCRIPT_LIB"
        ] = resolve_script_lib.as_posix()
        self.log.info(
            f"setting RESOLVE_SCRIPT_LIB variable to {resolve_script_lib}"
        )

        # TODO: add OTIO installation from `openpype/requirements.py`
        # making sure python <3.9.* is installed at provided path
        python3_home = Path(
            self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "")
        )

        assert python3_home.is_dir(), (
            "Python 3 is not installed at the provided folder path. Either "
            "make sure the `environments/resolve.json` has a correctly "
            "set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed "
            f"in the given path. \nRESOLVE_PYTHON3_HOME: `{python3_home}`"
        )
        python3_home_str = python3_home.as_posix()
        self.launch_context.env["PYTHONHOME"] = python3_home_str
        self.log.info(f"Path to Resolve Python folder: `{python3_home_str}`")

        # add to the PYTHONPATH
        env_pythonpath = self.launch_context.env["PYTHONPATH"]
        modules_path = Path(resolve_script_api, "Modules").as_posix()
        self.launch_context.env[
            "PYTHONPATH"
        ] = f"{modules_path}{os.pathsep}{env_pythonpath}"

        self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}")

        # add the pythonhome folder to PATH because on Windows
        # this is needed for Py3 to be correctly detected within Resolve
        env_path = self.launch_context.env["PATH"]
        self.log.info(f"Adding `{python3_home_str}` to the PATH variable")
        self.launch_context.env[
            "PATH"
        ] = f"{python3_home_str}{os.pathsep}{env_path}"

        self.log.debug(f"PATH: {self.launch_context.env['PATH']}")

        resolve_utility_scripts_dirs = {
            "windows": (
                f"{programdata}/Blackmagic Design"
                "/DaVinci Resolve/Fusion/Scripts/Comp"
            ),
            "darwin": (
                "/Library/Application Support/Blackmagic Design"
                "/DaVinci Resolve/Fusion/Scripts/Comp"
            ),
            "linux": "/opt/resolve/Fusion/Scripts/Comp",
        }
        resolve_utility_scripts_dir = Path(
            resolve_utility_scripts_dirs[current_platform]
        )
        # setting utility scripts dir for scripts syncing
        self.launch_context.env[
            "RESOLVE_UTILITY_SCRIPTS_DIR"
        ] = resolve_utility_scripts_dir.as_posix()

        # remove terminal coloring tags
        self.launch_context.env["AYON_LOG_NO_COLORS"] = "1"

        # Resolve Setup integration
        setup(self.launch_context.env)
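A quick sanity check of the environment this hook prepares, runnable for example from Resolve's Python console after launch; purely illustrative:

    import os

    for key in ("RESOLVE_SCRIPT_API", "RESOLVE_SCRIPT_LIB",
                "RESOLVE_UTILITY_SCRIPTS_DIR", "PYTHONHOME"):
        print(key, "=", os.environ.get(key))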
@@ -1,24 +0,0 @@
import os

from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_resolve import RESOLVE_ADDON_ROOT


class PreLaunchResolveStartup(PreLaunchHook):
    """Special hook to configure the startup script."""
    order = 11
    app_groups = {"resolve"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        # Set the openpype prelaunch startup script path for easy access
        # in the LUA .scriptlib code
        script_path = os.path.join(RESOLVE_ADDON_ROOT, "startup.py")
        key = "AYON_RESOLVE_STARTUP_SCRIPT"
        self.launch_context.env[key] = script_path

        self.log.info(
            f"Setting AYON_RESOLVE_STARTUP_SCRIPT to: {script_path}"
        )
@@ -1,326 +0,0 @@
"""Compatibility with OpenTimelineIO 0.12.0 and older."""

import os
import re
import sys
import json

import clique
import opentimelineio as otio

from . import utils

# module-level mutable state kept on the module object itself (legacy pattern)
self = sys.modules[__name__]
self.track_types = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
self.project_fps = None


def create_otio_rational_time(frame, fps):
    return otio.opentime.RationalTime(
        float(frame),
        float(fps)
    )


def create_otio_time_range(start_frame, frame_duration, fps):
    return otio.opentime.TimeRange(
        start_time=create_otio_rational_time(start_frame, fps),
        duration=create_otio_rational_time(frame_duration, fps)
    )


def create_otio_reference(media_pool_item):
    metadata = _get_metadata_media_pool_item(media_pool_item)
    print("media pool item: {}".format(media_pool_item.GetName()))

    _mp_clip_property = media_pool_item.GetClipProperty

    path = _mp_clip_property("File Path")
    reformat_path = utils.get_reformated_path(path, padded=True)
    padding = utils.get_padding_from_path(path)

    if padding:
        metadata.update({
            "isSequence": True,
            "padding": padding
        })

    # get clip properties with regard to the clip type
    fps = float(_mp_clip_property("FPS"))
    if _mp_clip_property("Type") == "Video":
        frame_start = int(_mp_clip_property("Start"))
        frame_duration = int(_mp_clip_property("Frames"))
    else:
        audio_duration = str(_mp_clip_property("Duration"))
        frame_start = 0
        frame_duration = int(utils.timecode_to_frames(
            audio_duration, float(fps)))

    otio_ex_ref_item = None

    if padding:
        # if it is a file sequence, try to create `ImageSequenceReference`;
        # the OTIO version might not be compatible, so fall through and do
        # it the old way
        try:
            dirname, filename = os.path.split(path)
            collection = clique.parse(filename, '{head}[{ranges}]{tail}')
            padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
            otio_ex_ref_item = otio.schema.ImageSequenceReference(
                target_url_base=dirname + os.sep,
                name_prefix=collection.format("{head}"),
                name_suffix=collection.format("{tail}"),
                start_frame=frame_start,
                frame_zero_padding=padding_num,
                rate=fps,
                available_range=create_otio_time_range(
                    frame_start,
                    frame_duration,
                    fps
                )
            )
        except AttributeError:
            pass

    if not otio_ex_ref_item:
        # in case of an old OTIO or a video file create `ExternalReference`
        otio_ex_ref_item = otio.schema.ExternalReference(
            target_url=reformat_path,
            available_range=create_otio_time_range(
                frame_start,
                frame_duration,
                fps
            )
        )

    # add metadata to otio item
    add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)

    return otio_ex_ref_item


def create_otio_markers(track_item, fps):
    track_item_markers = track_item.GetMarkers()
    markers = []
    for marker_frame in track_item_markers:
        note = track_item_markers[marker_frame]["note"]
        if "{" in note and "}" in note:
            metadata = json.loads(note)
        else:
            metadata = {"note": note}
        markers.append(
            otio.schema.Marker(
                name=track_item_markers[marker_frame]["name"],
                marked_range=create_otio_time_range(
                    marker_frame,
                    track_item_markers[marker_frame]["duration"],
                    fps
                ),
                color=track_item_markers[marker_frame]["color"].upper(),
                metadata=metadata
            )
        )
    return markers


def create_otio_clip(track_item):
    media_pool_item = track_item.GetMediaPoolItem()
    _mp_clip_property = media_pool_item.GetClipProperty

    if not self.project_fps:
        fps = float(_mp_clip_property("FPS"))
    else:
        fps = self.project_fps

    name = track_item.GetName()

    media_reference = create_otio_reference(media_pool_item)
    source_range = create_otio_time_range(
        int(track_item.GetLeftOffset()),
        int(track_item.GetDuration()),
        fps
    )

    if _mp_clip_property("Type") == "Audio":
        # audio clips return one otio clip per audio channel
        return_clips = list()
        audio_channels = _mp_clip_property("Audio Ch")
        for channel in range(0, int(audio_channels)):
            clip = otio.schema.Clip(
                name=f"{name}_{channel}",
                source_range=source_range,
                media_reference=media_reference
            )
            for marker in create_otio_markers(track_item, fps):
                clip.markers.append(marker)
            return_clips.append(clip)
        return return_clips
    else:
        clip = otio.schema.Clip(
            name=name,
            source_range=source_range,
            media_reference=media_reference
        )
        for marker in create_otio_markers(track_item, fps):
            clip.markers.append(marker)

        return clip


def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    return otio.schema.Gap(
        source_range=create_otio_time_range(
            gap_start,
            (clip_start - tl_start_frame) - gap_start,
            fps
        )
    )


def _create_otio_timeline(project, timeline, fps):
    metadata = _get_timeline_metadata(project, timeline)
    start_time = create_otio_rational_time(
        timeline.GetStartFrame(), fps)
    otio_timeline = otio.schema.Timeline(
        name=timeline.GetName(),
        global_start_time=start_time,
        metadata=metadata
    )
    return otio_timeline


def _get_timeline_metadata(project, timeline):
    media_pool = project.GetMediaPool()
    root_folder = media_pool.GetRootFolder()
    ls_folder = root_folder.GetClipList()
    timeline = project.GetCurrentTimeline()
    timeline_name = timeline.GetName()
    for tl in ls_folder:
        if tl.GetName() not in timeline_name:
            continue
        return _get_metadata_media_pool_item(tl)


def _get_metadata_media_pool_item(media_pool_item):
    data = dict()
    data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
    clip_properties = media_pool_item.GetClipProperty() or {}
    for name, value in clip_properties.items():
        if "Resolution" in name and "" != value:
            width, height = value.split("x")
            data.update({
                "width": int(width),
                "height": int(height)
            })
        if "PAR" in name and "" != value:
            try:
                data.update({"pixelAspect": float(value)})
            except ValueError:
                # non-numeric values (e.g. "Square") fall back to
                # square pixels
                data.update({"pixelAspect": float(1)})

    return data


def create_otio_track(track_type, track_name):
    return otio.schema.Track(
        name=track_name,
        kind=self.track_types[track_type]
    )


def add_otio_gap(clip_start, otio_track, track_item, timeline):
    # if there is a gap between the track start and the clip start
    if clip_start > otio_track.available_range().duration.value:
        # create gap and add it to track
        otio_track.append(
            create_otio_gap(
                otio_track.available_range().duration.value,
                track_item.GetStart(),
                timeline.GetStartFrame(),
                self.project_fps
            )
        )


def add_otio_metadata(otio_item, media_pool_item, **kwargs):
    mp_metadata = media_pool_item.GetMetadata()
    # add additional metadata from kwargs
    if kwargs:
        mp_metadata.update(kwargs)

    # add metadata to otio item metadata
    for key, value in mp_metadata.items():
        otio_item.metadata.update({key: value})


def create_otio_timeline(resolve_project):

    # get current timeline
    self.project_fps = resolve_project.GetSetting("timelineFrameRate")
    timeline = resolve_project.GetCurrentTimeline()

    # convert timeline to otio
    otio_timeline = _create_otio_timeline(
        resolve_project, timeline, self.project_fps)

    # loop all defined track types
    for track_type in list(self.track_types.keys()):
        # get total track count
        track_count = timeline.GetTrackCount(track_type)

        # loop all tracks by track indexes
        for track_index in range(1, int(track_count) + 1):
            # get current track name
            track_name = timeline.GetTrackName(track_type, track_index)

            # convert track to otio
            otio_track = create_otio_track(
                track_type, track_name)

            # get all track items in current track
            current_track_items = timeline.GetItemListInTrack(
                track_type, track_index)

            # loop available track items in current track items
            for track_item in current_track_items:
                # skip offline track items
                if track_item.GetMediaPoolItem() is None:
                    continue

                # calculate real clip start
                clip_start = track_item.GetStart() - timeline.GetStartFrame()

                add_otio_gap(
                    clip_start, otio_track, track_item, timeline)

                # create otio clip and add it to track
                otio_clip = create_otio_clip(track_item)

                if not isinstance(otio_clip, list):
                    otio_track.append(otio_clip)
                else:
                    # multichannel audio clips each get their own track
                    for index, clip in enumerate(otio_clip):
                        if index == 0:
                            otio_track.append(clip)
                        else:
                            # add previous otio track to timeline
                            otio_timeline.tracks.append(otio_track)
                            # convert track to otio
                            otio_track = create_otio_track(
                                track_type, track_name)
                            add_otio_gap(
                                clip_start, otio_track,
                                track_item, timeline)
                            otio_track.append(clip)

            # add track to otio timeline
            otio_timeline.tracks.append(otio_track)

    return otio_timeline


def write_to_file(otio_timeline, path):
    otio.adapters.write_to_file(otio_timeline, path)
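End-to-end usage of this export module; obtaining the `resolve` handle via `get_resolve_module` follows the environment tools file above, and the output path is a placeholder:

    resolve = get_resolve_module()
    project = resolve.GetProjectManager().GetCurrentProject()

    otio_timeline = create_otio_timeline(project)
    write_to_file(otio_timeline, "/tmp/current_timeline.otio")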
@@ -1,108 +0,0 @@
import sys
import json

import DaVinciResolveScript
import opentimelineio as otio


# module-level mutable state kept on the module object itself (legacy pattern)
self = sys.modules[__name__]
self.resolve = DaVinciResolveScript.scriptapp('Resolve')
self.fusion = DaVinciResolveScript.scriptapp('Fusion')
self.project_manager = self.resolve.GetProjectManager()
self.current_project = self.project_manager.GetCurrentProject()
self.media_pool = self.current_project.GetMediaPool()
self.track_types = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
self.project_fps = None


def build_timeline(otio_timeline):
    # TODO: build timeline in mediapool `otioImport` folder
    # TODO: loop otio tracks and build them in the new timeline
    for clip in otio_timeline.each_clip():
        # TODO: create track item
        print(clip.name)
        print(clip.parent().name)
        print(clip.range_in_parent())


def _build_track(otio_track):
    # TODO: _build_track
    pass


def _build_media_pool_item(otio_media_reference):
    # TODO: _build_media_pool_item
    pass


def _build_track_item(otio_clip):
    # TODO: _build_track_item
    pass


def _build_gap(otio_clip):
    # TODO: _build_gap
    pass


def _build_marker(track_item, otio_marker):
    frame_start = otio_marker.marked_range.start_time.value
    frame_duration = otio_marker.marked_range.duration.value

    # marker attributes
    # snap frame id and duration down to whole multiples of ten
    frameId = (frame_start // 10) * 10
    color = otio_marker.color
    name = otio_marker.name
    note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata)
    duration = (frame_duration // 10) * 10

    track_item.AddMarker(
        frameId,
        color,
        name,
        note,
        duration
    )


def _build_media_pool_folder(name):
    """
    Return the folder with the input name and set it as the current folder.

    It will create a new media bin if none is found in the root media bin.

    Args:
        name (str): name of bin

    Returns:
        resolve.api.MediaPool.Folder: the current media pool folder

    """

    root_folder = self.media_pool.GetRootFolder()
    sub_folders = root_folder.GetSubFolderList()
    testing_names = list()

    for subfolder in sub_folders:
        subf_name = subfolder.GetName()
        if name in subf_name:
            testing_names.append(subfolder)
        else:
            testing_names.append(False)

    matching = next((f for f in testing_names if f is not False), None)

    if not matching:
        new_folder = self.media_pool.AddSubFolder(root_folder, name)
        self.media_pool.SetCurrentFolder(new_folder)
    else:
        self.media_pool.SetCurrentFolder(matching)

    return self.media_pool.GetCurrentFolder()


def read_from_file(otio_file):
    otio_timeline = otio.adapters.read_from_file(otio_file)
    build_timeline(otio_timeline)
@@ -1,70 +0,0 @@
import re
import opentimelineio as otio


def timecode_to_frames(timecode, framerate):
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))


def frames_to_timecode(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_timecode(rt)


def frames_to_secons(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_seconds(rt)


def get_reformated_path(path, padded=True, first=False):
    """
    Return fixed python expression path

    Args:
        path (str): path url or simple file name

    Returns:
        str: string with reformatted path

    Example:
        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr

    """
    num_pattern = r"(\[\d+\-\d+\])"
    padding_pattern = r"(\d+)(?=-)"
    first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]")

    if "[" in path:
        padding = len(re.findall(padding_pattern, path).pop())
        if padded:
            path = re.sub(num_pattern, f"%0{padding}d", path)
        elif first:
            first_frame = re.findall(first_frame_pattern, path, flags=0)
            if len(first_frame) >= 1:
                first_frame = first_frame[0]
            path = re.sub(num_pattern, first_frame, path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path


def get_padding_from_path(path):
    """
    Return padding number from DaVinci Resolve sequence path style

    Args:
        path (str): path url or simple file name

    Returns:
        int: padding number

    Example:
        get_padding_from_path("plate.[0001-1008].exr") > 4

    """
    padding_pattern = "(\\d+)(?=-)"
    if "[" in path:
        return len(re.findall(padding_pattern, path).pop())

    return None
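Expected outputs of the path helpers, following the docstring examples above:

    assert get_reformated_path("plate.[0001-1008].exr") == "plate.%04d.exr"
    assert get_reformated_path(
        "plate.[0001-1008].exr", padded=False) == "plate.%d.exr"
    assert get_padding_from_path("plate.[0001-1008].exr") == 4
    assert get_padding_from_path("plate.mov") is None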
@@ -1,272 +0,0 @@
# from pprint import pformat
from ayon_resolve.api import plugin, lib
from ayon_resolve.api.lib import (
    get_video_track_names,
    create_bin,
)


class CreateShotClip(plugin.Creator):
    """Publishable clip"""

    label = "Create Publishable Clip"
    product_type = "clip"
    icon = "film"
    defaults = ["Main"]

    gui_tracks = get_video_track_names()
    gui_name = "AYON publish attributes creator"
    gui_info = "Define sequential rename and fill hierarchy data."
    gui_inputs = {
        "renameHierarchy": {
            "type": "section",
            "label": "Shot Hierarchy And Rename Settings",
            "target": "ui",
            "order": 0,
            "value": {
                "hierarchy": {
                    "value": "{folder}/{sequence}",
                    "type": "QLineEdit",
                    "label": "Shot Parent Hierarchy",
                    "target": "tag",
                    "toolTip": "Parent path of the shot root folder; template filled with the `Hierarchy Data` section",  # noqa
                    "order": 0},
                "clipRename": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Rename clips",
                    "target": "ui",
                    "toolTip": "Rename selected clips on the fly",  # noqa
                    "order": 1},
                "clipName": {
                    "value": "{sequence}{shot}",
                    "type": "QLineEdit",
                    "label": "Clip Name Template",
                    "target": "ui",
                    "toolTip": "Template for creating shot names, used for renaming (use rename: on)",  # noqa
                    "order": 2},
                "countFrom": {
                    "value": 10,
                    "type": "QSpinBox",
                    "label": "Count sequence from",
                    "target": "ui",
                    "toolTip": "Set where the sequence numbering starts from",  # noqa
                    "order": 3},
                "countSteps": {
                    "value": 10,
                    "type": "QSpinBox",
                    "label": "Stepping number",
                    "target": "ui",
                    "toolTip": "Number added with every new step",  # noqa
                    "order": 4},
            }
        },
        "hierarchyData": {
            "type": "dict",
            "label": "Shot Template Keywords",
            "target": "tag",
            "order": 1,
            "value": {
                "folder": {
                    "value": "shots",
                    "type": "QLineEdit",
                    "label": "{folder}",
                    "target": "tag",
                    "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 0},
                "episode": {
                    "value": "ep01",
                    "type": "QLineEdit",
                    "label": "{episode}",
                    "target": "tag",
                    "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 1},
                "sequence": {
                    "value": "sq01",
                    "type": "QLineEdit",
                    "label": "{sequence}",
                    "target": "tag",
                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 2},
                "track": {
                    "value": "{_track_}",
                    "type": "QLineEdit",
                    "label": "{track}",
                    "target": "tag",
                    "toolTip": "Name of track.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 3},
                "shot": {
                    "value": "sh###",
                    "type": "QLineEdit",
                    "label": "{shot}",
                    "target": "tag",
                    "toolTip": "Name of shot. `#` is converted to a padded number.\nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 4}
            }
        },
        "verticalSync": {
            "type": "section",
            "label": "Vertical Synchronization Of Attributes",
            "target": "ui",
            "order": 2,
            "value": {
                "vSyncOn": {
                    "value": True,
                    "type": "QCheckBox",
                    "label": "Enable Vertical Sync",
                    "target": "ui",
                    "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
                    "order": 0},
                "vSyncTrack": {
                    "value": gui_tracks,  # noqa
                    "type": "QComboBox",
                    "label": "Hero track",
                    "target": "ui",
                    "toolTip": "Select the driving track name which should be mastering all others",  # noqa
                    "order": 1
                }
            }
        },
        "publishSettings": {
            "type": "section",
            "label": "Publish Settings",
            "target": "ui",
            "order": 3,
            "value": {
                "productName": {
                    "value": ["<track_name>", "main", "bg", "fg",
                              "animatic"],
                    "type": "QComboBox",
                    "label": "Product Name",
                    "target": "ui",
                    "toolTip": "Choose the product name pattern; if <track_name> is selected, the name of the track layer will be used",  # noqa
                    "order": 0},
                "productType": {
                    "value": ["plate", "take"],
                    "type": "QComboBox",
                    "label": "Product type",
                    "target": "ui", "toolTip": "What this product is used for",  # noqa
                    "order": 1},
                "reviewTrack": {
                    "value": ["< none >"] + gui_tracks,
                    "type": "QComboBox",
                    "label": "Use Review Track",
                    "target": "ui",
                    "toolTip": "Generate preview videos on the fly; if `< none >` is defined nothing will be generated.",  # noqa
                    "order": 2},
                "audio": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Include audio",
                    "target": "tag",
                    "toolTip": "Process products with corresponding audio",  # noqa
                    "order": 3},
                "sourceResolution": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Source resolution",
                    "target": "tag",
                    "toolTip": "Is the resolution taken from the timeline or the source?",  # noqa
                    "order": 4},
            }
        },
        "shotAttr": {
            "type": "section",
            "label": "Shot Attributes",
            "target": "ui",
            "order": 4,
            "value": {
                "workfileFrameStart": {
                    "value": 1001,
                    "type": "QSpinBox",
                    "label": "Workfiles Start Frame",
                    "target": "tag",
                    "toolTip": "Set workfile starting frame number",  # noqa
                    "order": 0
                },
                "handleStart": {
                    "value": 0,
                    "type": "QSpinBox",
                    "label": "Handle start (head)",
                    "target": "tag",
                    "toolTip": "Handle at start of clip",  # noqa
                    "order": 1
                },
                "handleEnd": {
                    "value": 0,
                    "type": "QSpinBox",
                    "label": "Handle end (tail)",
                    "target": "tag",
                    "toolTip": "Handle at end of clip",  # noqa
                    "order": 2
                }
            }
        }
    }

    presets = None

    def process(self):
        # get key pairs from presets and match them to ui inputs
        for k, v in self.gui_inputs.items():
            if v["type"] in ("dict", "section"):
                # nested dictionary (only one level allowed
                # for sections and dict)
                for _k, _v in v["value"].items():
                    if self.presets.get(_k) is not None:
                        self.gui_inputs[k][
                            "value"][_k]["value"] = self.presets[_k]
            if self.presets.get(k):
                self.gui_inputs[k]["value"] = self.presets[k]

        # open widget for plugins inputs
        widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
        widget.exec_()

        if len(self.selected) < 1:
            return

        if not widget.result:
            print("Operation aborted")
            return

        self.rename_add = 0

        # get ui output for track name for vertical sync
        v_sync_track = widget.result["vSyncTrack"]["value"]

        # sort selected trackItems so items on the hero track come first
        sorted_selected_track_items = []
        unsorted_selected_track_items = []
        print("_____ selected ______")
        print(self.selected)
        for track_item_data in self.selected:
            if track_item_data["track"]["name"] in v_sync_track:
                sorted_selected_track_items.append(track_item_data)
            else:
                unsorted_selected_track_items.append(track_item_data)

        sorted_selected_track_items.extend(unsorted_selected_track_items)

        # sequence attrs
        sq_frame_start = self.timeline.GetStartFrame()
        sq_markers = self.timeline.GetMarkers()

        # create media bin for compound clips (trackItems)
        mp_folder = create_bin(self.timeline.GetName())

        kwargs = {
            "ui_inputs": widget.result,
            "avalon": self.data,
            "mp_folder": mp_folder,
            "sq_frame_start": sq_frame_start,
            "sq_markers": sq_markers
        }
        print(kwargs)
        for i, track_item_data in enumerate(sorted_selected_track_items):
            self.rename_index = i
            self.log.info(track_item_data)
            # convert track item to timeline media pool item
            track_item = plugin.PublishClip(
                self, track_item_data, **kwargs).convert()
            track_item.SetClipColor(lib.publish_clip_color)
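How the hierarchy template and the keyword values combine; a sketch of the string formatting only, using the default values from `gui_inputs` (the shot value is illustrative):

    hierarchy_template = "{folder}/{sequence}"
    hierarchy_data = {"folder": "shots", "episode": "ep01",
                      "sequence": "sq01", "shot": "sh010"}
    print(hierarchy_template.format(**hierarchy_data))  # shots/sq01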
@@ -1,31 +0,0 @@
from ayon_core.pipeline import (
    InventoryAction,
)
from ayon_core.pipeline.load.utils import remove_container


class RemoveUnusedMedia(InventoryAction):

    label = "Remove Unused Selected Media"
    icon = "trash"

    @staticmethod
    def is_compatible(container):
        return (
            container.get("loader") == "LoadMedia"
        )

    def process(self, containers):
        any_removed = False
        for container in containers:
            media_pool_item = container["_item"]
            usage = int(media_pool_item.GetClipProperty("Usage"))
            name = media_pool_item.GetName()
            if usage == 0:
                print(f"Removing {name}")
                remove_container(container)
                any_removed = True
            else:
                print(f"Keeping {name} with usage: {usage}")

        return any_removed
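How an inventory action like this is typically exercised: compatible containers are filtered first, then processed. The container dicts below are illustrative placeholders, not real entities:

    containers = [
        {"loader": "LoadMedia", "_item": media_pool_item},  # illustrative
        {"loader": "LoadClip", "_item": other_item},        # illustrative
    ]
    action = RemoveUnusedMedia()
    compatible = [c for c in containers if action.is_compatible(c)]
    action.process(compatible)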
@@ -1,168 +0,0 @@
import ayon_api

from ayon_resolve.api import lib, plugin
from ayon_resolve.api.pipeline import (
    containerise,
    update_container,
)
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClip(plugin.TimelineItemLoader):
    """Load a product to the timeline as a clip.

    Places the clip on the timeline at its asset origin timings, collected
    during conforming to the project.
    """

    product_types = {"render2d", "source", "plate", "render", "review"}

    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
    icon = "code-fork"
    color = "orange"

    # for loader multiselection
    timeline = None

    # presets
    clip_color_last = "Olive"
    clip_color = "Orange"

    def load(self, context, name, namespace, options):

        # load clip to timeline and get main variables
        files = plugin.get_representation_files(context["representation"])

        timeline_item = plugin.ClipLoader(
            self, context, **options).load(files)
        namespace = namespace or timeline_item.GetName()

        # update color of clip regarding the version order
        self.set_item_color(
            context["project"]["name"],
            timeline_item,
            context["version"]
        )

        data_imprint = self.get_tag_data(context, name, namespace)
        return containerise(
            timeline_item,
            name, namespace, context,
            self.__class__.__name__,
            data_imprint)

    def switch(self, container, context):
        self.update(container, context)

    def update(self, container, context):
        """Update previously loaded clips."""

        repre_entity = context["representation"]
        name = container['name']
        namespace = container['namespace']
        timeline_item = container["_timeline_item"]

        media_pool_item = timeline_item.GetMediaPoolItem()

        files = plugin.get_representation_files(repre_entity)

        loader = plugin.ClipLoader(self, context)
        timeline_item = loader.update(timeline_item, files)

        # update color of clip regarding the version order
        self.set_item_color(
            context["project"]["name"],
            timeline_item,
            context["version"]
        )

        # if original media pool item has no remaining usages left
        # remove it from the media pool
        if int(media_pool_item.GetClipProperty("Usage")) == 0:
            lib.remove_media_pool_item(media_pool_item)

        data_imprint = self.get_tag_data(context, name, namespace)
        return update_container(timeline_item, data_imprint)

    def get_tag_data(self, context, name, namespace):
        """Return data to be imprinted on the timeline item marker"""

        repre_entity = context["representation"]
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        colorspace = version_attributes.get("colorSpace", None)
        object_name = "{}_{}".format(name, namespace)

        # add additional metadata from the version to imprint Avalon knob
        # move all version data keys to tag data
        add_version_data_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]
        data = {
            key: version_attributes.get(key, "None")
            for key in add_version_data_keys
        }

        # add variables related to version context
        data.update({
            "representation": repre_entity["id"],
            "version": version_entity["version"],
            "colorspace": colorspace,
            "objectName": object_name
        })
        return data

    @classmethod
    def set_item_color(cls, project_name, timeline_item, version_entity):
        """Color timeline item based on whether it is outdated or latest"""
        # get all versions in list
        last_version_entity = ayon_api.get_last_version_by_product_id(
            project_name,
            version_entity["productId"],
            fields=["name"]
        )
        last_version_id = None
        if last_version_entity:
            last_version_id = last_version_entity["id"]

        # set clip colour
        if version_entity["id"] == last_version_id:
            timeline_item.SetClipColor(cls.clip_color_last)
        else:
            timeline_item.SetClipColor(cls.clip_color)

    def remove(self, container):
        timeline_item = container["_timeline_item"]
        media_pool_item = timeline_item.GetMediaPoolItem()
        timeline = lib.get_current_timeline()

        # The DeleteClips function was added in Resolve 18.5+;
        # by checking for None we can detect whether the
        # function exists in this Resolve version
        if timeline.DeleteClips is not None:
            timeline.DeleteClips([timeline_item])
        else:
            # Resolve versions older than 18.5 can't delete clips via API
            # so all we can do is just remove the pype marker to 'untag' it
            if lib.get_pype_marker(timeline_item):
                # Note: We must call `get_pype_marker` because
                # `delete_pype_marker` uses a global variable set by
                # `get_pype_marker` to delete the right marker
                # TODO: Improve code to avoid the global `temp_marker_frame`
                lib.delete_pype_marker(timeline_item)

        # if media pool item has no remaining usages left
        # remove it from the media pool
        if int(media_pool_item.GetClipProperty("Usage")) == 0:
            lib.remove_media_pool_item(media_pool_item)
@@ -1,52 +0,0 @@
from pathlib import Path

from ayon_core.pipeline import (
    load,
    get_representation_path,
)

from ayon_resolve.api import lib


class LoadEditorialPackage(load.LoaderPlugin):
    """Load editorial package to timeline.

    Loads the timeline structure from an OTIO file, including its
    media sources.
    """

    product_types = {"editorial_pkg"}

    representations = {"*"}
    extensions = {"otio"}

    label = "Load as Timeline"
    order = -10
    icon = "ei.align-left"
    color = "orange"

    def load(self, context, name, namespace, data):
        files = get_representation_path(context["representation"])

        search_folder_path = Path(files).parent / "resources"

        project = lib.get_current_project()
        media_pool = project.GetMediaPool()

        # create versioned bin for editorial package
        version_name = context["version"]["name"]
        bin_name = f"{name}_{version_name}"
        lib.create_bin(bin_name)

        import_options = {
            "timelineName": "Editorial Package Timeline",
            "importSourceClips": True,
            "sourceClipsPath": search_folder_path.as_posix(),
        }

        timeline = media_pool.ImportTimelineFromFile(files, import_options)
        print("Timeline imported: ", timeline)

    def update(self, container, context):
        # TODO: implement update method in future
        pass
@ -1,533 +0,0 @@
|
|||
import json
|
||||
import contextlib
|
||||
from pathlib import Path
|
||||
from collections import defaultdict
|
||||
from typing import Union, List, Optional, TypedDict, Tuple
|
||||
|
||||
from ayon_api import version_is_latest
|
||||
from ayon_core.lib import StringTemplate
|
||||
from ayon_core.pipeline.colorspace import get_remapped_colorspace_to_native
|
||||
from ayon_core.pipeline import (
|
||||
Anatomy,
|
||||
LoaderPlugin,
|
||||
get_representation_path,
|
||||
registered_host
|
||||
)
|
||||
from ayon_core.pipeline.load import get_representation_path_with_anatomy
|
||||
from ayon_core.lib.transcoding import (
|
||||
VIDEO_EXTENSIONS,
|
||||
IMAGE_EXTENSIONS
|
||||
)
|
||||
from ayon_core.lib import BoolDef
|
||||
from ayon_resolve.api import lib
|
||||
from ayon_resolve.api.pipeline import AVALON_CONTAINER_ID
|
||||
|
||||
|
||||
FRAME_SPLITTER = "__frame_splitter__"
|
||||
|
||||
|
||||
class MetadataEntry(TypedDict):
|
||||
"""Metadata entry is dict with {"name": "key", "value: "value"}"""
|
||||
name: str
|
||||
value: str
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def project_color_science_mode(project=None, mode="davinciYRGBColorManagedv2"):
|
||||
"""Set project color science mode during context.
|
||||
|
||||
This is especially useful as context for setting the colorspace for media
|
||||
pool items, because when Resolve is not set to `davinciYRGBColorManagedv2`
|
||||
it fails to set its "Input Color Space" clip property even though it is
|
||||
accessible and settable via the Resolve User Interface.
|
||||
|
||||
Args
|
||||
project (Project): The active Resolve Project.
|
||||
mode (Optional[str]): The color science mode to apply during the
|
||||
context. Defaults to 'davinciYRGBColorManagedv2'
|
||||
|
||||
See Also:
|
||||
https://forum.blackmagicdesign.com/viewtopic.php?f=21&t=197441
|
||||
"""
|
||||
|
||||
if project is None:
|
||||
project = lib.get_current_project()
|
||||
|
||||
original_mode = project.GetSetting("colorScienceMode")
|
||||
if original_mode != mode:
|
||||
project.SetSetting("colorScienceMode", mode)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
if project.GetSetting("colorScienceMode") != original_mode:
|
||||
project.SetSetting("colorScienceMode", original_mode)
|
||||
|
||||
|
||||
def set_colorspace(media_pool_item,
|
||||
colorspace,
|
||||
mode="davinciYRGBColorManagedv2"):
|
||||
"""Set MediaPoolItem colorspace.
|
||||
|
||||
This implements a workaround that you cannot set the input colorspace
|
||||
unless the Resolve project's color science mode is set to
|
||||
`davinciYRGBColorManagedv2`.
|
||||
|
||||
Args:
|
||||
media_pool_item (MediaPoolItem): The media pool item.
|
||||
colorspace (str): The colorspace to apply.
|
||||
mode (Optional[str]): The Resolve project color science mode to be in
|
||||
while setting the colorspace.
|
||||
Defaults to 'davinciYRGBColorManagedv2'
|
||||
|
||||
Returns:
|
||||
bool: Whether applying the colorspace succeeded.
|
||||
"""
|
||||
with project_color_science_mode(mode=mode):
|
||||
return media_pool_item.SetClipProperty("Input Color Space", colorspace)
|
||||
|
||||
|
||||
def find_clip_usage(media_pool_item, project=None):
|
||||
"""Return all Timeline Items in the project using the Media Pool Item.
|
||||
|
||||
Each entry in the list is a tuple of Timeline and TimelineItem so that
|
||||
it's easy to know which Timeline the TimelineItem belongs to.
|
||||
|
||||
Arguments:
|
||||
media_pool_item (MediaPoolItem): The Media Pool Item to search for.
|
||||
project (Project): The resolve project the media pool item resides in.
|
||||
|
||||
Returns:
|
||||
List[Tuple[Timeline, TimelineItem]]: A 2-tuple of a timeline with
|
||||
the timeline item.
|
||||
|
||||
"""
|
||||
usage = int(media_pool_item.GetClipProperty("Usage"))
|
||||
if not usage:
|
||||
return []
|
||||
|
||||
if project is None:
|
||||
project = lib.get_current_project()
|
||||
|
||||
matching_items = []
|
||||
unique_id = media_pool_item.GetUniqueId()
|
||||
for timeline_idx in range(project.GetTimelineCount()):
|
||||
timeline = project.GetTimelineByIndex(timeline_idx + 1)
|
||||
|
||||
# Consider audio and video tracks
|
||||
for track_type in ["video", "audio"]:
|
||||
for track_idx in range(timeline.GetTrackCount(track_type)):
|
||||
timeline_items = timeline.GetItemListInTrack(track_type,
|
||||
track_idx + 1)
|
||||
for timeline_item in timeline_items:
|
||||
timeline_item_mpi = timeline_item.GetMediaPoolItem()
|
||||
if not timeline_item_mpi:
|
||||
continue
|
||||
|
||||
if timeline_item_mpi.GetUniqueId() == unique_id:
|
||||
matching_items.append((timeline, timeline_item))
|
||||
usage -= 1
|
||||
if usage <= 0:
|
||||
# If there should be no usage left after this found
|
||||
# entry we return early
|
||||
return matching_items
|
||||
|
||||
return matching_items


class LoadMedia(LoaderPlugin):
    """Load product as media pool item."""

    product_types = {"render2d", "source", "plate", "render", "review"}

    representations = ["*"]
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load media"
    order = -20
    icon = "code-fork"
    color = "orange"

    options = [
        BoolDef(
            "load_to_timeline",
            label="Load to timeline",
            default=True,
            tooltip="Whether to automatically add it to the current "
                    "timeline on load"
        ),
        BoolDef(
            "load_once",
            label="Re-use existing",
            default=True,
            tooltip="When enabled - if this particular version is already "
                    "loaded it will not be loaded again but will be re-used."
        )
    ]

    # for loader multiselection
    timeline = None

    # presets
    clip_color_last = "Olive"
    clip_color_old = "Orange"

    media_pool_bin_path = "Loader/{folder[path]}"

    metadata: List[MetadataEntry] = []

    # cached on apply settings
    _host_imageio_settings = None

    @classmethod
    def apply_settings(cls, project_settings):
        super(LoadMedia, cls).apply_settings(project_settings)
        cls._host_imageio_settings = project_settings["resolve"]["imageio"]

    def load(self, context, name, namespace, options):

        # For loading multiselection, we store timeline before first load
        # because the current timeline can change with the imported media.
        if self.timeline is None:
            self.timeline = lib.get_current_timeline()

        representation = context["representation"]
        self._project_name = context["project"]["name"]

        project = lib.get_current_project()
        media_pool = project.GetMediaPool()

        # Allow to use an existing media pool item and re-use it
        item = None
        if options.get("load_once", True):
            host = registered_host()
            repre_id = context["representation"]["id"]
            for container in host.ls():
                if container["representation"] != repre_id:
                    continue

                if container["loader"] != self.__class__.__name__:
                    continue

                print(f"Re-using existing container: {container}")
                item = container["_item"]

        if item is None:
            item = self._import_media_to_bin(
                context, media_pool, representation)

        # Always update clip color - even if re-using existing clip
        color = self.get_item_color(context)
        item.SetClipColor(color)

        if options.get("load_to_timeline", True):
            timeline = options.get("timeline", self.timeline)
            if timeline:
                # Add media to active timeline
                lib.create_timeline_item(
                    media_pool_item=item,
                    timeline=timeline
                )

    def _import_media_to_bin(
        self, context, media_pool, representation
    ):
        """Import media to Resolve Media Pool.

        Also create a bin if `media_pool_bin_path` is set.

        Args:
            context (dict): The context dictionary.
            media_pool (resolve.MediaPool): The Resolve Media Pool.
            representation (dict): The representation data.

        Returns:
            resolve.MediaPoolItem: The imported media pool item.
        """
        # Create or set the bin folder and add the media in there.
        # If bin path is not set we just add into the current active bin.
        if self.media_pool_bin_path:
            media_pool_bin_path = StringTemplate(
                self.media_pool_bin_path).format_strict(context)

            folder = lib.create_bin(
                # double slashes would create unconnected folders
                name=media_pool_bin_path.replace("//", "/"),
                root=media_pool.GetRootFolder(),
                set_as_current=False
            )
            media_pool.SetCurrentFolder(folder)

        # Import media
        # Resolve API: ImportMedia function requires a list of dictionaries
        # with keys "FilePath", "StartIndex" and "EndIndex" for sequences,
        # but only a string with the absolute path for single files.
        is_sequence, file_info = self._get_file_info(context)
        items = (
            media_pool.ImportMedia([file_info])
            if is_sequence
            else media_pool.ImportMedia([file_info["FilePath"]])
        )
        assert len(items) == 1, "Must import only one media item"

        result = items[0]

        self._set_metadata(result, context)
        self._set_colorspace_from_representation(result, representation)

        data = self._get_container_data(context)

        # Add containerise data only needed on first load
        data.update({
            "schema": "openpype:container-2.0",
            "id": AVALON_CONTAINER_ID,
            "loader": str(self.__class__.__name__),
        })

        result.SetMetadata(lib.pype_tag_name, json.dumps(data))

        return result

    def switch(self, container, context):
        self.update(container, context)

    def update(self, container, context):
        # Update MediaPoolItem filepath and metadata
        item = container["_item"]

        # Get the existing metadata before we update, because the
        # metadata gets removed
        data = json.loads(item.GetMetadata(lib.pype_tag_name))

        # Get metadata to preserve after the clip replacement
        # TODO: Maybe preserve more, like LUT, Alpha Mode, Input Sizing Preset
        colorspace_before = item.GetClipProperty("Input Color Space")

        # Update path
        path = get_representation_path(context["representation"])
        success = item.ReplaceClip(path)
        if not success:
            raise RuntimeError(
                f"Failed to replace media pool item clip to filepath: {path}"
            )

        # Update the metadata
        update_data = self._get_container_data(context)
        data.update(update_data)
        item.SetMetadata(lib.pype_tag_name, json.dumps(data))

        self._set_metadata(media_pool_item=item, context=context)
        self._set_colorspace_from_representation(
            item,
            representation=context["representation"]
        )

        # If no specific colorspace is set then we want to preserve the
        # colorspace a user might have set before the clip replacement
        if (
            item.GetClipProperty("Input Color Space") == "Project"
            and colorspace_before != "Project"
        ):
            result = set_colorspace(item, colorspace_before)
            if not result:
                self.log.warning(
                    f"Failed to re-apply colorspace: {colorspace_before}."
                )

        # Update the clip color
        color = self.get_item_color(context)
        item.SetClipColor(color)

    def remove(self, container):
        # Remove MediaPoolItem entry
        project = lib.get_current_project()
        media_pool = project.GetMediaPool()
        item = container["_item"]

        # Delete any usages of the media pool item so there's no trail
        # left in existing timelines. Currently only the media pool item
        # gets removed, which fits the Resolve workflow but can be
        # confusing to artists.
        usage = find_clip_usage(media_pool_item=item, project=project)
        if usage:
            # Group all timeline items per timeline, so we can delete the
            # clips in the timeline at once. The Resolve objects are not
            # hashable, so we store them in the dict by id.
            usage_by_timeline = defaultdict(list)
            timeline_by_id = {}
            for timeline, timeline_item in usage:
                timeline_id = timeline.GetUniqueId()
                timeline_by_id[timeline_id] = timeline
                usage_by_timeline[timeline_id].append(timeline_item)

            for timeline_id, timeline_items in usage_by_timeline.items():
                timeline = timeline_by_id[timeline_id]
                timeline.DeleteClips(timeline_items)

        # Delete the media pool item
        media_pool.DeleteClips([item])

    def _get_container_data(self, context: dict) -> dict:
        """Return metadata related to the representation and version."""

        # add additional metadata from the version to imprint AYON knob
        version = context["version"]
        data = {}

        # version.attrib
        for key in [
            "frameStart", "frameEnd",
            "handleStart", "handleEnd",
            "source", "fps", "colorSpace"
        ]:
            data[key] = version["attrib"][key]

        # version.data
        for key in ["author"]:
            data[key] = version["data"][key]

        # add variables related to version context
        data.update({
            "representation": context["representation"]["id"],
            "version": version["name"],
        })

        return data

    @classmethod
    def get_item_color(cls, context: dict) -> str:
        """Return item color name.

        Coloring depends on whether representation is the latest version.
        """
        # Compare version with last version to set the clip color
        if version_is_latest(project_name=context["project"]["name"],
                             version_id=context["version"]["id"]):
            return cls.clip_color_last
        else:
            return cls.clip_color_old

    def _set_metadata(self, media_pool_item, context: dict):
        """Set Media Pool Item Clip Properties"""

        # Set more clip metadata based on the loaded clip's context
        for meta_item in self.metadata:
            clip_property = meta_item["name"]
            value = meta_item["value"]
            value_formatted = StringTemplate(value).format_strict(context)
            media_pool_item.SetClipProperty(clip_property, value_formatted)

    def _get_file_info(self, context: dict) -> Tuple[bool, Union[str, dict]]:
        """Return file info for Resolve ImportMedia.

        Args:
            context (dict): The context dictionary.

        Returns:
            Tuple[bool, Union[str, dict]]: A tuple of whether the file is a
                sequence and the file info dictionary.
        """

        representation = context["representation"]
        anatomy = Anatomy(self._project_name)

        # Get path to representation with correct frame number
        repre_path = get_representation_path_with_anatomy(
            representation, anatomy)

        first_frame = representation["context"].get("frame")

        is_sequence = False
        # is not a sequence
        if first_frame is None:
            return (
                is_sequence, {"FilePath": repre_path}
            )

        # This is a sequence
        is_sequence = True
        repre_files = [
            file["path"].format(root=anatomy.roots)
            for file in representation["files"]
        ]

        # Change frame in representation context to get path with frame
        # splitter.
        representation["context"]["frame"] = FRAME_SPLITTER
        frame_repre_path = get_representation_path_with_anatomy(
            representation, anatomy
        )
        frame_repre_path = Path(frame_repre_path)
        repre_dir, repre_filename = (
            frame_repre_path.parent, frame_repre_path.name)
        # Get sequence prefix and suffix
        file_prefix, file_suffix = repre_filename.split(FRAME_SPLITTER)
        # Get frame number from the filename as string to get frame padding
        frame_str = Path(repre_path).name[len(file_prefix):-len(file_suffix)]
        frame_padding = len(frame_str)

        file_name = f"{file_prefix}%0{frame_padding}d{file_suffix}"

        abs_filepath = Path(repre_dir, file_name)

        start_index = int(first_frame)
        end_index = int(int(first_frame) + len(repre_files) - 1)

        # See Resolve API, to import for example clip "file_[001-100].dpx":
        # ImportMedia([{"FilePath":"file_%03d.dpx",
        #               "StartIndex":1,
        #               "EndIndex":100}])
        return (
            is_sequence,
            {
                "FilePath": abs_filepath.as_posix(),
                "StartIndex": start_index,
                "EndIndex": end_index,
            }
        )
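
    # For illustration (hypothetical values): if the first file of the
    # sequence resolves to ".../beauty.1001.exr" and the representation
    # lists 25 files, the info returned for ImportMedia would be:
    #     (True, {"FilePath": ".../beauty.%04d.exr",
    #             "StartIndex": 1001,
    #             "EndIndex": 1025})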

    def _get_colorspace(self, representation: dict) -> Optional[str]:
        """Return Resolve native colorspace from OCIO colorspace data.

        Returns:
            Optional[str]: The Resolve native colorspace name, if any mapped.
        """

        data = representation.get("data", {}).get("colorspaceData", {})
        if not data:
            return

        ocio_colorspace = data["colorspace"]
        if not ocio_colorspace:
            return

        resolve_colorspace = get_remapped_colorspace_to_native(
            ocio_colorspace_name=ocio_colorspace,
            host_name="resolve",
            imageio_host_settings=self._host_imageio_settings
        )
        if resolve_colorspace:
            return resolve_colorspace
        else:
            self.log.warning(
                f"No mapping from OCIO colorspace '{ocio_colorspace}' "
                "found to a Resolve colorspace. "
                "Ignoring colorspace."
            )

    def _set_colorspace_from_representation(
            self, media_pool_item, representation: dict):
        """Set the colorspace for the media pool item.

        Args:
            media_pool_item (MediaPoolItem): The media pool item.
            representation (dict): The representation data.
        """
        # Set the Resolve Input Color Space for the media.
        colorspace = self._get_colorspace(representation)
        if colorspace:
            result = set_colorspace(media_pool_item, colorspace)
            if not result:
                self.log.warning(
                    f"Failed to apply colorspace: {colorspace}."
                )
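
A hedged sketch of the container payload imprinted on the media pool item on
first load (all values below are hypothetical; the keys follow
`_get_container_data` plus the containerise keys added in
`_import_media_to_bin`):

    {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "loader": "LoadMedia",
        "representation": "<representation-id>",
        "version": 2,
        "frameStart": 1001,
        "frameEnd": 1100,
        "handleStart": 10,
        "handleEnd": 10,
        "source": "<source path>",
        "fps": 25.0,
        "colorSpace": "ACES - ACEScg",
        "author": "artist"
    }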

@@ -1,52 +0,0 @@

import os
import pyblish.api

from ayon_core.pipeline import publish
from ayon_resolve.api.lib import get_project_manager


class ExtractWorkfile(publish.Extractor):
    """Extractor for exporting the DRP workfile representation."""

    label = "Extract Workfile"
    order = pyblish.api.ExtractorOrder
    families = ["workfile"]
    hosts = ["resolve"]

    def process(self, instance):
        # create representation data
        if "representations" not in instance.data:
            instance.data["representations"] = []

        name = instance.data["name"]
        project = instance.context.data["activeProject"]
        staging_dir = self.staging_dir(instance)

        resolve_workfile_ext = ".drp"
        drp_file_name = name + resolve_workfile_ext

        drp_file_path = os.path.normpath(
            os.path.join(staging_dir, drp_file_name))

        # write out the drp workfile
        get_project_manager().ExportProject(
            project.GetName(), drp_file_path)

        # create drp workfile representation
        representation_drp = {
            "name": resolve_workfile_ext[1:],
            "ext": resolve_workfile_ext[1:],
            "files": drp_file_name,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation_drp)

        # add sourcePath attribute to instance
        if not instance.data.get("sourcePath"):
            instance.data["sourcePath"] = drp_file_path

        self.log.info("Added Resolve file representation: {}".format(
            representation_drp))

@@ -1,178 +0,0 @@

from pprint import pformat

import pyblish

from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID
from ayon_resolve.api.lib import (
    get_current_timeline_items,
    get_timeline_item_pype_tag,
    publish_clip_color,
    get_publish_attribute,
    get_otio_clip_instance_data,
)


class PrecollectInstances(pyblish.api.ContextPlugin):
    """Collect all Track items selection."""

    order = pyblish.api.CollectorOrder - 0.49
    label = "Precollect Instances"
    hosts = ["resolve"]

    def process(self, context):
        otio_timeline = context.data["otioTimeline"]
        selected_timeline_items = get_current_timeline_items(
            filter=True, selecting_color=publish_clip_color)

        self.log.info(
            "Processing enabled track items: {}".format(
                len(selected_timeline_items)))

        for timeline_item_data in selected_timeline_items:

            data = {}
            timeline_item = timeline_item_data["clip"]["item"]

            # get pype tag data
            tag_data = get_timeline_item_pype_tag(timeline_item)
            self.log.debug(f"__ tag_data: {pformat(tag_data)}")

            if not tag_data:
                continue

            if tag_data.get("id") not in {
                AYON_INSTANCE_ID, AVALON_INSTANCE_ID
            }:
                continue

            media_pool_item = timeline_item.GetMediaPoolItem()
            source_duration = int(media_pool_item.GetClipProperty("Frames"))

            # solve handles length, clamped to the available source range
            handle_start = min(
                tag_data["handleStart"], int(timeline_item.GetLeftOffset()))
            handle_end = min(
                tag_data["handleEnd"], int(
                    source_duration - timeline_item.GetRightOffset()))

            self.log.debug(
                "Handles: <{}, {}>".format(handle_start, handle_end))

            # add tag data to instance data
            data.update({
                k: v for k, v in tag_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            folder_path = tag_data["folder_path"]
            # Backward compatibility fix of 'entity_type' -> 'folder_type'
            if "parents" in data:
                for parent in data["parents"]:
                    if "entity_type" in parent:
                        parent["folder_type"] = parent.pop("entity_type")

            # TODO: remove backward compatibility
            product_name = tag_data.get("productName")
            if product_name is None:
                # backward compatibility: subset -> productName
                product_name = tag_data.get("subset")

            # backward compatibility: product_name should not be missing
            if not product_name:
                self.log.error(
                    "Product name is not defined for: {}".format(folder_path))

            # TODO: remove backward compatibility
            product_type = tag_data.get("productType")
            if product_type is None:
                # backward compatibility: family -> productType
                product_type = tag_data.get("family")

            # backward compatibility: product_type should not be missing
            if not product_type:
                self.log.error(
                    "Product type is not defined for: {}".format(folder_path))

            data.update({
                "name": "{}_{}".format(folder_path, product_name),
                "label": "{} {}".format(folder_path, product_name),
                "folderPath": folder_path,
                "item": timeline_item,
                "publish": get_publish_attribute(timeline_item),
                "fps": context.data["fps"],
                "handleStart": handle_start,
                "handleEnd": handle_end,
                "newHierarchyIntegration": True,
                # Backwards compatible (Deprecated since 24/06/06)
                "newAssetPublishing": True,
                "families": ["clip"],
                "productType": product_type,
                "productName": product_name,
                "family": product_type
            })

            # otio clip data
            otio_data = get_otio_clip_instance_data(
                otio_timeline, timeline_item_data) or {}
            data.update(otio_data)

            # add resolution
            self.get_resolution_to_data(data, context)

            # create instance
            instance = context.create_instance(**data)

            # create shot instance for shot attributes create/update
            self.create_shot_instance(context, timeline_item, **data)

            self.log.info("Creating instance: {}".format(instance))
            self.log.debug(
                "_ instance.data: {}".format(pformat(instance.data)))

    def get_resolution_to_data(self, data, context):
        assert data.get("otioClip"), "Missing `otioClip` data"

        # solve source resolution option
        if data.get("sourceResolution", None):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata["width"],
                "resolutionHeight": otio_clip_metadata["height"],
                "pixelAspect": otio_clip_metadata["pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["width"],
                "resolutionHeight": otio_tl_metadata["height"],
                "pixelAspect": otio_tl_metadata["pixelAspect"]
            })

    def create_shot_instance(self, context, timeline_item, **data):
        hero_track = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")

        if not hero_track:
            return

        if not hierarchy_data:
            return

        folder_path = data["folderPath"]
        product_name = "shotMain"

        # insert the product type into families
        product_type = "shot"

        data.update({
            "name": "{}_{}".format(folder_path, product_name),
            "label": "{} {}".format(folder_path, product_name),
            "folderPath": folder_path,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "publish": get_publish_attribute(timeline_item)
        })

        context.create_instance(**data)

@@ -1,54 +0,0 @@

import pyblish.api
from pprint import pformat

from ayon_core.pipeline import get_current_folder_path

from ayon_resolve import api as rapi
from ayon_resolve.otio import davinci_export


class PrecollectWorkfile(pyblish.api.ContextPlugin):
    """Precollect the current working file into context"""

    label = "Precollect Workfile"
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        current_folder_path = get_current_folder_path()
        folder_name = current_folder_path.split("/")[-1]

        product_name = "workfileMain"
        project = rapi.get_current_project()
        fps = project.GetSetting("timelineFrameRate")
        video_tracks = rapi.get_video_track_names()

        # adding otio timeline to context
        otio_timeline = davinci_export.create_otio_timeline(project)

        instance_data = {
            "name": "{}_{}".format(folder_name, product_name),
            "label": "{} {}".format(current_folder_path, product_name),
            "item": project,
            "folderPath": current_folder_path,
            "productName": product_name,
            "productType": "workfile",
            "family": "workfile",
            "families": []
        }

        # create instance with workfile
        instance = context.create_instance(**instance_data)

        # update context with main project attributes
        context_data = {
            "activeProject": project,
            "otioTimeline": otio_timeline,
            "videoTracks": video_tracks,
            "currentFile": project.GetName(),
            "fps": fps,
        }
        context.data.update(context_data)

        self.log.info("Creating instance: {}".format(instance))
        self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
        self.log.debug("__ context_data: {}".format(pformat(context_data)))

@@ -1,70 +0,0 @@

"""This script is used as a startup script in Resolve through a .scriptlib file

It triggers directly after the launch of Resolve and it's recommended to keep
it optimized for fast performance since the Resolve UI is actually interactive
while this is running. There is nothing ensuring the user isn't continuing
manually before any of the logic here runs, so we also try to delay any
imports as much as possible.

This code runs in a separate process to the main Resolve process.

"""
import os
from ayon_core.lib import Logger
import ayon_resolve.api

log = Logger.get_logger(__name__)


def ensure_installed_host():
    """Install the Resolve host and return the registered host.

    This function can be called multiple times without triggering an
    additional install.
    """
    from ayon_core.pipeline import install_host, registered_host
    host = registered_host()
    if host:
        return host

    host = ayon_resolve.api.ResolveHost()
    install_host(host)
    return registered_host()


def launch_menu():
    print("Launching Resolve AYON menu..")
    ensure_installed_host()
    ayon_resolve.api.launch_ayon_menu()


def open_workfile(path):
    # Avoid the need to "install" the host
    host = ensure_installed_host()
    host.open_workfile(path)


def main():
    # Open last workfile
    workfile_path = os.environ.get("AYON_RESOLVE_OPEN_ON_LAUNCH")

    if workfile_path and os.path.exists(workfile_path):
        log.info(f"Opening last workfile: {workfile_path}")
        open_workfile(workfile_path)
    else:
        log.info("No last workfile set to open. Skipping..")

    # Launch AYON menu
    from ayon_core.settings import get_project_settings
    from ayon_core.pipeline.context_tools import get_current_project_name
    project_name = get_current_project_name()
    log.info(f"Current project name in context: {project_name}")

    settings = get_project_settings(project_name)
    if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True):
        log.info("Launching AYON menu..")
        launch_menu()


if __name__ == "__main__":
    main()
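
A minimal sketch of how a launcher process could drive this startup hook
(the environment variable name comes from `main()` above; the Resolve binary
path and workfile path are hypothetical):

    import os
    import subprocess

    env = os.environ.copy()
    env["AYON_RESOLVE_OPEN_ON_LAUNCH"] = "/path/to/last_workfile.drp"
    # Start Resolve with the patched environment; the .scriptlib hook then
    # runs the startup script above, which opens the workfile and launches
    # the AYON menu when enabled in settings.
    subprocess.Popen(["/opt/resolve/bin/resolve"], env=env)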

@@ -1,22 +0,0 @@

import os
import sys

from ayon_core.pipeline import install_host
from ayon_core.lib import Logger

log = Logger.get_logger(__name__)


def main(env):
    from ayon_resolve.api import ResolveHost, launch_ayon_menu

    # activate resolve from openpype
    host = ResolveHost()
    install_host(host)

    launch_ayon_menu()
    # Return truthy so the exit code below reports success
    return True


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))

@@ -1,21 +0,0 @@

-- Run OpenPype's Python launch script for Resolve
function file_exists(name)
    local f = io.open(name, "r")
    return f ~= nil and io.close(f)
end


ayon_startup_script = os.getenv("AYON_RESOLVE_STARTUP_SCRIPT")
if ayon_startup_script ~= nil then
    script = fusion:MapPath(ayon_startup_script)

    if file_exists(script) then
        -- We must use RunScript to ensure it runs in a separate
        -- process to Resolve itself to avoid a deadlock for
        -- certain imports of OpenPype libraries or Qt
        print("Running launch script: " .. script)
        fusion:RunScript(script)
    else
        print("Launch script not found at: " .. script)
    end
end

@@ -1,83 +0,0 @@

#!/usr/bin/env python
import os
from ayon_resolve.otio import davinci_export as otio_export

resolve = bmd.scriptapp("Resolve")  # noqa
fu = resolve.Fusion()

ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)  # noqa


title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
    {
        "WindowTitle": "Export OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "exportfilebttn",
                        "Text": "Select Destination",
                        "Weight": 1.25,
                        "ToolTip": "Choose where to save the otio",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "exportbttn",
                        "Text": "Export",
                        "Weight": 2,
                        "ToolTip": "Export the current timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()


def _close_window(event):
    disp.ExitLoop()


def _export_button(event):
    pm = resolve.GetProjectManager()
    project = pm.GetCurrentProject()
    timeline = project.GetCurrentTimeline()
    otio_timeline = otio_export.create_otio_timeline(project)
    otio_path = os.path.join(
        itm["exportfilebttn"].Text,
        timeline.GetName() + ".otio")
    print(otio_path)
    otio_export.write_to_file(
        otio_timeline,
        otio_path)
    _close_window(None)


def _export_file_pressed(event):
    selected_path = fu.RequestDir(os.path.expanduser("~/Documents"))
    itm["exportfilebttn"].Text = selected_path


dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

@@ -1,72 +0,0 @@

#!/usr/bin/env python
import os
from ayon_resolve.otio import davinci_import as otio_import

resolve = bmd.scriptapp("Resolve")  # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)  # noqa


title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
    {
        "WindowTitle": "Import OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "importOTIOfileButton",
                        "Text": "Select OTIO File Path",
                        "Weight": 1.25,
                        "ToolTip": "Choose otio file to import from",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "importButton",
                        "Text": "Import",
                        "Weight": 2,
                        "ToolTip": "Import otio to new timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()


def _close_window(event):
    disp.ExitLoop()


def _import_button(event):
    otio_import.read_from_file(itm["importOTIOfileButton"].Text)
    _close_window(None)


def _import_file_pressed(event):
    selected_path = fu.RequestFile(os.path.expanduser("~/Documents"))
    itm["importOTIOfileButton"].Text = selected_path


dlg.On.OTIOwin.Close = _close_window
dlg.On.importOTIOfileButton.Clicked = _import_file_pressed
dlg.On.importButton.Clicked = _import_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

@@ -1,18 +0,0 @@

#!/usr/bin/env python
import os
import sys

from ayon_core.pipeline import install_host


def main(env):
    from ayon_resolve.utils import setup
    import ayon_resolve.api as bmdvr
    # Registers openpype's global pyblish plugins
    install_host(bmdvr)
    setup(env)
    # Return truthy so the exit code below reports success
    return True


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))

@@ -1,71 +0,0 @@

import os
import shutil
from ayon_core.lib import Logger, is_running_from_build

RESOLVE_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


def setup(env):
    log = Logger.get_logger("ResolveSetup")
    scripts = {}
    util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR")
    util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"]

    util_scripts_paths = [os.path.join(
        RESOLVE_ADDON_ROOT,
        "utility_scripts"
    )]

    # collect script dirs
    if util_scripts_env:
        log.info("Utility Scripts Env: `{}`".format(util_scripts_env))
        util_scripts_paths = util_scripts_env.split(
            os.pathsep) + util_scripts_paths

    # collect scripts from dirs
    for path in util_scripts_paths:
        scripts.update({path: os.listdir(path)})

    log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths))
    log.info("Utility Scripts: `{}`".format(scripts))

    # Make sure scripts dir exists
    os.makedirs(util_scripts_dir, exist_ok=True)

    # make sure no script file is left in the folder
    for script in os.listdir(util_scripts_dir):
        path = os.path.join(util_scripts_dir, script)
        log.info("Removing `{}`...".format(path))
        if os.path.isdir(path):
            shutil.rmtree(path, onerror=None)
        else:
            os.remove(path)

    # copy scripts into Resolve's utility scripts dir
    for directory, dir_scripts in scripts.items():
        for script in dir_scripts:
            if (
                is_running_from_build()
                and script in ["tests", "develop"]
            ):
                # do not copy these when running from a build
                continue

            src = os.path.join(directory, script)
            dst = os.path.join(util_scripts_dir, script)

            # TODO: Make this a less hacky workaround
            if script == "ayon_startup.scriptlib":
                # Handle special case for scriptlib that needs to be a folder
                # up from the Comp folder in the Fusion scripts
                dst = os.path.join(os.path.dirname(util_scripts_dir),
                                   script)

            log.info("Copying `{}` to `{}`...".format(src, dst))
            if os.path.isdir(src):
                shutil.copytree(
                    src, dst, symlinks=False,
                    ignore=None, ignore_dangling_symlinks=False
                )
            else:
                shutil.copy2(src, dst)

@@ -1,3 +0,0 @@

# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'resolve' version."""
__version__ = "0.2.2"

@@ -1,10 +0,0 @@

name = "resolve"
title = "DaVinci Resolve"
version = "0.2.2"

client_dir = "ayon_resolve"

ayon_required_addons = {
    "core": ">0.3.2",
}
ayon_compatible_addons = {}

@@ -1,13 +0,0 @@

from typing import Type

from ayon_server.addons import BaseServerAddon

from .settings import ResolveSettings, DEFAULT_VALUES


class ResolveAddon(BaseServerAddon):
    settings_model: Type[ResolveSettings] = ResolveSettings

    async def get_default_settings(self):
        settings_model_cls = self.get_settings_model()
        return settings_model_cls(**DEFAULT_VALUES)

@@ -1,79 +0,0 @@

from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings.validators import ensure_unique_names


class ImageIOConfigModel(BaseSettingsModel):
    """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
    path in the Core addon profiles here
    (ayon+settings://core/imageio/ocio_config_profiles).
    """

    override_global_config: bool = SettingsField(
        False,
        title="Override global OCIO config",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
    filepath: list[str] = SettingsField(
        default_factory=list,
        title="Config path",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )


class ImageIOFileRuleModel(BaseSettingsModel):
    name: str = SettingsField("", title="Rule name")
    pattern: str = SettingsField("", title="Regex pattern")
    colorspace: str = SettingsField("", title="Colorspace name")
    ext: str = SettingsField("", title="File extension")


class ImageIOFileRulesModel(BaseSettingsModel):
    activate_host_rules: bool = SettingsField(False)
    rules: list[ImageIOFileRuleModel] = SettingsField(
        default_factory=list,
        title="Rules"
    )

    @validator("rules")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value


class ImageIORemappingRulesModel(BaseSettingsModel):
    host_native_name: str = SettingsField(
        title="Application native colorspace name"
    )
    ocio_name: str = SettingsField(title="OCIO colorspace name")


class ImageIORemappingModel(BaseSettingsModel):
    rules: list[ImageIORemappingRulesModel] = SettingsField(
        default_factory=list)
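
# A hedged example of remapping rules as they might appear in the project
# settings payload (the colorspace names below are hypothetical; the
# structure follows the two models above and is what
# `get_remapped_colorspace_to_native` consumes in the LoadMedia loader):
#
#     "remapping": {
#         "rules": [
#             {
#                 "host_native_name": "Rec.709 Gamma 2.4",
#                 "ocio_name": "Output - Rec.709"
#             }
#         ]
#     }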


class ResolveImageIOModel(BaseSettingsModel):
    activate_host_color_management: bool = SettingsField(
        True, title="Enable Color Management"
    )
    remapping: ImageIORemappingModel = SettingsField(
        title="Remapping colorspace names",
        default_factory=ImageIORemappingModel
    )
    ocio_config: ImageIOConfigModel = SettingsField(
        default_factory=ImageIOConfigModel,
        title="OCIO config"
    )
    file_rules: ImageIOFileRulesModel = SettingsField(
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )

@@ -1,208 +0,0 @@

from pydantic import validator
from ayon_server.settings import (
    BaseSettingsModel,
    SettingsField,
    ensure_unique_names,
)

from .imageio import ResolveImageIOModel


class CreateShotClipModels(BaseSettingsModel):
    hierarchy: str = SettingsField(
        "{folder}/{sequence}",
        title="Shot parent hierarchy",
        section="Shot Hierarchy And Rename Settings"
    )
    clipRename: bool = SettingsField(
        True,
        title="Rename clips"
    )
    clipName: str = SettingsField(
        "{track}{sequence}{shot}",
        title="Clip name template"
    )
    countFrom: int = SettingsField(
        10,
        title="Count sequence from"
    )
    countSteps: int = SettingsField(
        10,
        title="Stepping number"
    )

    folder: str = SettingsField(
        "shots",
        title="{folder}",
        section="Shot Template Keywords"
    )
    episode: str = SettingsField(
        "ep01",
        title="{episode}"
    )
    sequence: str = SettingsField(
        "sq01",
        title="{sequence}"
    )
    track: str = SettingsField(
        "{_track_}",
        title="{track}"
    )
    shot: str = SettingsField(
        "sh###",
        title="{shot}"
    )

    vSyncOn: bool = SettingsField(
        False,
        title="Enable Vertical Sync",
        section="Vertical Synchronization Of Attributes"
    )

    workfileFrameStart: int = SettingsField(
        1001,
        title="Workfile Start Frame",
        section="Shot Attributes"
    )
    handleStart: int = SettingsField(
        10,
        title="Handle start (head)"
    )
    handleEnd: int = SettingsField(
        10,
        title="Handle end (tail)"
    )


class CreatorPluginsModel(BaseSettingsModel):
    CreateShotClip: CreateShotClipModels = SettingsField(
        default_factory=CreateShotClipModels,
        title="Create Shot Clip"
    )


class MetadataMappingModel(BaseSettingsModel):
    """Metadata mapping

    Representation context data is used for formatting of the anatomy
    tokens. The following are supported:
    - version
    - task
    - asset

    """
    name: str = SettingsField(
        "",
        title="Metadata property name"
    )
    value: str = SettingsField(
        "",
        title="Metadata value template"
    )


class LoadMediaModel(BaseSettingsModel):
    clip_color_last: str = SettingsField(
        "Olive",
        title="Clip color for last version"
    )
    clip_color_old: str = SettingsField(
        "Orange",
        title="Clip color for old version"
    )
    media_pool_bin_path: str = SettingsField(
        "Loader/{folder[path]}",
        title="Media Pool bin path template"
    )
    metadata: list[MetadataMappingModel] = SettingsField(
        default_factory=list,
        title="Metadata mapping",
        description=(
            "Set these media pool item metadata values on load and update. The"
            " keys must match the exact Resolve metadata names like"
            " 'Clip Name' or 'Shot'"
        )
    )

    @validator("metadata")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value


class LoaderPluginsModel(BaseSettingsModel):
    LoadMedia: LoadMediaModel = SettingsField(
        default_factory=LoadMediaModel,
        title="Load Media"
    )


class ResolveSettings(BaseSettingsModel):
    launch_openpype_menu_on_start: bool = SettingsField(
        False, title="Launch OpenPype menu on start of Resolve"
    )
    imageio: ResolveImageIOModel = SettingsField(
        default_factory=ResolveImageIOModel,
        title="Color Management (ImageIO)"
    )
    create: CreatorPluginsModel = SettingsField(
        default_factory=CreatorPluginsModel,
        title="Creator plugins",
    )
    load: LoaderPluginsModel = SettingsField(
        default_factory=LoaderPluginsModel,
        title="Loader plugins",
    )


DEFAULT_VALUES = {
    "launch_openpype_menu_on_start": False,
    "create": {
        "CreateShotClip": {
            "hierarchy": "{folder}/{sequence}",
            "clipRename": True,
            "clipName": "{track}{sequence}{shot}",
            "countFrom": 10,
            "countSteps": 10,
            "folder": "shots",
            "episode": "ep01",
            "sequence": "sq01",
            "track": "{_track_}",
            "shot": "sh###",
            "vSyncOn": False,
            "workfileFrameStart": 1001,
            "handleStart": 10,
            "handleEnd": 10
        }
    },
    "load": {
        "LoadMedia": {
            "clip_color_last": "Olive",
            "clip_color_old": "Orange",
            "media_pool_bin_path": (
                "Loader/{folder[path]}"
            ),
            "metadata": [
                {
                    "name": "Comments",
                    "value": "{version[attrib][comment]}"
                },
                {
                    "name": "Shot",
                    "value": "{folder[path]}"
                },
                {
                    "name": "Take",
                    "value": "{product[name]} {version[name]}"
                },
                {
                    "name": "Clip Name",
                    "value": (
                        "{folder[path]} {product[name]} "
                        "{version[name]} ({representation[name]})"
                    )
                }
            ]
        }
    }
}
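
A minimal sketch of how one of the metadata value templates above is resolved
at load time, mirroring `LoadMedia._set_metadata` (assuming `StringTemplate`
is importable from `ayon_core.lib` as the loader uses it; the context values
below are hypothetical):

    from ayon_core.lib import StringTemplate

    context = {
        "folder": {"path": "/char/villain"},
        "product": {"name": "modelMain"},
        "version": {"name": 2},
    }
    template = "{product[name]} {version[name]}"
    value = StringTemplate(template).format_strict(context)
    # value -> "modelMain 2", which would be set as the "Take" clip property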