Mirror of https://github.com/ynput/ayon-core.git (synced 2026-01-02 00:44:52 +01:00)

Commit 82909fe976: Merge branch 'develop' into OP-3438_Maya-missing-playblast-options
23 changed files with 603 additions and 382 deletions
@@ -3,6 +3,7 @@ from __future__ import absolute_import
 import pyblish.api

+from openpype.client import get_asset_by_name
 from openpype.pipeline import legacy_io
 from openpype.api import get_errored_instances_from_context
@@ -74,12 +75,21 @@ class GenerateUUIDsOnInvalidAction(pyblish.api.Action):
         from . import lib

-        asset = instance.data['asset']
-        asset_id = legacy_io.find_one(
-            {"name": asset, "type": "asset"},
-            projection={"_id": True}
-        )['_id']
-        for node, _id in lib.generate_ids(nodes, asset_id=asset_id):
+        # Expecting this is called on validators in which case 'assetEntity'
+        #   should be always available, but kept a way to query it by name.
+        asset_doc = instance.data.get("assetEntity")
+        if not asset_doc:
+            asset_name = instance.data["asset"]
+            project_name = legacy_io.active_project()
+            self.log.info((
+                "Asset is not stored on instance."
+                " Querying by name \"{}\" from project \"{}\""
+            ).format(asset_name, project_name))
+            asset_doc = get_asset_by_name(
+                project_name, asset_name, fields=["_id"]
+            )
+
+        for node, _id in lib.generate_ids(nodes, asset_id=asset_doc["_id"]):
             lib.set_id(node, _id, overwrite=True)
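The hunks above set the pattern repeated across this whole commit: raw `legacy_io.find_one` Mongo-style queries are swapped for named helpers from `openpype.client` that take an explicit project name and an optional field projection. A minimal before/after sketch of that pattern; `get_asset_by_name` and `legacy_io.active_project` are the real APIs from the diff, while `asset_name` is just an example value:

from openpype.client import get_asset_by_name
from openpype.pipeline import legacy_io

asset_name = "characterA"  # example value, normally taken from instance data

# Before: an untyped pymongo-style query through the legacy session
# asset_doc = legacy_io.find_one(
#     {"type": "asset", "name": asset_name},
#     projection={"_id": True}
# )

# After: explicit project scope plus a typed helper with a field projection
project_name = legacy_io.active_project()
asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])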
@@ -2,6 +2,7 @@
 """OpenPype script commands to be used directly in Maya."""
 from maya import cmds

+from openpype.client import get_asset_by_name, get_project
 from openpype.pipeline import legacy_io
@@ -79,8 +80,9 @@ def reset_frame_range():
     cmds.currentUnit(time=fps)

     # Set frame start/end
+    project_name = legacy_io.active_project()
     asset_name = legacy_io.Session["AVALON_ASSET"]
-    asset = legacy_io.find_one({"name": asset_name, "type": "asset"})
+    asset = get_asset_by_name(project_name, asset_name)

     frame_start = asset["data"].get("frameStart")
     frame_end = asset["data"].get("frameEnd")
@@ -145,8 +147,9 @@ def reset_resolution():
     resolution_height = 1080

     # Get resolution from asset
+    project_name = legacy_io.active_project()
     asset_name = legacy_io.Session["AVALON_ASSET"]
-    asset_doc = legacy_io.find_one({"name": asset_name, "type": "asset"})
+    asset_doc = get_asset_by_name(project_name, asset_name)
     resolution = _resolution_from_document(asset_doc)
     # Try get resolution from project
     if resolution is None:
@@ -155,7 +158,7 @@ def reset_resolution():
             "Asset \"{}\" does not have set resolution."
             " Trying to get resolution from project"
         ).format(asset_name))
-        project_doc = legacy_io.find_one({"type": "project"})
+        project_doc = get_project(project_name)
         resolution = _resolution_from_document(project_doc)

     if resolution is None:
@@ -12,11 +12,17 @@ import contextlib
 from collections import OrderedDict, defaultdict
 from math import ceil
 from six import string_types
-import bson

 from maya import cmds, mel
 import maya.api.OpenMaya as om

+from openpype.client import (
+    get_project,
+    get_asset_by_name,
+    get_subsets,
+    get_last_versions,
+    get_representation_by_name
+)
 from openpype import lib
 from openpype.api import get_anatomy_settings
 from openpype.pipeline import (
@@ -1387,15 +1393,11 @@ def generate_ids(nodes, asset_id=None):

     if asset_id is None:
         # Get the asset ID from the database for the asset of current context
-        asset_data = legacy_io.find_one(
-            {
-                "type": "asset",
-                "name": legacy_io.Session["AVALON_ASSET"]
-            },
-            projection={"_id": True}
-        )
-        assert asset_data, "No current asset found in Session"
-        asset_id = asset_data['_id']
+        project_name = legacy_io.active_project()
+        asset_name = legacy_io.Session["AVALON_ASSET"]
+        asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
+        assert asset_doc, "No current asset found in Session"
+        asset_id = asset_doc['_id']

     node_ids = []
     for node in nodes:
@@ -1548,13 +1550,15 @@ def list_looks(asset_id):

-    # # get all subsets with look leading in
-    # the name associated with the asset
-    subset = legacy_io.find({
-        "parent": bson.ObjectId(asset_id),
-        "type": "subset",
-        "name": {"$regex": "look*"}
-    })
-
-    return list(subset)
+    # TODO this should probably look for family 'look' instead of checking
+    #   subset name that can not start with family
+    project_name = legacy_io.active_project()
+    subset_docs = get_subsets(project_name, asset_ids=[asset_id])
+    return [
+        subset_doc
+        for subset_doc in subset_docs
+        if subset_doc["name"].startswith("look")
+    ]


 def assign_look_by_version(nodes, version_id):
@@ -1570,18 +1574,15 @@ def assign_look_by_version(nodes, version_id):
         None
     """

-    # Get representations of shader file and relationships
-    look_representation = legacy_io.find_one({
-        "type": "representation",
-        "parent": version_id,
-        "name": "ma"
-    })
+    project_name = legacy_io.active_project()

-    json_representation = legacy_io.find_one({
-        "type": "representation",
-        "parent": version_id,
-        "name": "json"
-    })
+    # Get representations of shader file and relationships
+    look_representation = get_representation_by_name(
+        project_name, "ma", version_id
+    )
+    json_representation = get_representation_by_name(
+        project_name, "json", version_id
+    )

     # See if representation is already loaded, if so reuse it.
     host = registered_host()
@@ -1639,42 +1640,54 @@ def assign_look(nodes, subset="lookDefault"):
         parts = pype_id.split(":", 1)
         grouped[parts[0]].append(node)

+    project_name = legacy_io.active_project()
+    subset_docs = get_subsets(
+        project_name, subset_names=[subset], asset_ids=grouped.keys()
+    )
+    subset_docs_by_asset_id = {
+        str(subset_doc["parent"]): subset_doc
+        for subset_doc in subset_docs
+    }
+    subset_ids = {
+        subset_doc["_id"]
+        for subset_doc in subset_docs_by_asset_id.values()
+    }
+    last_version_docs = get_last_versions(
+        project_name,
+        subset_ids=subset_ids,
+        fields=["_id", "name", "data.families"]
+    )
+    last_version_docs_by_subset_id = {
+        last_version_doc["parent"]: last_version_doc
+        for last_version_doc in last_version_docs
+    }

     for asset_id, asset_nodes in grouped.items():
-        # create objectId for database
-        try:
-            asset_id = bson.ObjectId(asset_id)
-        except bson.errors.InvalidId:
-            log.warning("Asset ID is not compatible with bson")
-            continue
-        subset_data = legacy_io.find_one({
-            "type": "subset",
-            "name": subset,
-            "parent": asset_id
-        })
-
-        if not subset_data:
+        subset_doc = subset_docs_by_asset_id.get(asset_id)
+        if not subset_doc:
             log.warning("No subset '{}' found for {}".format(subset, asset_id))
             continue

-        # get last version
-        # with backwards compatibility
-        version = legacy_io.find_one(
-            {
-                "parent": subset_data['_id'],
-                "type": "version",
-                "data.families": {"$in": ["look"]}
-            },
-            sort=[("name", -1)],
-            projection={
-                "_id": True,
-                "name": True
-            }
-        )
+        last_version = last_version_docs_by_subset_id.get(subset_doc["_id"])
+        if not last_version:
+            log.warning((
+                "Not found last version for subset '{}' on asset with id {}"
+            ).format(subset, asset_id))
+            continue

-        log.debug("Assigning look '{}' <v{:03d}>".format(subset,
-                                                         version["name"]))
+        families = last_version.get("data", {}).get("families") or []
+        if "look" not in families:
+            log.warning((
+                "Last version for subset '{}' on asset with id {}"
+                " does not have look family"
+            ).format(subset, asset_id))
+            continue

-        assign_look_by_version(asset_nodes, version['_id'])
+        log.debug("Assigning look '{}' <v{:03d}>".format(
+            subset, last_version["name"]))
+
+        assign_look_by_version(asset_nodes, last_version["_id"])


 def apply_shaders(relationships, shadernodes, nodes):
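Beyond swapping query APIs, the assign_look rewrite above changes the query shape: instead of two find_one calls per asset inside the loop, it pulls all candidate subsets and their last versions in two bulk queries and indexes them by id, so the loop itself does only dictionary lookups. A condensed sketch of that indexing idiom; `project_name`, `subset` and `grouped` come from the surrounding function, and the document shapes ("parent", "_id") are assumed from the hunk:

subset_docs_by_asset_id = {
    str(doc["parent"]): doc
    for doc in get_subsets(
        project_name, subset_names=[subset], asset_ids=list(grouped)
    )
}
last_version_by_subset_id = {
    doc["parent"]: doc
    for doc in get_last_versions(
        project_name,
        subset_ids={doc["_id"] for doc in subset_docs_by_asset_id.values()},
        fields=["_id", "name", "data.families"],
    )
}
# inside the per-asset loop: O(1) lookups instead of per-asset round trips
# subset_doc = subset_docs_by_asset_id.get(asset_id)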
@@ -2158,7 +2171,8 @@ def reset_scene_resolution():
         None
     """

-    project_doc = legacy_io.find_one({"type": "project"})
+    project_name = legacy_io.active_project()
+    project_doc = get_project(project_name)
     project_data = project_doc["data"]
     asset_data = lib.get_asset()["data"]
@@ -2191,7 +2205,8 @@ def set_context_settings():
     """

     # Todo (Wijnand): apply renderer and resolution of project
-    project_doc = legacy_io.find_one({"type": "project"})
+    project_name = legacy_io.active_project()
+    project_doc = get_project(project_name)
     project_data = project_doc["data"]
     asset_data = lib.get_asset()["data"]
@@ -6,10 +6,16 @@ import contextlib
 import copy

 import six
-from bson.objectid import ObjectId

 from maya import cmds

+from openpype.client import (
+    get_version_by_name,
+    get_last_version_by_subset_id,
+    get_representation_by_id,
+    get_representation_by_name,
+    get_representation_parents,
+)
 from openpype.pipeline import (
     schema,
     legacy_io,
@@ -283,36 +289,35 @@ def update_package_version(container, version):
     """

     # Versioning (from `core.maya.pipeline`)
-    current_representation = legacy_io.find_one({
-        "_id": ObjectId(container["representation"])
-    })
+    project_name = legacy_io.active_project()
+    current_representation = get_representation_by_id(
+        project_name, container["representation"]
+    )

     assert current_representation is not None, "This is a bug"

-    version_, subset, asset, project = legacy_io.parenthood(
-        current_representation
+    repre_parents = get_representation_parents(
+        project_name, current_representation
     )
+    version_doc = subset_doc = asset_doc = project_doc = None
+    if repre_parents:
+        version_doc, subset_doc, asset_doc, project_doc = repre_parents

     if version == -1:
-        new_version = legacy_io.find_one({
-            "type": "version",
-            "parent": subset["_id"]
-        }, sort=[("name", -1)])
+        new_version = get_last_version_by_subset_id(
+            project_name, subset_doc["_id"]
+        )
     else:
-        new_version = legacy_io.find_one({
-            "type": "version",
-            "parent": subset["_id"],
-            "name": version,
-        })
+        new_version = get_version_by_name(
+            project_name, version, subset_doc["_id"]
+        )

     assert new_version is not None, "This is a bug"

     # Get the new representation (new file)
-    new_representation = legacy_io.find_one({
-        "type": "representation",
-        "parent": new_version["_id"],
-        "name": current_representation["name"]
-    })
+    new_representation = get_representation_by_name(
+        project_name, current_representation["name"], new_version["_id"]
+    )

     update_package(container, new_representation)
@@ -330,10 +335,10 @@ def update_package(set_container, representation):
     """

     # Load the original package data
-    current_representation = legacy_io.find_one({
-        "_id": ObjectId(set_container['representation']),
-        "type": "representation"
-    })
+    project_name = legacy_io.active_project()
+    current_representation = get_representation_by_id(
+        project_name, set_container["representation"]
+    )

     current_file = get_representation_path(current_representation)
     assert current_file.endswith(".json")
@@ -380,6 +385,7 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
     from openpype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms

     set_namespace = set_container['namespace']
+    project_name = legacy_io.active_project()

     # Update the setdress hierarchy alembic
     set_root = get_container_transforms(set_container, root=True)
@@ -481,12 +487,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file):
             # Check whether the conversion can be done by the Loader.
             # They *must* use the same asset, subset and Loader for
             # `update_container` to make sense.
-            old = legacy_io.find_one({
-                "_id": ObjectId(representation_current)
-            })
-            new = legacy_io.find_one({
-                "_id": ObjectId(representation_new)
-            })
+            old = get_representation_by_id(
+                project_name, representation_current
+            )
+            new = get_representation_by_id(
+                project_name, representation_new
+            )
             is_valid = compare_representations(old=old, new=new)
             if not is_valid:
                 log.error("Skipping: %s. See log for details.",
@@ -1,6 +1,10 @@
 import re
 import json
-from bson.objectid import ObjectId

+from openpype.client import (
+    get_representation_by_id,
+    get_representations
+)
 from openpype.pipeline import (
     InventoryAction,
     get_representation_context,
@@ -31,6 +35,7 @@ class ImportModelRender(InventoryAction):
     def process(self, containers):
         from maya import cmds

+        project_name = legacy_io.active_project()
         for container in containers:
             con_name = container["objectName"]
             nodes = []
@@ -40,9 +45,9 @@ class ImportModelRender(InventoryAction):
             else:
                 nodes.append(n)

-            repr_doc = legacy_io.find_one({
-                "_id": ObjectId(container["representation"]),
-            })
+            repr_doc = get_representation_by_id(
+                project_name, container["representation"], fields=["parent"]
+            )
             version_id = repr_doc["parent"]

             print("Importing render sets for model %r" % con_name)
@@ -63,26 +68,38 @@ class ImportModelRender(InventoryAction):

         from maya import cmds

+        project_name = legacy_io.active_project()
+        repre_docs = get_representations(
+            project_name, version_ids=[version_id], fields=["_id", "name"]
+        )
         # Get representations of shader file and relationships
-        look_repr = legacy_io.find_one({
-            "type": "representation",
-            "parent": version_id,
-            "name": {"$regex": self.scene_type_regex},
-        })
-        if not look_repr:
+        json_repre = None
+        look_repres = []
+        scene_type_regex = re.compile(self.scene_type_regex)
+        for repre_doc in repre_docs:
+            repre_name = repre_doc["name"]
+            if repre_name == self.look_data_type:
+                json_repre = repre_doc
+                continue
+
+            if scene_type_regex.fullmatch(repre_name):
+                look_repres.append(repre_doc)
+
+        # QUESTION should we care if there is more then one look
+        #   representation? (since it's based on regex match)
+        look_repre = None
+        if look_repres:
+            look_repre = look_repres[0]
+
+        # QUESTION shouldn't be json representation validated too?
+        if not look_repre:
             print("No model render sets for this model version..")
             return

-        json_repr = legacy_io.find_one({
-            "type": "representation",
-            "parent": version_id,
-            "name": self.look_data_type,
-        })
-
-        context = get_representation_context(look_repr["_id"])
+        context = get_representation_context(look_repre["_id"])
         maya_file = self.filepath_from_context(context)

-        context = get_representation_context(json_repr["_id"])
+        context = get_representation_context(json_repre["_id"])
         json_file = self.filepath_from_context(context)

         # Import the look file
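The hunk above replaces a server-side `$regex` query with one bulk fetch and a client-side classification pass: the JSON link data is split off by exact name, scene files are kept when their name fully matches a configurable regex. A standalone sketch of that split; the default regex and the helper name are illustrative assumptions, only the classification logic mirrors the plugin:

import re

def split_representations(repre_docs, scene_type_regex=r"(ma|mb)", look_data_type="json"):
    # one pass over the version's representations
    pattern = re.compile(scene_type_regex)
    json_repre = None
    look_repres = []
    for doc in repre_docs:
        if doc["name"] == look_data_type:
            json_repre = doc
        elif pattern.fullmatch(doc["name"]):
            look_repres.append(doc)
    return json_repre, look_repres

docs = [{"name": "ma"}, {"name": "json"}, {"name": "abc"}]
print(split_representations(docs))  # ({'name': 'json'}, [{'name': 'ma'}])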
@@ -1,5 +1,10 @@
 from maya import cmds, mel

+from openpype.client import (
+    get_asset_by_id,
+    get_subset_by_id,
+    get_version_by_id,
+)
 from openpype.pipeline import (
     legacy_io,
     load,
@@ -65,9 +70,16 @@ class AudioLoader(load.LoaderPlugin):
         )

         # Set frame range.
-        version = legacy_io.find_one({"_id": representation["parent"]})
-        subset = legacy_io.find_one({"_id": version["parent"]})
-        asset = legacy_io.find_one({"_id": subset["parent"]})
+        project_name = legacy_io.active_project()
+        version = get_version_by_id(
+            project_name, representation["parent"], fields=["parent"]
+        )
+        subset = get_subset_by_id(
+            project_name, version["parent"], fields=["parent"]
+        )
+        asset = get_asset_by_id(
+            project_name, subset["parent"], fields=["parent"]
+        )
         audio_node.sourceStart.set(1 - asset["data"]["frameStart"])
         audio_node.sourceEnd.set(asset["data"]["frameEnd"])
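The audio and image-plane loaders both walk the parent chain representation, then version, then subset, then asset, fetching each document by id with a minimal field projection. A hedged sketch of that walk under the same assumptions as the hunk; `representation` is assumed to be the loader's input document. Note that the asset query in the diff projects only "parent" even though `asset["data"]["frameStart"]` is read right after, so if the client applies projections strictly that last lookup would also need "data" in its fields, as the sketch does:

project_name = legacy_io.active_project()
version = get_version_by_id(
    project_name, representation["parent"], fields=["parent"]
)
subset = get_subset_by_id(
    project_name, version["parent"], fields=["parent"]
)
# "data" added here on the assumption that the frame range is read next
asset = get_asset_by_id(
    project_name, subset["parent"], fields=["parent", "data"]
)
frame_start = asset["data"]["frameStart"]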
@@ -1,5 +1,10 @@
 from Qt import QtWidgets, QtCore

+from openpype.client import (
+    get_asset_by_id,
+    get_subset_by_id,
+    get_version_by_id,
+)
 from openpype.pipeline import (
     legacy_io,
     load,
@@ -216,9 +221,16 @@ class ImagePlaneLoader(load.LoaderPlugin):
         )

         # Set frame range.
-        version = legacy_io.find_one({"_id": representation["parent"]})
-        subset = legacy_io.find_one({"_id": version["parent"]})
-        asset = legacy_io.find_one({"_id": subset["parent"]})
+        project_name = legacy_io.active_project()
+        version = get_version_by_id(
+            project_name, representation["parent"], fields=["parent"]
+        )
+        subset = get_subset_by_id(
+            project_name, version["parent"], fields=["parent"]
+        )
+        asset = get_asset_by_id(
+            project_name, subset["parent"], fields=["parent"]
+        )
         start_frame = asset["data"]["frameStart"]
         end_frame = asset["data"]["frameEnd"]
         image_plane_shape.frameOffset.set(1 - start_frame)
@@ -5,6 +5,7 @@ from collections import defaultdict

 from Qt import QtWidgets

+from openpype.client import get_representation_by_name
 from openpype.pipeline import (
     legacy_io,
     get_representation_path,
@@ -75,11 +76,10 @@ class LookLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
         shader_nodes = cmds.ls(members, type='shadingEngine')
         nodes = set(self._get_nodes_with_shader(shader_nodes))

-        json_representation = legacy_io.find_one({
-            "type": "representation",
-            "parent": representation['parent'],
-            "name": "json"
-        })
+        project_name = legacy_io.active_project()
+        json_representation = get_representation_by_name(
+            project_name, "json", representation["parent"]
+        )

         # Load relationships
         shader_relation = get_representation_path(json_representation)
@@ -7,10 +7,9 @@ loader will use them instead of native vray vrmesh format.
 """
 import os

-from bson.objectid import ObjectId
-
 import maya.cmds as cmds

+from openpype.client import get_representation_by_name
 from openpype.api import get_project_settings
 from openpype.pipeline import (
     legacy_io,
@@ -185,12 +184,8 @@ class VRayProxyLoader(load.LoaderPlugin):
         """
         self.log.debug(
             "Looking for abc in published representations of this version.")
-        abc_rep = legacy_io.find_one({
-            "type": "representation",
-            "parent": ObjectId(version_id),
-            "name": "abc"
-        })
-
+        project_name = legacy_io.active_project()
+        abc_rep = get_representation_by_name(project_name, "abc", version_id)
         if abc_rep:
             self.log.debug("Found, we'll link alembic to vray proxy.")
             file_name = get_representation_path(abc_rep)
@@ -3,6 +3,7 @@ import pymel.core as pm

 import pyblish.api

+from openpype.client import get_subset_by_name
 from openpype.pipeline import legacy_io
@@ -78,11 +79,15 @@ class CollectReview(pyblish.api.InstancePlugin):
             self.log.debug('isntance data {}'.format(instance.data))
         else:
             legacy_subset_name = task + 'Review'
-            asset_doc_id = instance.context.data['assetEntity']["_id"]
-            subsets = legacy_io.find({"type": "subset",
-                                      "name": legacy_subset_name,
-                                      "parent": asset_doc_id}).distinct("_id")
-            if len(list(subsets)) > 0:
+            asset_doc = instance.context.data['assetEntity']
+            project_name = legacy_io.active_project()
+            subset_doc = get_subset_by_name(
+                project_name,
+                legacy_subset_name,
+                asset_doc["_id"],
+                fields=["_id"]
+            )
+            if subset_doc:
                 self.log.debug("Existing subsets found, keep legacy name.")
                 instance.data['subset'] = legacy_subset_name
@@ -1,6 +1,7 @@
 import pyblish.api

 import openpype.api
+from openpype.client import get_assets
 from openpype.pipeline import legacy_io
 import openpype.hosts.maya.api.action
 from openpype.hosts.maya.api import lib
@@ -42,8 +43,12 @@ class ValidateNodeIdsInDatabase(pyblish.api.InstancePlugin):
             nodes=instance[:])

         # check ids against database ids
-        db_asset_ids = legacy_io.find({"type": "asset"}).distinct("_id")
-        db_asset_ids = set(str(i) for i in db_asset_ids)
+        project_name = legacy_io.active_project()
+        asset_docs = get_assets(project_name, fields=["_id"])
+        db_asset_ids = {
+            str(asset_doc["_id"])
+            for asset_doc in asset_docs
+        }

         # Get all asset IDs
         for node in id_required_nodes:
|
@ -1,7 +1,6 @@
|
|||
import pyblish.api
|
||||
import openpype.api
|
||||
|
||||
from openpype.pipeline import legacy_io
|
||||
import openpype.hosts.maya.api.action
|
||||
from openpype.hosts.maya.api import lib
|
||||
|
||||
|
|
@ -36,15 +35,7 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin):
|
|||
"""Return the member nodes that are invalid"""
|
||||
invalid = list()
|
||||
|
||||
asset = instance.data['asset']
|
||||
asset_data = legacy_io.find_one(
|
||||
{
|
||||
"name": asset,
|
||||
"type": "asset"
|
||||
},
|
||||
projection={"_id": True}
|
||||
)
|
||||
asset_id = str(asset_data['_id'])
|
||||
asset_id = str(instance.data['assetEntity']["_id"])
|
||||
|
||||
# We do want to check the referenced nodes as we it might be
|
||||
# part of the end product
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import pyblish.api
|
||||
|
||||
from openpype.client import get_subset_by_name
|
||||
import openpype.hosts.maya.api.action
|
||||
from openpype.pipeline import legacy_io
|
||||
import openpype.api
|
||||
|
||||
|
||||
class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin):
|
||||
|
|
@@ -33,26 +33,23 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin):
         raise RuntimeError("Found unregistered subsets: {}".format(invalid))

     def get_invalid(self, instance):
         invalid = []

-        asset_name = instance.data["asset"]
+        project_name = legacy_io.active_project()
+        asset_doc = instance.data["assetEntity"]
         render_passses = instance.data.get("renderPasses", [])
         for render_pass in render_passses:
-            is_valid = self.validate_subset_registered(asset_name, render_pass)
+            is_valid = self.validate_subset_registered(
+                project_name, asset_doc, render_pass
+            )
             if not is_valid:
                 invalid.append(render_pass)

         return invalid

-    def validate_subset_registered(self, asset_name, subset_name):
+    def validate_subset_registered(self, project_name, asset_doc, subset_name):
         """Check if subset is registered in the database under the asset"""
-
-        asset = legacy_io.find_one({"type": "asset", "name": asset_name})
-        is_valid = legacy_io.find_one({
-            "type": "subset",
-            "name": subset_name,
-            "parent": asset["_id"]
-        })
-
-        return is_valid
+        return get_subset_by_name(
+            project_name, subset_name, asset_doc["_id"], fields=["_id"]
+        )
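The reworked validator leans on the fact that `get_subset_by_name` returns the subset document when found and None otherwise, so the document itself doubles as the validity flag. A minimal, self-contained sketch of that doc-or-None idiom; `fetch_subset` is a hypothetical stand-in with the same contract:

def fetch_subset(name):
    # hypothetical helper: document when registered, None otherwise
    return {"_id": 1, "name": name} if name.startswith("render") else None

render_passes = ["renderBeauty", "depthPass"]
invalid = [rp for rp in render_passes if not fetch_subset(rp)]
print(invalid)  # ['depthPass']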
@@ -26,7 +26,11 @@ from .pipeline import (
     update_container,
 )
 from .lib import (
-    maintained_selection
+    maintained_selection,
+    reset_selection,
+    get_view_process_node,
+    duplicate_node
 )

 from .utils import (
@@ -58,6 +62,9 @@ __all__ = (
     "update_container",

     "maintained_selection",
+    "reset_selection",
+    "get_view_process_node",
+    "duplicate_node",

     "colorspace_exists_on_node",
     "get_colorspace_list"
@@ -3,6 +3,7 @@ from pprint import pformat
 import re
 import six
 import platform
+import tempfile
 import contextlib
 from collections import OrderedDict
@@ -711,6 +712,20 @@ def get_imageio_input_colorspace(filename):
     return preset_clrsp


+def get_view_process_node():
+    reset_selection()
+
+    ipn_orig = None
+    for v in nuke.allNodes(filter="Viewer"):
+        ipn = v['input_process_node'].getValue()
+        if "VIEWER_INPUT" not in ipn:
+            ipn_orig = nuke.toNode(ipn)
+            ipn_orig.setSelected(True)
+
+    if ipn_orig:
+        return duplicate_node(ipn_orig)
+
+
 def on_script_load():
     ''' Callback for ffmpeg support
     '''
@@ -2374,6 +2389,8 @@ def process_workfile_builder():
         env_value_to_bool,
         get_custom_workfile_template
     )
+    # to avoid looping of the callback, remove it!
+    nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")

     # get state from settings
     workfile_builder = get_current_project_settings()["nuke"].get(
@@ -2429,9 +2446,6 @@ def process_workfile_builder():
     if not openlv_on or not os.path.exists(last_workfile_path):
         return

-    # to avoid looping of the callback, remove it!
-    nuke.removeOnCreate(process_workfile_builder, nodeClass="Root")
-
     log.info("Opening last workfile...")
     # open workfile
     open_file(last_workfile_path)
@@ -2617,6 +2631,57 @@ class DirmapCache:
         return cls._sync_module


+@contextlib.contextmanager
+def _duplicate_node_temp():
+    """Create a temp file where node is pasted during duplication.
+
+    This is to avoid using clipboard for node duplication.
+    """
+
+    duplicate_node_temp_path = os.path.join(
+        tempfile.gettempdir(),
+        "openpype_nuke_duplicate_temp_{}".format(os.getpid())
+    )
+
+    # This can happen only if 'duplicate_node' would be
+    if os.path.exists(duplicate_node_temp_path):
+        log.warning((
+            "Temp file for node duplication already exists."
+            " Trying to remove {}"
+        ).format(duplicate_node_temp_path))
+        os.remove(duplicate_node_temp_path)
+
+    try:
+        # Yield the path where node can be copied
+        yield duplicate_node_temp_path
+
+    finally:
+        # Remove the file at the end
+        os.remove(duplicate_node_temp_path)
+
+
+def duplicate_node(node):
+    reset_selection()
+
+    # select required node for duplication
+    node.setSelected(True)
+
+    with _duplicate_node_temp() as filepath:
+        # copy selected to temp filepath
+        nuke.nodeCopy(filepath)
+
+        # reset selection
+        reset_selection()
+
+        # paste node and selection is on it only
+        dupli_node = nuke.nodePaste(filepath)
+
+        # reset selection
+        reset_selection()
+
+    return dupli_node
+
+
 def dirmap_file_name_filter(file_name):
     """Nuke callback function with single full path argument.
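A hedged usage sketch for the new clipboard-free duplication helper above; the import path is real per the `__init__` hunk earlier, but "Blur1" is an example node name and everything here assumes a running Nuke session:

import nuke
from openpype.hosts.nuke.api import duplicate_node

blur = nuke.toNode("Blur1")        # example node, assumed to exist
blur_copy = duplicate_node(blur)   # pasted via a per-process temp file,
                                   # leaving the user's clipboard untouched
print(blur_copy.name())

Routing the copy through a pid-suffixed temp file means concurrent Nuke sessions cannot trample each other, and the artist's clipboard contents survive publishing.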
@@ -14,12 +14,12 @@ from openpype.pipeline import (
 from .lib import (
     Knobby,
     check_subsetname_exists,
-    reset_selection,
     maintained_selection,
     set_avalon_knob_data,
     add_publish_knob,
     get_nuke_imageio_settings,
-    set_node_knobs_from_settings
+    set_node_knobs_from_settings,
+    get_view_process_node
 )
|
@ -216,37 +216,6 @@ class ExporterReview(object):
|
|||
|
||||
self.data["representations"].append(repre)
|
||||
|
||||
def get_view_input_process_node(self):
|
||||
"""
|
||||
Will get any active view process.
|
||||
|
||||
Arguments:
|
||||
self (class): in object definition
|
||||
|
||||
Returns:
|
||||
nuke.Node: copy node of Input Process node
|
||||
"""
|
||||
reset_selection()
|
||||
ipn_orig = None
|
||||
for v in nuke.allNodes(filter="Viewer"):
|
||||
ip = v["input_process"].getValue()
|
||||
ipn = v["input_process_node"].getValue()
|
||||
if "VIEWER_INPUT" not in ipn and ip:
|
||||
ipn_orig = nuke.toNode(ipn)
|
||||
ipn_orig.setSelected(True)
|
||||
|
||||
if ipn_orig:
|
||||
# copy selected to clipboard
|
||||
nuke.nodeCopy("%clipboard%")
|
||||
# reset selection
|
||||
reset_selection()
|
||||
# paste node and selection is on it only
|
||||
nuke.nodePaste("%clipboard%")
|
||||
# assign to variable
|
||||
ipn = nuke.selectedNode()
|
||||
|
||||
return ipn
|
||||
|
||||
def get_imageio_baking_profile(self):
|
||||
from . import lib as opnlib
|
||||
nuke_imageio = opnlib.get_nuke_imageio_settings()
|
||||
|
|
@ -311,7 +280,7 @@ class ExporterReviewLut(ExporterReview):
|
|||
self._temp_nodes = []
|
||||
self.log.info("Deleted nodes...")
|
||||
|
||||
def generate_lut(self):
|
||||
def generate_lut(self, **kwargs):
|
||||
bake_viewer_process = kwargs["bake_viewer_process"]
|
||||
bake_viewer_input_process_node = kwargs[
|
||||
"bake_viewer_input_process"]
|
||||
|
|
@@ -329,7 +298,7 @@ class ExporterReviewLut(ExporterReview):
         if bake_viewer_process:
             # Node View Process
             if bake_viewer_input_process_node:
-                ipn = self.get_view_input_process_node()
+                ipn = get_view_process_node()
                 if ipn is not None:
                     # connect
                     ipn.setInput(0, self.previous_node)
@@ -511,7 +480,7 @@ class ExporterReviewMov(ExporterReview):
         if bake_viewer_process:
             if bake_viewer_input_process_node:
                 # View Process node
-                ipn = self.get_view_input_process_node()
+                ipn = get_view_process_node()
                 if ipn is not None:
                     # connect
                     ipn.setInput(0, self.previous_node)
@@ -1,4 +1,5 @@
 import os
+from pprint import pformat
 import re
 import pyblish.api
 import openpype
@@ -50,6 +51,8 @@ class ExtractReviewDataMov(openpype.api.Extractor):
         with maintained_selection():
             generated_repres = []
             for o_name, o_data in self.outputs.items():
+                self.log.debug(
+                    "o_name: {}, o_data: {}".format(o_name, pformat(o_data)))
                 f_families = o_data["filter"]["families"]
                 f_task_types = o_data["filter"]["task_types"]
                 f_subsets = o_data["filter"]["subsets"]
@@ -88,7 +91,13 @@ class ExtractReviewDataMov(openpype.api.Extractor):
                 # check if settings have more then one preset
                 # so we dont need to add outputName to representation
                 # in case there is only one preset
-                multiple_presets = bool(len(self.outputs.keys()) > 1)
+                multiple_presets = len(self.outputs.keys()) > 1
+
+                # adding bake presets to instance data for other plugins
+                if not instance.data.get("bakePresets"):
+                    instance.data["bakePresets"] = {}
+                # add preset to bakePresets
+                instance.data["bakePresets"][o_name] = o_data

                 # create exporter instance
                 exporter = plugin.ExporterReviewMov(
|
|||
|
|
@ -1,11 +1,16 @@
|
|||
import os
|
||||
from pprint import pformat
|
||||
import nuke
|
||||
import copy
|
||||
|
||||
import pyblish.api
|
||||
|
||||
import openpype
|
||||
from openpype.hosts.nuke.api.lib import maintained_selection
|
||||
from openpype.hosts.nuke.api import (
|
||||
maintained_selection,
|
||||
duplicate_node,
|
||||
get_view_process_node
|
||||
)
|
||||
|
||||
|
||||
class ExtractSlateFrame(openpype.api.Extractor):
|
||||
|
|
@@ -15,14 +20,13 @@ class ExtractSlateFrame(openpype.api.Extractor):

    """

-    order = pyblish.api.ExtractorOrder - 0.001
+    order = pyblish.api.ExtractorOrder + 0.011
    label = "Extract Slate Frame"

    families = ["slate"]
    hosts = ["nuke"]

    # Settings values
    # - can be extended by other attributes from node in the future
    key_value_mapping = {
        "f_submission_note": [True, "{comment}"],
        "f_submitting_for": [True, "{intent[value]}"],
@@ -30,44 +34,107 @@ class ExtractSlateFrame(openpype.api.Extractor):
     }

     def process(self, instance):
-        if hasattr(self, "viewer_lut_raw"):
-            self.viewer_lut_raw = self.viewer_lut_raw
-        else:
-            self.viewer_lut_raw = False
-
         if "representations" not in instance.data:
             instance.data["representations"] = []

+        self._create_staging_dir(instance)
+
         with maintained_selection():
             self.log.debug("instance: {}".format(instance))
             self.log.debug("instance.data[families]: {}".format(
                 instance.data["families"]))

-            self.render_slate(instance)
+            if instance.data.get("bakePresets"):
+                for o_name, o_data in instance.data["bakePresets"].items():
+                    self.log.info("_ o_name: {}, o_data: {}".format(
+                        o_name, pformat(o_data)))
+                    self.render_slate(
+                        instance,
+                        o_name,
+                        o_data["bake_viewer_process"],
+                        o_data["bake_viewer_input_process"]
+                    )
+            else:
+                # backward compatibility
+                self.render_slate(instance)
+
+            # also render image to sequence
+            self._render_slate_to_sequence(instance)

-    def render_slate(self, instance):
-        node_subset_name = instance.data.get("name", None)
-        node = instance[0]  # group node
+    def _create_staging_dir(self, instance):
         self.log.info("Creating staging dir...")

-        if "representations" not in instance.data:
-            instance.data["representations"] = list()
-
         staging_dir = os.path.normpath(
-            os.path.dirname(instance.data['path']))
+            os.path.dirname(instance.data["path"]))

         instance.data["stagingDir"] = staging_dir

+        self.log.info(
+            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

-        frame_start = instance.data["frameStart"]
-        frame_end = instance.data["frameEnd"]
-        handle_start = instance.data["handleStart"]
-        handle_end = instance.data["handleEnd"]
-
-        frame_length = int(
-            (frame_end - frame_start + 1) + (handle_start + handle_end)
-        )
+    def _check_frames_exists(self, instance):
+        # rendering path from group write node
+        fpath = instance.data["path"]
+
+        # instance frame range with handles
+        first = instance.data["frameStartHandle"]
+        last = instance.data["frameEndHandle"]
+
+        padding = fpath.count('#')
+
+        test_path_template = fpath
+        if padding:
+            repl_string = "#" * padding
+            test_path_template = fpath.replace(
+                repl_string, "%0{}d".format(padding))
+
+        for frame in range(first, last + 1):
+            test_file = test_path_template % frame
+            if not os.path.exists(test_file):
+                self.log.debug("__ test_file: `{}`".format(test_file))
+                return None
+
+        return True
+
+    def render_slate(
+        self,
+        instance,
+        output_name=None,
+        bake_viewer_process=True,
+        bake_viewer_input_process=True
+    ):
+        """Slate frame renderer
+
+        Args:
+            instance (PyblishInstance): Pyblish instance with subset data
+            output_name (str, optional):
+                Slate variation name. Defaults to None.
+            bake_viewer_process (bool, optional):
+                Switch for viewer profile baking. Defaults to True.
+            bake_viewer_input_process (bool, optional):
+                Switch for input process node baking. Defaults to True.
+        """
+        slate_node = instance.data["slateNode"]
+
+        # rendering path from group write node
+        fpath = instance.data["path"]
+
+        # instance frame range with handles
+        first_frame = instance.data["frameStartHandle"]
+        last_frame = instance.data["frameEndHandle"]
+
+        # fill slate node with comments
+        self.add_comment_slate_node(instance, slate_node)
+
+        # solve output name if any is set
+        _output_name = output_name or ""
+        if _output_name:
+            _output_name = "_" + _output_name
+
+        slate_first_frame = first_frame - 1

-        temporary_nodes = []
         collection = instance.data.get("collection", None)

         if collection:
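The new `_check_frames_exists` helper above decides whether the slate can be rendered from frames already on disk or must be re-wired from the slate node's upstream input. The core trick is turning a '#'-padded render path into a printf-style template and probing every frame. A standalone sketch of that check using only the standard library; the example path is illustrative:

import os

def frames_exist(fpath, first, last):
    padding = fpath.count("#")
    if not padding:
        # single-file output, nothing to expand
        return os.path.exists(fpath)
    template = fpath.replace("#" * padding, "%0{}d".format(padding))
    # every frame in the inclusive range must already be on disk
    return all(
        os.path.exists(template % frame) for frame in range(first, last + 1)
    )

# frames_exist("/renders/shot010.####.exr", 1001, 1096)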
@@ -75,99 +142,101 @@ class ExtractSlateFrame(openpype.api.Extractor):
             fname = os.path.basename(collection.format(
                 "{head}{padding}{tail}"))
             fhead = collection.format("{head}")

-            collected_frames_len = int(len(collection.indexes))
-
-            # get first and last frame
-            first_frame = min(collection.indexes) - 1
-            self.log.info('frame_length: {}'.format(frame_length))
-            self.log.info(
-                'len(collection.indexes): {}'.format(collected_frames_len)
-            )
-            if ("slate" in instance.data["families"]) \
-                    and (frame_length != collected_frames_len):
-                first_frame += 1
-
-            last_frame = first_frame
         else:
-            fname = os.path.basename(instance.data.get("path", None))
+            fname = os.path.basename(fpath)
             fhead = os.path.splitext(fname)[0] + "."
-            first_frame = instance.data.get("frameStartHandle", None) - 1
-            last_frame = first_frame

         if "#" in fhead:
             fhead = fhead.replace("#", "")[:-1]

-        previous_node = node
+        self.log.debug("__ first_frame: {}".format(first_frame))
+        self.log.debug("__ slate_first_frame: {}".format(slate_first_frame))

-        # get input process and connect it to baking
-        ipn = self.get_view_process_node()
-        if ipn is not None:
-            ipn.setInput(0, previous_node)
-            previous_node = ipn
-            temporary_nodes.append(ipn)
+        # fallback if files does not exists
+        if self._check_frames_exists(instance):
+            # Read node
+            r_node = nuke.createNode("Read")
+            r_node["file"].setValue(fpath)
+            r_node["first"].setValue(first_frame)
+            r_node["origfirst"].setValue(first_frame)
+            r_node["last"].setValue(last_frame)
+            r_node["origlast"].setValue(last_frame)
+            r_node["colorspace"].setValue(instance.data["colorspace"])
+            previous_node = r_node
+            temporary_nodes = [previous_node]
+        else:
+            previous_node = slate_node.dependencies().pop()
+            temporary_nodes = []

-        if not self.viewer_lut_raw:
+        # only create colorspace baking if toggled on
+        if bake_viewer_process:
+            if bake_viewer_input_process:
+                # get input process and connect it to baking
+                ipn = get_view_process_node()
+                if ipn is not None:
+                    ipn.setInput(0, previous_node)
+                    previous_node = ipn
+                    temporary_nodes.append(ipn)
+
+            # add duplicate slate node and connect to previous
+            duply_slate_node = duplicate_node(slate_node)
+            duply_slate_node.setInput(0, previous_node)
+            previous_node = duply_slate_node
+            temporary_nodes.append(duply_slate_node)
+
+            # add viewer display transformation node
             dag_node = nuke.createNode("OCIODisplay")
             dag_node.setInput(0, previous_node)
             previous_node = dag_node
             temporary_nodes.append(dag_node)
+        else:
+            # add duplicate slate node and connect to previous
+            duply_slate_node = duplicate_node(slate_node)
+            duply_slate_node.setInput(0, previous_node)
+            previous_node = duply_slate_node
+            temporary_nodes.append(duply_slate_node)

         # create write node
         write_node = nuke.createNode("Write")
-        file = fhead + "slate.png"
-        path = os.path.join(staging_dir, file).replace("\\", "/")
-        instance.data["slateFrame"] = path
+        file = fhead[:-1] + _output_name + "_slate.png"
+        path = os.path.join(
+            instance.data["stagingDir"], file).replace("\\", "/")
+
+        # add slate path to `slateFrames` instance data attr
+        if not instance.data.get("slateFrames"):
+            instance.data["slateFrames"] = {}
+
+        instance.data["slateFrames"][output_name or "*"] = path
+
+        # create write node
         write_node["file"].setValue(path)
         write_node["file_type"].setValue("png")
         write_node["raw"].setValue(1)
         write_node.setInput(0, previous_node)
         temporary_nodes.append(write_node)

-        # fill slate node with comments
-        self.add_comment_slate_node(instance)
-
         # Render frames
-        nuke.execute(write_node.name(), int(first_frame), int(last_frame))
-        # also render slate as sequence frame
-        nuke.execute(node_subset_name, int(first_frame), int(last_frame))
-
-        self.log.debug(
-            "slate frame path: {}".format(instance.data["slateFrame"]))
+        nuke.execute(
+            write_node.name(), int(slate_first_frame), int(slate_first_frame))

         # Clean up
         for node in temporary_nodes:
             nuke.delete(node)

-    def get_view_process_node(self):
-        # Select only the target node
-        if nuke.selectedNodes():
-            [n.setSelected(False) for n in nuke.selectedNodes()]
-
-        ipn_orig = None
-        for v in [n for n in nuke.allNodes()
-                  if "Viewer" in n.Class()]:
-            ip = v['input_process'].getValue()
-            ipn = v['input_process_node'].getValue()
-            if "VIEWER_INPUT" not in ipn and ip:
-                ipn_orig = nuke.toNode(ipn)
-                ipn_orig.setSelected(True)
-
-        if ipn_orig:
-            nuke.nodeCopy('%clipboard%')
-
-            [n.setSelected(False) for n in nuke.selectedNodes()]  # Deselect all
-
-            nuke.nodePaste('%clipboard%')
-
-            ipn = nuke.selectedNode()
-
-            return ipn
-
-    def add_comment_slate_node(self, instance):
-        node = instance.data.get("slateNode")
-        if not node:
-            return
+    def _render_slate_to_sequence(self, instance):
+        # set slate frame
+        first_frame = instance.data["frameStartHandle"]
+        slate_first_frame = first_frame - 1
+
+        # render slate as sequence frame
+        nuke.execute(
+            instance.data["name"],
+            int(slate_first_frame),
+            int(slate_first_frame)
+        )
+
+    def add_comment_slate_node(self, instance, node):

         comment = instance.context.data.get("comment")
         intent = instance.context.data.get("intent")
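The render_slate body above assembles a linear bake chain: optional viewer input process, a duplicate of the slate node, an OCIODisplay colorspace bake, then a one-frame PNG Write. A condensed sketch of that wiring under the hunk's assumptions (a running Nuke session, the helpers imported earlier, and `r_node`, `slate_node`, `slate_first_frame` from the surrounding method):

temporary_nodes = []

def attach(node, upstream):
    # wire 'node' after 'upstream' and track it for cleanup
    node.setInput(0, upstream)
    temporary_nodes.append(node)
    return node

previous_node = r_node  # Read node from the frames-exist branch
ipn = get_view_process_node()
if ipn is not None:
    previous_node = attach(ipn, previous_node)
previous_node = attach(duplicate_node(slate_node), previous_node)
previous_node = attach(nuke.createNode("OCIODisplay"), previous_node)
write_node = attach(nuke.createNode("Write"), previous_node)

nuke.execute(write_node.name(), int(slate_first_frame), int(slate_first_frame))
for node in temporary_nodes:
    nuke.delete(node)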
@@ -186,8 +255,8 @@ class ExtractSlateFrame(openpype.api.Extractor):
                 "intent": intent
             })

-        for key, value in self.key_value_mapping.items():
-            enabled, template = value
+        for key, _values in self.key_value_mapping.items():
+            enabled, template = _values
             if not enabled:
                 self.log.debug("Key \"{}\" is disabled".format(key))
                 continue
@@ -221,5 +290,5 @@ class ExtractSlateFrame(openpype.api.Extractor):
                 ))
             except NameError:
                 self.log.warning((
-                    "Failed to set value \"{}\" on node attribute \"{}\""
+                    "Failed to set value \"{0}\" on node attribute \"{0}\""
                 ).format(value))
@@ -3,7 +3,10 @@ import os
 import nuke
 import pyblish.api
 import openpype
-from openpype.hosts.nuke.api.lib import maintained_selection
+from openpype.hosts.nuke.api import (
+    maintained_selection,
+    get_view_process_node
+)


 if sys.version_info[0] >= 3:
@@ -17,7 +20,7 @@ class ExtractThumbnail(openpype.api.Extractor):

    """

-    order = pyblish.api.ExtractorOrder + 0.01
+    order = pyblish.api.ExtractorOrder + 0.011
    label = "Extract Thumbnail"

    families = ["review"]
@@ -39,15 +42,32 @@ class ExtractThumbnail(openpype.api.Extractor):
             self.log.debug("instance.data[families]: {}".format(
                 instance.data["families"]))

-            self.render_thumbnail(instance)
+            if instance.data.get("bakePresets"):
+                for o_name, o_data in instance.data["bakePresets"].items():
+                    self.render_thumbnail(instance, o_name, **o_data)
+            else:
+                viewer_process_swithes = {
+                    "bake_viewer_process": True,
+                    "bake_viewer_input_process": True
+                }
+                self.render_thumbnail(instance, None, **viewer_process_swithes)

-    def render_thumbnail(self, instance):
+    def render_thumbnail(self, instance, output_name=None, **kwargs):
         first_frame = instance.data["frameStartHandle"]
         last_frame = instance.data["frameEndHandle"]

+        # find frame range and define middle thumb frame
+        mid_frame = int((last_frame - first_frame) / 2)
+
+        # solve output name if any is set
+        output_name = output_name or ""
+        if output_name:
+            output_name = "_" + output_name
+
+        bake_viewer_process = kwargs["bake_viewer_process"]
+        bake_viewer_input_process_node = kwargs[
+            "bake_viewer_input_process"]
+
         node = instance[0]  # group node
         self.log.info("Creating staging dir...")
@@ -106,17 +126,7 @@ class ExtractThumbnail(openpype.api.Extractor):
             temporary_nodes.append(rnode)
             previous_node = rnode

-        # bake viewer input look node into thumbnail image
-        if self.bake_viewer_input_process:
-            # get input process and connect it to baking
-            ipn = self.get_view_process_node()
-            if ipn is not None:
-                ipn.setInput(0, previous_node)
-                previous_node = ipn
-                temporary_nodes.append(ipn)
-
         reformat_node = nuke.createNode("Reformat")

         ref_node = self.nodes.get("Reformat", None)
         if ref_node:
             for k, v in ref_node:
@@ -129,8 +139,16 @@ class ExtractThumbnail(openpype.api.Extractor):
             previous_node = reformat_node
             temporary_nodes.append(reformat_node)

-        # bake viewer colorspace into thumbnail image
-        if self.bake_viewer_process:
+        # only create colorspace baking if toggled on
+        if bake_viewer_process:
+            if bake_viewer_input_process_node:
+                # get input process and connect it to baking
+                ipn = get_view_process_node()
+                if ipn is not None:
+                    ipn.setInput(0, previous_node)
+                    previous_node = ipn
+                    temporary_nodes.append(ipn)
+
             dag_node = nuke.createNode("OCIODisplay")
             dag_node.setInput(0, previous_node)
             previous_node = dag_node
@@ -138,7 +156,7 @@ class ExtractThumbnail(openpype.api.Extractor):

         # create write node
         write_node = nuke.createNode("Write")
-        file = fhead + "jpg"
+        file = fhead[:-1] + output_name + ".jpg"
         name = "thumbnail"
         path = os.path.join(staging_dir, file).replace("\\", "/")
         instance.data["thumbnail"] = path
@@ -168,30 +186,3 @@ class ExtractThumbnail(openpype.api.Extractor):
         # Clean up
         for node in temporary_nodes:
             nuke.delete(node)
-
-    def get_view_process_node(self):
-
-        # Select only the target node
-        if nuke.selectedNodes():
-            [n.setSelected(False) for n in nuke.selectedNodes()]
-
-        ipn_orig = None
-        for v in [n for n in nuke.allNodes()
-                  if "Viewer" == n.Class()]:
-            ip = v['input_process'].getValue()
-            ipn = v['input_process_node'].getValue()
-            if "VIEWER_INPUT" not in ipn and ip:
-                ipn_orig = nuke.toNode(ipn)
-                ipn_orig.setSelected(True)
-
-        if ipn_orig:
-            nuke.nodeCopy('%clipboard%')
-
-            # Deselect all
-            [n.setSelected(False) for n in nuke.selectedNodes()]
-
-            nuke.nodePaste('%clipboard%')
-
-            ipn = nuke.selectedNode()
-
-            return ipn
@@ -147,7 +147,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     # mapping of instance properties to be transfered to new instance for every
     # specified family
     instance_transfer = {
-        "slate": ["slateFrame"],
+        "slate": ["slateFrames"],
         "review": ["lutPath"],
         "render2d": ["bakingNukeScripts", "version"],
         "renderlayer": ["convertToScanline"]
@@ -1,4 +1,6 @@
 import os
+from pprint import pformat
+import re
 import openpype.api
 import pyblish
 from openpype.lib import (
@@ -21,6 +23,8 @@ class ExtractReviewSlate(openpype.api.Extractor):
     families = ["slate", "review"]
     match = pyblish.api.Subset

+    SUFFIX = "_slate"
+
     hosts = ["nuke", "shell"]
     optional = True
@@ -29,28 +33,19 @@ class ExtractReviewSlate(openpype.api.Extractor):
         if "representations" not in inst_data:
             raise RuntimeError("Burnin needs already created mov to work on.")

-        suffix = "_slate"
-        slate_path = inst_data.get("slateFrame")
+        # get slates frame from upstream
+        slates_data = inst_data.get("slateFrames")
+        if not slates_data:
+            # make it backward compatible and open for slates generator
+            # premium plugin
+            slates_data = {
+                "*": inst_data["slateFrame"]
+            }
+
+        self.log.info("_ slates_data: {}".format(pformat(slates_data)))

         ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")

-        slate_streams = get_ffprobe_streams(slate_path, self.log)
-        # Try to find first stream with defined 'width' and 'height'
-        # - this is to avoid order of streams where audio can be as first
-        # - there may be a better way (checking `codec_type`?)+
-        slate_width = None
-        slate_height = None
-        for slate_stream in slate_streams:
-            if "width" in slate_stream and "height" in slate_stream:
-                slate_width = int(slate_stream["width"])
-                slate_height = int(slate_stream["height"])
-                break
-
-        # Raise exception of any stream didn't define input resolution
-        if slate_width is None:
-            raise AssertionError((
-                "FFprobe couldn't read resolution from input file: \"{}\""
-            ).format(slate_path))
-
         if "reviewToWidth" in inst_data:
             use_legacy_code = True
         else:
@@ -77,6 +72,12 @@ class ExtractReviewSlate(openpype.api.Extractor):
             streams = get_ffprobe_streams(
                 input_path, self.log
             )
+            # get slate data
+            slate_path = self._get_slate_path(input_file, slates_data)
+            self.log.info("_ slate_path: {}".format(slate_path))
+
+            slate_width, slate_height = self._get_slates_resolution(slate_path)
+
             # Get video metadata
             (
                 input_width,
@@ -138,7 +139,7 @@ class ExtractReviewSlate(openpype.api.Extractor):
             _remove_at_end = []

             ext = os.path.splitext(input_file)[1]
-            output_file = input_file.replace(ext, "") + suffix + ext
+            output_file = input_file.replace(ext, "") + self.SUFFIX + ext

             _remove_at_end.append(input_path)
@@ -369,6 +370,43 @@ class ExtractReviewSlate(openpype.api.Extractor):

         self.log.debug(inst_data["representations"])

+    def _get_slate_path(self, input_file, slates_data):
+        slate_path = None
+        for sl_n, _slate_path in slates_data.items():
+            if "*" in sl_n:
+                slate_path = _slate_path
+                break
+            elif re.search(sl_n, input_file):
+                slate_path = _slate_path
+                break
+
+        if not slate_path:
+            raise AttributeError(
+                "Missing slates paths: {}".format(slates_data))
+
+        return slate_path
+
+    def _get_slates_resolution(self, slate_path):
+        slate_streams = get_ffprobe_streams(slate_path, self.log)
+        # Try to find first stream with defined 'width' and 'height'
+        # - this is to avoid order of streams where audio can be as first
+        # - there may be a better way (checking `codec_type`?)+
+        slate_width = None
+        slate_height = None
+        for slate_stream in slate_streams:
+            if "width" in slate_stream and "height" in slate_stream:
+                slate_width = int(slate_stream["width"])
+                slate_height = int(slate_stream["height"])
+                break
+
+        # Raise exception of any stream didn't define input resolution
+        if slate_width is None:
+            raise AssertionError((
+                "FFprobe couldn't read resolution from input file: \"{}\""
+            ).format(slate_path))
+
+        return (slate_width, slate_height)
+
     def _get_video_metadata(self, streams):
         input_timecode = ""
         input_width = None
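`_get_slate_path` above matches each transcoded output against the slates rendered upstream: a key containing "*" acts as a catch-all, and any other key is treated as a regex against the input file name. A standalone sketch of that matching with example data; the file names and paths are illustrative:

import re

def pick_slate(input_file, slates_data):
    for key, path in slates_data.items():
        if "*" in key or re.search(key, input_file):
            return path
    raise AttributeError("Missing slates paths: {}".format(slates_data))

# example mapping shaped like ExtractSlateFrame's 'slateFrames' data
slates = {"baking": "/tmp/slate_baking.png", "*": "/tmp/slate.png"}
print(pick_slate("shot010_baking_h264.mov", slates))  # /tmp/slate_baking.png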
@@ -361,7 +361,7 @@ function Home() {

           <a className="link" href="https://www.blackmagicdesign.com/products/davinciresolve">
             <img src="/img/app_resolve.png" alt="" title=""></img>
-            <span className="caption">DaVinci Resolve (Beta)</span>
+            <span className="caption">Resolve (Beta)</span>
           </a>

           <a className="link" href="https://www.blackmagicdesign.com/products/fusion">
@@ -374,6 +374,16 @@ function Home() {
             <span className="caption">Ftrack</span>
           </a>

+          <a className="link" href="https://www.shotgridsoftware.com/">
+            <img src="/img/app_shotgrid.png" alt="" title=""></img>
+            <span className="caption">Shotgrid (Beta)</span>
+          </a>
+
+          <a className="link" href="https://www.cg-wire.com/en/kitsu.html">
+            <img src="/img/app_kitsu.png" alt="" title=""></img>
+            <span className="caption">Kitsu (Beta)</span>
+          </a>
+
           <a className="link" href="https://clockify.me">
             <img src="/img/app_clockify.png" alt="" title=""></img>
             <span className="caption">Clockify</span>
@@ -384,12 +394,7 @@ function Home() {
             <span className="caption">Deadline</span>
           </a>

-          <a className="link" href="https://www.vvertex.com">
-            <img src="/img/app_muster.png" alt="" title=""></img>
-            <span className="caption">Muster</span>
-          </a>
-
           <a className="link" href="https://www.royalrender.de/index.php/startseite.html">
             <img src="/img/app_royalrender.png" alt="" title=""></img>
             <span className="caption">Royal Render</span>
           </a>
@@ -399,30 +404,30 @@ function Home() {
             <span className="caption">Slack</span>
           </a>

         </div>

-        <p> <b>In development by us or OpenPype community.</b></p>

         <div className={classnames('showcase',)}>

           <a className="link" href="https://j-cube.jp/solutions/multiverse/">
             <img src="/img/app_multiverse.png" alt="" title=""></img>
             <span className="caption">Multiverse</span>
           </a>

-          <a className="link" href="https://www.shotgridsoftware.com/">
-            <img src="/img/app_shotgrid.png" alt="" title=""></img>
-            <span className="caption">Shotgrid</span>
-          </a>
         </div>

         <p> <b>Planned or in development by us and OpenPype community.</b></p>

         <div className={classnames('showcase',)}>

           <a className="link" href="https://fatfi.sh/aquarium/en">
             <img src="/img/app_aquarium.png" alt="" title=""></img>
             <span className="caption">Aquarium</span>
           </a>

-          <a className="link" href="https://www.cg-wire.com/en/kitsu.html">
-            <img src="/img/app_kitsu.png" alt="" title=""></img>
-            <span className="caption">Kitsu</span>
+          <a className="link" href="https://www.vvertex.com">
+            <img src="/img/app_muster.png" alt="" title=""></img>
+            <span className="caption">Muster</span>
           </a>

+          <a className="link" href="https://www.hibob.com">
+            <img src="/img/app_hibob.png" alt="Hi Bob" title="Hi Bob"></img>
+            <span className="caption">Bob</span>
+          </a>
+
         </div>
BIN  website/static/img/app_hibob.png (new file)
Binary file not shown. Size: 16 KiB