Merge branch 'develop' of github.com:pypeclub/OpenPype into bugfix/revert_wrong_targets

This commit is contained in:
Petr Kalis 2022-06-16 11:25:05 +02:00
commit ef3575ab29
9 changed files with 256 additions and 204 deletions

View file

@ -22,7 +22,8 @@ class CreateYetiCache(plugin.Creator):
# Add animation data without step and handles
anim_data = lib.collect_animation_data()
anim_data.pop("step")
anim_data.pop("handles")
anim_data.pop("handleStart")
anim_data.pop("handleEnd")
self.data.update(anim_data)
# Add samples

View file

@ -1,15 +1,13 @@
import os
import json
import re
import glob
from collections import defaultdict
from pprint import pprint
import clique
from maya import cmds
from openpype.api import get_project_settings
from openpype.pipeline import (
legacy_io,
load,
get_representation_path
)
@ -17,7 +15,15 @@ from openpype.hosts.maya.api import lib
from openpype.hosts.maya.api.pipeline import containerise
def set_attribute(attr, value, node):
    """Set `attr` to `value` on `node`, ignoring None values.

    Thin wrapper around ``lib.set_attribute`` that silently skips
    attributes stored as None (the .fursettings JSON uses null for
    empty string attributes, which cannot be applied in Maya).

    Note: parameter order matches ``lib.set_attribute`` and the
    callsites (``set_attribute(attr, value, yeti_node)``). The previous
    signature ``(node, attr, value)`` mislabeled the arguments, which
    made the None guard test the node name instead of the value.

    Args:
        attr (str): Attribute name to set.
        value: Value to apply; when None the call is a no-op.
        node (str): Name of the Maya node holding the attribute.

    Returns:
        None
    """
    if value is None:
        return
    lib.set_attribute(attr, value, node)
class YetiCacheLoader(load.LoaderPlugin):
"""Load Yeti Cache with one or more Yeti nodes"""
families = ["yeticache", "yetiRig"]
representations = ["fur"]
@ -28,6 +34,16 @@ class YetiCacheLoader(load.LoaderPlugin):
color = "orange"
def load(self, context, name=None, namespace=None, data=None):
"""Loads a .fursettings file defining how to load .fur sequences
A single yeticache or yetiRig can have more than a single pgYetiMaya
nodes and thus load more than a single yeti.fur sequence.
The .fursettings file defines what the node names should be and also
what "cbId" attribute they should receive to match the original source
and allow published looks to also work for Yeti rigs and its caches.
"""
try:
family = context["representation"]["context"]["family"]
@ -43,22 +59,11 @@ class YetiCacheLoader(load.LoaderPlugin):
if not cmds.pluginInfo("pgYetiMaya", query=True, loaded=True):
cmds.loadPlugin("pgYetiMaya", quiet=True)
# Get JSON
fbase = re.search(r'^(.+)\.(\d+|#+)\.fur', self.fname)
if not fbase:
raise RuntimeError('Cannot determine fursettings file path')
settings_fname = "{}.fursettings".format(fbase.group(1))
with open(settings_fname, "r") as fp:
fursettings = json.load(fp)
# Check if resources map exists
# Get node name from JSON
if "nodes" not in fursettings:
raise RuntimeError("Encountered invalid data, expect 'nodes' in "
"fursettings.")
node_data = fursettings["nodes"]
nodes = self.create_nodes(namespace, node_data)
# Create Yeti cache nodes according to settings
settings = self.read_settings(self.fname)
nodes = []
for node in settings["nodes"]:
nodes.extend(self.create_node(namespace, node))
group_name = "{}:{}".format(namespace, name)
group_node = cmds.group(nodes, name=group_name)
@ -111,28 +116,14 @@ class YetiCacheLoader(load.LoaderPlugin):
def update(self, container, representation):
legacy_io.install()
namespace = container["namespace"]
container_node = container["objectName"]
fur_settings = legacy_io.find_one(
{"parent": representation["parent"], "name": "fursettings"}
)
pprint({"parent": representation["parent"], "name": "fursettings"})
pprint(fur_settings)
assert fur_settings is not None, (
"cannot find fursettings representation"
)
settings_fname = get_representation_path(fur_settings)
path = get_representation_path(representation)
# Get all node data
with open(settings_fname, "r") as fp:
settings = json.load(fp)
settings = self.read_settings(path)
# Collect scene information of asset
set_members = cmds.sets(container["objectName"], query=True)
set_members = lib.get_container_members(container)
container_root = lib.get_container_transforms(container,
members=set_members,
root=True)
@ -147,7 +138,7 @@ class YetiCacheLoader(load.LoaderPlugin):
# Re-assemble metadata with cbId as keys
meta_data_lookup = {n["cbId"]: n for n in settings["nodes"]}
# Compare look ups and get the nodes which ar not relevant any more
# Delete nodes by "cbId" that are not in the updated version
to_delete_lookup = {cb_id for cb_id in scene_lookup.keys() if
cb_id not in meta_data_lookup}
if to_delete_lookup:
@ -163,25 +154,18 @@ class YetiCacheLoader(load.LoaderPlugin):
fullPath=True) or []
to_remove.extend(shapes + transforms)
# Remove id from look uop
# Remove id from lookup
scene_lookup.pop(_id, None)
cmds.delete(to_remove)
# replace frame in filename with %04d
RE_frame = re.compile(r"(\d+)(\.fur)$")
file_name = re.sub(RE_frame, r"%04d\g<2>", os.path.basename(path))
for cb_id, data in meta_data_lookup.items():
# Update cache file name
data["attrs"]["cacheFileName"] = os.path.join(
os.path.dirname(path), file_name)
for cb_id, node_settings in meta_data_lookup.items():
if cb_id not in scene_lookup:
# Create new nodes
self.log.info("Creating new nodes ..")
new_nodes = self.create_nodes(namespace, [data])
new_nodes = self.create_node(namespace, node_settings)
cmds.sets(new_nodes, addElement=container_node)
cmds.parent(new_nodes, container_root)
@ -218,14 +202,8 @@ class YetiCacheLoader(load.LoaderPlugin):
children=True)
yeti_node = yeti_nodes[0]
for attr, value in data["attrs"].items():
# handle empty attribute strings. Those are reported
# as None, so their type is NoneType and this is not
# supported on attributes in Maya. We change it to
# empty string.
if value is None:
value = ""
lib.set_attribute(attr, value, yeti_node)
for attr, value in node_settings["attrs"].items():
set_attribute(attr, value, yeti_node)
cmds.setAttr("{}.representation".format(container_node),
str(representation["_id"]),
@ -235,7 +213,6 @@ class YetiCacheLoader(load.LoaderPlugin):
self.update(container, representation)
# helper functions
def create_namespace(self, asset):
"""Create a unique namespace
Args:
@ -253,100 +230,122 @@ class YetiCacheLoader(load.LoaderPlugin):
return namespace
def validate_cache(self, filename, pattern="%04d"):
"""Check if the cache has more than 1 frame
def get_cache_node_filepath(self, root, node_name):
"""Get the cache file path for one of the yeti nodes.
All caches with more than 1 frame need to be called with `%04d`
If the cache has only one frame we return that file name as we assume
All caches with more than 1 frame need cache file name set with `%04d`
If the cache has only one frame we return the file name as we assume
it is a snapshot.
This expects the files to be named after the "node name" through
exports with <Name> in Yeti.
Args:
filename(str)
pattern(str)
root(str): Folder containing cache files to search in.
node_name(str): Node name to search cache files for
Returns:
str
str: Cache file path value needed for cacheFileName attribute
"""
glob_pattern = filename.replace(pattern, "*")
name = node_name.replace(":", "_")
pattern = r"^({name})(\.[0-4]+)?(\.fur)$".format(name=re.escape(name))
escaped = re.escape(filename)
re_pattern = escaped.replace(pattern, "-?[0-9]+")
files = glob.glob(glob_pattern)
files = [str(f) for f in files if re.match(re_pattern, f)]
files = [fname for fname in os.listdir(root) if re.match(pattern,
fname)]
if not files:
self.log.error("Could not find cache files for '{}' "
"with pattern {}".format(node_name, pattern))
return
if len(files) == 1:
return files[0]
elif len(files) == 0:
self.log.error("Could not find cache files for '%s'" % filename)
# Single file
return os.path.join(root, files[0])
return filename
# Get filename for the sequence with padding
collections, remainder = clique.assemble(files)
assert not remainder, "This is a bug"
assert len(collections) == 1, "This is a bug"
collection = collections[0]
def create_nodes(self, namespace, settings):
# Formats name as {head}%d{tail} like cache.%04d.fur
fname = collection.format("{head}{padding}{tail}")
return os.path.join(root, fname)
def create_node(self, namespace, node_settings):
"""Create nodes with the correct namespace and settings
Args:
namespace(str): namespace
settings(list): list of dictionaries
node_settings(dict): Single "nodes" entry from .fursettings file.
Returns:
list
list: Created nodes
"""
nodes = []
for node_settings in settings:
# Create pgYetiMaya node
original_node = node_settings["name"]
node_name = "{}:{}".format(namespace, original_node)
yeti_node = cmds.createNode("pgYetiMaya", name=node_name)
# Get original names and ids
orig_transform_name = node_settings["transform"]["name"]
orig_shape_name = node_settings["name"]
# Create transform node
transform_node = node_name.rstrip("Shape")
# Add namespace
transform_name = "{}:{}".format(namespace, orig_transform_name)
shape_name = "{}:{}".format(namespace, orig_shape_name)
lib.set_id(transform_node, node_settings["transform"]["cbId"])
lib.set_id(yeti_node, node_settings["cbId"])
# Create pgYetiMaya node
transform_node = cmds.createNode("transform",
name=transform_name)
yeti_node = cmds.createNode("pgYetiMaya",
name=shape_name,
parent=transform_node)
nodes.extend([transform_node, yeti_node])
lib.set_id(transform_node, node_settings["transform"]["cbId"])
lib.set_id(yeti_node, node_settings["cbId"])
# Ensure the node has no namespace identifiers
attributes = node_settings["attrs"]
nodes.extend([transform_node, yeti_node])
# Check if cache file name is stored
# Update attributes with defaults
attributes = node_settings["attrs"]
attributes.update({
"viewportDensity": 0.1,
"verbosity": 2,
"fileMode": 1,
# get number of # in path and convert it to C prinf format
# like %04d expected by Yeti
fbase = re.search(r'^(.+)\.(\d+|#+)\.fur', self.fname)
if not fbase:
raise RuntimeError('Cannot determine file path')
padding = len(fbase.group(2))
if "cacheFileName" not in attributes:
cache = "{}.%0{}d.fur".format(fbase.group(1), padding)
# Fix render stats, like Yeti's own
# ../scripts/pgYetiNode.mel script
"visibleInReflections": True,
"visibleInRefractions": True
})
self.validate_cache(cache)
attributes["cacheFileName"] = cache
# Apply attributes to pgYetiMaya node
for attr, value in attributes.items():
set_attribute(attr, value, yeti_node)
# Update attributes with requirements
attributes.update({"viewportDensity": 0.1,
"verbosity": 2,
"fileMode": 1})
# Apply attributes to pgYetiMaya node
for attr, value in attributes.items():
if value is None:
continue
lib.set_attribute(attr, value, yeti_node)
# Fix for : YETI-6
# Fixes the render stats (this is literally taken from Perigrene's
# ../scripts/pgYetiNode.mel script)
cmds.setAttr("{}.visibleInReflections".format(yeti_node), True)
cmds.setAttr("{}.visibleInRefractions".format(yeti_node), True)
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
# Connect to the time node
cmds.connectAttr("time1.outTime", "%s.currentTime" % yeti_node)
return nodes
def read_settings(self, path):
    """Load a .fursettings file and resolve each node's cache path.

    Parses the JSON settings published alongside the caches and, for
    every entry under "nodes", fills in the ``cacheFileName`` attribute
    by locating the matching .fur files next to the settings file.

    Args:
        path (str): Path to the .fursettings JSON file.

    Returns:
        dict: Parsed settings with ``attrs["cacheFileName"]`` computed
            for every node entry.

    Raises:
        RuntimeError: When the settings file has no "nodes" key.
    """
    with open(path, "r") as handle:
        settings = json.load(handle)

    if "nodes" not in settings:
        raise RuntimeError("Encountered invalid data, "
                           "expected 'nodes' in fursettings.")

    # Caches live in the same folder as the settings file
    cache_root = os.path.dirname(path)
    for node in settings["nodes"]:
        # Older publishes may lack 'attrs' entirely; default to empty
        node_attrs = node.get("attrs", {})
        node_attrs["cacheFileName"] = self.get_cache_node_filepath(
            root=cache_root, node_name=node["name"])
        node["attrs"] = node_attrs

    return settings

View file

@ -43,11 +43,12 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
instance.data["resources"] = yeti_resources
# Force frame range for export
instance.data["frameStart"] = cmds.playbackOptions(
query=True, animationStartTime=True)
instance.data["frameEnd"] = cmds.playbackOptions(
query=True, animationStartTime=True)
# Force frame range for yeti cache export for the rig
start = cmds.playbackOptions(query=True, animationStartTime=True)
for key in ["frameStart", "frameEnd",
"frameStartHandle", "frameEndHandle"]:
instance.data[key] = start
instance.data["preroll"] = 0
def collect_input_connections(self, instance):
"""Collect the inputs for all nodes in the input_SET"""

View file

@ -25,13 +25,10 @@ class ExtractYetiCache(openpype.api.Extractor):
# Define extract output file path
dirname = self.staging_dir(instance)
# Yeti related staging dirs
data_file = os.path.join(dirname, "yeti.fursettings")
# Collect information for writing cache
start_frame = instance.data.get("frameStartHandle")
end_frame = instance.data.get("frameEndHandle")
preroll = instance.data.get("preroll")
start_frame = instance.data["frameStartHandle"]
end_frame = instance.data["frameEndHandle"]
preroll = instance.data["preroll"]
if preroll > 0:
start_frame -= preroll
@ -57,32 +54,35 @@ class ExtractYetiCache(openpype.api.Extractor):
cache_files = [x for x in os.listdir(dirname) if x.endswith(".fur")]
self.log.info("Writing metadata file")
settings = instance.data.get("fursettings", None)
if settings is not None:
with open(data_file, "w") as fp:
json.dump(settings, fp, ensure_ascii=False)
settings = instance.data["fursettings"]
fursettings_path = os.path.join(dirname, "yeti.fursettings")
with open(fursettings_path, "w") as fp:
json.dump(settings, fp, ensure_ascii=False)
# build representations
if "representations" not in instance.data:
instance.data["representations"] = []
self.log.info("cache files: {}".format(cache_files[0]))
instance.data["representations"].append(
{
'name': 'fur',
'ext': 'fur',
'files': cache_files[0] if len(cache_files) == 1 else cache_files,
'stagingDir': dirname,
'frameStart': int(start_frame),
'frameEnd': int(end_frame)
}
)
# Workaround: We do not explicitly register these files with the
# representation solely so that we can write multiple sequences
# a single Subset without renaming - it's a bit of a hack
# TODO: Implement better way to manage this sort of integration
if 'transfers' not in instance.data:
instance.data['transfers'] = []
publish_dir = instance.data["publishDir"]
for cache_filename in cache_files:
src = os.path.join(dirname, cache_filename)
dst = os.path.join(publish_dir, os.path.basename(cache_filename))
instance.data['transfers'].append([src, dst])
instance.data["representations"].append(
{
'name': 'fursettings',
'name': 'fur',
'ext': 'fursettings',
'files': os.path.basename(data_file),
'files': os.path.basename(fursettings_path),
'stagingDir': dirname
}
)

View file

@ -124,8 +124,8 @@ class ExtractYetiRig(openpype.api.Extractor):
settings_path = os.path.join(dirname, "yeti.rigsettings")
# Yeti related staging dirs
maya_path = os.path.join(
dirname, "yeti_rig.{}".format(self.scene_type))
maya_path = os.path.join(dirname,
"yeti_rig.{}".format(self.scene_type))
self.log.info("Writing metadata file")
@ -157,7 +157,7 @@ class ExtractYetiRig(openpype.api.Extractor):
input_set = next(i for i in instance if i == "input_SET")
# Get all items
set_members = cmds.sets(input_set, query=True)
set_members = cmds.sets(input_set, query=True) or []
set_members += cmds.listRelatives(set_members,
allDescendents=True,
fullPath=True) or []
@ -167,7 +167,7 @@ class ExtractYetiRig(openpype.api.Extractor):
resources = instance.data.get("resources", {})
with disconnect_plugs(settings, members):
with yetigraph_attribute_values(resources_dir, resources):
with maya.attribute_values(attr_value):
with lib.attribute_values(attr_value):
cmds.select(nodes, noExpand=True)
cmds.file(maya_path,
force=True,

View file

@ -23,9 +23,13 @@ class ExtractThumbnail(openpype.api.Extractor):
families = ["review"]
hosts = ["nuke"]
# presets
# settings
use_rendered = False
bake_viewer_process = True
bake_viewer_input_process = True
nodes = {}
def process(self, instance):
if "render.farm" in instance.data["families"]:
return
@ -38,11 +42,17 @@ class ExtractThumbnail(openpype.api.Extractor):
self.render_thumbnail(instance)
def render_thumbnail(self, instance):
first_frame = instance.data["frameStartHandle"]
last_frame = instance.data["frameEndHandle"]
# find frame range and define middle thumb frame
mid_frame = int((last_frame - first_frame) / 2)
node = instance[0] # group node
self.log.info("Creating staging dir...")
if "representations" not in instance.data:
instance.data["representations"] = list()
instance.data["representations"] = []
staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))
@ -53,48 +63,59 @@ class ExtractThumbnail(openpype.api.Extractor):
"StagingDir `{0}`...".format(instance.data["stagingDir"]))
temporary_nodes = []
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# try to connect already rendered images
if self.use_rendered:
collection = instance.data.get("collection", None)
self.log.debug("__ collection: `{}`".format(collection))
# get first and last frame
first_frame = min(collection.indexes)
last_frame = max(collection.indexes)
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]
thumb_fname = list(collection)[mid_frame]
else:
fname = thumb_fname = os.path.basename(
instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
path_render = os.path.join(staging_dir, fname).replace("\\", "/")
# check if file exist otherwise connect to write node
if os.path.isfile(path_render):
rnode = nuke.createNode("Read")
self.log.debug("__ fhead: `{}`".format(fhead))
rnode["file"].setValue(path_render)
if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]
rnode["first"].setValue(first_frame)
rnode["origfirst"].setValue(first_frame)
rnode["last"].setValue(last_frame)
rnode["origlast"].setValue(last_frame)
temporary_nodes.append(rnode)
previous_node = rnode
else:
previous_node = node
path_render = os.path.join(
staging_dir, thumb_fname).replace("\\", "/")
self.log.debug("__ path_render: `{}`".format(path_render))
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
# check if file exist otherwise connect to write node
if os.path.isfile(path_render):
rnode = nuke.createNode("Read")
rnode["file"].setValue(path_render)
# turn it raw if none of baking is ON
if all([
not self.bake_viewer_input_process,
not self.bake_viewer_process
]):
rnode["raw"].setValue(True)
temporary_nodes.append(rnode)
previous_node = rnode
else:
previous_node = node
# bake viewer input look node into thumbnail image
if self.bake_viewer_input_process:
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
reformat_node = nuke.createNode("Reformat")
@ -110,10 +131,12 @@ class ExtractThumbnail(openpype.api.Extractor):
previous_node = reformat_node
temporary_nodes.append(reformat_node)
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# bake viewer colorspace into thumbnail image
if self.bake_viewer_process:
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
@ -128,26 +151,18 @@ class ExtractThumbnail(openpype.api.Extractor):
temporary_nodes.append(write_node)
tags = ["thumbnail", "publish_on_farm"]
# retime for
mid_frame = int((int(last_frame) - int(first_frame)) / 2) \
+ int(first_frame)
first_frame = int(last_frame) / 2
last_frame = int(last_frame) / 2
repre = {
'name': name,
'ext': "jpg",
"outputName": "thumb",
'files': file,
"stagingDir": staging_dir,
"frameStart": first_frame,
"frameEnd": last_frame,
"tags": tags
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(mid_frame), int(mid_frame))
nuke.execute(write_node.name(), mid_frame, mid_frame)
self.log.debug(
"representations: {}".format(instance.data["representations"]))

View file

@ -2,6 +2,7 @@ import os
import time
import datetime
import threading
from Qt import QtCore, QtWidgets, QtGui
import ftrack_api
@ -48,6 +49,9 @@ class FtrackTrayWrapper:
self.widget_login.activateWindow()
self.widget_login.raise_()
def show_ftrack_browser(self):
QtGui.QDesktopServices.openUrl(self.module.ftrack_url)
def validate(self):
validation = False
cred = credentials.get_credentials()
@ -284,6 +288,13 @@ class FtrackTrayWrapper:
tray_server_menu.addAction(self.action_server_stop)
self.tray_server_menu = tray_server_menu
# Ftrack Browser
browser_open = QtWidgets.QAction("Open Ftrack...", tray_menu)
browser_open.triggered.connect(self.show_ftrack_browser)
tray_menu.addAction(browser_open)
self.browser_open = browser_open
self.bool_logged = False
self.set_menu_visibility()

View file

@ -166,6 +166,9 @@
},
"ExtractThumbnail": {
"enabled": true,
"use_rendered": true,
"bake_viewer_process": true,
"bake_viewer_input_process": true,
"nodes": {
"Reformat": [
[

View file

@ -135,9 +135,31 @@
"label": "Enabled"
},
{
"type": "raw-json",
"key": "nodes",
"label": "Nodes"
"type": "boolean",
"key": "use_rendered",
"label": "Use rendered images"
},
{
"type": "boolean",
"key": "bake_viewer_process",
"label": "Bake viewer process"
},
{
"type": "boolean",
"key": "bake_viewer_input_process",
"label": "Bake viewer input process"
},
{
"type": "collapsible-wrap",
"label": "Nodes",
"collapsible": true,
"children": [
{
"type": "raw-json",
"key": "nodes",
"label": "Nodes"
}
]
}
]
},