Use json sidecar instead of querying ass file for IDs.

This commit is contained in:
Toke Stuart Jepsen 2023-03-31 11:23:12 +01:00
parent e04523966f
commit 10fdeb5243
4 changed files with 37 additions and 52 deletions

View file

@@ -1,4 +1,6 @@
import os
from collections import defaultdict
import json
from maya import cmds
import arnold
@@ -17,8 +19,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
def process(self, instance):
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
file_path = os.path.join(staging_dir, filename)
file_path = os.path.join(staging_dir, "{}.ass".format(instance.name))
# Mask
mask = arnold.AI_NODE_ALL
@@ -69,7 +70,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
"mask": mask
}
filenames = self._extract(
filenames, nodes_by_id = self._extract(
instance.data["contentMembers"], attribute_data, kwargs
)
@@ -86,6 +87,19 @@ class ExtractArnoldSceneSource(publish.Extractor):
instance.data["representations"].append(representation)
json_path = os.path.join(staging_dir, "{}.json".format(instance.name))
with open(json_path, "w") as f:
json.dump(nodes_by_id, f)
representation = {
"name": "json",
"ext": "json",
"files": os.path.basename(json_path),
"stagingDir": staging_dir
}
instance.data["representations"].append(representation)
self.log.info(
"Extracted instance {} to: {}".format(instance.name, staging_dir)
)
@@ -95,7 +109,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
return
kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
filenames = self._extract(
filenames, _ = self._extract(
instance.data["proxy"], attribute_data, kwargs
)
@@ -115,6 +129,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
"Writing {} with:\n{}".format(kwargs["filename"], kwargs)
)
filenames = []
nodes_by_id = defaultdict(list)
# Duplicating nodes so they are direct children of the world. This
# makes the hierarchy of any exported ass file the same.
with lib.delete_after() as delete_bin:
@@ -158,7 +173,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
# Copy cbId to mtoa_constant.
for node in duplicate_nodes:
lib.set_attribute("mtoa_constant_cbId", lib.get_id(node), node)
nodes_by_id[lib.get_id(node)].append(node.replace("|", "/"))
with lib.attribute_values(attribute_data):
with lib.maintained_selection():
@@ -178,4 +193,4 @@ class ExtractArnoldSceneSource(publish.Extractor):
self.log.info("Exported: {}".format(filenames))
return filenames
return filenames, nodes_by_id

View file

@@ -1,5 +1,5 @@
import os
import re
import json
from collections import defaultdict
import logging
@@ -58,52 +58,24 @@ def calculate_visibility_mask(attributes):
return mask
def get_id_by_node(path):
"""Get node id from Arnold Scene Source.
def get_nodes_by_id(standin):
"""Get node id from aiStandIn via json sidecar.
Args:
path (string): Path to Arnold Scene Source.
standin (string): aiStandIn node.
Returns:
(dict): Dictionary with node full name/path and id.
"""
import arnold
results = defaultdict(list)
path = cmds.getAttr(standin + ".dso")
json_path = None
for f in os.listdir(os.path.dirname(path)):
if f.endswith(".json"):
json_path = os.path.join(os.path.dirname(path), f)
break
arnold.AiBegin()
arnold.AiMsgSetConsoleFlags(arnold.AI_LOG_ALL)
arnold.AiSceneLoad(None, path, None)
# Iterate over all shader nodes
iter = arnold.AiUniverseGetNodeIterator(arnold.AI_NODE_SHAPE)
while not arnold.AiNodeIteratorFinished(iter):
node = arnold.AiNodeIteratorGetNext(iter)
if arnold.AiNodeIs(node, "polymesh"):
node_name = arnold.AiNodeGetName(node)
results[arnold.AiNodeGetStr(node, "cbId")].append(node_name)
arnold.AiNodeIteratorDestroy(iter)
arnold.AiEnd()
return results
def get_standin_path(node):
path = cmds.getAttr(node + ".dso")
# Account for frame extension.
basename = os.path.basename(path)
current_frame = 1
pattern = "(#+)"
matches = re.findall(pattern, basename)
if matches:
substring = "%{}d".format(str(len(matches[0])).zfill(2))
path = path.replace(matches[0], substring)
path = path % current_frame
return path
with open(json_path, "r") as f:
return json.load(f)
def shading_engine_assignments(shading_engine, attribute, nodes, assignments):
@@ -136,7 +108,7 @@ def shading_engine_assignments(shading_engine, attribute, nodes, assignments):
def assign_look(standin, subset):
log.info("Assigning {} to {}.".format(subset, standin))
nodes_by_id = get_id_by_node(get_standin_path(standin))
nodes_by_id = get_nodes_by_id(standin)
# Group by asset id so we run over the look per asset
node_ids_by_asset_id = defaultdict(set)
@@ -161,8 +133,7 @@ def assign_look(standin, subset):
continue
relationships = lib.get_look_relationships(version["_id"])
shader_nodes, container_nodes = lib.load_look(version["_id"])
container_node = container_nodes[0]
shader_nodes, container_node = lib.load_look(version["_id"])
namespace = shader_nodes[0].split(":")[0]
# Get only the node ids and paths related to this asset

View file

@@ -87,8 +87,7 @@ def create_asset_id_hash(nodes):
id = k.split(":")[0]
node_id_hash[id].append(node)
elif cmds.nodeType(node) == "aiStandIn":
path = arnold_standin.get_standin_path(node)
for id, _ in arnold_standin.get_id_by_node(path).items():
for id, _ in arnold_standin.get_nodes_by_id(node).items():
id = id.split(":")[0]
node_id_hash[id].append(node)
else:

View file

@@ -82,6 +82,6 @@ def load_look(version_id):
# Reference the look file
with lib.maintained_selection():
container_node = load_container(loader, look_representation)
container_node = load_container(loader, look_representation)[0]
return lib.get_container_members(container_node), container_node