Merge branch 'develop' into bugfix/OP-7281_Maya-Review---playblast-renders-without-textures

This commit is contained in:
Kayla Man 2023-11-27 18:09:29 +08:00
commit 2dda5c7007
152 changed files with 3379 additions and 1074 deletions

View file

@ -35,6 +35,8 @@ body:
label: Version
description: What version are you running? Look in the OpenPype Tray.
options:
- 3.17.7-nightly.3
- 3.17.7-nightly.2
- 3.17.7-nightly.1
- 3.17.6
- 3.17.6-nightly.3
@ -133,8 +135,6 @@ body:
- 3.15.2-nightly.6
- 3.15.2-nightly.5
- 3.15.2-nightly.4
- 3.15.2-nightly.3
- 3.15.2-nightly.2
validations:
required: true
- type: dropdown

View file

@ -44,6 +44,8 @@ from .entities import (
get_thumbnail_id_from_source,
get_workfile_info,
get_asset_name_identifier,
)
from .entity_links import (
@ -108,4 +110,6 @@ __all__ = (
"get_linked_representation_id",
"create_project",
"get_asset_name_identifier",
)

View file

@ -4,3 +4,22 @@ if not AYON_SERVER_ENABLED:
from .mongo.entities import *
else:
from .server.entities import *
def get_asset_name_identifier(asset_doc):
"""Get asset name identifier by asset document.
This function is added because of AYON implementation where name
identifier is not just a name but full path.
Asset document must have "name" key, and "data.parents" when in AYON mode.
Args:
asset_doc (dict[str, Any]): Asset document.
"""
if not AYON_SERVER_ENABLED:
return asset_doc["name"]
parents = list(asset_doc["data"]["parents"])
parents.append(asset_doc["name"])
return "/" + "/".join(parents)

View file

@ -182,6 +182,19 @@ def get_asset_by_name(project_name, asset_name, fields=None):
return None
def _folders_query(project_name, con, fields, **kwargs):
if fields is None or "tasks" in fields:
folders = get_folders_with_tasks(
con, project_name, fields=fields, **kwargs
)
else:
folders = con.get_folders(project_name, fields=fields, **kwargs)
for folder in folders:
yield folder
def get_assets(
project_name,
asset_ids=None,
@ -201,20 +214,39 @@ def get_assets(
fields = folder_fields_v3_to_v4(fields, con)
kwargs = dict(
folder_ids=asset_ids,
folder_names=asset_names,
parent_ids=parent_ids,
active=active,
fields=fields
)
if not asset_names:
for folder in _folders_query(project_name, con, fields, **kwargs):
yield convert_v4_folder_to_v3(folder, project_name)
return
if fields is None or "tasks" in fields:
folders = get_folders_with_tasks(con, project_name, **kwargs)
new_asset_names = set()
folder_paths = set()
for name in asset_names:
if "/" in name:
folder_paths.add(name)
else:
new_asset_names.add(name)
else:
folders = con.get_folders(project_name, **kwargs)
yielded_ids = set()
if folder_paths:
for folder in _folders_query(
project_name, con, fields, folder_paths=folder_paths, **kwargs
):
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
for folder in folders:
yield convert_v4_folder_to_v3(folder, project_name)
if not new_asset_names:
return
for folder in _folders_query(
project_name, con, fields, folder_names=new_asset_names, **kwargs
):
if folder["id"] not in yielded_ids:
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
def get_archived_assets(
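Reviewer note: a small sketch of how the reworked get_assets() above treats the requested names (hypothetical input; the actual server queries are omitted). Values containing "/" are queried as full folder paths, the rest as plain folder names, and yielded_ids keeps a folder matched by both queries from being yielded twice:

    asset_names = ["sh010", "/shots/sq01/sh010", "chair"]

    folder_paths = set()
    new_asset_names = set()
    for name in asset_names:
        if "/" in name:
            folder_paths.add(name)
        else:
            new_asset_names.add(name)

    print(folder_paths)     # {'/shots/sq01/sh010'}
    print(new_asset_names)  # {'sh010', 'chair'}

    # De-duplication pattern used across the two queries (illustrative ids).
    yielded_ids = set()
    for folder_id in ["id-1", "id-2", "id-1"]:
        if folder_id in yielded_ids:
            continue
        yielded_ids.add(folder_id)
        print(folder_id)    # prints "id-1", then "id-2"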

View file

@ -170,7 +170,7 @@ class HostBase(object):
if project_name:
items.append(project_name)
if asset_name:
items.append(asset_name)
items.append(asset_name.lstrip("/"))
if task_name:
items.append(task_name)
if items:

View file

@ -1,3 +1,4 @@
from openpype import AYON_SERVER_ENABLED
import openpype.hosts.aftereffects.api as api
from openpype.client import get_asset_by_name
from openpype.pipeline import (
@ -43,6 +44,14 @@ class AEWorkfileCreator(AutoCreator):
task_name = context.get_current_task_name()
host_name = context.host_name
existing_asset_name = None
if existing_instance is not None:
if AYON_SERVER_ENABLED:
existing_asset_name = existing_instance.get("folderPath")
if existing_asset_name is None:
existing_asset_name = existing_instance["asset"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@ -50,10 +59,13 @@ class AEWorkfileCreator(AutoCreator):
project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
@ -68,7 +80,7 @@ class AEWorkfileCreator(AutoCreator):
new_instance.data_to_store())
elif (
existing_instance["asset"] != asset_name
existing_asset_name != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -76,6 +88,10 @@ class AEWorkfileCreator(AutoCreator):
self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,6 +1,8 @@
import os
import pyblish.api
from openpype.client import get_asset_name_identifier
from openpype.pipeline.create import get_subset_name
@ -48,9 +50,11 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
asset_name = get_asset_name_identifier(asset_entity)
instance_data = {
"active": True,
"asset": asset_entity["name"],
"asset": asset_name,
"task": task,
"frameStart": context.data['frameStart'],
"frameEnd": context.data['frameEnd'],

View file

@ -6,11 +6,11 @@ from typing import Dict, List, Optional
import bpy
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import (
Creator,
CreatedInstance,
LoaderPlugin,
get_current_task_name,
)
from openpype.lib import BoolDef
@ -225,7 +225,12 @@ class BaseCreator(Creator):
bpy.context.scene.collection.children.link(instances)
# Create asset group
name = prepare_scene_name(instance_data["asset"], subset_name)
if AYON_SERVER_ENABLED:
asset_name = instance_data["folderPath"]
else:
asset_name = instance_data["asset"]
name = prepare_scene_name(asset_name, subset_name)
if self.create_as_asset_group:
# Create instance as empty
instance_node = bpy.data.objects.new(name=name, object_data=None)
@ -281,7 +286,14 @@ class BaseCreator(Creator):
Args:
update_list(List[UpdateData]): Changed instances
and their changes, as a list of tuples."""
and their changes, as a list of tuples.
"""
if AYON_SERVER_ENABLED:
asset_name_key = "folderPath"
else:
asset_name_key = "asset"
for created_instance, changes in update_list:
data = created_instance.data_to_store()
node = created_instance.transient_data["instance_node"]
@ -295,11 +307,12 @@ class BaseCreator(Creator):
# Rename the instance node in the scene if subset or asset changed
if (
"subset" in changes.changed_keys
or "asset" in changes.changed_keys
"subset" in changes.changed_keys
or asset_name_key in changes.changed_keys
):
asset_name = data[asset_name_key]
name = prepare_scene_name(
asset=data["asset"], subset=data["subset"]
asset=asset_name, subset=data["subset"]
)
node.name = name

View file

@ -1,5 +1,6 @@
import bpy
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import CreatedInstance, AutoCreator
from openpype.client import get_asset_by_name
from openpype.hosts.blender.api.plugin import BaseCreator
@ -24,7 +25,7 @@ class CreateWorkfile(BaseCreator, AutoCreator):
def create(self):
"""Create workfile instances."""
current_instance = next(
existing_instance = next(
(
instance for instance in self.create_context.instances
if instance.creator_identifier == self.identifier
@ -37,16 +38,27 @@ class CreateWorkfile(BaseCreator, AutoCreator):
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
if not current_instance:
existing_asset_name = None
if existing_instance is not None:
if AYON_SERVER_ENABLED:
existing_asset_name = existing_instance.get("folderPath")
if existing_asset_name is None:
existing_asset_name = existing_instance["asset"]
if not existing_instance:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
task_name, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": task_name,
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(
self.get_dynamic_data(
task_name,
@ -54,7 +66,7 @@ class CreateWorkfile(BaseCreator, AutoCreator):
asset_doc,
project_name,
host_name,
current_instance,
existing_instance,
)
)
self.log.info("Auto-creating workfile instance...")
@ -65,17 +77,21 @@ class CreateWorkfile(BaseCreator, AutoCreator):
current_instance.transient_data["instance_node"] = instance_node
self._add_instance_to_context(current_instance)
elif (
current_instance["asset"] != asset_name
or current_instance["task"] != task_name
existing_asset_name != asset_name
or existing_instance["task"] != task_name
):
# Update instance context if it's different
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
task_name, task_name, asset_doc, project_name, host_name
)
current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
def collect_instances(self):

View file

@ -19,7 +19,10 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
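Reviewer note: this asset-prefixed staging file name recurs in the Blender extractors below; a tiny illustration with made-up values:

    # Hypothetical instance data illustrating the new staging file name.
    asset_name = "sh010"
    subset = "modelMain"
    instance_name = f"{asset_name}_{subset}"
    filename = f"{instance_name}.abc"
    print(filename)  # -> "sh010_modelMain.abc"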

View file

@ -23,7 +23,11 @@ class ExtractAnimationABC(
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction

View file

@ -20,7 +20,10 @@ class ExtractBlend(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction

View file

@ -23,7 +23,10 @@ class ExtractBlendAnimation(
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction

View file

@ -21,7 +21,10 @@ class ExtractCameraABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction

View file

@ -20,7 +20,10 @@ class ExtractCamera(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction

View file

@ -21,7 +21,10 @@ class ExtractFBX(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction

View file

@ -145,7 +145,10 @@ class ExtractAnimationFBX(
root.select_set(True)
armature.select_set(True)
fbx_filename = f"{instance.name}_{armature.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
fbx_filename = f"{instance_name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
override = plugin.create_blender_context(
@ -178,7 +181,7 @@ class ExtractAnimationFBX(
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
json_filename = f"{instance.name}.json"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {

View file

@ -224,7 +224,11 @@ class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
json_data.append(json_element)
json_filename = "{}.json".format(instance.name)
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:

View file

@ -51,7 +51,10 @@ class ExtractPlayblast(publish.Extractor, publish.OptionalPyblishPluginMixin):
# get output path
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.debug(f"Outputting images to {path}")

View file

@ -27,7 +27,10 @@ class ExtractThumbnail(publish.Extractor):
self.log.debug("Extracting capture..")
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.debug(f"Outputting images to {path}")

View file

@ -1,6 +1,8 @@
import os
import pyblish.api
from openpype.client import get_asset_name_identifier
class CollectCelactionInstances(pyblish.api.ContextPlugin):
""" Adds the celaction render instances """
@ -17,8 +19,10 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
asset_name = get_asset_name_identifier(asset_entity)
shared_instance_data = {
"asset": asset_entity["name"],
"asset": asset_name,
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],

View file

@ -1,5 +1,6 @@
import pyblish.api
from openpype.client import get_asset_name_identifier
import openpype.hosts.flame.api as opfapi
from openpype.hosts.flame.otio import flame_export
from openpype.pipeline.create import get_subset_name
@ -33,13 +34,15 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
project_settings=context.data["project_settings"]
)
asset_name = get_asset_name_identifier(asset_doc)
# adding otio timeline to context
with opfapi.maintained_segment_selection(sequence) as selected_seg:
otio_timeline = flame_export.create_otio_timeline(sequence)
instance_data = {
"name": subset_name,
"asset": asset_doc["name"],
"asset": asset_name,
"subset": subset_name,
"family": "workfile",
"families": []

View file

@ -1,6 +1,7 @@
from openpype.hosts.fusion.api import (
get_current_comp
)
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_by_name
from openpype.pipeline import (
AutoCreator,
@ -68,6 +69,13 @@ class FusionWorkfileCreator(AutoCreator):
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
if existing_instance is None:
existing_instance_asset = None
elif AYON_SERVER_ENABLED:
existing_instance_asset = existing_instance["folderPath"]
else:
existing_instance_asset = existing_instance["asset"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@ -75,10 +83,13 @@ class FusionWorkfileCreator(AutoCreator):
project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
@ -91,7 +102,7 @@ class FusionWorkfileCreator(AutoCreator):
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
existing_instance_asset != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -99,6 +110,9 @@ class FusionWorkfileCreator(AutoCreator):
self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -11,7 +11,6 @@ import qargparse
from openpype.settings import get_current_project_settings
from openpype.lib import Logger
from openpype.pipeline import LoaderPlugin, LegacyCreator
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline.load import get_representation_path_from_context
from . import lib
@ -32,7 +31,7 @@ def load_stylesheet():
class CreatorWidget(QtWidgets.QDialog):
# output items
items = dict()
items = {}
def __init__(self, name, info, ui_inputs, parent=None):
super(CreatorWidget, self).__init__(parent)
@ -494,9 +493,8 @@ class ClipLoader:
joins `data` key with asset.data dict into the representation
"""
asset_name = self.context["representation"]["context"]["asset"]
asset_doc = get_current_project_asset(asset_name)
log.debug("__ asset_doc: {}".format(pformat(asset_doc)))
asset_doc = self.context["asset"]
self.data["assetData"] = asset_doc["data"]
def _make_track_item(self, source_bin_item, audio=False):
@ -644,8 +642,8 @@ class PublishClip:
Returns:
hiero.core.TrackItem: hiero track item object with pype tag
"""
vertical_clip_match = dict()
tag_data = dict()
vertical_clip_match = {}
tag_data = {}
types = {
"shot": "shot",
"folder": "folder",
@ -707,9 +705,10 @@ class PublishClip:
self._create_parents()
def convert(self):
# solve track item data and add them to tag data
self._convert_to_tag_data()
tag_hierarchy_data = self._convert_to_tag_data()
self.tag_data.update(tag_hierarchy_data)
# if track name is in review track name and also if driving track name
# is not in review track name: skip tag creation
@ -723,16 +722,23 @@ class PublishClip:
if self.rename:
# rename track item
self.track_item.setName(new_name)
self.tag_data["asset"] = new_name
self.tag_data["asset_name"] = new_name
else:
self.tag_data["asset"] = self.ti_name
self.tag_data["asset_name"] = self.ti_name
self.tag_data["hierarchyData"]["shot"] = self.ti_name
# AYON unique identifier
folder_path = "/{}/{}".format(
tag_hierarchy_data["hierarchy"],
self.tag_data["asset_name"]
)
self.tag_data["folderPath"] = folder_path
if self.tag_data["heroTrack"] and self.review_layer:
self.tag_data.update({"reviewTrack": self.review_layer})
else:
self.tag_data.update({"reviewTrack": None})
# TODO: remove debug print
log.debug("___ self.tag_data: {}".format(
pformat(self.tag_data)
))
@ -891,7 +897,7 @@ class PublishClip:
tag_hierarchy_data = hero_data
# add data to return data dict
self.tag_data.update(tag_hierarchy_data)
return tag_hierarchy_data
def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
""" Solve tag data from hierarchy data and templates. """

View file

@ -5,6 +5,8 @@ import json
import pyblish.api
from openpype.client import get_asset_name_identifier
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
"""Collect frames from tags.
@ -99,6 +101,9 @@ class CollectFrameTagInstances(pyblish.api.ContextPlugin):
# first collect all available subset tag frames
subset_data = {}
context_asset_doc = context.data["assetEntity"]
context_asset_name = get_asset_name_identifier(context_asset_doc)
for tag_data in sequence_tags:
frame = int(tag_data["start"])
@ -115,7 +120,7 @@ class CollectFrameTagInstances(pyblish.api.ContextPlugin):
subset_data[subset] = {
"frames": [frame],
"format": tag_data["format"],
"asset": context.data["assetEntity"]["name"]
"asset": context_asset_name
}
return subset_data

View file

@ -1,9 +1,12 @@
import pyblish
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline.editorial import is_overlapping_otio_ranges
from openpype.hosts.hiero import api as phiero
from openpype.hosts.hiero.api.otio import hiero_export
import hiero
import hiero
# # developer reload modules
from pprint import pformat
@ -80,25 +83,24 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
if k not in ("id", "applieswhole", "label")
})
asset = tag_data["asset"]
asset, asset_name = self._get_asset_data(tag_data)
subset = tag_data["subset"]
# insert family into families
family = tag_data["family"]
families = [str(f) for f in tag_data["families"]]
families.insert(0, str(family))
# form label
label = asset
if asset != clip_name:
label = "{} -".format(asset)
if asset_name != clip_name:
label += " ({})".format(clip_name)
label += " {}".format(subset)
label += " {}".format("[" + ", ".join(families) + "]")
data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"asset": asset,
"asset_name": asset_name,
"item": track_item,
"families": families,
"publish": tag_data["publish"],
@ -176,9 +178,9 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
})
def create_shot_instance(self, context, **data):
subset = "shotMain"
master_layer = data.get("heroTrack")
hierarchy_data = data.get("hierarchyData")
asset = data.get("asset")
item = data.get("item")
clip_name = item.name()
@ -189,23 +191,21 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return
asset = data["asset"]
subset = "shotMain"
asset_name = data["asset_name"]
# insert family into families
family = "shot"
# form label
label = asset
if asset != clip_name:
label = "{} -".format(asset)
if asset_name != clip_name:
label += " ({}) ".format(clip_name)
label += " {}".format(subset)
label += " [{}]".format(family)
data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"subset": subset,
"asset": asset,
"family": family,
"families": []
})
@ -215,7 +215,33 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
def _get_asset_data(self, data):
folder_path = data.pop("folderPath", None)
if data.get("asset_name"):
asset_name = data["asset_name"]
else:
asset_name = data["asset"]
# backward compatibility for clip tags
# which are missing folderPath key
# TODO remove this in future versions
if not folder_path:
hierarchy_path = data["hierarchy"]
folder_path = "/{}/{}".format(
hierarchy_path,
asset_name
)
if AYON_SERVER_ENABLED:
asset = folder_path
else:
asset = asset_name
return asset, asset_name
def create_audio_instance(self, context, **data):
subset = "audioMain"
master_layer = data.get("heroTrack")
if not master_layer:
@ -230,23 +256,21 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
return
asset = data["asset"]
subset = "audioMain"
asset_name = data["asset_name"]
# insert family into families
family = "audio"
# form label
label = asset
if asset != clip_name:
label = "{} -".format(asset)
if asset_name != clip_name:
label += " ({}) ".format(clip_name)
label += " {}".format(subset)
label += " [{}]".format(family)
data.update({
"name": "{}_{}".format(asset, subset),
"label": label,
"subset": subset,
"asset": asset,
"family": family,
"families": ["clip"]
})
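Reviewer note: a short sketch of the backward-compatible fallback in _get_asset_data() above, using hypothetical tag data (old clip tags carry no "folderPath", so it is rebuilt from "hierarchy"):

    # Hypothetical tag data from an old clip tag (no "folderPath" key).
    tag_data = {"asset": "sh010", "hierarchy": "shots/sq01"}

    folder_path = tag_data.pop("folderPath", None)
    asset_name = tag_data.get("asset_name") or tag_data["asset"]
    if not folder_path:
        # Rebuild the folder path the same way the collector does.
        folder_path = "/{}/{}".format(tag_data["hierarchy"], asset_name)

    print(folder_path)  # -> "/shots/sq01/sh010"
    # In AYON mode "asset" becomes this folder path, otherwise it stays the name.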

View file

@ -7,6 +7,7 @@ from qtpy.QtGui import QPixmap
import hiero.ui
from openpype import AYON_SERVER_ENABLED
from openpype.hosts.hiero.api.otio import hiero_export
@ -17,9 +18,11 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.491
def process(self, context):
asset = context.data["asset"]
subset = "workfile"
asset_name = asset
if AYON_SERVER_ENABLED:
asset_name = asset_name.split("/")[-1]
active_timeline = hiero.ui.activeSequence()
project = active_timeline.project()
fps = active_timeline.framerate().toFloat()
@ -27,7 +30,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
# adding otio timeline to context
otio_timeline = hiero_export.create_otio_timeline()
# get workfile thumnail paths
# get workfile thumbnail paths
tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
thumbnail_name = "workfile_thumbnail.png"
thumbnail_path = os.path.join(tmp_staging, thumbnail_name)
@ -49,8 +52,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
}
# get workfile paths
curent_file = project.path()
staging_dir, base_name = os.path.split(curent_file)
current_file = project.path()
staging_dir, base_name = os.path.split(current_file)
# creating workfile representation
workfile_representation = {
@ -59,13 +62,16 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
'files': base_name,
"stagingDir": staging_dir,
}
family = "workfile"
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"label": "{} - {}Main".format(
asset, family),
"name": "{}_{}".format(asset_name, family),
"asset": context.data["asset"],
# TODO use 'get_subset_name'
"subset": "{}{}Main".format(asset_name, family.capitalize()),
"item": project,
"family": "workfile",
"family": family,
"families": [],
"representations": [workfile_representation, thumb_representation]
}
@ -78,7 +84,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"activeProject": project,
"activeTimeline": active_timeline,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"currentFile": current_file,
"colorspace": self.get_colorspace(project),
"fps": fps
}

View file

@ -1,5 +1,6 @@
from pyblish import api
from openpype.client import get_assets
from openpype.client import get_assets, get_asset_name_identifier
class CollectAssetBuilds(api.ContextPlugin):
@ -19,10 +20,13 @@ class CollectAssetBuilds(api.ContextPlugin):
def process(self, context):
project_name = context.data["projectName"]
asset_builds = {}
for asset in get_assets(project_name):
if asset["data"]["entityType"] == "AssetBuild":
self.log.debug("Found \"{}\" in database.".format(asset))
asset_builds[asset["name"]] = asset
for asset_doc in get_assets(project_name):
if asset_doc["data"].get("entityType") != "AssetBuild":
continue
asset_name = get_asset_name_identifier(asset_doc)
self.log.debug("Found \"{}\" in database.".format(asset_doc))
asset_builds[asset_name] = asset_doc
for instance in context:
if instance.data["family"] != "clip":
@ -50,9 +54,7 @@ class CollectAssetBuilds(api.ContextPlugin):
# Collect asset builds.
data = {"assetbuilds": []}
for name in asset_names:
data["assetbuilds"].append(
asset_builds[name]
)
data["assetbuilds"].append(asset_builds[name])
self.log.debug(
"Found asset builds: {}".format(data["assetbuilds"])
)

View file

@ -152,7 +152,9 @@ def get_output_parameter(node):
return node.parm("ar_ass_file")
elif node_type == "Redshift_Proxy_Output":
return node.parm("RS_archive_file")
elif node_type == "ifd":
if node.evalParm("soho_outputmode"):
return node.parm("soho_diskfile")
raise TypeError("Node type '%s' not supported" % node_type)
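Reviewer note: a quick illustration of the new "ifd" branch with a stand-in object instead of a real hou.Node (hypothetical parameter values). The Mantra ROP only writes an archive when soho_outputmode is enabled, so the helper returns soho_diskfile only in that case:

    # Stand-in for a Mantra ("ifd") ROP node, for illustration only.
    class FakeIfdNode:
        def __init__(self, outputmode):
            self._parms = {
                "soho_outputmode": outputmode,
                "soho_diskfile": "$HIP/pyblish/scene.$F4.ifd",
            }

        def evalParm(self, name):
            return self._parms[name]

        def parm(self, name):
            return self._parms[name]

    node = FakeIfdNode(outputmode=1)
    if node.evalParm("soho_outputmode"):
        print(node.parm("soho_diskfile"))  # -> "$HIP/pyblish/scene.$F4.ifd"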

View file

@ -66,10 +66,6 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
register_event_callback("new", on_new)
self._has_been_setup = True
# add houdini vendor packages
hou_pythonpath = os.path.join(HOUDINI_HOST_DIR, "vendor")
sys.path.append(hou_pythonpath)
# Set asset settings for the empty scene directly after launch of
# Houdini so it initializes into the correct scene FPS,

View file

@ -6,6 +6,8 @@ from abc import (
)
import six
import hou
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import (
CreatorError,
LegacyCreator,
@ -142,12 +144,13 @@ class HoudiniCreatorBase(object):
@staticmethod
def create_instance_node(
node_name, parent,
node_type="geometry"):
asset_name, node_name, parent, node_type="geometry"
):
# type: (str, str, str, str) -> hou.Node
"""Create node representing instance.
Arguments:
asset_name (str): Asset name.
node_name (str): Name of the new node.
parent (str): Name of the parent node.
node_type (str, optional): Type of the node.
@ -182,8 +185,13 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase):
if node_type is None:
node_type = "geometry"
if AYON_SERVER_ENABLED:
asset_name = instance_data["folderPath"]
else:
asset_name = instance_data["asset"]
instance_node = self.create_instance_node(
subset_name, "/out", node_type)
asset_name, subset_name, "/out", node_type)
self.customize_node_look(instance_node)

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating Arnold ASS files."""
from openpype.hosts.houdini.api import plugin
from openpype.lib import BoolDef
class CreateArnoldAss(plugin.HoudiniCreator):
@ -21,6 +22,9 @@ class CreateArnoldAss(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "arnold"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateArnoldAss, self).create(
subset_name,
@ -52,3 +56,15 @@ class CreateArnoldAss(plugin.HoudiniCreator):
# Lock any parameters in this list
to_lock = ["ar_ass_export_enable", "family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -2,8 +2,8 @@
"""Creator plugin for creating pointcache bgeo files."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance, CreatorError
from openpype.lib import EnumDef
import hou
from openpype.lib import EnumDef, BoolDef
class CreateBGEO(plugin.HoudiniCreator):
@ -18,6 +18,9 @@ class CreateBGEO(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateBGEO, self).create(
subset_name,
@ -58,6 +61,13 @@ class CreateBGEO(plugin.HoudiniCreator):
instance_node.setParms(parms)
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
bgeo_enum = [
@ -89,7 +99,7 @@ class CreateBGEO(plugin.HoudiniCreator):
return attrs + [
EnumDef("bgeo_type", bgeo_enum, label="BGEO Options"),
]
] + self.get_instance_attr_defs()
def get_network_categories(self):
return [

View file

@ -17,13 +17,13 @@ class CreateHDA(plugin.HoudiniCreator):
icon = "gears"
maintain_selection = False
def _check_existing(self, subset_name):
def _check_existing(self, asset_name, subset_name):
# type: (str, str) -> bool
"""Check if existing subset name versions already exists."""
# Get all subsets of the current asset
project_name = self.project_name
asset_doc = get_asset_by_name(
project_name, self.data["asset"], fields=["_id"]
project_name, asset_name, fields=["_id"]
)
subset_docs = get_subsets(
project_name, asset_ids=[asset_doc["_id"]], fields=["name"]
@ -35,7 +35,8 @@ class CreateHDA(plugin.HoudiniCreator):
return subset_name.lower() in existing_subset_names_low
def create_instance_node(
self, node_name, parent, node_type="geometry"):
self, asset_name, node_name, parent, node_type="geometry"
):
parent_node = hou.node("/obj")
if self.selected_nodes:
@ -61,7 +62,7 @@ class CreateHDA(plugin.HoudiniCreator):
hda_file_name="$HIP/{}.hda".format(node_name)
)
hda_node.layoutChildren()
elif self._check_existing(node_name):
elif self._check_existing(asset_name, node_name):
raise plugin.OpenPypeCreatorError(
("subset {} is already published with different HDA"
"definition.").format(node_name))

View file

@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import BoolDef
class CreateMantraIFD(plugin.HoudiniCreator):
"""Mantra .ifd Archive"""
identifier = "io.openpype.creators.houdini.mantraifd"
label = "Mantra IFD"
family = "mantraifd"
icon = "gears"
def create(self, subset_name, instance_data, pre_create_data):
import hou
instance_data.pop("active", None)
instance_data.update({"node_type": "ifd"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateMantraIFD, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
filepath = "{}{}".format(
hou.text.expandString("$HIP/pyblish/"),
"{}.$F4.ifd".format(subset_name))
parms = {
# Render frame range
"trange": 1,
# Mantra ROP settings
"soho_diskfile": filepath,
"soho_outputmode": 1
}
instance_node.setParms(parms)
# Lock any parameters in this list
to_lock = ["soho_outputmode", "family", "id"]
self.lock_parameters(instance_node, to_lock)
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating pointcache alembics."""
from openpype.hosts.houdini.api import plugin
from openpype.lib import BoolDef
import hou
class CreatePointCache(plugin.HoudiniCreator):
"""Alembic ROP to pointcache"""
identifier = "io.openpype.creators.houdini.pointcache"
@ -15,6 +17,9 @@ class CreatePointCache(plugin.HoudiniCreator):
def create(self, subset_name, instance_data, pre_create_data):
instance_data.pop("active", None)
instance_data.update({"node_type": "alembic"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreatePointCache, self).create(
subset_name,
@ -105,3 +110,15 @@ class CreatePointCache(plugin.HoudiniCreator):
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -2,6 +2,7 @@
"""Creator plugin for creating Redshift proxies."""
from openpype.hosts.houdini.api import plugin
import hou
from openpype.lib import BoolDef
class CreateRedshiftProxy(plugin.HoudiniCreator):
@ -24,6 +25,9 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
# TODO: Somehow enforce so that it only shows the original limited
# attributes of the Redshift_Proxy_Output node type
instance_data.update({"node_type": "Redshift_Proxy_Output"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateRedshiftProxy, self).create(
subset_name,
@ -50,3 +54,15 @@ class CreateRedshiftProxy(plugin.HoudiniCreator):
hou.ropNodeTypeCategory(),
hou.sopNodeTypeCategory()
]
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -2,6 +2,7 @@
"""Creator plugin for creating VDB Caches."""
from openpype.hosts.houdini.api import plugin
from openpype.pipeline import CreatedInstance
from openpype.lib import BoolDef
import hou
@ -19,15 +20,20 @@ class CreateVDBCache(plugin.HoudiniCreator):
instance_data.pop("active", None)
instance_data.update({"node_type": "geometry"})
creator_attributes = instance_data.setdefault(
"creator_attributes", dict())
creator_attributes["farm"] = pre_create_data["farm"]
instance = super(CreateVDBCache, self).create(
subset_name,
instance_data,
pre_create_data) # type: CreatedInstance
instance_node = hou.node(instance.get("instance_node"))
file_path = "{}{}".format(
hou.text.expandString("$HIP/pyblish/"),
"{}.$F4.vdb".format(subset_name))
parms = {
"sopoutput": "$HIP/pyblish/{}.$F4.vdb".format(subset_name),
"sopoutput": file_path,
"initsim": True,
"trange": 1
}
@ -103,3 +109,15 @@ class CreateVDBCache(plugin.HoudiniCreator):
else:
return min(outputs,
key=lambda node: node.evalParm('outputidx'))
def get_instance_attr_defs(self):
return [
BoolDef("farm",
label="Submitting to Farm",
default=False)
]
def get_pre_create_attr_defs(self):
attrs = super().get_pre_create_attr_defs()
# Use same attributes as for instance attributes
return attrs + self.get_instance_attr_defs()

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
from openpype import AYON_SERVER_ENABLED
from openpype.hosts.houdini.api import plugin
from openpype.hosts.houdini.api.lib import read, imprint
from openpype.hosts.houdini.api.pipeline import CONTEXT_CONTAINER
@ -30,16 +31,27 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
task_name = self.create_context.get_current_task_name()
host_name = self.host_name
if current_instance is None:
current_instance_asset = None
elif AYON_SERVER_ENABLED:
current_instance_asset = current_instance["folderPath"]
else:
current_instance_asset = current_instance["asset"]
if current_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(
self.get_dynamic_data(
variant, task_name, asset_doc,
@ -51,15 +63,18 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
)
self._add_instance_to_context(current_instance)
elif (
current_instance["asset"] != asset_name
or current_instance["task"] != task_name
current_instance_asset != asset_name
or current_instance["task"] != task_name
):
# Update instance context if is not the same
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
current_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
current_instance["folderPath"] = asset_name
else:
current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name

View file

@ -0,0 +1,75 @@
import os
import pyblish.api
import hou
from openpype.hosts.houdini.api import lib
class CollectDataforCache(pyblish.api.InstancePlugin):
"""Collect data for caching to Deadline."""
order = pyblish.api.CollectorOrder + 0.04
families = ["ass", "pointcache",
"mantraifd", "redshiftproxy",
"vdbcache"]
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect Data for Cache"
def process(self, instance):
creator_attribute = instance.data["creator_attributes"]
farm_enabled = creator_attribute["farm"]
instance.data["farm"] = farm_enabled
if not farm_enabled:
self.log.debug("Caching on farm is disabled. "
"Skipping farm collecting.")
return
# Why do we need this particular collector to collect the expected
# output files from a ROP node? Don't we have a dedicated collector
# for that yet?
# Collect expected files
ropnode = hou.node(instance.data["instance_node"])
output_parm = lib.get_output_parameter(ropnode)
expected_filepath = output_parm.eval()
instance.data.setdefault("files", list())
instance.data.setdefault("expectedFiles", list())
if instance.data.get("frames"):
files = self.get_files(instance, expected_filepath)
# list of files
instance.data["files"].extend(files)
else:
# single file
instance.data["files"].append(output_parm.eval())
cache_files = {"_": instance.data["files"]}
# Convert instance family to pointcache if it is bgeo or abc
# because ???
for family in instance.data["families"]:
if family == "bgeo" or "abc":
instance.data["family"] = "pointcache"
break
instance.data.update({
"plugin": "Houdini",
"publish": True
})
instance.data["families"].append("publish.hou")
instance.data["expectedFiles"].append(cache_files)
self.log.debug("{}".format(instance.data))
def get_files(self, instance, output_parm):
"""Get the files with the frame range data
Args:
instance (_type_): instance
output_parm (_type_): path of output parameter
Returns:
files: a list of files
"""
directory = os.path.dirname(output_parm)
files = [
os.path.join(directory, frame).replace("\\", "/")
for frame in instance.data["frames"]
]
return files
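Reviewer note: an illustrative run of get_files() with made-up frame names and output path (the real values come from the ROP node and the earlier frame collector):

    import os

    # Hypothetical evaluated output path and collected frame file names.
    output_parm = "C:/projects/show/pyblish/pointcacheMain.0001.bgeo.sc"
    frames = ["pointcacheMain.0001.bgeo.sc", "pointcacheMain.0002.bgeo.sc"]

    directory = os.path.dirname(output_parm)
    files = [
        os.path.join(directory, frame).replace("\\", "/")
        for frame in frames
    ]
    print(files)
    # ['C:/projects/show/pyblish/pointcacheMain.0001.bgeo.sc',
    #  'C:/projects/show/pyblish/pointcacheMain.0002.bgeo.sc']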

View file

@ -0,0 +1,39 @@
import pyblish.api
from openpype.lib import NumberDef
from openpype.pipeline import OpenPypePyblishPluginMixin
class CollectChunkSize(pyblish.api.InstancePlugin,
OpenPypePyblishPluginMixin):
"""Collect chunk size for cache submission to Deadline."""
order = pyblish.api.CollectorOrder + 0.05
families = ["ass", "pointcache",
"vdbcache", "mantraifd",
"redshiftproxy"]
hosts = ["houdini"]
targets = ["local", "remote"]
label = "Collect Chunk Size"
chunkSize = 999999
def process(self, instance):
# need to get the chunk size info from the setting
attr_values = self.get_attr_values_from_data(instance.data)
instance.data["chunkSize"] = attr_values.get("chunkSize")
@classmethod
def apply_settings(cls, project_settings):
project_setting = project_settings["houdini"]["publish"]["CollectChunkSize"] # noqa
cls.chunkSize = project_setting["chunk_size"]
@classmethod
def get_attribute_defs(cls):
return [
NumberDef("chunkSize",
minimum=1,
maximum=999999,
decimals=0,
default=cls.chunkSize,
label="Frame Per Task")
]
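Reviewer note: a sketch of where the default chunk size comes from, using a hypothetical project settings dict that mirrors the path read in apply_settings():

    # Hypothetical project settings; only the keys used by apply_settings().
    project_settings = {
        "houdini": {"publish": {"CollectChunkSize": {"chunk_size": 10}}}
    }

    chunk_size = (
        project_settings["houdini"]["publish"]["CollectChunkSize"]["chunk_size"]
    )
    print(chunk_size)  # -> 10, becomes the default of the "chunkSize" NumberDef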

View file

@ -16,7 +16,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Frames"
families = ["vdbcache", "imagesequence", "ass",
"redshiftproxy", "review", "bgeo"]
"mantraifd", "redshiftproxy", "review",
"bgeo"]
def process(self, instance):

View file

@ -1,6 +1,10 @@
import pyblish.api
from openpype.client import get_subset_by_name, get_asset_by_name
from openpype.client import (
get_subset_by_name,
get_asset_by_name,
get_asset_name_identifier,
)
import openpype.lib.usdlib as usdlib
@ -51,8 +55,9 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
self.log.debug("Add bootstrap for: %s" % bootstrap)
project_name = instance.context.data["projectName"]
asset = get_asset_by_name(project_name, instance.data["asset"])
assert asset, "Asset must exist: %s" % asset
asset_name = instance.data["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
assert asset_doc, "Asset must exist: %s" % asset_name
# Check which are not about to be created and don't exist yet
required = {"shot": ["usdShot"], "asset": ["usdAsset"]}.get(bootstrap)
@ -67,19 +72,21 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
required += list(layers)
self.log.debug("Checking required bootstrap: %s" % required)
for subset in required:
if self._subset_exists(project_name, instance, subset, asset):
for subset_name in required:
if self._subset_exists(
project_name, instance, subset_name, asset_doc
):
continue
self.log.debug(
"Creating {0} USD bootstrap: {1} {2}".format(
bootstrap, asset["name"], subset
bootstrap, asset_name, subset_name
)
)
new = instance.context.create_instance(subset)
new.data["subset"] = subset
new.data["label"] = "{0} ({1})".format(subset, asset["name"])
new = instance.context.create_instance(subset_name)
new.data["subset"] = subset_name
new.data["label"] = "{0} ({1})".format(subset_name, asset_name)
new.data["family"] = "usd.bootstrap"
new.data["comment"] = "Automated bootstrap USD file."
new.data["publishFamilies"] = ["usd"]
@ -91,21 +98,23 @@ class CollectUsdBootstrap(pyblish.api.InstancePlugin):
for key in ["asset"]:
new.data[key] = instance.data[key]
def _subset_exists(self, project_name, instance, subset, asset):
def _subset_exists(self, project_name, instance, subset_name, asset_doc):
"""Return whether subset exists in current context or in database."""
# Allow it to be created during this publish session
context = instance.context
asset_doc_name = get_asset_name_identifier(asset_doc)
for inst in context:
if (
inst.data["subset"] == subset
and inst.data["asset"] == asset["name"]
inst.data["subset"] == subset_name
and inst.data["asset"] == asset_doc_name
):
return True
# Or, if they already exist in the database we can
# skip them too.
if get_subset_by_name(
project_name, subset, asset["_id"], fields=["_id"]
project_name, subset_name, asset_doc["_id"], fields=["_id"]
):
return True
return False

View file

@ -14,8 +14,12 @@ class ExtractAlembic(publish.Extractor):
label = "Extract Alembic"
hosts = ["houdini"]
families = ["abc", "camera"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])

View file

@ -14,9 +14,12 @@ class ExtractAss(publish.Extractor):
label = "Extract Ass"
families = ["ass"]
hosts = ["houdini"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter

View file

@ -17,7 +17,9 @@ class ExtractBGEO(publish.Extractor):
families = ["bgeo"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter

View file

@ -0,0 +1,51 @@
import os
import pyblish.api
from openpype.pipeline import publish
import hou
class ExtractMantraIFD(publish.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Mantra ifd"
hosts = ["houdini"]
families = ["mantraifd"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data.get("instance_node"))
output = ropnode.evalParm("soho_diskfile")
staging_dir = os.path.dirname(output)
instance.data["stagingDir"] = staging_dir
files = instance.data["frames"]
missing_frames = [
frame
for frame in instance.data["frames"]
if not os.path.exists(
os.path.normpath(os.path.join(staging_dir, frame)))
]
if missing_frames:
raise RuntimeError("Failed to complete Mantra ifd extraction. "
"Missing output files: {}".format(
missing_frames))
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'ifd',
'ext': 'ifd',
'files': files,
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
}
instance.data["representations"].append(representation)

View file

@ -14,9 +14,12 @@ class ExtractRedshiftProxy(publish.Extractor):
label = "Extract Redshift Proxy"
families = ["redshiftproxy"]
hosts = ["houdini"]
targets = ["local", "remote"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data.get("instance_node"))
# Get the filename from the filename parameter

View file

@ -16,7 +16,9 @@ class ExtractVDBCache(publish.Extractor):
hosts = ["houdini"]
def process(self, instance):
if instance.data.get("farm"):
self.log.debug("Should be processed on farm, skipping.")
return
ropnode = hou.node(instance.data["instance_node"])
# Get the filename from the filename parameter

View file

@ -22,7 +22,8 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin):
"arnold_rop",
"mantra_rop",
"karma_rop",
"usdrender"]
"usdrender",
"publish.hou"]
optional = True
def process(self, context):

View file

@ -20,7 +20,7 @@ class ValidateHoudiniNotApprenticeLicense(pyblish.api.InstancePlugin):
"""
order = pyblish.api.ValidatorOrder
families = ["usd", "abc"]
families = ["usd", "abc", "fbx", "camera"]
hosts = ["houdini"]
label = "Houdini Apprentice License"

View file

@ -54,12 +54,13 @@ class ValidateSubsetName(pyblish.api.InstancePlugin,
rop_node = hou.node(instance.data["instance_node"])
# Check subset name
asset_doc = instance.data["assetEntity"]
subset_name = get_subset_name(
family=instance.data["family"],
variant=instance.data["variant"],
task_name=instance.data["task"],
asset_doc=instance.data["assetEntity"],
dynamic_data={"asset": instance.data["asset"]}
asset_doc=asset_doc,
dynamic_data={"asset": asset_doc["name"]}
)
if instance.data.get("subset") != subset_name:
@ -76,12 +77,13 @@ class ValidateSubsetName(pyblish.api.InstancePlugin,
rop_node = hou.node(instance.data["instance_node"])
# Check subset name
asset_doc = instance.data["assetEntity"]
subset_name = get_subset_name(
family=instance.data["family"],
variant=instance.data["variant"],
task_name=instance.data["task"],
asset_doc=instance.data["assetEntity"],
dynamic_data={"asset": instance.data["asset"]}
asset_doc=asset_doc,
dynamic_data={"asset": asset_doc["name"]}
)
instance.data["subset"] = subset_name

View file

@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
"""OpenPype startup script."""
from openpype.pipeline import install_host
from openpype.hosts.houdini.api import HoudiniHost
def main():
print("Installing OpenPype ...")
install_host(HoudiniHost())
main()

View file

@ -1 +0,0 @@
__path__ = __import__('pkgutil').extend_path(__path__, __name__)

View file

@ -1,152 +0,0 @@
import os
import hou
import husdoutputprocessors.base as base
import colorbleed.usdlib as usdlib
from openpype.client import get_asset_by_name
from openpype.pipeline import Anatomy, get_current_project_name
class AvalonURIOutputProcessor(base.OutputProcessorBase):
"""Process Avalon URIs into their full path equivalents.
"""
_parameters = None
_param_prefix = 'avalonurioutputprocessor_'
_parms = {
"use_publish_paths": _param_prefix + "use_publish_paths"
}
def __init__(self):
""" There is only one object of each output processor class that is
ever created in a Houdini session. Therefore be very careful
about what data gets put in this object.
"""
self._use_publish_paths = False
self._cache = dict()
def displayName(self):
return 'Avalon URI Output Processor'
def parameters(self):
if not self._parameters:
parameters = hou.ParmTemplateGroup()
use_publish_path = hou.ToggleParmTemplate(
name=self._parms["use_publish_paths"],
label='Resolve Reference paths to publish paths',
default_value=False,
help=("When enabled any paths for Layers, References or "
"Payloads are resolved to published master versions.\n"
"This is usually only used by the publishing pipeline, "
"but can be used for testing too."))
parameters.append(use_publish_path)
self._parameters = parameters.asDialogScript()
return self._parameters
def beginSave(self, config_node, t):
parm = self._parms["use_publish_paths"]
self._use_publish_paths = config_node.parm(parm).evalAtTime(t)
self._cache.clear()
def endSave(self):
self._use_publish_paths = None
self._cache.clear()
def processAsset(self,
asset_path,
asset_path_for_save,
referencing_layer_path,
asset_is_layer,
for_save):
"""
Args:
asset_path (str): The incoming file path you want to alter or not.
asset_path_for_save (bool): Whether the current path is a
referenced path in the USD file. When True, return the path
you want inside USD file.
referencing_layer_path (str): ???
asset_is_layer (bool): Whether this asset is a USD layer file.
If this is False, the asset is something else (for example,
a texture or volume file).
for_save (bool): Whether the asset path is for a file to be saved
out. If so, then return actual written filepath.
Returns:
The refactored asset path.
"""
# Retrieve from cache if this query occurred before (optimization)
cache_key = (asset_path, asset_path_for_save, asset_is_layer, for_save)
if cache_key in self._cache:
return self._cache[cache_key]
relative_template = "{asset}_{subset}.{ext}"
uri_data = usdlib.parse_avalon_uri(asset_path)
if uri_data:
if for_save:
# Set save output path to a relative path so other
# processors can potentially manage it easily?
path = relative_template.format(**uri_data)
print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
self._cache[cache_key] = path
return path
if self._use_publish_paths:
# Resolve to an Avalon published asset for embedded paths
path = self._get_usd_master_path(**uri_data)
else:
path = relative_template.format(**uri_data)
print("Avalon URI Resolver: %s -> %s" % (asset_path, path))
self._cache[cache_key] = path
return path
self._cache[cache_key] = asset_path
return asset_path
def _get_usd_master_path(self,
asset,
subset,
ext):
"""Get the filepath for a .usd file of a subset.
This will return the path to an unversioned master file generated by
`usd_master_file.py`.
"""
PROJECT = get_current_project_name()
anatomy = Anatomy(PROJECT)
asset_doc = get_asset_by_name(PROJECT, asset)
if not asset_doc:
raise RuntimeError("Invalid asset name: '%s'" % asset)
template_obj = anatomy.templates_obj["publish"]["path"]
path = template_obj.format_strict({
"project": PROJECT,
"asset": asset_doc["name"],
"subset": subset,
"representation": ext,
"version": 0 # stub version zero
})
# Remove the version folder
subset_folder = os.path.dirname(os.path.dirname(path))
master_folder = os.path.join(subset_folder, "master")
fname = "{0}.{1}".format(subset, ext)
return os.path.join(master_folder, fname).replace("\\", "/")
output_processor = AvalonURIOutputProcessor()
def usdOutputProcessor():
return output_processor

View file

@ -1,90 +0,0 @@
import hou
import husdoutputprocessors.base as base
import os
class StagingDirOutputProcessor(base.OutputProcessorBase):
"""Output all USD Rop file nodes into the Staging Directory
Ignore any folders and paths set in the Configured Layers
and USD Rop node, just take the filename and save into a
single directory.
"""
theParameters = None
parameter_prefix = "stagingdiroutputprocessor_"
stagingdir_parm_name = parameter_prefix + "stagingDir"
def __init__(self):
self.staging_dir = None
def displayName(self):
return 'StagingDir Output Processor'
def parameters(self):
if not self.theParameters:
parameters = hou.ParmTemplateGroup()
rootdirparm = hou.StringParmTemplate(
self.stagingdir_parm_name,
'Staging Directory', 1,
string_type=hou.stringParmType.FileReference,
file_type=hou.fileType.Directory
)
parameters.append(rootdirparm)
self.theParameters = parameters.asDialogScript()
return self.theParameters
def beginSave(self, config_node, t):
# Use the Root Directory parameter if it is set.
root_dir_parm = config_node.parm(self.stagingdir_parm_name)
if root_dir_parm:
self.staging_dir = root_dir_parm.evalAtTime(t)
if not self.staging_dir:
out_file_parm = config_node.parm('lopoutput')
if out_file_parm:
self.staging_dir = out_file_parm.evalAtTime(t)
if self.staging_dir:
(self.staging_dir, filename) = os.path.split(self.staging_dir)
def endSave(self):
self.staging_dir = None
def processAsset(self, asset_path,
asset_path_for_save,
referencing_layer_path,
asset_is_layer,
for_save):
"""
Args:
asset_path (str): The incoming file path you want to alter or not.
asset_path_for_save (bool): Whether the current path is a
referenced path in the USD file. When True, return the path
you want inside USD file.
referencing_layer_path (str): ???
asset_is_layer (bool): Whether this asset is a USD layer file.
If this is False, the asset is something else (for example,
a texture or volume file).
for_save (bool): Whether the asset path is for a file to be saved
out. If so, then return actual written filepath.
Returns:
The refactored asset path.
"""
# Treat save paths as being relative to the output path.
if for_save and self.staging_dir:
# Whenever we're processing a Save Path make sure to
# resolve it to the Staging Directory
filename = os.path.basename(asset_path)
return os.path.join(self.staging_dir, filename)
return asset_path
output_processor = StagingDirOutputProcessor()
def usdOutputProcessor():
return output_processor

View file

@ -102,8 +102,6 @@ _alembic_options = {
INT_FPS = {15, 24, 25, 30, 48, 50, 60, 44100, 48000}
FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94}
RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"]
DISPLAY_LIGHTS_ENUM = [
{"label": "Use Project Settings", "value": "project_settings"},
@ -3032,194 +3030,6 @@ class shelf():
cmds.shelfLayout(self.name, p="ShelfLayout")
def _get_render_instances():
"""Return all 'render-like' instances.
This returns a list of instance sets that need to receive information
about render layer changes.
Returns:
list: list of instances
"""
objectset = cmds.ls("*.id", long=True, exactType="objectSet",
recursive=True, objectsOnly=True)
instances = []
for objset in objectset:
if not cmds.attributeQuery("id", node=objset, exists=True):
continue
id_attr = "{}.id".format(objset)
if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
continue
has_family = cmds.attributeQuery("family",
node=objset,
exists=True)
if not has_family:
continue
if cmds.getAttr(
"{}.family".format(objset)) in RENDERLIKE_INSTANCE_FAMILIES:
instances.append(objset)
return instances
renderItemObserverList = []
class RenderSetupListObserver:
"""Observer to catch changes in render setup layers."""
def listItemAdded(self, item):
print("--- adding ...")
self._add_render_layer(item)
def listItemRemoved(self, item):
print("--- removing ...")
self._remove_render_layer(item.name())
def _add_render_layer(self, item):
render_sets = _get_render_instances()
layer_name = item.name()
for render_set in render_sets:
members = cmds.sets(render_set, query=True) or []
namespace_name = "_{}".format(render_set)
if not cmds.namespace(exists=namespace_name):
index = 1
namespace_name = "_{}".format(render_set)
try:
cmds.namespace(rm=namespace_name)
except RuntimeError:
# namespace is not empty, so we leave it untouched
pass
original_namespace_name = namespace_name
while cmds.namespace(exists=namespace_name):
namespace_name = "{}{}".format(
original_namespace_name, index)
index += 1
namespace = cmds.namespace(add=namespace_name)
if members:
# if set already has namespaced members, use the same
# namespace as others.
namespace = members[0].rpartition(":")[0]
else:
namespace = namespace_name
render_layer_set_name = "{}:{}".format(namespace, layer_name)
if render_layer_set_name in members:
continue
print(" - creating set for {}".format(layer_name))
maya_set = cmds.sets(n=render_layer_set_name, empty=True)
cmds.sets(maya_set, forceElement=render_set)
rio = RenderSetupItemObserver(item)
print("- adding observer for {}".format(item.name()))
item.addItemObserver(rio.itemChanged)
renderItemObserverList.append(rio)
def _remove_render_layer(self, layer_name):
render_sets = _get_render_instances()
for render_set in render_sets:
members = cmds.sets(render_set, query=True)
if not members:
continue
# all sets under set should have the same namespace
namespace = members[0].rpartition(":")[0]
render_layer_set_name = "{}:{}".format(namespace, layer_name)
if render_layer_set_name in members:
print(" - removing set for {}".format(layer_name))
cmds.delete(render_layer_set_name)
class RenderSetupItemObserver:
"""Handle changes in render setup items."""
def __init__(self, item):
self.item = item
self.original_name = item.name()
def itemChanged(self, *args, **kwargs):
"""Item changed callback."""
if self.item.name() == self.original_name:
return
render_sets = _get_render_instances()
for render_set in render_sets:
members = cmds.sets(render_set, query=True)
if not members:
continue
# all sets under set should have the same namespace
namespace = members[0].rpartition(":")[0]
render_layer_set_name = "{}:{}".format(
namespace, self.original_name)
if render_layer_set_name in members:
print(" <> renaming {} to {}".format(self.original_name,
self.item.name()))
cmds.rename(render_layer_set_name,
"{}:{}".format(
namespace, self.item.name()))
self.original_name = self.item.name()
renderListObserver = RenderSetupListObserver()
def add_render_layer_change_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
rs = renderSetup.instance()
render_sets = _get_render_instances()
layers = rs.getRenderLayers()
for render_set in render_sets:
members = cmds.sets(render_set, query=True)
if not members:
continue
# all sets under set should have the same namespace
namespace = members[0].rpartition(":")[0]
for layer in layers:
render_layer_set_name = "{}:{}".format(namespace, layer.name())
if render_layer_set_name not in members:
continue
rio = RenderSetupItemObserver(layer)
print("- adding observer for {}".format(layer.name()))
layer.addItemObserver(rio.itemChanged)
renderItemObserverList.append(rio)
def add_render_layer_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
print("> adding renderSetup observer ...")
rs = renderSetup.instance()
rs.addListObserver(renderListObserver)
pass
def remove_render_layer_observer():
import maya.app.renderSetup.model.renderSetup as renderSetup
print("< removing renderSetup observer ...")
rs = renderSetup.instance()
try:
rs.removeListObserver(renderListObserver)
except ValueError:
# no observer set yet
pass
def update_content_on_context_change():
"""
This will update scene content to match new asset on context change

View file

@ -70,8 +70,8 @@ class RenderSettings(object):
def set_default_renderer_settings(self, renderer=None):
"""Set basic settings based on renderer."""
# Not all hosts can import this module.
from maya import cmds
import maya.mel as mel
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
if not renderer:
renderer = cmds.getAttr(
@ -126,6 +126,10 @@ class RenderSettings(object):
"""Sets settings for Arnold."""
from mtoa.core import createOptions # noqa
from mtoa.aovs import AOVInterface # noqa
# Not all hosts can import this module.
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
createOptions()
render_settings = self._project_settings["maya"]["RenderSettings"]
arnold_render_presets = render_settings["arnold_renderer"] # noqa
@ -172,6 +176,10 @@ class RenderSettings(object):
def _set_redshift_settings(self, width, height):
"""Sets settings for Redshift."""
# Not all hosts can import this module.
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
render_settings = self._project_settings["maya"]["RenderSettings"]
redshift_render_presets = render_settings["redshift_renderer"]
@ -224,6 +232,10 @@ class RenderSettings(object):
def _set_renderman_settings(self, width, height, aov_separator):
"""Sets settings for Renderman"""
# Not all hosts can import this module.
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
rman_render_presets = (
self._project_settings
["maya"]
@ -285,6 +297,11 @@ class RenderSettings(object):
def _set_vray_settings(self, aov_separator, width, height):
# type: (str, int, int) -> None
"""Sets important settings for Vray."""
# Not all hosts can import this module.
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
settings = cmds.ls(type="VRaySettingsNode")
node = settings[0] if settings else cmds.createNode("VRaySettingsNode")
render_settings = self._project_settings["maya"]["RenderSettings"]
@ -357,6 +374,10 @@ class RenderSettings(object):
@staticmethod
def _set_global_output_settings():
# Not all hosts can import this module.
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
# enable animation
cmds.setAttr("defaultRenderGlobals.outFormatControl", 0)
cmds.setAttr("defaultRenderGlobals.animation", 1)
@ -364,6 +385,10 @@ class RenderSettings(object):
cmds.setAttr("defaultRenderGlobals.extensionPadding", 4)
def _additional_attribs_setter(self, additional_attribs):
# Not all hosts can import this module.
from maya import cmds # noqa: F401
import maya.mel as mel # noqa: F401
for item in additional_attribs:
attribute, value = item
attribute = str(attribute) # ensure str conversion from settings
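The repeated "Not all hosts can import this module" comments above explain why `maya.cmds` is now imported inside each method instead of at module level. A minimal sketch of that deferred-import pattern, assuming the method is only ever called from a running Maya session:

class DeferredMayaSettingsSketch(object):
    """Illustrative only; not part of this commit."""

    def apply_resolution(self, width, height):
        # Not all hosts can import this module, so defer the import
        # until the method actually runs inside Maya.
        from maya import cmds
        cmds.setAttr("defaultResolution.width", width)
        cmds.setAttr("defaultResolution.height", height)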

View file

@ -580,20 +580,11 @@ def on_save():
lib.set_id(node, new_id, overwrite=False)
def _update_render_layer_observers():
# Helper to trigger update for all renderlayer observer logic
lib.remove_render_layer_observer()
lib.add_render_layer_observer()
lib.add_render_layer_change_observer()
def on_open():
"""On scene open let's assume the containers have changed."""
from openpype.widgets import popup
utils.executeDeferred(_update_render_layer_observers)
# Validate FPS after update_task_from_path to
# ensure it is using correct FPS for the asset
lib.validate_fps()
@ -630,7 +621,6 @@ def on_new():
with lib.suspended_refresh():
lib.set_context_settings()
utils.executeDeferred(_update_render_layer_observers)
_remove_workfile_lock()

View file

@ -7,6 +7,7 @@ import six
from maya import cmds
from maya.app.renderSetup.model import renderSetup
from openpype import AYON_SERVER_ENABLED
from openpype.lib import BoolDef, Logger
from openpype.settings import get_project_settings
from openpype.pipeline import (
@ -449,14 +450,16 @@ class RenderlayerCreator(NewCreator, MayaCreatorBase):
# this instance will not have the `instance_node` data yet
# until it's been saved/persisted at least once.
project_name = self.create_context.get_current_project_name()
asset_name = self.create_context.get_current_asset_name()
instance_data = {
"asset": self.create_context.get_current_asset_name(),
"task": self.create_context.get_current_task_name(),
"variant": layer.name(),
}
asset_doc = get_asset_by_name(project_name,
instance_data["asset"])
if AYON_SERVER_ENABLED:
instance_data["folderPath"] = asset_name
else:
instance_data["asset"] = asset_name
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
layer.name(),
instance_data["task"],

View file

@ -45,10 +45,14 @@ class CreateMultishotLayout(plugin.MayaCreator):
above is done.
"""
current_folder = get_folder_by_name(
project_name=get_current_project_name(),
folder_name=get_current_asset_name(),
)
project_name = get_current_project_name()
folder_path = get_current_asset_name()
if "/" in folder_path:
current_folder = get_folder_by_path(project_name, folder_path)
else:
current_folder = get_folder_by_name(
project_name, folder_name=folder_path
)
current_path_parts = current_folder["path"].split("/")
@ -154,7 +158,7 @@ class CreateMultishotLayout(plugin.MayaCreator):
# Create layout instance by the layout creator
instance_data = {
"asset": shot["name"],
"folderPath": shot["path"],
"variant": layout_creator.get_default_variant()
}
if layout_task:

View file

@ -2,6 +2,7 @@ import json
from maya import cmds
from openpype import AYON_SERVER_ENABLED
from openpype.hosts.maya.api import (
lib,
plugin
@ -43,7 +44,11 @@ class CreateReview(plugin.MayaCreator):
members = cmds.ls(selection=True)
project_name = self.project_name
asset_doc = get_asset_by_name(project_name, instance_data["asset"])
if AYON_SERVER_ENABLED:
asset_name = instance_data["folderPath"]
else:
asset_name = instance_data["asset"]
asset_doc = get_asset_by_name(project_name, asset_name)
task_name = instance_data["task"]
preset = lib.get_capture_preset(
task_name,

View file

@ -51,7 +51,7 @@ class CreateUnrealSkeletalMesh(plugin.MayaCreator):
# We reorganize the geometry that was originally added into the
# set into either 'joints_SET' or 'geometry_SET' based on the
# joint_hints from project settings
members = cmds.sets(instance_node, query=True)
members = cmds.sets(instance_node, query=True) or []
cmds.sets(clear=instance_node)
geometry_set = cmds.sets(name="geometry_SET", empty=True)
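The comment above describes reorganizing the collected members into 'joints_SET' or 'geometry_SET' based on joint hints from project settings. A pure-Python sketch of that partitioning; node names and hints here are hypothetical, and the real plugin does this with Maya sets:

def partition_members(members, joint_hints):
    joints, geometry = [], []
    for node in members:
        # Compare against the short node name without DAG path or namespace.
        short_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1]
        (joints if short_name in joint_hints else geometry).append(node)
    return joints, geometry

# partition_members(["|root|jnt_root", "|root|body_GEO"], {"jnt_root"})
# -> (["|root|jnt_root"], ["|root|body_GEO"])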

View file

@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import CreatedInstance, AutoCreator
from openpype.client import get_asset_by_name
from openpype.client import get_asset_by_name, get_asset_name_identifier
from openpype.hosts.maya.api import plugin
from maya import cmds
@ -29,16 +30,27 @@ class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator):
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
if current_instance is None:
current_instance_asset = None
elif AYON_SERVER_ENABLED:
current_instance_asset = current_instance["folderPath"]
else:
current_instance_asset = current_instance["asset"]
if current_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(
self.get_dynamic_data(
variant, task_name, asset_doc,
@ -50,15 +62,20 @@ class CreateWorkfile(plugin.MayaCreatorBase, AutoCreator):
)
self._add_instance_to_context(current_instance)
elif (
current_instance["asset"] != asset_name
or current_instance["task"] != task_name
current_instance_asset != asset_name
or current_instance["task"] != task_name
):
# Update instance context if is not the same
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
current_instance["asset"] = asset_name
asset_name = get_asset_name_identifier(asset_doc)
if AYON_SERVER_ENABLED:
current_instance["folderPath"] = asset_name
else:
current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name

View file

@ -3,7 +3,7 @@ from maya import cmds, mel
import pyblish.api
from openpype.client import get_subset_by_name
from openpype.pipeline import legacy_io, KnownPublishError
from openpype.pipeline import KnownPublishError
from openpype.hosts.maya.api import lib
@ -116,10 +116,10 @@ class CollectReview(pyblish.api.InstancePlugin):
instance.data['remove'] = True
else:
task = legacy_io.Session["AVALON_TASK"]
legacy_subset_name = task + 'Review'
project_name = instance.context.data["projectName"]
asset_doc = instance.context.data['assetEntity']
project_name = legacy_io.active_project()
task = instance.context.data["task"]
legacy_subset_name = task + 'Review'
subset_doc = get_subset_by_name(
project_name,
legacy_subset_name,

View file

@ -13,16 +13,6 @@ from openpype.hosts.maya.api.lib import (
)
@contextmanager
def renamed(original_name, renamed_name):
# type: (str, str) -> None
try:
cmds.rename(original_name, renamed_name)
yield
finally:
cmds.rename(renamed_name, original_name)
class ExtractUnrealSkeletalMeshAbc(publish.Extractor):
"""Extract Unreal Skeletal Mesh as FBX from Maya. """

View file

@ -62,6 +62,10 @@ class ExtractUnrealSkeletalMeshFbx(publish.Extractor):
original_parent = to_extract[0].split("|")[1]
parent_node = instance.data.get("asset")
# this needs to be done for AYON
# WARNING: since AYON supports duplicate asset names,
# this needs to be refactored throughout the pipeline.
parent_node = parent_node.split("/")[-1]
renamed_to_extract = []
for node in to_extract:

View file

@ -3,6 +3,7 @@
from __future__ import absolute_import
import pyblish.api
from openpype import AYON_SERVER_ENABLED
import openpype.hosts.maya.api.action
from openpype.pipeline.publish import (
RepairAction,
@ -66,12 +67,16 @@ class ValidateInstanceInContext(pyblish.api.InstancePlugin,
def repair(cls, instance):
context_asset = cls.get_context_asset(instance)
instance_node = instance.data["instance_node"]
if AYON_SERVER_ENABLED:
asset_name_attr = "folderPath"
else:
asset_name_attr = "asset"
cmds.setAttr(
"{}.asset".format(instance_node),
"{}.{}".format(instance_node, asset_name_attr),
context_asset,
type="string"
)
@staticmethod
def get_context_asset(instance):
return instance.context.data["assetEntity"]["name"]
return instance.context.data["asset"]

View file

@ -67,13 +67,15 @@ class ValidateModelName(pyblish.api.InstancePlugin,
regex = cls.top_level_regex
r = re.compile(regex)
m = r.match(top_group)
project_name = instance.context.data["projectName"]
current_asset_name = instance.context.data["asset"]
if m is None:
cls.log.error("invalid name on: {}".format(top_group))
cls.log.error("name doesn't match regex {}".format(regex))
invalid.append(top_group)
else:
if "asset" in r.groupindex:
if m.group("asset") != legacy_io.Session["AVALON_ASSET"]:
if m.group("asset") != current_asset_name:
cls.log.error("Invalid asset name in top level group.")
return top_group
if "subset" in r.groupindex:
@ -81,7 +83,7 @@ class ValidateModelName(pyblish.api.InstancePlugin,
cls.log.error("Invalid subset name in top level group.")
return top_group
if "project" in r.groupindex:
if m.group("project") != legacy_io.Session["AVALON_PROJECT"]:
if m.group("project") != project_name:
cls.log.error("Invalid project name in top level group.")
return top_group
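The validator above matches the top level group name against a configurable regex and, when the pattern defines named groups, compares each captured group with the current context. A standalone sketch with a hypothetical regex and context values:

import re

# Hypothetical settings and context, only to illustrate the named-group checks.
top_level_regex = r"^(?P<asset>\w+)_(?P<subset>\w+)_GRP$"
current_asset_name = "characterA"
current_subset = "modelMain"

def validate_top_group(top_group):
    r = re.compile(top_level_regex)
    m = r.match(top_group)
    if m is None:
        return False  # name does not match the regex at all
    if "asset" in r.groupindex and m.group("asset") != current_asset_name:
        return False  # wrong asset name in the top level group
    if "subset" in r.groupindex and m.group("subset") != current_subset:
        return False  # wrong subset name in the top level group
    return True

# validate_top_group("characterA_modelMain_GRP") -> True
# validate_top_group("characterB_modelMain_GRP") -> False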

View file

@ -51,7 +51,7 @@ class ValidateShaderName(pyblish.api.InstancePlugin,
descendants = cmds.ls(descendants, noIntermediate=True, long=True)
shapes = cmds.ls(descendants, type=["nurbsSurface", "mesh"], long=True)
asset_name = instance.data.get("asset", None)
asset_name = instance.data.get("asset")
# Check the number of connected shadingEngines per shape
regex_compile = re.compile(cls.regex)

View file

@ -102,7 +102,8 @@ class ValidateUnrealStaticMeshName(pyblish.api.InstancePlugin,
cl_r = re.compile(regex_collision)
mesh_name = "{}{}".format(instance.data["asset"],
asset_name = instance.data["assetEntity"]["name"]
mesh_name = "{}{}".format(asset_name,
instance.data.get("variant", []))
for obj in collision_set:

View file

@ -4,7 +4,7 @@ from collections import defaultdict
import maya.cmds as cmds
from openpype.client import get_assets
from openpype.client import get_assets, get_asset_name_identifier
from openpype.pipeline import (
remove_container,
registered_host,
@ -128,7 +128,8 @@ def create_items_from_nodes(nodes):
project_name = get_current_project_name()
asset_ids = set(id_hashes.keys())
asset_docs = get_assets(project_name, asset_ids, fields=["name"])
fields = {"_id", "name", "data.parents"}
asset_docs = get_assets(project_name, asset_ids, fields=fields)
asset_docs_by_id = {
str(asset_doc["_id"]): asset_doc
for asset_doc in asset_docs
@ -156,8 +157,9 @@ def create_items_from_nodes(nodes):
namespace = get_namespace_from_node(node)
namespaces.add(namespace)
label = get_asset_name_identifier(asset_doc)
asset_view_items.append({
"label": asset_doc["name"],
"label": label,
"asset": asset_doc,
"looks": looks,
"namespaces": namespaces

View file

@ -3,6 +3,7 @@ from collections import defaultdict
from qtpy import QtWidgets, QtCore
from openpype.client import get_asset_name_identifier
from openpype.tools.utils.models import TreeModel
from openpype.tools.utils.lib import (
preserve_expanded_rows,
@ -126,7 +127,7 @@ class AssetOutliner(QtWidgets.QWidget):
asset_namespaces = defaultdict(set)
for item in items:
asset_id = str(item["asset"]["_id"])
asset_name = item["asset"]["name"]
asset_name = get_asset_name_identifier(item["asset"])
asset_namespaces[asset_name].add(item.get("namespace"))
if asset_name in assets:

View file

@ -13,6 +13,7 @@ from collections import OrderedDict
import nuke
from qtpy import QtCore, QtWidgets
from openpype import AYON_SERVER_ENABLED
from openpype.client import (
get_project,
get_asset_by_name,
@ -1107,7 +1108,9 @@ def format_anatomy(data):
Return:
path (str)
'''
anatomy = Anatomy()
project_name = get_current_project_name()
anatomy = Anatomy(project_name)
log.debug("__ anatomy.templates: {}".format(anatomy.templates))
padding = None
@ -1125,8 +1128,10 @@ def format_anatomy(data):
file = script_name()
data["version"] = get_version_from_path(file)
project_name = anatomy.project_name
asset_name = data["asset"]
if AYON_SERVER_ENABLED:
asset_name = data["folderPath"]
else:
asset_name = data["asset"]
task_name = data["task"]
host_name = get_current_host_name()
context_data = get_template_data_with_names(

View file

@ -111,7 +111,6 @@ class ValidateNukeWriteNode(
for value in values:
if type(node_value) in (int, float):
try:
if isinstance(value, list):
value = color_gui_to_int(value)
else:
@ -130,7 +129,7 @@ class ValidateNukeWriteNode(
and key != "file"
and key != "tile_color"
):
check.append([key, value, write_node[key].value()])
check.append([key, node_value, write_node[key].value()])
if check:
self._make_error(check)

View file

@ -1,5 +1,6 @@
import re
from openpype import AYON_SERVER_ENABLED
import openpype.hosts.photoshop.api as api
from openpype.client import get_asset_by_name
from openpype.lib import prepare_template_data
@ -43,6 +44,14 @@ class PSAutoCreator(AutoCreator):
asset_name = context.get_current_asset_name()
task_name = context.get_current_task_name()
host_name = context.host_name
if existing_instance is None:
existing_instance_asset = None
elif AYON_SERVER_ENABLED:
existing_instance_asset = existing_instance["folderPath"]
else:
existing_instance_asset = existing_instance["asset"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@ -50,10 +59,13 @@ class PSAutoCreator(AutoCreator):
project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
data.update(self.get_dynamic_data(
self.default_variant, task_name, asset_doc,
project_name, host_name, None
@ -70,7 +82,7 @@ class PSAutoCreator(AutoCreator):
new_instance.data_to_store())
elif (
existing_instance["asset"] != asset_name
existing_instance_asset != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -78,7 +90,10 @@ class PSAutoCreator(AutoCreator):
self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,5 +1,6 @@
from openpype.pipeline import CreatedInstance
from openpype import AYON_SERVER_ENABLED
from openpype.lib import BoolDef
import openpype.hosts.photoshop.api as api
from openpype.hosts.photoshop.lib import PSAutoCreator, clean_subset_name
@ -37,6 +38,13 @@ class AutoImageCreator(PSAutoCreator):
host_name = context.host_name
asset_doc = get_asset_by_name(project_name, asset_name)
if existing_instance is None:
existing_instance_asset = None
elif AYON_SERVER_ENABLED:
existing_instance_asset = existing_instance["folderPath"]
else:
existing_instance_asset = existing_instance["asset"]
if existing_instance is None:
subset_name = self.get_subset_name(
self.default_variant, task_name, asset_doc,
@ -44,9 +52,12 @@ class AutoImageCreator(PSAutoCreator):
)
data = {
"asset": asset_name,
"task": task_name,
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
if not self.active_on_create:
data["active"] = False
@ -62,15 +73,17 @@ class AutoImageCreator(PSAutoCreator):
new_instance.data_to_store())
elif ( # existing instance from different context
existing_instance["asset"] != asset_name
existing_instance_asset != asset_name
or existing_instance["task"] != task_name
):
subset_name = self.get_subset_name(
self.default_variant, task_name, asset_doc,
project_name, host_name
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,5 +1,6 @@
import pyblish.api
from openpype.client import get_asset_name_identifier
from openpype.hosts.photoshop import api as photoshop
from openpype.pipeline.create import get_subset_name
@ -27,7 +28,7 @@ class CollectAutoImage(pyblish.api.ContextPlugin):
task_name = context.data["task"]
host_name = context.data["hostName"]
asset_doc = context.data["assetEntity"]
asset_name = asset_doc["name"]
asset_name = get_asset_name_identifier(asset_doc)
auto_creator = proj_settings.get(
"photoshop", {}).get(

View file

@ -7,6 +7,7 @@ Provides:
"""
import pyblish.api
from openpype.client import get_asset_name_identifier
from openpype.hosts.photoshop import api as photoshop
from openpype.pipeline.create import get_subset_name
@ -65,7 +66,8 @@ class CollectAutoReview(pyblish.api.ContextPlugin):
task_name = context.data["task"]
host_name = context.data["hostName"]
asset_doc = context.data["assetEntity"]
asset_name = asset_doc["name"]
asset_name = get_asset_name_identifier(asset_doc)
subset_name = get_subset_name(
family,

View file

@ -1,6 +1,7 @@
import os
import pyblish.api
from openpype.client import get_asset_name_identifier
from openpype.hosts.photoshop import api as photoshop
from openpype.pipeline.create import get_subset_name
@ -69,8 +70,8 @@ class CollectAutoWorkfile(pyblish.api.ContextPlugin):
task_name = context.data["task"]
host_name = context.data["hostName"]
asset_doc = context.data["assetEntity"]
asset_name = asset_doc["name"]
asset_name = get_asset_name_identifier(asset_doc)
subset_name = get_subset_name(
family,
variant,

View file

@ -1,10 +1,11 @@
import re
import uuid
import copy
import qargparse
from qtpy import QtWidgets, QtCore
from openpype.settings import get_current_project_settings
from openpype.pipeline.context_tools import get_current_project_asset
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
@ -18,7 +19,7 @@ from .menu import load_stylesheet
class CreatorWidget(QtWidgets.QDialog):
# output items
items = dict()
items = {}
def __init__(self, name, info, ui_inputs, parent=None):
super(CreatorWidget, self).__init__(parent)
@ -100,7 +101,7 @@ class CreatorWidget(QtWidgets.QDialog):
self.close()
def value(self, data, new_data=None):
new_data = new_data or dict()
new_data = new_data or {}
for k, v in data.items():
new_data[k] = {
"target": None,
@ -289,7 +290,7 @@ class Spacer(QtWidgets.QWidget):
class ClipLoader:
active_bin = None
data = dict()
data = {}
def __init__(self, loader_obj, context, **options):
""" Initialize object
@ -386,8 +387,8 @@ class ClipLoader:
join `data` key with asset.data dict into the representation
"""
asset_name = self.context["representation"]["context"]["asset"]
self.data["assetData"] = get_current_project_asset(asset_name)["data"]
self.data["assetData"] = copy.deepcopy(self.context["asset"]["data"])
def load(self, files):
"""Load clip into timeline
@ -587,8 +588,8 @@ class PublishClip:
Returns:
hiero.core.TrackItem: hiero track item object with openpype tag
"""
vertical_clip_match = dict()
tag_data = dict()
vertical_clip_match = {}
tag_data = {}
types = {
"shot": "shot",
"folder": "folder",
@ -664,15 +665,23 @@ class PublishClip:
new_name = self.tag_data.pop("newClipName")
if self.rename:
self.tag_data["asset"] = new_name
self.tag_data["asset_name"] = new_name
else:
self.tag_data["asset"] = self.ti_name
self.tag_data["asset_name"] = self.ti_name
# AYON unique identifier
folder_path = "/{}/{}".format(
self.tag_data["hierarchy"],
self.tag_data["asset_name"]
)
self.tag_data["folder_path"] = folder_path
# create new name for track item
if not lib.pype_marker_workflow:
# create compound clip workflow
lib.create_compound_clip(
self.timeline_item_data,
self.tag_data["asset"],
self.tag_data["asset_name"],
self.mp_folder
)
@ -764,7 +773,7 @@ class PublishClip:
# increasing steps by index of rename iteration
self.count_steps *= self.rename_index
hierarchy_formatting_data = dict()
hierarchy_formatting_data = {}
_data = self.timeline_item_default_data.copy()
if self.ui_inputs:
# adding tag metadata from ui
@ -853,8 +862,7 @@ class PublishClip:
"parents": self.parents,
"hierarchyData": hierarchy_formatting_data,
"subset": self.subset,
"family": self.subset_family,
"families": ["clip"]
"family": self.subset_family
}
def _convert_to_entity(self, key):

View file

@ -26,6 +26,7 @@ class ExtractWorkfile(publish.Extractor):
resolve_workfile_ext = ".drp"
drp_file_name = name + resolve_workfile_ext
drp_file_path = os.path.normpath(
os.path.join(staging_dir, drp_file_name))

View file

@ -9,6 +9,7 @@ from openpype.hosts.resolve.api.lib import (
get_publish_attribute,
get_otio_clip_instance_data,
)
from openpype import AYON_SERVER_ENABLED
class PrecollectInstances(pyblish.api.ContextPlugin):
@ -29,7 +30,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
for timeline_item_data in selected_timeline_items:
data = dict()
data = {}
timeline_item = timeline_item_data["clip"]["item"]
# get pype tag data
@ -60,24 +61,24 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
if k not in ("id", "applieswhole", "label")
})
asset = tag_data["asset"]
if AYON_SERVER_ENABLED:
asset = tag_data["folder_path"]
else:
asset = tag_data["asset_name"]
subset = tag_data["subset"]
# insert family into families
family = tag_data["family"]
families = [str(f) for f in tag_data["families"]]
families.insert(0, str(family))
data.update({
"name": "{} {} {}".format(asset, subset, families),
"name": "{}_{}".format(asset, subset),
"label": "{} {}".format(asset, subset),
"asset": asset,
"item": timeline_item,
"families": families,
"publish": get_publish_attribute(timeline_item),
"fps": context.data["fps"],
"handleStart": handle_start,
"handleEnd": handle_end,
"newAssetPublishing": True
"newAssetPublishing": True,
"families": ["clip"],
})
# otio clip data
@ -135,7 +136,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
family = "shot"
data.update({
"name": "{} {} {}".format(asset, subset, family),
"name": "{}_{}".format(asset, subset),
"label": "{} {}".format(asset, subset),
"subset": subset,
"asset": asset,
"family": family,

View file

@ -1,7 +1,9 @@
import pyblish.api
from pprint import pformat
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import get_current_asset_name
from openpype.hosts.resolve import api as rapi
from openpype.hosts.resolve.otio import davinci_export
@ -13,9 +15,12 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder - 0.5
def process(self, context):
current_asset_name = asset_name = get_current_asset_name()
asset = get_current_asset_name()
subset = "workfile"
if AYON_SERVER_ENABLED:
asset_name = current_asset_name.split("/")[-1]
subset = "workfileMain"
project = rapi.get_current_project()
fps = project.GetSetting("timelineFrameRate")
video_tracks = rapi.get_video_track_names()
@ -24,9 +29,10 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
otio_timeline = davinci_export.create_otio_timeline(project)
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"name": "{}_{}".format(asset_name, subset),
"label": "{} {}".format(current_asset_name, subset),
"asset": current_asset_name,
"subset": subset,
"item": project,
"family": "workfile",
"families": []

View file

@ -60,6 +60,9 @@ class CollectHarmonyScenes(pyblish.api.InstancePlugin):
# updating hierarchy data
anatomy_data_new.update({
"asset": asset_data["name"],
"folder": {
"name": asset_data["name"],
},
"task": {
"name": task,
"type": task_type,

View file

@ -56,6 +56,9 @@ class CollectHarmonyZips(pyblish.api.InstancePlugin):
anatomy_data_new.update(
{
"asset": asset_data["name"],
"folder": {
"name": asset_data["name"],
},
"task": {
"name": task,
"type": task_type,

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator plugin for creating workfiles."""
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import CreatedInstance, AutoCreator
from openpype.client import get_asset_by_name
@ -41,6 +42,13 @@ class CreateWorkfile(AutoCreator):
if instance.creator_identifier == self.identifier
), None)
if current_instance is None:
current_instance_asset = None
elif AYON_SERVER_ENABLED:
current_instance_asset = current_instance["folderPath"]
else:
current_instance_asset = current_instance["asset"]
if current_instance is None:
self.log.info("Auto-creating workfile instance...")
asset_doc = get_asset_by_name(project_name, asset_name)
@ -48,22 +56,28 @@ class CreateWorkfile(AutoCreator):
variant, task_name, asset_doc, project_name, host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
current_instance = self.create_instance_in_context(subset_name,
data)
elif (
current_instance["asset"] != asset_name
or current_instance["task"] != task_name
current_instance_asset != asset_name
or current_instance["task"] != task_name
):
# Update instance context if is not the same
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
variant, task_name, asset_doc, project_name, host_name
)
current_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
current_instance["folderPath"] = asset_name
else:
current_instance["asset"] = asset_name
current_instance["task"] = task_name
current_instance["subset"] = subset_name

View file

@ -53,11 +53,11 @@ class ShotMetadataSolver:
try:
# format to new shot name
return shot_rename_template.format(**data)
except KeyError as _E:
except KeyError as _error:
raise CreatorError((
"Make sure all keys in settings are correct:: \n\n"
f"From template string {shot_rename_template} > "
f"`{_E}` has no equivalent in \n"
f"`{_error}` has no equivalent in \n"
f"{list(data.keys())} input formatting keys!"
))
@ -100,7 +100,7 @@ class ShotMetadataSolver:
"at your project settings..."
))
# QUESTION:how to refactory `match[-1]` to some better way?
# QUESTION:how to refactor `match[-1]` to some better way?
output_data[token_key] = match[-1]
return output_data
@ -130,10 +130,10 @@ class ShotMetadataSolver:
parent_token["name"]: parent_token["value"].format(**data)
for parent_token in hierarchy_parents
}
except KeyError as _E:
except KeyError as _error:
raise CreatorError((
"Make sure all keys in settings are correct : \n"
f"`{_E}` has no equivalent in \n{list(data.keys())}"
f"`{_error}` has no equivalent in \n{list(data.keys())}"
))
_parent_tokens_type = {
@ -147,10 +147,10 @@ class ShotMetadataSolver:
try:
parent_name = _parent.format(
**_parent_tokens_formatting_data)
except KeyError as _E:
except KeyError as _error:
raise CreatorError((
"Make sure all keys in settings are correct : \n\n"
f"`{_E}` from template string "
f"`{_error}` from template string "
f"{shot_hierarchy['parents_path']}, "
f" has no equivalent in \n"
f"{list(_parent_tokens_formatting_data.keys())} parents"
@ -319,8 +319,16 @@ class ShotMetadataSolver:
tasks = self._generate_tasks_from_settings(
project_doc)
# generate hierarchy path from parents
hierarchy_path = self._create_hierarchy_path(parents)
if hierarchy_path:
folder_path = f"/{hierarchy_path}/{shot_name}"
else:
folder_path = f"/{shot_name}"
return shot_name, {
"hierarchy": self._create_hierarchy_path(parents),
"hierarchy": hierarchy_path,
"folderPath": folder_path,
"parents": parents,
"tasks": tasks
}
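The solver now returns a "folderPath" alongside the parent hierarchy: the hierarchy path with the shot name appended, or just "/<shot>" when there are no parents. A small sketch of that assembly, assuming the parents are already plain names:

def build_folder_path(parent_names, shot_name):
    hierarchy_path = "/".join(parent_names)
    if hierarchy_path:
        return "/{}/{}".format(hierarchy_path, shot_name)
    return "/{}".format(shot_name)

# build_folder_path(["episodes", "ep01", "sq010"], "sh010")
# -> "/episodes/ep01/sq010/sh010"
# build_folder_path([], "sh010") -> "/sh010"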

View file

@ -1,7 +1,9 @@
from openpype import AYON_SERVER_ENABLED
from openpype.client import (
get_assets,
get_subsets,
get_last_versions,
get_asset_name_identifier,
)
from openpype.lib.attribute_definitions import (
FileDef,
@ -114,7 +116,10 @@ class SettingsCreator(TrayPublishCreator):
# Fill 'version_to_use' if version control is enabled
if self.allow_version_control:
asset_name = data["asset"]
if AYON_SERVER_ENABLED:
asset_name = data["folderPath"]
else:
asset_name = data["asset"]
subset_docs_by_asset_id = self._prepare_next_versions(
[asset_name], [subset_name])
version = subset_docs_by_asset_id[asset_name].get(subset_name)
@ -162,10 +167,10 @@ class SettingsCreator(TrayPublishCreator):
asset_docs = get_assets(
self.project_name,
asset_names=asset_names,
fields=["_id", "name"]
fields=["_id", "name", "data.parents"]
)
asset_names_by_id = {
asset_doc["_id"]: asset_doc["name"]
asset_doc["_id"]: get_asset_name_identifier(asset_doc)
for asset_doc in asset_docs
}
subset_docs = list(get_subsets(

View file

@ -6,6 +6,7 @@ production type `ociolook`. All files are published as representation.
"""
from pathlib import Path
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_by_name
from openpype.lib.attribute_definitions import (
FileDef, EnumDef, TextDef, UISeparatorDef
@ -54,8 +55,12 @@ This creator publishes color space look file (LUT).
# this should never happen
raise CreatorError("Missing files from representation")
if AYON_SERVER_ENABLED:
asset_name = instance_data["folderPath"]
else:
asset_name = instance_data["asset"]
asset_doc = get_asset_by_name(
self.project_name, instance_data["asset"])
self.project_name, asset_name)
subset_name = self.get_subset_name(
variant=instance_data["variant"],

View file

@ -1,6 +1,7 @@
import os
from copy import deepcopy
import opentimelineio as otio
from openpype import AYON_SERVER_ENABLED
from openpype.client import (
get_asset_by_name,
get_project
@ -101,14 +102,23 @@ class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase):
label = "Editorial Shot"
def get_instance_attr_defs(self):
attr_defs = [
TextDef(
"asset_name",
label="Asset name",
instance_attributes = []
if AYON_SERVER_ENABLED:
instance_attributes.append(
TextDef(
"folderPath",
label="Folder path"
)
)
]
attr_defs.extend(CLIP_ATTR_DEFS)
return attr_defs
else:
instance_attributes.append(
TextDef(
"shotName",
label="Shot name"
)
)
instance_attributes.extend(CLIP_ATTR_DEFS)
return instance_attributes
class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase):
@ -214,8 +224,11 @@ or updating already created. Publishing will create OTIO file.
i["family"] for i in self._creator_settings["family_presets"]
]
}
# Create otio editorial instance
asset_name = instance_data["asset"]
if AYON_SERVER_ENABLED:
asset_name = instance_data["folderPath"]
else:
asset_name = instance_data["asset"]
asset_doc = get_asset_by_name(self.project_name, asset_name)
if pre_create_data["fps"] == "from_selection":
@ -595,19 +608,23 @@ or updating already created. Publishing will create OTIO file.
Returns:
str: label string
"""
shot_name = instance_data["shotName"]
if AYON_SERVER_ENABLED:
asset_name = instance_data["creator_attributes"]["folderPath"]
else:
asset_name = instance_data["creator_attributes"]["shotName"]
variant_name = instance_data["variant"]
family = preset["family"]
# get variant name from preset or from inharitance
# get variant name from preset or from inheritance
_variant_name = preset.get("variant") or variant_name
# subset name
subset_name = "{}{}".format(
family, _variant_name.capitalize()
)
label = "{}_{}".format(
shot_name,
label = "{} {}".format(
asset_name,
subset_name
)
@ -666,7 +683,10 @@ or updating already created. Publishing will create OTIO file.
}
)
self._validate_name_uniqueness(shot_name)
# It should be validated only in openpype since we are supporting
# publishing to AYON with folder path and uniqueness is not an issue
if not AYON_SERVER_ENABLED:
self._validate_name_uniqueness(shot_name)
timing_data = self._get_timing_data(
otio_clip,
@ -677,35 +697,43 @@ or updating already created. Publishing will create OTIO file.
# create creator attributes
creator_attributes = {
"asset_name": shot_name,
"Parent hierarchy path": shot_metadata["hierarchy"],
"workfile_start_frame": workfile_start_frame,
"fps": fps,
"handle_start": int(handle_start),
"handle_end": int(handle_end)
}
# add timing data
creator_attributes.update(timing_data)
# create shared new instance data
# create base instance data
base_instance_data = {
"shotName": shot_name,
"variant": variant_name,
# HACK: just for temporal bug workaround
# TODO: should loockup shot name for update
"asset": parent_asset_name,
"task": "",
"newAssetPublishing": True,
# parent time properties
"trackStartFrame": track_start_frame,
"timelineOffset": timeline_offset,
"isEditorial": True,
# creator_attributes
"creator_attributes": creator_attributes
}
# update base instance data with context data
# and also update creator attributes with context data
if AYON_SERVER_ENABLED:
# TODO: this is here just to be able to publish
# to AYON with folder path
creator_attributes["folderPath"] = shot_metadata.pop("folderPath")
base_instance_data["folderPath"] = parent_asset_name
else:
creator_attributes.update({
"shotName": shot_name,
"Parent hierarchy path": shot_metadata["hierarchy"]
})
base_instance_data["asset"] = parent_asset_name
# add creator attributes to shared instance data
base_instance_data["creator_attributes"] = creator_attributes
# add hierarchy shot metadata
base_instance_data.update(shot_metadata)

View file

@ -2,6 +2,8 @@ import copy
import os
import re
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_name_identifier
from openpype.lib import (
FileDef,
BoolDef,
@ -64,8 +66,13 @@ class BatchMovieCreator(TrayPublishCreator):
subset_name, task_name = self._get_subset_and_task(
asset_doc, data["variant"], self.project_name)
asset_name = get_asset_name_identifier(asset_doc)
instance_data["task"] = task_name
instance_data["asset"] = asset_doc["name"]
if AYON_SERVER_ENABLED:
instance_data["folderPath"] = asset_name
else:
instance_data["asset"] = asset_name
# Create new instance
new_instance = CreatedInstance(self.family, subset_name,

View file

@ -28,9 +28,9 @@ class CollectSequenceFrameData(
return
# editorial would fail since they might not be in database yet
is_editorial = instance.data.get("isEditorial")
if is_editorial:
self.log.debug("Instance is Editorial. Skipping.")
new_asset_publishing = instance.data.get("newAssetPublishing")
if new_asset_publishing:
self.log.debug("Instance is creating new asset. Skipping.")
return
frame_data = self.get_frame_data_from_repre_sequence(instance)

View file

@ -2,6 +2,8 @@ from pprint import pformat
import pyblish.api
import opentimelineio as otio
from openpype import AYON_SERVER_ENABLED
class CollectShotInstance(pyblish.api.InstancePlugin):
""" Collect shot instances
@ -119,8 +121,7 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
frame_end = _cr_attrs["frameEnd"]
frame_dur = frame_end - frame_start
return {
"asset": _cr_attrs["asset_name"],
data = {
"fps": float(_cr_attrs["fps"]),
"handleStart": _cr_attrs["handle_start"],
"handleEnd": _cr_attrs["handle_end"],
@ -133,6 +134,12 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
"sourceOut": _cr_attrs["sourceOut"],
"workfileFrameStart": workfile_start_frame
}
if AYON_SERVER_ENABLED:
data["asset"] = _cr_attrs["folderPath"]
else:
data["asset"] = _cr_attrs["shotName"]
return data
def _solve_hierarchy_context(self, instance):
""" Adding hierarchy data to context shared data.
@ -148,7 +155,7 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
else {}
)
name = instance.data["asset"]
asset_name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
@ -170,7 +177,7 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
parents = instance.data.get('parents', [])
actual = {name: in_info}
actual = {asset_name: in_info}
for parent in reversed(parents):
parent_name = parent["entity_name"]

View file

@ -31,9 +31,9 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
return
# editorial would fail since they might not be in database yet
is_editorial = instance.data.get("isEditorial")
if is_editorial:
self.log.debug("Instance is Editorial. Skipping.")
new_asset_publishing = instance.data.get("newAssetPublishing")
if new_asset_publishing:
self.log.debug("Instance is creating new asset. Skipping.")
return
if (self.skip_timelines_check and
@ -41,6 +41,7 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
for pattern in self.skip_timelines_check)):
self.log.info("Skipping for {} task".format(instance.data["task"]))
asset_doc = instance.data["assetEntity"]
asset_data = asset_doc["data"]
frame_start = asset_data["frameStart"]
frame_end = asset_data["frameEnd"]

View file

@ -37,7 +37,8 @@ Todos:
import collections
from typing import Any, Optional, Union
from openpype.client import get_asset_by_name
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_by_name, get_asset_name_identifier
from openpype.lib import (
prepare_template_data,
AbstractAttrDef,
@ -784,18 +785,25 @@ class TVPaintAutoDetectRenderCreator(TVPaintCreator):
project_name,
host_name=self.create_context.host_name,
)
asset_name = get_asset_name_identifier(asset_doc)
if existing_instance is not None:
existing_instance["asset"] = asset_doc["name"]
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name
return existing_instance
instance_data: dict[str, str] = {
"asset": asset_doc["name"],
"task": task_name,
"family": creator.family,
"variant": variant
}
if AYON_SERVER_ENABLED:
instance_data["folderPath"] = asset_name
else:
instance_data["asset"] = asset_name
pre_create_data: dict[str, str] = {
"group_id": group_id,
"mark_for_review": mark_for_review
@ -820,6 +828,8 @@ class TVPaintAutoDetectRenderCreator(TVPaintCreator):
for layer_name in render_pass["layer_names"]:
render_pass_by_layer_name[layer_name] = render_pass
asset_name = get_asset_name_identifier(asset_doc)
for layer in layers:
layer_name = layer["name"]
variant = layer_name
@ -838,17 +848,25 @@ class TVPaintAutoDetectRenderCreator(TVPaintCreator):
)
if render_pass is not None:
render_pass["asset"] = asset_doc["name"]
if AYON_SERVER_ENABLED:
render_pass["folderPath"] = asset_name
else:
render_pass["asset"] = asset_name
render_pass["task"] = task_name
render_pass["subset"] = subset_name
continue
instance_data: dict[str, str] = {
"asset": asset_doc["name"],
"task": task_name,
"family": creator.family,
"variant": variant
}
if AYON_SERVER_ENABLED:
instance_data["folderPath"] = asset_name
else:
instance_data["asset"] = asset_name
pre_create_data: dict[str, Any] = {
"render_layer_instance_id": render_layer_instance.id,
"layer_names": [layer_name],
@ -882,9 +900,13 @@ class TVPaintAutoDetectRenderCreator(TVPaintCreator):
def create(self, subset_name, instance_data, pre_create_data):
project_name: str = self.create_context.get_current_project_name()
asset_name: str = instance_data["asset"]
if AYON_SERVER_ENABLED:
asset_name: str = instance_data["folderPath"]
else:
asset_name: str = instance_data["asset"]
task_name: str = instance_data["task"]
asset_doc: dict[str, Any] = get_asset_by_name(project_name, asset_name)
asset_doc: dict[str, Any] = get_asset_by_name(
project_name, asset_name)
render_layers_by_group_id: dict[int, CreatedInstance] = {}
render_passes_by_render_layer_id: dict[int, list[CreatedInstance]] = (
@ -1061,7 +1083,6 @@ class TVPaintSceneRenderCreator(TVPaintAutoCreator):
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant,
"creator_attributes": {
@ -1073,6 +1094,10 @@ class TVPaintSceneRenderCreator(TVPaintAutoCreator):
self.default_pass_name
)
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
if not self.active_on_create:
data["active"] = False
@ -1101,8 +1126,14 @@ class TVPaintSceneRenderCreator(TVPaintAutoCreator):
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
existing_name = None
if AYON_SERVER_ENABLED:
existing_name = existing_instance.get("folderPath")
if existing_name is None:
existing_name = existing_instance["asset"]
if (
existing_instance["asset"] != asset_name
existing_name != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -1114,7 +1145,10 @@ class TVPaintSceneRenderCreator(TVPaintAutoCreator):
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,3 +1,4 @@
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
@ -33,6 +34,13 @@ class TVPaintReviewCreator(TVPaintAutoCreator):
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if existing_instance is None:
existing_asset_name = None
elif AYON_SERVER_ENABLED:
existing_asset_name = existing_instance["folderPath"]
else:
existing_asset_name = existing_instance["asset"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@ -43,10 +51,14 @@ class TVPaintReviewCreator(TVPaintAutoCreator):
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
if not self.active_on_create:
data["active"] = False
@ -59,7 +71,7 @@ class TVPaintReviewCreator(TVPaintAutoCreator):
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
existing_asset_name != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -71,6 +83,9 @@ class TVPaintReviewCreator(TVPaintAutoCreator):
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,3 +1,4 @@
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_asset_by_name
from openpype.pipeline import CreatedInstance
from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator
@ -29,6 +30,13 @@ class TVPaintWorkfileCreator(TVPaintAutoCreator):
asset_name = create_context.get_current_asset_name()
task_name = create_context.get_current_task_name()
if existing_instance is None:
existing_asset_name = None
elif AYON_SERVER_ENABLED:
existing_asset_name = existing_instance["folderPath"]
else:
existing_asset_name = existing_instance["asset"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
subset_name = self.get_subset_name(
@ -39,10 +47,13 @@ class TVPaintWorkfileCreator(TVPaintAutoCreator):
host_name
)
data = {
"asset": asset_name,
"task": task_name,
"variant": self.default_variant
}
if AYON_SERVER_ENABLED:
data["folderPath"] = asset_name
else:
data["asset"] = asset_name
new_instance = CreatedInstance(
self.family, subset_name, data, self
@ -53,7 +64,7 @@ class TVPaintWorkfileCreator(TVPaintAutoCreator):
self._add_instance_to_context(new_instance)
elif (
existing_instance["asset"] != asset_name
existing_asset_name != asset_name
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
@ -65,6 +76,9 @@ class TVPaintWorkfileCreator(TVPaintAutoCreator):
host_name,
existing_instance
)
existing_instance["asset"] = asset_name
if AYON_SERVER_ENABLED:
existing_instance["folderPath"] = asset_name
else:
existing_instance["asset"] = asset_name
existing_instance["task"] = task_name
existing_instance["subset"] = subset_name

View file

@ -1,4 +1,5 @@
import pyblish.api
from openpype import AYON_SERVER_ENABLED
from openpype.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin,
@ -24,12 +25,19 @@ class FixAssetNames(pyblish.api.Action):
old_instance_items = list_instances()
new_instance_items = []
for instance_item in old_instance_items:
instance_asset_name = instance_item.get("asset")
if AYON_SERVER_ENABLED:
instance_asset_name = instance_item.get("folderPath")
else:
instance_asset_name = instance_item.get("asset")
if (
instance_asset_name
and instance_asset_name != context_asset_name
):
instance_item["asset"] = context_asset_name
if AYON_SERVER_ENABLED:
instance_item["folderPath"] = context_asset_name
else:
instance_item["asset"] = context_asset_name
new_instance_items.append(instance_item)
write_instances(new_instance_items)

View file

@ -12,6 +12,7 @@ from abc import ABCMeta, abstractmethod
import six
from openpype import AYON_SERVER_ENABLED, PACKAGE_DIR
from openpype.client import get_asset_name_identifier
from openpype.settings import (
get_system_settings,
get_project_settings,
@ -1728,7 +1729,9 @@ def prepare_context_environments(data, env_group=None, modules_manager=None):
"AVALON_APP_NAME": app.full_name
}
if asset_doc:
context_env["AVALON_ASSET"] = asset_doc["name"]
asset_name = get_asset_name_identifier(asset_doc)
context_env["AVALON_ASSET"] = asset_name
if task_name:
context_env["AVALON_TASK"] = task_name

Some files were not shown because too many files have changed in this diff.