Merge branch 'feature/OP-7190_Use-folder-path-as-identifier-in-editorial' into enhancement/OP-7234_Use-folder-path-as-identifier-in-traypublisher-editorial

This commit is contained in:
Jakub Jezek 2023-11-15 11:36:09 +01:00
commit 1ec0e208b6
No known key found for this signature in database
GPG key ID: 730D7C02726179A7
134 changed files with 10694 additions and 400 deletions

View file

@ -35,6 +35,9 @@ body:
label: Version
description: What version are you running? Look to OpenPype Tray
options:
- 3.17.6-nightly.3
- 3.17.6-nightly.2
- 3.17.6-nightly.1
- 3.17.5
- 3.17.5-nightly.3
- 3.17.5-nightly.2
@ -132,9 +135,6 @@ body:
- 3.15.2-nightly.2
- 3.15.2-nightly.1
- 3.15.1
- 3.15.1-nightly.6
- 3.15.1-nightly.5
- 3.15.1-nightly.4
validations:
required: true
- type: dropdown

View file

@ -1,6 +1,7 @@
from .mongo import (
OpenPypeMongoConnection,
)
from .server.utils import get_ayon_server_api_connection
from .entities import (
get_projects,
@ -61,6 +62,8 @@ from .operations import (
__all__ = (
"OpenPypeMongoConnection",
"get_ayon_server_api_connection",
"get_projects",
"get_project",
"get_whole_project",

View file

@ -1,9 +1,8 @@
import collections
from ayon_api import get_server_api_connection
from openpype.client.mongo.operations import CURRENT_THUMBNAIL_SCHEMA
from .utils import get_ayon_server_api_connection
from .openpype_comp import get_folders_with_tasks
from .conversion_utils import (
project_fields_v3_to_v4,
@ -37,7 +36,7 @@ def get_projects(active=True, inactive=False, library=None, fields=None):
elif inactive:
active = False
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
for project in con.get_projects(active, library, fields=fields):
yield convert_v4_project_to_v3(project)
@ -45,7 +44,7 @@ def get_projects(active=True, inactive=False, library=None, fields=None):
def get_project(project_name, active=True, inactive=False, fields=None):
# Skip if both are disabled
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
return convert_v4_project_to_v3(
con.get_project(project_name, fields=fields)
@ -66,7 +65,7 @@ def _get_subsets(
fields=None
):
# Convert fields and add minimum required fields
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = subset_fields_v3_to_v4(fields, con)
if fields is not None:
for key in (
@ -102,7 +101,7 @@ def _get_versions(
active=None,
fields=None
):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = version_fields_v3_to_v4(fields, con)
@ -211,8 +210,7 @@ def get_assets(
if archived:
active = None
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = folder_fields_v3_to_v4(fields, con)
kwargs = dict(
folder_ids=asset_ids,
@ -232,10 +230,12 @@ def get_assets(
else:
new_asset_names.add(name)
yielded_ids = set()
if folder_paths:
for folder in _folders_query(
project_name, con, fields, folder_paths=folder_paths, **kwargs
):
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
if not new_asset_names:
@ -244,7 +244,9 @@ def get_assets(
for folder in _folders_query(
project_name, con, fields, folder_names=new_asset_names, **kwargs
):
yield convert_v4_folder_to_v3(folder, project_name)
if folder["id"] not in yielded_ids:
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
def get_archived_assets(
@ -265,7 +267,7 @@ def get_archived_assets(
def get_asset_ids_with_subsets(project_name, asset_ids=None):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.get_folder_ids_with_products(project_name, asset_ids)
@ -311,7 +313,7 @@ def get_subsets(
def get_subset_families(project_name, subset_ids=None):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.get_product_type_names(project_name, subset_ids)
@ -459,7 +461,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
if not version_id:
return []
con = get_server_api_connection()
con = get_ayon_server_api_connection()
version_links = con.get_version_links(
project_name, version_id, link_direction="out")
@ -475,7 +477,7 @@ def get_output_link_versions(project_name, version_id, fields=None):
def version_is_latest(project_name, version_id):
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.version_is_latest(project_name, version_id)
@ -530,7 +532,7 @@ def get_representations(
else:
active = None
con = get_server_api_connection()
con = get_ayon_server_api_connection()
fields = representation_fields_v3_to_v4(fields, con)
if fields and active is not None:
fields.add("active")
@ -564,7 +566,7 @@ def get_representations_parents(project_name, representations):
repre["_id"]
for repre in representations
}
con = get_server_api_connection()
con = get_ayon_server_api_connection()
parents_by_repre_id = con.get_representations_parents(project_name,
repre_ids)
folder_ids = set()
@ -706,7 +708,7 @@ def get_workfile_info(
if not asset_id or not task_name or not filename:
return None
con = get_server_api_connection()
con = get_ayon_server_api_connection()
task = con.get_task_by_name(
project_name, asset_id, task_name, fields=["id", "name", "folderId"]
)

View file

@ -1,6 +1,4 @@
import ayon_api
from ayon_api import get_folder_links, get_versions_links
from .utils import get_ayon_server_api_connection
from .entities import get_assets, get_representation_by_id
@ -28,7 +26,8 @@ def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None):
if not asset_id:
asset_id = asset_doc["_id"]
links = get_folder_links(project_name, asset_id, link_direction="in")
con = get_ayon_server_api_connection()
links = con.get_folder_links(project_name, asset_id, link_direction="in")
return [
link["entityId"]
for link in links
@ -115,6 +114,7 @@ def get_linked_representation_id(
if link_type:
link_types = [link_type]
con = get_ayon_server_api_connection()
# Store already found version ids to avoid recursion, and also to store
# output -> Don't forget to remove 'version_id' at the end!!!
linked_version_ids = {version_id}
@ -124,7 +124,7 @@ def get_linked_representation_id(
if not versions_to_check:
break
links = get_versions_links(
links = con.get_versions_links(
project_name,
versions_to_check,
link_types=link_types,
@ -145,8 +145,8 @@ def get_linked_representation_id(
linked_version_ids.remove(version_id)
if not linked_version_ids:
return []
representations = ayon_api.get_representations(
con = get_ayon_server_api_connection()
representations = con.get_representations(
project_name,
version_ids=linked_version_ids,
fields=["id"])

View file

@ -5,7 +5,6 @@ import uuid
import datetime
from bson.objectid import ObjectId
from ayon_api import get_server_api_connection
from openpype.client.operations_base import (
REMOVED_VALUE,
@ -41,7 +40,7 @@ from .conversion_utils import (
convert_update_representation_to_v4,
convert_update_workfile_info_to_v4,
)
from .utils import create_entity_id
from .utils import create_entity_id, get_ayon_server_api_connection
def _create_or_convert_to_id(entity_id=None):
@ -680,7 +679,7 @@ class OperationsSession(BaseOperationsSession):
def __init__(self, con=None, *args, **kwargs):
super(OperationsSession, self).__init__(*args, **kwargs)
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
self._con = con
self._project_cache = {}
self._nested_operations = collections.defaultdict(list)
@ -858,7 +857,7 @@ def create_project(
"""
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.create_project(
project_name,
@ -870,12 +869,12 @@ def create_project(
def delete_project(project_name, con=None):
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.delete_project(project_name)
def create_thumbnail(project_name, src_filepath, thumbnail_id=None, con=None):
if con is None:
con = get_server_api_connection()
con = get_ayon_server_api_connection()
return con.create_thumbnail(project_name, src_filepath, thumbnail_id)

View file

@ -1,8 +1,33 @@
import os
import uuid
import ayon_api
from openpype.client.operations_base import REMOVED_VALUE
class _GlobalCache:
    """Module-level flag tracking one-time AYON connection setup."""

    initialized = False


def get_ayon_server_api_connection():
    """Return the global AYON server API connection.

    On the first call the connection is configured with the local site id
    and the client version (read from the ``AYON_VERSION`` environment
    variable); every later call just returns the already-created global
    connection.
    """
    if _GlobalCache.initialized:
        return ayon_api.get_server_api_connection()

    # Imported lazily to avoid a potential import cycle on module load.
    from openpype.lib.local_settings import get_local_site_id

    _GlobalCache.initialized = True
    site_id = get_local_site_id()
    version = os.getenv("AYON_VERSION")
    if not ayon_api.is_connection_created():
        return ayon_api.create_connection(site_id, version)

    con = ayon_api.get_server_api_connection()
    con.set_site_id(site_id)
    con.set_client_version(version)
    return con
def create_entity_id():
    """Create a new unique entity id.

    Returns:
        str: Hex form (32 chars) of a time-based UUID1.
    """
    new_id = uuid.uuid1()
    return new_id.hex

View file

@ -266,9 +266,57 @@ def read(node: bpy.types.bpy_struct_meta_idprop):
return data
def get_selection() -> List[bpy.types.Object]:
"""Return the selected objects from the current scene."""
return [obj for obj in bpy.context.scene.objects if obj.select_get()]
def get_selected_collections():
    """
    Returns a list of the currently selected collections in the outliner.

    Raises:
        RuntimeError: If the outliner cannot be found in the main Blender
            window.

    Returns:
        list: A list of `bpy.types.Collection` objects that are currently
            selected in the outliner.
    """
    screen_areas = bpy.context.window.screen.areas
    try:
        outliner = next(
            a for a in screen_areas if a.type == 'OUTLINER')
        window_region = next(
            r for r in outliner.regions if r.type == 'WINDOW')
    except StopIteration as e:
        raise RuntimeError("Could not find outliner. An outliner space "
                           "must be in the main Blender window.") from e

    # `selected_ids` is only populated in an outliner context, so the
    # context must be temporarily overridden to point at that area.
    with bpy.context.temp_override(
        window=bpy.context.window,
        area=outliner,
        region=window_region,
        screen=bpy.context.window.screen
    ):
        selected = list(bpy.context.selected_ids)

    return [item for item in selected if isinstance(item, bpy.types.Collection)]
def get_selection(include_collections: bool = False) -> List[bpy.types.Object]:
    """
    Returns a list of selected objects in the current Blender scene.

    Args:
        include_collections (bool, optional): Whether to include selected
            collections in the result. Defaults to False.

    Returns:
        List[bpy.types.Object]: A list of selected objects.
    """
    picked = [
        obj for obj in bpy.context.scene.objects
        if obj.select_get()
    ]
    if include_collections:
        picked += get_selected_collections()
    return picked
@contextlib.contextmanager

View file

@ -9,7 +9,10 @@ from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
)
from .pipeline import AVALON_CONTAINERS
from .pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
from .ops import (
MainThreadItem,
execute_in_main_thread
@ -40,9 +43,16 @@ def get_unique_number(
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
return "01"
asset_groups = avalon_container.all_objects
container_names = [c.name for c in asset_groups if c.type == 'EMPTY']
# Check the names of both object and collection containers
obj_asset_groups = avalon_container.objects
obj_group_names = {
c.name for c in obj_asset_groups
if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)}
coll_asset_groups = avalon_container.children
coll_group_names = {
c.name for c in coll_asset_groups
if c.get(AVALON_PROPERTY)}
container_names = obj_group_names.union(coll_group_names)
count = 1
name = f"{asset}_{count:0>2}_{subset}"
while name in container_names:

View file

@ -15,6 +15,8 @@ class CreateBlendScene(plugin.Creator):
family = "blendScene"
icon = "cubes"
maintain_selection = False
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
@ -31,21 +33,20 @@ class CreateBlendScene(plugin.Creator):
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
# Create the new asset group as collection
asset_group = bpy.data.collections.new(name=name)
instances.children.link(asset_group)
self.data['task'] = get_current_task_name()
lib.imprint(asset_group, self.data)
# Add selected objects to instance
if (self.options or {}).get("useSelection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
if obj.parent in selected:
obj.select_set(False)
continue
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
selection = lib.get_selection(include_collections=True)
for data in selection:
if isinstance(data, bpy.types.Collection):
asset_group.children.link(data)
elif isinstance(data, bpy.types.Object):
asset_group.objects.link(data)
return asset_group

View file

@ -20,7 +20,7 @@ from openpype.hosts.blender.api.pipeline import (
class BlendLoader(plugin.AssetLoader):
"""Load assets from a .blend file."""
families = ["model", "rig", "layout", "camera", "blendScene"]
families = ["model", "rig", "layout", "camera"]
representations = ["blend"]
label = "Append Blend"

View file

@ -0,0 +1,221 @@
from typing import Dict, List, Optional
from pathlib import Path
import bpy
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID,
)
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.lib import imprint
from openpype.hosts.blender.api.pipeline import (
AVALON_CONTAINERS,
AVALON_PROPERTY,
)
class BlendSceneLoader(plugin.AssetLoader):
    """Load assets from a .blend file."""

    families = ["blendScene"]
    representations = ["blend"]

    label = "Append Blend"
    icon = "code-fork"
    color = "orange"

    @staticmethod
    def _get_asset_container(collections):
        # The asset container is the top-most appended collection carrying
        # the avalon property, i.e. one that no other appended collection
        # uses as a child.
        for coll in collections:
            parents = [c for c in collections if c.user_of_id(coll)]
            if coll.get(AVALON_PROPERTY) and not parents:
                return coll

        return None

    def _process_data(self, libpath, group_name, family):
        """Append the .blend file content and return (container, members).

        Args:
            libpath (str): Path to the .blend file to append.
            group_name (str): Namespace prefix and new container name.
            family: Unused; kept for signature compatibility with callers.
        """
        # Append all the data from the .blend file
        with bpy.data.libraries.load(
            libpath, link=False, relative=False
        ) as (data_from, data_to):
            for attr in dir(data_to):
                setattr(data_to, attr, getattr(data_from, attr))

        members = []

        # Rename the object to add the asset name
        for attr in dir(data_to):
            for data in getattr(data_to, attr):
                data.name = f"{group_name}:{data.name}"
                members.append(data)

        container = self._get_asset_container(
            data_to.collections)
        assert container, "No asset group found"

        container.name = group_name

        # Link the group to the scene
        bpy.context.scene.collection.children.link(container)

        # Remove the library from the blend file
        library = bpy.data.libraries.get(bpy.path.basename(libpath))
        bpy.data.libraries.remove(library)

        return container, members

    def process_asset(
        self, context: dict, name: str, namespace: Optional[str] = None,
        options: Optional[Dict] = None
    ) -> Optional[List]:
        """
        Arguments:
            name: Use pre-defined name
            namespace: Use pre-defined namespace
            context: Full parenthood of representation to load
            options: Additional settings dictionary
        """
        libpath = self.filepath_from_context(context)
        asset = context["asset"]["name"]
        subset = context["subset"]["name"]

        try:
            family = context["representation"]["context"]["family"]
        except KeyError:
            # BUGFIX: a missing dict key raises KeyError, not ValueError,
            # so the previous `except ValueError` never triggered and the
            # fallback below was dead code.
            family = "model"

        asset_name = plugin.asset_name(asset, subset)
        unique_number = plugin.get_unique_number(asset, subset)
        group_name = plugin.asset_name(asset, subset, unique_number)
        namespace = namespace or f"{asset}_{unique_number}"

        avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
        if not avalon_container:
            avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
            bpy.context.scene.collection.children.link(avalon_container)

        container, members = self._process_data(libpath, group_name, family)

        avalon_container.children.link(container)

        data = {
            "schema": "openpype:container-2.0",
            "id": AVALON_CONTAINER_ID,
            "name": name,
            "namespace": namespace or '',
            "loader": str(self.__class__.__name__),
            "representation": str(context["representation"]["_id"]),
            "libpath": libpath,
            "asset_name": asset_name,
            "parent": str(context["representation"]["parent"]),
            # Reuse the resolved family: re-reading the context key here
            # would raise KeyError whenever the fallback above was used.
            "family": family,
            "objectName": group_name,
            "members": members,
        }

        container[AVALON_PROPERTY] = data

        objects = [
            obj for obj in bpy.data.objects
            if obj.name.startswith(f"{group_name}:")
        ]

        self[:] = objects
        return objects

    def exec_update(self, container: Dict, representation: Dict):
        """
        Update the loaded asset.
        """
        group_name = container["objectName"]
        asset_group = bpy.data.collections.get(group_name)
        libpath = Path(get_representation_path(representation)).as_posix()

        assert asset_group, (
            f"The asset is not loaded: {container['objectName']}"
        )

        # Get the parents of the members of the asset group, so we can
        # re-link them after the update.
        # Also gets the transform for each object to reapply after the update.
        collection_parents = {}
        member_transforms = {}
        members = asset_group.get(AVALON_PROPERTY).get("members", [])
        loaded_collections = {c for c in bpy.data.collections if c in members}
        loaded_collections.add(bpy.data.collections.get(AVALON_CONTAINERS))
        for member in members:
            if isinstance(member, bpy.types.Object):
                member_parents = set(member.users_collection)
                member_transforms[member.name] = member.matrix_basis.copy()
            elif isinstance(member, bpy.types.Collection):
                member_parents = {
                    c for c in bpy.data.collections if c.user_of_id(member)}
            else:
                continue

            # Keep only parents that do not belong to the loaded content
            # itself; those are external links we must restore.
            member_parents = member_parents.difference(loaded_collections)
            if member_parents:
                collection_parents[member.name] = list(member_parents)

        old_data = dict(asset_group.get(AVALON_PROPERTY))

        self.exec_remove(container)

        family = container["family"]
        asset_group, members = self._process_data(libpath, group_name, family)

        # Re-link members to their previous external parents and restore
        # object transforms captured before the removal.
        for member in members:
            if member.name in collection_parents:
                for parent in collection_parents[member.name]:
                    if isinstance(member, bpy.types.Object):
                        parent.objects.link(member)
                    elif isinstance(member, bpy.types.Collection):
                        parent.children.link(member)
            if member.name in member_transforms and isinstance(
                member, bpy.types.Object
            ):
                member.matrix_basis = member_transforms[member.name]

        avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
        avalon_container.children.link(asset_group)

        # Restore the old data, but reset members, as they don't exist anymore
        # This avoids a crash, because the memory addresses of those members
        # are not valid anymore
        old_data["members"] = []
        asset_group[AVALON_PROPERTY] = old_data

        new_data = {
            "libpath": libpath,
            "representation": str(representation["_id"]),
            "parent": str(representation["parent"]),
            "members": members,
        }

        imprint(asset_group, new_data)

    def exec_remove(self, container: Dict) -> bool:
        """
        Remove an existing container from a Blender scene.
        """
        group_name = container["objectName"]
        asset_group = bpy.data.collections.get(group_name)

        members = set(asset_group.get(AVALON_PROPERTY).get("members", []))
        if members:
            for attr_name in dir(bpy.data):
                attr = getattr(bpy.data, attr_name)
                if not isinstance(attr, bpy.types.bpy_prop_collection):
                    continue
                # ensure to make a list copy because we
                # we remove members as we iterate
                for data in list(attr):
                    if data not in members or data == asset_group:
                        continue
                    attr.remove(data)

        bpy.data.collections.remove(asset_group)

View file

@ -1,4 +1,3 @@
import json
from typing import Generator
import bpy
@ -50,6 +49,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
for group in asset_groups:
instance = self.create_instance(context, group)
instance.data["instance_group"] = group
members = []
if isinstance(group, bpy.types.Collection):
members = list(group.objects)
@ -65,6 +65,6 @@ class CollectInstances(pyblish.api.ContextPlugin):
members.append(group)
instance[:] = members
self.log.debug(json.dumps(instance.data, indent=4))
self.log.debug(instance.data)
for obj in instance:
self.log.debug(obj)

View file

@ -17,7 +17,10 @@ class ExtractABC(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -59,8 +62,8 @@ class ExtractABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")
class ExtractModelABC(ExtractABC):

View file

@ -17,7 +17,11 @@ class ExtractAnimationABC(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -66,5 +70,5 @@ class ExtractAnimationABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -17,7 +17,10 @@ class ExtractBlend(publish.Extractor):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -25,19 +28,27 @@ class ExtractBlend(publish.Extractor):
data_blocks = set()
for obj in instance:
data_blocks.add(obj)
for data in instance:
data_blocks.add(data)
# Pack used images in the blend files.
if obj.type == 'MESH':
for material_slot in obj.material_slots:
mat = material_slot.material
if mat and mat.use_nodes:
tree = mat.node_tree
if tree.type == 'SHADER':
for node in tree.nodes:
if node.bl_idname == 'ShaderNodeTexImage':
if node.image:
node.image.pack()
if not (
isinstance(data, bpy.types.Object) and data.type == 'MESH'
):
continue
for material_slot in data.material_slots:
mat = material_slot.material
if not (mat and mat.use_nodes):
continue
tree = mat.node_tree
if tree.type != 'SHADER':
continue
for node in tree.nodes:
if node.bl_idname != 'ShaderNodeTexImage':
continue
# Check if image is not packed already
# and pack it if not.
if node.image and node.image.packed_file is None:
node.image.pack()
bpy.data.libraries.write(filepath, data_blocks)
@ -52,5 +63,5 @@ class ExtractBlend(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -17,7 +17,10 @@ class ExtractBlendAnimation(publish.Extractor):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -50,5 +53,5 @@ class ExtractBlendAnimation(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -18,7 +18,10 @@ class ExtractCameraABC(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.abc"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.abc"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -64,5 +67,5 @@ class ExtractCameraABC(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -17,7 +17,10 @@ class ExtractCamera(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -73,5 +76,5 @@ class ExtractCamera(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -18,7 +18,10 @@ class ExtractFBX(publish.Extractor):
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
filename = f"{instance_name}.fbx"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
@ -84,5 +87,5 @@ class ExtractFBX(publish.Extractor):
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {representation}")

View file

@ -86,7 +86,10 @@ class ExtractAnimationFBX(publish.Extractor):
asset_group.select_set(True)
armature.select_set(True)
fbx_filename = f"{instance.name}_{armature.name}.fbx"
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
fbx_filename = f"{instance_name}_{armature.name}.fbx"
filepath = os.path.join(stagingdir, fbx_filename)
override = plugin.create_blender_context(
@ -119,7 +122,7 @@ class ExtractAnimationFBX(publish.Extractor):
pair[1].user_clear()
bpy.data.actions.remove(pair[1])
json_filename = f"{instance.name}.json"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
json_dict = {
@ -158,5 +161,5 @@ class ExtractAnimationFBX(publish.Extractor):
instance.data["representations"].append(fbx_representation)
instance.data["representations"].append(json_representation)
self.log.info("Extracted instance '{}' to: {}".format(
instance.name, fbx_representation))
self.log.info(
f"Extracted instance '{instance_name}' to: {fbx_representation}")

View file

@ -212,7 +212,11 @@ class ExtractLayout(publish.Extractor):
json_data.append(json_element)
json_filename = "{}.json".format(instance.name)
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
instance_name = f"{asset_name}_{subset}"
json_filename = f"{instance_name}.json"
json_path = os.path.join(stagingdir, json_filename)
with open(json_path, "w+") as file:
@ -245,5 +249,5 @@ class ExtractLayout(publish.Extractor):
}
instance.data["representations"].append(fbx_representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, json_representation)
self.log.info(
f"Extracted instance '{instance_name}' to: {json_representation}")

View file

@ -50,7 +50,10 @@ class ExtractPlayblast(publish.Extractor):
# get output path
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.debug(f"Outputting images to {path}")

View file

@ -27,7 +27,10 @@ class ExtractThumbnail(publish.Extractor):
self.log.debug("Extracting capture..")
stagingdir = self.staging_dir(instance)
filename = instance.name
asset_name = instance.data["assetEntity"]["name"]
subset = instance.data["subset"]
filename = f"{asset_name}_{subset}"
path = os.path.join(stagingdir, filename)
self.log.debug(f"Outputting images to {path}")

View file

@ -0,0 +1,23 @@
import bpy
import pyblish.api
class ValidateInstanceEmpty(pyblish.api.InstancePlugin):
    """Validator to verify that the instance is not empty"""

    order = pyblish.api.ValidatorOrder - 0.01
    hosts = ["blender"]
    # BUGFIX: a missing comma between "camera" and "layout" made Python
    # concatenate them into the single family "cameralayout", so this
    # validator never ran for camera or layout instances.
    families = ["model", "pointcache", "rig", "camera", "layout", "blendScene"]
    label = "Validate Instance is not Empty"
    optional = False

    def process(self, instance):
        # Fail publishing when the instance group has nothing to publish.
        asset_group = instance.data["instance_group"]

        if isinstance(asset_group, bpy.types.Collection):
            if not (asset_group.objects or asset_group.children):
                raise RuntimeError(f"Instance {instance.name} is empty.")
        elif isinstance(asset_group, bpy.types.Object):
            if not asset_group.children:
                raise RuntimeError(f"Instance {instance.name} is empty.")

View file

@ -48,7 +48,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
"variant": variant
}
if AYON_SERVER_ENABLED:
data["folderpath"] = asset_name
data["folderPath"] = asset_name
else:
data["asset"] = asset_name

View file

@ -1,32 +1,39 @@
# -*- coding: utf-8 -*-
import pyblish.api
from openpype.pipeline import PublishValidationError
import hou
class ValidateHoudiniCommercialLicense(pyblish.api.InstancePlugin):
"""Validate the Houdini instance runs a Commercial license.
class ValidateHoudiniNotApprenticeLicense(pyblish.api.InstancePlugin):
"""Validate the Houdini instance runs a non Apprentice license.
When extracting USD files from a non-commercial Houdini license, even with
Houdini Indie license, the resulting files will get "scrambled" with
a license protection and get a special .usdnc or .usdlc suffix.
USD ROPs:
When extracting USD files from an apprentice Houdini license,
the resulting files will get "scrambled" with a license protection
and get a special .usdnc suffix.
This currently breaks the Subset/representation pipeline so we disallow
any publish with those licenses. Only the commercial license is valid.
This currently breaks the Subset/representation pipeline so we disallow
any publish with apprentice license.
Alembic ROPs:
Houdini Apprentice does not export Alembic.
"""
order = pyblish.api.ValidatorOrder
families = ["usd"]
families = ["usd", "abc"]
hosts = ["houdini"]
label = "Houdini Commercial License"
label = "Houdini Apprentice License"
def process(self, instance):
import hou
if hou.isApprentice():
# Find which family was matched with the plug-in
families = {instance.data["family"]}
families.update(instance.data.get("families", []))
disallowed_families = families.intersection(self.families)
families = " ".join(sorted(disallowed_families)).title()
license = hou.licenseCategory()
if license != hou.licenseCategoryType.Commercial:
raise PublishValidationError(
("USD Publishing requires a full Commercial "
"license. You are on: {}").format(license),
"{} publishing requires a non apprentice license."
.format(families),
title=self.label)

View file

@ -23,27 +23,36 @@ def play_preview_when_done(has_autoplay):
@contextlib.contextmanager
def viewport_camera(camera):
"""Set viewport camera during context
def viewport_layout_and_camera(camera, layout="layout_1"):
"""Set viewport layout and camera during context
***For 3dsMax 2024+
Args:
camera (str): viewport camera
layout (str): layout to use in viewport, defaults to `layout_1`
Use None to not change viewport layout during context.
"""
original = rt.viewport.getCamera()
if not original:
original_camera = rt.viewport.getCamera()
original_layout = rt.viewport.getLayout()
if not original_camera:
# if there is no original camera
# use the current camera as original
original = rt.getNodeByName(camera)
original_camera = rt.getNodeByName(camera)
review_camera = rt.getNodeByName(camera)
try:
if layout is not None:
layout = rt.Name(layout)
if rt.viewport.getLayout() != layout:
rt.viewport.setLayout(layout)
rt.viewport.setCamera(review_camera)
yield
finally:
rt.viewport.setCamera(original)
rt.viewport.setLayout(original_layout)
rt.viewport.setCamera(original_camera)
@contextlib.contextmanager
def viewport_preference_setting(general_viewport,
nitrous_manager,
nitrous_viewport,
vp_button_mgr):
"""Function to set viewport setting during context
@ -51,6 +60,7 @@ def viewport_preference_setting(general_viewport,
Args:
camera (str): Viewport camera for review render
general_viewport (dict): General viewport setting
nitrous_manager (dict): Nitrous graphic manager
nitrous_viewport (dict): Nitrous setting for
preview animation
vp_button_mgr (dict): Viewport button manager Setting
@ -64,6 +74,9 @@ def viewport_preference_setting(general_viewport,
vp_button_mgr_original = {
key: getattr(rt.ViewportButtonMgr, key) for key in vp_button_mgr
}
nitrous_manager_original = {
key: getattr(nitrousGraphicMgr, key) for key in nitrous_manager
}
nitrous_viewport_original = {
key: getattr(viewport_setting, key) for key in nitrous_viewport
}
@ -73,6 +86,8 @@ def viewport_preference_setting(general_viewport,
rt.viewport.EnableSolidBackgroundColorMode(general_viewport["dspBkg"])
for key, value in vp_button_mgr.items():
setattr(rt.ViewportButtonMgr, key, value)
for key, value in nitrous_manager.items():
setattr(nitrousGraphicMgr, key, value)
for key, value in nitrous_viewport.items():
if nitrous_viewport[key] != nitrous_viewport_original[key]:
setattr(viewport_setting, key, value)
@ -83,6 +98,8 @@ def viewport_preference_setting(general_viewport,
rt.viewport.EnableSolidBackgroundColorMode(orig_vp_bkg)
for key, value in vp_button_mgr_original.items():
setattr(rt.ViewportButtonMgr, key, value)
for key, value in nitrous_manager_original.items():
setattr(nitrousGraphicMgr, key, value)
for key, value in nitrous_viewport_original.items():
setattr(viewport_setting, key, value)
@ -149,24 +166,27 @@ def _render_preview_animation_max_2024(
def _render_preview_animation_max_pre_2024(
filepath, startFrame, endFrame, percentSize, ext):
filepath, startFrame, endFrame,
width, height, percentSize, ext):
"""Render viewport animation by creating bitmaps
***For 3dsMax Version <2024
Args:
filepath (str): filepath without frame numbers and extension
startFrame (int): start frame
endFrame (int): end frame
width (int): render resolution width
height (int): render resolution height
percentSize (float): render resolution multiplier by 100
e.g. 100.0 is 1x, 50.0 is 0.5x, 150.0 is 1.5x
ext (str): image extension
Returns:
list: Created filepaths
"""
# get the screenshot
percent = percentSize / 100.0
res_width = int(round(rt.renderWidth * percent))
res_height = int(round(rt.renderHeight * percent))
viewportRatio = float(res_width / res_height)
res_width = width * percent
res_height = height * percent
frame_template = "{}.{{:04}}.{}".format(filepath, ext)
frame_template.replace("\\", "/")
files = []
@ -178,23 +198,29 @@ def _render_preview_animation_max_pre_2024(
res_width, res_height, filename=filepath
)
dib = rt.gw.getViewportDib()
dib_width = float(dib.width)
dib_height = float(dib.height)
renderRatio = float(dib_width / dib_height)
if viewportRatio <= renderRatio:
dib_width = rt.renderWidth
dib_height = rt.renderHeight
# aspect ratio
viewportRatio = dib_width / dib_height
renderRatio = float(res_width / res_height)
if viewportRatio < renderRatio:
heightCrop = (dib_width / renderRatio)
topEdge = int((dib_height - heightCrop) / 2.0)
tempImage_bmp = rt.bitmap(dib_width, heightCrop)
src_box_value = rt.Box2(0, topEdge, dib_width, heightCrop)
else:
rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0))
rt.copy(tempImage_bmp, preview_res)
rt.close(tempImage_bmp)
elif viewportRatio > renderRatio:
widthCrop = dib_height * renderRatio
leftEdge = int((dib_width - widthCrop) / 2.0)
tempImage_bmp = rt.bitmap(widthCrop, dib_height)
src_box_value = rt.Box2(0, leftEdge, dib_width, dib_height)
rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0))
# copy the bitmap and close it
rt.copy(tempImage_bmp, preview_res)
rt.close(tempImage_bmp)
src_box_value = rt.Box2(leftEdge, 0, widthCrop, dib_height)
rt.pasteBitmap(dib, tempImage_bmp, src_box_value, rt.Point2(0, 0))
rt.copy(tempImage_bmp, preview_res)
rt.close(tempImage_bmp)
else:
rt.copy(dib, preview_res)
rt.save(preview_res)
rt.close(preview_res)
rt.close(dib)
@ -243,22 +269,25 @@ def render_preview_animation(
if viewport_options is None:
viewport_options = viewport_options_for_preview_animation()
with play_preview_when_done(False):
with viewport_camera(camera):
with render_resolution(width, height):
if int(get_max_version()) < 2024:
with viewport_preference_setting(
viewport_options["general_viewport"],
viewport_options["nitrous_viewport"],
viewport_options["vp_btn_mgr"]
):
return _render_preview_animation_max_pre_2024(
filepath,
start_frame,
end_frame,
percentSize,
ext
)
else:
with viewport_layout_and_camera(camera):
if int(get_max_version()) < 2024:
with viewport_preference_setting(
viewport_options["general_viewport"],
viewport_options["nitrous_manager"],
viewport_options["nitrous_viewport"],
viewport_options["vp_btn_mgr"]
):
return _render_preview_animation_max_pre_2024(
filepath,
start_frame,
end_frame,
width,
height,
percentSize,
ext
)
else:
with render_resolution(width, height):
return _render_preview_animation_max_2024(
filepath,
start_frame,
@ -299,6 +328,9 @@ def viewport_options_for_preview_animation():
"dspBkg": True,
"dspGrid": False
}
viewport_options["nitrous_manager"] = {
"AntialiasingQuality": "None"
}
viewport_options["nitrous_viewport"] = {
"VisualStyleMode": "defaultshading",
"ViewportPreset": "highquality",

View file

@ -12,6 +12,32 @@ class CreateReview(plugin.MaxCreator):
family = "review"
icon = "video-camera"
review_width = 1920
review_height = 1080
percentSize = 100
keep_images = False
image_format = "png"
visual_style = "Realistic"
viewport_preset = "Quality"
vp_texture = True
anti_aliasing = "None"
def apply_settings(self, project_settings):
settings = project_settings["max"]["CreateReview"] # noqa
# Take some defaults from settings
self.review_width = settings.get("review_width", self.review_width)
self.review_height = settings.get("review_height", self.review_height)
self.percentSize = settings.get("percentSize", self.percentSize)
self.keep_images = settings.get("keep_images", self.keep_images)
self.image_format = settings.get("image_format", self.image_format)
self.visual_style = settings.get("visual_style", self.visual_style)
self.viewport_preset = settings.get(
"viewport_preset", self.viewport_preset)
self.anti_aliasing = settings.get(
"anti_aliasing", self.anti_aliasing)
self.vp_texture = settings.get("vp_texture", self.vp_texture)
def create(self, subset_name, instance_data, pre_create_data):
# Transfer settings from pre create to instance
creator_attributes = instance_data.setdefault(
@ -23,6 +49,7 @@ class CreateReview(plugin.MaxCreator):
"percentSize",
"visualStyleMode",
"viewportPreset",
"antialiasingQuality",
"vpTexture"]:
if key in pre_create_data:
creator_attributes[key] = pre_create_data[key]
@ -33,7 +60,7 @@ class CreateReview(plugin.MaxCreator):
pre_create_data)
def get_instance_attr_defs(self):
image_format_enum = ["exr", "jpg", "png"]
image_format_enum = ["exr", "jpg", "png", "tga"]
visual_style_preset_enum = [
"Realistic", "Shaded", "Facets",
@ -45,41 +72,46 @@ class CreateReview(plugin.MaxCreator):
preview_preset_enum = [
"Quality", "Standard", "Performance",
"DXMode", "Customize"]
anti_aliasing_enum = ["None", "2X", "4X", "8X"]
return [
NumberDef("review_width",
label="Review width",
decimals=0,
minimum=0,
default=1920),
default=self.review_width),
NumberDef("review_height",
label="Review height",
decimals=0,
minimum=0,
default=1080),
BoolDef("keepImages",
label="Keep Image Sequences",
default=False),
EnumDef("imageFormat",
image_format_enum,
default="png",
label="Image Format Options"),
default=self.review_height),
NumberDef("percentSize",
label="Percent of Output",
default=100,
default=self.percentSize,
minimum=1,
decimals=0),
BoolDef("keepImages",
label="Keep Image Sequences",
default=self.keep_images),
EnumDef("imageFormat",
image_format_enum,
default=self.image_format,
label="Image Format Options"),
EnumDef("visualStyleMode",
visual_style_preset_enum,
default="Realistic",
default=self.visual_style,
label="Preference"),
EnumDef("viewportPreset",
preview_preset_enum,
default="Quality",
label="Pre-View Preset"),
default=self.viewport_preset,
label="Preview Preset"),
EnumDef("antialiasingQuality",
anti_aliasing_enum,
default=self.anti_aliasing,
label="Anti-aliasing Quality"),
BoolDef("vpTexture",
label="Viewport Texture",
default=False)
default=self.vp_texture)
]
def get_pre_create_attr_defs(self):

View file

@ -90,6 +90,9 @@ class CollectReview(pyblish.api.InstancePlugin,
"dspBkg": attr_values.get("dspBkg"),
"dspGrid": attr_values.get("dspGrid")
}
nitrous_manager = {
"AntialiasingQuality": creator_attrs["antialiasingQuality"],
}
nitrous_viewport = {
"VisualStyleMode": creator_attrs["visualStyleMode"],
"ViewportPreset": creator_attrs["viewportPreset"],
@ -97,6 +100,7 @@ class CollectReview(pyblish.api.InstancePlugin,
}
preview_data = {
"general_viewport": general_viewport,
"nitrous_manager": nitrous_manager,
"nitrous_viewport": nitrous_viewport,
"vp_btn_mgr": {"EnableButtons": False}
}

View file

@ -156,7 +156,7 @@ class FBXExtractor:
# Parse export options
options = self.default_options
options = self.parse_overrides(instance, options)
self.log.info("Export options: {0}".format(options))
self.log.debug("Export options: {0}".format(options))
# Collect the start and end including handles
start = instance.data.get("frameStartHandle") or \
@ -186,7 +186,7 @@ class FBXExtractor:
template = "FBXExport{0} {1}" if key == "UpAxis" else \
"FBXExport{0} -v {1}" # noqa
cmd = template.format(key, value)
self.log.info(cmd)
self.log.debug(cmd)
mel.eval(cmd)
# Never show the UI or generate a log

View file

@ -272,7 +272,7 @@ class MayaCreatorBase(object):
@six.add_metaclass(ABCMeta)
class MayaCreator(NewCreator, MayaCreatorBase):
settings_name = None
settings_category = "maya"
def create(self, subset_name, instance_data, pre_create_data):
@ -318,24 +318,6 @@ class MayaCreator(NewCreator, MayaCreatorBase):
default=True)
]
def apply_settings(self, project_settings):
"""Method called on initialization of plugin to apply settings."""
settings_name = self.settings_name
if settings_name is None:
settings_name = self.__class__.__name__
settings = project_settings["maya"]["create"]
settings = settings.get(settings_name)
if settings is None:
self.log.debug(
"No settings found for {}".format(self.__class__.__name__)
)
return
for key, value in settings.items():
setattr(self, key, value)
class MayaAutoCreator(AutoCreator, MayaCreatorBase):
"""Automatically triggered creator for Maya.
@ -344,6 +326,8 @@ class MayaAutoCreator(AutoCreator, MayaCreatorBase):
any arguments.
"""
settings_category = "maya"
def collect_instances(self):
return self._default_collect_instances()
@ -361,6 +345,8 @@ class MayaHiddenCreator(HiddenCreator, MayaCreatorBase):
arguments for 'create' method.
"""
settings_category = "maya"
def create(self, *args, **kwargs):
return MayaCreator.create(self, *args, **kwargs)
@ -774,7 +760,8 @@ class ReferenceLoader(Loader):
"ma": "mayaAscii",
"mb": "mayaBinary",
"abc": "Alembic",
"fbx": "FBX"
"fbx": "FBX",
"usd": "USD Import"
}.get(representation["name"])
assert file_type, "Unsupported representation: %s" % representation

View file

@ -1,7 +1,9 @@
import os
import difflib
import contextlib
from maya import cmds
import qargparse
from openpype.settings import get_project_settings
import openpype.hosts.maya.api.plugin
@ -128,6 +130,12 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
if not attach_to_root:
group_name = namespace
kwargs = {}
if "file_options" in options:
kwargs["options"] = options["file_options"]
if "file_type" in options:
kwargs["type"] = options["file_type"]
path = self.filepath_from_context(context)
with maintained_selection():
cmds.loadPlugin("AbcImport.mll", quiet=True)
@ -139,7 +147,8 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
reference=True,
returnNewNodes=True,
groupReference=attach_to_root,
groupName=group_name)
groupName=group_name,
**kwargs)
shapes = cmds.ls(nodes, shapes=True, long=True)
@ -251,3 +260,92 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
else:
self.log.warning("This version of Maya does not support locking of"
" transforms of cameras.")
class MayaUSDReferenceLoader(ReferenceLoader):
"""Reference USD file to native Maya nodes using MayaUSDImport reference"""
families = ["usd"]
representations = ["usd"]
extensions = {"usd", "usda", "usdc"}
options = ReferenceLoader.options + [
qargparse.Boolean(
"readAnimData",
label="Load anim data",
default=True,
help="Load animation data from USD file"
),
qargparse.Boolean(
"useAsAnimationCache",
label="Use as animation cache",
default=True,
help=(
"Imports geometry prims with time-sampled point data using a "
"point-based deformer that references the imported "
"USD file.\n"
"This provides better import and playback performance when "
"importing time-sampled geometry from USD, and should "
"reduce the weight of the resulting Maya scene."
)
),
qargparse.Boolean(
"importInstances",
label="Import instances",
default=True,
help=(
"Import USD instanced geometries as Maya instanced shapes. "
"Will flatten the scene otherwise."
)
),
qargparse.String(
"primPath",
label="Prim Path",
default="/",
help=(
"Name of the USD scope where traversing will begin.\n"
"The prim at the specified primPath (including the prim) will "
"be imported.\n"
"Specifying the pseudo-root (/) means you want "
"to import everything in the file.\n"
"If the passed prim path is empty, it will first try to "
"import the defaultPrim for the rootLayer if it exists.\n"
"Otherwise, it will behave as if the pseudo-root was passed "
"in."
)
)
]
file_type = "USD Import"
def process_reference(self, context, name, namespace, options):
cmds.loadPlugin("mayaUsdPlugin", quiet=True)
def bool_option(key, default):
# Shorthand for getting optional boolean file option from options
value = int(bool(options.get(key, default)))
return "{}={}".format(key, value)
def string_option(key, default):
# Shorthand for getting optional string file option from options
value = str(options.get(key, default))
return "{}={}".format(key, value)
options["file_options"] = ";".join([
string_option("primPath", default="/"),
bool_option("importInstances", default=True),
bool_option("useAsAnimationCache", default=True),
bool_option("readAnimData", default=True),
# TODO: Expose more parameters
# "preferredMaterial=none",
# "importRelativeTextures=Automatic",
# "useCustomFrameRange=0",
# "startTime=0",
# "endTime=0",
# "importUSDZTextures=0"
])
options["file_type"] = self.file_type
return super(MayaUSDReferenceLoader, self).process_reference(
context, name, namespace, options
)

View file

@ -42,6 +42,16 @@ class ExtractFBXAnimation(publish.Extractor):
# Export from the rig's namespace so that the exported
# FBX does not include the namespace but preserves the node
# names as existing in the rig workfile
if not out_members:
skeleton_set = [
i for i in instance
if i.endswith("skeletonAnim_SET")
]
self.log.debug(
"Top group of animated skeleton not found in "
"{}.\nSkipping fbx animation extraction.".format(skeleton_set))
return
namespace = get_namespace(out_members[0])
relative_out_members = [
strip_namespace(node, namespace) for node in out_members

View file

@ -1,9 +1,10 @@
import os
import sys
from qtpy import QtWidgets, QtCore
from qtpy import QtWidgets, QtCore, QtGui
from openpype.tools.utils import host_tools
from openpype.pipeline import registered_host
def load_stylesheet():
@ -49,6 +50,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
)
self.setWindowTitle("OpenPype")
save_current_btn = QtWidgets.QPushButton("Save current file", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)
publish_btn = QtWidgets.QPushButton("Publish ...", self)
@ -70,6 +72,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(10, 20, 10, 20)
layout.addWidget(save_current_btn)
layout.addWidget(Spacer(15, self))
layout.addWidget(workfiles_btn)
layout.addWidget(create_btn)
layout.addWidget(publish_btn)
@ -94,6 +100,8 @@ class OpenPypeMenu(QtWidgets.QWidget):
self.setLayout(layout)
save_current_btn.clicked.connect(self.on_save_current_clicked)
save_current_btn.setShortcut(QtGui.QKeySequence.Save)
workfiles_btn.clicked.connect(self.on_workfile_clicked)
create_btn.clicked.connect(self.on_create_clicked)
publish_btn.clicked.connect(self.on_publish_clicked)
@ -106,6 +114,18 @@ class OpenPypeMenu(QtWidgets.QWidget):
# reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
experimental_btn.clicked.connect(self.on_experimental_clicked)
def on_save_current_clicked(self):
host = registered_host()
current_file = host.get_current_workfile()
if not current_file:
print("Current project is not saved. "
"Please save once first via workfiles tool.")
host_tools.show_workfiles()
return
print(f"Saving current file to: {current_file}")
host.save_workfile(current_file)
def on_workfile_clicked(self):
print("Clicked Workfile")
host_tools.show_workfiles()

View file

@ -712,6 +712,10 @@ or updating already created. Publishing will create OTIO file.
"newAssetPublishing": True,
"trackStartFrame": track_start_frame,
"timelineOffset": timeline_offset,
"isEditorial": True,
# creator_attributes
"creator_attributes": creator_attributes
}
# update base instance data with context data
# and also update creator attributes with context data

View file

@ -27,6 +27,12 @@ class CollectSequenceFrameData(
if not self.is_active(instance.data):
return
# editorial would fail since they might not be in database yet
is_editorial = instance.data.get("isEditorial")
if is_editorial:
self.log.debug("Instance is Editorial. Skipping.")
return
frame_data = self.get_frame_data_from_repre_sequence(instance)
if not frame_data:

View file

@ -30,12 +30,17 @@ class ValidateFrameRange(OptionalPyblishPluginMixin,
if not self.is_active(instance.data):
return
# editorial would fail since they might not be in database yet
is_editorial = instance.data.get("isEditorial")
if is_editorial:
self.log.debug("Instance is Editorial. Skipping.")
return
if (self.skip_timelines_check and
any(re.search(pattern, instance.data["task"])
for pattern in self.skip_timelines_check)):
self.log.info("Skipping for {} task".format(instance.data["task"]))
asset_doc = instance.data["assetEntity"]
asset_data = asset_doc["data"]
frame_start = asset_data["frameStart"]
frame_end = asset_data["frameEnd"]

View file

@ -190,7 +190,7 @@ class LoadImage(plugin.Loader):
if pop_idx is None:
self.log.warning(
"Didn't found container in workfile containers. {}".format(
"Didn't find container in workfile containers. {}".format(
container
)
)

View file

@ -36,6 +36,7 @@ from openpype.settings import (
)
from openpype.client.mongo import validate_mongo_connection
from openpype.client import get_ayon_server_api_connection
_PLACEHOLDER = object()
@ -613,9 +614,8 @@ def get_openpype_username():
"""
if AYON_SERVER_ENABLED:
import ayon_api
return ayon_api.get_user()["name"]
con = get_ayon_server_api_connection()
return con.get_user()["name"]
username = os.environ.get("OPENPYPE_USERNAME")
if not username:

View file

@ -16,9 +16,9 @@ from abc import ABCMeta, abstractmethod
import six
import appdirs
import ayon_api
from openpype import AYON_SERVER_ENABLED
from openpype.client import get_ayon_server_api_connection
from openpype.settings import (
get_system_settings,
SYSTEM_SETTINGS_KEY,
@ -319,8 +319,11 @@ def load_modules(force=False):
def _get_ayon_bundle_data():
con = get_ayon_server_api_connection()
bundles = con.get_bundles()["bundles"]
bundle_name = os.getenv("AYON_BUNDLE_NAME")
bundles = ayon_api.get_bundles()["bundles"]
return next(
(
bundle
@ -345,7 +348,8 @@ def _get_ayon_addons_information(bundle_info):
output = []
bundle_addons = bundle_info["addons"]
addons = ayon_api.get_addons_info()["addons"]
con = get_ayon_server_api_connection()
addons = con.get_addons_info()["addons"]
for addon in addons:
name = addon["name"]
versions = addon.get("versions")

View file

@ -85,7 +85,7 @@ class AyonDeadlinePlugin(DeadlinePlugin):
}
for env, val in environment.items():
self.SetProcessEnvironmentVariable(env, val)
self.SetEnvironmentVariable(env, val)
exe_list = self.GetConfigEntry("AyonExecutable")
# clean '\ ' for MacOS pasting
@ -101,11 +101,11 @@ class AyonDeadlinePlugin(DeadlinePlugin):
if exe == "":
self.FailRender(
"Ayon executable was not found " +
"in the semicolon separated list " +
"\"" + ";".join(exe_list) + "\". " +
"The path to the render executable can be configured " +
"from the Plugin Configuration in the Deadline Monitor.")
"Ayon executable was not found in the semicolon separated "
"list: \"{}\". The path to the render executable can be "
"configured from the Plugin Configuration in the Deadline "
"Monitor.".format(exe_list)
)
return exe
def RenderArgument(self):

View file

@ -495,7 +495,10 @@ def inject_ayon_environment(deadlinePlugin):
"AYON_BUNDLE_NAME": ayon_bundle_name,
}
for env, val in environment.items():
# Add the env var for the Render Plugin that is about to render
deadlinePlugin.SetEnvironmentVariable(env, val)
# Add the env var for current calls to `DeadlinePlugin.RunProcess`
deadlinePlugin.SetProcessEnvironmentVariable(env, val)
args_str = subprocess.list2cmdline(args)
print(">>> Executing: {} {}".format(exe, args_str))

View file

@ -66,7 +66,7 @@ class TransferHierarchicalValues(ServerAction):
"items": [{
"type": "label",
"value": (
"Didn't found custom attributes"
"Didn't find custom attributes"
" that can be transferred."
)
}]

View file

@ -257,7 +257,7 @@ class NextTaskUpdate(BaseEvent):
new_task_name = mapping.get(old_status_name)
if not new_task_name:
self.log.debug(
"Didn't found mapping for status \"{}\".".format(
"Didn't find mapping for status \"{}\".".format(
task_status["name"]
)
)

View file

@ -387,7 +387,7 @@ class SyncToAvalonEvent(BaseEvent):
if not data:
# TODO logging
self.log.warning(
"Didn't found entity by key/value \"{}\" / \"{}\"".format(
"Didn't find entity by key/value \"{}\" / \"{}\"".format(
key, value
)
)

View file

@ -51,7 +51,7 @@ class ComponentOpen(BaseAction):
else:
return {
'success': False,
'message': "Didn't found file: " + fpath
'message': "Didn't find file: " + fpath
}
return {

View file

@ -169,7 +169,7 @@ class DeleteAssetSubset(BaseAction):
return {
"success": True,
"message": (
"Didn't found entities in avalon."
"Didn't find entities in avalon."
" You can use Ftrack's Delete button for the selection."
)
}

View file

@ -61,7 +61,7 @@ class Delivery(BaseAction):
return {
"success": False,
"message": (
"Didn't found project \"{}\" in avalon."
"Didn't find project \"{}\" in avalon."
).format(project_name)
}

View file

@ -29,7 +29,7 @@ class JobKiller(BaseAction):
if not jobs:
return {
"success": True,
"message": "Didn't found any running jobs"
"message": "Didn't find any running jobs"
}
# Collect user ids from jobs

View file

@ -5,7 +5,6 @@ import platform
import collections
import numbers
import ayon_api
import six
import time
@ -16,7 +15,7 @@ from openpype.settings.lib import (
from openpype.settings.constants import (
DEFAULT_PROJECT_KEY
)
from openpype.client import get_project
from openpype.client import get_project, get_ayon_server_api_connection
from openpype.lib import Logger, get_local_site_id
from openpype.lib.path_templates import (
TemplateUnsolved,
@ -479,7 +478,8 @@ class Anatomy(BaseAnatomy):
if AYON_SERVER_ENABLED:
if not project_name:
return
return ayon_api.get_project_roots_for_site(
con = get_ayon_server_api_connection()
return con.get_project_roots_for_site(
project_name, get_local_site_id()
)

View file

@ -11,6 +11,7 @@ import pyblish.api
from pyblish.lib import MessageHandler
import openpype
from openpype import AYON_SERVER_ENABLED
from openpype.host import HostBase
from openpype.client import (
get_project,
@ -18,6 +19,7 @@ from openpype.client import (
get_asset_by_name,
version_is_latest,
get_asset_name_identifier,
get_ayon_server_api_connection,
)
from openpype.lib.events import emit_event
from openpype.modules import load_modules, ModulesManager
@ -43,7 +45,7 @@ from . import (
_is_installed = False
_process_id = None
_registered_root = {"_": ""}
_registered_root = {"_": {}}
_registered_host = {"_": None}
# Keep modules manager (and it's modules) in memory
# - that gives option to register modules' callbacks
@ -84,15 +86,22 @@ def register_root(path):
def registered_root():
"""Return currently registered root"""
root = _registered_root["_"]
if root:
return root
"""Return registered roots from current project anatomy.
root = legacy_io.Session.get("AVALON_PROJECTS")
if root:
return os.path.normpath(root)
return ""
Consider this does return roots only for current project and current
platforms, only if host was installer using 'install_host'.
Deprecated:
Please use project 'Anatomy' to get roots. This function is still used
at current core functions of load logic, but that will change
in future and this function will be removed eventually. Using this
function at new places can cause problems in the future.
Returns:
dict[str, str]: Root paths.
"""
return _registered_root["_"]
def install_host(host):
@ -106,6 +115,10 @@ def install_host(host):
_is_installed = True
# Make sure global AYON connection has set site id and version
if AYON_SERVER_ENABLED:
get_ayon_server_api_connection()
legacy_io.install()
modules_manager = _get_modules_manager()

View file

@ -2255,11 +2255,11 @@ class CreateContext:
if task_name:
task_names_by_asset_name[asset_name].add(task_name)
asset_names = [
asset_names = {
asset_name
for asset_name in task_names_by_asset_name.keys()
if asset_name is not None
]
}
fields = {"name", "data.tasks"}
if AYON_SERVER_ENABLED:
fields |= {"data.parents"}
@ -2270,10 +2270,12 @@ class CreateContext:
))
task_names_by_asset_name = {}
asset_docs_by_name = collections.defaultdict(list)
for asset_doc in asset_docs:
asset_name = get_asset_name_identifier(asset_doc)
tasks = asset_doc.get("data", {}).get("tasks") or {}
task_names_by_asset_name[asset_name] = set(tasks.keys())
asset_docs_by_name[asset_doc["name"]].append(asset_doc)
for instance in instances:
if not instance.has_valid_asset or not instance.has_valid_task:
@ -2281,6 +2283,11 @@ class CreateContext:
if AYON_SERVER_ENABLED:
asset_name = instance["folderPath"]
if asset_name and "/" not in asset_name:
asset_docs = asset_docs_by_name.get(asset_name)
if len(asset_docs) == 1:
asset_name = get_asset_name_identifier(asset_docs[0])
instance["folderPath"] = asset_name
else:
asset_name = instance["asset"]

View file

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
import copy
import collections
@ -193,6 +194,12 @@ class BaseCreator:
# QUESTION make this required?
host_name = None
# Settings auto-apply helpers
# Root key in project settings (mandatory for auto-apply to work)
settings_category = None
# Name of plugin in create settings > class name is used if not set
settings_name = None
def __init__(
self, project_settings, system_settings, create_context, headless=False
):
@ -233,14 +240,90 @@ class BaseCreator:
" need to keep system settings."
).format(self.__class__.__name__))
@staticmethod
def _get_settings_values(project_settings, category_name, plugin_name):
"""Helper method to get settings values.
Args:
project_settings (dict[str, Any]): Project settings.
category_name (str): Category of settings.
plugin_name (str): Name of settings.
Returns:
Union[dict[str, Any], None]: Settings values or None.
"""
settings = project_settings.get(category_name)
if not settings:
return None
create_settings = settings.get("create")
if not create_settings:
return None
return create_settings.get(plugin_name)
def apply_settings(self, project_settings):
"""Method called on initialization of plugin to apply settings.
Default implementation tries to auto-apply settings values if are
in expected hierarchy.
Data hierarchy to auto-apply settings:
{self.settings_category} - Root key in settings
"create" - Hardcoded key
{self.settings_name} | {class name} - Name of plugin
... attribute values... - Attribute/value pair
It is mandatory to define 'settings_category' attribute. Attribute
'settings_name' is optional and class name is used if is not defined.
Example data:
ProjectSettings {
"maya": { # self.settings_category
"create": { # Hardcoded key
"CreateAnimation": { # self.settings_name / class name
"enabled": True, # --- Attributes to set ---
"optional": True,#
"active": True, #
"fps": 25, # -------------------------
},
...
},
...
},
...
}
Args:
project_settings (dict[str, Any]): Project settings.
"""
pass
settings_category = self.settings_category
if not settings_category:
return
cls_name = self.__class__.__name__
settings_name = self.settings_name or cls_name
settings = self._get_settings_values(
project_settings, settings_category, settings_name
)
if settings is None:
self.log.debug("No settings found for {}".format(cls_name))
return
for key, value in settings.items():
# Log out attributes that are not defined on plugin object
# - those may be potential dangerous typos in settings
if not hasattr(self, key):
self.log.debug((
"Applying settings to unknown attribute '{}' on '{}'."
).format(
key, cls_name
))
setattr(self, key, value)
@property
def identifier(self):

View file

@ -30,7 +30,7 @@ def install():
session = session_data_from_environment(context_keys=True)
session["schema"] = "openpype:session-3.0"
session["schema"] = "openpype:session-4.0"
try:
schema.validate(session)
except schema.ValidationError as e:

View file

@ -62,8 +62,6 @@ def auto_reconnect(func):
SESSION_CONTEXT_KEYS = (
# Root directory of projects on disk
"AVALON_PROJECTS",
# Name of current Project
"AVALON_PROJECT",
# Name of current Asset

View file

@ -0,0 +1,61 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "openpype:session-4.0",
"description": "The Avalon environment",
"type": "object",
"additionalProperties": true,
"required": [
"AVALON_PROJECT"
],
"properties": {
"AVALON_PROJECT": {
"description": "Name of project",
"type": "string",
"pattern": "^\\w*$",
"example": "Hulk"
},
"AVALON_ASSET": {
"description": "Name of asset",
"type": "string",
"pattern": "^[\\/\\w]*$",
"example": "Bruce"
},
"AVALON_TASK": {
"description": "Name of task",
"type": "string",
"pattern": "^\\w*$",
"example": "modeling"
},
"AVALON_APP": {
"description": "Name of host",
"type": "string",
"pattern": "^\\w*$",
"example": "maya"
},
"AVALON_DB": {
"description": "Name of database",
"type": "string",
"pattern": "^\\w*$",
"example": "avalon",
"default": "avalon"
},
"AVALON_LABEL": {
"description": "Nice name of Avalon, used in e.g. graphical user interfaces",
"type": "string",
"example": "MyLabel",
"default": "Avalon"
},
"AVALON_TIMEOUT": {
"description": "Wherever there is a need for a timeout, this is the default value.",
"type": "string",
"pattern": "^[0-9]*$",
"default": "1000",
"example": "1000"
}
}
}

View file

@ -4,7 +4,7 @@ import logging
from openpype import AYON_SERVER_ENABLED
from openpype.lib import Logger
from openpype.client import get_project
from openpype.client import get_project, get_ayon_server_api_connection
from . import legacy_io
from .anatomy import Anatomy
from .plugin_discover import (
@ -153,8 +153,6 @@ class ServerThumbnailResolver(ThumbnailResolver):
if not entity_type or not entity_id:
return None
import ayon_api
project_name = self.dbcon.active_project()
thumbnail_id = thumbnail_entity["_id"]
@ -169,7 +167,7 @@ class ServerThumbnailResolver(ThumbnailResolver):
# NOTE Use 'get_server_api_connection' because public function
# 'get_thumbnail_by_id' does not return output of 'ServerAPI'
# method.
con = ayon_api.get_server_api_connection()
con = get_ayon_server_api_connection()
if hasattr(con, "get_thumbnail_by_id"):
result = con.get_thumbnail_by_id(thumbnail_id)
if result.is_valid:

View file

@ -58,21 +58,21 @@ class ExplicitCleanUp(pyblish.api.ContextPlugin):
# Store failed paths with exception
failed = []
# Store removed filepaths for logging
succeded_files = set()
succeeded_files = set()
# Remove file by file
for filepath in filepaths:
try:
os.remove(filepath)
succeded_files.add(filepath)
succeeded_files.add(filepath)
except Exception as exc:
failed.append((filepath, exc))
if succeded_files:
if succeeded_files:
self.log.info(
"Removed files:\n{}".format("\n".join(succeded_files))
"Removed files:\n{}".format("\n".join(sorted(succeeded_files)))
)
# Delete folders with it's content
# Delete folders with its content
succeeded = set()
for dirpath in dirpaths:
# Check if directory still exists
@ -87,17 +87,21 @@ class ExplicitCleanUp(pyblish.api.ContextPlugin):
if succeeded:
self.log.info(
"Removed directories:\n{}".format("\n".join(succeeded))
"Removed directories:\n{}".format(
"\n".join(sorted(succeeded))
)
)
# Prepare lines for report of failed removements
# Prepare lines for report of failed removals
lines = []
for filepath, exc in failed:
lines.append("{}: {}".format(filepath, str(exc)))
if lines:
self.log.warning(
"Failed to remove filepaths:\n{}".format("\n".join(lines))
"Failed to remove filepaths:\n{}".format(
"\n".join(sorted(lines))
)
)
def _remove_empty_dirs(self, empty_dirpaths):
@ -134,8 +138,8 @@ class ExplicitCleanUp(pyblish.api.ContextPlugin):
if to_skip_dirpaths:
self.log.debug(
"Skipped directories because contain files:\n{}".format(
"\n".join(to_skip_dirpaths)
"Skipped directories because they contain files:\n{}".format(
"\n".join(sorted(to_skip_dirpaths))
)
)
@ -147,6 +151,6 @@ class ExplicitCleanUp(pyblish.api.ContextPlugin):
if to_delete_dirpaths:
self.log.debug(
"Deleted empty directories:\n{}".format(
"\n".join(to_delete_dirpaths)
"\n".join(sorted(to_delete_dirpaths))
)
)

View file

@ -54,6 +54,8 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
staging_dir = data_object.get("stagingDir")
if staging_dir:
data_object["stagingDir"] = anatomy.fill_root(staging_dir)
self.log.debug("Filling stagingDir with root to: %s",
data_object["stagingDir"])
def _process_path(self, data, anatomy):
"""Process data of a single JSON publish metadata file.
@ -108,7 +110,6 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
instance = self._context.create_instance(
instance_data.get("subset")
)
self.log.debug("Filling stagingDir...")
self._fill_staging_dir(instance_data, anatomy)
instance.data.update(instance_data)
@ -161,7 +162,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
anatomy.project_name
))
self.log.debug("anatomy: {}".format(anatomy.roots))
self.log.debug("Anatomy roots: {}".format(anatomy.roots))
try:
session_is_set = False
for path in paths:

View file

@ -68,6 +68,12 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
]
def process(self, instance):
# editorial would fail since they might not be in database yet
is_editorial = instance.data.get("isEditorial")
if is_editorial:
self.log.debug("Instance is Editorial. Skipping.")
return
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])

View file

@ -171,8 +171,6 @@ class ExtractBurnin(publish.Extractor):
).format(host_name, family, task_name, task_type, subset))
return
self.log.debug("profile: {}".format(profile))
# Pre-filter burnin definitions by instance families
burnin_defs = self.filter_burnins_defs(profile, instance)
if not burnin_defs:
@ -450,7 +448,7 @@ class ExtractBurnin(publish.Extractor):
filling burnin strings. `temp_data` are for repre pre-process
preparation.
"""
self.log.debug("Prepring basic data for burnins")
self.log.debug("Preparing basic data for burnins")
context = instance.context
version = instance.data.get("version")

View file

@ -326,7 +326,6 @@ class ExtractOIIOTranscode(publish.Extractor):
" | Task type \"{}\" | Subset \"{}\" "
).format(host_name, family, task_name, task_type, subset))
self.log.debug("profile: {}".format(profile))
return profile
def _repre_is_valid(self, repre):

View file

@ -68,7 +68,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
]
# Supported extensions
image_exts = ["exr", "jpg", "jpeg", "png", "dpx"]
image_exts = ["exr", "jpg", "jpeg", "png", "dpx", "tga"]
video_exts = ["mov", "mp4"]
supported_exts = image_exts + video_exts
@ -143,7 +143,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
custom_tags = repre.get("custom_tags")
if "review" not in tags:
self.log.debug((
"Repre: {} - Didn't found \"review\" in tags. Skipping"
"Repre: {} - Didn't find \"review\" in tags. Skipping"
).format(repre_name))
continue

View file

@ -200,7 +200,7 @@ class IntegrateThumbnails(pyblish.api.ContextPlugin):
if thumb_repre_doc is None:
self.log.debug(
"There is not representation with name \"thumbnail\""
"There is no representation with name \"thumbnail\""
)
return None

View file

@ -137,7 +137,7 @@ class IntegrateThumbnailsAYON(pyblish.api.ContextPlugin):
if thumb_repre_doc is None:
self.log.debug(
"There is not representation with name \"thumbnail\""
"There is no representation with name \"thumbnail\""
)
return None

View file

@ -20,7 +20,8 @@ import copy
import time
import six
import ayon_api
from openpype.client import get_ayon_server_api_connection
def _convert_color(color_value):
@ -1021,10 +1022,14 @@ def _convert_traypublisher_project_settings(ayon_settings, output):
item["family"] = item.pop("product_type")
shot_add_tasks = ayon_editorial_simple["shot_add_tasks"]
# TODO: backward compatibility and remove in future
if isinstance(shot_add_tasks, dict):
shot_add_tasks = []
# aggregate shot_add_tasks items
new_shot_add_tasks = {
item["name"]: item["task_type"]
item["name"]: {"type": item["task_type"]}
for item in shot_add_tasks
}
ayon_editorial_simple["shot_add_tasks"] = new_shot_add_tasks
@ -1445,7 +1450,8 @@ class _AyonSettingsCache:
@classmethod
def _use_bundles(cls):
if _AyonSettingsCache.use_bundles is None:
major, minor, _, _, _ = ayon_api.get_server_version_tuple()
con = get_ayon_server_api_connection()
major, minor, _, _, _ = con.get_server_version_tuple()
use_bundles = True
if (major, minor) < (0, 3):
use_bundles = False
@ -1462,7 +1468,13 @@ class _AyonSettingsCache:
variant = cls._get_dev_mode_settings_variant()
elif is_staging_enabled():
variant = "staging"
# Cache variant
_AyonSettingsCache.variant = variant
# Set the variant to global ayon api connection
con = get_ayon_server_api_connection()
con.set_default_settings_variant(variant)
return _AyonSettingsCache.variant
@classmethod
@ -1477,8 +1489,9 @@ class _AyonSettingsCache:
str: Name of settings variant.
"""
bundles = ayon_api.get_bundles()
user = ayon_api.get_user()
con = get_ayon_server_api_connection()
bundles = con.get_bundles()
user = con.get_user()
username = user["name"]
for bundle in bundles["bundles"]:
if (
@ -1494,20 +1507,23 @@ class _AyonSettingsCache:
def get_value_by_project(cls, project_name):
cache_item = _AyonSettingsCache.cache_by_project_name[project_name]
if cache_item.is_outdated:
con = get_ayon_server_api_connection()
if cls._use_bundles():
value = ayon_api.get_addons_settings(
value = con.get_addons_settings(
bundle_name=cls._get_bundle_name(),
project_name=project_name
project_name=project_name,
variant=cls._get_variant()
)
else:
value = ayon_api.get_addons_settings(project_name)
value = con.get_addons_settings(project_name)
cache_item.update_value(value)
return cache_item.get_value()
@classmethod
def _get_addon_versions_from_bundle(cls):
con = get_ayon_server_api_connection()
expected_bundle = cls._get_bundle_name()
bundles = ayon_api.get_bundles()["bundles"]
bundles = con.get_bundles()["bundles"]
bundle = next(
(
bundle
@ -1527,8 +1543,11 @@ class _AyonSettingsCache:
if cls._use_bundles():
addons = cls._get_addon_versions_from_bundle()
else:
settings_data = ayon_api.get_addons_settings(
only_values=False, variant=cls._get_variant())
con = get_ayon_server_api_connection()
settings_data = con.get_addons_settings(
only_values=False,
variant=cls._get_variant()
)
addons = settings_data["versions"]
cache_item.update_value(addons)

View file

@ -71,6 +71,11 @@
"optional": false,
"active": true
},
"ValidateInstanceEmpty": {
"enabled": true,
"optional": false,
"active": true
},
"ExtractBlend": {
"enabled": true,
"optional": true,

View file

@ -16,6 +16,17 @@
"image_format": "exr",
"multipass": true
},
"CreateReview": {
"review_width": 1920,
"review_height": 1080,
"percentSize": 100.0,
"keep_images": false,
"image_format": "png",
"visual_style": "Realistic",
"viewport_preset": "Quality",
"anti_aliasing": "None",
"vp_texture": true
},
"PointCloud": {
"attribute": {
"Age": "age",

View file

@ -352,7 +352,7 @@ class DictConditionalEntity(ItemEntity):
break
if result_key is None:
raise ValueError("Didn't found child {}".format(child_obj))
raise ValueError("Didn't find child {}".format(child_obj))
return "/".join([self.path, result_key])

View file

@ -232,7 +232,7 @@ class DictImmutableKeysEntity(ItemEntity):
break
if result_key is None:
raise ValueError("Didn't found child {}".format(child_obj))
raise ValueError("Didn't find child {}".format(child_obj))
return "/".join([self.path, result_key])

View file

@ -284,7 +284,7 @@ class DictMutableKeysEntity(EndpointEntity):
break
if result_key is None:
raise ValueError("Didn't found child {}".format(child_obj))
raise ValueError("Didn't find child {}".format(child_obj))
return "/".join([self.path, result_key])

View file

@ -295,7 +295,7 @@ class ListStrictEntity(ItemEntity):
break
if result_idx is None:
raise ValueError("Didn't found child {}".format(child_obj))
raise ValueError("Didn't find child {}".format(child_obj))
return "/".join([self.path, str(result_idx)])

View file

@ -258,7 +258,7 @@ class ListEntity(EndpointEntity):
break
if result_idx is None:
raise ValueError("Didn't found child {}".format(child_obj))
raise ValueError("Didn't find child {}".format(child_obj))
return "/".join([self.path, str(result_idx)])

View file

@ -270,7 +270,7 @@ class RootEntity(BaseItemEntity):
for key, _child_entity in self.non_gui_children.items():
if _child_entity is child_entity:
return key
raise ValueError("Didn't found child {}".format(child_entity))
raise ValueError("Didn't find child {}".format(child_entity))
@property
def value(self):

View file

@ -65,6 +65,104 @@
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "CreateReview",
"label": "Create Review",
"children": [
{
"type": "number",
"key": "review_width",
"label": "Review Width"
},
{
"type": "number",
"key": "review_height",
"label": "Review Height"
},
{
"type": "number",
"key": "percentSize",
"label": "Percent of Output"
},
{
"type": "boolean",
"key": "keep_images",
"label": "Keep Image Sequences"
},
{
"key": "image_format",
"label": "Image Format Options",
"type": "enum",
"multiselection": false,
"defaults": "exr",
"enum_items": [
{"exr": "exr"},
{"jpg": "jpg"},
{"png": "png"},
{"tga": "tga"}
]
},
{
"key": "visual_style",
"label": "Preference",
"type": "enum",
"multiselection": false,
"defaults": "Realistic",
"enum_items": [
{"Realistic": "Realistic"},
{"Shaded": "Shaded"},
{"Facets": "Facets"},
{"ConsistentColors": "ConsistentColors"},
{"HiddenLine": "HiddenLine"},
{"Wireframe": "Wireframe"},
{"BoundingBox": "BoundingBox"},
{"Ink": "Ink"},
{"ColorInk": "ColorInk"},
{"Acrylic": "Acrylic"},
{"Tech": "Tech"},
{"Graphite": "Graphite"},
{"ColorPencil": "ColorPencil"},
{"Pastel": "Pastel"},
{"Clay": "Clay"},
{"ModelAssist": "ModelAssist"}
]
},
{
"key": "viewport_preset",
"label": "Pre-View Preset",
"type": "enum",
"multiselection": false,
"defaults": "Quality",
"enum_items": [
{"Quality": "Quality"},
{"Standard": "Standard"},
{"Performance": "Performance"},
{"DXMode": "DXMode"},
{"Customize": "Customize"}
]
},
{
"key": "anti_aliasing",
"label": "Anti-aliasing Quality",
"type": "enum",
"multiselection": false,
"defaults": "None",
"enum_items": [
{"None": "None"},
{"2X": "2X"},
{"4X": "4X"},
{"8X": "8X"}
]
},
{
"type": "boolean",
"key": "vp_texture",
"label": "Viewport Texture"
}
]
},
{
"type": "dict",
"collapsible": true,

View file

@ -79,6 +79,22 @@
}
]
},
{
"type": "collapsible-wrap",
"label": "BlendScene",
"children": [
{
"type": "schema_template",
"name": "template_publish_plugin",
"template_data": [
{
"key": "ValidateInstanceEmpty",
"label": "Validate Instance is not Empty"
}
]
}
]
},
{
"type": "collapsible-wrap",
"label": "Render",

View file

@ -40,9 +40,9 @@ class SiteSyncModel:
dict[str, str]: Path by provider name.
"""
site_sync = self._get_sync_server_module()
if site_sync is None:
if not self.is_sync_server_enabled():
return {}
site_sync = self._get_sync_server_module()
return site_sync.get_site_icons()
def get_sites_information(self):

View file

@ -329,7 +329,9 @@ class LoadedFilesView(QtWidgets.QTreeView):
def __init__(self, *args, **kwargs):
super(LoadedFilesView, self).__init__(*args, **kwargs)
self.setEditTriggers(
self.EditKeyPressed | self.SelectedClicked | self.DoubleClicked
QtWidgets.QAbstractItemView.EditKeyPressed
| QtWidgets.QAbstractItemView.SelectedClicked
| QtWidgets.QAbstractItemView.DoubleClicked
)
self.setIndentation(0)
self.setAlternatingRowColors(True)
@ -366,7 +368,7 @@ class LoadedFilesView(QtWidgets.QTreeView):
def _on_rows_inserted(self):
header = self.header()
header.resizeSections(header.ResizeToContents)
header.resizeSections(QtWidgets.QHeaderView.ResizeToContents)
self._update_remove_btn()
def resizeEvent(self, event):
@ -377,7 +379,7 @@ class LoadedFilesView(QtWidgets.QTreeView):
super(LoadedFilesView, self).showEvent(event)
self._model.refresh()
header = self.header()
header.resizeSections(header.ResizeToContents)
header.resizeSections(QtWidgets.QHeaderView.ResizeToContents)
self._update_remove_btn()
def _on_selection_change(self):

View file

@ -580,6 +580,10 @@ class AssetsField(BaseClickableFrame):
"""Change to asset names set with last `set_selected_items` call."""
self.set_selected_items(self._origin_value)
def confirm_value(self):
self._origin_value = copy.deepcopy(self._selected_items)
self._has_value_changed = False
class TasksComboboxProxy(QtCore.QSortFilterProxyModel):
def __init__(self, *args, **kwargs):
@ -786,6 +790,15 @@ class TasksCombobox(QtWidgets.QComboBox):
self._set_is_valid(is_valid)
def confirm_value(self, asset_names):
new_task_name = self._selected_items[0]
self._origin_value = [
(asset_name, new_task_name)
for asset_name in asset_names
]
self._origin_selection = copy.deepcopy(self._selected_items)
self._has_value_changed = False
def set_selected_items(self, asset_task_combinations=None):
"""Set items for selected instances.
@ -920,6 +933,10 @@ class VariantInputWidget(PlaceholderLineEdit):
"""Change text of multiselection."""
self._multiselection_text = text
def confirm_value(self):
self._origin_value = copy.deepcopy(self._current_value)
self._has_value_changed = False
def _set_is_valid(self, valid):
if valid == self._is_valid:
return
@ -1111,6 +1128,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
btns_layout = QtWidgets.QHBoxLayout()
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addStretch(1)
btns_layout.setSpacing(5)
btns_layout.addWidget(submit_btn)
btns_layout.addWidget(cancel_btn)
@ -1161,6 +1179,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
subset_names = set()
invalid_tasks = False
asset_names = []
for instance in self._current_instances:
new_variant_value = instance.get("variant")
if AYON_SERVER_ENABLED:
@ -1177,6 +1196,7 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
if task_name is not None:
new_task_name = task_name
asset_names.append(new_asset_name)
try:
new_subset_name = self._controller.get_subset_name(
instance.creator_identifier,
@ -1218,6 +1238,15 @@ class GlobalAttrsWidget(QtWidgets.QWidget):
self._set_btns_enabled(False)
self._set_btns_visible(invalid_tasks)
if variant_value is not None:
self.variant_input.confirm_value()
if asset_name is not None:
self.asset_value_widget.confirm_value()
if task_name is not None:
self.task_value_widget.confirm_value(asset_names)
self.instance_context_changed.emit()
def _on_cancel(self):

View file

@ -15,6 +15,7 @@ from openpype.tools.utils import (
MessageOverlayObject,
PixmapLabel,
)
from openpype.tools.utils.lib import center_window
from .constants import ResetKeySequence
from .publish_report_viewer import PublishReportViewerWidget
@ -529,6 +530,7 @@ class PublisherWindow(QtWidgets.QDialog):
def _on_first_show(self):
self.resize(self.default_width, self.default_height)
self.setStyleSheet(style.load_stylesheet())
center_window(self)
self._reset_on_show = self._reset_on_first_show
def _on_show_timer(self):

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.17.5"
__version__ = "3.17.6-nightly.3"

View file

@ -61,26 +61,20 @@ class PublishPuginsModel(BaseSettingsModel):
ValidateCameraZeroKeyframe: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Camera Zero Keyframe",
section="Validators"
section="General Validators"
)
ValidateFileSaved: ValidateFileSavedModel = Field(
default_factory=ValidateFileSavedModel,
title="Validate File Saved",
section="Validators"
)
ValidateRenderCameraIsSet: ValidatePluginModel = Field(
ValidateInstanceEmpty: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Camera Is Set",
section="Validators"
)
ValidateDeadlinePublish: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Output for Deadline",
section="Validators"
title="Validate Instance is not Empty"
)
ValidateMeshHasUvs: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Mesh Has Uvs"
title="Validate Mesh Has Uvs",
section="Model Validators"
)
ValidateMeshNoNegativeScale: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
@ -94,6 +88,15 @@ class PublishPuginsModel(BaseSettingsModel):
default_factory=ValidatePluginModel,
title="Validate No Colons In Name"
)
ValidateRenderCameraIsSet: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Camera Is Set",
section="Render Validators"
)
ValidateDeadlinePublish: ValidatePluginModel = Field(
default_factory=ValidatePluginModel,
title="Validate Render Output for Deadline",
)
ExtractBlend: ExtractBlendModel = Field(
default_factory=ExtractBlendModel,
title="Extract Blend",
@ -179,6 +182,11 @@ DEFAULT_BLENDER_PUBLISH_SETTINGS = {
"optional": False,
"active": True
},
"ValidateInstanceEmpty": {
"enabled": True,
"optional": False,
"active": True
},
"ExtractBlend": {
"enabled": True,
"optional": True,

View file

@ -1 +1 @@
__version__ = "0.1.4"
__version__ = "0.1.5"

View file

@ -26,7 +26,7 @@ class CreateStaticMeshModel(BaseSettingsModel):
default_factory=list,
title="Default Products"
)
static_mesh_prefixes: str = Field("S", title="Static Mesh Prefix")
static_mesh_prefix: str = Field("S", title="Static Mesh Prefix")
collision_prefixes: list[str] = Field(
default_factory=list,
title="Collision Prefixes"

View file

@ -33,9 +33,9 @@ class BasicValidateModel(BaseSettingsModel):
class PublishPluginsModel(BaseSettingsModel):
CollectRopFrameRange: CollectRopFrameRangeModel = Field(
default_factory=CollectRopFrameRangeModel,
title="Collect Rop Frame Range.",
CollectAssetHandles: CollectAssetHandlesModel = Field(
default_factory=CollectAssetHandlesModel,
title="Collect Asset Handles.",
section="Collectors"
)
ValidateContainers: BasicValidateModel = Field(
@ -60,7 +60,7 @@ class PublishPluginsModel(BaseSettingsModel):
DEFAULT_HOUDINI_PUBLISH_SETTINGS = {
"CollectRopFrameRange": {
"CollectAssetHandles": {
"use_asset_handles": True
},
"ValidateContainers": {

View file

@ -1 +1 @@
__version__ = "0.2.8"
__version__ = "0.2.9"

View file

@ -0,0 +1,93 @@
from pydantic import Field
from ayon_server.settings import BaseSettingsModel
def image_format_enum():
    """Enumerator of supported review image output formats."""
    formats = ("exr", "jpg", "png", "tga")
    return [{"label": fmt, "value": fmt} for fmt in formats]
def visual_style_enum():
    """Enumerator of viewport visual styles.

    Mirrors the "visual_style" enum of the OpenPype 3ds Max settings
    schema; "HiddenLine" was missing here although the OpenPype schema
    lists it, so it is added to keep both settings sources in sync.
    """
    styles = (
        "Realistic",
        "Shaded",
        "Facets",
        "ConsistentColors",
        # Present in the OpenPype schema; was missing from this enum.
        "HiddenLine",
        "Wireframe",
        "BoundingBox",
        "Ink",
        "ColorInk",
        "Acrylic",
        "Tech",
        "Graphite",
        "ColorPencil",
        "Pastel",
        "Clay",
        "ModelAssist",
    )
    return [{"label": style, "value": style} for style in styles]
def preview_preset_enum():
    """Enumerator of viewport preview presets."""
    presets = ("Quality", "Standard", "Performance", "DXMode", "Customize")
    return [{"label": preset, "value": preset} for preset in presets]
def anti_aliasing_enum():
    """Enumerator of viewport anti-aliasing quality levels."""
    levels = ("None", "2X", "4X", "8X")
    return [{"label": level, "value": level} for level in levels]
class CreateReviewModel(BaseSettingsModel):
    """Settings model for the 3ds Max "Create Review" creator plugin."""

    review_width: int = Field(1920, title="Review Width")
    review_height: int = Field(1080, title="Review Height")
    percentSize: float = Field(100.0, title="Percent of Output")
    keep_images: bool = Field(False, title="Keep Image Sequences")
    # The enum fields previously had no default value, which makes them
    # required in pydantic.  Defaults below mirror
    # DEFAULT_CREATE_REVIEW_SETTINGS so the model resolves even when a
    # value is not explicitly set.
    image_format: str = Field(
        "png",
        enum_resolver=image_format_enum,
        title="Image Format Options"
    )
    visual_style: str = Field(
        "Realistic",
        enum_resolver=visual_style_enum,
        title="Preference"
    )
    viewport_preset: str = Field(
        "Quality",
        enum_resolver=preview_preset_enum,
        title="Preview Preset"
    )
    anti_aliasing: str = Field(
        "None",
        enum_resolver=anti_aliasing_enum,
        title="Anti-aliasing Quality"
    )
    vp_texture: bool = Field(True, title="Viewport Texture")
# Default values for the "Create Review" creator settings; keys mirror
# the fields of CreateReviewModel.
DEFAULT_CREATE_REVIEW_SETTINGS = dict(
    review_width=1920,
    review_height=1080,
    percentSize=100.0,
    keep_images=False,
    image_format="png",
    visual_style="Realistic",
    viewport_preset="Quality",
    anti_aliasing="None",
    vp_texture=True,
)

View file

@ -4,6 +4,9 @@ from .imageio import ImageIOSettings
from .render_settings import (
RenderSettingsModel, DEFAULT_RENDER_SETTINGS
)
from .create_review_settings import (
CreateReviewModel, DEFAULT_CREATE_REVIEW_SETTINGS
)
from .publishers import (
PublishersModel, DEFAULT_PUBLISH_SETTINGS
)
@ -29,6 +32,10 @@ class MaxSettings(BaseSettingsModel):
default_factory=RenderSettingsModel,
title="Render Settings"
)
CreateReview: CreateReviewModel = Field(
default_factory=CreateReviewModel,
title="Create Review"
)
PointCloud: PointCloudSettings = Field(
default_factory=PointCloudSettings,
title="Point Cloud"
@ -40,6 +47,7 @@ class MaxSettings(BaseSettingsModel):
DEFAULT_VALUES = {
"RenderSettings": DEFAULT_RENDER_SETTINGS,
"CreateReview": DEFAULT_CREATE_REVIEW_SETTINGS,
"PointCloud": {
"attribute": [
{"name": "Age", "value": "age"},

View file

@ -1 +1 @@
__version__ = "0.1.1"
__version__ = "0.1.2"

View file

@ -27,7 +27,7 @@ class CreateUnrealStaticMeshModel(BaseSettingsModel):
default_factory=list,
title="Default Products"
)
static_mesh_prefixes: str = Field("S", title="Static Mesh Prefix")
static_mesh_prefix: str = Field("S", title="Static Mesh Prefix")
collision_prefixes: list[str] = Field(
default_factory=list,
title="Collision Prefixes"

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring addon version."""
__version__ = "0.1.5"
__version__ = "0.1.6"

View file

@ -5,19 +5,17 @@ from ayon_server.settings import BaseSettingsModel, task_types_enum
class ClipNameTokenizerItem(BaseSettingsModel):
_layout = "expanded"
# TODO was 'dict-modifiable', is list of dicts now, must be fixed in code
name: str = Field("#TODO", title="Tokenizer name")
name: str = Field("", title="Tokenizer name")
regex: str = Field("", title="Tokenizer regex")
class ShotAddTasksItem(BaseSettingsModel):
_layout = "expanded"
# TODO was 'dict-modifiable', is list of dicts now, must be fixed in code
name: str = Field('', title="Key")
task_type: list[str] = Field(
task_type: str = Field(
title="Task type",
default_factory=list,
enum_resolver=task_types_enum)
enum_resolver=task_types_enum
)
class ShotRenameSubmodel(BaseSettingsModel):
@ -54,7 +52,7 @@ class TokenToParentConvertorItem(BaseSettingsModel):
)
class ShotHierchySubmodel(BaseSettingsModel):
class ShotHierarchySubmodel(BaseSettingsModel):
enabled: bool = True
parents_path: str = Field(
"",
@ -102,9 +100,9 @@ class EditorialSimpleCreatorPlugin(BaseSettingsModel):
title="Shot Rename",
default_factory=ShotRenameSubmodel
)
shot_hierarchy: ShotHierchySubmodel = Field(
shot_hierarchy: ShotHierarchySubmodel = Field(
title="Shot Hierarchy",
default_factory=ShotHierchySubmodel
default_factory=ShotHierarchySubmodel
)
shot_add_tasks: list[ShotAddTasksItem] = Field(
title="Add tasks to shot",

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring addon version."""
__version__ = "0.1.2"
__version__ = "0.1.3"

View file

@ -14,23 +14,52 @@ How to run
- run in cmd
`{OPENPYPE_ROOT}/.venv/Scripts/python.exe {OPENPYPE_ROOT}/start.py runtests {OPENPYPE_ROOT}/tests/integration`
- add `hosts/APP_NAME` after integration part to limit only on specific app (eg. `{OPENPYPE_ROOT}/tests/integration/hosts/maya`)
OR can use built executables
OR can use built executables
`openpype_console runtests {ABS_PATH}/tests/integration`
Command line arguments
----------------------
- "--mark" - "Run tests marked by",
- "--pyargs" - "Run tests from package",
- "--test_data_folder" - "Unzipped directory path of test file",
- "--persist" - "Persist test DB and published files after test end",
- "--app_variant" - "Provide specific app variant for test, empty for latest",
- "--app_group" - "Provide specific app group for test, empty for default",
- "--timeout" - "Provide specific timeout value for test case",
- "--setup_only" - "Only create dbs, do not run tests",
- "--mongo_url" - "MongoDB for testing.",
Run Tray for test
-----------------
In case of failed test you might want to run it manually and visually debug what happened.
For that:
- run the test that is failing
- add environment variables (to command line process or your IDE)
- OPENPYPE_DATABASE_NAME = openpype_tests
- AVALON_DB = avalon_tests
- run tray as usual
- `{OPENPYPE_ROOT}/.venv/Scripts/python.exe {OPENPYPE_ROOT}/start.py run tray --debug`
You should see only test asset and state of databases for that particular use case.
How to check logs/errors from app
--------------------------------
Keep PERSIST to True in the class and check `test_openpype.logs` collection.
Keep PERSIST to True in the class and check `test_openpype.logs` collection.
How to create test for publishing from host
------------------------------------------
- Extend PublishTest in `tests/lib/testing_classes.py`
- Use `resources\test_data.zip` skeleton file as a template for testing input data
- Put workfile into `test_data.zip/input/workfile`
- If you require other than base DB dumps provide them to `test_data.zip/input/dumps`
- Create a subfolder `test_data` with a name matching your test file, containing your test class
- (see `tests/integration/hosts/maya/test_publish_in_maya` and `test_publish_in_maya.py`)
- Put this subfolder name into TEST_FILES [(HASH_ID, FILE_NAME, MD5_OPTIONAL)]
- at first position, all others may be ""
- Put workfile into `test_data/input/workfile`
- If you require other than base DB dumps provide them to `test_data/input/dumps`
-- (Check commented code in `db_handler.py` how to dump specific DB. Currently all collections will be dumped.)
- Implement `last_workfile_path`
- `startup_scripts` - must contain pointing host to startup script saved into `test_data.zip/input/startup`
- Implement `last_workfile_path`
- `startup_scripts` - must point the host to the startup script saved into `test_data/input/startup`
-- Script must contain something like (pseudocode)
```
import openpype
@ -39,7 +68,7 @@ from avalon import api, HOST
from openpype.api import Logger
log = Logger().get_logger(__name__)
api.install(HOST)
log_lines = []
for result in pyblish.util.publish_iter():
@ -54,18 +83,20 @@ for result in pyblish.util.publish_iter():
EXIT_APP (command to exit host)
```
(Install and publish methods must be triggered only AFTER host app is fully initialized!)
- If you would like add any command line arguments for your host app add it to `test_data.zip/input/app_args/app_args.json` (as a json list)
- Provide any required environment variables to `test_data.zip/input/env_vars/env_vars.json` (as a json dictionary)
- Zip `test_data.zip`, named it with descriptive name, upload it to Google Drive, right click - `Get link`, copy hash id (file must be accessible to anyone with a link!)
- Put this hash id and zip file name into TEST_FILES [(HASH_ID, FILE_NAME, MD5_OPTIONAL)]. If you want to check MD5 of downloaded
file, provide md5 value of zipped file.
- If you would like to add any command line arguments for your host app, add them to `test_data/input/app_args/app_args.json` (as a json list)
- Provide any required environment variables to `test_data/input/env_vars/env_vars.json` (as a json dictionary)
- Implement any assert checks you need in extended class
- Run test class manually (via Pycharm or pytest runner (TODO))
- If you want test to visually compare expected files to published one, set PERSIST to True, run test manually
-- Locate temporary `publish` subfolder of temporary folder (found in debugging console log)
-- Copy whole folder content into .zip file into `expected` subfolder
-- Copy whole folder content into .zip file into `expected` subfolder
-- By default tests are comparing only structure of `expected` and published format (eg. if you want to save space, replace published files with empty files, but with expected names!)
-- Zip and upload again, change PERSIST to False
- Use `TEST_DATA_FOLDER` variable in your class to reuse existing downloaded and unzipped test data (for faster creation of tests)
- Keep `APP_VARIANT` empty if you want to trigger test on latest version of app, or provide explicit value (as '2022' for Photoshop for example)
- Keep `APP_VARIANT` empty if you want to trigger test on latest version of app, or provide explicit value (as '2022' for Photoshop for example)
For storing test zip files on Google Drive:
- Zip `test_data.zip`, name it with a descriptive name, upload it to Google Drive, right click - `Get link`, copy the hash id (the file must be accessible to anyone with a link!)
- Put this hash id and zip file name into TEST_FILES [(HASH_ID, FILE_NAME, MD5_OPTIONAL)]. If you want to check MD5 of downloaded
file, provide md5 value of zipped file.

View file

@ -16,18 +16,25 @@ class MayaHostFixtures(HostFixtures):
Maya expects workfile in proper folder, so copy is done first.
"""
src_path = os.path.join(download_test_data,
"input",
"workfile",
"test_project_test_asset_test_task_v001.mb")
dest_folder = os.path.join(output_folder_url,
self.PROJECT,
self.ASSET,
"work",
self.TASK)
src_path = os.path.join(
download_test_data,
"input",
"workfile",
"test_project_test_asset_test_task_v001.ma"
)
dest_folder = os.path.join(
output_folder_url,
self.PROJECT,
self.ASSET,
"work",
self.TASK
)
os.makedirs(dest_folder)
dest_path = os.path.join(dest_folder,
"test_project_test_asset_test_task_v001.mb")
dest_path = os.path.join(
dest_folder, "test_project_test_asset_test_task_v001.ma"
)
shutil.copy(src_path, dest_path)
yield dest_path
@ -36,7 +43,7 @@ class MayaHostFixtures(HostFixtures):
def startup_scripts(self, monkeypatch_session, download_test_data):
"""Points Maya to userSetup file from input data"""
startup_path = os.path.join(
os.path.dirname(__file__), "input", "startup"
download_test_data, "input", "startup"
)
original_pythonpath = os.environ.get("PYTHONPATH")
monkeypatch_session.setenv(

View file

@ -24,8 +24,7 @@ class TestDeadlinePublishInMaya(MayaDeadlinePublishTestClass):
PERSIST = True
TEST_FILES = [
("1dDY7CbdFXfRksGVoiuwjhnPoTRCCf5ea",
"test_maya_deadline_publish.zip", "")
("test_deadline_publish_in_maya", "", "")
]
APP_GROUP = "maya"

View file

@ -0,0 +1,17 @@
Test data
---------
Each class implementing `TestCase` can provide test file(s) by adding them to
TEST_FILES ('GDRIVE_FILE_ID', 'ACTUAL_FILE_NAME', 'MD5HASH')
GDRIVE_FILE_ID can be pulled from shareable link from Google Drive app.
Currently it is expected that the test file will be a zip file with this structure:
- expected - expected files (not implemented yet)
- input
- data - test data (workfiles, images etc)
- dumps - folder for BSON dumps (from `mongodump`)
- env_vars
env_vars.json - dictionary with environment variables {key:value}
- sql - sql files to load with `mongoimport` (human readable)
- startup - scripts that should run in the host on its startup

Some files were not shown because too many files have changed in this diff Show more