Merge branch 'develop' into enhancement/add-ruff-and-code-spell

Ondřej Samohel 2024-03-18 15:18:02 +01:00 committed by GitHub
commit aecb2522b1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
450 changed files with 8697 additions and 13938 deletions


@@ -1,12 +1,16 @@
import os
from .version import __version__
AYON_CORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# TODO remove after '1.x.x'
# -------------------------
# DEPRECATED - Remove before '1.x.x' release
# -------------------------
PACKAGE_DIR = AYON_CORE_ROOT
PLUGINS_DIR = os.path.join(AYON_CORE_ROOT, "plugins")
AYON_SERVER_ENABLED = True
# Indicate if AYON entities should be used instead of OpenPype entities
USE_AYON_ENTITIES = False
USE_AYON_ENTITIES = True
# -------------------------


@@ -14,9 +14,9 @@ from abc import ABCMeta, abstractmethod
import six
import appdirs
import ayon_api
from ayon_core.lib import Logger, is_dev_mode_enabled
from ayon_core.client import get_ayon_server_api_connection
from ayon_core.settings import get_studio_settings
from .interfaces import (
@@ -147,8 +147,7 @@ def load_addons(force=False):
def _get_ayon_bundle_data():
con = get_ayon_server_api_connection()
bundles = con.get_bundles()["bundles"]
bundles = ayon_api.get_bundles()["bundles"]
bundle_name = os.getenv("AYON_BUNDLE_NAME")
@@ -176,8 +175,7 @@ def _get_ayon_addons_information(bundle_info):
output = []
bundle_addons = bundle_info["addons"]
con = get_ayon_server_api_connection()
addons = con.get_addons_info()["addons"]
addons = ayon_api.get_addons_info()["addons"]
for addon in addons:
name = addon["name"]
versions = addon.get("versions")


@@ -11,6 +11,7 @@ import acre
from ayon_core import AYON_CORE_ROOT
from ayon_core.addon import AddonsManager
from ayon_core.settings import get_general_environments
from ayon_core.lib import initialize_ayon_connection
from .cli_commands import Commands
@@ -243,6 +244,7 @@ def _set_addons_environments():
def main(*args, **kwargs):
initialize_ayon_connection()
python_path = os.getenv("PYTHONPATH", "")
split_paths = python_path.split(os.pathsep)


@@ -181,7 +181,7 @@ class Commands:
json.dump(env, file_stream, indent=4)
@staticmethod
def contextselection(output_path, project_name, asset_name, strict):
def contextselection(output_path, project_name, folder_path, strict):
from ayon_core.tools.context_dialog import main
main(output_path, project_name, asset_name, strict)
main(output_path, project_name, folder_path, strict)


@@ -1,110 +0,0 @@
from .utils import get_ayon_server_api_connection
from .entities import (
get_projects,
get_project,
get_whole_project,
get_asset_by_id,
get_asset_by_name,
get_assets,
get_archived_assets,
get_asset_ids_with_subsets,
get_subset_by_id,
get_subset_by_name,
get_subsets,
get_subset_families,
get_version_by_id,
get_version_by_name,
get_versions,
get_hero_version_by_id,
get_hero_version_by_subset_id,
get_hero_versions,
get_last_versions,
get_last_version_by_subset_id,
get_last_version_by_subset_name,
get_output_link_versions,
version_is_latest,
get_representation_by_id,
get_representation_by_name,
get_representations,
get_representation_parents,
get_representations_parents,
get_archived_representations,
get_thumbnail,
get_thumbnails,
get_thumbnail_id_from_source,
get_workfile_info,
get_asset_name_identifier,
)
from .entity_links import (
get_linked_asset_ids,
get_linked_assets,
get_linked_representation_id,
)
from .operations import (
create_project,
)
__all__ = (
"get_ayon_server_api_connection",
"get_projects",
"get_project",
"get_whole_project",
"get_asset_by_id",
"get_asset_by_name",
"get_assets",
"get_archived_assets",
"get_asset_ids_with_subsets",
"get_subset_by_id",
"get_subset_by_name",
"get_subsets",
"get_subset_families",
"get_version_by_id",
"get_version_by_name",
"get_versions",
"get_hero_version_by_id",
"get_hero_version_by_subset_id",
"get_hero_versions",
"get_last_versions",
"get_last_version_by_subset_id",
"get_last_version_by_subset_name",
"get_output_link_versions",
"version_is_latest",
"get_representation_by_id",
"get_representation_by_name",
"get_representations",
"get_representation_parents",
"get_representations_parents",
"get_archived_representations",
"get_thumbnail",
"get_thumbnails",
"get_thumbnail_id_from_source",
"get_workfile_info",
"get_linked_asset_ids",
"get_linked_assets",
"get_linked_representation_id",
"create_project",
"get_asset_name_identifier",
)


@@ -1,28 +0,0 @@
# --- Folders ---
DEFAULT_FOLDER_FIELDS = {
"id",
"name",
"path",
"parentId",
"active",
"parents",
"thumbnailId"
}
REPRESENTATION_FILES_FIELDS = {
"files.name",
"files.hash",
"files.id",
"files.path",
"files.size",
}
CURRENT_PROJECT_SCHEMA = "openpype:project-3.0"
CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0"
CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0"

File diff suppressed because it is too large


@@ -1,741 +0,0 @@
import collections
from .constants import CURRENT_THUMBNAIL_SCHEMA
from .utils import get_ayon_server_api_connection
from .openpype_comp import get_folders_with_tasks
from .conversion_utils import (
project_fields_v3_to_v4,
convert_v4_project_to_v3,
folder_fields_v3_to_v4,
convert_v4_folder_to_v3,
subset_fields_v3_to_v4,
convert_v4_subset_to_v3,
version_fields_v3_to_v4,
convert_v4_version_to_v3,
representation_fields_v3_to_v4,
convert_v4_representation_to_v3,
workfile_info_fields_v3_to_v4,
convert_v4_workfile_info_to_v3,
)
def get_asset_name_identifier(asset_doc):
"""Get asset name identifier by asset document.
This function exists because of the AYON implementation, where the name
identifier is not just a name but the full folder path.
Asset document must have "name" key, and "data.parents" when in AYON mode.
Args:
asset_doc (dict[str, Any]): Asset document.
"""
parents = list(asset_doc["data"]["parents"])
parents.append(asset_doc["name"])
return "/" + "/".join(parents)
def get_projects(active=True, inactive=False, library=None, fields=None):
if not active and not inactive:
return
if active and inactive:
active = None
elif active:
active = True
elif inactive:
active = False
con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
for project in con.get_projects(active, library, fields=fields):
yield convert_v4_project_to_v3(project)
def get_project(project_name, active=True, inactive=False, fields=None):
con = get_ayon_server_api_connection()
fields = project_fields_v3_to_v4(fields, con)
return convert_v4_project_to_v3(
con.get_project(project_name, fields=fields)
)
def get_whole_project(*args, **kwargs):
raise NotImplementedError("'get_whole_project' not implemented")
def _get_subsets(
project_name,
subset_ids=None,
subset_names=None,
folder_ids=None,
names_by_folder_ids=None,
archived=False,
fields=None
):
# Convert fields and add minimum required fields
con = get_ayon_server_api_connection()
fields = subset_fields_v3_to_v4(fields, con)
if fields is not None:
for key in (
"id",
"active"
):
fields.add(key)
active = True
if archived:
active = None
for subset in con.get_products(
project_name,
product_ids=subset_ids,
product_names=subset_names,
folder_ids=folder_ids,
names_by_folder_ids=names_by_folder_ids,
active=active,
fields=fields,
):
yield convert_v4_subset_to_v3(subset)
def _get_versions(
project_name,
version_ids=None,
subset_ids=None,
versions=None,
hero=True,
standard=True,
latest=None,
active=None,
fields=None
):
con = get_ayon_server_api_connection()
fields = version_fields_v3_to_v4(fields, con)
# Make sure 'productId' and 'version' are available when hero versions
# are queried
if fields and hero:
fields = set(fields)
fields |= {"productId", "version"}
queried_versions = con.get_versions(
project_name,
version_ids=version_ids,
product_ids=subset_ids,
versions=versions,
hero=hero,
standard=standard,
latest=latest,
active=active,
fields=fields
)
version_entities = []
hero_versions = []
for version in queried_versions:
if version["version"] < 0:
hero_versions.append(version)
else:
version_entities.append(convert_v4_version_to_v3(version))
if hero_versions:
subset_ids = set()
versions_nums = set()
for hero_version in hero_versions:
versions_nums.add(abs(hero_version["version"]))
subset_ids.add(hero_version["productId"])
hero_eq_versions = con.get_versions(
project_name,
product_ids=subset_ids,
versions=versions_nums,
hero=False,
fields=["id", "version", "productId"]
)
hero_eq_by_subset_id = collections.defaultdict(list)
for version in hero_eq_versions:
hero_eq_by_subset_id[version["productId"]].append(version)
for hero_version in hero_versions:
abs_version = abs(hero_version["version"])
subset_id = hero_version["productId"]
version_id = None
for version in hero_eq_by_subset_id.get(subset_id, []):
if version["version"] == abs_version:
version_id = version["id"]
break
conv_hero = convert_v4_version_to_v3(hero_version)
conv_hero["version_id"] = version_id
version_entities.append(conv_hero)
return version_entities
def get_asset_by_id(project_name, asset_id, fields=None):
assets = get_assets(
project_name, asset_ids=[asset_id], fields=fields
)
for asset in assets:
return asset
return None
def get_asset_by_name(project_name, asset_name, fields=None):
assets = get_assets(
project_name, asset_names=[asset_name], fields=fields
)
for asset in assets:
return asset
return None
def _folders_query(project_name, con, fields, **kwargs):
if fields is None or "tasks" in fields:
folders = get_folders_with_tasks(
con, project_name, fields=fields, **kwargs
)
else:
folders = con.get_folders(project_name, fields=fields, **kwargs)
for folder in folders:
yield folder
def get_assets(
project_name,
asset_ids=None,
asset_names=None,
parent_ids=None,
archived=False,
fields=None
):
if not project_name:
return
active = True
if archived:
active = None
con = get_ayon_server_api_connection()
fields = folder_fields_v3_to_v4(fields, con)
kwargs = dict(
folder_ids=asset_ids,
parent_ids=parent_ids,
active=active,
)
if not asset_names:
for folder in _folders_query(project_name, con, fields, **kwargs):
yield convert_v4_folder_to_v3(folder, project_name)
return
new_asset_names = set()
folder_paths = set()
for name in asset_names:
if "/" in name:
folder_paths.add(name)
else:
new_asset_names.add(name)
yielded_ids = set()
if folder_paths:
for folder in _folders_query(
project_name, con, fields, folder_paths=folder_paths, **kwargs
):
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
if not new_asset_names:
return
for folder in _folders_query(
project_name, con, fields, folder_names=new_asset_names, **kwargs
):
if folder["id"] not in yielded_ids:
yielded_ids.add(folder["id"])
yield convert_v4_folder_to_v3(folder, project_name)
def get_archived_assets(
project_name,
asset_ids=None,
asset_names=None,
parent_ids=None,
fields=None
):
return get_assets(
project_name,
asset_ids,
asset_names,
parent_ids,
True,
fields
)
def get_asset_ids_with_subsets(project_name, asset_ids=None):
con = get_ayon_server_api_connection()
return con.get_folder_ids_with_products(project_name, asset_ids)
def get_subset_by_id(project_name, subset_id, fields=None):
subsets = get_subsets(
project_name, subset_ids=[subset_id], fields=fields
)
for subset in subsets:
return subset
return None
def get_subset_by_name(project_name, subset_name, asset_id, fields=None):
subsets = get_subsets(
project_name,
subset_names=[subset_name],
asset_ids=[asset_id],
fields=fields
)
for subset in subsets:
return subset
return None
def get_subsets(
project_name,
subset_ids=None,
subset_names=None,
asset_ids=None,
names_by_asset_ids=None,
archived=False,
fields=None
):
return _get_subsets(
project_name,
subset_ids,
subset_names,
asset_ids,
names_by_asset_ids,
archived,
fields=fields
)
def get_subset_families(project_name, subset_ids=None):
con = get_ayon_server_api_connection()
return con.get_product_type_names(project_name, subset_ids)
def get_version_by_id(project_name, version_id, fields=None):
versions = get_versions(
project_name,
version_ids=[version_id],
fields=fields,
hero=True
)
for version in versions:
return version
return None
def get_version_by_name(project_name, version, subset_id, fields=None):
versions = get_versions(
project_name,
subset_ids=[subset_id],
versions=[version],
fields=fields
)
for version in versions:
return version
return None
def get_versions(
project_name,
version_ids=None,
subset_ids=None,
versions=None,
hero=False,
fields=None
):
return _get_versions(
project_name,
version_ids,
subset_ids,
versions,
hero=hero,
standard=True,
fields=fields
)
def get_hero_version_by_id(project_name, version_id, fields=None):
versions = get_hero_versions(
project_name,
version_ids=[version_id],
fields=fields
)
for version in versions:
return version
return None
def get_hero_version_by_subset_id(
project_name, subset_id, fields=None
):
versions = get_hero_versions(
project_name,
subset_ids=[subset_id],
fields=fields
)
for version in versions:
return version
return None
def get_hero_versions(
project_name, subset_ids=None, version_ids=None, fields=None
):
return _get_versions(
project_name,
version_ids=version_ids,
subset_ids=subset_ids,
hero=True,
standard=False,
fields=fields
)
def get_last_versions(project_name, subset_ids, active=None, fields=None):
if fields:
fields = set(fields)
fields.add("parent")
versions = _get_versions(
project_name,
subset_ids=subset_ids,
latest=True,
hero=False,
active=active,
fields=fields
)
return {
version["parent"]: version
for version in versions
}
def get_last_version_by_subset_id(project_name, subset_id, fields=None):
versions = _get_versions(
project_name,
subset_ids=[subset_id],
latest=True,
hero=False,
fields=fields
)
if not versions:
return None
return versions[0]
def get_last_version_by_subset_name(
project_name,
subset_name,
asset_id=None,
asset_name=None,
fields=None
):
if not asset_id and not asset_name:
return None
if not asset_id:
asset = get_asset_by_name(
project_name, asset_name, fields=["_id"]
)
if not asset:
return None
asset_id = asset["_id"]
subset = get_subset_by_name(
project_name, subset_name, asset_id, fields=["_id"]
)
if not subset:
return None
return get_last_version_by_subset_id(
project_name, subset["_id"], fields=fields
)
def get_output_link_versions(project_name, version_id, fields=None):
if not version_id:
return []
con = get_ayon_server_api_connection()
version_links = con.get_version_links(
project_name, version_id, link_direction="out")
version_ids = {
link["entityId"]
for link in version_links
if link["entityType"] == "version"
}
if not version_ids:
return []
return get_versions(project_name, version_ids=version_ids, fields=fields)
def version_is_latest(project_name, version_id):
con = get_ayon_server_api_connection()
return con.version_is_latest(project_name, version_id)
def get_representation_by_id(project_name, representation_id, fields=None):
representations = get_representations(
project_name,
representation_ids=[representation_id],
fields=fields
)
for representation in representations:
return representation
return None
def get_representation_by_name(
project_name, representation_name, version_id, fields=None
):
representations = get_representations(
project_name,
representation_names=[representation_name],
version_ids=[version_id],
fields=fields
)
for representation in representations:
return representation
return None
def get_representations(
project_name,
representation_ids=None,
representation_names=None,
version_ids=None,
context_filters=None,
names_by_version_ids=None,
archived=False,
standard=True,
fields=None
):
if context_filters is not None:
# TODO should we add support for this?
# - there used to be the ability to filter using regex
raise ValueError("OP v4 can't filter by representation context.")
if not archived and not standard:
return
if archived and not standard:
active = False
elif not archived and standard:
active = True
else:
active = None
con = get_ayon_server_api_connection()
fields = representation_fields_v3_to_v4(fields, con)
if fields and active is not None:
fields.add("active")
representations = con.get_representations(
project_name,
representation_ids=representation_ids,
representation_names=representation_names,
version_ids=version_ids,
names_by_version_ids=names_by_version_ids,
active=active,
fields=fields
)
for representation in representations:
yield convert_v4_representation_to_v3(representation)
def get_representation_parents(project_name, representation):
if not representation:
return None
repre_id = representation["_id"]
parents_by_repre_id = get_representations_parents(
project_name, [representation]
)
return parents_by_repre_id[repre_id]
def get_representations_parents(project_name, representations):
repre_ids = {
repre["_id"]
for repre in representations
}
con = get_ayon_server_api_connection()
parents_by_repre_id = con.get_representations_parents(project_name,
repre_ids)
folder_ids = set()
for parents in parents_by_repre_id.values():
folder_ids.add(parents[2]["id"])
tasks_by_folder_id = {}
new_parents = {}
for repre_id, parents in parents_by_repre_id.items():
version, subset, folder, project = parents
folder_tasks = tasks_by_folder_id.get(folder["id"]) or {}
folder["tasks"] = folder_tasks
new_parents[repre_id] = (
convert_v4_version_to_v3(version),
convert_v4_subset_to_v3(subset),
convert_v4_folder_to_v3(folder, project_name),
project
)
return new_parents
def get_archived_representations(
project_name,
representation_ids=None,
representation_names=None,
version_ids=None,
context_filters=None,
names_by_version_ids=None,
fields=None
):
return get_representations(
project_name,
representation_ids=representation_ids,
representation_names=representation_names,
version_ids=version_ids,
context_filters=context_filters,
names_by_version_ids=names_by_version_ids,
archived=True,
standard=False,
fields=fields
)
def get_thumbnail(
project_name, thumbnail_id, entity_type, entity_id, fields=None
):
"""Receive thumbnail entity data.
Args:
project_name (str): Name of project where to look for queried entities.
thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity.
entity_type (str): Type of entity for which the thumbnail should be
received.
entity_id (str): Id of entity for which the thumbnail should be
received.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
Returns:
None: If thumbnail with specified id was not found.
Dict: Thumbnail entity data which can be reduced to specified 'fields'.
"""
if not thumbnail_id or not entity_type or not entity_id:
return None
if entity_type == "asset":
entity_type = "folder"
elif entity_type == "hero_version":
entity_type = "version"
return {
"_id": thumbnail_id,
"type": "thumbnail",
"schema": CURRENT_THUMBNAIL_SCHEMA,
"data": {
"entity_type": entity_type,
"entity_id": entity_id
}
}
def get_thumbnails(project_name, thumbnail_contexts, fields=None):
"""Get thumbnail entities.
Warning:
This function is not OpenPype compatible. It is not used anywhere in the
codebase, so there is nothing to convert. The previous implementation
cannot be AYON compatible without entity types.
"""
# Thumbnail entities are dicts, and dicts are not hashable, so collect
# them into a list instead of a set.
thumbnail_items = []
for thumbnail_context in thumbnail_contexts:
thumbnail_id, entity_type, entity_id = thumbnail_context
thumbnail_item = get_thumbnail(
project_name, thumbnail_id, entity_type, entity_id
)
if thumbnail_item:
thumbnail_items.append(thumbnail_item)
return thumbnail_items
def get_thumbnail_id_from_source(project_name, src_type, src_id):
"""Receive thumbnail id from source entity.
Args:
project_name (str): Name of project where to look for queried entities.
src_type (str): Type of source entity ('asset', 'version').
src_id (Union[str, ObjectId]): Id of source entity.
Returns:
ObjectId: Thumbnail id assigned to entity.
None: If source entity does not have any thumbnail id assigned.
"""
if not src_type or not src_id:
return None
if src_type == "version":
version = get_version_by_id(
project_name, src_id, fields=["data.thumbnail_id"]
) or {}
return version.get("data", {}).get("thumbnail_id")
if src_type == "asset":
asset = get_asset_by_id(
project_name, src_id, fields=["data.thumbnail_id"]
) or {}
return asset.get("data", {}).get("thumbnail_id")
return None
def get_workfile_info(
project_name, asset_id, task_name, filename, fields=None
):
if not asset_id or not task_name or not filename:
return None
con = get_ayon_server_api_connection()
task = con.get_task_by_name(
project_name, asset_id, task_name, fields=["id", "name", "folderId"]
)
if not task:
return None
fields = workfile_info_fields_v3_to_v4(fields)
for workfile_info in con.get_workfiles_info(
project_name, task_ids=[task["id"]], fields=fields
):
if workfile_info["name"] == filename:
return convert_v4_workfile_info_to_v3(workfile_info, task)
return None


@@ -1,157 +0,0 @@
from .utils import get_ayon_server_api_connection
from .entities import get_assets, get_representation_by_id
def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None):
"""Extract linked asset ids from asset document.
One of asset document or asset id must be passed.
Note:
Asset links currently work only from asset to asset.
Args:
project_name (str): Project where to look for asset.
asset_doc (dict): Asset document from DB.
asset_id (str): Asset id to find its document.
Returns:
List[Union[ObjectId, str]]: Asset ids of input links.
"""
output = []
if not asset_doc and not asset_id:
return output
if not asset_id:
asset_id = asset_doc["_id"]
con = get_ayon_server_api_connection()
links = con.get_folder_links(project_name, asset_id, link_direction="in")
return [
link["entityId"]
for link in links
if link["entityType"] == "folder"
]
def get_linked_assets(
project_name, asset_doc=None, asset_id=None, fields=None
):
"""Return linked assets based on passed asset document.
One of asset document or asset id must be passed.
Args:
project_name (str): Name of project where to look for queried entities.
asset_doc (Dict[str, Any]): Asset document from database.
asset_id (Union[ObjectId, str]): Asset id. Can be used instead of
asset document.
fields (Iterable[str]): Fields that should be returned. All fields are
returned if 'None' is passed.
Returns:
List[Dict[str, Any]]: Asset documents of input links for passed
asset doc.
"""
link_ids = get_linked_asset_ids(project_name, asset_doc, asset_id)
if not link_ids:
return []
return list(get_assets(project_name, asset_ids=link_ids, fields=fields))
def get_linked_representation_id(
project_name, repre_doc=None, repre_id=None, link_type=None, max_depth=None
):
"""Returns list of linked ids of particular type (if provided).
One of representation document or representation id must be passed.
Note:
Representation links currently work only from representation through
version back to representations.
Todos:
Missing depth query. Not sure how it found more representations in
depth; probably via links to versions?
Args:
project_name (str): Name of project where to look for links.
repre_doc (Dict[str, Any]): Representation document.
repre_id (Union[ObjectId, str]): Representation id.
link_type (str): Type of link (e.g. 'reference', ...).
max_depth (int): Limit recursion level. Default: 0
Returns:
List[ObjectId] Linked representation ids.
"""
if repre_doc:
repre_id = repre_doc["_id"]
if not repre_id and not repre_doc:
return []
version_id = None
if repre_doc:
version_id = repre_doc.get("parent")
if not version_id:
repre_doc = get_representation_by_id(
project_name, repre_id, fields=["parent"]
)
if repre_doc:
version_id = repre_doc["parent"]
if not version_id:
return []
if max_depth is None or max_depth == 0:
max_depth = 1
link_types = None
if link_type:
link_types = [link_type]
con = get_ayon_server_api_connection()
# Store already found version ids to avoid recursion, and also to store
# output -> Don't forget to remove 'version_id' at the end!!!
linked_version_ids = {version_id}
# Each loop of depth will reset this variable
versions_to_check = {version_id}
for _ in range(max_depth):
if not versions_to_check:
break
versions_links = con.get_versions_links(
project_name,
versions_to_check,
link_types=link_types,
link_direction="out")
versions_to_check = set()
for links in versions_links.values():
for link in links:
# Care only about version links
if link["entityType"] != "version":
continue
entity_id = link["entityId"]
# Skip already found linked version ids
if entity_id in linked_version_ids:
continue
linked_version_ids.add(entity_id)
versions_to_check.add(entity_id)
linked_version_ids.remove(version_id)
if not linked_version_ids:
return []
representations = con.get_representations(
project_name,
version_ids=linked_version_ids,
fields=["id"])
return [
repre["id"]
for repre in representations
]
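# Illustrative usage (ids are placeholders): find representations linked
# through 'reference' links, one level deep:
# linked_ids = get_linked_representation_id(
#     "demo_project", repre_id="<representation-id>", link_type="reference"
# )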


@@ -1,39 +0,0 @@
# Client functionality
## Reason
Preparation for the OpenPype v4 server. The goal is to remove direct mongo calls from the code to prepare for a different source of data, and to start thinking about database calls less as mongo calls and more universally. To do that, a simple wrapper around database calls was implemented so that pymongo-specific code is not used directly.
The current goal is not to create a universal database model that could easily be swapped for any other source of data, but to get as close to that as possible. The current implementation of OpenPype is too tightly coupled to pymongo and its abilities, so we're trying to get closer with long-term changes that can be used even in the current state.
## Queries
Query functions don't use the full potential of mongo queries, such as very specific queries based on subdictionaries or unknown structures. We avoid these calls as much as possible because they probably won't be available in the future. If one is really necessary, a new function can be added, but only if it's reasonable for the overall logic. All query functions were moved to `~/client/entities.py`. Each function takes arguments for the available filters and can reduce the returned keys for each entity.
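A minimal usage sketch (illustrative only; the project and subset names are placeholders, and the imports assume the exports from this package's `client/__init__.py`):

```python
from ayon_core.client import get_assets, get_subsets

# Filters are plain arguments and 'fields' reduces the returned keys.
for asset in get_assets("demo_project", fields=["name"]):
    print(asset["name"])

# Query by name instead of building a raw mongo query.
subsets = list(get_subsets("demo_project", subset_names=["modelMain"]))
```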
## Changes
Changes are a little more complicated. Mongo offers many ways an update can happen, which had to be reduced; it would also be complicated at this stage to validate the values being created or updated, so there is almost no automation at this point. Changes can be made using the operations available in `~/client/operations.py`. Each operation requires a project name and entity type, and may require operation-specific data.
### Create
Create operations expect already prepared document data; for that, helper functions are available that create skeletal document structures (they do not fill all required data), and except for `_id` all data should be correct. Entity existence is not validated, so if the same creation operation is sent n times it will create the entity n times, which can cause issues. See the sketch below.
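For illustration, a minimal sketch that builds a skeleton document and sends it through an operations session; the module path is assumed from `~/client/operations.py` above, and all ids/names are placeholders:

```python
from ayon_core.client.operations import (
    OperationsSession,
    new_subset_document,
)

session = OperationsSession()
# Skeleton document; everything except '_id' must already be correct.
subset_doc = new_subset_document(
    "modelMain", "model", "<asset-id>", data={}
)
session.create_entity("demo_project", "subset", subset_doc)
session.commit()
```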
### Update
Update operations require an entity id and the keys that should be changed; the update dictionary must have the form `{"key": value}`. If a value should be set in a nested dictionary, the key must contain all subkeys joined with a dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify building update dictionaries, helper functions were prepared that do this for you; their names follow the template `prepare_<entity type>_update_data` and they work by comparing the previous document with the new one. If a function is missing for a requested entity type, it is because we didn't need it yet and it still requires implementation.
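The dot-joining rule itself is simple. A minimal sketch with a hypothetical `flatten_changes` helper (not part of the codebase) shows the transformation:

```python
def flatten_changes(changes, _prefix=""):
    """Flatten nested dicts into dot-joined update keys."""
    output = {}
    for key, value in changes.items():
        full_key = _prefix + key
        if isinstance(value, dict):
            output.update(flatten_changes(value, full_key + "."))
        else:
            output[full_key] = value
    return output

assert flatten_changes({"data": {"fps": 25}}) == {"data.fps": 25}
```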
### Delete
Delete operations need only an entity id. The entity will be deleted from mongo.
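Continuing the sketch above, update and delete operations follow the same session pattern (entity ids are placeholders):

```python
session = OperationsSession()
session.update_entity(
    "demo_project", "asset", "<asset-id>", {"data.fps": 25}
)
session.delete_entity("demo_project", "representation", "<repre-id>")
session.commit()
```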
## What (probably) won't be replaced
Some parts of the code still use direct mongo calls. In most cases these are very specific, module-specific calls, or their usage will completely change in the future.
- Mongo calls that are not project specific (outside the `avalon` collection) will be removed or will have to use a different mechanism for storing the data. At this moment this concerns OpenPype settings and logs, ftrack server events, and some other data.
- Sync server queries. They're complex and very specific to the sync server module. Their replacement will require specific calls to the OpenPype server in v4, thus abstracting them with a wrapper is irrelevant and would complicate production in v3.
- Project managers (ftrack, kitsu, shotgrid, embedded Project Manager, etc.). Project managers create, update or remove assets in v3, but in v4 they will create folders with a different structure. Wrapping asset creation would not help prepare for v4 because of the new data structures. The same can be said about the editorial Extract Hierarchy Avalon plugin, which creates the project structure.
- Code parts that are marked as deprecated in v3 or will be deprecated in v4.
- integrate asset legacy publish plugin - already legacy, kept for safety
- integrate thumbnail - thumbnails will be stored in a different way in v4
- input links - links will be stored in a different way and will have a different linking mechanism. In v3, links are limited to the same entity type: "asset <-> asset" or "representation <-> representation".
## Known missing replacements
- change subset group in loader tool
- integrate subset group
- query input links in openpype lib
- create project in openpype lib
- save/create workfile doc in openpype lib
- integrate hero version


@@ -1,159 +0,0 @@
import collections
import json
import six
from ayon_api.graphql import GraphQlQuery, FIELD_VALUE, fields_to_dict
from .constants import DEFAULT_FOLDER_FIELDS
def folders_tasks_graphql_query(fields):
query = GraphQlQuery("FoldersQuery")
project_name_var = query.add_variable("projectName", "String!")
folder_ids_var = query.add_variable("folderIds", "[String!]")
parent_folder_ids_var = query.add_variable("parentFolderIds", "[String!]")
folder_paths_var = query.add_variable("folderPaths", "[String!]")
folder_names_var = query.add_variable("folderNames", "[String!]")
has_products_var = query.add_variable("folderHasProducts", "Boolean!")
project_field = query.add_field("project")
project_field.set_filter("name", project_name_var)
folders_field = project_field.add_field_with_edges("folders")
folders_field.set_filter("ids", folder_ids_var)
folders_field.set_filter("parentIds", parent_folder_ids_var)
folders_field.set_filter("names", folder_names_var)
folders_field.set_filter("paths", folder_paths_var)
folders_field.set_filter("hasProducts", has_products_var)
fields = set(fields)
fields.discard("tasks")
tasks_field = folders_field.add_field_with_edges("tasks")
tasks_field.add_field("name")
tasks_field.add_field("taskType")
nested_fields = fields_to_dict(fields)
query_queue = collections.deque()
for key, value in nested_fields.items():
query_queue.append((key, value, folders_field))
while query_queue:
item = query_queue.popleft()
key, value, parent = item
field = parent.add_field(key)
if value is FIELD_VALUE:
continue
for k, v in value.items():
query_queue.append((k, v, field))
return query
def get_folders_with_tasks(
con,
project_name,
folder_ids=None,
folder_paths=None,
folder_names=None,
parent_ids=None,
active=True,
fields=None
):
"""Query folders with tasks from server.
This exists for compatibility with the old structure where tasks were
stored on assets. Querying folders and tasks this way is inefficient,
so it was added only as a compatibility function.
Todos:
Folder name won't be a unique identifier, so we should add folder path
filtering.
Notes:
Filter 'active' doesn't have a direct filter in GraphQl.
Args:
con (ServerAPI): Connection to server.
project_name (str): Name of project where folders are.
folder_ids (Iterable[str]): Folder ids to filter.
folder_paths (Iterable[str]): Folder paths used for filtering.
folder_names (Iterable[str]): Folder names used for filtering.
parent_ids (Iterable[str]): Ids of folder parents. Use 'None'
if folder is direct child of project.
active (Union[bool, None]): Filter active/inactive folders. Both
are returned if set to None.
fields (Union[Iterable(str), None]): Fields to be queried
for folder. All possible folder fields are returned if 'None'
is passed.
Yields:
Dict[str, Any]: Queried folder entities.
"""
if not project_name:
return
filters = {
"projectName": project_name
}
if folder_ids is not None:
folder_ids = set(folder_ids)
if not folder_ids:
return
filters["folderIds"] = list(folder_ids)
if folder_paths is not None:
folder_paths = set(folder_paths)
if not folder_paths:
return
filters["folderPaths"] = list(folder_paths)
if folder_names is not None:
folder_names = set(folder_names)
if not folder_names:
return
filters["folderNames"] = list(folder_names)
if parent_ids is not None:
parent_ids = set(parent_ids)
if not parent_ids:
return
if None in parent_ids:
# Replace 'None' with '"root"' which is used during GraphQl
# query for parent ids filter for folders without folder
# parent
parent_ids.remove(None)
parent_ids.add("root")
if project_name in parent_ids:
# Replace project name with '"root"' which is used during
# GraphQl query for parent ids filter for folders without
# folder parent
parent_ids.remove(project_name)
parent_ids.add("root")
filters["parentFolderIds"] = list(parent_ids)
if fields:
fields = set(fields)
else:
fields = con.get_default_fields_for_type("folder")
fields |= DEFAULT_FOLDER_FIELDS
if active is not None:
fields.add("active")
query = folders_tasks_graphql_query(fields)
for attr, filter_value in filters.items():
query.set_variable_value(attr, filter_value)
parsed_data = query.query(con)
folders = parsed_data["project"]["folders"]
for folder in folders:
if active is not None and folder["active"] is not active:
continue
folder_data = folder.get("data")
if isinstance(folder_data, six.string_types):
folder["data"] = json.loads(folder_data)
yield folder
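# Illustrative usage (assumes an authenticated 'ayon_api' ServerAPI
# connection named 'con'; the project name is a placeholder):
# for folder in get_folders_with_tasks(con, "demo_project"):
#     print(folder["name"], folder["tasks"])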


@@ -1,880 +0,0 @@
import copy
import json
import collections
import uuid
import datetime
from ayon_api.server_api import (
PROJECT_NAME_ALLOWED_SYMBOLS,
PROJECT_NAME_REGEX,
)
from .constants import (
CURRENT_PROJECT_SCHEMA,
CURRENT_PROJECT_CONFIG_SCHEMA,
CURRENT_ASSET_DOC_SCHEMA,
CURRENT_SUBSET_SCHEMA,
CURRENT_VERSION_SCHEMA,
CURRENT_HERO_VERSION_SCHEMA,
CURRENT_REPRESENTATION_SCHEMA,
CURRENT_WORKFILE_INFO_SCHEMA,
CURRENT_THUMBNAIL_SCHEMA,
)
from .operations_base import (
REMOVED_VALUE,
CreateOperation,
UpdateOperation,
DeleteOperation,
BaseOperationsSession
)
from .conversion_utils import (
convert_create_asset_to_v4,
convert_create_task_to_v4,
convert_create_subset_to_v4,
convert_create_version_to_v4,
convert_create_hero_version_to_v4,
convert_create_representation_to_v4,
convert_create_workfile_info_to_v4,
convert_update_folder_to_v4,
convert_update_subset_to_v4,
convert_update_version_to_v4,
convert_update_hero_version_to_v4,
convert_update_representation_to_v4,
convert_update_workfile_info_to_v4,
)
from .utils import create_entity_id, get_ayon_server_api_connection
def _create_or_convert_to_id(entity_id=None):
if entity_id is None:
return create_entity_id()
# Validate if can be converted to uuid
uuid.UUID(entity_id)
return entity_id
def new_project_document(
project_name, project_code, config, data=None, entity_id=None
):
"""Create skeleton data of project document.
Args:
project_name (str): Name of project. Used as identifier of a project.
project_code (str): Shorter version of the project name without spaces
and special characters (in most cases). Should also be considered
a unique name across projects.
config (Dict[str, Any]): Project config consisting of roots, templates,
applications and other project Anatomy related data.
data (Dict[str, Any]): Project data with information about its
attributes (e.g. 'fps' etc.) or integration specific keys.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of project document.
"""
if data is None:
data = {}
data["code"] = project_code
return {
"_id": _create_or_convert_to_id(entity_id),
"name": project_name,
"type": CURRENT_PROJECT_SCHEMA,
"entity_data": data,
"config": config
}
def new_asset_document(
name, project_id, parent_id, parents, data=None, entity_id=None
):
"""Create skeleton data of asset document.
Args:
name (str): Is considered as unique identifier of asset in project.
project_id (Union[str, ObjectId]): Id of project document.
parent_id (Union[str, ObjectId]): Id of parent asset.
parents (List[str]): List of parent assets names.
data (Dict[str, Any]): Asset document data. Empty dictionary is used
if not passed. Value of 'parent_id' is used to fill 'visualParent'.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of asset document.
"""
if data is None:
data = {}
if parent_id is not None:
parent_id = _create_or_convert_to_id(parent_id)
data["visualParent"] = parent_id
data["parents"] = parents
return {
"_id": _create_or_convert_to_id(entity_id),
"type": "asset",
"name": name,
# This will be ignored
"parent": project_id,
"data": data,
"schema": CURRENT_ASSET_DOC_SCHEMA
}
def new_subset_document(name, family, asset_id, data=None, entity_id=None):
"""Create skeleton data of subset document.
Args:
name (str): Is considered as unique identifier of subset under asset.
family (str): Subset's family.
asset_id (Union[str, ObjectId]): Id of parent asset.
data (Dict[str, Any]): Subset document data. Empty dictionary is used
if not passed. Value of 'family' is used to fill 'family'.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of subset document.
"""
if data is None:
data = {}
data["family"] = family
return {
"_id": _create_or_convert_to_id(entity_id),
"schema": CURRENT_SUBSET_SCHEMA,
"type": "subset",
"name": name,
"data": data,
"parent": _create_or_convert_to_id(asset_id)
}
def new_version_doc(version, subset_id, data=None, entity_id=None):
"""Create skeleton data of version document.
Args:
version (int): Is considered as unique identifier of version
under subset.
subset_id (Union[str, ObjectId]): Id of parent subset.
data (Dict[str, Any]): Version document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of version document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_id(entity_id),
"schema": CURRENT_VERSION_SCHEMA,
"type": "version",
"name": int(version),
"parent": _create_or_convert_to_id(subset_id),
"data": data
}
def new_hero_version_doc(subset_id, data, version=None, entity_id=None):
"""Create skeleton data of hero version document.
Args:
subset_id (Union[str, ObjectId]): Id of parent subset.
data (Dict[str, Any]): Version document data.
version (int): Version of source version.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of hero version document.
"""
if version is None:
version = -1
elif version > 0:
version = -version
return {
"_id": _create_or_convert_to_id(entity_id),
"schema": CURRENT_HERO_VERSION_SCHEMA,
"type": "hero_version",
"version": version,
"parent": _create_or_convert_to_id(subset_id),
"data": data
}
def new_representation_doc(
name, version_id, context, data=None, entity_id=None
):
"""Create skeleton data of representation document.
Args:
name (str): Representation name considered as unique identifier
of representation under version.
version_id (Union[str, ObjectId]): Id of parent version.
context (Dict[str, Any]): Representation context used to fill templates
or for querying.
data (Dict[str, Any]): Representation document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of representation document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_id(entity_id),
"schema": CURRENT_REPRESENTATION_SCHEMA,
"type": "representation",
"parent": _create_or_convert_to_id(version_id),
"name": name,
"data": data,
# Imprint shortcut to context for performance reasons.
"context": context
}
def new_thumbnail_doc(data=None, entity_id=None):
"""Create skeleton data of thumbnail document.
Args:
data (Dict[str, Any]): Thumbnail document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of thumbnail document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_id(entity_id),
"type": "thumbnail",
"schema": CURRENT_THUMBNAIL_SCHEMA,
"data": data
}
def new_workfile_info_doc(
filename, asset_id, task_name, files, data=None, entity_id=None
):
"""Create skeleton data of workfile info document.
Workfile document is at this moment used primarily for artist notes.
Args:
filename (str): Filename of workfile.
asset_id (Union[str, ObjectId]): Id of asset under which workfile lives.
task_name (str): Task under which was workfile created.
files (List[str]): List of rootless filepaths related to workfile.
data (Dict[str, Any]): Additional metadata.
Returns:
Dict[str, Any]: Skeleton of workfile info document.
"""
if not data:
data = {}
return {
"_id": _create_or_convert_to_id(entity_id),
"type": "workfile",
"parent": _create_or_convert_to_id(asset_id),
"task_name": task_name,
"filename": filename,
"data": data,
"files": files
}
def _prepare_update_data(old_doc, new_doc, replace):
changes = {}
for key, value in new_doc.items():
if key not in old_doc or value != old_doc[key]:
changes[key] = value
if replace:
for key in old_doc.keys():
if key not in new_doc:
changes[key] = REMOVED_VALUE
return changes
def prepare_subset_update_data(old_doc, new_doc, replace=True):
"""Compare two subset documents and prepare update data.
Based on the compared values, update data for 'UpdateOperation'
is created.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_version_update_data(old_doc, new_doc, replace=True):
"""Compare two version documents and prepare update data.
Based on the compared values, update data for 'UpdateOperation'
is created.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_hero_version_update_data(old_doc, new_doc, replace=True):
"""Compare two hero version documents and prepare update data.
Based on the compared values, update data for 'UpdateOperation' is created.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
changes = _prepare_update_data(old_doc, new_doc, replace)
changes.pop("version_id", None)
return changes
def prepare_representation_update_data(old_doc, new_doc, replace=True):
"""Compare two representation documents and prepare update data.
Based on the compared values, update data for 'UpdateOperation'
is created.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
changes = _prepare_update_data(old_doc, new_doc, replace)
context = changes.get("data", {}).get("context")
# Make sure that both 'family' and 'subset' are in changes if
# one of them changed (they'll both become 'product').
if (
context
and ("family" in context or "subset" in context)
):
context["family"] = new_doc["data"]["context"]["family"]
context["subset"] = new_doc["data"]["context"]["subset"]
return changes
def prepare_workfile_info_update_data(old_doc, new_doc, replace=True):
"""Compare two workfile info documents and prepare update data.
Based on the compared values, update data for 'UpdateOperation'
is created.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
return _prepare_update_data(old_doc, new_doc, replace)
class FailedOperations(Exception):
pass
def entity_data_json_default(value):
if isinstance(value, datetime.datetime):
return int(value.timestamp())
raise TypeError(
"Object of type {} is not JSON serializable".format(str(type(value)))
)
def failed_json_default(value):
return "< Failed value {} > {}".format(type(value), str(value))
class ServerCreateOperation(CreateOperation):
"""Operation to create an entity.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
data (Dict[str, Any]): Data of entity that will be created.
"""
def __init__(self, project_name, entity_type, data, session):
self._session = session
if not data:
data = {}
data = copy.deepcopy(data)
if entity_type == "project":
raise ValueError("Project cannot be created using operations")
tasks = None
if entity_type == "asset":
# TODO handle tasks
entity_type = "folder"
if "data" in data:
tasks = data["data"].get("tasks")
project = self._session.get_project(project_name)
new_data = convert_create_asset_to_v4(data, project, self.con)
elif entity_type == "task":
project = self._session.get_project(project_name)
new_data = convert_create_task_to_v4(data, project, self.con)
elif entity_type == "subset":
new_data = convert_create_subset_to_v4(data, self.con)
entity_type = "product"
elif entity_type == "version":
new_data = convert_create_version_to_v4(data, self.con)
elif entity_type == "hero_version":
new_data = convert_create_hero_version_to_v4(
data, project_name, self.con
)
entity_type = "version"
elif entity_type in ("representation", "archived_representation"):
new_data = convert_create_representation_to_v4(data, self.con)
entity_type = "representation"
elif entity_type == "workfile":
new_data = convert_create_workfile_info_to_v4(
data, project_name, self.con
)
else:
raise ValueError(
"Unhandled entity type \"{}\"".format(entity_type)
)
# Simple check if data can be dumped into json
# - should raise error on 'ObjectId' object
try:
new_data = json.loads(
json.dumps(new_data, default=entity_data_json_default)
)
except Exception:
raise ValueError("Couldn't json parse body: {}".format(
json.dumps(new_data, default=failed_json_default)
))
super(ServerCreateOperation, self).__init__(
project_name, entity_type, new_data
)
if "id" not in self._data:
self._data["id"] = create_entity_id()
if tasks:
copied_tasks = copy.deepcopy(tasks)
for task_name, task in copied_tasks.items():
task["name"] = task_name
task["folderId"] = self._data["id"]
self.session.create_entity(
project_name, "task", task, nested_id=self.id
)
@property
def con(self):
return self.session.con
@property
def session(self):
return self._session
@property
def entity_id(self):
return self._data["id"]
def to_server_operation(self):
return {
"id": self.id,
"type": "create",
"entityType": self.entity_type,
"entityId": self.entity_id,
"data": self._data
}
class ServerUpdateOperation(UpdateOperation):
"""Operation to update an entity.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
entity_id (Union[str, ObjectId]): Identifier of an entity.
update_data (Dict[str, Any]): Key -> value changes that will be set in
database. If value is set to 'REMOVED_VALUE' the key will be
removed. Only first level of dictionary is checked (on purpose).
"""
def __init__(
self, project_name, entity_type, entity_id, update_data, session
):
self._session = session
update_data = copy.deepcopy(update_data)
if entity_type == "project":
raise ValueError("Project cannot be created using operations")
if entity_type in ("asset", "archived_asset"):
new_update_data = convert_update_folder_to_v4(
project_name, entity_id, update_data, self.con
)
entity_type = "folder"
elif entity_type == "subset":
new_update_data = convert_update_subset_to_v4(
project_name, entity_id, update_data, self.con
)
entity_type = "product"
elif entity_type == "version":
new_update_data = convert_update_version_to_v4(
project_name, entity_id, update_data, self.con
)
elif entity_type == "hero_version":
new_update_data = convert_update_hero_version_to_v4(
project_name, entity_id, update_data, self.con
)
entity_type = "version"
elif entity_type in ("representation", "archived_representation"):
new_update_data = convert_update_representation_to_v4(
project_name, entity_id, update_data, self.con
)
entity_type = "representation"
elif entity_type == "workfile":
new_update_data = convert_update_workfile_info_to_v4(
project_name, entity_id, update_data, self.con
)
else:
raise ValueError(
"Unhandled entity type \"{}\"".format(entity_type)
)
try:
new_update_data = json.loads(
json.dumps(new_update_data, default=entity_data_json_default)
)
except Exception:
raise ValueError("Couldn't json parse body: {}".format(
json.dumps(new_update_data, default=failed_json_default)
))
super(ServerUpdateOperation, self).__init__(
project_name, entity_type, entity_id, new_update_data
)
@property
def con(self):
return self.session.con
@property
def session(self):
return self._session
def to_server_operation(self):
if not self._update_data:
return None
update_data = {}
for key, value in self._update_data.items():
if value is REMOVED_VALUE:
value = None
update_data[key] = value
return {
"id": self.id,
"type": "update",
"entityType": self.entity_type,
"entityId": self.entity_id,
"data": update_data
}
class ServerDeleteOperation(DeleteOperation):
"""Operation to delete an entity.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
entity_id (Union[str, ObjectId]): Entity id that will be removed.
"""
def __init__(self, project_name, entity_type, entity_id, session):
self._session = session
if entity_type == "asset":
entity_type = "folder"
elif entity_type == "hero_version":
entity_type = "version"
elif entity_type == "subset":
entity_type = "product"
super(ServerDeleteOperation, self).__init__(
project_name, entity_type, entity_id
)
@property
def con(self):
return self.session.con
@property
def session(self):
return self._session
def to_server_operation(self):
return {
"id": self.id,
"type": self.operation_name,
"entityId": self.entity_id,
"entityType": self.entity_type,
}
class OperationsSession(BaseOperationsSession):
def __init__(self, con=None, *args, **kwargs):
super(OperationsSession, self).__init__(*args, **kwargs)
if con is None:
con = get_ayon_server_api_connection()
self._con = con
self._project_cache = {}
self._nested_operations = collections.defaultdict(list)
@property
def con(self):
return self._con
def get_project(self, project_name):
if project_name not in self._project_cache:
self._project_cache[project_name] = self.con.get_project(
project_name)
return copy.deepcopy(self._project_cache[project_name])
def commit(self):
"""Commit session operations."""
operations, self._operations = self._operations, []
if not operations:
return
operations_by_project = collections.defaultdict(list)
for operation in operations:
operations_by_project[operation.project_name].append(operation)
body_by_id = {}
results = []
for project_name, operations in operations_by_project.items():
operations_body = []
for operation in operations:
body = operation.to_server_operation()
if body is not None:
try:
json.dumps(body)
except Exception:
raise ValueError("Couldn't json parse body: {}".format(
json.dumps(
body, indent=4, default=failed_json_default
)
))
body_by_id[operation.id] = body
operations_body.append(body)
if operations_body:
result = self._con.post(
"projects/{}/operations".format(project_name),
operations=operations_body,
canFail=False
)
results.append(result.data)
for result in results:
if result.get("success"):
continue
if "operations" not in result:
raise FailedOperations(
"Operation failed. Content: {}".format(str(result))
)
for op_result in result["operations"]:
if not op_result["success"]:
operation_id = op_result["id"]
raise FailedOperations((
"Operation \"{}\" failed with data:\n{}\nError: {}."
).format(
operation_id,
json.dumps(body_by_id[operation_id], indent=4),
op_result.get("error", "unknown"),
))
def create_entity(self, project_name, entity_type, data, nested_id=None):
"""Fast access to 'ServerCreateOperation'.
Args:
project_name (str): On which project the creation happens.
entity_type (str): Which entity type will be created.
data (Dict[str, Any]): Entity data.
nested_id (str): Id of the operation that triggered this one.
Operations can trigger suboperations, but they must be added
to the operations list after their parent is added.
Returns:
ServerCreateOperation: Object of update operation.
"""
operation = ServerCreateOperation(
project_name, entity_type, data, self
)
if nested_id:
self._nested_operations[nested_id].append(operation)
else:
self.add(operation)
if operation.id in self._nested_operations:
self.extend(self._nested_operations.pop(operation.id))
return operation
def update_entity(
self, project_name, entity_type, entity_id, update_data, nested_id=None
):
"""Fast access to 'ServerUpdateOperation'.
Returns:
ServerUpdateOperation: Object of update operation.
"""
operation = ServerUpdateOperation(
project_name, entity_type, entity_id, update_data, self
)
if nested_id:
self._nested_operations[nested_id].append(operation)
else:
self.add(operation)
if operation.id in self._nested_operations:
self.extend(self._nested_operations.pop(operation.id))
return operation
def delete_entity(
self, project_name, entity_type, entity_id, nested_id=None
):
"""Fast access to 'ServerDeleteOperation'.
Returns:
ServerDeleteOperation: Object of delete operation.
"""
operation = ServerDeleteOperation(
project_name, entity_type, entity_id, self
)
if nested_id:
self._nested_operations[nested_id].append(operation)
else:
self.add(operation)
if operation.id in self._nested_operations:
self.extend(self._nested_operations.pop(operation.id))
return operation
def create_project(
project_name,
project_code,
library_project=False,
preset_name=None,
con=None
):
"""Create project using OpenPype settings.
This project creation function does not validate the project document on
creation, because the document is created blindly with only the minimum
required information about the project: its name, code, type and schema.
The entered project name must be unique and the project must not exist yet.
Note:
This function is here to be OP v4 ready, but in v3 it has more logic
to do. That's why inner imports are in the body.
Args:
project_name (str): New project name. Should be unique.
project_code (str): Project's code should be unique too.
library_project (bool): Project is library project.
preset_name (str): Name of anatomy preset. Default is used if not
passed.
con (ServerAPI): Connection to server with logged user.
Raises:
ValueError: When project name already exists in MongoDB.
Returns:
dict: Created project document.
"""
if con is None:
con = get_ayon_server_api_connection()
return con.create_project(
project_name,
project_code,
library_project,
preset_name
)
def delete_project(project_name, con=None):
if con is None:
con = get_ayon_server_api_connection()
return con.delete_project(project_name)
def create_thumbnail(project_name, src_filepath, thumbnail_id=None, con=None):
if con is None:
con = get_ayon_server_api_connection()
return con.create_thumbnail(project_name, src_filepath, thumbnail_id)


@@ -1,289 +0,0 @@
import uuid
import copy
from abc import ABCMeta, abstractmethod, abstractproperty
import six
REMOVED_VALUE = object()
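# Sentinel usage: setting a key to REMOVED_VALUE in update data marks
# that key for removal, e.g. update_data = {"data.family": REMOVED_VALUE}.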
@six.add_metaclass(ABCMeta)
class AbstractOperation(object):
"""Base operation class.
An operation represents a call into the database. The call can create,
change or remove data.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
"""
def __init__(self, project_name, entity_type):
self._project_name = project_name
self._entity_type = entity_type
self._id = str(uuid.uuid4())
@property
def project_name(self):
return self._project_name
@property
def id(self):
"""Identifier of operation."""
return self._id
@property
def entity_type(self):
return self._entity_type
@abstractproperty
def operation_name(self):
"""Stringified type of operation."""
pass
def to_data(self):
"""Convert operation to data that can be converted to json or others.
Warning:
Current state returns ObjectId objects which cannot be parsed by
json.
Returns:
Dict[str, Any]: Description of operation.
"""
return {
"id": self._id,
"entity_type": self.entity_type,
"project_name": self.project_name,
"operation": self.operation_name
}
class CreateOperation(AbstractOperation):
"""Operation to create an entity.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
data (Dict[str, Any]): Data of entity that will be created.
"""
operation_name = "create"
def __init__(self, project_name, entity_type, data):
super(CreateOperation, self).__init__(project_name, entity_type)
if not data:
data = {}
else:
data = copy.deepcopy(dict(data))
self._data = data
def __setitem__(self, key, value):
self.set_value(key, value)
def __getitem__(self, key):
return self.data[key]
def set_value(self, key, value):
self.data[key] = value
def get(self, key, *args, **kwargs):
return self.data.get(key, *args, **kwargs)
@abstractproperty
def entity_id(self):
pass
@property
def data(self):
return self._data
def to_data(self):
output = super(CreateOperation, self).to_data()
output["data"] = copy.deepcopy(self.data)
return output
class UpdateOperation(AbstractOperation):
"""Operation to update an entity.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
entity_id (Union[str, ObjectId]): Identifier of an entity.
update_data (Dict[str, Any]): Key -> value changes that will be set in
the database. If a value is set to 'REMOVED_VALUE' the key will be
removed. Only the first level of the dictionary is checked (on purpose).
"""
operation_name = "update"
def __init__(self, project_name, entity_type, entity_id, update_data):
super(UpdateOperation, self).__init__(project_name, entity_type)
self._entity_id = entity_id
self._update_data = update_data
@property
def entity_id(self):
return self._entity_id
@property
def update_data(self):
return self._update_data
def to_data(self):
changes = {}
for key, value in self._update_data.items():
if value is REMOVED_VALUE:
value = None
changes[key] = value
output = super(UpdateOperation, self).to_data()
output.update({
"entity_id": self.entity_id,
"changes": changes
})
return output
class DeleteOperation(AbstractOperation):
"""Operation to delete an entity.
Args:
project_name (str): On which project operation will happen.
entity_type (str): Type of entity on which change happens.
e.g. 'asset', 'representation' etc.
entity_id (Union[str, ObjectId]): Entity id that will be removed.
"""
operation_name = "delete"
def __init__(self, project_name, entity_type, entity_id):
super(DeleteOperation, self).__init__(project_name, entity_type)
self._entity_id = entity_id
@property
def entity_id(self):
return self._entity_id
def to_data(self):
output = super(DeleteOperation, self).to_data()
output["entity_id"] = self.entity_id
return output
class BaseOperationsSession(object):
"""Session storing operations that should happen in an order.
At this moment it does not handle anything special and can be considered
a plain list of operations that will happen one after another. If the same
entity is created multiple times, that is not handled in any way, and
document values are not validated.
"""
def __init__(self):
self._operations = []
def __len__(self):
return len(self._operations)
def add(self, operation):
"""Add operation to be processed.
Args:
operation (BaseOperation): Operation that should be processed.
"""
if not isinstance(
operation,
(CreateOperation, UpdateOperation, DeleteOperation)
):
raise TypeError("Expected Operation object got {}".format(
str(type(operation))
))
self._operations.append(operation)
def append(self, operation):
"""Add operation to be processed.
Args:
operation (BaseOperation): Operation that should be processed.
"""
self.add(operation)
def extend(self, operations):
"""Add operations to be processed.
Args:
operations (List[BaseOperation]): Operations that should be
processed.
"""
for operation in operations:
self.add(operation)
def remove(self, operation):
"""Remove operation."""
self._operations.remove(operation)
def clear(self):
"""Clear all registered operations."""
self._operations = []
def to_data(self):
return [
operation.to_data()
for operation in self._operations
]
@abstractmethod
def commit(self):
"""Commit session operations."""
pass
def create_entity(self, project_name, entity_type, data):
"""Fast access to 'CreateOperation'.
Returns:
CreateOperation: Object of create operation.
"""
operation = CreateOperation(project_name, entity_type, data)
self.add(operation)
return operation
def update_entity(self, project_name, entity_type, entity_id, update_data):
"""Fast access to 'UpdateOperation'.
Returns:
UpdateOperation: Object of update operation.
"""
operation = UpdateOperation(
project_name, entity_type, entity_id, update_data
)
self.add(operation)
return operation
def delete_entity(self, project_name, entity_type, entity_id):
"""Fast access to 'DeleteOperation'.
Returns:
DeleteOperation: Object of delete operation.
"""
operation = DeleteOperation(project_name, entity_type, entity_id)
self.add(operation)
return operation
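
A minimal sketch of how a session built on these primitives could be used;
'MemoryOperationsSession' is hypothetical and simply returns the queued
payloads on commit:

    class MemoryOperationsSession(BaseOperationsSession):
        def commit(self):
            payload = self.to_data()
            self.clear()
            return payload

    session = MemoryOperationsSession()
    # 'UpdateOperation' and 'DeleteOperation' are concrete; 'CreateOperation'
    # keeps 'entity_id' abstract, so 'create_entity' needs a subclass first.
    session.update_entity("my_project", "folder", "entity-id-1", {"label": "Shots"})
    session.delete_entity("my_project", "folder", "entity-id-2")
    print(session.commit())  # list of operation payloads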

View file

@ -1,229 +0,0 @@
"""Cache of thumbnails downloaded from AYON server.
Thumbnails are cached via appdirs to a predefined directory.
This should be moved to the thumbnails logic in pipeline, but because it
would overflow OpenPype logic it's here for now.
"""
import os
import time
import collections
import appdirs
FileInfo = collections.namedtuple(
"FileInfo",
("path", "size", "modification_time")
)
class AYONThumbnailCache:
"""Cache of thumbnails on local storage.
Thumbnails are cached via appdirs to a predefined directory. Each project
has its own subfolder with thumbnails -> that's because each project has
its own thumbnail id validation, and file names are thumbnail ids with a
matching extension. Extensions are predefined (.png and .jpeg).
Cache has a cleanup mechanism which is triggered on initialization by
default. The cleanup has 2 levels:
1. soft cleanup which removes all files that are older than 'days_alive'
2. max size cleanup which removes files until the thumbnails folder
contains less than 'max_filesize'
- this is time consuming so it's not triggered automatically
Args:
cleanup (bool): Trigger soft cleanup (Cleanup expired thumbnails).
"""
# Lifetime of thumbnails (in seconds)
# - default 3 days
days_alive = 3
# Max size of thumbnail directory (in bytes)
# - default 2 GiB
max_filesize = 2 * 1024 * 1024 * 1024
def __init__(self, cleanup=True):
self._thumbnails_dir = None
self._days_alive_secs = self.days_alive * 24 * 60 * 60
if cleanup:
self.cleanup()
def get_thumbnails_dir(self):
"""Root directory where thumbnails are stored.
Returns:
str: Path to thumbnails root.
"""
if self._thumbnails_dir is None:
# TODO use generic function
directory = appdirs.user_data_dir("AYON", "Ynput")
self._thumbnails_dir = os.path.join(directory, "thumbnails")
return self._thumbnails_dir
thumbnails_dir = property(get_thumbnails_dir)
def get_thumbnails_dir_file_info(self):
"""Get information about all files in thumbnails directory.
Returns:
List[FileInfo]: List of file information about all files.
"""
thumbnails_dir = self.thumbnails_dir
files_info = []
if not os.path.exists(thumbnails_dir):
return files_info
for root, _, filenames in os.walk(thumbnails_dir):
for filename in filenames:
path = os.path.join(root, filename)
files_info.append(FileInfo(
path, os.path.getsize(path), os.path.getmtime(path)
))
return files_info
def get_thumbnails_dir_size(self, files_info=None):
"""Got full size of thumbnail directory.
Args:
files_info (List[FileInfo]): Prepared file information about
files in thumbnail directory.
Returns:
int: File size of all files in thumbnail directory.
"""
if files_info is None:
files_info = self.get_thumbnails_dir_file_info()
if not files_info:
return 0
return sum(
file_info.size
for file_info in files_info
)
def cleanup(self, check_max_size=False):
"""Cleanup thumbnails directory.
Args:
check_max_size (bool): Also cleanup files to match max size of
thumbnails directory.
"""
thumbnails_dir = self.get_thumbnails_dir()
# Skip if thumbnails dir does not exist yet
if not os.path.exists(thumbnails_dir):
return
self._soft_cleanup(thumbnails_dir)
if check_max_size:
self._max_size_cleanup(thumbnails_dir)
def _soft_cleanup(self, thumbnails_dir):
current_time = time.time()
for root, _, filenames in os.walk(thumbnails_dir):
for filename in filenames:
path = os.path.join(root, filename)
modification_time = os.path.getmtime(path)
if current_time - modification_time > self._days_alive_secs:
os.remove(path)
def _max_size_cleanup(self, thumbnails_dir):
files_info = self.get_thumbnails_dir_file_info()
size = self.get_thumbnails_dir_size(files_info)
if size < self.max_filesize:
return
sorted_file_info = collections.deque(
sorted(files_info, key=lambda item: item.modification_time)
)
diff = size - self.max_filesize
while diff > 0:
if not sorted_file_info:
break
file_info = sorted_file_info.popleft()
diff -= file_info.size
os.remove(file_info.path)
def get_thumbnail_filepath(self, project_name, thumbnail_id):
"""Get thumbnail by thumbnail id.
Args:
project_name (str): Name of project.
thumbnail_id (str): Thumbnail id.
Returns:
Union[str, None]: Path to thumbnail image or None if thumbnail
is not cached yet.
"""
if not thumbnail_id:
return None
for ext in (
".png",
".jpeg",
):
filepath = os.path.join(
self.thumbnails_dir, project_name, thumbnail_id + ext
)
if os.path.exists(filepath):
return filepath
return None
def get_project_dir(self, project_name):
"""Path to root directory for specific project.
Args:
project_name (str): Name of project for which root directory path
should be returned.
Returns:
str: Path to root of project's thumbnails.
"""
return os.path.join(self.thumbnails_dir, project_name)
def make_sure_project_dir_exists(self, project_name):
project_dir = self.get_project_dir(project_name)
if not os.path.exists(project_dir):
os.makedirs(project_dir)
return project_dir
def store_thumbnail(self, project_name, thumbnail_id, content, mime_type):
"""Store thumbnail to cache folder.
Args:
project_name (str): Project where the thumbnail belongs to.
thumbnail_id (str): Id of thumbnail.
content (bytes): Byte content of thumbnail file.
mime_type (str): Mime type of the content (e.g. 'image/png').
Returns:
str: Path to cached thumbnail image file.
"""
if mime_type == "image/png":
ext = ".png"
elif mime_type == "image/jpeg":
ext = ".jpeg"
else:
raise ValueError(
"Unknown mime type for thumbnail \"{}\"".format(mime_type))
project_dir = self.make_sure_project_dir_exists(project_name)
thumbnail_path = os.path.join(project_dir, thumbnail_id + ext)
with open(thumbnail_path, "wb") as stream:
stream.write(content)
current_time = time.time()
os.utime(thumbnail_path, (current_time, current_time))
return thumbnail_path
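
A short usage sketch of the cache; the project name, thumbnail id and file
path are hypothetical:

    cache = AYONThumbnailCache()  # soft cleanup runs on initialization

    with open("/tmp/thumb.png", "rb") as stream:
        cache.store_thumbnail(
            "my_project", "0123456789abcdef", stream.read(), "image/png"
        )

    # Returns the cached file path, or None if it is not cached (anymore).
    path = cache.get_thumbnail_filepath("my_project", "0123456789abcdef")

    # Size-based cleanup is slower, so it is opt-in.
    cache.cleanup(check_max_size=True)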

View file

@ -1,134 +0,0 @@
import os
import uuid
import ayon_api
from ayon_core.client.operations_base import REMOVED_VALUE
class _GlobalCache:
initialized = False
def get_ayon_server_api_connection():
if _GlobalCache.initialized:
con = ayon_api.get_server_api_connection()
else:
from ayon_core.lib.local_settings import get_local_site_id
_GlobalCache.initialized = True
site_id = get_local_site_id()
version = os.getenv("AYON_VERSION")
if ayon_api.is_connection_created():
con = ayon_api.get_server_api_connection()
con.set_site_id(site_id)
con.set_client_version(version)
else:
con = ayon_api.create_connection(site_id, version)
return con
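
A minimal sketch of the lazy initialization above; the project name is
hypothetical and 'get_project' is a standard 'ayon_api' connection method:

    # The first call stamps the connection with the local site id and client
    # version; later calls reuse the global 'ayon_api' connection.
    con = get_ayon_server_api_connection()
    project = con.get_project("my_project")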
def create_entity_id():
return uuid.uuid1().hex
def prepare_attribute_changes(old_entity, new_entity, replace=False):
"""Prepare changes of attributes on entities.
Compare 'attrib' of old and new entity data to prepare only changed
values that should be sent to server for update.
Example:
>>> # Limited entity data to 'attrib'
>>> old_entity = {
... "attrib": {"attr_1": 1, "attr_2": "MyString", "attr_3": True}
... }
>>> new_entity = {
... "attrib": {"attr_1": 2, "attr_3": True, "attr_4": 3}
... }
>>> # Changes if replacement should not happen
>>> expected_changes = {
... "attr_1": 2,
... "attr_4": 3
... }
>>> changes = prepare_attribute_changes(old_entity, new_entity)
>>> changes == expected_changes
True
>>> # Changes if replacement should happen
>>> expected_changes_replace = {
... "attr_1": 2,
... "attr_2": REMOVED_VALUE,
... "attr_4": 3
... }
>>> changes_replace = prepare_attribute_changes(
... old_entity, new_entity, True)
>>> changes_replace == expected_changes_replace
True
Args:
old_entity (dict[str, Any]): Data of entity queried from server.
new_entity (dict[str, Any]): Entity data with applied changes.
replace (bool): New entity should fully replace all old entity values.
Returns:
Dict[str, Any]: Values from new entity only if value has changed.
"""
attrib_changes = {}
new_attrib = new_entity.get("attrib")
old_attrib = old_entity.get("attrib")
if new_attrib is None:
if not replace:
return attrib_changes
new_attrib = {}
if old_attrib is None:
return new_attrib
for attr, new_attr_value in new_attrib.items():
old_attr_value = old_attrib.get(attr)
if old_attr_value != new_attr_value:
attrib_changes[attr] = new_attr_value
if replace:
for attr in old_attrib:
if attr not in new_attrib:
attrib_changes[attr] = REMOVED_VALUE
return attrib_changes
def prepare_entity_changes(old_entity, new_entity, replace=False):
"""Prepare changes of AYON entities.
Compare old and new entity to filter values from new data that changed.
Args:
old_entity (dict[str, Any]): Data of entity queried from server.
new_entity (dict[str, Any]): Entity data with applied changes.
replace (bool): All attributes should be replaced by new values. So
all attribute values that are not on new entity will be removed.
Returns:
Dict[str, Any]: Only values from new entity that changed.
"""
changes = {}
for key, new_value in new_entity.items():
if key == "attrib":
continue
old_value = old_entity.get(key)
if old_value != new_value:
changes[key] = new_value
if replace:
for key in old_entity:
if key not in new_entity:
changes[key] = REMOVED_VALUE
attr_changes = prepare_attribute_changes(old_entity, new_entity, replace)
if attr_changes:
changes["attrib"] = attr_changes
return changes
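
To mirror the doctest on 'prepare_attribute_changes', a small sketch of
'prepare_entity_changes' with hypothetical entity data:

    old_entity = {
        "name": "sh010",
        "label": "SH010",
        "attrib": {"fps": 24, "frameStart": 1001},
    }
    new_entity = {
        "name": "sh010",
        "attrib": {"fps": 25, "frameStart": 1001},
    }

    changes = prepare_entity_changes(old_entity, new_entity)
    # {'attrib': {'fps': 25}}

    changes = prepare_entity_changes(old_entity, new_entity, replace=True)
    # {'label': REMOVED_VALUE, 'attrib': {'fps': 25}}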

View file

@ -54,21 +54,22 @@ class CopyTemplateWorkfile(PreLaunchHook):
self.log.info("Last workfile does not exist.")
project_name = self.data["project_name"]
asset_name = self.data["folder_path"]
folder_path = self.data["folder_path"]
task_name = self.data["task_name"]
host_name = self.application.host_name
project_settings = get_project_settings(project_name)
project_doc = self.data.get("project_doc")
asset_doc = self.data.get("asset_doc")
project_entity = self.data.get("project_entity")
folder_entity = self.data.get("folder_entity")
task_entity = self.data.get("task_entity")
anatomy = self.data.get("anatomy")
if project_doc and asset_doc:
if project_entity and folder_entity and task_entity:
self.log.debug("Started filtering of custom template paths.")
template_path = get_custom_workfile_template(
project_doc,
asset_doc,
task_name,
project_entity,
folder_entity,
task_entity,
host_name,
anatomy,
project_settings
@ -81,7 +82,7 @@ class CopyTemplateWorkfile(PreLaunchHook):
))
template_path = get_custom_workfile_template_by_string_context(
project_name,
asset_name,
folder_path,
task_name,
host_name,
anatomy,

View file

@ -1,4 +1,5 @@
from ayon_core.client import get_project, get_asset_by_name
from ayon_api import get_project, get_folder_by_path, get_task_by_name
from ayon_core.lib.applications import (
PreLaunchHook,
EnvironmentPrepData,
@ -16,7 +17,7 @@ class GlobalHostDataHook(PreLaunchHook):
"""Prepare global objects to `data` that will be used for sure."""
self.prepare_global_data()
if not self.data.get("asset_doc"):
if not self.data.get("folder_entity"):
return
app = self.launch_context.application
@ -27,8 +28,9 @@ class GlobalHostDataHook(PreLaunchHook):
"app": app,
"project_doc": self.data["project_doc"],
"asset_doc": self.data["asset_doc"],
"project_entity": self.data["project_entity"],
"folder_entity": self.data["folder_entity"],
"task_entity": self.data["task_entity"],
"anatomy": self.data["anatomy"],
@ -59,19 +61,37 @@ class GlobalHostDataHook(PreLaunchHook):
return
self.log.debug("Project name is set to \"{}\"".format(project_name))
# Project Entity
project_entity = get_project(project_name)
self.data["project_entity"] = project_entity
# Anatomy
self.data["anatomy"] = Anatomy(project_name)
self.data["anatomy"] = Anatomy(
project_name, project_entity=project_entity
)
# Project document
project_doc = get_project(project_name)
self.data["project_doc"] = project_doc
asset_name = self.data.get("folder_path")
if not asset_name:
folder_path = self.data.get("folder_path")
if not folder_path:
self.log.warning(
"Asset name was not set. Skipping asset document query."
"Folder path is not set. Skipping folder query."
)
return
asset_doc = get_asset_by_name(project_name, asset_name)
self.data["asset_doc"] = asset_doc
folder_entity = get_folder_by_path(project_name, folder_path)
self.data["folder_entity"] = folder_entity
task_name = self.data.get("task_name")
if not task_name:
self.log.warning(
"Task name is not set. Skipping task query."
)
return
if not folder_entity:
return
task_entity = get_task_by_name(
project_name, folder_entity["id"], task_name
)
self.data["task_entity"] = task_entity

View file

@ -28,7 +28,7 @@ class OCIOEnvHook(PreLaunchHook):
template_data = get_template_data_with_names(
project_name=self.data["project_name"],
asset_name=self.data["folder_path"],
folder_path=self.data["folder_path"],
task_name=self.data["task_name"],
host_name=self.host_name,
settings=self.data["project_settings"]

View file

@ -18,7 +18,7 @@ class HostBase(object):
Compared to 'avalon' concept:
What was previously implemented as functions in the host implementation
folder. The host implementation should primarily care about adding ability of creation
(mark subsets to be published) and optionally about referencing published
(mark products to be published) and optionally about referencing published
representations as containers.
Host may need to extend some functionality like working with workfiles
@ -108,7 +108,7 @@ class HostBase(object):
return os.environ.get("AYON_PROJECT_NAME")
def get_current_asset_name(self):
def get_current_folder_path(self):
"""
Returns:
Union[str, None]: Current folder path.
@ -139,7 +139,7 @@ class HostBase(object):
return {
"project_name": self.get_current_project_name(),
"folder_path": self.get_current_asset_name(),
"folder_path": self.get_current_folder_path(),
"task_name": self.get_current_task_name()
}
@ -161,13 +161,13 @@ class HostBase(object):
# Use current context to fill the context title
current_context = self.get_current_context()
project_name = current_context["project_name"]
asset_name = current_context["folder_path"]
folder_path = current_context["folder_path"]
task_name = current_context["task_name"]
items = []
if project_name:
items.append(project_name)
if asset_name:
items.append(asset_name.lstrip("/"))
if folder_path:
items.append(folder_path.lstrip("/"))
if task_name:
items.append(task_name)
if items:

View file

@ -17,7 +17,7 @@ from .pipeline import (
from .lib import (
maintained_selection,
get_extension_manifest_path,
get_asset_settings,
get_folder_settings,
set_settings
)
@ -37,7 +37,7 @@ __all__ = [
# lib
"maintained_selection",
"get_extension_manifest_path",
"get_asset_settings",
"get_folder_settings",
"set_settings",
# plugin

View file

@ -286,20 +286,21 @@ class AfterEffectsRoute(WebSocketRoute):
# This method calls function on the client side
# client functions
async def set_context(self, project, asset, task):
async def set_context(self, project, folder, task):
"""
Sets 'project' and 'asset' to envs, eg. setting context
Sets 'project', 'folder' and 'task' to envs, e.g. setting context
Args:
project (str)
asset (str)
folder (str)
task (str)
"""
log.info("Setting context change")
log.info("project {} asset {} ".format(project, asset))
log.info("project {} folder {} ".format(project, folder))
if project:
os.environ["AYON_PROJECT_NAME"] = project
if asset:
os.environ["AYON_FOLDER_PATH"] = asset
if folder:
os.environ["AYON_FOLDER_PATH"] = folder
if task:
os.environ["AYON_TASK_NAME"] = task

View file

@ -4,8 +4,10 @@ import json
import contextlib
import logging
import ayon_api
from ayon_core.pipeline.context_tools import get_current_context
from ayon_core.client import get_asset_by_name
from .ws_stub import get_stub
log = logging.getLogger(__name__)
@ -85,21 +87,21 @@ def get_background_layers(file_url):
return layers
def get_asset_settings(asset_doc):
"""Get settings on current asset from database.
def get_folder_settings(folder_entity):
"""Get settings of current folder.
Returns:
dict: Scene data.
"""
asset_data = asset_doc["data"]
fps = asset_data.get("fps", 0)
frame_start = asset_data.get("frameStart", 0)
frame_end = asset_data.get("frameEnd", 0)
handle_start = asset_data.get("handleStart", 0)
handle_end = asset_data.get("handleEnd", 0)
resolution_width = asset_data.get("resolutionWidth", 0)
resolution_height = asset_data.get("resolutionHeight", 0)
folder_attributes = folder_entity["attrib"]
fps = folder_attributes.get("fps", 0)
frame_start = folder_attributes.get("frameStart", 0)
frame_end = folder_attributes.get("frameEnd", 0)
handle_start = folder_attributes.get("handleStart", 0)
handle_end = folder_attributes.get("handleEnd", 0)
resolution_width = folder_attributes.get("resolutionWidth", 0)
resolution_height = folder_attributes.get("resolutionHeight", 0)
duration = (frame_end - frame_start + 1) + handle_start + handle_end
return {
@ -127,9 +129,11 @@ def set_settings(frames, resolution, comp_ids=None, print_msg=True):
frame_start = frames_duration = fps = width = height = None
current_context = get_current_context()
asset_doc = get_asset_by_name(current_context["project_name"],
current_context["folder_path"])
settings = get_asset_settings(asset_doc)
folder_entity = ayon_api.get_folder_by_path(
current_context["project_name"],
current_context["folder_path"]
)
settings = get_folder_settings(folder_entity)
msg = ''
if frames:
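
A small sketch of 'get_folder_settings' with a hypothetical folder entity,
showing how the duration is derived from the attributes:

    folder_entity = {
        "attrib": {
            "fps": 25,
            "frameStart": 1001,
            "frameEnd": 1010,
            "handleStart": 5,
            "handleEnd": 5,
            "resolutionWidth": 1920,
            "resolutionHeight": 1080,
        }
    }
    settings = get_folder_settings(folder_entity)
    # duration == (1010 - 1001 + 1) + 5 + 5 == 20 frames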

View file

@ -271,7 +271,7 @@ def containerise(name,
"name": name,
"namespace": namespace,
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"members": comp.members or [comp.id]
}

View file

@ -218,7 +218,13 @@ class RenderCreator(Creator):
"""
def get_dynamic_data(
self, project_name, asset_doc, task_name, variant, host_name, instance
self,
project_name,
folder_entity,
task_entity,
variant,
host_name,
instance
):
dynamic_data = {}
if instance is not None:

View file

@ -1,5 +1,6 @@
import ayon_api
import ayon_core.hosts.aftereffects.api as api
from ayon_core.client import get_asset_by_name
from ayon_core.pipeline import (
AutoCreator,
CreatedInstance
@ -39,32 +40,37 @@ class AEWorkfileCreator(AutoCreator):
context = self.create_context
project_name = context.get_current_project_name()
asset_name = context.get_current_asset_name()
folder_path = context.get_current_folder_path()
task_name = context.get_current_task_name()
host_name = context.host_name
existing_asset_name = None
existing_folder_path = None
if existing_instance is not None:
existing_asset_name = existing_instance.get("folderPath")
existing_folder_path = existing_instance.get("folderPath")
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
data = {
"folderPath": asset_name,
"folderPath": folder_path,
"task": task_name,
"variant": self.default_variant,
}
data.update(self.get_dynamic_data(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
None,
@ -79,17 +85,22 @@ class AEWorkfileCreator(AutoCreator):
new_instance.data_to_store())
elif (
existing_asset_name != asset_name
existing_folder_path != folder_path
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
existing_instance["folderPath"] = asset_name
existing_instance["folderPath"] = folder_path
existing_instance["task"] = task_name
existing_instance["productName"] = product_name

View file

@ -31,7 +31,7 @@ class BackgroundLoader(api.AfterEffectsLoader):
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(context["asset"]["name"], name))
"{}_{}".format(context["folder"]["name"], name))
path = self.filepath_from_context(context)
layers = get_background_layers(path)
@ -59,12 +59,10 @@ class BackgroundLoader(api.AfterEffectsLoader):
def update(self, container, context):
""" Switch asset or change version """
stub = self.get_stub()
asset_doc = context["asset"]
subset_doc = context["subset"]
repre_doc = context["representation"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
repre_entity = context["representation"]
folder_name = asset_doc["name"]
product_name = subset_doc["name"]
_ = container.pop("layer")
# without iterator number (_001, 002...)
@ -82,7 +80,7 @@ class BackgroundLoader(api.AfterEffectsLoader):
else: # switching version - keep same name
comp_name = container["namespace"]
path = get_representation_path(repre_doc)
path = get_representation_path(repre_entity)
layers = get_background_layers(path)
comp = stub.reload_background(container["members"][1],
@ -90,7 +88,7 @@ class BackgroundLoader(api.AfterEffectsLoader):
layers)
# update container
container["representation"] = str(repre_doc["_id"])
container["representation"] = repre_entity["id"]
container["name"] = product_name
container["namespace"] = comp_name
container["members"] = comp.members

View file

@ -25,7 +25,10 @@ class FileLoader(api.AfterEffectsLoader):
layers = stub.get_items(comps=True, folders=True, footages=True)
existing_layers = [layer.name for layer in layers]
comp_name = get_unique_layer_name(
existing_layers, "{}_{}".format(context["asset"]["name"], name))
existing_layers, "{}_{}".format(
context["folder"]["name"], name
)
)
import_options = {}
@ -35,7 +38,7 @@ class FileLoader(api.AfterEffectsLoader):
import_options['sequence'] = True
if not path:
repr_id = context["representation"]["_id"]
repr_id = context["representation"]["id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
@ -69,12 +72,9 @@ class FileLoader(api.AfterEffectsLoader):
stub = self.get_stub()
layer = container.pop("layer")
asset_doc = context["asset"]
subset_doc = context["subset"]
repre_doc = context["representation"]
folder_name = asset_doc["name"]
product_name = subset_doc["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
repre_entity = context["representation"]
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
@ -88,11 +88,11 @@ class FileLoader(api.AfterEffectsLoader):
"{}_{}".format(folder_name, product_name))
else: # switching version - keep same name
layer_name = container["namespace"]
path = get_representation_path(repre_doc)
path = get_representation_path(repre_entity)
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name)
stub.imprint(
layer.id, {"representation": str(repre_doc["_id"]),
layer.id, {"representation": repre_entity["id"],
"name": product_name,
"namespace": layer_name}
)

View file

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Subset context</title>
<title>Product context</title>
<description>
## Invalid product context
@ -15,7 +15,7 @@ You can fix this with "repair" button on the right and refresh Publish at the bo
### __Detailed Info__ (optional)
This might happen if you reuse an old workfile and open it in a different context.
(Eg. you created product name "renderCompositingDefault" from folder "Robot' in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but existing product for "Robot" asset stayed in the workfile.)
(E.g. you created product name "renderCompositingDefault" from folder "Robot" in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but the existing product for the "Robot" folder stayed in the workfile.)
</detail>
</error>
</root>

View file

@ -5,20 +5,20 @@
<description>
## Invalid scene setting found
One of the settings in a scene doesn't match to asset settings in database.
One of the settings in the scene doesn't match the folder settings in the database.
{invalid_setting_str}
### How to repair?
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
Change values for {invalid_keys_str} in the scene OR change them on the folder in the database if they are wrong there.
In the scene it is right mouse click on published composition > `Composition Settings`.
</description>
<detail>
### __Detailed Info__ (optional)
This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database.
This error is shown when, for example, the resolution in the scene doesn't match the resolution set on the folder in the database.
Either value in the database or in the scene is wrong.
</detail>
</error>

View file

@ -1,6 +1,6 @@
import pyblish.api
from ayon_core.pipeline import get_current_asset_name
from ayon_core.pipeline import get_current_folder_path
from ayon_core.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
@ -8,8 +8,8 @@ from ayon_core.pipeline.publish import (
from ayon_core.hosts.aftereffects.api import get_stub
class ValidateInstanceAssetRepair(pyblish.api.Action):
"""Repair the instance asset with value from Context."""
class ValidateInstanceFolderRepair(pyblish.api.Action):
"""Repair the instance folder with value from Context."""
label = "Repair"
icon = "wrench"
@ -30,35 +30,35 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
for instance in instances:
data = stub.read(instance[0])
data["folderPath"] = get_current_asset_name()
data["folderPath"] = get_current_folder_path()
stub.imprint(instance[0].instance_id, data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):
"""Validate the instance asset is the current selected context asset.
class ValidateInstanceFolder(pyblish.api.InstancePlugin):
"""Validate the instance folder is the current selected context folder.
As it might happen that multiple workfiles are opened at the same time,
switching between them would mess with the selected context (from Launcher
or Ftrack).
In that case outputs might be output under wrong asset!
In that case outputs might be output under the wrong folder!
Repair action will use Context asset value (from Workfiles or Launcher)
Repair action will use Context folder value (from Workfiles or Launcher)
Closing and reopening with Workfiles will refresh Context value.
"""
label = "Validate Instance Asset"
label = "Validate Instance Folder"
hosts = ["aftereffects"]
actions = [ValidateInstanceAssetRepair]
actions = [ValidateInstanceFolderRepair]
order = ValidateContentsOrder
def process(self, instance):
instance_asset = instance.data["folderPath"]
current_asset = get_current_asset_name()
instance_folder = instance.data["folderPath"]
current_folder = get_current_folder_path()
msg = (
f"Instance asset {instance_asset} is not the same "
f"as current context {current_asset}."
f"Instance folder {instance_folder} is not the same "
f"as current context {current_folder}."
)
if instance_asset != current_asset:
if instance_folder != current_folder:
raise PublishXmlValidationError(self, msg)

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Validate scene settings.
Requires:
instance -> assetEntity
instance -> folderEntity
instance -> anatomyData
"""
import os
@ -13,7 +13,7 @@ from ayon_core.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin
)
from ayon_core.hosts.aftereffects.api import get_asset_settings
from ayon_core.hosts.aftereffects.api import get_folder_settings
class ValidateSceneSettings(OptionalPyblishPluginMixin,
@ -48,7 +48,7 @@ class ValidateSceneSettings(OptionalPyblishPluginMixin,
fps
handleStart
handleEnd
skip_resolution_check - fill entity type ('asset') to skip validation
skip_resolution_check - fill entity type ('folder') to skip validation
resolutionWidth
resolutionHeight
TODO support in extension is missing for now
@ -71,11 +71,11 @@ class ValidateSceneSettings(OptionalPyblishPluginMixin,
if not self.is_active(instance.data):
return
asset_doc = instance.data["assetEntity"]
expected_settings = get_asset_settings(asset_doc)
folder_entity = instance.data["folderEntity"]
expected_settings = get_folder_settings(folder_entity)
self.log.info("config from DB::{}".format(expected_settings))
task_name = instance.data["anatomyData"]["task"]["name"]
task_name = instance.data["task"]
if any(re.search(pattern, task_name)
for pattern in self.skip_resolution_check):
expected_settings.pop("resolutionWidth")

View file

@ -16,7 +16,7 @@ import bpy
import bpy.utils.previews
from ayon_core import style
from ayon_core.pipeline import get_current_asset_name, get_current_task_name
from ayon_core.pipeline import get_current_folder_path, get_current_task_name
from ayon_core.tools.utils import host_tools
from .workio import OpenFileCacher
@ -355,7 +355,7 @@ class SetFrameRange(bpy.types.Operator):
bl_label = "Set Frame Range"
def execute(self, context):
data = pipeline.get_asset_data()
data = pipeline.get_folder_attributes()
pipeline.set_frame_range(data)
return {"FINISHED"}
@ -365,7 +365,7 @@ class SetResolution(bpy.types.Operator):
bl_label = "Set Resolution"
def execute(self, context):
data = pipeline.get_asset_data()
data = pipeline.get_folder_attributes()
pipeline.set_resolution(data)
return {"FINISHED"}
@ -388,9 +388,9 @@ class TOPBAR_MT_avalon(bpy.types.Menu):
else:
pyblish_menu_icon_id = 0
asset = get_current_asset_name()
task = get_current_task_name()
context_label = f"{asset}, {task}"
folder_path = get_current_folder_path()
task_name = get_current_task_name()
context_label = f"{folder_path}, {task_name}"
context_label_item = layout.row()
context_label_item.operator(
LaunchWorkFiles.bl_idname, text=context_label

View file

@ -9,6 +9,7 @@ from . import lib
from . import ops
import pyblish.api
import ayon_api
from ayon_core.host import (
HostBase,
@ -16,11 +17,10 @@ from ayon_core.host import (
IPublishHost,
ILoadHost
)
from ayon_core.client import get_asset_by_name
from ayon_core.pipeline import (
schema,
get_current_project_name,
get_current_asset_name,
get_current_folder_path,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
@ -221,12 +221,12 @@ def message_window(title, message):
_process_app_events()
def get_asset_data():
def get_folder_attributes():
project_name = get_current_project_name()
asset_name = get_current_asset_name()
asset_doc = get_asset_by_name(project_name, asset_name)
folder_path = get_current_folder_path()
folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
return asset_doc.get("data")
return folder_entity["attrib"]
def set_frame_range(data):
@ -279,7 +279,7 @@ def on_new():
set_resolution_startup = settings.get("set_resolution_startup")
set_frames_startup = settings.get("set_frames_startup")
data = get_asset_data()
data = get_folder_attributes()
if set_resolution_startup:
set_resolution(data)
@ -300,7 +300,7 @@ def on_open():
set_resolution_startup = settings.get("set_resolution_startup")
set_frames_startup = settings.get("set_frames_startup")
data = get_asset_data()
data = get_folder_attributes()
if set_resolution_startup:
set_resolution(data)
@ -468,7 +468,7 @@ def containerise(name: str,
"""
node_name = f"{context['asset']['name']}_{name}"
node_name = f"{context['folder']['name']}_{name}"
if namespace:
node_name = f"{namespace}:{node_name}"
if suffix:
@ -484,7 +484,7 @@ def containerise(name: str,
"name": name,
"namespace": namespace or '',
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
}
metadata_update(container, data)
@ -523,7 +523,7 @@ def containerise_existing(
"name": name,
"namespace": namespace or '',
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
}
metadata_update(container, data)

View file

@ -49,7 +49,7 @@ def prepare_scene_name(
def get_unique_number(
folder_name: str, product_name: str
) -> str:
"""Return a unique number based on the asset name."""
"""Return a unique number based on the folder name."""
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
return "01"
@ -220,9 +220,9 @@ class BaseCreator(Creator):
Create new instance and store it.
Args:
product_name(str): Subset name of created instance.
instance_data(dict): Instance base data.
pre_create_data(dict): Data based on pre creation attributes.
product_name (str): Product name of created instance.
instance_data (dict): Instance base data.
pre_create_data (dict): Data based on pre creation attributes.
Those may affect how creator works.
"""
# Get Instance Container or create it if it does not exist
@ -232,9 +232,9 @@ class BaseCreator(Creator):
bpy.context.scene.collection.children.link(instances)
# Create asset group
asset_name = instance_data["folderPath"].split("/")[-1]
folder_name = instance_data["folderPath"].split("/")[-1]
name = prepare_scene_name(asset_name, product_name)
name = prepare_scene_name(folder_name, product_name)
if self.create_as_asset_group:
# Create instance as empty
instance_node = bpy.data.objects.new(name=name, object_data=None)
@ -312,9 +312,9 @@ class BaseCreator(Creator):
"productName" in changes.changed_keys
or "folderPath" in changes.changed_keys
) and created_instance.product_type != "workfile":
asset_name = data["folderPath"].split("/")[-1]
folder_name = data["folderPath"].split("/")[-1]
name = prepare_scene_name(
asset_name, data["productName"]
folder_name, data["productName"]
)
node.name = name
@ -346,7 +346,7 @@ class BaseCreator(Creator):
"""Fill instance data with required items.
Args:
product_name(str): Subset name of created instance.
product_name(str): Product name of created instance.
instance_data(dict): Instance base data.
instance_node(bpy.types.ID): Instance node in blender scene.
"""
@ -465,8 +465,8 @@ class AssetLoader(LoaderPlugin):
filepath = self.filepath_from_context(context)
assert Path(filepath).exists(), f"{filepath} doesn't exist."
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
unique_number = get_unique_number(
folder_name, product_name
)
@ -498,8 +498,8 @@ class AssetLoader(LoaderPlugin):
# loader=self.__class__.__name__,
# )
# folder_name = context["asset"]["name"]
# product_name = context["subset"]["name"]
# folder_name = context["folder"]["name"]
# product_name = context["product"]["name"]
# instance_name = prepare_scene_name(
# folder_name, product_name, unique_number
# ) + '_CON'

View file

@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
"""Converter for legacy Houdini products."""
from ayon_core.pipeline.create.creator_plugins import SubsetConvertorPlugin
from ayon_core.pipeline.create.creator_plugins import ProductConvertorPlugin
from ayon_core.hosts.blender.api.lib import imprint
class BlenderLegacyConvertor(SubsetConvertorPlugin):
class BlenderLegacyConvertor(ProductConvertorPlugin):
"""Find and convert any legacy products in the scene.
This Converter will find all legacy products in the scene and will

View file

@ -1,7 +1,7 @@
import bpy
import ayon_api
from ayon_core.pipeline import CreatedInstance, AutoCreator
from ayon_core.client import get_asset_by_name
from ayon_core.hosts.blender.api.plugin import BaseCreator
from ayon_core.hosts.blender.api.pipeline import (
AVALON_PROPERTY,
@ -33,33 +33,38 @@ class CreateWorkfile(BaseCreator, AutoCreator):
)
project_name = self.project_name
asset_name = self.create_context.get_current_asset_name()
folder_path = self.create_context.get_current_folder_path()
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
existing_asset_name = None
existing_folder_path = None
if workfile_instance is not None:
existing_asset_name = workfile_instance.get("folderPath")
existing_folder_path = workfile_instance.get("folderPath")
if not workfile_instance:
asset_doc = get_asset_by_name(project_name, asset_name)
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
task_name,
host_name,
)
data = {
"folderPath": asset_name,
"folderPath": folder_path,
"task": task_name,
"variant": task_name,
}
data.update(
self.get_dynamic_data(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
task_name,
host_name,
workfile_instance,
@ -72,20 +77,25 @@ class CreateWorkfile(BaseCreator, AutoCreator):
self._add_instance_to_context(workfile_instance)
elif (
existing_asset_name != asset_name
existing_folder_path != folder_path
or workfile_instance["task"] != task_name
):
# Update instance context if it's different
asset_doc = get_asset_by_name(project_name, asset_name)
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
workfile_instance["folderPath"] = asset_name
workfile_instance["folderPath"] = folder_path
workfile_instance["task"] = task_name
workfile_instance["productName"] = product_name

View file

@ -4,8 +4,8 @@ from ayon_core.hosts.blender.api import plugin
def append_workfile(context, fname, do_import):
folder_name = context['asset']['name']
product_name = context['subset']['name']
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
group_name = plugin.prepare_scene_name(folder_name, product_name)

View file

@ -134,8 +134,8 @@ class CacheModelLoader(plugin.AssetLoader):
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -161,17 +161,17 @@ class CacheModelLoader(plugin.AssetLoader):
self._link_objects(objects, asset_group, containers, asset_group)
product_type = context["subset"]["data"]["family"]
product_type = context["product"]["productType"]
asset_group[AVALON_PROPERTY] = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"parent": context["representation"]["versionId"],
"productType": product_type,
"objectName": group_name
}
@ -191,16 +191,16 @@ class CacheModelLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert asset_group, (
@ -245,7 +245,7 @@ class CacheModelLoader(plugin.AssetLoader):
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(repre_doc["_id"])
metadata["representation"] = repre_entity["id"]
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.

View file

@ -44,8 +44,8 @@ class BlendActionLoader(plugin.AssetLoader):
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
lib_container = plugin.prepare_scene_name(folder_name, product_name)
container_name = plugin.prepare_scene_name(
folder_name, product_name, namespace
@ -126,18 +126,18 @@ class BlendActionLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
collection = bpy.data.collections.get(
container["objectName"]
)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
logger.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert collection, (
@ -241,7 +241,7 @@ class BlendActionLoader(plugin.AssetLoader):
# Save the list of objects in the metadata container
collection_metadata["objects"] = objects_list
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(repre_doc["_id"])
collection_metadata["representation"] = repre_entity["id"]
bpy.ops.object.select_all(action='DESELECT')

View file

@ -39,8 +39,8 @@ class AudioLoader(plugin.AssetLoader):
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -83,11 +83,11 @@ class AudioLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name,
"audio": audio
}
@ -105,15 +105,15 @@ class AudioLoader(plugin.AssetLoader):
representation (openpype:representation-1.0): Representation to
update, from `host.ls()`.
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert asset_group, (
@ -176,8 +176,8 @@ class AudioLoader(plugin.AssetLoader):
window_manager.windows[-1].screen.areas[0].type = old_type
metadata["libpath"] = str(libpath)
metadata["representation"] = str(repre_doc["_id"])
metadata["parent"] = str(repre_doc["parent"])
metadata["representation"] = repre_entity["id"]
metadata["parent"] = repre_entity["versionId"]
metadata["audio"] = new_audio
def exec_remove(self, container: Dict) -> bool:

View file

@ -127,15 +127,15 @@ class BlendLoader(plugin.AssetLoader):
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
try:
product_type = context["subset"]["data"]["family"]
product_type = context["product"]["productType"]
except ValueError:
product_type = "model"
representation = str(context["representation"]["_id"])
representation = context["representation"]["id"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -162,11 +162,11 @@ class BlendLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name,
"members": members,
}
@ -185,10 +185,10 @@ class BlendLoader(plugin.AssetLoader):
"""
Update the loaded asset.
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
group_name = container["objectName"]
asset_group = bpy.data.objects.get(group_name)
libpath = Path(get_representation_path(repre_doc)).as_posix()
libpath = Path(get_representation_path(repre_entity)).as_posix()
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
@ -235,8 +235,8 @@ class BlendLoader(plugin.AssetLoader):
new_data = {
"libpath": libpath,
"representation": str(repre_doc["_id"]),
"parent": str(repre_doc["parent"]),
"representation": repre_entity["id"],
"parent": repre_entity["versionId"],
"members": members,
}

View file

@ -82,11 +82,11 @@ class BlendSceneLoader(plugin.AssetLoader):
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
try:
product_type = context["subset"]["data"]["family"]
product_type = context["product"]["productType"]
except ValueError:
product_type = "model"
@ -114,11 +114,11 @@ class BlendSceneLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name,
"members": members,
}
@ -137,10 +137,10 @@ class BlendSceneLoader(plugin.AssetLoader):
"""
Update the loaded asset.
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
group_name = container["objectName"]
asset_group = bpy.data.collections.get(group_name)
libpath = Path(get_representation_path(repre_doc)).as_posix()
libpath = Path(get_representation_path(repre_entity)).as_posix()
assert asset_group, (
f"The asset is not loaded: {container['objectName']}"
@ -202,8 +202,8 @@ class BlendSceneLoader(plugin.AssetLoader):
new_data = {
"libpath": libpath,
"representation": str(repre_doc["_id"]),
"parent": str(repre_doc["parent"]),
"representation": repre_entity["id"],
"parent": repre_entity["versionId"],
"members": members,
}

View file

@ -84,8 +84,8 @@ class AbcCameraLoader(plugin.AssetLoader):
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -119,11 +119,11 @@ class AbcCameraLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or "",
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name,
}
@ -142,16 +142,16 @@ class AbcCameraLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert asset_group, (
@ -186,7 +186,7 @@ class AbcCameraLoader(plugin.AssetLoader):
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(repre_doc["_id"])
metadata["representation"] = repre_entity["id"]
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.

View file

@ -87,8 +87,8 @@ class FbxCameraLoader(plugin.AssetLoader):
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -122,11 +122,11 @@ class FbxCameraLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name
}
@ -145,16 +145,16 @@ class FbxCameraLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert asset_group, (
@ -196,7 +196,7 @@ class FbxCameraLoader(plugin.AssetLoader):
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(repre_doc["_id"])
metadata["representation"] = repre_entity["id"]
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.

View file

@ -131,8 +131,8 @@ class FbxModelLoader(plugin.AssetLoader):
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -166,11 +166,11 @@ class FbxModelLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name
}
@ -189,16 +189,16 @@ class FbxModelLoader(plugin.AssetLoader):
Warning:
No nested collections are supported at the moment!
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert asset_group, (
@ -251,7 +251,7 @@ class FbxModelLoader(plugin.AssetLoader):
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(repre_doc["_id"])
metadata["representation"] = repre_entity["id"]
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.

View file

@ -132,7 +132,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
# # name=f"{unique_number}_{product[name]}_animation",
# asset=asset,
# options={"useSelection": False}
# # data={"dependencies": str(context["representation"]["_id"])}
# # data={"dependencies": context["representation"]["id"]}
# )
def process_asset(self,
@ -148,8 +148,8 @@ class JsonLayoutLoader(plugin.AssetLoader):
options: Additional settings dictionary
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
asset_name = plugin.prepare_scene_name(folder_name, product_name)
unique_number = plugin.get_unique_number(folder_name, product_name)
@ -177,11 +177,11 @@ class JsonLayoutLoader(plugin.AssetLoader):
"name": name,
"namespace": namespace or '',
"loader": str(self.__class__.__name__),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"libpath": libpath,
"asset_name": asset_name,
"parent": str(context["representation"]["parent"]),
"productType": context["subset"]["data"]["family"],
"parent": context["representation"]["versionId"],
"productType": context["product"]["productType"],
"objectName": group_name
}
@ -197,16 +197,16 @@ class JsonLayoutLoader(plugin.AssetLoader):
will not be removed, only unlinked. Normally this should not be the
case though.
"""
repre_doc = context["representation"]
repre_entity = context["representation"]
object_name = container["objectName"]
asset_group = bpy.data.objects.get(object_name)
libpath = Path(get_representation_path(repre_doc))
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert asset_group, (
@ -270,7 +270,7 @@ class JsonLayoutLoader(plugin.AssetLoader):
asset_group.matrix_basis = mat
metadata["libpath"] = str(libpath)
metadata["representation"] = str(repre_doc["_id"])
metadata["representation"] = repre_entity["id"]
def exec_remove(self, container: Dict) -> bool:
"""Remove an existing container from a Blender scene.

View file

@ -93,8 +93,8 @@ class BlendLookLoader(plugin.AssetLoader):
"""
libpath = self.filepath_from_context(context)
folder_name = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
lib_container = plugin.prepare_scene_name(
folder_name, product_name
@ -130,8 +130,8 @@ class BlendLookLoader(plugin.AssetLoader):
metadata["objects"] = objects
metadata["materials"] = materials
metadata["parent"] = str(context["representation"]["parent"])
metadata["product_type"] = context["subset"]["data"]["family"]
metadata["parent"] = context["representation"]["versionId"]
metadata["product_type"] = context["product"]["productType"]
nodes = list(container.objects)
nodes.append(container)
@ -140,14 +140,14 @@ class BlendLookLoader(plugin.AssetLoader):
def update(self, container: Dict, context: Dict):
collection = bpy.data.collections.get(container["objectName"])
repre_doc = context["representation"]
libpath = Path(get_representation_path(repre_doc))
repre_entity = context["representation"]
libpath = Path(get_representation_path(repre_entity))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(repre_doc, indent=2),
pformat(repre_entity, indent=2),
)
assert collection, (
@ -202,7 +202,7 @@ class BlendLookLoader(plugin.AssetLoader):
collection_metadata["objects"] = objects
collection_metadata["materials"] = materials
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(repre_doc["_id"])
collection_metadata["representation"] = repre_entity["id"]
def remove(self, container: Dict) -> bool:
collection = bpy.data.collections.get(container["objectName"])

View file

@ -19,7 +19,7 @@ class ExtractABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.abc"

View file

@ -23,7 +23,7 @@ class ExtractAnimationABC(
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.abc"

View file

@ -23,7 +23,7 @@ class ExtractBlend(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.blend"

View file

@ -26,7 +26,7 @@ class ExtractBlendAnimation(
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.blend"

View file

@ -21,7 +21,7 @@ class ExtractCameraABC(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.abc"

View file

@ -20,7 +20,7 @@ class ExtractCamera(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.fbx"

View file

@ -21,7 +21,7 @@ class ExtractFBX(publish.Extractor, publish.OptionalPyblishPluginMixin):
# Define extract output file path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
filename = f"{instance_name}.fbx"

View file

@ -145,7 +145,7 @@ class ExtractAnimationFBX(
root.select_set(True)
armature.select_set(True)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
fbx_filename = f"{instance_name}_{armature.name}.fbx"

View file

@ -5,7 +5,8 @@ import bpy
import bpy_extras
import bpy_extras.anim_utils
from ayon_core.client import get_representation_by_name
from ayon_api import get_representations
from ayon_core.pipeline import publish
from ayon_core.hosts.blender.api import plugin
from ayon_core.hosts.blender.api.pipeline import AVALON_PROPERTY
@ -134,6 +135,8 @@ class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
fbx_count = 0
project_name = instance.context.data["projectName"]
version_ids = set()
filtered_assets = []
for asset in asset_group.children:
metadata = asset.get(AVALON_PROPERTY)
if not metadata:
@ -146,42 +149,47 @@ class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
)
continue
filtered_assets.append((asset, metadata))
version_ids.add(metadata["parent"])
repre_entities = get_representations(
project_name,
representation_names={"blend", "fbx", "abc"},
version_ids=version_ids,
fields={"id", "versionId", "name"}
)
repre_mapping_by_version_id = {
version_id: {}
for version_id in version_ids
}
for repre_entity in repre_entities:
version_id = repre_entity["versionId"]
repre_mapping_by_version_id[version_id][repre_entity["name"]] = (
repre_entity
)
for asset, metadata in filtered_assets:
version_id = metadata["parent"]
product_type = metadata.get("product_type")
if product_type is None:
product_type = metadata["family"]
repres_by_name = repre_mapping_by_version_id[version_id]
self.log.debug("Parent: {}".format(version_id))
# Get blend reference
blend = get_representation_by_name(
project_name, "blend", version_id, fields=["_id"]
)
blend_id = None
if blend:
blend_id = blend["_id"]
# Get fbx reference
fbx = get_representation_by_name(
project_name, "fbx", version_id, fields=["_id"]
)
fbx_id = None
if fbx:
fbx_id = fbx["_id"]
# Get abc reference
abc = get_representation_by_name(
project_name, "abc", version_id, fields=["_id"]
)
abc_id = None
if abc:
abc_id = abc["_id"]
json_element = {}
if blend_id:
json_element["reference"] = str(blend_id)
if fbx_id:
json_element["reference_fbx"] = str(fbx_id)
if abc_id:
json_element["reference_abc"] = str(abc_id)
# Get blend, fbx and abc reference
blend_id = repres_by_name.get("blend", {}).get("id")
fbx_id = repres_by_name.get("fbx", {}).get("id")
abc_id = repres_by_name.get("abc", {}).get("id")
json_element = {
key: value
for key, value in (
("reference", blend_id),
("reference_fbx", fbx_id),
("reference_abc", abc_id),
)
if value
}
json_element["product_type"] = product_type
json_element["instance_name"] = asset.name
json_element["asset_name"] = metadata["asset_name"]
@ -228,7 +236,7 @@ class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
json_data.append(json_element)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
instance_name = f"{folder_name}_{product_name}"
json_filename = f"{instance_name}.json"
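
Note: the hunk above swaps three per-version get_representation_by_name calls for a single bulk get_representations query. A minimal standalone sketch of that grouping pattern (the helper name is illustrative; assumes an initialized ayon_api connection):

import ayon_api

def map_repre_ids_by_version(project_name, version_ids):
    # One server round-trip for all versions, then group by version id.
    mapping = {version_id: {} for version_id in version_ids}
    for repre in ayon_api.get_representations(
        project_name,
        representation_names={"blend", "fbx", "abc"},
        version_ids=version_ids,
        fields={"id", "versionId", "name"},
    ):
        mapping[repre["versionId"]][repre["name"]] = repre["id"]
    return mapping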

View file

@ -55,7 +55,7 @@ class ExtractPlayblast(publish.Extractor, publish.OptionalPyblishPluginMixin):
# get output path
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
filename = f"{folder_name}_{product_name}"

View file

@ -32,7 +32,7 @@ class ExtractThumbnail(publish.Extractor):
return
stagingdir = self.staging_dir(instance)
folder_name = instance.data["assetEntity"]["name"]
folder_name = instance.data["folderEntity"]["name"]
product_name = instance.data["productName"]
filename = f"{folder_name}_{product_name}"

View file

@ -44,7 +44,7 @@ class IntegrateAnimation(
break
if not rep:
continue
obj_id = rep["representation"]["_id"]
obj_id = rep["representation"]["id"]
if obj_id:
json_dict["representation_id"] = str(obj_id)

View file

@ -16,9 +16,9 @@ class CelactionPrelaunchHook(PreLaunchHook):
launch_types = {LaunchTypes.local}
def execute(self):
asset_doc = self.data["asset_doc"]
width = asset_doc["data"]["resolutionWidth"]
height = asset_doc["data"]["resolutionHeight"]
folder_attributes = self.data["folder_entity"]["attrib"]
width = folder_attributes["resolutionWidth"]
height = folder_attributes["resolutionHeight"]
# Add workfile path to launch arguments
workfile_path = self.workfile_path()

View file

@ -1,8 +1,6 @@
import os
import pyblish.api
from ayon_core.client import get_asset_name_identifier
class CollectCelactionInstances(pyblish.api.ContextPlugin):
""" Adds the celaction render instances """
@ -16,24 +14,20 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
version = context.data["version"]
asset_entity = context.data["assetEntity"]
project_entity = context.data["projectEntity"]
asset_name = get_asset_name_identifier(asset_entity)
folder_entity = context.data["folderEntity"]
folder_attributes = folder_entity["attrib"]
shared_instance_data = {
"folderPath": asset_name,
"frameStart": asset_entity["data"]["frameStart"],
"frameEnd": asset_entity["data"]["frameEnd"],
"handleStart": asset_entity["data"]["handleStart"],
"handleEnd": asset_entity["data"]["handleEnd"],
"fps": asset_entity["data"]["fps"],
"resolutionWidth": asset_entity["data"].get(
"resolutionWidth",
project_entity["data"]["resolutionWidth"]),
"resolutionHeight": asset_entity["data"].get(
"resolutionHeight",
project_entity["data"]["resolutionHeight"]),
"folderPath": folder_entity["path"],
"frameStart": folder_attributes["frameStart"],
"frameEnd": folder_attributes["frameEnd"],
"handleStart": folder_attributes["handleStart"],
"handleEnd": folder_attributes["handleEnd"],
"fps": folder_attributes["fps"],
"resolutionWidth": folder_attributes["resolutionWidth"],
"resolutionHeight": folder_attributes["resolutionHeight"],
"pixelAspect": 1,
"step": 1,
"version": version
@ -83,7 +77,7 @@ class CollectCelactionInstances(pyblish.api.ContextPlugin):
# getting instance state
instance.data["publish"] = True
# add assetEntity data into instance
# add folderEntity data into instance
instance.data.update({
"label": "{} - farm".format(product_name),
"productType": product_type,

View file

@ -73,7 +73,7 @@ def containerise(flame_clip_segment,
"name": str(name),
"namespace": str(namespace),
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
}
if data:

View file

@ -748,18 +748,16 @@ class ClipLoader(LoaderPlugin):
Returns:
str: colorspace name or None
"""
version = context['version']
version_data = version.get("data", {})
colorspace = version_data.get(
"colorspace", None
)
version_entity = context["version"]
version_attributes = version_entity["attrib"]
colorspace = version_attributes.get("colorSpace")
if (
not colorspace
or colorspace == "Unknown"
):
colorspace = context["representation"]["data"].get(
"colorspace", None)
"colorspace")
return colorspace
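
Note: the lookup order above is version attribute first, representation data second. A compact standalone sketch of the same resolution logic:

def resolve_colorspace(context):
    # Prefer the version-level attribute; "Unknown" counts as unset.
    colorspace = context["version"]["attrib"].get("colorSpace")
    if not colorspace or colorspace == "Unknown":
        colorspace = context["representation"]["data"].get("colorspace")
    return colorspace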

View file

@ -36,8 +36,8 @@ class FlamePrelaunch(PreLaunchHook):
self.flame_pythonpath = _env["AYON_FLAME_PYTHONPATH"]
"""Hook entry method."""
project_doc = self.data["project_doc"]
project_name = project_doc["name"]
project_entity = self.data["project_entity"]
project_name = project_entity["name"]
volume_name = _env.get("FLAME_WIRETAP_VOLUME")
# get image io
@ -63,20 +63,22 @@ class FlamePrelaunch(PreLaunchHook):
hostname = socket.gethostname() # not returning wiretap host name
self.log.debug("Collected user \"{}\"".format(user_name))
self.log.info(pformat(project_doc))
_db_p_data = project_doc["data"]
width = _db_p_data["resolutionWidth"]
height = _db_p_data["resolutionHeight"]
fps = float(_db_p_data["fps"])
self.log.info(pformat(project_entity))
project_attribs = project_entity["attrib"]
width = project_attribs["resolutionWidth"]
height = project_attribs["resolutionHeight"]
fps = float(project_attribs["fps"])
project_data = {
"Name": project_doc["name"],
"Nickname": _db_p_data["code"],
"Name": project_entity["name"],
"Nickname": project_entity["code"],
"Description": "Created by OpenPype",
"SetupDir": project_doc["name"],
"SetupDir": project_entity["name"],
"FrameWidth": int(width),
"FrameHeight": int(height),
"AspectRatio": float((width / height) * _db_p_data["pixelAspect"]),
"AspectRatio": float(
(width / height) * project_attribs["pixelAspect"]
),
"FrameRate": self._get_flame_fps(fps)
}
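
Note: a self-contained sketch of the project-data mapping above; the fps converter is passed in because _get_flame_fps is a hook method (names here are illustrative):

def build_flame_project_data(project_entity, to_flame_fps):
    # Attributes replace the legacy project "data" dict; "code" is top level.
    attribs = project_entity["attrib"]
    width = attribs["resolutionWidth"]
    height = attribs["resolutionHeight"]
    return {
        "Name": project_entity["name"],
        "Nickname": project_entity["code"],
        "Description": "Created by OpenPype",
        "SetupDir": project_entity["name"],
        "FrameWidth": int(width),
        "FrameHeight": int(height),
        "AspectRatio": float((width / height) * attribs["pixelAspect"]),
        "FrameRate": to_flame_fps(float(attribs["fps"])),
    }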

View file

@ -207,14 +207,14 @@ class CreateShotClip(opfapi.Creator):
"value": ["[ track name ]", "main", "bg", "fg", "bg",
"animatic"],
"type": "QComboBox",
"label": "Subset Name",
"label": "Product Name",
"target": "ui",
"toolTip": "chose product name pattern, if [ track name ] is selected, name of track layer will be used", # noqa
"order": 0},
"productType": {
"value": ["plate", "take"],
"type": "QComboBox",
"label": "Subset Family",
"label": "Product Type",
"target": "ui", "toolTip": "What use of this product is for", # noqa
"order": 1},
"reviewTrack": {

View file

@ -48,9 +48,9 @@ class LoadClip(opfapi.ClipLoader):
self.fpd = fproject.current_workspace.desktop
# load clip to timeline and get main variables
version = context['version']
version_data = version.get("data", {})
version_name = version.get("name", None)
version_entity = context["version"]
version_attributes = version_entity["attrib"]
version_name = version_entity["version"]
colorspace = self.get_colorspace(context)
# in case output is not in context replace key to representation
@ -112,11 +112,10 @@ class LoadClip(opfapi.ClipLoader):
]
# move all version data keys to tag data
data_imprint = {}
for key in add_keys:
data_imprint.update({
key: version_data.get(key, str(None))
})
data_imprint = {
key: version_attributes.get(key, str(None))
for key in add_keys
}
# add variables related to version context
data_imprint.update({
@ -187,20 +186,20 @@ class LoadClip(opfapi.ClipLoader):
# """ Updating previously loaded clips
# """
# # load clip to timeline and get main variables
# repre_doc = context['representation']
# repre_entity = context['representation']
# name = container['name']
# namespace = container['namespace']
# track_item = phiero.get_track_items(
# track_item_name=namespace)
# version = io.find_one({
# "type": "version",
# "_id": repre_doc["parent"]
# "id": repre_entity["versionId"]
# })
# version_data = version.get("data", {})
# version_name = version.get("name", None)
# colorspace = version_data.get("colorspace", None)
# colorspace = version_data.get("colorSpace", None)
# object_name = "{}_{}".format(name, namespace)
# file = get_representation_path(repre_doc).replace("\\", "/")
# file = get_representation_path(repre_entity).replace("\\", "/")
# clip = track_item.source()
# # reconnect media to new path
@ -225,7 +224,7 @@ class LoadClip(opfapi.ClipLoader):
# # add variables related to version context
# data_imprint.update({
# "representation": str(repre_doc["_id"]),
# "representation": repre_entity["id"],
# "version": version_name,
# "colorspace": colorspace,
# "objectName": object_name

View file

@ -45,9 +45,9 @@ class LoadClipBatch(opfapi.ClipLoader):
self.batch = options.get("batch") or flame.batch
# load clip to timeline and get main variables
version = context['version']
version_data = version.get("data", {})
version_name = version.get("name", None)
version_entity = context["version"]
version_attributes = version_entity["attrib"]
version_name = version_entity["version"]
colorspace = self.get_colorspace(context)
clip_name_template = self.clip_name_template
@ -59,20 +59,20 @@ class LoadClipBatch(opfapi.ClipLoader):
layer_rename_template = layer_rename_template.replace(
"output", "representation")
asset_doc = context["asset"]
subset_doc = context["subset"]
folder_entity = context["folder"]
product_entity = context["product"]
formatting_data = deepcopy(context["representation"]["context"])
formatting_data["batch"] = self.batch.name.get_value()
formatting_data.update({
"asset": asset_doc["name"],
"asset": folder_entity["name"],
"folder": {
"name": asset_doc["name"],
"name": folder_entity["name"],
},
"subset": subset_doc["name"],
"family": subset_doc["data"]["family"],
"subset": product_entity["name"],
"family": product_entity["productType"],
"product": {
"name": subset_doc["name"],
"type": subset_doc["data"]["family"],
"name": product_entity["name"],
"type": product_entity["productType"],
}
})
@ -129,7 +129,7 @@ class LoadClipBatch(opfapi.ClipLoader):
# move all version data keys to tag data
data_imprint = {
key: version_data.get(key, str(None))
key: version_attributes.get(key, str(None))
for key in add_keys
}
# add variables related to version context

View file

@ -1,6 +1,5 @@
import pyblish.api
from ayon_core.client import get_asset_name_identifier
import ayon_core.hosts.flame.api as opfapi
from ayon_core.hosts.flame.otio import flame_export
from ayon_core.pipeline.create import get_product_name
@ -18,31 +17,33 @@ class CollecTimelineOTIO(pyblish.api.ContextPlugin):
variant = "otioTimeline"
# main
asset_doc = context.data["assetEntity"]
task_name = context.data["task"]
folder_entity = context.data["folderEntity"]
project = opfapi.get_current_project()
sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
# create product name
task_entity = context.data["taskEntity"]
task_name = task_type = None
if task_entity:
task_name = task_entity["name"]
task_type = task_entity["taskType"]
product_name = get_product_name(
context.data["projectName"],
asset_doc,
task_name,
task_type,
context.data["hostName"],
product_type,
variant,
project_settings=context.data["project_settings"]
)
folder_path = get_asset_name_identifier(asset_doc)
# adding otio timeline to context
with opfapi.maintained_segment_selection(sequence) as selected_seg:
otio_timeline = flame_export.create_otio_timeline(sequence)
instance_data = {
"name": product_name,
"folderPath": folder_path,
"folderPath": folder_entity["path"],
"productName": product_name,
"productType": product_type,
"family": product_type,

View file

@ -219,7 +219,7 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin):
# update task data in anatomy data
project_task_types = anatomy_obj["tasks"]
task_code = project_task_types.get(task_type, {}).get("short_name")
task_code = project_task_types.get(task_type, {}).get("shortName")
anatomy_data.update({
"task": {
"name": task_name,
@ -247,7 +247,7 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin):
os.makedirs(render_dir_path, mode=0o777)
# TODO: add most of these to `imageio/flame/batch/write_node`
name = "{project[code]}_{asset}_{task[name]}".format(
name = "{project[code]}_{folder[name]}_{task[name]}".format(
**anatomy_data
)
@ -321,16 +321,17 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin):
))
def _get_shot_task_dir_path(self, instance, task_data):
project_doc = instance.data["projectEntity"]
asset_entity = instance.data["assetEntity"]
project_entity = instance.data["projectEntity"]
folder_entity = instance.data["folderEntity"]
task_entity = instance.data["taskEntity"]
anatomy = instance.context.data["anatomy"]
project_settings = instance.context.data["project_settings"]
return get_workdir(
project_doc,
asset_entity,
task_data["name"],
project_entity,
folder_entity,
task_entity,
"flame",
anatomy,
anatomy=anatomy,
project_settings=project_settings
)
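
Note: get_workdir now receives whole entities plus keyword arguments. A hedged usage sketch, assuming get_workdir is importable from ayon_core.pipeline.workfile and the entities were collected as in the hunk above:

from ayon_core.pipeline.workfile import get_workdir

def shot_task_workdir(instance):
    # Entities stored on the instance/context earlier in the publish run.
    context_data = instance.context.data
    return get_workdir(
        instance.data["projectEntity"],
        instance.data["folderEntity"],
        instance.data["taskEntity"],
        "flame",
        anatomy=context_data["anatomy"],
        project_settings=context_data["project_settings"],
    )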

View file

@ -9,7 +9,7 @@ from .pipeline import (
from .lib import (
maintained_selection,
update_frame_range,
set_asset_framerange,
set_current_context_framerange,
get_current_comp,
get_bmd_library,
comp_lock_and_undo_chunk
@ -29,7 +29,7 @@ __all__ = [
# lib
"maintained_selection",
"update_frame_range",
"set_asset_framerange",
"set_current_context_framerange",
"get_current_comp",
"get_bmd_library",
"comp_lock_and_undo_chunk",

View file

@ -4,19 +4,8 @@ import re
import contextlib
from ayon_core.lib import Logger
from ayon_core.client import (
get_asset_by_name,
get_subset_by_name,
get_last_version_by_subset_id,
get_representation_by_id,
get_representation_by_name,
get_representation_parents,
)
from ayon_core.pipeline import (
switch_container,
get_current_project_name,
)
from ayon_core.pipeline.context_tools import get_current_project_asset
from ayon_core.pipeline.context_tools import get_current_project_folder
self = sys.modules[__name__]
self._project = None
@ -63,23 +52,25 @@ def update_frame_range(start, end, comp=None, set_render_range=True,
comp.SetAttrs(attrs)
def set_asset_framerange():
"""Set Comp's frame range based on current asset"""
asset_doc = get_current_project_asset()
start = asset_doc["data"]["frameStart"]
end = asset_doc["data"]["frameEnd"]
handle_start = asset_doc["data"]["handleStart"]
handle_end = asset_doc["data"]["handleEnd"]
def set_current_context_framerange():
"""Set Comp's frame range based on current folder."""
folder_entity = get_current_project_folder()
folder_attributes = folder_entity["attrib"]
start = folder_attributes["frameStart"]
end = folder_attributes["frameEnd"]
handle_start = folder_attributes["handleStart"]
handle_end = folder_attributes["handleEnd"]
update_frame_range(start, end, set_render_range=True,
handle_start=handle_start,
handle_end=handle_end)
def set_asset_resolution():
"""Set Comp's resolution width x height default based on current asset"""
asset_doc = get_current_project_asset()
width = asset_doc["data"]["resolutionWidth"]
height = asset_doc["data"]["resolutionHeight"]
def set_current_context_resolution():
"""Set Comp's resolution width x height default based on current folder"""
folder_entity = get_current_project_folder()
folder_attributes = folder_entity["attrib"]
width = folder_attributes["resolutionWidth"]
height = folder_attributes["resolutionHeight"]
comp = get_current_comp()
print("Setting comp frame format resolution to {}x{}".format(width,
@ -91,7 +82,7 @@ def set_asset_resolution():
def validate_comp_prefs(comp=None, force_repair=False):
"""Validate current comp defaults with asset settings.
"""Validate current comp defaults with folder settings.
Validates fps, resolutionWidth, resolutionHeight, aspectRatio.
@ -103,22 +94,23 @@ def validate_comp_prefs(comp=None, force_repair=False):
log = Logger.get_logger("validate_comp_prefs")
fields = [
"name",
"data.fps",
"data.resolutionWidth",
"data.resolutionHeight",
"data.pixelAspect"
]
asset_doc = get_current_project_asset(fields=fields)
asset_data = asset_doc["data"]
fields = {
"path",
"attrib.fps",
"attrib.resolutionWidth",
"attrib.resolutionHeight",
"attrib.pixelAspect",
}
folder_entity = get_current_project_folder(fields=fields)
folder_path = folder_entity["path"]
folder_attributes = folder_entity["attrib"]
comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat")
# Pixel aspect ratio in Fusion is set as AspectX and AspectY so we convert
# the data to something that is more sensible to Fusion
asset_data["pixelAspectX"] = asset_data.pop("pixelAspect")
asset_data["pixelAspectY"] = 1.0
folder_attributes["pixelAspectX"] = folder_attributes.pop("pixelAspect")
folder_attributes["pixelAspectY"] = 1.0
validations = [
("fps", "Rate", "FPS"),
@ -130,23 +122,23 @@ def validate_comp_prefs(comp=None, force_repair=False):
invalid = []
for key, comp_key, label in validations:
asset_value = asset_data[key]
folder_value = folder_attributes[key]
comp_value = comp_frame_format_prefs.get(comp_key)
if asset_value != comp_value:
if folder_value != comp_value:
invalid_msg = "{} {} should be {}".format(label,
comp_value,
asset_value)
folder_value)
invalid.append(invalid_msg)
if not force_repair:
# Do not log warning if we force repair anyway
log.warning(
"Comp {pref} {value} does not match asset "
"'{asset_name}' {pref} {asset_value}".format(
"Comp {pref} {value} does not match folder "
"'{folder_path}' {pref} {folder_value}".format(
pref=label,
value=comp_value,
asset_name=asset_doc["name"],
asset_value=asset_value)
folder_path=folder_path,
folder_value=folder_value)
)
if invalid:
@ -154,7 +146,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
def _on_repair():
attributes = dict()
for key, comp_key, _label in validations:
value = asset_data[key]
value = folder_attributes[key]
comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
@ -170,7 +162,7 @@ def validate_comp_prefs(comp=None, force_repair=False):
dialog = SimplePopup(parent=menu.menu)
dialog.setWindowTitle("Fusion comp has invalid configuration")
msg = "Comp preferences mismatches '{}'".format(asset_doc["name"])
msg = "Comp preferences mismatches '{}'".format(folder_path)
msg += "\n" + "\n".join(invalid)
dialog.set_message(msg)
dialog.set_button_text("Repair")
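
Note: the validation loop above compares folder attributes with Comp.FrameFormat preferences. A condensed sketch of that comparison; only the "fps" tuple is visible in the hunk, so the remaining entries here are illustrative:

def find_invalid_comp_prefs(folder_attributes, comp_frame_format_prefs):
    # (folder attribute key, comp preference key, human-readable label)
    validations = [
        ("fps", "Rate", "FPS"),
        ("resolutionWidth", "Width", "Resolution Width"),    # illustrative
        ("resolutionHeight", "Height", "Resolution Height"), # illustrative
    ]
    invalid = []
    for key, comp_key, label in validations:
        folder_value = folder_attributes[key]
        comp_value = comp_frame_format_prefs.get(comp_key)
        if folder_value != comp_value:
            invalid.append(
                "{} {} should be {}".format(label, comp_value, folder_value)
            )
    return invalid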

View file

@ -10,10 +10,10 @@ from ayon_core.hosts.fusion.scripts import (
duplicate_with_inputs,
)
from ayon_core.hosts.fusion.api.lib import (
set_asset_framerange,
set_asset_resolution,
set_current_context_framerange,
set_current_context_resolution,
)
from ayon_core.pipeline import get_current_asset_name
from ayon_core.pipeline import get_current_folder_path
from ayon_core.resources import get_ayon_icon_filepath
from ayon_core.tools.utils import get_qt_app
@ -49,15 +49,15 @@ class OpenPypeMenu(QtWidgets.QWidget):
self.render_mode_widget = None
self.setWindowTitle(MENU_LABEL)
asset_label = QtWidgets.QLabel("Context", self)
asset_label.setStyleSheet(
context_label = QtWidgets.QLabel("Context", self)
context_label.setStyleSheet(
"""QLabel {
font-size: 14px;
font-weight: 600;
color: #5f9fb8;
}"""
)
asset_label.setAlignment(QtCore.Qt.AlignHCenter)
context_label.setAlignment(QtCore.Qt.AlignHCenter)
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
@ -74,7 +74,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(10, 20, 10, 20)
layout.addWidget(asset_label)
layout.addWidget(context_label)
layout.addSpacing(20)
@ -103,7 +103,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
self.setLayout(layout)
# Store reference so we can update the label
self.asset_label = asset_label
self.context_label = context_label
workfiles_btn.clicked.connect(self.on_workfile_clicked)
create_btn.clicked.connect(self.on_create_clicked)
@ -131,8 +131,8 @@ class OpenPypeMenu(QtWidgets.QWidget):
def on_task_changed(self):
# Update current context label
label = get_current_asset_name()
self.asset_label.setText(label)
label = get_current_folder_path()
self.context_label.setText(label)
def register_callback(self, name, fn):
# Create a wrapper callback that we only store
@ -168,10 +168,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
duplicate_with_inputs.duplicate_with_input_connections()
def on_set_resolution_clicked(self):
set_asset_resolution()
set_current_context_resolution()
def on_set_framerange_clicked(self):
set_asset_framerange()
set_current_context_framerange()
def launch_openpype_menu():

View file

@ -252,7 +252,7 @@ def imprint_container(tool,
("name", str(name)),
("namespace", str(namespace)),
("loader", str(loader)),
("representation", str(context["representation"]["_id"])),
("representation", context["representation"]["id"]),
]
for key, value in data:

View file

@ -138,7 +138,7 @@ class GenericCreateSaver(Creator):
# get output format
ext = data["creator_attributes"]["image_format"]
# Subset change detected
# Product change detected
product_type = formatting_data["productType"]
f_product_name = formatting_data["productName"]

View file

@ -15,7 +15,7 @@ class CreateSaver(GenericCreateSaver):
product_type = "render"
description = "Fusion Saver to generate image sequence"
default_frame_range_option = "asset_db"
default_frame_range_option = "current_folder"
def get_detail_description(self):
return """Fusion Saver to generate image sequence.
@ -24,7 +24,7 @@ class CreateSaver(GenericCreateSaver):
product type. (But can publish even single frame 'render'.)
Select what should be source of render range:
- "Current asset context" - values set on Asset in DB (Ftrack)
- "Current Folder context" - values set on folder on AYON server
- "From render in/out" - from node itself
- "From composition timeline" - from timeline
@ -50,7 +50,7 @@ class CreateSaver(GenericCreateSaver):
def _get_frame_range_enum(self):
frame_range_options = {
"asset_db": "Current asset context",
"current_folder": "Current Folder context",
"render_range": "From render in/out",
"comp_range": "From composition timeline",
}

View file

@ -1,7 +1,8 @@
import ayon_api
from ayon_core.hosts.fusion.api import (
get_current_comp
)
from ayon_core.client import get_asset_by_name
from ayon_core.pipeline import (
AutoCreator,
CreatedInstance,
@ -54,7 +55,6 @@ class FusionWorkfileCreator(AutoCreator):
comp.SetData(self.data_key, data)
def create(self, options=None):
comp = get_current_comp()
if not comp:
self.log.error("Unable to find current comp")
@ -67,33 +67,37 @@ class FusionWorkfileCreator(AutoCreator):
break
project_name = self.create_context.get_current_project_name()
asset_name = self.create_context.get_current_asset_name()
folder_path = self.create_context.get_current_folder_path()
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
if existing_instance is None:
existing_instance_asset = None
else:
existing_instance_asset = existing_instance["folderPath"]
existing_folder_path = None
if existing_instance is not None:
existing_folder_path = existing_instance["folderPath"]
if existing_instance is None:
asset_doc = get_asset_by_name(project_name, asset_name)
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
data = {
"folderPath": asset_name,
"folderPath": folder_path,
"task": task_name,
"variant": self.default_variant,
}
data.update(self.get_dynamic_data(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
None
@ -107,17 +111,22 @@ class FusionWorkfileCreator(AutoCreator):
self._add_instance_to_context(new_instance)
elif (
existing_instance_asset != asset_name
existing_folder_path != folder_path
or existing_instance["task"] != task_name
):
asset_doc = get_asset_by_name(project_name, asset_name)
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
asset_doc,
task_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
existing_instance["folderPath"] = asset_name
existing_instance["folderPath"] = folder_path
existing_instance["task"] = task_name
existing_instance["productName"] = product_name

View file

@ -27,11 +27,10 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin):
from ayon_core.hosts.fusion.api import lib
version = context['version']
version_data = version.get("data", {})
version_attributes = context["version"]["attrib"]
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
start = version_attributes.get("frameStart", None)
end = version_attributes.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@ -62,11 +61,9 @@ class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
from ayon_core.hosts.fusion.api import lib
version = context['version']
version_data = version.get("data", {})
start = version_data.get("frameStart", None)
end = version_data.get("frameEnd", None)
version_attributes = context["version"]["attrib"]
start = version_attributes.get("frameStart", None)
end = version_attributes.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@ -74,7 +71,7 @@ class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
return
# Include handles
start -= version_data.get("handleStart", 0)
end += version_data.get("handleEnd", 0)
start -= version_attributes.get("handleStart", 0)
end += version_attributes.get("handleEnd", 0)
lib.update_frame_range(start, end)
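
Note: the handle math above widens the range on both sides; e.g. frameStart 1001 and frameEnd 1100 with 8-frame handles yield 993-1108. A tiny standalone sketch of the same logic:

def frame_range_with_handles(version_attributes):
    # Missing handles fall back to 0, as in the loader above.
    start = version_attributes.get("frameStart")
    end = version_attributes.get("frameEnd")
    if start is None or end is None:
        return None
    start -= version_attributes.get("handleStart", 0)
    end += version_attributes.get("handleEnd", 0)
    return start, end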

View file

@ -24,9 +24,9 @@ class FusionLoadAlembicMesh(load.LoaderPlugin):
tool_type = "SurfaceAlembicMesh"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context['asset']['name']
namespace = context["folder"]["name"]
# Create the Loader with the filename path set
comp = get_current_comp()
@ -54,14 +54,14 @@ class FusionLoadAlembicMesh(load.LoaderPlugin):
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
repre_doc = context["representation"]
path = get_representation_path(repre_doc)
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["Filename"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(repre_doc["_id"]))
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]

View file

@ -38,9 +38,9 @@ class FusionLoadFBXMesh(load.LoaderPlugin):
tool_type = "SurfaceFBXMesh"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context["asset"]["name"]
namespace = context["folder"]["name"]
# Create the Loader with the filename path set
comp = get_current_comp()
@ -69,14 +69,14 @@ class FusionLoadFBXMesh(load.LoaderPlugin):
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
repre_doc = context["representation"]
path = get_representation_path(repre_doc)
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["ImportFile"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(repre_doc["_id"]))
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]

View file

@ -1,7 +1,6 @@
import contextlib
import ayon_core.pipeline.load as load
from ayon_core.pipeline.load import get_representation_context
from ayon_core.hosts.fusion.api import (
imprint_container,
get_current_comp,
@ -149,9 +148,9 @@ class FusionLoadSequence(load.LoaderPlugin):
color = "orange"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context["asset"]["name"]
namespace = context["folder"]["name"]
# Use the first file for now
path = self.filepath_from_context(context)
@ -224,8 +223,7 @@ class FusionLoadSequence(load.LoaderPlugin):
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
repre_doc = context["representation"]
context = get_representation_context(repre_doc)
repre_entity = context["representation"]
path = self.filepath_from_context(context)
# Get start frame from version data
@ -256,7 +254,7 @@ class FusionLoadSequence(load.LoaderPlugin):
)
# Update the imprinted representation
tool.SetData("avalon.representation", str(repre_doc["_id"]))
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]
@ -266,17 +264,17 @@ class FusionLoadSequence(load.LoaderPlugin):
with comp_lock_and_undo_chunk(comp, "Remove Loader"):
tool.Delete()
def _get_start(self, version_doc, tool):
def _get_start(self, version_entity, tool):
"""Return real start frame of published files (incl. handles)"""
data = version_doc["data"]
attributes = version_entity["attrib"]
# Get start frame directly with handle if it's in data
start = data.get("frameStartHandle")
start = attributes.get("frameStartHandle")
if start is not None:
return start
# Get frame start without handles
start = data.get("frameStart")
start = attributes.get("frameStart")
if start is None:
self.log.warning(
"Missing start frame for version "
@ -286,7 +284,7 @@ class FusionLoadSequence(load.LoaderPlugin):
return 0
# Use `handleStart` if the data is available
handle_start = data.get("handleStart")
handle_start = attributes.get("handleStart")
if handle_start:
start -= handle_start

View file

@ -40,9 +40,9 @@ class FusionLoadUSD(load.LoaderPlugin):
cls.enabled = is_usd_supported
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context['asset']['name']
namespace = context["folder"]["name"]
# Create the Loader with the filename path set
comp = get_current_comp()
@ -69,14 +69,14 @@ class FusionLoadUSD(load.LoaderPlugin):
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
repre_doc = context["representation"]
path = get_representation_path(repre_doc)
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["Filename"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(repre_doc["_id"]))
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]

View file

@ -25,8 +25,8 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
frame_range_source = creator_attributes.get("frame_range_source")
instance.data["frame_range_source"] = frame_range_source
# get asset frame ranges to all instances
# render product type instances `asset_db` render target
# get folder frame ranges to all instances
# render product type instances `current_folder` render target
start = context.data["frameStart"]
end = context.data["frameEnd"]
handle_start = context.data["handleStart"]

View file

@ -70,10 +70,10 @@ class FusionRenderLocal(
# Log render status
self.log.info(
"Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
nm=instance.data["name"],
ast=instance.data["folderPath"],
tsk=instance.data["task"],
"Rendered '{}' for folder '{}' under the task '{}'".format(
instance.data["name"],
instance.data["folderPath"],
instance.data["task"],
)
)

View file

@ -11,10 +11,10 @@ from ayon_core.hosts.fusion.api import comp_lock_and_undo_chunk
class ValidateSaverResolution(
pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
"""Validate that the saver input resolution matches the asset resolution"""
"""Validate that the saver input resolution matches the folder resolution"""
order = pyblish.api.ValidatorOrder
label = "Validate Asset Resolution"
label = "Validate Folder Resolution"
families = ["render", "image"]
hosts = ["fusion"]
optional = True
@ -29,7 +29,7 @@ class ValidateSaverResolution(
if resolution != expected_resolution:
raise PublishValidationError(
"The input's resolution does not match "
"the asset's resolution {}x{}.\n\n"
"the folder's resolution {}x{}.\n\n"
"The input's resolution is {}x{}.".format(
expected_resolution[0], expected_resolution[1],
resolution[0], resolution[1]
@ -55,8 +55,8 @@ class ValidateSaverResolution(
@classmethod
def get_expected_resolution(cls, instance):
data = instance.data["assetEntity"]["data"]
return data["resolutionWidth"], data["resolutionHeight"]
attributes = instance.data["folderEntity"]["attrib"]
return attributes["resolutionWidth"], attributes["resolutionHeight"]
@classmethod
def get_tool_resolution(cls, tool, frame):

View file

@ -10,7 +10,7 @@ class ValidateUniqueSubsets(pyblish.api.ContextPlugin):
"""Ensure all instances have a unique product name"""
order = pyblish.api.ValidatorOrder
label = "Validate Unique Subsets"
label = "Validate Unique Products"
families = ["render", "image"]
hosts = ["fusion"]
actions = [SelectInvalidAction]
@ -27,7 +27,7 @@ class ValidateUniqueSubsets(pyblish.api.ContextPlugin):
instance
)
# Find which asset + subset combination has more than one instance
# Find which folder + product combination has more than one instance
# Those are considered invalid because they'd integrate to the same
# destination.
invalid = []

View file

@ -597,7 +597,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
read_node = harmony.send(
{
"function": copy_files + import_files,
"args": ["Top", files, context["version"]["data"]["subset"], 1]
"args": ["Top", files, context["product"]["name"], 1]
}
)["result"]
@ -614,9 +614,9 @@ class ImageSequenceLoader(load.LoaderPlugin):
def update(self, container, context):
node = container.pop("node")
repre_doc = context["representation"]
repre_entity = context["representation"]
project_name = get_current_project_name()
version = get_version_by_id(project_name, repre_doc["parent"])
version = get_version_by_id(project_name, repre_entity["versionId"])
files = []
for f in version["data"]["files"]:
files.append(
@ -633,7 +633,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
)
harmony.imprint(
node, {"representation": str(repre_doc["_id"])}
node, {"representation": repre_entity["id"]}
)
def remove(self, container):

View file

@ -11,7 +11,7 @@ from .pipeline import (
select_instance,
containerise,
set_scene_settings,
get_asset_settings,
get_current_context_settings,
ensure_scene_settings,
check_inventory,
application_launch,
@ -55,7 +55,7 @@ __all__ = [
"select_instance",
"containerise",
"set_scene_settings",
"get_asset_settings",
"get_current_context_settings",
"ensure_scene_settings",
"check_inventory",
"application_launch",

View file

@ -13,7 +13,7 @@ from ayon_core.pipeline import (
AVALON_CONTAINER_ID,
)
from ayon_core.pipeline.load import get_outdated_containers
from ayon_core.pipeline.context_tools import get_current_project_asset
from ayon_core.pipeline.context_tools import get_current_project_folder
from ayon_core.hosts.harmony import HARMONY_HOST_DIR
import ayon_core.hosts.harmony.api as harmony
@ -42,24 +42,25 @@ def set_scene_settings(settings):
{"function": "PypeHarmony.setSceneSettings", "args": settings})
def get_asset_settings():
"""Get settings on current asset from database.
def get_current_context_settings():
"""Get settings on current folder from server.
Returns:
dict: Scene data.
"""
asset_doc = get_current_project_asset()
asset_data = asset_doc["data"]
fps = asset_data.get("fps")
frame_start = asset_data.get("frameStart")
frame_end = asset_data.get("frameEnd")
handle_start = asset_data.get("handleStart")
handle_end = asset_data.get("handleEnd")
resolution_width = asset_data.get("resolutionWidth")
resolution_height = asset_data.get("resolutionHeight")
entity_type = asset_data.get("entityType")
folder_entity = get_current_project_folder()
folder_attributes = folder_entity["attrib"]
fps = folder_attributes.get("fps")
frame_start = folder_attributes.get("frameStart")
frame_end = folder_attributes.get("frameEnd")
handle_start = folder_attributes.get("handleStart")
handle_end = folder_attributes.get("handleEnd")
resolution_width = folder_attributes.get("resolutionWidth")
resolution_height = folder_attributes.get("resolutionHeight")
entity_type = folder_attributes.get("entityType")
scene_data = {
"fps": fps,
@ -77,7 +78,7 @@ def get_asset_settings():
def ensure_scene_settings():
"""Validate if Harmony scene has valid settings."""
settings = get_asset_settings()
settings = get_current_context_settings()
invalid_settings = []
valid_settings = {}
@ -336,7 +337,7 @@ def containerise(name,
"name": name,
"namespace": namespace,
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
"representation": context["representation"]["id"],
"nodes": nodes
}

View file

@ -34,7 +34,7 @@ PypeHarmony.message = function(message) {
/**
* Set scene setting based on shot/asset settngs.
* Set scene settings based on folder settings.
* @function
* @param {obj} settings Scene settings.
*/

View file

@ -87,7 +87,7 @@ ImageSequenceLoader.getUniqueColumnName = function(columnPrefix) {
* // Arguments are in following order:
* var args = [
* files, // Files in file sequences.
* asset, // Asset name.
* folderName, // Folder name.
* productName, // Product name.
* startFrame, // Sequence starting frame.
* groupId // Unique group ID (uuid4).
@ -105,7 +105,7 @@ ImageSequenceLoader.prototype.importFiles = function(args) {
var doc = $.scn;
var files = args[0];
var asset = args[1];
var folderName = args[1];
var productName = args[2];
var startFrame = args[3];
var groupId = args[4];
@ -124,7 +124,7 @@ ImageSequenceLoader.prototype.importFiles = function(args) {
var num = 0;
var name = '';
do {
name = asset + '_' + (num++) + '_' + productName;
name = folderName + '_' + (num++) + '_' + productName;
} while (currentGroup.getNodeByName(name) != null);
extension = filename.substr(pos+1).toLowerCase();

View file

@ -30,7 +30,7 @@ var TemplateLoader = function() {};
* // arguments are in following order:
* var args = [
* templatePath, // Path to tpl file.
* assetName, // Asset name.
* folderName, // Folder name.
* productName, // Product name.
* groupId // unique ID (uuid4)
* ];
@ -38,7 +38,7 @@ var TemplateLoader = function() {};
TemplateLoader.prototype.loadContainer = function(args) {
var doc = $.scn;
var templatePath = args[0];
var assetName = args[1];
var folderName = args[1];
var productName = args[2];
var groupId = args[3];
@ -62,7 +62,7 @@ TemplateLoader.prototype.loadContainer = function(args) {
var num = 0;
var containerGroupName = '';
do {
containerGroupName = assetName + '_' + (num++) + '_' + productName;
containerGroupName = folderName + '_' + (num++) + '_' + productName;
} while (currentGroup.getNodeByName(containerGroupName) != null);
// import the template

View file

@ -42,10 +42,10 @@ class ImportAudioLoader(load.LoaderPlugin):
def load(self, context, name=None, namespace=None, data=None):
wav_file = get_representation_path(context["representation"])
harmony.send(
{"function": func, "args": [context["subset"]["name"], wav_file]}
{"function": func, "args": [context["product"]["name"], wav_file]}
)
product_name = context["subset"]["name"]
product_name = context["product"]["name"]
return harmony.containerise(
product_name,

View file

@ -254,7 +254,7 @@ class BackgroundLoader(load.LoaderPlugin):
bg_folder = os.path.dirname(path)
product_name = context["subset"]["name"]
product_name = context["product"]["name"]
# read_node_name += "_{}".format(uuid.uuid4())
container_nodes = []
@ -281,8 +281,8 @@ class BackgroundLoader(load.LoaderPlugin):
)
def update(self, container, context):
repre_doc = context["representation"]
path = get_representation_path(repre_doc)
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with open(path) as json_file:
data = json.load(json_file)
@ -302,7 +302,7 @@ class BackgroundLoader(load.LoaderPlugin):
print(container)
is_latest = is_representation_from_latest(repre_doc)
is_latest = is_representation_from_latest(repre_entity)
for layer in sorted(layers):
file_to_import = [
os.path.join(bg_folder, layer).replace("\\", "/")
@ -354,7 +354,7 @@ class BackgroundLoader(load.LoaderPlugin):
harmony.imprint(
container['name'],
{
"representation": str(repre_doc["_id"]),
"representation": repre_entity["id"],
"nodes": container["nodes"]
}
)

View file

@ -46,8 +46,8 @@ class ImageSequenceLoader(load.LoaderPlugin):
else:
files.append(fname.parent.joinpath(remainder[0]).as_posix())
asset = context["asset"]["name"]
product_name = context["subset"]["name"]
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
group_id = str(uuid.uuid4())
read_node = harmony.send(
@ -55,7 +55,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
"function": f"PypeHarmony.Loaders.{self_name}.importFiles", # noqa: E501
"args": [
files,
asset,
folder_name,
product_name,
1,
group_id
@ -64,7 +64,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
)["result"]
return harmony.containerise(
f"{asset}_{product_name}",
f"{folder_name}_{product_name}",
namespace,
read_node,
context,
@ -83,8 +83,8 @@ class ImageSequenceLoader(load.LoaderPlugin):
self_name = self.__class__.__name__
node = container.get("nodes").pop()
repre_doc = context["representation"]
path = get_representation_path(repre_doc)
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
collections, remainder = clique.assemble(
os.listdir(os.path.dirname(path))
)
@ -111,7 +111,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
)
# Colour node.
if is_representation_from_latest(repre_doc):
if is_representation_from_latest(repre_entity):
harmony.send(
{
"function": "PypeHarmony.setColor",
@ -125,7 +125,7 @@ class ImageSequenceLoader(load.LoaderPlugin):
})
harmony.imprint(
node, {"representation": str(repre_doc["_id"])}
node, {"representation": repre_entity["id"]}
)
def remove(self, container):

View file

@ -27,16 +27,15 @@ class ImportPaletteLoader(load.LoaderPlugin):
)
def load_palette(self, context):
subset_doc = context["subset"]
repre_doc = context["representation"]
product_name = subset_doc["name"]
product_name = context["product"]["name"]
repre_entity = context["representation"]
name = product_name.replace("palette", "")
# Overwrite palette on disk.
scene_path = harmony.send(
{"function": "scene.currentProjectPath"}
)["result"]
src = get_representation_path(repre_doc)
src = get_representation_path(repre_entity)
dst = os.path.join(
scene_path,
"palette-library",
@ -68,7 +67,7 @@ class ImportPaletteLoader(load.LoaderPlugin):
self.remove(container)
name = self.load_palette(context)
repre_doc = context["representation"]
container["representation"] = str(repre_doc["_id"])
repre_entity = context["representation"]
container["representation"] = repre_entity["id"]
container["name"] = name
harmony.imprint(name, container)

View file

@ -52,8 +52,8 @@ class TemplateLoader(load.LoaderPlugin):
{
"function": f"PypeHarmony.Loaders.{self_name}.loadContainer",
"args": [template_path,
context["asset"]["name"],
context["subset"]["name"],
context["folder"]["name"],
context["product"]["name"],
group_id]
}
)["result"]
@ -82,8 +82,8 @@ class TemplateLoader(load.LoaderPlugin):
node = harmony.find_node_by_name(node_name, "GROUP")
self_name = self.__class__.__name__
repre_doc = context["representation"]
if is_representation_from_latest(repre_doc):
repre_entity = context["representation"]
if is_representation_from_latest(repre_entity):
self._set_green(node)
else:
self._set_red(node)
@ -111,7 +111,7 @@ class TemplateLoader(load.LoaderPlugin):
None, container["data"])
harmony.imprint(
node, {"representation": str(repre_doc["_id"])}
node, {"representation": repre_entity["id"]}
)
def remove(self, container):

View file

@ -40,7 +40,7 @@ class ImportTemplateLoader(load.LoaderPlugin):
shutil.rmtree(temp_dir)
product_name = context["subset"]["name"]
product_name = context["product"]["name"]
return harmony.containerise(
product_name,

View file

@ -17,10 +17,15 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"""Plugin entry point."""
product_type = "workfile"
basename = os.path.basename(context.data["currentFile"])
task_entity = context.data["taskEntity"]
task_name = task_type = None
if task_entity:
task_name = task_entity["name"]
task_type = task_entity["taskType"]
product_name = get_product_name(
context.data["projectName"],
context.data["assetEntity"],
context.data["task"],
task_name,
task_type,
context.data["hostName"],
product_type,
"",

View file

@ -1,25 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Subset context</title>
<title>Product context</title>
<description>
## Invalid product context
Asset name found '{found}' in products, expected '{expected}'.
Folder path found '{found}' in products, expected '{expected}'.
### How to repair?
You can fix this with `Repair` button on the right. This will use '{expected}' asset name and overwrite '{found}' asset name in scene metadata.
You can fix this with the `Repair` button on the right. This will use the '{expected}' folder path and overwrite the '{found}' folder path in scene metadata.
After that, restart `Publish` with the `Reload` button.
If this is unwanted, close workfile and open again, that way different asset value would be used for context information.
If this is unwanted, close the workfile and open it again; that way a different folder value will be used for context information.
</description>
<detail>
### __Detailed Info__ (optional)
This might happen if you reuse an old workfile and open it in a different context.
(Eg. you created product "renderCompositingDefault" from asset "Robot' in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but existing product for "Robot" asset stayed in the workfile.)
(E.g. you created product "renderCompositingDefault" from folder "Robot" in "your_project_Robot_compositing.aep", now you opened this workfile in a context "Sloth" but the existing product for the "Robot" folder stayed in the workfile.)
</detail>
</error>
</root>

View file

@ -5,18 +5,18 @@
<description>
## Invalid scene setting found
One of the settings in a scene doesn't match to asset settings in database.
One of the settings in the scene doesn't match the folder attributes on the server.
{invalid_setting_str}
### How to repair?
Change values for {invalid_keys_str} in the scene OR change them in the asset database if they are wrong there.
Change values for {invalid_keys_str} in the scene OR change them on the folder if they are wrong there.
</description>
<detail>
### __Detailed Info__ (optional)
This error is shown when for example resolution in the scene doesn't match to resolution set on the asset in the database.
This error is shown when, for example, the resolution in the scene doesn't match the resolution set on the folder on the server.
Either the value on the server or in the scene is wrong.
</detail>
</error>

Some files were not shown because too many files have changed in this diff