Merge branch 'develop' into bugfix/folder_widget_set_selected_folder_path

This commit is contained in:
Jakub Trllo 2024-06-27 16:15:26 +02:00 committed by GitHub
commit d0bdea6357
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
58 changed files with 0 additions and 7163 deletions

View file

@@ -1,9 +0,0 @@
"""Public interface of the traypublisher addon package."""
from .version import __version__
from .addon import TrayPublishAddon

# Explicit public API of the package.
__all__ = (
    "__version__",
    "TrayPublishAddon",
)

View file

@@ -1,122 +0,0 @@
import os
from pathlib import Path
from ayon_core.lib import get_ayon_launcher_args
from ayon_core.lib.execute import run_detached_process
from ayon_core.addon import (
click_wrap,
AYONAddon,
ITrayAction,
IHostAddon,
)
from .version import __version__
# Absolute path to this addon's root directory (directory of this file).
TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class TrayPublishAddon(AYONAddon, IHostAddon, ITrayAction):
    """AYON addon exposing TrayPublisher as tray action and host."""

    label = "Publisher"
    name = "traypublisher"
    version = __version__

    # Host name used to filter host-specific publish plugins.
    host_name = "traypublisher"

    def initialize(self, settings):
        """Prepare addon state from studio settings."""
        self.publish_paths = [
            os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish")
        ]

    def tray_init(self):
        # Nothing to prepare when tray starts.
        return

    def on_action_trigger(self):
        """Launch TrayPublisher UI when the tray action is clicked."""
        self.run_traypublisher()

    def connect_with_addons(self, enabled_addons):
        """Collect publish paths from other addons."""
        publish_paths = self.manager.collect_plugin_paths()["publish"]
        self.publish_paths.extend(publish_paths)

    def run_traypublisher(self):
        """Start TrayPublisher UI in a detached process."""
        args = get_ayon_launcher_args(
            "addon", self.name, "launch"
        )
        run_detached_process(args)

    def cli(self, click_group):
        # Attach this addon's CLI group ('cli_main' defined below).
        click_group.add_command(cli_main.to_click_obj())
# CLI group entry point for 'traypublisher' subcommands.
@click_wrap.group(
    TrayPublishAddon.name,
    help="TrayPublisher related commands.")
def cli_main():
    pass
@cli_main.command()
def launch():
    """Launch TrayPublish tool UI."""
    # Import here so UI dependencies are loaded only when the command runs.
    from ayon_traypublisher import ui
    ui.main()
@cli_main.command()
@click_wrap.option(
    "--filepath",
    help="Full path to CSV file with data",
    type=str,
    required=True
)
@click_wrap.option(
    "--project",
    help="Project name in which the context will be used",
    type=str,
    required=True
)
@click_wrap.option(
    "--folder-path",
    help="Asset name in which the context will be used",
    type=str,
    required=True
)
@click_wrap.option(
    "--task",
    help="Task name under Asset in which the context will be used",
    type=str,
    required=False
)
@click_wrap.option(
    "--ignore-validators",
    help="Option to ignore validators",
    type=bool,
    is_flag=True,
    required=False
)
def ingestcsv(
    filepath,
    project,
    folder_path,
    task,
    ignore_validators
):
    """Ingest CSV file into project.

    This command will ingest CSV file into project. CSV file must be in
    specific format. See documentation for more information.

    Raises:
        FileNotFoundError: When the CSV file does not exist on disk.
    """
    from .csv_publish import csvpublish

    # use Path to check if csv_filepath exists
    if not Path(filepath).exists():
        raise FileNotFoundError(f"File {filepath} does not exist.")

    csvpublish(
        filepath,
        project,
        folder_path,
        task,
        ignore_validators
    )

View file

@@ -1,8 +0,0 @@
"""Public API of the traypublisher host integration."""
from .pipeline import (
    TrayPublisherHost,
)

__all__ = (
    "TrayPublisherHost",
)

View file

@@ -1,363 +0,0 @@
import re
from copy import deepcopy
import ayon_api
from ayon_core.pipeline.create import CreatorError
class ShotMetadataSolver:
    """Solving hierarchical metadata.

    Used during editorial publishing. Works with input
    clip name and settings defining python formattable
    template. Settings also define searching patterns
    and its token keys used for formatting in templates.
    """
    # Matches undecorated '{token}' placeholders (lowercase letters only).
    NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}")

    def __init__(self, logger):
        # Settings-driven configuration; refreshed via 'update_data'.
        self.clip_name_tokenizer = []
        self.shot_rename = {
            "enabled": False,
            "shot_rename_template": "",
        }
        self.shot_hierarchy = {
            "enabled": False,
            "parents": [],
            "parents_path": "",
        }
        self.shot_add_tasks = []
        self.log = logger

    def update_data(
        self,
        clip_name_tokenizer,
        shot_rename,
        shot_hierarchy,
        shot_add_tasks
    ):
        """Refresh configuration from creator settings."""
        self.clip_name_tokenizer = clip_name_tokenizer
        self.shot_rename = shot_rename
        self.shot_hierarchy = shot_hierarchy
        self.shot_add_tasks = shot_add_tasks

    def _rename_template(self, data):
        """Shot renaming function.

        Args:
            data (dict): formatting data

        Raises:
            CreatorError: If missing keys

        Returns:
            str: formatted new name
        """
        shot_rename_template = self.shot_rename[
            "shot_rename_template"]
        try:
            # format to new shot name
            return shot_rename_template.format(**data)
        except KeyError as _error:
            raise CreatorError((
                "Make sure all keys in settings are correct:: \n\n"
                f"From template string {shot_rename_template} > "
                f"`{_error}` has no equivalent in \n"
                f"{list(data.keys())} input formatting keys!"
            ))

    def _generate_tokens(self, clip_name, source_data):
        """Token generator.

        Settings defines token pairs key and regex expression.

        Args:
            clip_name (str): name of clip in editorial
            source_data (dict): data for formatting

        Raises:
            CreatorError: if missing key

        Returns:
            dict: updated source_data
        """
        output_data = deepcopy(source_data["anatomy_data"])
        output_data["clip_name"] = clip_name

        if not self.clip_name_tokenizer:
            return output_data

        # Tokens are searched in parent folder name + clip name.
        parent_name = source_data["selected_folder_entity"]["name"]
        search_text = parent_name + clip_name
        for clip_name_item in self.clip_name_tokenizer:
            token_key = clip_name_item["name"]
            pattern = clip_name_item["regex"]
            p = re.compile(pattern)
            match = p.findall(search_text)
            if not match:
                raise CreatorError((
                    "Make sure regex expression works with your data: \n\n"
                    f"'{token_key}' with regex '{pattern}' in your settings\n"
                    "can't find any match in your clip name "
                    f"'{search_text}'!\n\nLook to: "
                    "'project_settings/traypublisher/editorial_creators"
                    "/editorial_simple/clip_name_tokenizer'\n"
                    "at your project settings..."
                ))
            # QUESTION:how to refactor `match[-1]` to some better way?
            output_data[token_key] = match[-1]

        return output_data

    def _create_parents_from_settings(self, parents, data):
        """Formatting parent components.

        Args:
            parents (list): list of dict parent components
            data (dict): formatting data

        Raises:
            CreatorError: missing formatting key
            CreatorError: missing token key
            KeyError: missing parent token

        Returns:
            list: list of dict of parent components
        """
        # fill the parents parts from presets
        shot_hierarchy = deepcopy(self.shot_hierarchy)
        hierarchy_parents = shot_hierarchy["parents"]

        # fill parent keys data template from anatomy data
        try:
            _parent_tokens_formatting_data = {
                parent_token["name"]: parent_token["value"].format(**data)
                for parent_token in hierarchy_parents
            }
        except KeyError as _error:
            raise CreatorError((
                "Make sure all keys in settings are correct : \n"
                f"`{_error}` has no equivalent in \n{list(data.keys())}"
            ))

        # Token name -> folder type, used when creating parent entries.
        _parent_tokens_type = {
            parent_token["name"]: parent_token["parent_type"]
            for parent_token in hierarchy_parents
        }

        for _index, _parent in enumerate(
            shot_hierarchy["parents_path"].split("/")
        ):
            # format parent token with value which is formatted
            try:
                parent_name = _parent.format(
                    **_parent_tokens_formatting_data)
            except KeyError as _error:
                raise CreatorError((
                    "Make sure all keys in settings are correct:\n\n"
                    f"`{_error}` from template string"
                    f" {shot_hierarchy['parents_path']},"
                    f" has no equivalent in"
                    f"\n{list(_parent_tokens_formatting_data.keys())} parents"
                ))

            parent_token_name = (
                self.NO_DECOR_PATERN.findall(_parent).pop())

            if not parent_token_name:
                raise KeyError(
                    f"Parent token is not found in: `{_parent}`")

            # find parent type
            parent_token_type = _parent_tokens_type[parent_token_name]

            # in case selected context is set to the same folder
            # TODO keep index with 'parents' - name check is not enough
            if (
                _index == 0
                and parents[-1]["entity_name"] == parent_name
            ):
                continue

            # in case first parent is project then start parents from start
            if (
                _index == 0
                and parent_token_type.lower() == "project"
            ):
                project_parent = parents[0]
                parents = [project_parent]
                continue

            parents.append({
                "entity_type": "folder",
                "folder_type": parent_token_type.lower(),
                "entity_name": parent_name
            })

        return parents

    def _create_hierarchy_path(self, parents):
        """Converting hierarchy path from parents.

        Args:
            parents (list): list of dict parent components

        Returns:
            str: hierarchy path
        """
        # Project entries are excluded from the hierarchy path.
        return "/".join(
            [
                p["entity_name"] for p in parents
                if p["entity_type"] != "project"
            ]
        ) if parents else ""

    def _get_parents_from_selected_folder(
        self,
        project_entity,
        folder_entity,
    ):
        """Returning parents from context on selected folder.

        Context defined in Traypublisher project tree.

        Args:
            project_entity (dict[str, Any]): Project entity.
            folder_entity (dict[str, Any]): Selected folder entity.

        Returns:
            list: list of dict parent components
        """
        project_name = project_entity["name"]
        # Build cumulative sub-paths of the folder path, e.g.
        # "/a/b/c" -> ["/a", "/a/b", "/a/b/c"].
        path_entries = folder_entity["path"].split("/")
        subpaths = []
        subpath_items = []
        for name in path_entries:
            subpath_items.append(name)
            if name:
                subpaths.append("/".join(subpath_items))

        # Remove last name because we already have folder entity
        subpaths.pop(-1)

        folder_entity_by_path = {}
        if subpaths:
            folder_entity_by_path = {
                parent_folder["path"]: parent_folder
                for parent_folder in ayon_api.get_folders(
                    project_name, folder_paths=subpaths
                )
            }

        folders_hierarchy = [
            folder_entity_by_path[folder_path]
            for folder_path in subpaths
        ]
        folders_hierarchy.append(folder_entity)

        # add current selection context hierarchy
        output = [{
            "entity_type": "project",
            "entity_name": project_name,
        }]
        for entity in folders_hierarchy:
            output.append({
                "entity_type": "folder",
                "folder_type": entity["folderType"],
                "entity_name": entity["name"]
            })
        return output

    def _generate_tasks_from_settings(self, project_entity):
        """Convert settings inputs to task data.

        Args:
            project_entity (dict): Project entity.

        Raises:
            KeyError: Missing task type in project doc

        Returns:
            dict: tasks data
        """
        tasks_to_add = {}
        project_task_types = project_entity["taskTypes"]
        task_type_names = {
            task_type["name"]
            for task_type in project_task_types
        }
        for task_item in self.shot_add_tasks:
            task_name = task_item["name"]
            task_type = task_item["task_type"]

            # check if task type in project task types
            if task_type not in task_type_names:
                raise KeyError(
                    "Missing task type `{}` for `{}` is not"
                    " existing in `{}``".format(
                        task_type,
                        task_name,
                        list(task_type_names)
                    )
                )
            tasks_to_add[task_name] = {"type": task_type}

        return tasks_to_add

    def generate_data(self, clip_name, source_data):
        """Metadata generator.

        Converts input data to hierarchy metadata.

        Args:
            clip_name (str): clip name
            source_data (dict): formatting data

        Returns:
            (str, dict): shot name and hierarchy data
        """
        tasks = {}
        folder_entity = source_data["selected_folder_entity"]
        project_entity = source_data["project_entity"]

        # match clip to shot name at start
        shot_name = clip_name

        # parse all tokens and generate formatting data
        formatting_data = self._generate_tokens(shot_name, source_data)

        # generate parents from selected folder
        parents = self._get_parents_from_selected_folder(
            project_entity, folder_entity
        )

        if self.shot_rename["enabled"]:
            shot_name = self._rename_template(formatting_data)
            self.log.info(f"Renamed shot name: {shot_name}")

        if self.shot_hierarchy["enabled"]:
            parents = self._create_parents_from_settings(
                parents, formatting_data)

        if self.shot_add_tasks:
            tasks = self._generate_tasks_from_settings(
                project_entity)

        # generate hierarchy path from parents
        hierarchy_path = self._create_hierarchy_path(parents)

        if hierarchy_path:
            folder_path = f"/{hierarchy_path}/{shot_name}"
        else:
            folder_path = f"/{shot_name}"

        return shot_name, {
            "hierarchy": hierarchy_path,
            "folderPath": folder_path,
            "parents": parents,
            "tasks": tasks
        }

View file

@@ -1,179 +0,0 @@
import os
import json
import tempfile
import atexit
import pyblish.api
from ayon_core.pipeline import (
register_creator_plugin_path,
)
from ayon_core.host import HostBase, IPublishHost
# Addon root directory (two levels up from this file).
ROOT_DIR = os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)
))
# Plugin locations registered on host install.
PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish")
CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create")
class TrayPublisherHost(HostBase, IPublishHost):
    """Host implementation of TrayPublisher.

    There is no workfile; publishing state is kept in 'HostContext'.
    """

    name = "traypublisher"

    def install(self):
        """Register host and its publish/create plugin paths."""
        os.environ["AYON_HOST_NAME"] = self.name
        pyblish.api.register_host("traypublisher")
        pyblish.api.register_plugin_path(PUBLISH_PATH)
        register_creator_plugin_path(CREATE_PATH)

    def get_context_title(self):
        # Current project name is used as context title.
        return HostContext.get_project_name()

    def get_context_data(self):
        return HostContext.get_context_data()

    def update_context_data(self, data, changes):
        # 'changes' is ignored; the full data is stored.
        HostContext.save_context_data(data)

    def set_project_name(self, project_name):
        """Change active project of the host session."""
        # TODO Deregister project specific plugins and register new project
        # plugins
        os.environ["AYON_PROJECT_NAME"] = project_name
        HostContext.set_project_name(project_name)
class HostContext:
    """Persistent storage of publishing state in a temporary JSON file.

    TrayPublisher has no workfile, so instances, context data and project
    name are stored in a process-lifetime JSON file which is removed at
    interpreter exit.
    """

    # Lazily created path to the JSON storage file.
    _context_json_path = None

    @staticmethod
    def _on_exit():
        """Remove the temporary JSON file on process exit."""
        if (
            HostContext._context_json_path
            and os.path.exists(HostContext._context_json_path)
        ):
            os.remove(HostContext._context_json_path)

    @classmethod
    def get_context_json_path(cls):
        """Return path to the JSON storage file, creating it lazily.

        Returns:
            str: Path to a unique JSON filepath for this process.
        """
        if cls._context_json_path is None:
            # Only a unique filename is needed here; the temporary file is
            # deleted on close and recreated by '_get_data'/'_save_data'.
            output_file = tempfile.NamedTemporaryFile(
                mode="w", prefix="traypub_", suffix=".json"
            )
            output_file.close()
            cls._context_json_path = output_file.name
            atexit.register(HostContext._on_exit)
            # Removed leftover debug 'print' of the temp path.
        return cls._context_json_path

    @classmethod
    def _get_data(cls, group=None):
        """Read stored data, optionally a single top-level group.

        Args:
            group (Optional[str]): Top-level key ('instances', 'context',
                'project_name'). When None the whole mapping is returned.
        """
        json_path = cls.get_context_json_path()
        data = {}
        if not os.path.exists(json_path):
            # Initialize storage with an empty mapping.
            with open(json_path, "w") as json_stream:
                json.dump(data, json_stream)
        else:
            with open(json_path, "r") as json_stream:
                content = json_stream.read()
            if content:
                data = json.loads(content)
        if group is None:
            return data
        return data.get(group)

    @classmethod
    def _save_data(cls, group, new_data):
        """Replace one top-level group and write storage back to disk."""
        json_path = cls.get_context_json_path()
        data = cls._get_data()
        data[group] = new_data
        with open(json_path, "w") as json_stream:
            json.dump(data, json_stream)

    @classmethod
    def add_instance(cls, instance):
        """Append one serialized instance to stored instances."""
        instances = cls.get_instances()
        instances.append(instance)
        cls.save_instances(instances)

    @classmethod
    def get_instances(cls):
        return cls._get_data("instances") or []

    @classmethod
    def save_instances(cls, instances):
        cls._save_data("instances", instances)

    @classmethod
    def get_context_data(cls):
        return cls._get_data("context") or {}

    @classmethod
    def save_context_data(cls, data):
        cls._save_data("context", data)

    @classmethod
    def get_project_name(cls):
        return cls._get_data("project_name")

    @classmethod
    def set_project_name(cls, project_name):
        cls._save_data("project_name", project_name)

    @classmethod
    def get_data_to_store(cls):
        """Return all stored groups as a single mapping."""
        return {
            "project_name": cls.get_project_name(),
            "instances": cls.get_instances(),
            "context": cls.get_context_data(),
        }
def list_instances():
    """Return all instances stored in the host JSON storage."""
    return HostContext.get_instances()
def update_instances(update_list):
    """Write changed instances back into the host JSON storage.

    Args:
        update_list (list): Pairs of (instance, changes); only instances
            present in storage are updated.
    """
    # Map instance id -> freshly serialized instance data.
    changed_by_id = {
        instance.id: instance.data_to_store()
        for instance, _changes in update_list
    }
    stored_instances = HostContext.get_instances()
    for stored_data in stored_instances:
        new_data = changed_by_id.get(stored_data["instance_id"])
        if new_data is None:
            continue
        # Keys that disappeared from the instance must be dropped.
        stale_keys = set(stored_data) - set(new_data)
        stored_data.update(new_data)
        for key in stale_keys:
            stored_data.pop(key)
    HostContext.save_instances(stored_instances)
def remove_instances(instances):
    """Remove given instances from the host JSON storage.

    Args:
        instances: Single instance or list/tuple of instances to remove.
    """
    if not isinstance(instances, (tuple, list)):
        instances = [instances]

    stored_instances = HostContext.get_instances()
    for instance in instances:
        target_id = instance.data["instance_id"]
        # Remove only the first stored item with matching id.
        match_idx = next(
            (
                idx
                for idx, stored in enumerate(stored_instances)
                if stored["instance_id"] == target_id
            ),
            None,
        )
        if match_idx is not None:
            stored_instances.pop(match_idx)
    HostContext.save_instances(stored_instances)
def get_context_data():
    """Return publish context data from the host JSON storage."""
    return HostContext.get_context_data()


def update_context_data(data, changes):
    """Store publish context data; 'changes' is not used here."""
    HostContext.save_context_data(data)

View file

@@ -1,337 +0,0 @@
import ayon_api
from ayon_core.lib.attribute_definitions import (
FileDef,
BoolDef,
NumberDef,
UISeparatorDef,
)
from ayon_core.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
from ayon_core.pipeline.create import (
Creator,
HiddenCreator,
CreatedInstance,
cache_and_get_instances,
PRE_CREATE_THUMBNAIL_KEY,
)
from .pipeline import (
list_instances,
update_instances,
remove_instances,
HostContext,
)
# File extensions accepted by the reviewable file input.
REVIEW_EXTENSIONS = set(IMAGE_EXTENSIONS) | set(VIDEO_EXTENSIONS)
# Key under which instances are cached in create context shared data.
SHARED_DATA_KEY = "ayon.traypublisher.instances"
class HiddenTrayPublishCreator(HiddenCreator):
    """Base for traypublisher creators hidden from the create UI."""

    host_name = "traypublisher"
    settings_category = "traypublisher"

    def collect_instances(self):
        """Load this creator's instances from the host JSON storage."""
        instances_by_identifier = cache_and_get_instances(
            self, SHARED_DATA_KEY, list_instances
        )
        for instance_data in instances_by_identifier[self.identifier]:
            instance = CreatedInstance.from_existing(instance_data, self)
            self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        """Propagate instance changes into the host JSON storage."""
        update_instances(update_list)

    def remove_instances(self, instances):
        """Remove instances from storage and from create context."""
        remove_instances(instances)
        for instance in instances:
            self._remove_instance_from_context(instance)

    def _store_new_instance(self, new_instance):
        """Tray publisher specific method to store instance.

        Instance is stored into "workfile" of traypublisher and also add it
        to CreateContext.

        Args:
            new_instance (CreatedInstance): Instance that should be stored.
        """
        # Host implementation of storing metadata about instance
        HostContext.add_instance(new_instance.data_to_store())
        # Add instance to current context
        self._add_instance_to_context(new_instance)
class TrayPublishCreator(Creator):
    """Base for user-facing traypublisher creators."""

    create_allow_context_change = True
    host_name = "traypublisher"
    settings_category = "traypublisher"

    def collect_instances(self):
        """Load this creator's instances from the host JSON storage."""
        instances_by_identifier = cache_and_get_instances(
            self, SHARED_DATA_KEY, list_instances
        )
        for instance_data in instances_by_identifier[self.identifier]:
            instance = CreatedInstance.from_existing(instance_data, self)
            self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        """Propagate instance changes into the host JSON storage."""
        update_instances(update_list)

    def remove_instances(self, instances):
        """Remove instances from storage and from create context."""
        remove_instances(instances)
        for instance in instances:
            self._remove_instance_from_context(instance)

    def _store_new_instance(self, new_instance):
        """Tray publisher specific method to store instance.

        Instance is stored into "workfile" of traypublisher and also add it
        to CreateContext.

        Args:
            new_instance (CreatedInstance): Instance that should be stored.
        """
        # Host implementation of storing metadata about instance
        HostContext.add_instance(new_instance.data_to_store())
        new_instance.mark_as_stored()
        # Add instance to current context
        self._add_instance_to_context(new_instance)
class SettingsCreator(TrayPublishCreator):
    """Creator subclassed dynamically from project settings items.

    Concrete classes are produced by 'from_settings'.
    """

    create_allow_context_change = True
    create_allow_thumbnail = True
    # May be overridden per settings item in 'from_settings'.
    allow_version_control = False

    extensions = []

    def create(self, product_name, data, pre_create_data):
        """Create a new instance from pre-create attribute values."""
        # Pass precreate data to creator attributes
        thumbnail_path = pre_create_data.pop(PRE_CREATE_THUMBNAIL_KEY, None)

        # Fill 'version_to_use' if version control is enabled
        if self.allow_version_control:
            folder_path = data["folderPath"]
            product_entities_by_folder_path = self._prepare_next_versions(
                [folder_path], [product_name])
            version = product_entities_by_folder_path[folder_path].get(
                product_name
            )
            pre_create_data["version_to_use"] = version
            data["_previous_last_version"] = version

        data["creator_attributes"] = pre_create_data
        data["settings_creator"] = True
        # Create new instance
        new_instance = CreatedInstance(
            self.product_type, product_name, data, self
        )
        self._store_new_instance(new_instance)

        if thumbnail_path:
            self.set_instance_thumbnail_path(new_instance.id, thumbnail_path)

    def _prepare_next_versions(self, folder_paths, product_names):
        """Prepare next versions for given folder and product names.

        Todos:
            Expect combination of product names by folder path to avoid
                unnecessary server calls for unused products.

        Args:
            folder_paths (Iterable[str]): Folder paths.
            product_names (Iterable[str]): Product names.

        Returns:
            dict[str, dict[str, int]]: Next version numbers by folder path
                and product names.
        """
        # Prepare all versions for all combinations to '1'
        # TODO use 'ayon_core.pipeline.version_start' logic
        product_entities_by_folder_path = {
            folder_path: {
                product_name: 1
                for product_name in product_names
            }
            for folder_path in folder_paths
        }
        if not folder_paths or not product_names:
            return product_entities_by_folder_path

        folder_entities = ayon_api.get_folders(
            self.project_name,
            folder_paths=folder_paths,
            fields={"id", "path"}
        )
        folder_paths_by_id = {
            folder_entity["id"]: folder_entity["path"]
            for folder_entity in folder_entities
        }
        product_entities = list(ayon_api.get_products(
            self.project_name,
            folder_ids=folder_paths_by_id.keys(),
            product_names=product_names,
            fields={"id", "name", "folderId"}
        ))
        product_ids = {p["id"] for p in product_entities}
        last_versions = ayon_api.get_last_versions(
            self.project_name,
            product_ids,
            fields={"version", "productId"})

        for product_entity in product_entities:
            product_id = product_entity["id"]
            product_name = product_entity["name"]
            folder_id = product_entity["folderId"]
            folder_path = folder_paths_by_id[folder_id]
            last_version = last_versions.get(product_id)
            version = 0
            if last_version is not None:
                version = last_version["version"]
            # Base value is 1, so adding last version yields next version.
            product_entities_by_folder_path[folder_path][product_name] += (
                version
            )
        return product_entities_by_folder_path

    def _fill_next_versions(self, instances_data):
        """Fill next version for instances.

        Instances have also stored previous next version to be able to
        recognize if user did enter different version. If version was
        not changed by user, or user set it to '0' the next version will be
        updated by current database state.
        """
        # Only instances where the user did not pin a custom version.
        filtered_instance_data = []
        for instance in instances_data:
            previous_last_version = instance.get("_previous_last_version")
            creator_attributes = instance["creator_attributes"]
            use_next_version = creator_attributes.get(
                "use_next_version", True)
            version = creator_attributes.get("version_to_use", 0)
            if (
                use_next_version
                or version == 0
                or version == previous_last_version
            ):
                filtered_instance_data.append(instance)

        folder_paths = {
            instance["folderPath"]
            for instance in filtered_instance_data
        }
        product_names = {
            instance["productName"]
            for instance in filtered_instance_data}
        product_entities_by_folder_path = self._prepare_next_versions(
            folder_paths, product_names
        )
        for instance in filtered_instance_data:
            folder_path = instance["folderPath"]
            product_name = instance["productName"]
            version = product_entities_by_folder_path[folder_path][product_name]
            instance["creator_attributes"]["version_to_use"] = version
            instance["_previous_last_version"] = version

    def collect_instances(self):
        """Collect instances from host.

        Overridden to be able to manage version control attributes. If version
        control is disabled, the attributes will be removed from instances,
        and next versions are filled if is version control enabled.
        """
        instances_by_identifier = cache_and_get_instances(
            self, SHARED_DATA_KEY, list_instances
        )
        instances = instances_by_identifier[self.identifier]
        if not instances:
            return

        if self.allow_version_control:
            self._fill_next_versions(instances)

        for instance_data in instances:
            # Make sure that there are not data related to version control
            # if plugin does not support it
            if not self.allow_version_control:
                instance_data.pop("_previous_last_version", None)
                creator_attributes = instance_data["creator_attributes"]
                creator_attributes.pop("version_to_use", None)
                creator_attributes.pop("use_next_version", None)
            instance = CreatedInstance.from_existing(instance_data, self)
            self._add_instance_to_context(instance)

    def get_instance_attr_defs(self):
        """Instance attributes; adds version controls when enabled."""
        defs = self.get_pre_create_attr_defs()
        if self.allow_version_control:
            defs += [
                UISeparatorDef(),
                BoolDef(
                    "use_next_version",
                    default=True,
                    label="Use next version",
                ),
                NumberDef(
                    "version_to_use",
                    default=1,
                    minimum=0,
                    maximum=999,
                    label="Version to use",
                )
            ]
        return defs

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        return [
            FileDef(
                "representation_files",
                folders=False,
                extensions=self.extensions,
                allow_sequences=self.allow_sequences,
                single_item=not self.allow_multiple_items,
                label="Representations",
            ),
            FileDef(
                "reviewable",
                folders=False,
                extensions=REVIEW_EXTENSIONS,
                allow_sequences=True,
                single_item=True,
                label="Reviewable representations",
                extensions_label="Single reviewable item"
            )
        ]

    @classmethod
    def from_settings(cls, item_data):
        """Create a concrete creator class from one settings item.

        Args:
            item_data (dict): Settings item with identifier, product type,
                label, icon, extensions and other class attributes.

        Returns:
            type: New subclass of this creator.
        """
        identifier = item_data["identifier"]
        product_type = item_data["product_type"]
        if not identifier:
            identifier = "settings_{}".format(product_type)
        return type(
            "{}{}".format(cls.__name__, identifier),
            (cls, ),
            {
                "product_type": product_type,
                "identifier": identifier,
                "label": item_data["label"].strip(),
                "icon": item_data["icon"],
                "description": item_data["description"],
                "detailed_description": item_data["detailed_description"],
                "extensions": item_data["extensions"],
                "allow_sequences": item_data["allow_sequences"],
                "allow_multiple_items": item_data["allow_multiple_items"],
                "allow_version_control": item_data.get(
                    "allow_version_control", False),
                "default_variants": item_data["default_variants"],
            }
        )

View file

@@ -1,122 +0,0 @@
"""Functions to parse asset names, versions from file names"""
import os
import re
import ayon_api
from ayon_core.lib import Logger
def get_folder_entity_from_filename(
    project_name,
    source_filename,
    version_regex,
    all_selected_folder_ids=None
):
    """Try to parse out folder name from file name provided.

    Artists might provide various file name formats.
    Currently handled:
        - chair.mov
        - chair_v001.mov
        - my_chair_to_upload.mov

    Args:
        project_name (str): Project name.
        source_filename (str): File name to parse.
        version_regex (re.Pattern): Compiled regex whose groups yield
            (folder name, version number) pairs.
        all_selected_folder_ids (Optional[Iterable[str]]): Limit lookup
            to these folder ids.

    Returns:
        tuple: (matching folder entity or None, version number or None)
    """
    version = None
    folder_name = os.path.splitext(source_filename)[0]

    # Always first check if source filename is directly folder
    # (eg. 'chair.mov')
    matching_folder_entity = get_folder_by_name_case_not_sensitive(
        project_name, folder_name, all_selected_folder_ids)

    if matching_folder_entity is None:
        # name contains also a version
        matching_folder_entity, version = (
            parse_with_version(
                project_name,
                folder_name,
                version_regex,
                all_selected_folder_ids
            )
        )

    if matching_folder_entity is None:
        matching_folder_entity = parse_containing(
            project_name,
            folder_name,
            all_selected_folder_ids
        )

    return matching_folder_entity, version
def parse_with_version(
    project_name,
    folder_name,
    version_regex,
    all_selected_folder_ids=None,
    log=None
):
    """Try to parse folder name from a file name containing version too

    Eg. 'chair_v001.mov' >> 'chair', 1
    """
    log = log or Logger.get_logger(__name__)
    log.debug(
        ("Folder entity by \"{}\" was not found, trying version regex.".
         format(folder_name)))

    regex_result = version_regex.findall(folder_name)
    if not regex_result:
        return None, None

    # First match carries (folder name, version number) pair.
    name_candidate, version_str = regex_result[0]
    folder_entity = get_folder_by_name_case_not_sensitive(
        project_name,
        name_candidate,
        all_selected_folder_ids=all_selected_folder_ids
    )
    if not folder_entity:
        return None, None
    return folder_entity, int(version_str)
def parse_containing(project_name, folder_name, all_selected_folder_ids=None):
    """Look if file name contains any existing folder name"""
    lowered_name = folder_name.lower()
    folder_entities = ayon_api.get_folders(
        project_name,
        folder_ids=all_selected_folder_ids,
        fields={"id", "name"}
    )
    for folder_entity in folder_entities:
        # Case insensitive substring match against the file name.
        if folder_entity["name"].lower() in lowered_name:
            return ayon_api.get_folder_by_id(
                project_name,
                folder_entity["id"]
            )
    return None
def get_folder_by_name_case_not_sensitive(
    project_name,
    folder_name,
    all_selected_folder_ids=None,
    log=None
):
    """Find single folder by name, matched case-insensitively.

    Returns:
        Folder entity, or None when no match or more than one match.
    """
    if not log:
        log = Logger.get_logger(__name__)
    # NOTE(review): 'folder_name' is not re.escape()-d, so names with regex
    # metacharacters may match unexpectedly — confirm this is intended.
    folder_name = re.compile(folder_name, re.IGNORECASE)

    # NOTE(review): a compiled pattern object is passed as a folder name
    # filter; presumably the server API accepts regexes — verify.
    folder_entities = list(ayon_api.get_folders(
        project_name,
        folder_ids=all_selected_folder_ids,
        folder_names=[folder_name]
    ))
    if len(folder_entities) > 1:
        # Ambiguous match is treated as "not found".
        log.warning("Too many records found for {}".format(
            folder_name))
        return None

    if folder_entities:
        return folder_entities.pop()

View file

@@ -1,84 +0,0 @@
import pyblish.api
import pyblish.util
from ayon_api import get_folder_by_path, get_task_by_name
from ayon_core.lib.attribute_definitions import FileDefItem
from ayon_core.pipeline import install_host
from ayon_core.pipeline.create import CreateContext
from ayon_traypublisher.api import TrayPublisherHost
def csvpublish(
    filepath,
    project_name,
    folder_path,
    task_name=None,
    ignore_validators=False
):
    """Publish CSV file.

    Args:
        filepath (str): Path to CSV file.
        project_name (str): Project name.
        folder_path (str): Folder path.
        task_name (Optional[str]): Task name.
        ignore_validators (Optional[bool]): Option to ignore validators.

    Raises:
        ValueError: When the folder, or a provided task, does not exist
            on the server.
    """
    # initialization of host
    host = TrayPublisherHost()
    install_host(host)

    # setting host context into project
    host.set_project_name(project_name)

    # form precreate data with field values
    file_field = FileDefItem.from_paths([filepath], False).pop().to_dict()
    precreate_data = {
        "csv_filepath_data": file_field,
    }

    # create context initialization
    create_context = CreateContext(host, headless=True)
    folder_entity = get_folder_by_path(
        project_name,
        folder_path=folder_path,
    )
    if not folder_entity:
        # Fixed: the exception was constructed but never raised.
        raise ValueError(
            f"Folder path '{folder_path}' doesn't "
            f"exist at project '{project_name}'."
        )

    # Task is optional; only look it up (and validate) when provided.
    task_entity = None
    if task_name:
        task_entity = get_task_by_name(
            project_name,
            folder_entity["id"],
            task_name,
        )
        if not task_entity:
            # Fixed: the exception was constructed but never raised.
            raise ValueError(
                f"Task name '{task_name}' doesn't "
                f"exist at folder '{folder_path}'."
            )

    create_context.create(
        "io.ayon.creators.traypublisher.csv_ingest",
        "Main",
        folder_entity=folder_entity,
        task_entity=task_entity,
        pre_create_data=precreate_data,
    )

    # publishing context initialization
    pyblish_context = pyblish.api.Context()
    pyblish_context.data["create_context"] = create_context

    # redefine targets (skip 'local' to disable validators)
    # Fixed: 'targets' was unbound when 'ignore_validators' was False,
    # raising NameError on the publish call below.
    targets = None
    if ignore_validators:
        targets = ["default", "ingest"]

    # publishing
    pyblish.util.publish(context=pyblish_context, targets=targets)

View file

@@ -1,176 +0,0 @@
# -*- coding: utf-8 -*-
"""Creator of colorspace look files.
This creator is used to publish colorspace look files thanks to
production type `ociolook`. All files are published as representation.
"""
from pathlib import Path
import ayon_api
from ayon_core.lib.attribute_definitions import (
FileDef, EnumDef, TextDef, UISeparatorDef
)
from ayon_core.pipeline import (
CreatedInstance,
CreatorError
)
from ayon_core.pipeline import colorspace
from ayon_traypublisher.api.plugin import TrayPublishCreator
class CreateColorspaceLook(TrayPublishCreator):
    """Creates colorspace look files."""

    identifier = "io.ayon.creators.traypublisher.colorspace_look"
    label = "Colorspace Look"
    product_type = "ociolook"
    description = "Publishes color space look file."
    extensions = [".cc", ".cube", ".3dl", ".spi1d", ".spi3d", ".csp", ".lut"]
    enabled = False

    # First item means "no colorspace selected"; real items are filled
    # per project in 'apply_settings'.
    colorspace_items = [
        (None, "Not set")
    ]
    colorspace_attr_show = False
    # OCIO config info cached by 'apply_settings' and handed to instances
    # via transient data.
    config_items = None
    config_data = None

    def get_detail_description(self):
        return """# Colorspace Look

This creator publishes color space look file (LUT).
"""

    def get_icon(self):
        return "mdi.format-color-fill"

    def create(self, product_name, instance_data, pre_create_data):
        """Create an 'ociolook' instance from a selected LUT file."""
        repr_file = pre_create_data.get("luts_file")
        if not repr_file:
            raise CreatorError("No files specified")

        files = repr_file.get("filenames")
        if not files:
            # this should never happen
            raise CreatorError("Missing files from representation")

        folder_path = instance_data["folderPath"]
        task_name = instance_data["task"]
        folder_entity = ayon_api.get_folder_by_path(
            self.project_name, folder_path)
        task_entity = None
        if task_name:
            task_entity = ayon_api.get_task_by_name(
                self.project_name, folder_entity["id"], task_name
            )
        product_name = self.get_product_name(
            project_name=self.project_name,
            folder_entity=folder_entity,
            task_entity=task_entity,
            variant=instance_data["variant"],
        )
        instance_data["creator_attributes"] = {
            "abs_lut_path": (
                Path(repr_file["directory"]) / files[0]).as_posix()
        }

        # Create new instance
        new_instance = CreatedInstance(self.product_type, product_name,
                                       instance_data, self)
        new_instance.transient_data["config_items"] = self.config_items
        new_instance.transient_data["config_data"] = self.config_data

        self._store_new_instance(new_instance)

    def collect_instances(self):
        """Collect instances and attach current OCIO config info."""
        super().collect_instances()
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                instance.transient_data["config_items"] = self.config_items
                instance.transient_data["config_data"] = self.config_data

    def get_instance_attr_defs(self):
        # NOTE(review): default="Not set" is the item *label*; the item
        # value is None — confirm EnumDef resolves this as intended.
        return [
            EnumDef(
                "working_colorspace",
                self.colorspace_items,
                default="Not set",
                label="Working Colorspace",
            ),
            UISeparatorDef(
                label="Advanced1"
            ),
            TextDef(
                "abs_lut_path",
                label="LUT Path",
            ),
            EnumDef(
                "input_colorspace",
                self.colorspace_items,
                default="Not set",
                label="Input Colorspace",
            ),
            EnumDef(
                "direction",
                [
                    (None, "Not set"),
                    ("forward", "Forward"),
                    ("inverse", "Inverse")
                ],
                default="Not set",
                label="Direction"
            ),
            EnumDef(
                "interpolation",
                [
                    (None, "Not set"),
                    ("linear", "Linear"),
                    ("tetrahedral", "Tetrahedral"),
                    ("best", "Best"),
                    ("nearest", "Nearest")
                ],
                default="Not set",
                label="Interpolation"
            ),
            EnumDef(
                "output_colorspace",
                self.colorspace_items,
                default="Not set",
                label="Output Colorspace",
            ),
        ]

    def get_pre_create_attr_defs(self):
        return [
            FileDef(
                "luts_file",
                folders=False,
                extensions=self.extensions,
                allow_sequences=False,
                single_item=True,
                label="Look Files",
            )
        ]

    def apply_settings(self, project_settings):
        """Enable creator and fill colorspace items from project OCIO config."""
        config_data = colorspace.get_current_context_imageio_config_preset(
            project_settings=project_settings
        )
        if not config_data:
            self.enabled = False
            return

        filepath = config_data["path"]
        config_items = colorspace.get_ocio_config_colorspaces(filepath)
        labeled_colorspaces = colorspace.get_colorspaces_enumerator_items(
            config_items,
            include_aliases=True,
            include_roles=True
        )
        self.config_items = config_items
        self.config_data = config_data
        # Fixed: extending the class-level list accumulated duplicate
        # entries every time settings were applied. Build a fresh
        # instance-level list instead.
        self.colorspace_items = (
            [(None, "Not set")] + list(labeled_colorspaces)
        )
        self.enabled = True

View file

@ -1,812 +0,0 @@
import os
import re
import csv
import collections
from io import StringIO
from copy import deepcopy, copy
from typing import Optional, List, Set, Dict, Union, Any
import clique
import ayon_api
from ayon_core.pipeline.create import get_product_name
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import FileDef, BoolDef
from ayon_core.lib.transcoding import (
VIDEO_EXTENSIONS, IMAGE_EXTENSIONS
)
from ayon_core.pipeline.create import CreatorError
from ayon_traypublisher.api.plugin import TrayPublishCreator
def _get_row_value_with_validation(
columns_config: Dict[str, Any],
column_name: str,
row_data: Dict[str, Any],
):
"""Get row value with validation"""
# get column data from column config
column_data = None
for column in columns_config["columns"]:
if column["name"] == column_name:
column_data = column
break
if not column_data:
raise CreatorError(
f"Column '{column_name}' not found in column config."
)
# get column value from row
column_value = row_data.get(column_name)
column_required = column_data["required_column"]
# check if column value is not empty string and column is required
if column_value == "" and column_required:
raise CreatorError(
f"Value in column '{column_name}' is required."
)
# get column type
column_type = column_data["type"]
# get column validation regex
column_validation = column_data["validation_pattern"]
# get column default value
column_default = column_data["default"]
if column_type in ["number", "decimal"] and column_default == 0:
column_default = None
# check if column value is not empty string
if column_value == "":
# set default value if column value is empty string
column_value = column_default
# set column value to correct type following column type
if column_type == "number" and column_value is not None:
column_value = int(column_value)
elif column_type == "decimal" and column_value is not None:
column_value = float(column_value)
elif column_type == "bool":
column_value = column_value in ["true", "True"]
# check if column value matches validation regex
if (
column_value is not None and
not re.match(str(column_validation), str(column_value))
):
raise CreatorError(
f"Column '{column_name}' value '{column_value}'"
f" does not match validation regex '{column_validation}'"
f"\nRow data: {row_data}"
f"\nColumn data: {column_data}"
)
return column_value
class RepreItem:
    """Single representation row parsed from the ingest CSV."""

    def __init__(
        self,
        name,
        filepath,
        frame_start,
        frame_end,
        handle_start,
        handle_end,
        fps,
        thumbnail_path,
        colorspace,
        comment,
        slate_exists,
        tags,
    ):
        self.name = name
        self.filepath = filepath
        self.frame_start = frame_start
        self.frame_end = frame_end
        self.handle_start = handle_start
        self.handle_end = handle_end
        self.fps = fps
        self.thumbnail_path = thumbnail_path
        self.colorspace = colorspace
        self.comment = comment
        self.slate_exists = slate_exists
        self.tags = tags

    @classmethod
    def from_csv_row(cls, columns_config, repre_config, row):
        """Build a RepreItem from one CSV row using the column config."""
        column_by_key = (
            # Representation information
            ("filepath", "File Path"),
            ("frame_start", "Frame Start"),
            ("frame_end", "Frame End"),
            ("handle_start", "Handle Start"),
            ("handle_end", "Handle End"),
            ("fps", "FPS"),
            # Optional representation information
            ("thumbnail_path", "Version Thumbnail"),
            ("colorspace", "Representation Colorspace"),
            ("comment", "Version Comment"),
            ("name", "Representation"),
            ("slate_exists", "Slate Exists"),
            ("repre_tags", "Representation Tags"),
        )
        kwargs = {}
        for dst_key, column_name in column_by_key:
            kwargs[dst_key] = _get_row_value_with_validation(
                columns_config, column_name, row
            )

        # Normalize frame/handle values to int and fps to float even
        # though validation may already have converted them.
        for key in ("frame_start", "frame_end", "handle_start", "handle_end"):
            kwargs[key] = int(kwargs[key])
        kwargs["fps"] = float(kwargs["fps"])

        # Convert the tags cell into a list of tags
        repre_tags = kwargs.pop("repre_tags")
        tags = copy(repre_config["default_tags"])
        if repre_tags:
            delimiter = repre_config["tags_delimiter"]
            if delimiter in repre_tags:
                tags = [
                    tag.strip().lower()
                    for tag in repre_tags.split(delimiter)
                ]
            else:
                # Single tag is kept as-is (no strip/lowercase), matching
                # the multi-tag branch only in list shape.
                tags = [repre_tags]
        kwargs["tags"] = tags
        return cls(**kwargs)
class ProductItem:
    """Aggregated product context shared by one or more CSV rows."""

    def __init__(
        self,
        folder_path: str,
        task_name: str,
        version: int,
        variant: str,
        product_type: str,
        task_type: Optional[str] = None,
    ):
        self.folder_path = folder_path
        self.task_name = task_name
        self.task_type = task_type
        self.version = version
        self.variant = variant
        self.product_type = product_type
        # Representation rows that belong to this product.
        self.repre_items: List[RepreItem] = []
        # Lazily computed caches for the two name properties.
        self._unique_name = None
        self._pre_product_name = None

    @property
    def unique_name(self) -> str:
        """Identifier grouping CSV rows that describe the same product."""
        if self._unique_name is None:
            suffix = "{}{}{}".format(
                self.variant, self.product_type, self.version
            ).replace(" ", "").lower()
            self._unique_name = "/".join(
                [self.folder_path, self.task_name, suffix]
            )
        return self._unique_name

    @property
    def instance_name(self):
        """Name used for the created publish instance."""
        if self._pre_product_name is None:
            joined = "{}{}{}{}".format(
                self.task_name,
                self.variant,
                self.product_type,
                self.version,
            )
            self._pre_product_name = joined.replace(" ", "").lower()
        return self._pre_product_name

    def add_repre_item(self, repre_item: RepreItem):
        """Append a representation row to this product."""
        self.repre_items.append(repre_item)

    @classmethod
    def from_csv_row(cls, columns_config, row):
        """Build a ProductItem from the context columns of one CSV row."""
        mapping = (
            # Context information
            ("folder_path", "Folder Path"),
            ("task_name", "Task Name"),
            ("version", "Version"),
            ("variant", "Variant"),
            ("product_type", "Product Type"),
        )
        kwargs = {
            dst_key: _get_row_value_with_validation(
                columns_config, column_name, row
            )
            for dst_key, column_name in mapping
        }
        return cls(**kwargs)
class IngestCSV(TrayPublishCreator):
    """CSV ingest creator class

    Creates publish instances for every product/representation row found
    in a CSV file, driven by the column and representation configuration
    from project settings ('columns_config' / 'representations_config').
    """

    icon = "fa.file"

    label = "CSV Ingest"
    product_type = "csv_ingest_file"
    identifier = "io.ayon.creators.traypublisher.csv_ingest"

    default_variants = ["Main"]

    description = "Ingest products' data from CSV file"
    detailed_description = """
Ingest products' data from CSV file following column and representation
configuration in project settings.
"""

    # Position in the list of creators.
    order = 10

    # settings for this creator
    # (filled from project settings by the settings system)
    columns_config = {}
    representations_config = {}

    def get_instance_attr_defs(self):
        # Attributes shown on every created instance.
        return [
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]

    def get_pre_create_attr_defs(self):
        """Creating pre-create attributes at creator plugin.

        Returns:
            list: list of attribute object instances
        """
        # Use same attributes as for instance attributes
        return [
            FileDef(
                "csv_filepath_data",
                folders=False,
                extensions=[".csv"],
                allow_sequences=False,
                single_item=True,
                label="CSV File",
            ),
        ]

    def create(
        self,
        product_name: str,
        instance_data: Dict[str, Any],
        pre_create_data: Dict[str, Any]
    ):
        """Create product from each row found in the CSV.

        Args:
            product_name (str): The subset name.
            instance_data (dict): The instance data.
            pre_create_data (dict): Pre-create attribute values; expects
                'csv_filepath_data' from the file picker.

        Raises:
            CreatorError: When the CSV directory does not exist.
        """
        csv_filepath_data = pre_create_data.get("csv_filepath_data", {})

        csv_dir = csv_filepath_data.get("directory", "")
        if not os.path.exists(csv_dir):
            raise CreatorError(
                f"Directory '{csv_dir}' does not exist."
            )
        filename = csv_filepath_data.get("filenames", [])
        # NOTE(review): an empty 'filenames' list would raise IndexError
        # below -- presumably the single-item file picker guarantees one
        # entry; confirm.
        self._process_csv_file(
            product_name, instance_data, csv_dir, filename[0]
        )

    def _pass_data_to_csv_instance(
        self,
        instance_data: Dict[str, Any],
        staging_dir: str,
        filename: str
    ):
        """Pass CSV representation file to instance data"""
        representation = {
            "name": "csv",
            "ext": "csv",
            "files": filename,
            "stagingDir": staging_dir,
            "stagingDir_persistent": True,
        }

        instance_data.update({
            # NOTE(review): f-string has no placeholder; the label was
            # likely meant to include the csv filename -- confirm upstream.
            "label": f"CSV: (unknown)",
            "representations": [representation],
            "stagingDir": staging_dir,
            "stagingDir_persistent": True,
        })

    def _process_csv_file(
        self,
        product_name: str,
        instance_data: Dict[str, Any],
        csv_dir: str,
        filename: str
    ):
        """Process CSV file.

        Creates one carrier instance for the CSV file itself plus one
        instance per product described in the file.

        Args:
            product_name (str): The subset name.
            instance_data (dict): The instance data.
            csv_dir (str): The csv directory.
            filename (str): The filename.
        """
        # create new instance from the csv file via self function
        self._pass_data_to_csv_instance(
            instance_data,
            csv_dir,
            filename
        )

        csv_instance = CreatedInstance(
            self.product_type, product_name, instance_data, self
        )

        csv_instance["csvFileData"] = {
            "filename": filename,
            "staging_dir": csv_dir,
        }

        # create instances from csv data via self function
        instances = self._create_instances_from_csv_data(csv_dir, filename)
        for instance in instances:
            self._store_new_instance(instance)
        # Carrier csv instance is stored after the product instances.
        self._store_new_instance(csv_instance)

    def _resolve_repre_path(
        self, csv_dir: str, filepath: Union[str, None]
    ) -> Union[str, None]:
        """Resolve a (possibly relative) path cell from the CSV.

        Args:
            csv_dir (str): Directory of the csv file, used as base for
                relative paths.
            filepath (Union[str, None]): Path cell value.

        Returns:
            Union[str, None]: Resolved path, or the input unchanged when
                it could not (or need not) be resolved.
        """
        if not filepath:
            return filepath

        # Validate only existence of file directory as filename
        # may contain frame specific char (e.g. '%04d' or '####').
        filedir, filename = os.path.split(filepath)
        if not filedir or filedir == ".":
            # If filedir is empty or "." then use same directory as
            # csv path
            filepath = os.path.join(csv_dir, filepath)

        elif not os.path.exists(filedir):
            # If filepath does not exist, first try to find it in the
            # same directory as the csv file is, but keep original
            # value otherwise.
            new_filedir = os.path.join(csv_dir, filedir)
            if os.path.exists(new_filedir):
                filepath = os.path.join(new_filedir, filename)

        return filepath

    def _get_data_from_csv(
        self, csv_dir: str, filename: str
    ) -> Dict[str, ProductItem]:
        """Generate product items from the csv file.

        Parses rows into 'ProductItem'/'RepreItem' objects, validates
        folder paths and task names against the server, resolves relative
        representation paths and checks for duplicated filepaths.

        Raises:
            CreatorError: Missing required columns, unknown folders or
                tasks, or duplicated representation filepaths.
        """
        # get current project name and code from context.data
        project_name = self.create_context.get_current_project_name()
        csv_path = os.path.join(csv_dir, filename)

        # make sure csv file contains columns from following list
        required_columns = [
            column["name"]
            for column in self.columns_config["columns"]
            if column["required_column"]
        ]

        # read csv file
        with open(csv_path, "r") as csv_file:
            csv_content = csv_file.read()

        # read csv file with DictReader
        csv_reader = csv.DictReader(
            StringIO(csv_content),
            delimiter=self.columns_config["csv_delimiter"]
        )

        # fix fieldnames
        # sometimes someone can keep extra space at the start or end of
        # the column name
        # ('rsplit' + join also collapses internal whitespace runs)
        all_columns = [
            " ".join(column.rsplit())
            for column in csv_reader.fieldnames
        ]
        # return back fixed fieldnames
        csv_reader.fieldnames = all_columns

        # check if csv file contains all required columns
        if any(column not in all_columns for column in required_columns):
            raise CreatorError(
                f"Missing required columns: {required_columns}"
            )

        # Group rows by product; first row of a product creates the item,
        # every row contributes a representation item.
        product_items_by_name: Dict[str, ProductItem] = {}
        for row in csv_reader:
            _product_item: ProductItem = ProductItem.from_csv_row(
                self.columns_config, row
            )
            unique_name = _product_item.unique_name
            if unique_name not in product_items_by_name:
                product_items_by_name[unique_name] = _product_item
            product_item: ProductItem = product_items_by_name[unique_name]
            product_item.add_repre_item(
                RepreItem.from_csv_row(
                    self.columns_config,
                    self.representations_config,
                    row
                )
            )

        # Validate all folder paths exist on the server.
        folder_paths: Set[str] = {
            product_item.folder_path
            for product_item in product_items_by_name.values()
        }
        folder_ids_by_path: Dict[str, str] = {
            folder_entity["path"]: folder_entity["id"]
            for folder_entity in ayon_api.get_folders(
                project_name, folder_paths=folder_paths, fields={"id", "path"}
            )
        }
        missing_paths: Set[str] = folder_paths - set(folder_ids_by_path.keys())
        if missing_paths:
            ending = "" if len(missing_paths) == 1 else "s"
            joined_paths = "\n".join(sorted(missing_paths))
            raise CreatorError(
                f"Folder{ending} not found.\n{joined_paths}"
            )

        # Validate all task names exist under their folders and fill in
        # the task type on each product item.
        task_names: Set[str] = {
            product_item.task_name
            for product_item in product_items_by_name.values()
        }
        task_entities_by_folder_id = collections.defaultdict(list)
        for task_entity in ayon_api.get_tasks(
            project_name,
            folder_ids=set(folder_ids_by_path.values()),
            task_names=task_names,
            fields={"folderId", "name", "taskType"}
        ):
            folder_id = task_entity["folderId"]
            task_entities_by_folder_id[folder_id].append(task_entity)

        missing_tasks: Set[str] = set()
        for product_item in product_items_by_name.values():
            folder_path = product_item.folder_path
            task_name = product_item.task_name
            folder_id = folder_ids_by_path[folder_path]
            task_entities = task_entities_by_folder_id[folder_id]
            task_entity = next(
                (
                    task_entity
                    for task_entity in task_entities
                    if task_entity["name"] == task_name
                ),
                None
            )
            if task_entity is None:
                missing_tasks.add("/".join([folder_path, task_name]))
            else:
                product_item.task_type = task_entity["taskType"]

        if missing_tasks:
            ending = "" if len(missing_tasks) == 1 else "s"
            joined_paths = "\n".join(sorted(missing_tasks))
            raise CreatorError(
                f"Task{ending} not found.\n{joined_paths}"
            )

        # Resolve relative paths and reject duplicated filepaths within
        # a single product.
        for product_item in product_items_by_name.values():
            repre_paths: Set[str] = set()
            duplicated_paths: Set[str] = set()
            for repre_item in product_item.repre_items:
                # Resolve relative paths in csv file
                repre_item.filepath = self._resolve_repre_path(
                    csv_dir, repre_item.filepath
                )
                repre_item.thumbnail_path = self._resolve_repre_path(
                    csv_dir, repre_item.thumbnail_path
                )
                filepath = repre_item.filepath
                if filepath in repre_paths:
                    duplicated_paths.add(filepath)
                repre_paths.add(filepath)

            if duplicated_paths:
                ending = "" if len(duplicated_paths) == 1 else "s"
                joined_names = "\n".join(sorted(duplicated_paths))
                raise CreatorError(
                    f"Duplicate filename{ending} in csv file.\n{joined_names}"
                )

        return product_items_by_name

    def _add_thumbnail_repre(
        self,
        thumbnails: Set[str],
        instance: CreatedInstance,
        repre_item: RepreItem,
        multiple_thumbnails: bool,
    ) -> Union[str, None]:
        """Add thumbnail to instance.

        Add thumbnail as representation and set 'thumbnailPath' if is not set
        yet.

        Args:
            thumbnails (Set[str]): Set of all thumbnail paths that should
                create representation.
            instance (CreatedInstance): Instance from create plugin.
            repre_item (RepreItem): Representation item.
            multiple_thumbnails (bool): There are multiple representations
                with thumbnail.

        Returns:
            Union[str, None]: Explicit output name for thumbnail
                representation.
        """
        if not thumbnails:
            return None

        thumbnail_path = repre_item.thumbnail_path
        if not thumbnail_path or thumbnail_path not in thumbnails:
            return None

        # Each thumbnail path creates exactly one representation; remove
        # it from the pending set so later items skip it.
        thumbnails.remove(thumbnail_path)
        thumb_dir, thumb_file = os.path.split(thumbnail_path)
        thumb_basename, thumb_ext = os.path.splitext(thumb_file)

        # NOTE 'explicit_output_name' and custom repre name was set only
        #   when 'multiple_thumbnails' is True and 'review' tag is present.
        #   That was changed to set 'explicit_output_name' is set when
        #   'multiple_thumbnails' is True.
        # is_reviewable = "review" in repre_item.tags
        repre_name = "thumbnail"
        explicit_output_name = None
        if multiple_thumbnails:
            repre_name = f"thumbnail_{thumb_basename}"
            explicit_output_name = repre_item.name

        thumbnail_repre_data = {
            "name": repre_name,
            "ext": thumb_ext.lstrip("."),
            "files": thumb_file,
            "stagingDir": thumb_dir,
            "stagingDir_persistent": True,
            "tags": ["thumbnail", "delete"],
        }
        if explicit_output_name:
            thumbnail_repre_data["outputName"] = explicit_output_name

        instance["prepared_data_for_repres"].append({
            "type": "thumbnail",
            "colorspace": None,
            "representation": thumbnail_repre_data,
        })
        # also add thumbnailPath for ayon to integrate
        if not instance.get("thumbnailPath"):
            instance["thumbnailPath"] = thumbnail_path
        return explicit_output_name

    def _add_representation(
        self,
        instance: CreatedInstance,
        repre_item: RepreItem,
        explicit_output_name: Optional[str] = None
    ):
        """Get representation data

        Args:
            instance (CreatedInstance): Instance the representation is
                appended to.
            repre_item (RepreItem): Representation item based on csv row.
            explicit_output_name (Optional[str]): Explicit output name.
                For grouping purposes with reviewable components.

        Raises:
            CreatorError: Unknown representation name, invalid extension
                or missing source directory/sequence.
        """
        # get extension of file
        basename: str = os.path.basename(repre_item.filepath)
        extension: str = os.path.splitext(basename)[-1].lower()

        # validate filepath is having correct extension based on output
        repre_config_data: Union[Dict[str, Any], None] = None
        for repre in self.representations_config["representations"]:
            if repre["name"] == repre_item.name:
                repre_config_data = repre
                break

        if not repre_config_data:
            raise CreatorError(
                f"Representation '{repre_item.name}' not found "
                "in config representation data."
            )

        validate_extensions: List[str] = repre_config_data["extensions"]
        if extension not in validate_extensions:
            raise CreatorError(
                f"File extension '{extension}' not valid for "
                f"output '{validate_extensions}'."
            )

        is_sequence: bool = extension in IMAGE_EXTENSIONS

        # convert ### string in file name to %03d
        # this is for correct frame range validation
        # example: file.###.exr -> file.%03d.exr
        if "#" in basename:
            # 'split("#")' yields one more element than '#' occurrences,
            # so this counts the '#' run length.
            padding = len(basename.split("#")) - 1
            basename = basename.replace("#" * padding, f"%0{padding}d")
            is_sequence = True

        # make absolute path to file
        dirname: str = os.path.dirname(repre_item.filepath)

        # check if dirname exists
        if not os.path.isdir(dirname):
            raise CreatorError(
                f"Directory '{dirname}' does not exist."
            )

        frame_start: Union[int, None] = None
        frame_end: Union[int, None] = None
        files: Union[str, List[str]] = basename
        if is_sequence:
            # collect all data from dirname
            # NOTE(review): only the first collection in the directory is
            # used -- assumes one sequence per directory; confirm.
            cols, _ = clique.assemble(list(os.listdir(dirname)))
            if not cols:
                raise CreatorError(
                    f"No collections found in directory '{dirname}'."
                )
            col = cols[0]
            files = list(col)
            frame_start = min(col.indexes)
            frame_end = max(col.indexes)

        tags: List[str] = deepcopy(repre_item.tags)
        # if slate in repre_data is True then remove one frame from start
        if repre_item.slate_exists:
            tags.append("has_slate")

        # get representation data
        representation_data: Dict[str, Any] = {
            "name": repre_item.name,
            "ext": extension[1:],
            "files": files,
            "stagingDir": dirname,
            "stagingDir_persistent": True,
            "tags": tags,
        }
        if extension in VIDEO_EXTENSIONS:
            representation_data.update({
                "fps": repre_item.fps,
                "outputName": repre_item.name,
            })

        if explicit_output_name:
            representation_data["outputName"] = explicit_output_name

        if frame_start:
            representation_data["frameStart"] = frame_start
        if frame_end:
            representation_data["frameEnd"] = frame_end

        instance["prepared_data_for_repres"].append({
            "type": "media",
            "colorspace": repre_item.colorspace,
            "representation": representation_data,
        })

    def _prepare_representations(
        self, product_item: ProductItem, instance: CreatedInstance
    ):
        """Fill 'prepared_data_for_repres' for all rows of a product."""
        # Collect thumbnail paths from all representation items
        #   to check if multiple thumbnails are present.
        # Once representation is created for certain thumbnail it is removed
        #   from the set.
        thumbnails: Set[str] = {
            repre_item.thumbnail_path
            for repre_item in product_item.repre_items
            if repre_item.thumbnail_path
        }
        multiple_thumbnails: bool = len(thumbnails) > 1
        for repre_item in product_item.repre_items:
            explicit_output_name = self._add_thumbnail_repre(
                thumbnails,
                instance,
                repre_item,
                multiple_thumbnails,
            )

            # get representation data
            self._add_representation(
                instance,
                repre_item,
                explicit_output_name
            )

    def _create_instances_from_csv_data(self, csv_dir: str, filename: str):
        """Create instances from csv data"""
        # from special function get all data from csv file and convert them
        # to new instances
        product_items_by_name: Dict[str, ProductItem] = (
            self._get_data_from_csv(csv_dir, filename)
        )

        instances = []
        project_name: str = self.create_context.get_current_project_name()
        for product_item in product_items_by_name.values():
            folder_path: str = product_item.folder_path
            version: int = product_item.version
            product_name: str = get_product_name(
                project_name,
                product_item.task_name,
                product_item.task_type,
                self.host_name,
                product_item.product_type,
                product_item.variant
            )
            label: str = f"{folder_path}_{product_name}_v{version:>03}"

            # Frame range and fps for the instance come from the first
            # representation row of the product.
            repre_items: List[RepreItem] = product_item.repre_items
            first_repre_item: RepreItem = repre_items[0]
            # First non-empty comment of any row wins.
            version_comment: Union[str, None] = next(
                (
                    repre_item.comment
                    for repre_item in repre_items
                    if repre_item.comment
                ),
                None
            )
            slate_exists: bool = any(
                repre_item.slate_exists
                for repre_item in repre_items
            )

            families: List[str] = ["csv_ingest"]
            if slate_exists:
                # adding slate to families mainly for loaders to be able
                # to filter out slates
                families.append("slate")

            instance_data = {
                "name": product_item.instance_name,
                "folderPath": folder_path,
                "families": families,
                "label": label,
                "task": product_item.task_name,
                "variant": product_item.variant,
                "source": "csv",
                "frameStart": first_repre_item.frame_start,
                "frameEnd": first_repre_item.frame_end,
                "handleStart": first_repre_item.handle_start,
                "handleEnd": first_repre_item.handle_end,
                "fps": first_repre_item.fps,
                "version": version,
                "comment": version_comment,
                "prepared_data_for_repres": []
            }

            # create new instance
            new_instance: CreatedInstance = CreatedInstance(
                product_item.product_type,
                product_name,
                instance_data,
                self
            )
            self._prepare_representations(product_item, new_instance)
            instances.append(new_instance)

        return instances

View file

@ -1,847 +0,0 @@
import os
from copy import deepcopy
import ayon_api
import opentimelineio as otio
from ayon_traypublisher.api.plugin import (
TrayPublishCreator,
HiddenTrayPublishCreator
)
from ayon_traypublisher.api.editorial import (
ShotMetadataSolver
)
from ayon_core.pipeline import CreatedInstance
from ayon_core.lib import (
get_ffprobe_data,
convert_ffprobe_fps_value,
FileDef,
TextDef,
NumberDef,
EnumDef,
BoolDef,
UISeparatorDef,
UILabelDef
)
# Attribute definitions shared by all editorial clip instances:
# frame rate selection, workfile start frame and head/tail handles.
CLIP_ATTR_DEFS = [
    EnumDef(
        "fps",
        items=[
            {"value": "from_selection", "label": "From selection"},
            # Bug fix: value was 23.997, which is not a real frame rate
            # and contradicted the label -- NTSC pulldown rate is 23.976.
            {"value": 23.976, "label": "23.976"},
            {"value": 24, "label": "24"},
            {"value": 25, "label": "25"},
            {"value": 29.97, "label": "29.97"},
            {"value": 30, "label": "30"}
        ],
        label="FPS"
    ),
    NumberDef(
        "workfile_start_frame",
        default=1001,
        label="Workfile start frame"
    ),
    NumberDef(
        "handle_start",
        default=0,
        label="Handle start"
    ),
    NumberDef(
        "handle_end",
        default=0,
        label="Handle end"
    )
]
class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator):
    """Wrapper class for clip product type creators."""
    host_name = "traypublisher"

    def create(self, instance_data, source_data=None):
        """Create and register a clip instance from prepared data."""
        instance = CreatedInstance(
            self.product_type,
            instance_data["productName"],
            instance_data,
            self
        )
        self._store_new_instance(instance)
        return instance

    def get_instance_attr_defs(self):
        """Instance attributes shown in the publisher UI."""
        return [
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]
class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase):
    """Shot product type class

    The shot metadata instance carrier.
    """
    identifier = "editorial_shot"
    product_type = "shot"
    label = "Editorial Shot"

    def get_instance_attr_defs(self):
        """Folder path attribute followed by the shared clip attributes."""
        return [
            TextDef(
                "folderPath",
                label="Folder path"
            ),
            *CLIP_ATTR_DEFS
        ]
class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase):
    """Creator of hidden 'plate' instances.

    Carries the plate representation for an editorial clip.
    """
    product_type = "plate"
    identifier = "editorial_plate"
    label = "Editorial Plate"
class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase):
    """Creator of hidden 'audio' instances.

    Carries the audio representation for an editorial clip.
    """
    product_type = "audio"
    identifier = "editorial_audio"
    label = "Editorial Audio"
class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase):
    """Creator of hidden 'review' instances.

    Carries the review representation for an editorial clip.
    """
    product_type = "review"
    identifier = "editorial_review"
    label = "Editorial Review"
class EditorialSimpleCreator(TrayPublishCreator):
    """Editorial creator class

    Simple workflow creator. This creator dissects the input
    video file into clip chunks and converts each one to the
    formats defined in Settings for each product preset.

    Args:
        TrayPublishCreator (Creator): Tray publisher plugin class
    """
    label = "Editorial Simple"
    product_type = "editorial"
    identifier = "editorial_simple"
    default_variants = [
        "main"
    ]
    description = "Editorial files to generate shots."
    detailed_description = """
Supporting publishing new shots to project
or updating already created. Publishing will create OTIO file.
"""
    icon = "fa.file"
    # Per-product-type presets loaded from project settings
    # in 'apply_settings'.
    product_type_presets = []
def __init__(self, *args, **kwargs):
self._shot_metadata_solver = ShotMetadataSolver(self.log)
super(EditorialSimpleCreator, self).__init__(*args, **kwargs)
def apply_settings(self, project_settings):
editorial_creators = deepcopy(
project_settings["traypublisher"]["editorial_creators"]
)
creator_settings = editorial_creators.get(self.identifier)
self._shot_metadata_solver.update_data(
creator_settings["clip_name_tokenizer"],
creator_settings["shot_rename"],
creator_settings["shot_hierarchy"],
creator_settings["shot_add_tasks"]
)
self.product_type_presets = creator_settings["product_type_presets"]
default_variants = creator_settings.get("default_variants")
if default_variants:
self.default_variants = default_variants
    def create(self, product_name, instance_data, pre_create_data):
        """Create the otio editorial instance plus per-clip instances.

        Args:
            product_name (str): Product name for the editorial instance.
            instance_data (dict): Instance data from the publisher UI.
            pre_create_data (dict): Pre-create attribute values (sequence
                and media file paths, fps, per-product-type toggles).
        """
        allowed_product_type_presets = self._get_allowed_product_type_presets(
            pre_create_data)
        product_types = {
            item["product_type"]
            for item in self.product_type_presets
        }
        # Pre-create values passed on to clip instances -- everything
        # except the sequence file data and the product type toggles.
        clip_instance_properties = {
            k: v
            for k, v in pre_create_data.items()
            if k != "sequence_filepath_data"
            if k not in product_types
        }

        folder_path = instance_data["folderPath"]
        folder_entity = ayon_api.get_folder_by_path(
            self.project_name, folder_path
        )

        if pre_create_data["fps"] == "from_selection":
            # get 'fps' from folder attributes
            fps = folder_entity["attrib"]["fps"]
        else:
            fps = float(pre_create_data["fps"])

        instance_data.update({
            "fps": fps
        })

        # get path of sequence
        sequence_path_data = pre_create_data["sequence_filepath_data"]
        media_path_data = pre_create_data["media_filepaths_data"]

        sequence_paths = self._get_path_from_file_data(
            sequence_path_data, multi=True)
        media_path = self._get_path_from_file_data(media_path_data)

        # Tracks of every processed sequence are merged into the first
        # timeline so one OTIO file covers all input sequences.
        first_otio_timeline = None
        for seq_path in sequence_paths:
            # get otio timeline
            otio_timeline = self._create_otio_timeline(
                seq_path, fps)

            # Create all clip instances
            clip_instance_properties.update({
                "fps": fps,
                "variant": instance_data["variant"]
            })

            # create clip instances
            self._get_clip_instances(
                folder_entity,
                otio_timeline,
                media_path,
                clip_instance_properties,
                allowed_product_type_presets,
                os.path.basename(seq_path),
                first_otio_timeline,
            )

            if not first_otio_timeline:
                # assign otio timeline for multi file to layer
                first_otio_timeline = otio_timeline

        # create otio editorial instance
        # NOTE(review): 'seq_path' here is the last iterated sequence
        # path -- presumably intentional as a representative source path;
        # confirm.
        self._create_otio_instance(
            product_name,
            instance_data,
            seq_path,
            media_path,
            first_otio_timeline
        )
def _create_otio_instance(
self,
product_name,
data,
sequence_path,
media_path,
otio_timeline
):
"""Otio instance creating function
Args:
product_name (str): Product name.
data (dict): instance data
sequence_path (str): path to sequence file
media_path (str): path to media file
otio_timeline (otio.Timeline): otio timeline object
"""
# Pass precreate data to creator attributes
data.update({
"sequenceFilePath": sequence_path,
"editorialSourcePath": media_path,
"otioTimeline": otio.adapters.write_to_string(otio_timeline)
})
new_instance = CreatedInstance(
self.product_type, product_name, data, self
)
self._store_new_instance(new_instance)
def _create_otio_timeline(self, sequence_path, fps):
"""Creating otio timeline from sequence path
Args:
sequence_path (str): path to sequence file
fps (float): frame per second
Returns:
otio.Timeline: otio timeline object
"""
# get editorial sequence file into otio timeline object
extension = os.path.splitext(sequence_path)[1]
kwargs = {}
if extension == ".edl":
# EDL has no frame rate embedded so needs explicit
# frame rate else 24 is assumed.
kwargs["rate"] = fps
kwargs["ignore_timecode_mismatch"] = True
return otio.adapters.read_from_file(sequence_path, **kwargs)
def _get_path_from_file_data(self, file_path_data, multi=False):
"""Converting creator path data to single path string
Args:
file_path_data (FileDefItem): creator path data inputs
multi (bool): switch to multiple files mode
Raises:
FileExistsError: in case nothing had been set
Returns:
str: path string
"""
return_path_list = []
if isinstance(file_path_data, list):
return_path_list = [
os.path.join(f["directory"], f["filenames"][0])
for f in file_path_data
]
if not return_path_list:
raise FileExistsError(
f"File path was not added: {file_path_data}")
return return_path_list if multi else return_path_list[0]
    def _get_clip_instances(
        self,
        folder_entity,
        otio_timeline,
        media_path,
        instance_data,
        product_type_presets,
        sequence_file_name,
        first_otio_timeline=None
    ):
        """Helping function for creating clip instance

        Args:
            folder_entity (dict[str, Any]): Folder entity.
            otio_timeline (otio.Timeline): otio timeline object
            media_path (str): media file path string
            instance_data (dict): clip instance data
            product_type_presets (list): list of dict settings product presets
            sequence_file_name (str): sequence file name used in track names
            first_otio_timeline (otio.Timeline): when publishing several
                sequence files, processed tracks are appended to this
                timeline
        """
        # Only video tracks are processed.
        tracks = [
            track for track in otio_timeline.each_child(
                descended_from_type=otio.schema.Track)
            if track.kind == "Video"
        ]

        # media data for audio stream and reference solving
        media_data = self._get_media_source_metadata(media_path)

        for track in tracks:
            # set track name
            track.name = f"{sequence_file_name} - {otio_timeline.name}"
            try:
                track_start_frame = (
                    abs(track.source_range.start_time.value)
                )
                # NOTE(review): 'self.timeline_frame_start' is not defined
                # in this view -- presumably a class attribute set
                # elsewhere; confirm it exists (AttributeError falls back
                # to 0 either way).
                track_start_frame -= self.timeline_frame_start
            except AttributeError:
                track_start_frame = 0

            for otio_clip in track.each_child():
                if not self._validate_clip_for_processing(otio_clip):
                    continue

                # get available frames info to clip data
                self._create_otio_reference(otio_clip, media_path, media_data)

                # convert timeline range to source range
                self._restore_otio_source_range(otio_clip)

                base_instance_data = self._get_base_instance_data(
                    otio_clip,
                    instance_data,
                    track_start_frame,
                    folder_entity
                )

                # Shared between the shot instance and its product
                # instances so they can be linked together.
                parenting_data = {
                    "instance_label": None,
                    "instance_id": None
                }

                for product_type_preset in product_type_presets:
                    # exclude audio product type if no audio stream
                    if (
                        product_type_preset["product_type"] == "audio"
                        and not media_data.get("audio")
                    ):
                        continue
                    self._make_product_instance(
                        otio_clip,
                        product_type_preset,
                        deepcopy(base_instance_data),
                        parenting_data
                    )

            # add track to first otioTimeline if it is in input args
            if first_otio_timeline:
                first_otio_timeline.tracks.append(deepcopy(track))
    def _restore_otio_source_range(self, otio_clip):
        """Infusing source range.

        Otio clip is missing proper source clip range so
        here we add them from the parent timeline frame range.

        Args:
            otio_clip (otio.Clip): otio clip object
        """
        otio_clip.source_range = otio_clip.range_in_parent()
def _create_otio_reference(
self,
otio_clip,
media_path,
media_data
):
"""Creating otio reference at otio clip.
Args:
otio_clip (otio.Clip): otio clip object
media_path (str): media file path string
media_data (dict): media metadata
"""
start_frame = media_data["start_frame"]
frame_duration = media_data["duration"]
fps = media_data["fps"]
available_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
start_frame, fps),
duration=otio.opentime.RationalTime(
frame_duration, fps)
)
# in case old OTIO or video file create `ExternalReference`
media_reference = otio.schema.ExternalReference(
target_url=media_path,
available_range=available_range
)
otio_clip.media_reference = media_reference
def _get_media_source_metadata(self, path):
    """Read stream metadata from a media file via ffprobe.

    Args:
        path (str): media file path string

    Raises:
        AssertionError: ffprobe couldn't read metadata

    Returns:
        dict: media file metadata
    """
    metadata = {}
    try:
        probe_data = get_ffprobe_data(
            path, self.log
        )

        # partition streams by codec type; other types are ignored
        streams_by_type = {"audio": [], "video": []}
        for stream in probe_data["streams"]:
            codec_type = stream.get("codec_type")
            if codec_type in streams_by_type:
                streams_by_type[codec_type].append(stream)

        video_streams = streams_by_type["video"]
        if not video_streams:
            raise ValueError(
                "Could not find video stream in source file."
            )

        first_video_stream = video_streams[0]
        metadata = {
            "video": True,
            "start_frame": 0,
            "duration": int(first_video_stream["nb_frames"]),
            "fps": float(
                convert_ffprobe_fps_value(
                    first_video_stream["r_frame_rate"]
                )
            )
        }

        # flag presence of any audio stream
        if streams_by_type["audio"]:
            metadata["audio"] = True
    except Exception as exc:
        # NOTE(review): kept as AssertionError for backward
        #   compatibility with callers catching it
        raise AssertionError((
            "FFprobe couldn't read information about input file: "
            f"\"{path}\". Error message: {exc}"
        ))
    return metadata
def _make_product_instance(
    self,
    otio_clip,
    product_type_preset,
    instance_data,
    parenting_data
):
    """Create one product instance for a clip from a single preset.

    Args:
        otio_clip (otio.Clip): otio clip object
        product_type_preset (dict): single product type preset
        instance_data (dict): instance data
        parenting_data (dict): shot instance parent data

    Returns:
        CreatedInstance: creator instance object
    """
    product_type = product_type_preset["product_type"]
    label = self._make_product_naming(
        product_type_preset,
        instance_data
    )
    instance_data["label"] = label

    creators = self.create_context.creators
    if product_type == "shot":
        # shot instance carries the serialized otio clip and becomes
        # the parent of all other product instances of this clip
        instance_data["otioClip"] = (
            otio.adapters.write_to_string(otio_clip))
        created_instance = creators["editorial_shot"].create(
            instance_data)
        parenting_data.update({
            "instance_label": label,
            "instance_id": created_instance.data["instance_id"]
        })
    else:
        # non-shot products are parented under the shot instance;
        # review family is added when enabled in the preset
        instance_data.update({
            "outputFileType": product_type_preset["output_file_type"],
            "parent_instance_id": parenting_data["instance_id"],
            "creator_attributes": {
                "parent_instance": parenting_data["instance_label"],
                "add_review_family": product_type_preset.get("review")
            }
        })
        created_instance = creators[
            f"editorial_{product_type}"].create(instance_data)

    return created_instance
def _make_product_naming(self, product_type_preset, instance_data):
"""Product name maker
Args:
product_type_preset (dict): single preset item
instance_data (dict): instance data
Returns:
str: label string
"""
folder_path = instance_data["creator_attributes"]["folderPath"]
variant_name = instance_data["variant"]
product_type = product_type_preset["product_type"]
# get variant name from preset or from inheritance
_variant_name = product_type_preset.get("variant") or variant_name
# product name
product_name = "{}{}".format(
product_type, _variant_name.capitalize()
)
label = "{} {}".format(
folder_path,
product_name
)
instance_data.update({
"label": label,
"variant": _variant_name,
"productType": product_type,
"productName": product_name,
})
return label
def _get_base_instance_data(
    self,
    otio_clip,
    instance_data,
    track_start_frame,
    folder_entity,
):
    """Build the shared instance data for one clip.

    Args:
        otio_clip (otio.Clip): otio clip object
        instance_data (dict): precreate instance data
        track_start_frame (int): track start frame
        folder_entity (dict): selected parent folder entity

    Returns:
        dict: instance data
    """
    parent_folder_path = folder_entity["path"]
    parent_folder_name = parent_folder_path.rsplit("/", 1)[-1]

    # clip instance properties coming from pre-create attributes
    handle_start = instance_data["handle_start"]
    handle_end = instance_data["handle_end"]
    timeline_offset = instance_data["timeline_offset"]
    workfile_start_frame = instance_data["workfile_start_frame"]
    fps = instance_data["fps"]
    variant_name = instance_data["variant"]

    # shot name is resolved from the clip name without extension
    clip_name = os.path.splitext(otio_clip.name)[0]

    project_entity = ayon_api.get_project(self.project_name)
    shot_name, shot_metadata = self._shot_metadata_solver.generate_data(
        clip_name,
        {
            "anatomy_data": {
                "project": {
                    "name": self.project_name,
                    "code": project_entity["code"]
                },
                "parent": parent_folder_name,
                "app": self.host_name
            },
            "selected_folder_entity": folder_entity,
            "project_entity": project_entity
        }
    )

    creator_attributes = {
        "workfile_start_frame": workfile_start_frame,
        "fps": fps,
        "handle_start": int(handle_start),
        "handle_end": int(handle_end)
    }
    # merge in the resolved frame ranges
    creator_attributes.update(
        self._get_timing_data(
            otio_clip,
            timeline_offset,
            track_start_frame,
            workfile_start_frame
        )
    )
    # shot metadata carries hierarchy data; its folder path belongs
    # into creator attributes while the instance keeps the parent path
    creator_attributes["folderPath"] = shot_metadata.pop("folderPath")

    base_instance_data = {
        "shotName": shot_name,
        "variant": variant_name,
        "task": None,
        "newHierarchyIntegration": True,
        # Backwards compatible (Deprecated since 24/06/06)
        "newAssetPublishing": True,
        "trackStartFrame": track_start_frame,
        "timelineOffset": timeline_offset,
        "folderPath": parent_folder_path,
        "creator_attributes": creator_attributes
    }
    # add hierarchy shot metadata
    base_instance_data.update(shot_metadata)

    return base_instance_data
def _get_timing_data(
self,
otio_clip,
timeline_offset,
track_start_frame,
workfile_start_frame
):
"""Returning available timing data
Args:
otio_clip (otio.Clip): otio clip object
timeline_offset (int): offset value
track_start_frame (int): starting frame input
workfile_start_frame (int): start frame for shot's workfiles
Returns:
dict: timing metadata
"""
# frame ranges data
clip_in = otio_clip.range_in_parent().start_time.value
clip_in += track_start_frame
clip_out = otio_clip.range_in_parent().end_time_inclusive().value
clip_out += track_start_frame
# add offset in case there is any
if timeline_offset:
clip_in += timeline_offset
clip_out += timeline_offset
clip_duration = otio_clip.duration().value
source_in = otio_clip.trimmed_range().start_time.value
source_out = source_in + clip_duration
# define starting frame for future shot
frame_start = (
clip_in if workfile_start_frame is None
else workfile_start_frame
)
frame_end = frame_start + (clip_duration - 1)
return {
"frameStart": int(frame_start),
"frameEnd": int(frame_end),
"clipIn": int(clip_in),
"clipOut": int(clip_out),
"clipDuration": int(otio_clip.duration().value),
"sourceIn": int(source_in),
"sourceOut": int(source_out)
}
def _get_allowed_product_type_presets(self, pre_create_data):
"""Filter out allowed product type presets.
Args:
pre_create_data (dict): precreate attributes inputs
Returns:
list: lit of dict with preset items
"""
return [
{"product_type": "shot"},
*[
preset
for preset in self.product_type_presets
if pre_create_data[preset["product_type"]]
]
]
def _validate_clip_for_processing(self, otio_clip):
    """Check whether an otio clip can be turned into instances.

    Args:
        otio_clip (otio.Clip): otio clip object

    Returns:
        bool: True if all passing conditions
    """
    # Unnamed items cannot produce a shot name. Gaps, generator
    # references (black, color bars, ...) and transitions carry no
    # publishable media; transitions are also ignored because clips
    # keep the full frame range. Short-circuiting keeps the original
    # evaluation order of the checks.
    return (
        otio_clip.name is not None
        and not isinstance(otio_clip, otio.schema.Gap)
        and not isinstance(
            otio_clip.media_reference, otio.schema.GeneratorReference)
        and not isinstance(otio_clip, otio.schema.Transition)
    )
def get_pre_create_attr_defs(self):
    """Creating pre-create attributes at creator plugin.

    Returns:
        list: list of attribute object instances
    """
    sequence_input = FileDef(
        "sequence_filepath_data",
        folders=False,
        extensions=[
            ".edl",
            ".xml",
            ".aaf",
            ".fcpxml"
        ],
        allow_sequences=False,
        single_item=False,
        label="Sequence file",
    )
    media_input = FileDef(
        "media_filepaths_data",
        folders=False,
        extensions=[
            ".mov",
            ".mp4",
            ".wav"
        ],
        allow_sequences=False,
        single_item=False,
        label="Media files",
    )
    # TODO: perhaps better would be timecode and fps input
    offset_input = NumberDef(
        "timeline_offset",
        default=0,
        label="Timeline offset"
    )

    # one toggle per configured product type preset
    product_type_switches = [
        BoolDef(item["product_type"], label=item["product_type"])
        for item in self.product_type_presets
    ]

    return [
        sequence_input,
        media_input,
        offset_input,
        UISeparatorDef(),
        UILabelDef("Clip instance attributes"),
        UISeparatorDef(),
        *product_type_switches,
        UISeparatorDef(),
        *CLIP_ATTR_DEFS,
    ]

View file

@ -1,96 +0,0 @@
from pathlib import Path
from ayon_core.pipeline import (
CreatedInstance,
)
from ayon_core.lib.attribute_definitions import (
FileDef,
BoolDef,
TextDef,
)
from ayon_traypublisher.api.plugin import TrayPublishCreator
class EditorialPackageCreator(TrayPublishCreator):
    """Creates instance for OTIO file from published folder.

    The dragged folder is expected to contain one OTIO file plus the
    exported .mov resources it references. Publishing integrates the
    whole folder as a single `editorial_pkg` product and may convert
    the .mov files into a different format inside the `publish`
    `resources` subfolder.
    """
    identifier = "editorial_pkg"
    label = "Editorial package"
    product_type = "editorial_pkg"
    description = "Publish folder with OTIO file and resources"

    # Position batch creator after simple creators
    order = 120

    # default for the "Convert resources" toggle; overridden by settings
    conversion_enabled = False

    def apply_settings(self, project_settings):
        publish_settings = project_settings["traypublisher"]["publish"]
        self.conversion_enabled = (
            publish_settings
            ["ExtractEditorialPckgConversion"]
            ["conversion_enabled"]
        )

    def get_icon(self):
        return "fa.folder"

    def create(self, product_name, instance_data, pre_create_data):
        folder_path = pre_create_data.get("folder_path")
        # nothing selected in the pre-create dialog -> nothing to create
        if not folder_path:
            return

        full_path = (
            Path(folder_path["directory"])
            / Path(folder_path["filenames"][0])
        )
        instance_data["creator_attributes"] = {
            "folder_path": full_path.as_posix(),
            "conversion_enabled": pre_create_data["conversion_enabled"]
        }

        # Create new instance
        new_instance = CreatedInstance(
            self.product_type, product_name, instance_data, self)
        self._store_new_instance(new_instance)

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        return [
            FileDef(
                "folder_path",
                folders=True,
                single_item=True,
                extensions=[],
                allow_sequences=False,
                label="Folder path"
            ),
            BoolDef("conversion_enabled",
                    tooltip="Convert to output defined in Settings.",
                    default=self.conversion_enabled,
                    label="Convert resources"),
        ]

    def get_instance_attr_defs(self):
        return [
            TextDef(
                "folder_path",
                label="Folder path",
                disabled=True
            ),
            BoolDef("conversion_enabled",
                    tooltip="Convert to output defined in Settings.",
                    label="Convert resources"),
        ]

    def get_detail_description(self):
        return """# Publish folder with OTIO file and video clips

        Folder contains OTIO file and exported .mov files. Process should
        publish whole folder as single `editorial_pkg` product type and
        (possibly) convert .mov files into different format and copy them into
        `publish` `resources` subfolder.
        """

View file

@ -1,22 +0,0 @@
import os
from ayon_core.lib import Logger
from ayon_core.settings import get_project_settings
# module-level logger for this dynamically populated module
log = Logger.get_logger(__name__)


def initialize():
    """Generate SettingsCreator plugins from project settings.

    Reads the ``simple_creators`` list from the current project's
    traypublisher settings and registers one dynamically built
    SettingsCreator subclass per entry in this module's namespace so
    the create framework can discover them.
    """
    from ayon_traypublisher.api.plugin import SettingsCreator

    project_name = os.environ["AYON_PROJECT_NAME"]
    project_settings = get_project_settings(project_name)

    # expose each generated class as a module-level name
    namespace = globals()
    for creator_settings in (
        project_settings["traypublisher"]["simple_creators"]
    ):
        plugin_cls = SettingsCreator.from_settings(creator_settings)
        namespace[plugin_cls.__name__] = plugin_cls


initialize()

View file

@ -1,170 +0,0 @@
import copy
import os
import re
import collections
import ayon_api
from ayon_core.lib import (
FileDef,
BoolDef,
)
from ayon_core.pipeline import (
CreatedInstance,
)
from ayon_core.pipeline.create import (
get_product_name,
TaskNotSetError,
)
from ayon_traypublisher.api.plugin import TrayPublishCreator
from ayon_traypublisher.batch_parsing import (
get_folder_entity_from_filename
)
class BatchMovieCreator(TrayPublishCreator):
    """Creates instances from movie file(s).

    Intended for .mov files, but should work for any video file.
    Doesn't handle image sequences though.
    """
    identifier = "render_movie_batch"
    label = "Batch Movies"
    product_type = "render"
    description = "Publish batch of video files"
    create_allow_context_change = False
    version_regex = re.compile(r"^(.+)_v([0-9]+)$")

    # Position batch creator after simple creators
    order = 110

    def apply_settings(self, project_settings):
        """Load default variants, default tasks and extensions."""
        creator_settings = (
            project_settings["traypublisher"]["create"]["BatchMovieCreator"]
        )
        self.default_variants = creator_settings["default_variants"]
        self.default_tasks = creator_settings["default_tasks"]
        self.extensions = creator_settings["extensions"]

    def get_icon(self):
        return "fa.file"

    def create(self, product_name, data, pre_create_data):
        """Create one instance per selected movie file.

        Folder (and optional version) is parsed from each file name, a
        default task is matched against the folder's tasks and the
        product name is built from them.
        """
        file_paths = pre_create_data.get("filepath")
        if not file_paths:
            return

        data_by_folder_id = collections.defaultdict(list)
        for file_info in file_paths:
            instance_data = copy.deepcopy(data)
            file_name = file_info["filenames"][0]
            filepath = os.path.join(file_info["directory"], file_name)
            instance_data["creator_attributes"] = {"filepath": filepath}

            folder_entity, version = get_folder_entity_from_filename(
                self.project_name, file_name, self.version_regex)

            data_by_folder_id[folder_entity["id"]].append(
                (instance_data, folder_entity)
            )

        # FIX: keys of 'data_by_folder_id' are folder ids, so tasks must
        #   be queried by 'folder_ids' (previously passed as 'task_ids',
        #   which could not match the folders' tasks).
        all_task_entities = ayon_api.get_tasks(
            self.project_name, folder_ids=set(data_by_folder_id.keys())
        )
        task_entity_by_folder_id = collections.defaultdict(dict)
        for task_entity in all_task_entities:
            folder_id = task_entity["folderId"]
            task_name = task_entity["name"].lower()
            task_entity_by_folder_id[folder_id][task_name] = task_entity

        # FIX: each mapping value is a list of
        #   (instance_data, folder_entity) tuples; the previous
        #   single-tuple unpacking raised 'ValueError' on iteration.
        for folder_id, instance_items in data_by_folder_id.items():
            task_entities_by_name = task_entity_by_folder_id[folder_id]
            # find first default task that exists on the folder
            task_name = None
            task_entity = None
            for default_task_name in self.default_tasks:
                _name = default_task_name.lower()
                if _name in task_entities_by_name:
                    # FIX: the entity must be resolved before its name
                    #   is read (previously read from 'None').
                    task_entity = task_entities_by_name[_name]
                    task_name = task_entity["name"]
                    break

            for instance_data, folder_entity in instance_items:
                product_name = self._get_product_name(
                    self.project_name, task_entity, data["variant"]
                )
                instance_data["folderPath"] = folder_entity["path"]
                instance_data["task"] = task_name

                # Create new instance
                new_instance = CreatedInstance(
                    self.product_type, product_name, instance_data, self)
                self._store_new_instance(new_instance)

    def _get_product_name(self, project_name, task_entity, variant):
        """Create product name according to standard template process"""
        host_name = self.create_context.host_name
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]

        try:
            product_name = get_product_name(
                project_name,
                task_name,
                task_type,
                host_name,
                self.product_type,
                variant,
            )
        except TaskNotSetError:
            # Create instance with fake task
            # - instance will be marked as invalid so it can't be published
            #   but user have ability to change it
            # NOTE: This expect that there is not task 'Undefined' on folder
            dumb_value = "Undefined"
            product_name = get_product_name(
                project_name,
                dumb_value,
                dumb_value,
                host_name,
                self.product_type,
                variant,
            )

        return product_name

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        return [
            FileDef(
                "filepath",
                folders=False,
                single_item=False,
                extensions=self.extensions,
                allow_sequences=False,
                label="Filepath"
            ),
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]

    def get_detail_description(self):
        return """# Publish batch of .mov to multiple folders.

        File names must then contain only folder name, or folder name + version.
        (eg. 'chair.mov', 'chair_v001.mov', not really safe `my_chair_v001.mov`
        """

View file

@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-
"""Creator of online files.
Online file retain their original name and use it as product name. To
avoid conflicts, this creator checks if product with this name already
exists under selected folder.
"""
from pathlib import Path
# import ayon_api
from ayon_core.lib.attribute_definitions import FileDef, BoolDef
from ayon_core.pipeline import (
CreatedInstance,
CreatorError
)
from ayon_traypublisher.api.plugin import TrayPublishCreator
class OnlineCreator(TrayPublishCreator):
    """Creates instance from file and retains its original name.

    Online files keep their original file name, which is also used as
    the product name, so they can be tracked back to the source
    delivery easily.
    """

    identifier = "io.ayon.creators.traypublisher.online"
    label = "Online"
    product_type = "online"
    description = "Publish file retaining its original file name"
    extensions = [".mov", ".mp4", ".mxf", ".m4v", ".mpg", ".exr",
                  ".dpx", ".tif", ".png", ".jpg"]

    def get_detail_description(self):
        # FIX: typo in user-facing text ("Bz default" -> "By default")
        return """# Create file retaining its original file name.

        This will publish files using template helping to retain original
        file name and that file name is used as product name.

        By default it tries to guard against multiple publishes of the same
        file."""

    def get_icon(self):
        return "fa.file"

    def create(self, product_name, instance_data, pre_create_data):
        """Create an online instance from the selected representation.

        Raises:
            CreatorError: when no file was selected.
        """
        repr_file = pre_create_data.get("representation_file")
        if not repr_file:
            raise CreatorError("No files specified")

        files = repr_file.get("filenames")
        if not files:
            # this should never happen
            raise CreatorError("Missing files from representation")

        origin_basename = Path(files[0]).stem

        # disable check for existing product with the same name
        """
        folder_entity = ayon_api.get_folder_by_path(
            self.project_name, instance_data["folderPath"], fields={"id"})
        if ayon_api.get_product_by_name(
                self.project_name, origin_basename, folder_entity["id"],
                fields={"id"}):
            raise CreatorError(f"product with {origin_basename} already "
                               "exists in selected folder")
        """

        instance_data["originalBasename"] = origin_basename
        # the original file name (without extension) is the product name
        product_name = origin_basename

        instance_data["creator_attributes"] = {
            "path": (Path(repr_file["directory"]) / files[0]).as_posix()
        }

        # Create new instance
        new_instance = CreatedInstance(self.product_type, product_name,
                                       instance_data, self)
        self._store_new_instance(new_instance)

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]

    def get_pre_create_attr_defs(self):
        return [
            FileDef(
                "representation_file",
                folders=False,
                extensions=self.extensions,
                allow_sequences=True,
                single_item=True,
                label="Representation",
            ),
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]

    def get_product_name(
        self,
        project_name,
        folder_entity,
        task_entity,
        variant,
        host_name=None,
        instance=None
    ):
        """Return name template before creation, real name afterwards."""
        if instance is None:
            return "{originalBasename}"
        return instance.data["productName"]

View file

@ -1,13 +0,0 @@
import pyblish.api
class CollectTrayPublisherAppName(pyblish.api.ContextPlugin):
    """Fill application name and label into the publish context."""

    label = "Collect App Name/Label"
    order = pyblish.api.CollectorOrder - 0.5
    hosts = ["traypublisher"]

    def process(self, context):
        context.data.update({
            "appName": "tray publisher",
            "appLabel": "Tray publisher",
        })

View file

@ -1,36 +0,0 @@
from pprint import pformat
import pyblish.api
class CollectClipInstance(pyblish.api.InstancePlugin):
    """Collect clip instances and resolve its parent"""

    label = "Collect Clip Instances"
    order = pyblish.api.CollectorOrder - 0.081
    hosts = ["traypublisher"]
    families = ["plate", "review", "audio"]

    # creator identifiers whose instances this plugin processes
    _editorial_identifiers = {
        "editorial_plate",
        "editorial_audio",
        "editorial_review",
    }

    def process(self, instance):
        if (
            instance.data["creator_identifier"]
            not in self._editorial_identifiers
        ):
            return

        instance.data["families"].append("clip")

        # inherit shared data from the parent shot instance
        parent_instance_id = instance.data["parent_instance_id"]
        edit_shared_data = instance.context.data["editorialSharedData"]
        instance.data.update(
            edit_shared_data[parent_instance_id]
        )

        context_data = instance.context.data
        if "editorialSourcePath" in context_data.keys():
            instance.data["editorialSourcePath"] = (
                context_data["editorialSourcePath"])
            instance.data["families"].append("trimming")

        self.log.debug(pformat(instance.data))

View file

@ -1,86 +0,0 @@
import os
from pprint import pformat
import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.pipeline import colorspace
class CollectColorspaceLook(pyblish.api.InstancePlugin,
                            publish.AYONPyblishPluginMixin):
    """Collect OCIO colorspace look from LUT file
    """
    label = "Collect Colorspace Look"
    order = pyblish.api.CollectorOrder
    hosts = ["traypublisher"]
    families = ["ociolook"]

    def process(self, instance):
        """Build LUT representation and ocioLook data on the instance.

        Reads creator attributes (absolute LUT path, selected
        working/input/output colorspaces, direction, interpolation)
        plus config data stored in ``instance.data["transientData"]``
        and fills ``representations``, ``source``,
        ``ocioLookWorkingSpace`` and ``ocioLookItems``.
        """
        creator_attrs = instance.data["creator_attributes"]
        lut_repre_name = "LUTfile"
        file_url = creator_attrs["abs_lut_path"]
        file_name = os.path.basename(file_url)
        base_name, ext = os.path.splitext(file_name)

        # set output name with base_name which was cleared
        # of all symbols and all parts were capitalized
        output_name = (base_name.replace("_", " ")
                       .replace(".", " ")
                       .replace("-", " ")
                       .title()
                       .replace(" ", ""))

        # get config items (collected earlier into transient data)
        config_items = instance.data["transientData"]["config_items"]
        config_data = instance.data["transientData"]["config_data"]

        # get colorspace items
        # resolve each selected colorspace against the OCIO config;
        # unset selections stay None
        converted_color_data = {}
        for colorspace_key in [
            "working_colorspace",
            "input_colorspace",
            "output_colorspace"
        ]:
            if creator_attrs[colorspace_key]:
                color_data = colorspace.convert_colorspace_enumerator_item(
                    creator_attrs[colorspace_key], config_items)
                converted_color_data[colorspace_key] = color_data
            else:
                converted_color_data[colorspace_key] = None

        # add colorspace to config data
        if converted_color_data["working_colorspace"]:
            config_data["colorspace"] = (
                converted_color_data["working_colorspace"]["name"]
            )

        # create lut representation data
        lut_repre = {
            "name": lut_repre_name,
            "output": output_name,
            "ext": ext.lstrip("."),
            "files": file_name,
            "stagingDir": os.path.dirname(file_url),
            "tags": []
        }
        instance.data.update({
            "representations": [lut_repre],
            "source": file_url,
            "ocioLookWorkingSpace": converted_color_data["working_colorspace"],
            "ocioLookItems": [
                {
                    "name": lut_repre_name,
                    "ext": ext.lstrip("."),
                    "input_colorspace": converted_color_data[
                        "input_colorspace"],
                    "output_colorspace": converted_color_data[
                        "output_colorspace"],
                    "direction": creator_attrs["direction"],
                    "interpolation": creator_attrs["interpolation"],
                    "config_data": config_data
                }
            ],
        })
        self.log.debug(pformat(instance.data))

View file

@ -1,47 +0,0 @@
from pprint import pformat
import pyblish.api
from ayon_core.pipeline import publish
class CollectCSVIngestInstancesData(
    pyblish.api.InstancePlugin,
    publish.AYONPyblishPluginMixin,
    publish.ColormanagedPyblishPluginMixin
):
    """Collect CSV Ingest data from instance.
    """

    label = "Collect CSV Ingest instances data"
    order = pyblish.api.CollectorOrder + 0.1
    hosts = ["traypublisher"]
    families = ["csv_ingest"]

    def process(self, instance):
        """Attach prepared representations and their colorspaces.

        Expects ``instance.data["prepared_data_for_repres"]`` to be a
        list of dicts with "type", "colorspace" and "representation".
        """
        # expecting [(colorspace, repre_data), ...]
        prepared_repres_data_items = instance.data[
            "prepared_data_for_repres"]

        for prep_repre_data in prepared_repres_data_items:
            # FIX: avoid shadowing the builtin 'type'
            item_type = prep_repre_data["type"]
            colorspace = prep_repre_data["colorspace"]
            repre_data = prep_repre_data["representation"]

            # FIX: the "media without colorspace" branch was dead code,
            #   shadowed by the plain "media" branch before it. Only
            #   set colorspace when one is provided; warn otherwise.
            if item_type == "media" and colorspace is not None:
                # colorspace name is passed from CSV column
                self.set_representation_colorspace(
                    repre_data, instance.context, colorspace
                )
            elif item_type == "media":
                # TODO: implement colorspace file rules file parsing
                self.log.warning(
                    "Colorspace is not defined in csv for following"
                    f" representation: {pformat(repre_data)}"
                )
            elif item_type == "thumbnail":
                # thumbnails should be skipped
                pass

            instance.data["representations"].append(repre_data)

View file

@ -1,48 +0,0 @@
import os
from pprint import pformat
import pyblish.api
import opentimelineio as otio
class CollectEditorialInstance(pyblish.api.InstancePlugin):
    """Collect data for instances created by settings creators."""

    label = "Collect Editorial Instances"
    order = pyblish.api.CollectorOrder - 0.1
    hosts = ["traypublisher"]
    families = ["editorial"]

    def process(self, instance):
        # make sure the list keys exist before appending
        instance.data.setdefault("families", [])
        instance.data.setdefault("representations", [])

        fpath = instance.data["sequenceFilePath"]

        # deserialize the timeline and share it through the context
        otio_timeline_string = instance.data.pop("otioTimeline")
        instance.context.data["otioTimeline"] = (
            otio.adapters.read_from_string(otio_timeline_string))
        instance.context.data["editorialSourcePath"] = (
            instance.data["editorialSourcePath"])

        self.log.info(fpath)

        staging_dir = os.path.dirname(fpath)
        instance.data["stagingDir"] = staging_dir

        extension = os.path.splitext(fpath)[-1][1:]
        instance.data["representations"].append({
            "ext": extension,
            "name": extension,
            "stagingDir": staging_dir,
            "files": os.path.basename(fpath)
        })

        self.log.debug("Created Editorial Instance {}".format(
            pformat(instance.data)
        ))

View file

@ -1,58 +0,0 @@
"""Produces instance.data["editorial_pkg"] data used during integration.
Requires:
instance.data["creator_attributes"]["path"] - from creator
Provides:
instance -> editorial_pkg (dict):
folder_path (str)
otio_path (str) - from dragged folder
resource_paths (list)
"""
import os
import pyblish.api
from ayon_core.lib.transcoding import VIDEO_EXTENSIONS
class CollectEditorialPackage(pyblish.api.InstancePlugin):
    """Collects path to OTIO file and resources"""

    label = "Collect Editorial Package"
    order = pyblish.api.CollectorOrder - 0.1
    hosts = ["traypublisher"]
    families = ["editorial_pkg"]

    def process(self, instance):
        folder_path = instance.data["creator_attributes"]["folder_path"]
        if not folder_path or not os.path.exists(folder_path):
            self.log.info((
                "Instance doesn't contain collected existing folder path."
            ))
            return

        otio_path, resource_paths = (
            self._get_otio_and_resource_paths(folder_path))

        instance.data["editorial_pkg"] = {
            "folder_path": folder_path,
            "otio_path": otio_path,
            "resource_paths": resource_paths,
        }

    def _get_otio_and_resource_paths(self, folder_path):
        """Split folder content into the OTIO file and video resources."""
        otio_path = None
        resource_paths = []

        for filename in os.listdir(folder_path):
            extension = os.path.splitext(filename)[-1]
            file_path = os.path.join(folder_path, filename)
            if extension == ".otio":
                otio_path = file_path
            elif extension in VIDEO_EXTENSIONS:
                resource_paths.append(file_path)

        return otio_path, resource_paths

View file

@ -1,30 +0,0 @@
import pyblish.api
class CollectEditorialReviewable(pyblish.api.InstancePlugin):
    """ Collect review input from user.

    Adds the input to instance data.
    """

    label = "Collect Editorial Reviewable"
    order = pyblish.api.CollectorOrder
    families = ["plate", "review", "audio"]
    hosts = ["traypublisher"]

    def process(self, instance):
        # only instances made by the editorial creators are relevant
        if instance.data["creator_identifier"] not in {
            "editorial_plate",
            "editorial_audio",
            "editorial_review",
        }:
            return

        # review family follows the creator UI toggle
        if instance.data["creator_attributes"]["add_review_family"]:
            instance.data["families"].append("review")

        self.log.debug("instance.data {}".format(instance.data))

View file

@ -1,101 +0,0 @@
import pyblish.api
from ayon_core.lib import EnumDef
from ayon_core.pipeline import colorspace
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import KnownPublishError
class CollectColorspace(pyblish.api.InstancePlugin,
                        publish.AYONPyblishPluginMixin,
                        publish.ColormanagedPyblishPluginMixin):
    """Collect explicit user defined representation colorspaces"""

    label = "Choose representation colorspace"
    order = pyblish.api.CollectorOrder + 0.49
    hosts = ["traypublisher"]
    families = ["render", "plate", "reference", "image", "online"]
    # enabled only when the project has an OCIO config (apply_settings)
    enabled = False

    # first enum item means "keep whatever colorspace is detected"
    default_colorspace_items = [
        (None, "Don't override")
    ]
    # class-level state filled by apply_settings from project settings
    colorspace_items = list(default_colorspace_items)
    colorspace_attr_show = False
    config_items = None

    def process(self, instance):
        """Apply the user-selected colorspace to all representations."""
        values = self.get_attr_values_from_data(instance.data)
        colorspace_value = values.get("colorspace", None)
        # None means "Don't override" was selected
        if colorspace_value is None:
            return

        color_data = colorspace.convert_colorspace_enumerator_item(
            colorspace_value, self.config_items)

        colorspace_name = self._colorspace_name_by_type(color_data)
        self.log.debug("Explicit colorspace name: {}".format(colorspace_name))

        context = instance.context
        for repre in instance.data.get("representations", {}):
            self.set_representation_colorspace(
                representation=repre,
                context=context,
                colorspace=colorspace_name
            )

    def _colorspace_name_by_type(self, colorspace_data):
        """
        Returns colorspace name by type

        Arguments:
            colorspace_data (dict): colorspace data

        Returns:
            str: colorspace name

        Raises:
            KnownPublishError: when the config item type is neither
                "colorspaces" nor "roles".
        """
        if colorspace_data["type"] == "colorspaces":
            return colorspace_data["name"]
        elif colorspace_data["type"] == "roles":
            # roles resolve to their underlying colorspace
            return colorspace_data["colorspace"]
        else:
            raise KnownPublishError(
                (
                    "Collecting of colorspace failed. used config is missing "
                    "colorspace type: '{}' . Please contact your pipeline TD."
                ).format(colorspace_data['type'])
            )

    @classmethod
    def apply_settings(cls, project_settings):
        """Enable plugin and build enum items from the project's OCIO config.

        Mutates class attributes so get_attribute_defs can offer the
        available colorspaces of the current project.
        """
        config_data = colorspace.get_current_context_imageio_config_preset(
            project_settings=project_settings
        )
        enabled = False
        colorspace_items = list(cls.default_colorspace_items)
        config_items = None
        if config_data:
            enabled = True
            filepath = config_data["path"]
            config_items = colorspace.get_ocio_config_colorspaces(filepath)
            labeled_colorspaces = colorspace.get_colorspaces_enumerator_items(
                config_items,
                include_aliases=True,
                include_roles=True
            )
            colorspace_items.extend(labeled_colorspaces)

        cls.config_items = config_items
        cls.colorspace_items = colorspace_items
        cls.enabled = enabled

    @classmethod
    def get_attribute_defs(cls):
        # NOTE(review): default is the item *label*, while the enum
        #   values are (value, label) tuples with value None for the
        #   "Don't override" item — confirm EnumDef matches by label.
        return [
            EnumDef(
                "colorspace",
                cls.colorspace_items,
                default="Don't override",
                label="Override Colorspace"
            )
        ]

View file

@ -1,51 +0,0 @@
import pyblish.api
class CollectFrameDataFromAssetEntity(pyblish.api.InstancePlugin):
    """Collect Frame Data From 'folderEntity' found in context.

    Frame range data will only be collected if the keys
    are not yet collected for the instance.
    """

    order = pyblish.api.CollectorOrder + 0.491
    label = "Collect Missing Frame Data From Folder"
    families = [
        "plate",
        "pointcache",
        "vdbcache",
        "online",
        "render",
    ]
    hosts = ["traypublisher"]

    # frame-range keys this plugin can backfill from the folder
    _frame_keys = (
        "fps",
        "frameStart",
        "frameEnd",
        "handleStart",
        "handleEnd",
    )

    def process(self, instance):
        missing_keys = [
            key
            for key in self._frame_keys
            if key not in instance.data
        ]
        # Skip the logic if all keys are already collected.
        # NOTE: In editorial is not 'folderEntity' filled, so it would crash
        #   even if we don't need it.
        if not missing_keys:
            return

        folder_attributes = instance.data["folderEntity"]["attrib"]
        keys_set = []
        for key in missing_keys:
            if key in folder_attributes:
                instance.data[key] = folder_attributes[key]
                keys_set.append(key)

        if keys_set:
            self.log.debug(
                f"Frame range data {keys_set} "
                "has been collected from folder entity."
            )

View file

@ -1,48 +0,0 @@
import os
import pyblish.api
from ayon_core.pipeline import AYONPyblishPluginMixin
class CollectMovieBatch(
    pyblish.api.InstancePlugin, AYONPyblishPluginMixin
):
    """Collect file url for batch movies and create representation.

    Adds review on instance and to repre.tags based on value of toggle button
    on creator.
    """

    label = "Collect Movie Batch Files"
    order = pyblish.api.CollectorOrder
    hosts = ["traypublisher"]

    def process(self, instance):
        if instance.data.get("creator_identifier") != "render_movie_batch":
            return

        creator_attributes = instance.data["creator_attributes"]

        file_url = creator_attributes["filepath"]
        file_name = os.path.basename(file_url)
        ext = os.path.splitext(file_name)[-1][1:]

        repre = {
            "name": ext,
            "ext": ext,
            "files": file_name,
            "stagingDir": os.path.dirname(file_url),
            "tags": []
        }
        instance.data["representations"].append(repre)

        if creator_attributes["add_review_family"]:
            # review is driven by the creator toggle
            repre["tags"].append("review")
            instance.data["families"].append("review")

        # first collected movie also serves as thumbnail source
        if not instance.data.get("thumbnailSource"):
            instance.data["thumbnailSource"] = file_url

        instance.data["source"] = file_url

        self.log.debug("instance.data {}".format(instance.data))

View file

@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
import pyblish.api
from pathlib import Path
class CollectOnlineFile(pyblish.api.InstancePlugin):
    """Collect online file and retain its file name."""

    label = "Collect Online File"
    order = pyblish.api.CollectorOrder
    families = ["online"]
    hosts = ["traypublisher"]

    def process(self, instance):
        """Create the file representation and optional review family."""
        file = Path(instance.data["creator_attributes"]["path"])
        review = instance.data["creator_attributes"]["add_review_family"]
        instance.data["review"] = review

        # FIX: the "review" family was appended unconditionally; it must
        #   only be added when the user enabled review on the creator,
        #   consistent with the representation tags below.
        if review and "review" not in instance.data["families"]:
            instance.data["families"].append("review")
        self.log.info(f"Adding review: {review}")

        suffix = file.suffix.lstrip(".")
        instance.data["representations"].append(
            {
                "name": suffix,
                "ext": suffix,
                "files": file.name,
                "stagingDir": file.parent.as_posix(),
                "tags": ["review"] if review else []
            }
        )

View file

@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-
import pyblish.api
class CollectReviewInfo(pyblish.api.InstancePlugin):
    """Collect data required for review instances.

    ExtractReview plugin requires frame start/end, fps on instance data
    which are missing on instances from TrayPublisher. Missing values are
    filled from folder entity attributes.

    Warning:
        This is temporary solution to "make it work". Contains removed
        changes from https://github.com/ynput/OpenPype/pull/4383 reduced
        only for review instances.
    """
    label = "Collect Review Info"
    order = pyblish.api.CollectorOrder + 0.491
    families = ["review"]
    hosts = ["traypublisher"]

    def process(self, instance):
        folder_entity = instance.data.get("folderEntity")
        if instance.data.get("frameStart") is not None or not folder_entity:
            # BUGFIX: previous message claimed "Missing required data on
            # instance" even when frame range data was already present.
            self.log.debug(
                "Frame range is already collected or folder entity"
                " is not available. Skipping."
            )
            return
        folder_attributes = folder_entity["attrib"]
        # Store collected data for logging
        collected_data = {}
        for key in (
            "fps",
            "frameStart",
            "frameEnd",
            "handleStart",
            "handleEnd",
        ):
            # Never override values already set on the instance and skip
            # attributes the folder does not define.
            if key in instance.data or key not in folder_attributes:
                continue
            value = folder_attributes[key]
            collected_data[key] = value
            instance.data[key] = value
        self.log.debug("Collected data: {}".format(str(collected_data)))

View file

@ -1,82 +0,0 @@
import pyblish.api
import clique
from ayon_core.pipeline import OptionalPyblishPluginMixin
class CollectSequenceFrameData(
    pyblish.api.InstancePlugin,
    OptionalPyblishPluginMixin
):
    """Collect frame range from a file-sequence representation.

    When the first representation of the instance contains a frame
    sequence, `frameStart` and `frameEnd` are set on the instance from
    the first and last frame of that sequence, together with zero
    handles and the folder fps.
    """
    order = pyblish.api.CollectorOrder + 0.4905
    label = "Collect Original Sequence Frame Data"
    families = ["plate", "pointcache",
                "vdbcache", "online",
                "render"]
    hosts = ["traypublisher"]
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        # Editorial instances might not be in the database yet and would
        # fail when reading folder attributes.
        creating_new_folders = (
            instance.data.get("newHierarchyIntegration")
            # Backwards compatible (Deprecated since 24/06/06)
            or instance.data.get("newAssetPublishing")
        )
        if creating_new_folders:
            self.log.debug("Instance is creating new folders. Skipping.")
            return

        frame_data = self.get_frame_data_from_repre_sequence(instance)
        if not frame_data:
            # Nothing detected -> keep instance data untouched.
            return

        for key, value in frame_data.items():
            instance.data[key] = value
            self.log.debug(f"Collected Frame range data '{key}':{value} ")

    def get_frame_data_from_repre_sequence(self, instance):
        """Return frame range data from first representation or None."""
        repres = instance.data.get("representations")
        folder_attributes = instance.data["folderEntity"]["attrib"]
        if not repres:
            return None

        first_repre = repres[0]
        if "ext" not in first_repre:
            self.log.warning("Cannot find file extension"
                             " in representation data")
            return None

        files = first_repre["files"]
        if not isinstance(files, list):
            files = [files]

        collections = clique.assemble(files)[0]
        if not collections:
            # No sequences detected and we can't retrieve frame range
            self.log.debug(
                "No sequences detected in the representation data."
                " Skipping collecting frame range data.")
            return None

        frames = list(collections[0].indexes)
        return {
            "frameStart": frames[0],
            "frameEnd": frames[-1],
            "handleStart": 0,
            "handleEnd": 0,
            "fps": folder_attributes["fps"]
        }

View file

@ -1,209 +0,0 @@
from pprint import pformat
import pyblish.api
import opentimelineio as otio
class CollectShotInstance(pyblish.api.InstancePlugin):
    """ Collect shot instances

    Resolving its user inputs from creator attributes
    to instance data. Also distributes shared timing keys to related
    instances via context data and extends the context
    'hierarchyContext' with the shot's folder hierarchy.
    """
    label = "Collect Shot Instances"
    order = pyblish.api.CollectorOrder - 0.09
    hosts = ["traypublisher"]
    families = ["shot"]
    # Keys copied into context.data["editorialSharedData"] so sibling
    # instances created from the same clip can reuse the shot's timing
    # and hierarchy information (see '_distribute_shared_data').
    SHARED_KEYS = [
        "folderPath",
        "fps",
        "handleStart",
        "handleEnd",
        "frameStart",
        "frameEnd",
        "clipIn",
        "clipOut",
        "clipDuration",
        "sourceIn",
        "sourceOut",
        "otioClip",
        "workfileFrameStart"
    ]
    def process(self, instance):
        """Resolve creator attributes of an editorial 'shot' instance."""
        creator_identifier = instance.data["creator_identifier"]
        # Only instances made by "editorial" creators are handled here.
        if "editorial" not in creator_identifier:
            return
        # get otio clip object
        otio_clip = self._get_otio_clip(instance)
        instance.data["otioClip"] = otio_clip
        # first solve the inputs from creator attr
        data = self._solve_inputs_to_data(instance)
        instance.data.update(data)
        # distribute all shared keys to clips instances
        self._distribute_shared_data(instance)
        self._solve_hierarchy_context(instance)
        self.log.debug(pformat(instance.data))
    def _get_otio_clip(self, instance):
        """ Converts otio string data.
        Convert them to proper otio object
        and finds its equivalent at otio timeline.
        This process is a hack to support also
        resolving parent range.
        Args:
            instance (obj): publishing instance
        Returns:
            otio.Clip: otio clip object
        """
        context = instance.context
        # convert otio clip from string to object
        otio_clip_string = instance.data.pop("otioClip")
        otio_clip = otio.adapters.read_from_string(
            otio_clip_string)
        otio_timeline = context.data["otioTimeline"]
        # Match by clip name on Video tracks only; the deserialized clip
        # is replaced with the timeline-attached one so parent ranges
        # can be resolved.
        clips = [
            clip for clip in otio_timeline.each_child(
                descended_from_type=otio.schema.Clip)
            if clip.name == otio_clip.name
            if clip.parent().kind == "Video"
        ]
        # NOTE(review): 'pop()' takes the LAST matching clip and raises
        # IndexError when no clip matches - confirm both are intended.
        otio_clip = clips.pop()
        return otio_clip
    def _distribute_shared_data(self, instance):
        """ Distribute all defined keys.
        All data are shared between all related
        instances in context.
        Args:
            instance (obj): publishing instance
        """
        context = instance.context
        instance_id = instance.data["instance_id"]
        # Lazily create the shared-data storage on context.
        if not context.data.get("editorialSharedData"):
            context.data["editorialSharedData"] = {}
        # Store only the keys declared in SHARED_KEYS, keyed by this
        # instance's id so related instances can look them up.
        context.data["editorialSharedData"][instance_id] = {
            _k: _v for _k, _v in instance.data.items()
            if _k in self.SHARED_KEYS
        }
    def _solve_inputs_to_data(self, instance):
        """ Resolve all user inputs into instance data.
        Args:
            instance (obj): publishing instance
        Returns:
            dict: instance data updating data
        """
        _cr_attrs = instance.data["creator_attributes"]
        workfile_start_frame = _cr_attrs["workfile_start_frame"]
        frame_start = _cr_attrs["frameStart"]
        frame_end = _cr_attrs["frameEnd"]
        # Duration is preserved while the range is re-based onto the
        # workfile start frame.
        frame_dur = frame_end - frame_start
        return {
            "fps": float(_cr_attrs["fps"]),
            "handleStart": _cr_attrs["handle_start"],
            "handleEnd": _cr_attrs["handle_end"],
            "frameStart": workfile_start_frame,
            "frameEnd": workfile_start_frame + frame_dur,
            "clipIn": _cr_attrs["clipIn"],
            "clipOut": _cr_attrs["clipOut"],
            "clipDuration": _cr_attrs["clipDuration"],
            "sourceIn": _cr_attrs["sourceIn"],
            "sourceOut": _cr_attrs["sourceOut"],
            "workfileFrameStart": workfile_start_frame,
            "folderPath": _cr_attrs["folderPath"],
        }
    def _solve_hierarchy_context(self, instance):
        """ Adding hierarchy data to context shared data.
        Builds a nested dict from the instance's parents (outermost
        parent on top) and merges it into
        context.data["hierarchyContext"].
        Args:
            instance (obj): publishing instance
        """
        context = instance.context
        final_context = (
            context.data["hierarchyContext"]
            if context.data.get("hierarchyContext")
            else {}
        )
        # get handles
        handle_start = int(instance.data["handleStart"])
        handle_end = int(instance.data["handleEnd"])
        # Leaf node: the shot folder itself with its attributes/tasks.
        in_info = {
            "entity_type": "folder",
            "folder_type": "Shot",
            "attributes": {
                "handleStart": handle_start,
                "handleEnd": handle_end,
                "frameStart": instance.data["frameStart"],
                "frameEnd": instance.data["frameEnd"],
                "clipIn": instance.data["clipIn"],
                "clipOut": instance.data["clipOut"],
                "fps": instance.data["fps"]
            },
            "tasks": instance.data["tasks"]
        }
        parents = instance.data.get('parents', [])
        folder_name = instance.data["folderPath"].split("/")[-1]
        actual = {folder_name: in_info}
        # Wrap the leaf with each parent, innermost parent first.
        for parent in reversed(parents):
            parent_name = parent["entity_name"]
            parent_info = {
                "entity_type": parent["entity_type"],
                "children": actual,
            }
            if parent_info["entity_type"] == "folder":
                parent_info["folder_type"] = parent["folder_type"]
            actual = {parent_name: parent_info}
        final_context = self._update_dict(final_context, actual)
        # adding hierarchy context to instance
        context.data["hierarchyContext"] = final_context
    def _update_dict(self, ex_dict, new_dict):
        """ Recursion function
        Updating nested data with another nested data.
        Args:
            ex_dict (dict): nested data
            new_dict (dict): nested data
        Returns:
            dict: updated nested data
        """
        for key in ex_dict:
            if key in new_dict and isinstance(ex_dict[key], dict):
                new_dict[key] = self._update_dict(ex_dict[key], new_dict[key])
            # NOTE(review): existing value wins when either side is
            # falsy - confirm this asymmetric merge is intended.
            elif not ex_dict.get(key) or not new_dict.get(key):
                new_dict[key] = ex_dict[key]
        return new_dict

View file

@ -1,272 +0,0 @@
import os
import tempfile
from pathlib import Path
import clique
import pyblish.api
class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
    """Collect data for instances created by settings creators.

    Plugin create representations for simple instances based
    on 'representation_files' attribute stored on instance data.

    There is also possibility to have reviewable representation which can be
    stored under 'reviewable' attribute stored on instance data. If there was
    already created representation with the same files as 'reviewable'
    contains, that representation is reused for review instead of creating
    a new one.

    Representations can be marked for review and in that case is also added
    'review' family to instance families. For review can be marked only one
    representation so **first** representation that has extension available
    in '_review_extensions' is used for review.

    For instance 'source' is used path from last representation created
    from 'representation_files'.

    Set staging directory on instance. That is probably never used because
    each created representation has it's own staging dir.
    """
    label = "Collect Settings Simple Instances"
    order = pyblish.api.CollectorOrder - 0.49
    hosts = ["traypublisher"]
    def process(self, instance):
        # Only instances created by settings (simple) creators apply.
        if not instance.data.get("settings_creator"):
            return
        instance_label = instance.data["name"]
        # Create instance's staging dir in temp
        tmp_folder = tempfile.mkdtemp(prefix="traypublisher_")
        instance.data["stagingDir"] = tmp_folder
        # Register for cleanup after publishing finishes.
        instance.context.data["cleanupFullPaths"].append(tmp_folder)
        self.log.debug((
            "Created temp staging directory for instance {}. {}"
        ).format(instance_label, tmp_folder))
        self._fill_version(instance, instance_label)
        # Store filepaths for validation of their existence
        source_filepaths = []
        # Make sure there are no representations with same name
        repre_names_counter = {}
        # Store created names for logging
        repre_names = []
        # Store set of filepaths per each representation
        representation_files_mapping = []
        source = self._create_main_representations(
            instance,
            source_filepaths,
            repre_names_counter,
            repre_names,
            representation_files_mapping
        )
        self._create_review_representation(
            instance,
            source_filepaths,
            repre_names_counter,
            repre_names,
            representation_files_mapping
        )
        source_filepaths = list(set(source_filepaths))
        instance.data["source"] = source
        instance.data["sourceFilepaths"] = source_filepaths
        # NOTE: Missing filepaths should not cause crashes (at least not here)
        # - if filepaths are required they should crash on validation
        if source_filepaths:
            # NOTE: Original basename is not handling sequences
            # - we should maybe not fill the key when sequence is used?
            origin_basename = Path(source_filepaths[0]).stem
            instance.data["originalBasename"] = origin_basename
        self.log.debug(
            (
                "Created Simple Settings instance \"{}\""
                " with {} representations: {}"
            ).format(
                instance_label,
                len(instance.data["representations"]),
                ", ".join(repre_names)
            )
        )
    def _fill_version(self, instance, instance_label):
        """Fill instance version under which will be instance integrated.

        Instance must have set 'use_next_version' to 'False'
        and 'version_to_use' to version to use.

        Args:
            instance (pyblish.api.Instance): Instance to fill version for.
            instance_label (str): Label of instance to fill version for.
        """
        creator_attributes = instance.data["creator_attributes"]
        use_next_version = creator_attributes.get("use_next_version", True)
        # If 'version_to_use' is '0' it means that next version should be used
        version_to_use = creator_attributes.get("version_to_use", 0)
        if use_next_version or not version_to_use:
            return
        instance.data["version"] = version_to_use
        self.log.debug(
            "Version for instance \"{}\" was set to \"{}\"".format(
                instance_label, version_to_use))
    def _create_main_representations(
        self,
        instance,
        source_filepaths,
        repre_names_counter,
        repre_names,
        representation_files_mapping
    ):
        """Create representations from 'representation_files' attribute.

        Mutates the passed collections in place so the review step can
        reuse them.

        Returns:
            Union[str, None]: Source path of last created representation
                or None when nothing was created.
        """
        creator_attributes = instance.data["creator_attributes"]
        filepath_items = creator_attributes["representation_files"]
        if not isinstance(filepath_items, list):
            filepath_items = [filepath_items]
        source = None
        for filepath_item in filepath_items:
            # Skip if filepath item does not have filenames
            if not filepath_item["filenames"]:
                continue
            filepaths = {
                os.path.join(filepath_item["directory"], filename)
                for filename in filepath_item["filenames"]
            }
            source_filepaths.extend(filepaths)
            source = self._calculate_source(filepaths)
            representation = self._create_representation_data(
                filepath_item, repre_names_counter, repre_names
            )
            instance.data["representations"].append(representation)
            representation_files_mapping.append(
                (filepaths, representation, source)
            )
        return source
    def _create_review_representation(
        self,
        instance,
        source_filepaths,
        repre_names_counter,
        repre_names,
        representation_files_mapping
    ):
        """Mark a representation for review based on 'reviewable' attribute.

        Reuses an existing representation when it has exactly the same
        filepaths, otherwise a new representation is created.
        """
        # Skip review representation creation if there are no representations
        # created for "main" part
        # - review representation must not be created in that case so
        #   validation can care about it
        if not representation_files_mapping:
            self.log.warning((
                "There are missing source representations."
                " Creation of review representation was skipped."
            ))
            return
        creator_attributes = instance.data["creator_attributes"]
        review_file_item = creator_attributes["reviewable"]
        filenames = review_file_item.get("filenames")
        if not filenames:
            self.log.debug((
                "Filepath for review is not defined."
                " Skipping review representation creation."
            ))
            return
        item_dir = review_file_item["directory"]
        first_filepath = os.path.join(item_dir, filenames[0])
        filepaths = {
            os.path.join(item_dir, filename)
            for filename in filenames
        }
        source_filepaths.extend(filepaths)
        # First try to find out representation with same filepaths
        # so it's not needed to create new representation just for review
        review_representation = None
        # Review path (only for logging)
        review_path = None
        for item in representation_files_mapping:
            _filepaths, representation, repre_path = item
            if _filepaths == filepaths:
                review_representation = representation
                review_path = repre_path
                break
        if review_representation is None:
            self.log.debug("Creating new review representation")
            review_path = self._calculate_source(filepaths)
            review_representation = self._create_representation_data(
                review_file_item, repre_names_counter, repre_names
            )
            instance.data["representations"].append(review_representation)
        if "review" not in instance.data["families"]:
            instance.data["families"].append("review")
        if not instance.data.get("thumbnailSource"):
            instance.data["thumbnailSource"] = first_filepath
        review_representation["tags"].append("review")
        # Adding "review" to representation name since it can clash with main
        # representation if they share the same extension.
        review_representation["outputName"] = "review"
        self.log.debug("Representation {} was marked for review. {}".format(
            review_representation["name"], review_path
        ))
    def _create_representation_data(
        self, filepath_item, repre_names_counter, repre_names
    ):
        """Create new representation data based on file item.

        Args:
            filepath_item (Dict[str, Any]): Item with information about
                representation paths.
            repre_names_counter (Dict[str, int]): Store count of representation
                names.
            repre_names (List[str]): All used representation names. For
                logging purposes.

        Returns:
            Dict: Prepared base representation data.
        """
        filenames = filepath_item["filenames"]
        _, ext = os.path.splitext(filenames[0])
        # Single file is stored as plain string, sequence as list.
        if len(filenames) == 1:
            filenames = filenames[0]
        repre_name = repre_ext = ext[1:]
        # Deduplicate representation names by appending a counter
        # (second occurrence becomes '<name>_2', third '<name>_3', ...).
        if repre_name not in repre_names_counter:
            repre_names_counter[repre_name] = 2
        else:
            counter = repre_names_counter[repre_name]
            repre_names_counter[repre_name] += 1
            repre_name = "{}_{}".format(repre_name, counter)
        repre_names.append(repre_name)
        return {
            "ext": repre_ext,
            "name": repre_name,
            "stagingDir": filepath_item["directory"],
            "files": filenames,
            "tags": []
        }
    def _calculate_source(self, filepaths):
        """Return a human readable source path for the given filepaths.

        Sequences are collapsed into a single '{head}{padding}{tail}'
        formatted path, a single file is returned as-is.

        Args:
            filepaths (Iterable[str]): Filepaths of one representation.

        Returns:
            Union[str, None]: Source path, or None when 'clique' could
                not assemble anything from the input.
        """
        # BUGFIX: 'source' was left unbound when 'clique.assemble'
        # returned neither collections nor remainders, which raised
        # 'UnboundLocalError' instead of returning gracefully.
        source = None
        cols, rems = clique.assemble(filepaths)
        if cols:
            source = cols[0].format("{head}{padding}{tail}")
        elif rems:
            source = rems[0]
        return source

View file

@ -1,24 +0,0 @@
import pyblish.api
class CollectSource(pyblish.api.ContextPlugin):
    """Ensure every instance has a 'source' value.

    Instances without a 'source' get the host name "traypublisher";
    instances that already carry one are left untouched.
    """

    label = "Collect source"
    order = pyblish.api.CollectorOrder - 0.49
    hosts = ["traypublisher"]

    def process(self, context):
        default_source = "traypublisher"
        for instance in context:
            instance_name = instance.data["name"]
            current_source = instance.data.get("source")
            if current_source:
                # Keep whatever was collected earlier.
                self.log.info((
                    "Source of instance \"{}\" was already set to \"{}\""
                ).format(instance_name, current_source))
                continue
            instance.data["source"] = default_source
            self.log.info((
                "Source of instance \"{}\" is changed to \"{}\""
            ).format(instance_name, default_source))

View file

@ -1,45 +0,0 @@
import os
import json
import pyblish.api
from ayon_core.pipeline import publish
class ExtractColorspaceLook(publish.Extractor,
                            publish.AYONPyblishPluginMixin):
    """Serialize OCIO colorspace look data into a JSON representation."""

    label = "Extract Colorspace Look"
    order = pyblish.api.ExtractorOrder
    hosts = ["traypublisher"]
    families = ["ociolook"]

    def process(self, instance):
        look_items = instance.data["ocioLookItems"]
        working_space = instance.data["ocioLookWorkingSpace"]
        staging_dir = self.staging_dir(instance)

        # Payload written into the published JSON file.
        file_name = "ocioLookFile.json"
        payload = {
            "version": 1,
            "data": {
                "ocioLookItems": look_items,
                "ocioLookWorkingSpace": working_space
            }
        }

        # Dump the payload into the staging directory.
        file_path = os.path.join(staging_dir, file_name)
        with open(file_path, "w") as stream:
            json.dump(payload, stream, indent=4)

        # Register the JSON file as a representation.
        instance.data["representations"].append({
            "name": "ocioLookFile",
            "ext": "json",
            "files": file_name,
            "stagingDir": staging_dir,
            "tags": []
        })

View file

@ -1,31 +0,0 @@
import pyblish.api
from ayon_core.pipeline import publish
class ExtractCSVFile(publish.Extractor):
    """Register the ingested CSV file as a representation."""

    label = "Extract CSV file"
    order = pyblish.api.ExtractorOrder - 0.45
    families = ["csv_ingest_file"]
    hosts = ["traypublisher"]

    def process(self, instance):
        csv_data = instance.data["csvFileData"]
        csv_representation = {
            "name": "csv_data",
            "ext": "csv",
            "files": csv_data["filename"],
            "stagingDir": csv_data["staging_dir"],
            # Keep the staging dir so the source CSV survives cleanup.
            "stagingDir_persistent": True
        }
        instance.data["representations"].append(csv_representation)
        self.log.info("Added CSV file representation: {}".format(
            csv_representation))

View file

@ -1,232 +0,0 @@
import copy
import os.path
import subprocess
import opentimelineio
import pyblish.api
from ayon_core.lib import get_ffmpeg_tool_args, run_subprocess
from ayon_core.pipeline import publish
class ExtractEditorialPckgConversion(publish.Extractor):
    """Replaces movie paths in otio file with publish rootless

    Prepares movie resources for integration (adds them to `transfers`).
    Converts .mov files according to output definition.
    """
    label = "Extract Editorial Package"
    order = pyblish.api.ExtractorOrder - 0.45
    hosts = ["traypublisher"]
    families = ["editorial_pkg"]
    def process(self, instance):
        # Data prepared by the editorial package collector/creator:
        # otio path, parsed otio data and paths of referenced movies.
        editorial_pkg_data = instance.data.get("editorial_pkg")
        otio_path = editorial_pkg_data["otio_path"]
        otio_basename = os.path.basename(otio_path)
        staging_dir = self.staging_dir(instance)
        editorial_pkg_repre = {
            'name': "editorial_pkg",
            'ext': "otio",
            'files': otio_basename,
            "stagingDir": staging_dir,
        }
        otio_staging_path = os.path.join(staging_dir, otio_basename)
        instance.data["representations"].append(editorial_pkg_repre)
        publish_resource_folder = self._get_publish_resource_folder(instance)
        resource_paths = editorial_pkg_data["resource_paths"]
        transfers = self._get_transfers(resource_paths,
                                        publish_resource_folder)
        project_settings = instance.context.data["project_settings"]
        output_def = (project_settings["traypublisher"]
                                      ["publish"]
                                      ["ExtractEditorialPckgConversion"]
                                      ["output"])
        conversion_enabled = (instance.data["creator_attributes"]
                                           ["conversion_enabled"])
        # Conversion runs only when enabled on the instance AND an output
        # extension is configured in settings.
        if conversion_enabled and output_def["ext"]:
            transfers = self._convert_resources(output_def, transfers)
        instance.data["transfers"] = transfers
        source_to_rootless = self._get_resource_path_mapping(instance,
                                                             transfers)
        otio_data = editorial_pkg_data["otio_data"]
        # Rewrite media references to rootless published paths before
        # writing the otio file into the staging dir.
        otio_data = self._replace_target_urls(otio_data, source_to_rootless)
        opentimelineio.adapters.write_to_file(otio_data, otio_staging_path)
        self.log.info("Added Editorial Package representation: {}".format(
            editorial_pkg_repre))
    def _get_publish_resource_folder(self, instance):
        """Calculates publish folder and create it."""
        publish_path = self._get_published_path(instance)
        publish_folder = os.path.dirname(publish_path)
        publish_resource_folder = os.path.join(publish_folder, "resources")
        if not os.path.exists(publish_resource_folder):
            os.makedirs(publish_resource_folder, exist_ok=True)
        return publish_resource_folder
    def _get_resource_path_mapping(self, instance, transfers):
        """Returns dict of {source_mov_path: rootless_published_path}."""
        replace_paths = {}
        anatomy = instance.context.data["anatomy"]
        for source, destination in transfers:
            rootless_path = self._get_rootless(anatomy, destination)
            # Keyed by basename so otio media references can be matched
            # regardless of their original directory.
            source_file_name = os.path.basename(source)
            replace_paths[source_file_name] = rootless_path
        return replace_paths
    def _get_transfers(self, resource_paths, publish_resource_folder):
        """Returns list of tuples (source, destination) with movie paths."""
        transfers = []
        for res_path in resource_paths:
            res_basename = os.path.basename(res_path)
            pub_res_path = os.path.join(publish_resource_folder, res_basename)
            transfers.append((res_path, pub_res_path))
        return transfers
    def _replace_target_urls(self, otio_data, replace_paths):
        """Replace original movie paths with published rootless ones."""
        for track in otio_data.tracks:
            for clip in track:
                # Check if the clip has a media reference
                if clip.media_reference is not None:
                    # Access the target_url from the media reference
                    target_url = clip.media_reference.target_url
                    if not target_url:
                        continue
                    file_name = os.path.basename(target_url)
                    replace_path = replace_paths.get(file_name)
                    if replace_path:
                        clip.media_reference.target_url = replace_path
                        # Keep clip name in sync when it mirrored the
                        # source file name.
                        if clip.name == file_name:
                            clip.name = os.path.basename(replace_path)
        return otio_data
    def _get_rootless(self, anatomy, path):
        """Try to find rootless {root[work]} path from `path`"""
        success, rootless_path = anatomy.find_root_template_from_path(
            path)
        if not success:
            # `rootless_path` is not set to `output_dir` if none of roots match
            self.log.warning(
                f"Could not find root path for remapping '{path}'."
            )
            rootless_path = path
        return rootless_path
    def _get_published_path(self, instance):
        """Calculates expected `publish` folder"""
        # determine published path from Anatomy.
        template_data = instance.data.get("anatomyData")
        rep = instance.data["representations"][0]
        template_data["representation"] = rep.get("name")
        template_data["ext"] = rep.get("ext")
        template_data["comment"] = None
        anatomy = instance.context.data["anatomy"]
        template_data["root"] = anatomy.roots
        template = anatomy.get_template_item("publish", "default", "path")
        template_filled = template.format_strict(template_data)
        return os.path.normpath(template_filled)
    def _convert_resources(self, output_def, transfers):
        """Converts all resource files to configured format.

        Returns new transfers list with destinations re-pointed to the
        converted files (configured output extension).
        """
        out_extension = output_def["ext"]
        if not out_extension:
            self.log.warning("No output extension configured in "
                "ayon+settings://traypublisher/publish/ExtractEditorialPckgConversion")  # noqa
            return transfers
        final_transfers = []
        # ffmpeg arguments from settings, split into input / filter /
        # output sections.
        out_def_ffmpeg_args = output_def["ffmpeg_args"]
        ffmpeg_input_args = [
            value.strip()
            for value in out_def_ffmpeg_args["input"]
            if value.strip()
        ]
        ffmpeg_video_filters = [
            value.strip()
            for value in out_def_ffmpeg_args["video_filters"]
            if value.strip()
        ]
        ffmpeg_audio_filters = [
            value.strip()
            for value in out_def_ffmpeg_args["audio_filters"]
            if value.strip()
        ]
        ffmpeg_output_args = [
            value.strip()
            for value in out_def_ffmpeg_args["output"]
            if value.strip()
        ]
        ffmpeg_input_args = self._split_ffmpeg_args(ffmpeg_input_args)
        # Command parts shared by all conversions (executable + input
        # args + filters); per-file parts are appended in the loop below.
        generic_args = [
            subprocess.list2cmdline(get_ffmpeg_tool_args("ffmpeg"))
        ]
        generic_args.extend(ffmpeg_input_args)
        if ffmpeg_video_filters:
            generic_args.append("-filter:v")
            generic_args.append(
                "\"{}\"".format(",".join(ffmpeg_video_filters)))
        if ffmpeg_audio_filters:
            generic_args.append("-filter:a")
            generic_args.append(
                "\"{}\"".format(",".join(ffmpeg_audio_filters)))
        for source, destination in transfers:
            base_name = os.path.basename(destination)
            file_name, ext = os.path.splitext(base_name)
            dest_path = os.path.join(os.path.dirname(destination),
                                     f"{file_name}.{out_extension}")
            final_transfers.append((source, dest_path))
            all_args = copy.deepcopy(generic_args)
            all_args.append(f"-i \"{source}\"")
            all_args.extend(ffmpeg_output_args)  # order matters
            all_args.append(f"\"{dest_path}\"")
            # NOTE(review): the command is joined into a single string and
            # executed with shell=True; paths are only double-quoted, not
            # escaped - paths containing quotes would break the command.
            # Consider passing an argument list with shell=False.
            subprcs_cmd = " ".join(all_args)
            # run subprocess
            self.log.debug("Executing: {}".format(subprcs_cmd))
            run_subprocess(subprcs_cmd, shell=True, logger=self.log)
        return final_transfers
    def _split_ffmpeg_args(self, in_args):
        """Makes sure all entered arguments are separated in individual items.

        Split each argument string with " -" to identify if string contains
        one or more arguments.
        """
        splitted_args = []
        for arg in in_args:
            sub_args = arg.split(" -")
            if len(sub_args) == 1:
                if arg and arg not in splitted_args:
                    splitted_args.append(arg)
                continue
            # Re-attach the "-" stripped by split for all but the first
            # fragment.
            for idx, arg in enumerate(sub_args):
                if idx != 0:
                    arg = "-" + arg
                if arg and arg not in splitted_args:
                    splitted_args.append(arg)
        return splitted_args

View file

@ -1,126 +0,0 @@
import os
from pprint import pformat
import pyblish.api
from ayon_core.lib import (
get_ffmpeg_tool_args,
run_subprocess,
)
from ayon_core.pipeline import publish
class ExtractTrimVideoAudio(publish.Extractor):
    """Trim with ffmpeg "mov" and "wav" files.

    Cuts the editorial source movie to the clip range (with handles) and
    creates one representation per requested extension.
    """

    # must be before `ExtractThumbnailSP`
    order = pyblish.api.ExtractorOrder - 0.01
    label = "Extract Trim Video/Audio"
    hosts = ["traypublisher"]
    families = ["clip", "trimming"]
    # make sure it is enabled only if at least both families are available
    match = pyblish.api.Subset

    def process(self, instance):
        representation = instance.data.get("representations")
        self.log.debug(f"_ representation: {representation}")
        if not representation:
            instance.data["representations"] = []

        # get ffmpeg path
        ffmpeg_tool_args = get_ffmpeg_tool_args("ffmpeg")

        # get staging dir
        staging_dir = self.staging_dir(instance)
        self.log.debug("Staging dir set to: `{}`".format(staging_dir))

        # Gather timing data used to trim the source file.
        fps = instance.data["fps"]
        video_file_path = instance.data["editorialSourcePath"]
        extensions = instance.data.get("extensions", ["mov"])
        output_file_type = instance.data.get("outputFileType")
        reviewable = "review" in instance.data["families"]

        frame_start = int(instance.data["frameStart"])
        frame_end = int(instance.data["frameEnd"])
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        # Clip start/duration including handles (in frames).
        clip_start_h = float(instance.data["clipInH"])
        _dur = instance.data["clipDuration"]
        handle_dur = (handle_start + handle_end)
        clip_dur_h = float(_dur + handle_dur)

        # Explicit output type overrides the list of extensions.
        if output_file_type:
            extensions = [output_file_type]

        for ext in extensions:
            self.log.debug("Processing ext: `{}`".format(ext))
            if not ext.startswith("."):
                ext = "." + ext
            clip_trimed_path = os.path.join(
                staging_dir, instance.data["name"] + ext)

            if ext == ".wav":
                # offset time as ffmpeg is having bug
                clip_start_h += 0.5
                # remove "trimming" from families for audio-only output
                # NOTE(review): the original comment claimed "review" was
                # removed here, but the filter drops families containing
                # "trimming" - confirm which behavior is intended.
                instance.data["families"] = [
                    fml for fml in instance.data["families"]
                    if "trimming" not in fml
                ]

            # Seek/trim arguments: values are converted from frames to
            # seconds via fps.
            ffmpeg_args = ffmpeg_tool_args + [
                "-ss", str(clip_start_h / fps),
                "-i", video_file_path,
                "-t", str(clip_dur_h / fps)
            ]
            if ext in [".mov", ".mp4"]:
                ffmpeg_args.extend([
                    "-crf", "18",
                    "-pix_fmt", "yuv420p"
                ])
            # BUGFIX: previously `ext in ".wav"` (substring test) which
            # would also match e.g. "." - equality is the intended check.
            elif ext == ".wav":
                ffmpeg_args.extend([
                    "-vn",
                    "-acodec", "pcm_s16le",
                    "-ar", "48000",
                    "-ac", "2"
                ])

            # add output path
            ffmpeg_args.append(clip_trimed_path)

            joined_args = " ".join(ffmpeg_args)
            self.log.debug(f"Processing: {joined_args}")
            run_subprocess(
                ffmpeg_args, logger=self.log
            )

            repre = {
                "name": ext[1:],
                "ext": ext[1:],
                "files": os.path.basename(clip_trimed_path),
                "stagingDir": staging_dir,
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start - handle_start,
                "frameEndFtrack": frame_end + handle_end,
                "fps": fps,
                "tags": []
            }
            # Only reviewable movie outputs get review/thumbnail tags.
            if ext in [".mov", ".mp4"] and reviewable:
                repre.update({
                    "thumbnail": True,
                    "tags": ["review", "ftrackreview", "delete"]})
            instance.data["representations"].append(repre)

            self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Version already exists</title>
<description>
## Version already exists
Version {version} you have set on instance '{product_name}' under '{folder_path}' already exists. This validation is enabled by default to prevent accidental override of existing versions.
### How to repair?
- Click on 'Repair' action -> this will change version to next available.
- Disable validation on the instance if you are sure you want to override the version.
- Reset publishing and manually change the version number.
</description>
</error>
</root>

View file

@ -1,15 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Invalid frame range</title>
<description>
## Invalid frame range
Expected duration of '{duration}' frames set in database, workfile contains only '{found}' frames.
### How to repair?
Modify configuration in the database or tweak frame range in the workfile.
</description>
</error>
</root>

View file

@ -1,66 +0,0 @@
import pyblish.api
from ayon_core.pipeline import (
publish,
PublishValidationError
)
from ayon_core.pipeline.colorspace import (
get_ocio_config_colorspaces
)
class ValidateColorspace(pyblish.api.InstancePlugin,
                         publish.AYONPyblishPluginMixin,
                         publish.ColormanagedPyblishPluginMixin):
    """Validate representation colorspaces.

    Each representation with 'colorspaceData' must reference a colorspace
    that exists in its OCIO config.

    Raises:
        PublishValidationError: When the OCIO config has no colorspaces or
            a representation references an unknown colorspace.
    """
    label = "Validate representation colorspace"
    order = pyblish.api.ValidatorOrder
    hosts = ["traypublisher"]
    families = ["render", "plate", "reference", "image", "online"]

    def process(self, instance):
        config_colorspaces = {}  # cache of colorspaces per config path
        for repre in instance.data.get("representations", []):
            colorspace_data = repre.get("colorspaceData", {})
            if not colorspace_data:
                # Nothing to validate
                continue

            config_path = colorspace_data["config"]["path"]
            if config_path not in config_colorspaces:
                colorspaces = get_ocio_config_colorspaces(config_path)
                if not colorspaces.get("colorspaces"):
                    # BUGFIX: a trailing comma previously turned 'message'
                    # into a one-element tuple instead of a string.
                    message = (
                        f"OCIO config '{config_path}' does not contain any "
                        "colorspaces. This is an error in the OCIO config. "
                        "Contact your pipeline TD."
                    )
                    raise PublishValidationError(
                        title="Colorspace validation",
                        message=message,
                        description=message
                    )
                config_colorspaces[config_path] = set(
                    colorspaces["colorspaces"])

            colorspace = colorspace_data["colorspace"]
            self.log.debug(
                f"Validating representation '{repre['name']}' "
                f"colorspace '{colorspace}'"
            )
            if colorspace not in config_colorspaces[config_path]:
                message = (
                    f"Representation '{repre['name']}' colorspace "
                    f"'{colorspace}' does not exist in OCIO config: "
                    f"{config_path}"
                )
                raise PublishValidationError(
                    title="Representation colorspace",
                    message=message,
                    description=message
                )

View file

@ -1,89 +0,0 @@
import pyblish.api
from ayon_core.pipeline import (
publish,
PublishValidationError
)
class ValidateColorspaceLook(pyblish.api.InstancePlugin,
                             publish.AYONPyblishPluginMixin):
    """Validate colorspace look attributes.

    Fails when the working colorspace or any required attribute of an
    ocio look item is not filled in the creator attributes.
    """

    label = "Validate colorspace look attributes"
    order = pyblish.api.ValidatorOrder
    hosts = ["traypublisher"]
    families = ["ociolook"]

    def process(self, instance):
        create_context = instance.context.data["create_context"]
        created_instance = create_context.get_instance_by_id(
            instance.data["instance_id"])
        creator_defs = created_instance.creator_attribute_defs

        working_space = instance.data.get("ocioLookWorkingSpace")
        look_items = instance.data.get("ocioLookItems", [])

        # Map attribute definition keys to their human readable labels.
        creator_defs_by_key = {
            attr_def.key: attr_def.label
            for attr_def in creator_defs
        }

        # Collected unset attributes: either a single label (working
        # colorspace) or item name -> list of labels.
        not_set_keys = {}
        if not working_space:
            not_set_keys["working_colorspace"] = creator_defs_by_key[
                "working_colorspace"]

        for look_item in look_items:
            unset_labels = self.validate_colorspace_set_attrs(
                look_item, creator_defs_by_key)
            if unset_labels:
                not_set_keys[look_item["name"]] = unset_labels

        if not not_set_keys:
            return

        message = (
            "Colorspace look attributes are not set: \n"
        )
        for key, value in not_set_keys.items():
            if isinstance(value, list):
                joined_labels = "\n\t- ".join(value)
                message += f"\n\t{key}:\n\t- {joined_labels}"
            else:
                message += f"\n\t{value}"

        raise PublishValidationError(
            title="Colorspace Look attributes",
            message=message,
            description=message
        )

    def validate_colorspace_set_attrs(
        self,
        ociolook_item,
        creator_defs_by_key
    ):
        """Return labels of required attributes unset on the look item.

        Raises:
            KeyError: When a checked key has no matching creator
                attribute definition.
        """
        self.log.debug(f"Validate colorspace look attributes: {ociolook_item}")

        unset_labels = []
        for key in (
            "input_colorspace",
            "output_colorspace",
            "direction",
            "interpolation",
        ):
            if ociolook_item[key]:
                # key is set and it is correct
                continue

            def_label = creator_defs_by_key.get(key)
            if not def_label:
                # raise since key is not recognized by creator defs
                raise KeyError(
                    f"Colorspace look attribute '{key}' is not "
                    f"recognized by creator attributes: {creator_defs_by_key}"
                )
            unset_labels.append(def_label)

        return unset_labels

View file

@ -1,79 +0,0 @@
import os
import opentimelineio
from opentimelineio.exceptions import UnsupportedSchemaError
import pyblish.api
from ayon_core.pipeline import PublishValidationError
class ValidateEditorialPackage(pyblish.api.InstancePlugin):
    """Checks that published folder contains all resources from otio

    Currently checks only by file names and expects flat structure.
    It ignores path to resources in otio file as folder might be dragged in and
    published from different location than it was created.
    """

    label = "Validate Editorial Package"
    order = pyblish.api.ValidatorOrder - 0.49
    hosts = ["traypublisher"]
    families = ["editorial_pkg"]

    def process(self, instance):
        pkg_data = instance.data.get("editorial_pkg")
        if not pkg_data:
            raise PublishValidationError("Editorial package not collected")

        folder_path = pkg_data["folder_path"]
        otio_path = pkg_data["otio_path"]
        if not otio_path:
            raise PublishValidationError(
                f"Folder {folder_path} missing otio file")

        # Compare only file names - resources are expected flat next to
        # the otio file, regardless of paths stored inside it.
        resource_file_names = {
            os.path.basename(path)
            for path in pkg_data["resource_paths"]
        }

        try:
            otio_data = opentimelineio.adapters.read_from_file(otio_path)
        except UnsupportedSchemaError as e:
            raise PublishValidationError(
                f"Unsupported schema in otio file '{otio_path}'."
                "Version of your OpenTimelineIO library is too old."
                "Please update it to the latest version."
                f"Current version is '{opentimelineio.__version__}', "
                "but required is at least 0.16.0."
            ) from e

        missing_files = {
            os.path.basename(target_url)
            for target_url in self._get_all_target_urls(otio_data)
            if os.path.basename(target_url) not in resource_file_names
        }

        if missing_files:
            raise PublishValidationError(
                f"Otio file contains missing files `{missing_files}`.\n\n"
                f"Please add them to `{folder_path}` and republish.")

        instance.data["editorial_pkg"]["otio_data"] = otio_data

    def _get_all_target_urls(self, otio_data):
        """Collect non-empty media reference target urls from all clips."""
        return [
            clip.media_reference.target_url
            for track in otio_data.tracks
            for clip in track
            # Only clips that carry a media reference with a url
            if clip.media_reference is not None
            and clip.media_reference.target_url
        ]

View file

@ -1,58 +0,0 @@
import pyblish.api
from ayon_core.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
OptionalPyblishPluginMixin,
RepairAction,
)
class ValidateExistingVersion(
    OptionalPyblishPluginMixin,
    pyblish.api.InstancePlugin
):
    """Fail publishing when the requested version already exists.

    Optional validator; 'repair' switches the instance back to using
    the next available version.
    """

    label = "Validate Existing Version"
    order = ValidateContentsOrder
    hosts = ["traypublisher"]
    targets = ["local"]
    actions = [RepairAction]

    settings_category = "traypublisher"
    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        version = instance.data.get("version")
        if version is None:
            # No explicit version requested -> nothing to collide with.
            return

        last_version = instance.data.get("latestVersion")
        if last_version is None or last_version < version:
            # Nothing published yet, or requested version is newer.
            return

        product_name = instance.data["productName"]
        msg = "Version {} already exists for product {}.".format(
            version, product_name)
        raise PublishXmlValidationError(
            self,
            msg,
            formatting_data={
                "product_name": product_name,
                "folder_path": instance.data["folderPath"],
                "version": version,
            },
        )

    @classmethod
    def repair(cls, instance):
        """Enable 'use next version' on the instance and save changes."""
        create_context = instance.context.data["create_context"]
        created_instance = create_context.get_instance_by_id(
            instance.data["instance_id"])
        # Disable version override
        created_instance["creator_attributes"]["use_next_version"] = True
        create_context.save_changes()

View file

@ -1,68 +0,0 @@
import os
import pyblish.api
from ayon_core.pipeline import PublishValidationError
class ValidateFilePath(pyblish.api.InstancePlugin):
    """Validate existence of source filepaths on instance.

    Plugins looks into key 'sourceFilepaths' and validate if paths there
    actually exist on disk.

    Also validate if the key is filled but is empty. In that case also
    crashes so do not fill the key if unfilled value should not cause error.

    This is primarily created for Simple Creator instances.
    """

    label = "Validate Filepaths"
    order = pyblish.api.ValidatorOrder - 0.49
    hosts = ["traypublisher"]

    def process(self, instance):
        if "sourceFilepaths" not in instance.data:
            self.log.info((
                "Skipped validation of source filepaths existence."
                " Instance does not have collected 'sourceFilepaths'"
            ))
            return

        product_type = instance.data["productType"]
        label = instance.data["name"]
        filepaths = instance.data["sourceFilepaths"]

        if not filepaths:
            # Key is present but empty -> treated as an error by design.
            raise PublishValidationError(
                f"Source filepaths of '{product_type}' instance"
                f" \"{label}\" are not filled",
                "File not filled",
                (
                    "## Files were not filled"
                    "\nThis mean that you didn't enter any files into required"
                    " file input."
                    f"\n- Please refresh publishing and check instance"
                    f" <b>{label}</b>"
                )
            )

        missing_paths = [
            filepath
            for filepath in filepaths
            if not os.path.exists(filepath)
        ]
        if not missing_paths:
            return

        joined_paths = "\n".join(
            f"- {filepath}"
            for filepath in missing_paths
        )
        raise PublishValidationError(
            f"Filepath of '{product_type}' instance"
            f" \"{label}\" does not exist:\n{joined_paths}",
            "File not found",
            (
                f"## Files were not found\nFiles\n{joined_paths}"
                "\n\nCheck if the path is still available."
            )
        )

View file

@ -1,86 +0,0 @@
import re
import pyblish.api
from ayon_core.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
OptionalPyblishPluginMixin,
)
class ValidateFrameRange(OptionalPyblishPluginMixin,
                         pyblish.api.InstancePlugin):
    """Validating frame range of rendered files against state in DB.

    Compares the number of files in the first representation against the
    duration computed from folder attributes (frame range + handles).

    Raises:
        PublishXmlValidationError: When file count does not match the
            expected duration.
    """

    label = "Validate Frame Range"
    hosts = ["traypublisher"]
    families = ["render", "plate"]
    targets = ["local"]
    order = ValidateContentsOrder

    optional = True

    # published data might be sequence (.mov, .mp4) in that counting files
    # doesn't make sense
    check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga",
                        "gif", "svg"]
    # skip for specific task names (regex)
    skip_timelines_check = []

    def process(self, instance):
        # Skip the instance if is not active by data on the instance
        if not self.is_active(instance.data):
            return

        # editorial would fail since they might not be in database yet
        new_hierarchy = (
            instance.data.get("newHierarchyIntegration")
            # Backwards compatible (Deprecated since 24/06/06)
            or instance.data.get("newAssetPublishing")
        )
        if new_hierarchy:
            self.log.debug("Instance is creating new folder. Skipping.")
            return

        task_name = instance.data["task"]
        if self.skip_timelines_check and any(
            re.search(pattern, task_name)
            for pattern in self.skip_timelines_check
        ):
            self.log.info("Skipping for {} task".format(task_name))
            # BUG FIX: original code logged the skip but fell through
            # and validated anyway; return to actually skip the check.
            return

        folder_attributes = instance.data["folderEntity"]["attrib"]
        frame_start = folder_attributes["frameStart"]
        frame_end = folder_attributes["frameEnd"]
        handle_start = folder_attributes["handleStart"]
        handle_end = folder_attributes["handleEnd"]
        duration = (frame_end - frame_start + 1) + handle_start + handle_end

        repres = instance.data.get("representations")
        if not repres:
            self.log.info("No representations, skipping.")
            return

        first_repre = repres[0]
        ext = first_repre["ext"].replace(".", "")
        if not ext or ext.lower() not in self.check_extensions:
            # Single-file formats (.mov, .mp4, ...) cannot be validated
            # by counting files.
            self.log.warning("Cannot check for extension {}".format(ext))
            return

        files = first_repre["files"]
        if isinstance(files, str):
            files = [files]
        frames = len(files)

        if frames != duration:
            msg = (
                "Frame duration from DB:'{}' doesn't match number of files:'{}'"
                " Please change frame range for Folder or limit no. of files"
            ).format(int(duration), frames)
            formatting_data = {"duration": duration,
                               "found": frames}
            raise PublishXmlValidationError(self, msg,
                                            formatting_data=formatting_data)

        self.log.debug("Valid ranges expected '{}' - found '{}'".
                       format(int(duration), frames))

View file

@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
import ayon_api
import pyblish.api
from ayon_core.pipeline.publish import (
ValidateContentsOrder,
PublishValidationError,
OptionalPyblishPluginMixin,
)
class ValidateOnlineFile(OptionalPyblishPluginMixin,
                         pyblish.api.InstancePlugin):
    """Validate that product doesn't exist yet."""

    label = "Validate Existing Online Files"
    hosts = ["traypublisher"]
    families = ["online"]
    order = ValidateContentsOrder

    optional = True

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        project_name = instance.context.data["projectName"]
        folder_id = instance.data["folderEntity"]["id"]

        # Query the server for a product with the same name under the
        # same folder; any hit means publishing would collide.
        existing_product = ayon_api.get_product_by_name(
            project_name, instance.data["productName"], folder_id)
        if not existing_product:
            return

        raise PublishValidationError(
            "Product to be published already exists.",
            title=self.label
        )

View file

@ -1,6 +0,0 @@
from .window import main
__all__ = (
"main",
)

View file

@ -1,271 +0,0 @@
"""Tray publisher is extending publisher tool.
Adds ability to select project using overlay widget with list of projects.
Tray publisher can be considered as host implementation with creators and
publishing plugins.
"""
import platform
from qtpy import QtWidgets, QtCore
import qtawesome
from ayon_core.lib import AYONSettingsRegistry, is_running_from_build
from ayon_core.pipeline import install_host
from ayon_core.tools.publisher.control_qt import QtPublisherController
from ayon_core.tools.publisher.window import PublisherWindow
from ayon_core.tools.common_models import ProjectsModel
from ayon_core.tools.utils import (
PlaceholderLineEdit,
get_ayon_qt_app,
ProjectsQtModel,
ProjectSortFilterProxy,
PROJECT_NAME_ROLE,
)
from ayon_traypublisher.api import TrayPublisherHost
class TrayPublisherRegistry(AYONSettingsRegistry):
    """Settings registry storing tray publisher user preferences."""

    def __init__(self):
        super().__init__("traypublisher")
class TrayPublisherController(QtPublisherController):
    """Qt publisher controller extended with projects model access."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._projects_model = ProjectsModel(self)

    @property
    def host(self):
        """Host implementation the controller operates on."""
        return self._host

    def reset_hierarchy_cache(self):
        """Drop cached hierarchy (used when project changes)."""
        self._hierarchy_model.reset()

    def get_project_items(self, sender=None):
        """Return project items from the projects model."""
        return self._projects_model.get_project_items(sender)
class StandaloneOverlayWidget(QtWidgets.QFrame):
    """Overlay frame letting the user choose a project before publishing.

    Shown on top of the publisher window. Remembers the last chosen
    project via 'TrayPublisherRegistry' and preselects it on next show.
    """

    # Emitted with the chosen project name once a project is confirmed.
    project_selected = QtCore.Signal(str)

    def __init__(self, controller, publisher_window):
        super(StandaloneOverlayWidget, self).__init__(publisher_window)
        self.setObjectName("OverlayFrame")

        middle_frame = QtWidgets.QFrame(self)
        middle_frame.setObjectName("ChooseProjectFrame")

        content_widget = QtWidgets.QWidget(middle_frame)

        header_label = QtWidgets.QLabel("Choose project", content_widget)
        header_label.setObjectName("ChooseProjectLabel")

        # Create project models and view
        projects_model = ProjectsQtModel(controller)
        projects_proxy = ProjectSortFilterProxy()
        projects_proxy.setSourceModel(projects_model)
        projects_proxy.setFilterKeyColumn(0)

        projects_view = QtWidgets.QListView(content_widget)
        projects_view.setObjectName("ChooseProjectView")
        projects_view.setModel(projects_proxy)
        projects_view.setEditTriggers(
            QtWidgets.QAbstractItemView.NoEditTriggers
        )

        confirm_btn = QtWidgets.QPushButton("Confirm", content_widget)
        cancel_btn = QtWidgets.QPushButton("Cancel", content_widget)
        # Cancel only makes sense once a project was already selected;
        # visibility is updated in 'showEvent'.
        cancel_btn.setVisible(False)

        btns_layout = QtWidgets.QHBoxLayout()
        btns_layout.addStretch(1)
        btns_layout.addWidget(cancel_btn, 0)
        btns_layout.addWidget(confirm_btn, 0)

        txt_filter = PlaceholderLineEdit(content_widget)
        txt_filter.setPlaceholderText("Quick filter projects..")
        txt_filter.setClearButtonEnabled(True)
        txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"),
                             QtWidgets.QLineEdit.LeadingPosition)

        content_layout = QtWidgets.QVBoxLayout(content_widget)
        content_layout.setContentsMargins(0, 0, 0, 0)
        content_layout.setSpacing(20)
        content_layout.addWidget(header_label, 0)
        content_layout.addWidget(txt_filter, 0)
        content_layout.addWidget(projects_view, 1)
        content_layout.addLayout(btns_layout, 0)

        middle_layout = QtWidgets.QHBoxLayout(middle_frame)
        middle_layout.setContentsMargins(30, 30, 10, 10)
        middle_layout.addWidget(content_widget)

        main_layout = QtWidgets.QHBoxLayout(self)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.addStretch(1)
        main_layout.addWidget(middle_frame, 2)
        main_layout.addStretch(1)

        projects_view.doubleClicked.connect(self._on_double_click)
        confirm_btn.clicked.connect(self._on_confirm_click)
        cancel_btn.clicked.connect(self._on_cancel_click)
        txt_filter.textChanged.connect(self._on_text_changed)

        self._projects_view = projects_view
        self._projects_model = projects_model
        self._projects_proxy = projects_proxy
        self._cancel_btn = cancel_btn
        self._confirm_btn = confirm_btn
        self._txt_filter = txt_filter

        self._publisher_window = publisher_window
        # Currently selected project name; 'None' until first selection.
        self._project_name = None

    def showEvent(self, event):
        # Refresh project list each time the overlay is shown; it may
        # have changed on the server side.
        self._projects_model.refresh()

        # Sort projects after refresh
        self._projects_proxy.sort(0)

        # Preselect the project stored from a previous session, if any.
        setting_registry = TrayPublisherRegistry()
        try:
            project_name = setting_registry.get_item("project_name")
        except ValueError:
            project_name = None

        if project_name:
            src_index = self._projects_model.get_index_by_project_name(
                project_name
            )
            index = self._projects_proxy.mapFromSource(src_index)
            if index.isValid():
                selection_model = self._projects_view.selectionModel()
                selection_model.select(
                    index,
                    QtCore.QItemSelectionModel.SelectCurrent
                )
                self._projects_view.setCurrentIndex(index)

        # Cancel is available only when a project was already chosen.
        self._cancel_btn.setVisible(self._project_name is not None)
        super(StandaloneOverlayWidget, self).showEvent(event)

    def _on_double_click(self):
        self.set_selected_project()

    def _on_confirm_click(self):
        self.set_selected_project()

    def _on_cancel_click(self):
        # Keep the current project and just hide the overlay.
        self._set_project(self._project_name)

    def _on_text_changed(self):
        self._projects_proxy.setFilterRegularExpression(
            self._txt_filter.text())

    def set_selected_project(self):
        """Apply the project currently selected in the view, if any."""
        index = self._projects_view.currentIndex()

        project_name = index.data(PROJECT_NAME_ROLE)
        if project_name:
            self._set_project(project_name)

    @property
    def host(self):
        # Host lives on the publisher window's controller.
        return self._publisher_window.controller.host

    def _set_project(self, project_name):
        """Set project on host, hide overlay and persist the choice."""
        self._project_name = project_name
        self.host.set_project_name(project_name)
        self.setVisible(False)
        self.project_selected.emit(project_name)

        # Remember the choice for the next launch.
        setting_registry = TrayPublisherRegistry()
        setting_registry.set_item("project_name", project_name)
class TrayPublishWindow(PublisherWindow):
    """Publisher window with a project-choice overlay for tray publisher."""

    def __init__(self, *args, **kwargs):
        controller = TrayPublisherController()
        super(TrayPublishWindow, self).__init__(
            controller=controller, reset_on_show=False
        )

        flags = self.windowFlags()
        # Disable always on top hint
        if flags & QtCore.Qt.WindowStaysOnTopHint:
            flags ^= QtCore.Qt.WindowStaysOnTopHint

        self.setWindowFlags(flags)

        overlay_widget = StandaloneOverlayWidget(controller, self)

        btns_widget = self._header_extra_widget

        back_to_overlay_btn = QtWidgets.QPushButton(
            "Change project", btns_widget
        )
        save_btn = QtWidgets.QPushButton("Save", btns_widget)
        # TODO implement save mechanism of tray publisher
        save_btn.setVisible(False)

        btns_layout = QtWidgets.QHBoxLayout(btns_widget)
        btns_layout.setContentsMargins(0, 0, 0, 0)
        btns_layout.addWidget(save_btn, 0)
        btns_layout.addWidget(back_to_overlay_btn, 0)

        overlay_widget.project_selected.connect(self._on_project_select)
        back_to_overlay_btn.clicked.connect(self._on_back_to_overlay)
        save_btn.clicked.connect(self._on_tray_publish_save)

        self._back_to_overlay_btn = back_to_overlay_btn
        self._overlay_widget = overlay_widget

    def _set_publish_frame_visible(self, publish_frame_visible):
        super(TrayPublishWindow, self)._set_publish_frame_visible(
            publish_frame_visible
        )
        # Hide project switching while the publish frame is shown.
        self._back_to_overlay_btn.setVisible(not publish_frame_visible)

    def _on_back_to_overlay(self):
        self._overlay_widget.setVisible(True)
        self._resize_overlay()

    def _resize_overlay(self):
        # Overlay always covers the whole window area.
        self._overlay_widget.resize(
            self.width(),
            self.height()
        )

    def resizeEvent(self, event):
        super(TrayPublishWindow, self).resizeEvent(event)
        self._resize_overlay()

    def _on_project_select(self, project_name):
        # TODO register project specific plugin paths
        self._controller.save_changes(False)
        self._controller.reset_hierarchy_cache()
        self.reset()
        if not self._controller.instances:
            self._go_to_create_tab()

    def _on_tray_publish_save(self):
        self._controller.save_changes()
        print("NOT YET IMPLEMENTED")
def main():
    """Install the tray publisher host and run the publisher Qt app."""
    install_host(TrayPublisherHost())

    app_instance = get_ayon_qt_app()

    # Give the process its own taskbar identity when running from
    # sources on Windows, so the window is not grouped under Python.
    if platform.system().lower() == "windows" and not is_running_from_build():
        import ctypes
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(
            u"traypublisher"
        )

    window = TrayPublishWindow()
    window.show()
    app_instance.exec_()

View file

@ -1,3 +0,0 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'traypublisher' version."""
__version__ = "0.2.5"

View file

@ -1,10 +0,0 @@
name = "traypublisher"
title = "TrayPublisher"
version = "0.2.5"
client_dir = "ayon_traypublisher"
ayon_required_addons = {
"core": ">0.3.2",
}
ayon_compatible_addons = {}

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,4 +0,0 @@
Traypublisher Addon
===============
Standalone Tray Publisher tool for AYON.

View file

@ -1,11 +0,0 @@
from ayon_server.addons import BaseServerAddon
from .settings import TraypublisherSettings, DEFAULT_TRAYPUBLISHER_SETTING
class Traypublisher(BaseServerAddon):
    """Server-side addon definition exposing tray publisher settings."""

    settings_model = TraypublisherSettings

    async def get_default_settings(self):
        """Return settings model prefilled with default values."""
        model_cls = self.get_settings_model()
        return model_cls(**DEFAULT_TRAYPUBLISHER_SETTING)

View file

@ -1,10 +0,0 @@
from .main import (
TraypublisherSettings,
DEFAULT_TRAYPUBLISHER_SETTING,
)
__all__ = (
"TraypublisherSettings",
"DEFAULT_TRAYPUBLISHER_SETTING",
)

View file

@ -1,341 +0,0 @@
from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings.validators import ensure_unique_names
from ayon_server.exceptions import BadRequestException
class BatchMovieCreatorPlugin(BaseSettingsModel):
    """Allows to publish multiple video files in one go. <br />Name of matching
    asset is parsed from file names ('asset.mov', 'asset_v001.mov',
    'my_asset_to_publish.mov')"""

    # Variants offered by default in the creator UI.
    default_variants: list[str] = SettingsField(
        title="Default variants",
        default_factory=list
    )
    # Default task names - presumably used to match/prefill the task
    # for created instances; confirm against the creator plugin.
    default_tasks: list[str] = SettingsField(
        title="Default tasks",
        default_factory=list
    )
    # Accepted file extensions (defaults elsewhere use ".mov" form).
    extensions: list[str] = SettingsField(
        title="Extensions",
        default_factory=list
    )
class ColumnItemModel(BaseSettingsModel):
    """Definition of a single column expected in the ingested CSV file.

    NOTE(review): original docstring was copy-pasted from
    'BatchMovieCreatorPlugin' and did not describe this model.
    """

    name: str = SettingsField(
        title="Name",
        default=""
    )
    # Column value type (e.g. "text") - semantics defined by the
    # CSV ingest plugin; confirm against its implementation.
    type: str = SettingsField(
        title="Type",
        default=""
    )
    # Value used when the column is missing or empty.
    default: str = SettingsField(
        title="Default",
        default=""
    )
    required_column: bool = SettingsField(
        title="Required Column",
        default=False
    )
    # Regex each cell of this column must match.
    validation_pattern: str = SettingsField(
        title="Validation Regex Pattern",
        default="^(.*)$"
    )
class ColumnConfigModel(BaseSettingsModel):
    """Configuration of columns expected in the ingested CSV file.

    NOTE(review): original docstring was copy-pasted from
    'BatchMovieCreatorPlugin' and did not describe this model.
    """

    csv_delimiter: str = SettingsField(
        title="CSV delimiter",
        default=","
    )
    columns: list[ColumnItemModel] = SettingsField(
        title="Columns",
        default_factory=list
    )

    # Column names must be unique within the configuration.
    @validator("columns")
    def validate_unique_outputs(cls, value):
        ensure_unique_names(value)
        return value
class RepresentationItemModel(BaseSettingsModel):
    """Representation definition: name and accepted file extensions.

    NOTE(review): original docstring was copy-pasted from
    'BatchMovieCreatorPlugin' and did not describe this model.
    """

    name: str = SettingsField(
        title="Name",
        default=""
    )
    # Extensions must include the leading dot, e.g. ".mov" (enforced
    # by the validator below).
    extensions: list[str] = SettingsField(
        title="Extensions",
        default_factory=list
    )

    @validator("extensions")
    def validate_extension(cls, value):
        for ext in value:
            if not ext.startswith("."):
                raise BadRequestException(f"Extension must start with '.': {ext}")
        return value
class RepresentationConfigModel(BaseSettingsModel):
    """Configuration of representations created from ingested CSV rows."""
    # Separator used when multiple tags are listed in one CSV cell.
    tags_delimiter: str = SettingsField(
        title="Tags delimiter",
        default=";"
    )
    default_tags: list[str] = SettingsField(
        title="Default tags",
        default_factory=list
    )
    representations: list[RepresentationItemModel] = SettingsField(
        title="Representations",
        default_factory=list
    )
    @validator("representations")
    def validate_unique_outputs(cls, value):
        # Representation names must be unique; helper raises otherwise.
        ensure_unique_names(value)
        return value
class IngestCSVPluginModel(BaseSettingsModel):
    """Settings for the CSV ingest creator plugin."""
    enabled: bool = SettingsField(
        title="Enabled",
        default=False
    )
    # Expected CSV layout (delimiter + columns).
    columns_config: ColumnConfigModel = SettingsField(
        title="Columns config",
        default_factory=ColumnConfigModel
    )
    # How ingested files are mapped to representations.
    representations_config: RepresentationConfigModel = SettingsField(
        title="Representations config",
        default_factory=RepresentationConfigModel
    )
class TrayPublisherCreatePluginsModel(BaseSettingsModel):
    """Per-plugin settings for TrayPublisher create plugins."""
    BatchMovieCreator: BatchMovieCreatorPlugin = SettingsField(
        title="Batch Movie Creator",
        default_factory=BatchMovieCreatorPlugin
    )
    IngestCSV: IngestCSVPluginModel = SettingsField(
        title="Ingest CSV",
        default_factory=IngestCSVPluginModel
    )
# Default values for the "create" settings (TrayPublisherCreatePluginsModel).
DEFAULT_CREATORS = {
    "BatchMovieCreator": {
        "default_variants": [
            "Main"
        ],
        "default_tasks": [
            "Compositing"
        ],
        "extensions": [
            ".mov"
        ]
    },
    "IngestCSV": {
        "enabled": True,
        "columns_config": {
            "csv_delimiter": ",",
            "columns": [
                {
                    "name": "File Path",
                    "type": "text",
                    "default": "",
                    "required_column": True,
                    "validation_pattern": "^([a-zA-Z\\:\\ 0-9#._\\\\/]*)$"
                },
                {
                    "name": "Folder Path",
                    "type": "text",
                    "default": "",
                    "required_column": True,
                    "validation_pattern": "^([a-zA-Z0-9_\\/]*)$"
                },
                {
                    "name": "Task Name",
                    "type": "text",
                    "default": "",
                    "required_column": True,
                    "validation_pattern": "^(.*)$"
                },
                {
                    "name": "Product Type",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^(.*)$"
                },
                {
                    "name": "Variant",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^(.*)$"
                },
                {
                    "name": "Version",
                    "type": "number",
                    "default": "1",
                    "required_column": True,
                    "validation_pattern": "^(\\d{1,3})$"
                },
                {
                    "name": "Version Comment",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^(.*)$"
                },
                {
                    "name": "Version Thumbnail",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^([a-zA-Z\\:\\ 0-9#._\\\\/]*)$"
                },
                {
                    "name": "Frame Start",
                    "type": "number",
                    "default": "0",
                    "required_column": True,
                    "validation_pattern": "^(\\d{1,8})$"
                },
                {
                    "name": "Frame End",
                    "type": "number",
                    "default": "0",
                    "required_column": True,
                    "validation_pattern": "^(\\d{1,8})$"
                },
                {
                    "name": "Handle Start",
                    "type": "number",
                    "default": "0",
                    "required_column": True,
                    "validation_pattern": "^(\\d)$"
                },
                {
                    "name": "Handle End",
                    "type": "number",
                    "default": "0",
                    "required_column": True,
                    "validation_pattern": "^(\\d)$"
                },
                {
                    "name": "FPS",
                    "type": "decimal",
                    "default": "0.0",
                    "required_column": True,
                    "validation_pattern": "^[0-9]*\\.[0-9]+$|^[0-9]+$"
                },
                {
                    "name": "Slate Exists",
                    "type": "bool",
                    "default": "True",
                    "required_column": False,
                    "validation_pattern": "(True|False)"
                },
                {
                    "name": "Representation",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^(.*)$"
                },
                {
                    "name": "Representation Colorspace",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^(.*)$"
                },
                {
                    "name": "Representation Tags",
                    "type": "text",
                    "default": "",
                    "required_column": False,
                    "validation_pattern": "^(.*)$"
                }
            ]
        },
        "representations_config": {
            "tags_delimiter": ";",
            "default_tags": [
                "review"
            ],
            "representations": [
                {
                    "name": "preview",
                    "extensions": [
                        ".mp4",
                        ".mov"
                    ]
                },
                {
                    "name": "exr",
                    "extensions": [
                        ".exr"
                    ]
                },
                {
                    "name": "edit",
                    "extensions": [
                        ".mov"
                    ]
                },
                {
                    "name": "review",
                    "extensions": [
                        ".mov"
                    ]
                },
                {
                    "name": "nuke",
                    "extensions": [
                        ".nk"
                    ]
                }
            ]
        }
    }
}

View file

@ -1,181 +0,0 @@
from ayon_server.settings import (
BaseSettingsModel,
SettingsField,
task_types_enum,
)
class ClipNameTokenizerItem(BaseSettingsModel):
    """Named regex used to extract tokens from clip names."""
    _layout = "expanded"
    name: str = SettingsField("", title="Tokenizer name")
    regex: str = SettingsField("", title="Tokenizer regex")
class ShotAddTasksItem(BaseSettingsModel):
    """Task (name + type) to create on each new shot."""
    _layout = "expanded"
    name: str = SettingsField('', title="Key")
    task_type: str = SettingsField(
        title="Task type",
        enum_resolver=task_types_enum
    )
class ShotRenameSubmodel(BaseSettingsModel):
    """Optional renaming of created shots via a template."""
    enabled: bool = True
    shot_rename_template: str = SettingsField(
        "",
        title="Shot rename template"
    )
# Enum options for ``TokenToParentConvertorItem.parent_type``.
parent_type_enum = [
    {"value": "Project", "label": "Project"},
    {"value": "Folder", "label": "Folder"},
    {"value": "Episode", "label": "Episode"},
    {"value": "Sequence", "label": "Sequence"},
]
class TokenToParentConvertorItem(BaseSettingsModel):
    """Maps a token name to a parent entity definition in the hierarchy."""
    # TODO - was 'type' must be renamed in code to `parent_type`
    parent_type: str = SettingsField(
        "Project",
        enum_resolver=lambda: parent_type_enum
    )
    name: str = SettingsField(
        "",
        title="Parent token name",
        description="Unique name used in `Parent path template`"
    )
    value: str = SettingsField(
        "",
        title="Parent token value",
        description="Template where any text, Anatomy keys and Tokens could be used"  # noqa
    )
class ShotHierarchySubmodel(BaseSettingsModel):
    """Settings for building the parent hierarchy of created shots."""
    enabled: bool = True
    parents_path: str = SettingsField(
        "",
        title="Parents path template",
        description="Using keys from \"Token to parent convertor\" or tokens directly"  # noqa
    )
    # Bugfix: this is a list-typed field, so the default must be an empty
    # list. ``default_factory=TokenToParentConvertorItem`` produced a single
    # model instance where a list is expected.
    parents: list[TokenToParentConvertorItem] = SettingsField(
        default_factory=list,
        title="Token to parent convertor"
    )
# Enum options for ``ProductTypePresetItem.output_file_type``.
output_file_type = [
    {"value": ".mp4", "label": "MP4"},
    {"value": ".mov", "label": "MOV"},
    {"value": ".wav", "label": "WAV"}
]
class ProductTypePresetItem(BaseSettingsModel):
    """Preset describing a product type created from editorial clips."""
    product_type: str = SettingsField("", title="Product type")
    # TODO add placeholder '< Inherited >'
    variant: str = SettingsField("", title="Variant")
    review: bool = SettingsField(True, title="Review")
    output_file_type: str = SettingsField(
        ".mp4",
        enum_resolver=lambda: output_file_type
    )
class EditorialSimpleCreatorPlugin(BaseSettingsModel):
    """Settings for the simple editorial creator."""
    default_variants: list[str] = SettingsField(
        default_factory=list,
        title="Default Variants"
    )
    # Bugfix: list-typed fields must default to an empty list.
    # ``default_factory=ClipNameTokenizerItem`` produced a single model
    # instance where a list is expected.
    clip_name_tokenizer: list[ClipNameTokenizerItem] = SettingsField(
        default_factory=list,
        description=(
            "Using Regex expression to create tokens. \nThose can be used"
            " later in \"Shot rename\" creator \nor \"Shot hierarchy\"."
            "\n\nTokens should be decorated with \"_\" on each side"
        )
    )
    shot_rename: ShotRenameSubmodel = SettingsField(
        title="Shot Rename",
        default_factory=ShotRenameSubmodel
    )
    shot_hierarchy: ShotHierarchySubmodel = SettingsField(
        title="Shot Hierarchy",
        default_factory=ShotHierarchySubmodel
    )
    # Bugfix: same list-default issue as ``clip_name_tokenizer``.
    shot_add_tasks: list[ShotAddTasksItem] = SettingsField(
        title="Add tasks to shot",
        default_factory=list
    )
    product_type_presets: list[ProductTypePresetItem] = SettingsField(
        default_factory=list
    )
class TraypublisherEditorialCreatorPlugins(BaseSettingsModel):
    """Per-plugin settings for TrayPublisher editorial creators."""
    editorial_simple: EditorialSimpleCreatorPlugin = SettingsField(
        title="Editorial simple creator",
        default_factory=EditorialSimpleCreatorPlugin,
    )
# Default values for "editorial_creators" settings
# (TraypublisherEditorialCreatorPlugins).
DEFAULT_EDITORIAL_CREATORS = {
    "editorial_simple": {
        "default_variants": [
            "Main"
        ],
        "clip_name_tokenizer": [
            {"name": "_sequence_", "regex": "(sc\\d{3})"},
            {"name": "_shot_", "regex": "(sh\\d{3})"}
        ],
        "shot_rename": {
            "enabled": True,
            "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}"
        },
        "shot_hierarchy": {
            "enabled": True,
            "parents_path": "{project}/{folder}/{sequence}",
            "parents": [
                {
                    "parent_type": "Project",
                    "name": "project",
                    "value": "{project[name]}"
                },
                {
                    "parent_type": "Folder",
                    "name": "folder",
                    "value": "shots"
                },
                {
                    "parent_type": "Sequence",
                    "name": "sequence",
                    "value": "{_sequence_}"
                }
            ]
        },
        "shot_add_tasks": [],
        "product_type_presets": [
            {
                "product_type": "review",
                "variant": "Reference",
                "review": True,
                "output_file_type": ".mp4"
            },
            {
                "product_type": "plate",
                "variant": "",
                "review": False,
                "output_file_type": ".mov"
            },
            {
                "product_type": "audio",
                "variant": "",
                "review": False,
                "output_file_type": ".wav"
            }
        ]
    }
}

View file

@ -1,62 +0,0 @@
from pydantic import validator
from ayon_server.settings import BaseSettingsModel, SettingsField
from ayon_server.settings.validators import ensure_unique_names
class ImageIOConfigModel(BaseSettingsModel):
    """[DEPRECATED] Addon OCIO config settings. Please set the OCIO config
    path in the Core addon profiles here
    (ayon+settings://core/imageio/ocio_config_profiles).
    """
    # Kept only for backward compatibility of stored settings.
    override_global_config: bool = SettingsField(
        False,
        title="Override global OCIO config",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
    filepath: list[str] = SettingsField(
        default_factory=list,
        title="Config path",
        description=(
            "DEPRECATED functionality. Please set the OCIO config path in the "
            "Core addon profiles here (ayon+settings://core/imageio/"
            "ocio_config_profiles)."
        ),
    )
class ImageIOFileRuleModel(BaseSettingsModel):
    """Colorspace file rule: match files by regex/extension to a colorspace."""
    name: str = SettingsField("", title="Rule name")
    pattern: str = SettingsField("", title="Regex pattern")
    colorspace: str = SettingsField("", title="Colorspace name")
    ext: str = SettingsField("", title="File extension")
class ImageIOFileRulesModel(BaseSettingsModel):
    """Host-level colorspace file rules."""
    activate_host_rules: bool = SettingsField(False)
    rules: list[ImageIOFileRuleModel] = SettingsField(
        default_factory=list,
        title="Rules"
    )
    @validator("rules")
    def validate_unique_outputs(cls, value):
        # Rule names must be unique; ayon_server helper raises otherwise.
        ensure_unique_names(value)
        return value
class TrayPublisherImageIOModel(BaseSettingsModel):
    """Color management (ImageIO) settings for TrayPublisher."""
    activate_host_color_management: bool = SettingsField(
        True, title="Enable Color Management"
    )
    # Deprecated OCIO override; see ImageIOConfigModel.
    ocio_config: ImageIOConfigModel = SettingsField(
        default_factory=ImageIOConfigModel,
        title="OCIO config"
    )
    file_rules: ImageIOFileRulesModel = SettingsField(
        default_factory=ImageIOFileRulesModel,
        title="File Rules"
    )

View file

@ -1,51 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
from .imageio import TrayPublisherImageIOModel
from .simple_creators import (
SimpleCreatorPlugin,
DEFAULT_SIMPLE_CREATORS,
)
from .editorial_creators import (
TraypublisherEditorialCreatorPlugins,
DEFAULT_EDITORIAL_CREATORS,
)
from .creator_plugins import (
TrayPublisherCreatePluginsModel,
DEFAULT_CREATORS,
)
from .publish_plugins import (
TrayPublisherPublishPlugins,
DEFAULT_PUBLISH_PLUGINS,
)
class TraypublisherSettings(BaseSettingsModel):
    """Traypublisher Project Settings."""
    imageio: TrayPublisherImageIOModel = SettingsField(
        default_factory=TrayPublisherImageIOModel,
        title="Color Management (ImageIO)"
    )
    # Bugfix: a list-typed field must default to an empty list.
    # ``default_factory=SimpleCreatorPlugin`` produced a single model
    # instance where a list is expected.
    simple_creators: list[SimpleCreatorPlugin] = SettingsField(
        title="Simple Create Plugins",
        default_factory=list,
    )
    editorial_creators: TraypublisherEditorialCreatorPlugins = SettingsField(
        title="Editorial Creators",
        default_factory=TraypublisherEditorialCreatorPlugins,
    )
    create: TrayPublisherCreatePluginsModel = SettingsField(
        title="Create",
        default_factory=TrayPublisherCreatePluginsModel
    )
    publish: TrayPublisherPublishPlugins = SettingsField(
        title="Publish Plugins",
        default_factory=TrayPublisherPublishPlugins
    )
# Default values for TraypublisherSettings, assembled from the per-section
# defaults imported above.
DEFAULT_TRAYPUBLISHER_SETTING = {
    "simple_creators": DEFAULT_SIMPLE_CREATORS,
    "editorial_creators": DEFAULT_EDITORIAL_CREATORS,
    "create": DEFAULT_CREATORS,
    "publish": DEFAULT_PUBLISH_PLUGINS,
}

View file

@ -1,116 +0,0 @@
from ayon_server.settings import (
BaseSettingsModel,
SettingsField,
)
class ValidatePluginModel(BaseSettingsModel):
    """Common enabled/optional/active toggles for validator publish plugins."""
    _isGroup = True
    enabled: bool = True
    optional: bool = SettingsField(True, title="Optional")
    active: bool = SettingsField(True, title="Active")
class ValidateFrameRangeModel(ValidatePluginModel):
    """Settings for the "Validate Frame Range" publish plugin.

    Inherits the ``enabled``/``optional``/``active`` toggles from
    ValidatePluginModel and adds no fields of its own. (The previous
    docstring was copy-pasted from BatchMovieCreatorPlugin.)
    """
class ExtractEditorialPckgFFmpegModel(BaseSettingsModel):
    """FFmpeg argument lists used for editorial package conversion."""
    video_filters: list[str] = SettingsField(
        default_factory=list,
        title="Video filters"
    )
    audio_filters: list[str] = SettingsField(
        default_factory=list,
        title="Audio filters"
    )
    # Arguments placed before the input file on the ffmpeg command line.
    input: list[str] = SettingsField(
        default_factory=list,
        title="Input arguments"
    )
    # Arguments placed before the output file on the ffmpeg command line.
    output: list[str] = SettingsField(
        default_factory=list,
        title="Output arguments"
    )
class ExtractEditorialPckgOutputDefModel(BaseSettingsModel):
    """Target extension and FFmpeg arguments for one conversion output."""
    _layout = "expanded"
    ext: str = SettingsField("", title="Output extension")
    ffmpeg_args: ExtractEditorialPckgFFmpegModel = SettingsField(
        default_factory=ExtractEditorialPckgFFmpegModel,
        title="FFmpeg arguments"
    )
class ExtractEditorialPckgConversionModel(BaseSettingsModel):
    """Set output definition if resource files should be converted."""
    conversion_enabled: bool = SettingsField(True,
                                             title="Conversion enabled")
    output: ExtractEditorialPckgOutputDefModel = SettingsField(
        default_factory=ExtractEditorialPckgOutputDefModel,
        title="Output Definitions",
    )
class TrayPublisherPublishPlugins(BaseSettingsModel):
    """Per-plugin settings for TrayPublisher publish plugins."""
    CollectFrameDataFromAssetEntity: ValidatePluginModel = SettingsField(
        default_factory=ValidatePluginModel,
        title="Collect Frame Data From Folder Entity",
    )
    ValidateFrameRange: ValidateFrameRangeModel = SettingsField(
        title="Validate Frame Range",
        default_factory=ValidateFrameRangeModel,
    )
    ValidateExistingVersion: ValidatePluginModel = SettingsField(
        title="Validate Existing Version",
        default_factory=ValidatePluginModel,
    )
    ExtractEditorialPckgConversion: ExtractEditorialPckgConversionModel = (
        SettingsField(
            default_factory=ExtractEditorialPckgConversionModel,
            title="Extract Editorial Package Conversion"
        )
    )
# Default values for "publish" settings (TrayPublisherPublishPlugins).
# NOTE(review): "ExtractEditorialPckgConversion" carries an "optional" key
# that has no matching field on ExtractEditorialPckgConversionModel —
# verify whether it is consumed elsewhere or is stale.
DEFAULT_PUBLISH_PLUGINS = {
    "CollectFrameDataFromAssetEntity": {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ValidateFrameRange": {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ValidateExistingVersion": {
        "enabled": True,
        "optional": True,
        "active": True
    },
    "ExtractEditorialPckgConversion": {
        "optional": False,
        "conversion_enabled": True,
        "output": {
            "ext": "",
            "ffmpeg_args": {
                "video_filters": [],
                "audio_filters": [],
                "input": [
                    "-apply_trc gamma22"
                ],
                "output": [
                    "-pix_fmt yuv420p",
                    "-crf 18",
                    "-intra"
                ]
            }
        }
    }
}

View file

@ -1,310 +0,0 @@
from ayon_server.settings import BaseSettingsModel, SettingsField
class SimpleCreatorPlugin(BaseSettingsModel):
    """Definition of one simple creator shown in the TrayPublisher UI."""
    _layout = "expanded"
    product_type: str = SettingsField("", title="Product type")
    # TODO add placeholder
    identifier: str = SettingsField("", title="Identifier")
    label: str = SettingsField("", title="Label")
    icon: str = SettingsField("", title="Icon")
    default_variants: list[str] = SettingsField(
        default_factory=list,
        title="Default Variants"
    )
    description: str = SettingsField(
        "",
        title="Description",
        widget="textarea"
    )
    detailed_description: str = SettingsField(
        "",
        title="Detailed Description",
        widget="textarea"
    )
    # Allow publishing file sequences (image sequences etc.).
    allow_sequences: bool = SettingsField(
        False,
        title="Allow sequences"
    )
    allow_multiple_items: bool = SettingsField(
        False,
        title="Allow multiple items"
    )
    allow_version_control: bool = SettingsField(
        False,
        title="Allow version control"
    )
    extensions: list[str] = SettingsField(
        default_factory=list,
        title="Extensions"
    )
# Default values for "simple_creators" settings (list[SimpleCreatorPlugin]).
DEFAULT_SIMPLE_CREATORS = [
    {
        "product_type": "workfile",
        "identifier": "",
        "label": "Workfile",
        "icon": "fa.file",
        "default_variants": [
            "Main"
        ],
        "description": "Backup of a working scene",
        "detailed_description": "Workfiles are full scenes from any application that are directly edited by artists. They represent a state of work on a task at a given point and are usually not directly referenced into other scenes.",
        "allow_sequences": False,
        "allow_multiple_items": False,
        "allow_version_control": False,
        "extensions": [
            ".ma",
            ".mb",
            ".nk",
            ".hrox",
            ".hip",
            ".hiplc",
            ".hipnc",
            ".blend",
            ".scn",
            ".tvpp",
            ".comp",
            ".zip",
            ".prproj",
            ".drp",
            ".psd",
            ".psb",
            ".aep"
        ]
    },
    {
        "product_type": "model",
        "identifier": "",
        "label": "Model",
        "icon": "fa.cubes",
        "default_variants": [
            "Main",
            "Proxy",
            "Sculpt"
        ],
        "description": "Clean models",
        "detailed_description": "Models should only contain geometry data, without any extras like cameras, locators or bones.\n\nKeep in mind that models published from tray publisher are not validated for correctness. ",
        "allow_sequences": False,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".ma",
            ".mb",
            ".obj",
            ".abc",
            ".fbx",
            ".bgeo",
            ".bgeogz",
            ".bgeosc",
            ".usd",
            ".blend"
        ]
    },
    {
        "product_type": "pointcache",
        "identifier": "",
        "label": "Pointcache",
        "icon": "fa.gears",
        "default_variants": [
            "Main"
        ],
        "description": "Geometry Caches",
        "detailed_description": "Alembic or bgeo cache of animated data",
        "allow_sequences": True,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".abc",
            ".bgeo",
            ".bgeogz",
            ".bgeosc"
        ]
    },
    {
        "product_type": "plate",
        "identifier": "",
        "label": "Plate",
        "icon": "mdi.camera-image",
        "default_variants": [
            "Main",
            "BG",
            "Animatic",
            "Reference",
            "Offline"
        ],
        "description": "Footage Plates",
        "detailed_description": "Any type of image seqeuence coming from outside of the studio. Usually camera footage, but could also be animatics used for reference.",
        "allow_sequences": True,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".exr",
            ".png",
            ".dng",
            ".dpx",
            ".jpg",
            ".tiff",
            ".tif",
            ".mov",
            ".mp4",
            ".avi"
        ]
    },
    {
        "product_type": "render",
        "identifier": "",
        "label": "Render",
        "icon": "mdi.folder-multiple-image",
        "default_variants": [],
        "description": "Rendered images or video",
        "detailed_description": "Sequence or single file renders",
        "allow_sequences": True,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".exr",
            ".png",
            ".dng",
            ".dpx",
            ".jpg",
            ".jpeg",
            ".tiff",
            ".tif",
            ".mov",
            ".mp4",
            ".avi"
        ]
    },
    {
        "product_type": "camera",
        "identifier": "",
        "label": "Camera",
        "icon": "fa.video-camera",
        "default_variants": [],
        "description": "3d Camera",
        "detailed_description": "Ideally this should be only camera itself with baked animation, however, it can technically also include helper geometry.",
        "allow_sequences": False,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".abc",
            ".ma",
            ".hip",
            ".blend",
            ".fbx",
            ".usd"
        ]
    },
    {
        "product_type": "image",
        "identifier": "",
        "label": "Image",
        "icon": "fa.image",
        "default_variants": [
            "Reference",
            "Texture",
            "Concept",
            "Background"
        ],
        "description": "Single image",
        "detailed_description": "Any image data can be published as image product type. References, textures, concept art, matte paints. This is a fallback 2d product type for everything that doesn't fit more specific product type.",
        "allow_sequences": False,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".exr",
            ".jpg",
            ".jpeg",
            ".dng",
            ".dpx",
            ".bmp",
            ".tif",
            ".tiff",
            ".png",
            ".psb",
            ".psd"
        ]
    },
    {
        "product_type": "vdb",
        "identifier": "",
        "label": "VDB Volumes",
        "icon": "fa.cloud",
        "default_variants": [],
        "description": "Sparse volumetric data",
        "detailed_description": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids",
        "allow_sequences": True,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": [
            ".vdb"
        ]
    },
    {
        "product_type": "matchmove",
        "identifier": "",
        "label": "Matchmove",
        "icon": "fa.empire",
        "default_variants": [
            "Camera",
            "Object",
            "Mocap"
        ],
        "description": "Matchmoving script",
        "detailed_description": "Script exported from matchmoving application to be later processed into a tracked camera with additional data",
        "allow_sequences": False,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": []
    },
    {
        "product_type": "rig",
        "identifier": "",
        "label": "Rig",
        "icon": "fa.wheelchair",
        "default_variants": [],
        "description": "CG rig file",
        "detailed_description": "CG rigged character or prop. Rig should be clean of any extra data and directly loadable into it's respective application\t",
        "allow_sequences": False,
        "allow_multiple_items": False,
        "allow_version_control": False,
        "extensions": [
            ".ma",
            ".blend",
            ".hip",
            ".hda"
        ]
    },
    {
        "product_type": "simpleUnrealTexture",
        "identifier": "",
        "label": "Simple UE texture",
        "icon": "fa.image",
        "default_variants": [],
        "description": "Simple Unreal Engine texture",
        "detailed_description": "Texture files with Unreal Engine naming conventions",
        "allow_sequences": False,
        "allow_multiple_items": True,
        "allow_version_control": False,
        "extensions": []
    },
    {
        "product_type": "audio",
        "identifier": "",
        "label": "Audio ",
        "icon": "fa5s.file-audio",
        "default_variants": [
            "Main"
        ],
        "description": "Audio product",
        "detailed_description": "Audio files for review or final delivery",
        "allow_sequences": False,
        "allow_multiple_items": False,
        "allow_version_control": False,
        "extensions": [
            ".wav"
        ]
    }
]