Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Commit 490db4062a: [Automated] Merged develop into main
64 changed files with 2748 additions and 594 deletions
.gitmodules (vendored): 3 changes

@@ -5,3 +5,6 @@
 [submodule "tools/modules/powershell/PSWriteColor"]
 	path = tools/modules/powershell/PSWriteColor
 	url = https://github.com/EvotecIT/PSWriteColor.git
+[submodule "vendor/configs/OpenColorIO-Configs"]
+	path = vendor/configs/OpenColorIO-Configs
+	url = https://github.com/imageworks/OpenColorIO-Configs
@@ -9,6 +9,7 @@ from .settings import (
 )
 from .lib import (
     PypeLogger,
+    Logger,
     Anatomy,
     config,
     execute,
@@ -58,8 +59,6 @@ from .action import (
     RepairContextAction
 )

-# for backward compatibility with Pype 2
-Logger = PypeLogger

 __all__ = [
     "get_system_settings",
@@ -136,7 +136,8 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
             "tasks": {
                 task["name"]: {"type": task["type"]}
                 for task in self.add_tasks},
-            "representations": []
+            "representations": [],
+            "newAssetPublishing": True
         })
         self.log.debug("__ inst_data: {}".format(pformat(inst_data)))
@@ -109,7 +109,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             "clipAnnotations": annotations,

             # add all additional tags
-            "tags": phiero.get_track_item_tags(track_item)
+            "tags": phiero.get_track_item_tags(track_item),
+            "newAssetPublishing": True
         })

         # otio clip data
@@ -27,6 +27,29 @@ def escape_space(path):
     return '"{}"'.format(path) if " " in path else path


+def get_ocio_config_path(profile_folder):
+    """Path to OpenPype vendorized OCIO.
+
+    Vendorized OCIO config file path is grabbed from the specific path
+    hierarchy specified below.
+
+    "{OPENPYPE_ROOT}/vendor/OpenColorIO-Configs/{profile_folder}/config.ocio"
+    Args:
+        profile_folder (str): Name of folder to grab config file from.
+
+    Returns:
+        str: Path to vendorized config file.
+    """
+    return os.path.join(
+        os.environ["OPENPYPE_ROOT"],
+        "vendor",
+        "configs",
+        "OpenColorIO-Configs",
+        profile_folder,
+        "config.ocio"
+    )
+
+
 def find_paths_by_hash(texture_hash):
     """Find the texture hash key in the dictionary.
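A quick sketch of the path the new helper resolves, assuming OPENPYPE_ROOT points at an OpenPype install (the profile name "nuke-default" is the one ExtractLook passes further below):

import os

os.environ.setdefault("OPENPYPE_ROOT", "/opt/openpype")  # assumed install root
config_path = os.path.join(
    os.environ["OPENPYPE_ROOT"],
    "vendor", "configs", "OpenColorIO-Configs",
    "nuke-default", "config.ocio"
)
print(config_path)
# /opt/openpype/vendor/configs/OpenColorIO-Configs/nuke-default/config.ocio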
@@ -79,10 +102,11 @@ def maketx(source, destination, *args):
         # use oiio-optimized settings for tile-size, planarconfig, metadata
         "--oiio",
         "--filter lanczos3",
+        escape_space(source)
     ]

     cmd.extend(args)
-    cmd.extend(["-o", escape_space(destination), escape_space(source)])
+    cmd.extend(["-o", escape_space(destination)])

     cmd = " ".join(cmd)
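The hunk moves the source path into the fixed argument list, so caller-supplied arguments now land between source and the output flag. A rough sketch of the assembled command (the head of the list and the extra argument are illustrative, not taken from this file):

def escape_space(path):
    return '"{}"'.format(path) if " " in path else path

cmd = [
    "maketx",                                # hypothetical head of the list
    "--oiio",
    "--filter lanczos3",
    escape_space("/tex/my diffuse.tif"),     # source now precedes extra args
]
cmd.extend(["--checknan"])                   # caller-supplied extra arguments
cmd.extend(["-o", escape_space("/tex/my_diffuse.tx")])
print(" ".join(cmd))
# maketx --oiio --filter lanczos3 "/tex/my diffuse.tif" --checknan -o /tex/my_diffuse.tx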
@@ -493,6 +517,8 @@ class ExtractLook(openpype.api.Extractor):
         else:
             colorconvert = ""

+        config_path = get_ocio_config_path("nuke-default")
+        color_config = "--colorconfig {0}".format(config_path)
         # Ensure folder exists
         if not os.path.exists(os.path.dirname(converted)):
             os.makedirs(os.path.dirname(converted))
@@ -502,10 +528,11 @@ class ExtractLook(openpype.api.Extractor):
             filepath,
             converted,
             # Include `source-hash` as string metadata
-            "-sattrib",
+            "--sattrib",
             "sourceHash",
             escape_space(texture_hash),
             colorconvert,
+            color_config
         )

         return converted, COPY, texture_hash
@@ -23,7 +23,6 @@ from openpype.api import (
     Logger,
     BuildWorkfile,
     get_version_from_path,
-    get_workdir_data,
     get_current_project_settings,
 )
 from openpype.tools.utils import host_tools
@@ -34,6 +33,7 @@ from openpype.settings import (
     get_anatomy_settings,
 )
 from openpype.modules import ModulesManager
+from openpype.pipeline.template_data import get_template_data_with_names
 from openpype.pipeline import (
     discover_legacy_creator_plugins,
     legacy_io,
@@ -910,19 +910,17 @@ def get_render_path(node):
     ''' Generate Render path from presets regarding avalon knob data
     '''
     avalon_knob_data = read_avalon_data(node)
-    data = {'avalon': avalon_knob_data}

     nuke_imageio_writes = get_imageio_node_setting(
         node_class=avalon_knob_data["family"],
         plugin_name=avalon_knob_data["creator"],
         subset=avalon_knob_data["subset"]
     )
-    host_name = os.environ.get("AVALON_APP")

-    data.update({
-        "app": host_name,
+    data = {
+        "avalon": avalon_knob_data,
         "nuke_imageio_writes": nuke_imageio_writes
-    })
+    }

     anatomy_filled = format_anatomy(data)
     return anatomy_filled["render"]["path"].replace("\\", "/")
@@ -965,12 +963,11 @@ def format_anatomy(data):
         data["version"] = get_version_from_path(file)

     project_name = anatomy.project_name
-    project_doc = get_project(project_name)
-    asset_doc = get_asset_by_name(project_name, data["avalon"]["asset"])
+    asset_name = data["avalon"]["asset"]
     task_name = os.environ["AVALON_TASK"]
     host_name = os.environ["AVALON_APP"]
-    context_data = get_workdir_data(
-        project_doc, asset_doc, task_name, host_name
+    context_data = get_template_data_with_names(
+        project_name, asset_name, task_name, host_name
    )
    data.update(context_data)
    data.update({
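The swap replaces document-based inputs with plain names; the function fetches the documents itself. A runnable mock of the new call shape (the real function lives in openpype.pipeline.template_data, only the signature is taken from this hunk, the body and names below are illustrative):

def get_template_data_with_names(project_name, asset_name, task_name, host_name):
    # stand-in: the real implementation queries the database by name
    return {
        "project": {"name": project_name},
        "asset": asset_name,
        "task": task_name,
        "app": host_name,
    }

data = {}
data.update(get_template_data_with_names(
    "MyProject", "sc010_sh030", "compositing", "nuke"))
print(data["asset"])  # sc010_sh030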
@@ -1128,10 +1125,8 @@ def create_write_node(
         if knob["name"] == "file_type":
             representation = knob["value"]

-    host_name = os.environ.get("AVALON_APP")
     try:
         data.update({
-            "app": host_name,
             "imageio_writes": imageio_writes,
             "representation": representation,
         })
@@ -181,8 +181,6 @@ class ExporterReview(object):
             # get first and last frame
             self.first_frame = min(self.collection.indexes)
             self.last_frame = max(self.collection.indexes)
-            if "slate" in self.instance.data["families"]:
-                self.first_frame += 1
         else:
             self.fname = os.path.basename(self.path_in)
             self.fhead = os.path.splitext(self.fname)[0] + "."
@@ -33,6 +33,7 @@ class CollectSlate(pyblish.api.InstancePlugin):

         if slate_node:
+            instance.data["slateNode"] = slate_node
             instance.data["slate"] = True
             instance.data["families"].append("slate")
             instance.data["versionData"]["families"].append("slate")
             self.log.info(
@@ -31,10 +31,6 @@ class NukeRenderLocal(openpype.api.Extractor):

         first_frame = instance.data.get("frameStartHandle", None)

-        # exception for slate workflow
-        if "slate" in families:
-            first_frame -= 1
-
         last_frame = instance.data.get("frameEndHandle", None)
         node_subset_name = instance.data.get("name", None)
@@ -68,10 +64,6 @@ class NukeRenderLocal(openpype.api.Extractor):
             int(last_frame)
         )

-        # exception for slate workflow
-        if "slate" in families:
-            first_frame += 1
-
         ext = node["file_type"].value()

         if "representations" not in instance.data:
@@ -88,8 +80,11 @@ class NukeRenderLocal(openpype.api.Extractor):
         repre = {
             'name': ext,
             'ext': ext,
-            'frameStart': "%0{}d".format(
-                len(str(last_frame))) % first_frame,
+            'frameStart': (
+                "{{:0>{}}}"
+                .format(len(str(last_frame)))
+                .format(first_frame)
+            ),
             'files': filenames,
             "stagingDir": out_dir
         }
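Both spellings produce the same zero-padded string; the new one just stays in str.format territory instead of mixing in percent-formatting. A quick check with made-up frame numbers:

first_frame, last_frame = 997, 1024
width = len(str(last_frame))                       # 4

old_style = "%0{}d".format(width) % first_frame    # i.e. "%04d" % 997
new_style = "{{:0>{}}}".format(width).format(first_frame)

print(old_style, new_style)  # 0997 0997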
@@ -105,13 +100,16 @@ class NukeRenderLocal(openpype.api.Extractor):
             instance.data['family'] = 'render'
             families.remove('render.local')
             families.insert(0, "render2d")
+            instance.data["anatomyData"]["family"] = "render"
         elif "prerender.local" in families:
             instance.data['family'] = 'prerender'
             families.remove('prerender.local')
             families.insert(0, "prerender")
+            instance.data["anatomyData"]["family"] = "prerender"
         elif "still.local" in families:
             instance.data['family'] = 'image'
             families.remove('still.local')
+            instance.data["anatomyData"]["family"] = "image"
         instance.data["families"] = families

         collections, remainder = clique.assemble(filenames)
@@ -123,4 +121,4 @@ class NukeRenderLocal(openpype.api.Extractor):

         self.log.info('Finished render')

-        self.log.debug("instance extracted: {}".format(instance.data))
+        self.log.debug("_ instance.data: {}".format(instance.data))
@@ -13,6 +13,7 @@ from openpype.hosts.nuke.api import (
     get_view_process_node
 )

+
 class ExtractSlateFrame(openpype.api.Extractor):
     """Extracts movie and thumbnail with baked in luts

@@ -236,6 +237,7 @@ class ExtractSlateFrame(openpype.api.Extractor):
     def _render_slate_to_sequence(self, instance):
         # set slate frame
         first_frame = instance.data["frameStartHandle"]
+        last_frame = instance.data["frameEndHandle"]
         slate_first_frame = first_frame - 1

         # render slate as sequence frame
@@ -284,6 +286,13 @@ class ExtractSlateFrame(openpype.api.Extractor):
             matching_repre["files"] = [first_filename, slate_filename]
         elif slate_filename not in matching_repre["files"]:
             matching_repre["files"].insert(0, slate_filename)
+            matching_repre["frameStart"] = (
+                "{{:0>{}}}"
+                .format(len(str(last_frame)))
+                .format(slate_first_frame)
+            )
+            self.log.debug(
+                "__ matching_repre: {}".format(pformat(matching_repre)))

         self.log.warning("Added slate frame to representation files")
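Putting the two slate hunks together: the slate is rendered one frame before the clip, prepended to the representation files, and the representation then has to advertise the earlier, re-padded frameStart. A toy version with made-up file names:

first_frame, last_frame = 1001, 1010
slate_first_frame = first_frame - 1          # 1000

files = ["render.1001.exr", "render.1002.exr"]
slate_filename = "render.1000.exr"
files.insert(0, slate_filename)

frame_start = "{{:0>{}}}".format(len(str(last_frame))).format(slate_first_frame)
print(files[0], frame_start)  # render.1000.exr 1000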
@@ -50,7 +50,7 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
             # establish families
             family = avalon_knob_data["family"]
             families_ak = avalon_knob_data.get("families", [])
-            families = list()
+            families = []

             # except disabled nodes but exclude backdrops in test
             if ("nukenodes" not in family) and (node["disable"].value()):
@@ -111,10 +111,10 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
             self.log.debug("__ families: `{}`".format(families))

             # Get format
-            format = root['format'].value()
-            resolution_width = format.width()
-            resolution_height = format.height()
-            pixel_aspect = format.pixelAspect()
+            format_ = root['format'].value()
+            resolution_width = format_.width()
+            resolution_height = format_.height()
+            pixel_aspect = format_.pixelAspect()

             # get publish knob value
             if "publish" not in node.knobs():
@@ -125,8 +125,11 @@ class PreCollectNukeInstances(pyblish.api.ContextPlugin):
             self.log.debug("__ _families_test: `{}`".format(_families_test))
             for family_test in _families_test:
                 if family_test in self.sync_workfile_version_on_families:
-                    self.log.debug("Syncing version with workfile for '{}'"
-                                   .format(family_test))
+                    self.log.debug(
+                        "Syncing version with workfile for '{}'".format(
+                            family_test
+                        )
+                    )
                     # get version to instance for integration
                     instance.data['version'] = instance.context.data['version']
@@ -144,8 +144,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
         self.log.debug("colorspace: `{}`".format(colorspace))

         version_data = {
-            "families": [f.replace(".local", "").replace(".farm", "")
-                         for f in _families_test if "write" not in f],
+            "families": [
+                _f.replace(".local", "").replace(".farm", "")
+                for _f in _families_test if "write" != _f
+            ],
             "colorspace": colorspace
         }
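The filter change is behavioral, not just cosmetic: "write" not in f also dropped any family merely containing the substring, while "write" != _f only drops the exact "write" entry. For example (family list made up for illustration):

_families_test = ["write", "render.local", "writegeo"]

old = [f.replace(".local", "").replace(".farm", "")
       for f in _families_test if "write" not in f]
new = [_f.replace(".local", "").replace(".farm", "")
       for _f in _families_test if "write" != _f]

print(old)  # ['render']
print(new)  # ['render', 'writegeo']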
@@ -98,7 +98,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
             self.log.error(msg)
             raise ValidationException(msg)

-        collected_frames_len = int(len(collection.indexes))
+        collected_frames_len = len(collection.indexes)
         coll_start = min(collection.indexes)
         coll_end = max(collection.indexes)

@@ -70,7 +70,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             "publish": resolve.get_publish_attribute(timeline_item),
             "fps": context.data["fps"],
             "handleStart": handle_start,
-            "handleEnd": handle_end
+            "handleEnd": handle_end,
+            "newAssetPublishing": True
         })

         # otio clip data
@@ -170,7 +170,8 @@ class CollectInstances(pyblish.api.InstancePlugin):
             "frameStart": frame_start,
             "frameEnd": frame_end,
             "frameStartH": frame_start - handle_start,
-            "frameEndH": frame_end + handle_end
+            "frameEndH": frame_end + handle_end,
+            "newAssetPublishing": True
         }

         for data_key in instance_data_filter:
openpype/hosts/traypublisher/api/editorial.py (new file, 331 lines)
@@ -0,0 +1,331 @@
import re
from copy import deepcopy

from openpype.client import get_asset_by_id
from openpype.pipeline.create import CreatorError


class ShotMetadataSolver:
    """ Solving hierarchical metadata

    Used during editorial publishing. Works with imput
    clip name and settings defining python formatable
    template. Settings also define searching patterns
    and its token keys used for formating in templates.
    """

    NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}")

    # presets
    clip_name_tokenizer = None
    shot_rename = True
    shot_hierarchy = None
    shot_add_tasks = None

    def __init__(
        self,
        clip_name_tokenizer,
        shot_rename,
        shot_hierarchy,
        shot_add_tasks,
        logger
    ):
        self.clip_name_tokenizer = clip_name_tokenizer
        self.shot_rename = shot_rename
        self.shot_hierarchy = shot_hierarchy
        self.shot_add_tasks = shot_add_tasks
        self.log = logger

    def _rename_template(self, data):
        """Shot renaming function

        Args:
            data (dict): formating data

        Raises:
            CreatorError: If missing keys

        Returns:
            str: formated new name
        """
        shot_rename_template = self.shot_rename[
            "shot_rename_template"]
        try:
            # format to new shot name
            return shot_rename_template.format(**data)
        except KeyError as _E:
            raise CreatorError((
                "Make sure all keys in settings are correct:: \n\n"
                f"From template string {shot_rename_template} > "
                f"`{_E}` has no equivalent in \n"
                f"{list(data.keys())} input formating keys!"
            ))
    def _generate_tokens(self, clip_name, source_data):
        """Token generator

        Settings defines token pairs key and regex expression.

        Args:
            clip_name (str): name of clip in editorial
            source_data (dict): data for formating

        Raises:
            CreatorError: if missing key

        Returns:
            dict: updated source_data
        """
        output_data = deepcopy(source_data["anatomy_data"])
        output_data["clip_name"] = clip_name

        if not self.clip_name_tokenizer:
            return output_data

        parent_name = source_data["selected_asset_doc"]["name"]

        search_text = parent_name + clip_name

        for token_key, pattern in self.clip_name_tokenizer.items():
            p = re.compile(pattern)
            match = p.findall(search_text)
            if not match:
                raise CreatorError((
                    "Make sure regex expression works with your data: \n\n"
                    f"'{token_key}' with regex '{pattern}' in your settings\n"
                    "can't find any match in your clip name "
                    f"'{search_text}'!\n\nLook to: "
                    "'project_settings/traypublisher/editorial_creators"
                    "/editorial_simple/clip_name_tokenizer'\n"
                    "at your project settings..."
                ))

            # QUESTION:how to refactory `match[-1]` to some better way?
            output_data[token_key] = match[-1]

        return output_data
    def _create_parents_from_settings(self, parents, data):
        """Formating parent components.

        Args:
            parents (list): list of dict parent components
            data (dict): formating data

        Raises:
            CreatorError: missing formating key
            CreatorError: missing token key
            KeyError: missing parent token

        Returns:
            list: list of dict of parent components
        """
        # fill the parents parts from presets
        shot_hierarchy = deepcopy(self.shot_hierarchy)
        hierarchy_parents = shot_hierarchy["parents"]

        # fill parent keys data template from anatomy data
        try:
            _parent_tokens_formating_data = {
                parent_token["name"]: parent_token["value"].format(**data)
                for parent_token in hierarchy_parents
            }
        except KeyError as _E:
            raise CreatorError((
                "Make sure all keys in settings are correct : \n"
                f"`{_E}` has no equivalent in \n{list(data.keys())}"
            ))

        _parent_tokens_type = {
            parent_token["name"]: parent_token["type"]
            for parent_token in hierarchy_parents
        }
        for _index, _parent in enumerate(
            shot_hierarchy["parents_path"].split("/")
        ):
            # format parent token with value which is formated
            try:
                parent_name = _parent.format(
                    **_parent_tokens_formating_data)
            except KeyError as _E:
                raise CreatorError((
                    "Make sure all keys in settings are correct : \n\n"
                    f"`{_E}` from template string "
                    f"{shot_hierarchy['parents_path']}, "
                    f" has no equivalent in \n"
                    f"{list(_parent_tokens_formating_data.keys())} parents"
                ))

            parent_token_name = (
                self.NO_DECOR_PATERN.findall(_parent).pop())

            if not parent_token_name:
                raise KeyError(
                    f"Parent token is not found in: `{_parent}`")

            # find parent type
            parent_token_type = _parent_tokens_type[parent_token_name]

            # in case selected context is set to the same asset
            if (
                _index == 0
                and parents[-1]["entity_name"] == parent_name
            ):
                self.log.debug(f" skipping : {parent_name}")
                continue

            # in case first parent is project then start parents from start
            if (
                _index == 0
                and parent_token_type == "Project"
            ):
                self.log.debug("rebuilding parents from scratch")
                project_parent = parents[0]
                parents = [project_parent]
                continue

            parents.append({
                "entity_type": parent_token_type,
                "entity_name": parent_name
            })

        self.log.debug(f"__ parents: {parents}")

        return parents
    def _create_hierarchy_path(self, parents):
        """Converting hierarchy path from parents

        Args:
            parents (list): list of dict parent components

        Returns:
            str: hierarchy path
        """
        return "/".join(
            [
                p["entity_name"] for p in parents
                if p["entity_type"] != "Project"
            ]
        ) if parents else ""
    def _get_parents_from_selected_asset(
        self,
        asset_doc,
        project_doc
    ):
        """Returning parents from context on selected asset.

        Context defined in Traypublisher project tree.

        Args:
            asset_doc (db obj): selected asset doc
            project_doc (db obj): actual project doc

        Returns:
            list: list of dict parent components
        """
        project_name = project_doc["name"]
        visual_hierarchy = [asset_doc]
        current_doc = asset_doc

        # looping trought all available visual parents
        # if they are not available anymore than it breaks
        while True:
            visual_parent_id = current_doc["data"]["visualParent"]
            visual_parent = None
            if visual_parent_id:
                visual_parent = get_asset_by_id(project_name, visual_parent_id)

            if not visual_parent:
                visual_hierarchy.append(project_doc)
                break
            visual_hierarchy.append(visual_parent)
            current_doc = visual_parent

        # add current selection context hierarchy
        return [
            {
                "entity_type": entity["data"]["entityType"],
                "entity_name": entity["name"]
            }
            for entity in reversed(visual_hierarchy)
        ]
    def _generate_tasks_from_settings(self, project_doc):
        """Convert settings inputs to task data.

        Args:
            project_doc (db obj): actual project doc

        Raises:
            KeyError: Missing task type in project doc

        Returns:
            dict: tasks data
        """
        tasks_to_add = {}

        project_tasks = project_doc["config"]["tasks"]
        for task_name, task_data in self.shot_add_tasks.items():
            _task_data = deepcopy(task_data)

            # check if task type in project task types
            if _task_data["type"] in project_tasks.keys():
                tasks_to_add[task_name] = _task_data
            else:
                raise KeyError(
                    "Missing task type `{}` for `{}` is not"
                    " existing in `{}``".format(
                        _task_data["type"],
                        task_name,
                        list(project_tasks.keys())
                    )
                )

        return tasks_to_add
    def generate_data(self, clip_name, source_data):
        """Metadata generator.

        Converts input data to hierarchy mentadata.

        Args:
            clip_name (str): clip name
            source_data (dict): formating data

        Returns:
            (str, dict): shot name and hierarchy data
        """
        self.log.info(f"_ source_data: {source_data}")

        tasks = {}
        asset_doc = source_data["selected_asset_doc"]
        project_doc = source_data["project_doc"]

        # match clip to shot name at start
        shot_name = clip_name

        # parse all tokens and generate formating data
        formating_data = self._generate_tokens(shot_name, source_data)

        # generate parents from selected asset
        parents = self._get_parents_from_selected_asset(asset_doc, project_doc)

        if self.shot_rename["enabled"]:
            shot_name = self._rename_template(formating_data)
            self.log.info(f"Renamed shot name: {shot_name}")

        if self.shot_hierarchy["enabled"]:
            parents = self._create_parents_from_settings(
                parents, formating_data)

        if self.shot_add_tasks:
            tasks = self._generate_tasks_from_settings(
                project_doc)

        return shot_name, {
            "hierarchy": self._create_hierarchy_path(parents),
            "parents": parents,
            "tasks": tasks
        }
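To make the tokenize-and-rename flow above concrete, here is a small standalone sketch; the settings values are made up, the real ones come from project_settings/traypublisher/editorial_creators/editorial_simple:

import re

# hypothetical settings: token regexes plus a rename template
clip_name_tokenizer = {
    "_sequence_": r"sc\d{3}",
    "_shot_": r"sh\d{3}",
}
shot_rename_template = "{_sequence_}_{_shot_}"

search_text = "episode101" + "sc010_sh030_plateP01"  # parent name + clip name
data = {}
for token_key, pattern in clip_name_tokenizer.items():
    match = re.compile(pattern).findall(search_text)
    if match:
        data[token_key] = match[-1]

print(shot_rename_template.format(**data))  # sc010_sh030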
@@ -1,6 +1,7 @@
 from openpype.lib.attribute_definitions import FileDef
-from openpype.pipeline import (
+from openpype.pipeline.create import (
     Creator,
+    HiddenCreator,
     CreatedInstance
 )

@@ -11,7 +12,6 @@ from .pipeline import (
     HostContext,
 )

-
 IMAGE_EXTENSIONS = [
     ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal",
     ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits",
@@ -35,6 +35,42 @@ VIDEO_EXTENSIONS = [
 REVIEW_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS


+class HiddenTrayPublishCreator(HiddenCreator):
+    host_name = "traypublisher"
+
+    def collect_instances(self):
+        for instance_data in list_instances():
+            creator_id = instance_data.get("creator_identifier")
+            if creator_id == self.identifier:
+                instance = CreatedInstance.from_existing(
+                    instance_data, self
+                )
+                self._add_instance_to_context(instance)
+
+    def update_instances(self, update_list):
+        update_instances(update_list)
+
+    def remove_instances(self, instances):
+        remove_instances(instances)
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+
+    def _store_new_instance(self, new_instance):
+        """Tray publisher specific method to store instance.
+
+        Instance is stored into "workfile" of traypublisher and also add it
+        to CreateContext.
+
+        Args:
+            new_instance (CreatedInstance): Instance that should be stored.
+        """
+
+        # Host implementation of storing metadata about instance
+        HostContext.add_instance(new_instance.data_to_store())
+        # Add instance to current context
+        self._add_instance_to_context(new_instance)
+
+
 class TrayPublishCreator(Creator):
     create_allow_context_change = True
     host_name = "traypublisher"
@@ -56,40 +92,12 @@ class TrayPublishCreator(Creator):
         for instance in instances:
             self._remove_instance_from_context(instance)

-    def get_pre_create_attr_defs(self):
-        # Use same attributes as for instance attrobites
-        return self.get_instance_attr_defs()
-
-    def _store_new_instance(self, new_instance):
-        """Tray publisher specific method to store instance.
-
-        Instance is stored into "workfile" of traypublisher and also add it
-        to CreateContext.
-
-        Args:
-            new_instance (CreatedInstance): Instance that should be stored.
-        """
-
-        # Host implementation of storing metadata about instance
-        HostContext.add_instance(new_instance.data_to_store())
-        # Add instance to current context
-        self._add_instance_to_context(new_instance)
-

 class SettingsCreator(TrayPublishCreator):
-    create_allow_context_change = True
-
     extensions = []

-    def collect_instances(self):
-        for instance_data in list_instances():
-            creator_id = instance_data.get("creator_identifier")
-            if creator_id == self.identifier:
-                instance = CreatedInstance.from_existing(
-                    instance_data, self
-                )
-                self._add_instance_to_context(instance)
-
     def create(self, subset_name, data, pre_create_data):
         # Pass precreate data to creator attributes
         data["creator_attributes"] = pre_create_data
@@ -120,6 +128,10 @@ class SettingsCreator(TrayPublishCreator):
             )
         ]

+    def get_pre_create_attr_defs(self):
+        # Use same attributes as for instance attrobites
+        return self.get_instance_attr_defs()
+
     @classmethod
     def from_settings(cls, item_data):
         identifier = item_data["identifier"]
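HiddenCreator subclasses never show in the publisher UI; other creators reach them through the shared create context by identifier, which is the pattern create_editorial.py uses below. A stripped-down illustration, all names hypothetical:

class MiniHiddenCreator:
    """Stand-in for a HiddenTrayPublishCreator subclass."""
    identifier = "editorial_shot"

    def create(self, instance_data):
        # real code builds a CreatedInstance and stores it via HostContext
        print("stored:", instance_data["subset"])
        return instance_data

creators = {MiniHiddenCreator.identifier: MiniHiddenCreator()}
# a visible creator delegates per-clip instances to the hidden one:
creators["editorial_shot"].create({"subset": "shotMain"})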
openpype/hosts/traypublisher/plugins/create/create_editorial.py (new file, 891 lines)
@@ -0,0 +1,891 @@
import os
from copy import deepcopy
from pprint import pformat
import opentimelineio as otio
from openpype.client import (
    get_asset_by_name,
    get_project
)
from openpype.hosts.traypublisher.api.plugin import (
    TrayPublishCreator,
    HiddenTrayPublishCreator
)
from openpype.hosts.traypublisher.api.editorial import (
    ShotMetadataSolver
)

from openpype.pipeline import CreatedInstance

from openpype.lib import (
    get_ffprobe_data,
    convert_ffprobe_fps_value,

    FileDef,
    TextDef,
    NumberDef,
    EnumDef,
    BoolDef,
    UISeparatorDef,
    UILabelDef
)

from openpype.hosts.traypublisher.api.pipeline import HostContext


CLIP_ATTR_DEFS = [
    EnumDef(
        "fps",
        items={
            "from_selection": "From selection",
            23.997: "23.976",
            24: "24",
            25: "25",
            29.97: "29.97",
            30: "30"
        },
        label="FPS"
    ),
    NumberDef(
        "workfile_start_frame",
        default=1001,
        label="Workfile start frame"
    ),
    NumberDef(
        "handle_start",
        default=0,
        label="Handle start"
    ),
    NumberDef(
        "handle_end",
        default=0,
        label="Handle end"
    )
]
class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator):
    """ Wrapper class for clip family creators

    Args:
        HiddenTrayPublishCreator (BaseCreator): hidden supporting class
    """
    host_name = "traypublisher"

    def create(self, instance_data, source_data=None):
        self.log.info(f"instance_data: {instance_data}")
        subset_name = instance_data["subset"]

        return self._create_instance(subset_name, instance_data)

    def _create_instance(self, subset_name, data):

        # Create new instance
        new_instance = CreatedInstance(self.family, subset_name, data, self)
        self.log.info(f"instance_data: {pformat(new_instance.data)}")

        # Host implementation of storing metadata about instance
        HostContext.add_instance(new_instance.data_to_store())
        # Add instance to current context
        self._add_instance_to_context(new_instance)

        return new_instance

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "add_review_family",
                default=True,
                label="Review"
            )
        ]


class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase):
    """ Shot family class

    The shot metadata instance carrier.

    Args:
        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
    """
    identifier = "editorial_shot"
    family = "shot"
    label = "Editorial Shot"

    def get_instance_attr_defs(self):
        attr_defs = [
            TextDef(
                "asset_name",
                label="Asset name",
            )
        ]
        attr_defs.extend(CLIP_ATTR_DEFS)
        return attr_defs


class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase):
    """ Plate family class

    Plate representation instance.

    Args:
        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
    """
    identifier = "editorial_plate"
    family = "plate"
    label = "Editorial Plate"


class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase):
    """ Audio family class

    Audio representation instance.

    Args:
        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
    """
    identifier = "editorial_audio"
    family = "audio"
    label = "Editorial Audio"


class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase):
    """ Review family class

    Review representation instance.

    Args:
        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
    """
    identifier = "editorial_review"
    family = "review"
    label = "Editorial Review"
class EditorialSimpleCreator(TrayPublishCreator):
    """ Editorial creator class

    Simple workflow creator. This creator only disecting input
    video file into clip chunks and then converts each to
    defined format defined Settings for each subset preset.

    Args:
        TrayPublishCreator (Creator): Tray publisher plugin class
    """

    label = "Editorial Simple"
    family = "editorial"
    identifier = "editorial_simple"
    default_variants = [
        "main"
    ]
    description = "Editorial files to generate shots."
    detailed_description = """
Supporting publishing new shots to project
or updating already created. Publishing will create OTIO file.
"""
    icon = "fa.file"

    def __init__(
        self, project_settings, *args, **kwargs
    ):
        super(EditorialSimpleCreator, self).__init__(
            project_settings, *args, **kwargs
        )
        editorial_creators = deepcopy(
            project_settings["traypublisher"]["editorial_creators"]
        )
        # get this creator settings by identifier
        self._creator_settings = editorial_creators.get(self.identifier)

        clip_name_tokenizer = self._creator_settings["clip_name_tokenizer"]
        shot_rename = self._creator_settings["shot_rename"]
        shot_hierarchy = self._creator_settings["shot_hierarchy"]
        shot_add_tasks = self._creator_settings["shot_add_tasks"]

        self._shot_metadata_solver = ShotMetadataSolver(
            clip_name_tokenizer,
            shot_rename,
            shot_hierarchy,
            shot_add_tasks,
            self.log
        )

        # try to set main attributes from settings
        if self._creator_settings.get("default_variants"):
            self.default_variants = self._creator_settings["default_variants"]

    def create(self, subset_name, instance_data, pre_create_data):
        allowed_family_presets = self._get_allowed_family_presets(
            pre_create_data)

        clip_instance_properties = {
            k: v for k, v in pre_create_data.items()
            if k != "sequence_filepath_data"
            if k not in [
                i["family"] for i in self._creator_settings["family_presets"]
            ]
        }
        # Create otio editorial instance
        asset_name = instance_data["asset"]
        asset_doc = get_asset_by_name(self.project_name, asset_name)

        self.log.info(pre_create_data["fps"])

        if pre_create_data["fps"] == "from_selection":
            # get asset doc data attributes
            fps = asset_doc["data"]["fps"]
        else:
            fps = float(pre_create_data["fps"])

        instance_data.update({
            "fps": fps
        })

        # get path of sequence
        sequence_path_data = pre_create_data["sequence_filepath_data"]
        media_path_data = pre_create_data["media_filepaths_data"]

        sequence_path = self._get_path_from_file_data(sequence_path_data)
        media_path = self._get_path_from_file_data(media_path_data)

        # get otio timeline
        otio_timeline = self._create_otio_timeline(
            sequence_path, fps)

        # Create all clip instances
        clip_instance_properties.update({
            "fps": fps,
            "parent_asset_name": asset_name,
            "variant": instance_data["variant"]
        })

        # create clip instances
        self._get_clip_instances(
            otio_timeline,
            media_path,
            clip_instance_properties,
            family_presets=allowed_family_presets

        )

        # create otio editorial instance
        self._create_otio_instance(
            subset_name, instance_data,
            sequence_path, media_path,
            otio_timeline
        )
    def _create_otio_instance(
        self,
        subset_name,
        data,
        sequence_path,
        media_path,
        otio_timeline
    ):
        """Otio instance creating function

        Args:
            subset_name (str): name of subset
            data (dict): instnance data
            sequence_path (str): path to sequence file
            media_path (str): path to media file
            otio_timeline (otio.Timeline): otio timeline object
        """
        # Pass precreate data to creator attributes
        data.update({
            "sequenceFilePath": sequence_path,
            "editorialSourcePath": media_path,
            "otioTimeline": otio.adapters.write_to_string(otio_timeline)
        })

        self._create_instance(self.family, subset_name, data)

    def _create_otio_timeline(self, sequence_path, fps):
        """Creating otio timeline from sequence path

        Args:
            sequence_path (str): path to sequence file
            fps (float): frame per second

        Returns:
            otio.Timeline: otio timeline object
        """
        # get editorial sequence file into otio timeline object
        extension = os.path.splitext(sequence_path)[1]

        kwargs = {}
        if extension == ".edl":
            # EDL has no frame rate embedded so needs explicit
            # frame rate else 24 is asssumed.
            kwargs["rate"] = fps
            kwargs["ignore_timecode_mismatch"] = True

        self.log.info(f"kwargs: {kwargs}")
        return otio.adapters.read_from_file(sequence_path, **kwargs)

    def _get_path_from_file_data(self, file_path_data):
        """Converting creator path data to single path string

        Args:
            file_path_data (FileDefItem): creator path data inputs

        Raises:
            FileExistsError: in case nothing had been set

        Returns:
            str: path string
        """
        # TODO: just temporarly solving only one media file
        if isinstance(file_path_data, list):
            file_path_data = file_path_data.pop()

        if len(file_path_data["filenames"]) == 0:
            raise FileExistsError(
                f"File path was not added: {file_path_data}")

        return os.path.join(
            file_path_data["directory"], file_path_data["filenames"][0])
    def _get_clip_instances(
        self,
        otio_timeline,
        media_path,
        instance_data,
        family_presets
    ):
        """Helping function fro creating clip instance

        Args:
            otio_timeline (otio.Timeline): otio timeline object
            media_path (str): media file path string
            instance_data (dict): clip instance data
            family_presets (list): list of dict settings subset presets
        """
        self.asset_name_check = []

        tracks = otio_timeline.each_child(
            descended_from_type=otio.schema.Track
        )

        # media data for audio sream and reference solving
        media_data = self._get_media_source_metadata(media_path)

        for track in tracks:
            self.log.debug(f"track.name: {track.name}")
            try:
                track_start_frame = (
                    abs(track.source_range.start_time.value)
                )
                self.log.debug(f"track_start_frame: {track_start_frame}")
                track_start_frame -= self.timeline_frame_start
            except AttributeError:
                track_start_frame = 0

            self.log.debug(f"track_start_frame: {track_start_frame}")

            for clip in track.each_child():
                if not self._validate_clip_for_processing(clip):
                    continue

                # get available frames info to clip data
                self._create_otio_reference(clip, media_path, media_data)

                # convert timeline range to source range
                self._restore_otio_source_range(clip)

                base_instance_data = self._get_base_instance_data(
                    clip,
                    instance_data,
                    track_start_frame
                )

                parenting_data = {
                    "instance_label": None,
                    "instance_id": None
                }
                self.log.info((
                    "Creating subsets from presets: \n"
                    f"{pformat(family_presets)}"
                ))

                for _fpreset in family_presets:
                    # exclude audio family if no audio stream
                    if (
                        _fpreset["family"] == "audio"
                        and not media_data.get("audio")
                    ):
                        continue

                    instance = self._make_subset_instance(
                        clip,
                        _fpreset,
                        deepcopy(base_instance_data),
                        parenting_data
                    )
                    self.log.debug(f"{pformat(dict(instance.data))}")

    def _restore_otio_source_range(self, otio_clip):
        """Infusing source range.

        Otio clip is missing proper source clip range so
        here we add them from from parent timeline frame range.

        Args:
            otio_clip (otio.Clip): otio clip object
        """
        otio_clip.source_range = otio_clip.range_in_parent()

    def _create_otio_reference(
        self,
        otio_clip,
        media_path,
        media_data
    ):
        """Creating otio reference at otio clip.

        Args:
            otio_clip (otio.Clip): otio clip object
            media_path (str): media file path string
            media_data (dict): media metadata
        """
        start_frame = media_data["start_frame"]
        frame_duration = media_data["duration"]
        fps = media_data["fps"]

        available_range = otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(
                start_frame, fps),
            duration=otio.opentime.RationalTime(
                frame_duration, fps)
        )
        # in case old OTIO or video file create `ExternalReference`
        media_reference = otio.schema.ExternalReference(
            target_url=media_path,
            available_range=available_range
        )

        otio_clip.media_reference = media_reference
    def _get_media_source_metadata(self, path):
        """Get all available metadata from file

        Args:
            path (str): media file path string

        Raises:
            AssertionError: ffprobe couldn't read metadata

        Returns:
            dict: media file metadata
        """
        return_data = {}

        try:
            media_data = get_ffprobe_data(
                path, self.log
            )
            self.log.debug(f"__ media_data: {pformat(media_data)}")

            # get video stream data
            video_stream = media_data["streams"][0]
            return_data = {
                "video": True,
                "start_frame": 0,
                "duration": int(video_stream["nb_frames"]),
                "fps": float(
                    convert_ffprobe_fps_value(
                        video_stream["r_frame_rate"]
                    )
                )
            }

            # get audio streams data
            audio_stream = [
                stream for stream in media_data["streams"]
                if stream["codec_type"] == "audio"
            ]

            if audio_stream:
                return_data["audio"] = True

        except Exception as exc:
            raise AssertionError((
                "FFprobe couldn't read information about input file: "
                f"\"{path}\". Error message: {exc}"
            ))

        return return_data

    def _make_subset_instance(
        self,
        otio_clip,
        preset,
        instance_data,
        parenting_data
    ):
        """Making subset instance from input preset

        Args:
            otio_clip (otio.Clip): otio clip object
            preset (dict): sigle family preset
            instance_data (dict): instance data
            parenting_data (dict): shot instance parent data

        Returns:
            CreatedInstance: creator instance object
        """
        family = preset["family"]
        label = self._make_subset_naming(
            preset,
            instance_data
        )
        instance_data["label"] = label

        # add file extension filter only if it is not shot family
        if family == "shot":
            instance_data["otioClip"] = (
                otio.adapters.write_to_string(otio_clip))
            c_instance = self.create_context.creators[
                "editorial_shot"].create(
                    instance_data)
            parenting_data.update({
                "instance_label": label,
                "instance_id": c_instance.data["instance_id"]
            })
        else:
            # add review family if defined
            instance_data.update({
                "outputFileType": preset["output_file_type"],
                "parent_instance_id": parenting_data["instance_id"],
                "creator_attributes": {
                    "parent_instance": parenting_data["instance_label"],
                    "add_review_family": preset.get("review")
                }
            })

            creator_identifier = f"editorial_{family}"
            editorial_clip_creator = self.create_context.creators[
                creator_identifier]
            c_instance = editorial_clip_creator.create(
                instance_data)

        return c_instance

    def _make_subset_naming(
        self,
        preset,
        instance_data
    ):
        """ Subset name maker

        Args:
            preset (dict): single preset item
            instance_data (dict): instance data

        Returns:
            str: label string
        """
        shot_name = instance_data["shotName"]
        variant_name = instance_data["variant"]
        family = preset["family"]

        # get variant name from preset or from inharitance
        _variant_name = preset.get("variant") or variant_name

        self.log.debug(f"__ family: {family}")
        self.log.debug(f"__ preset: {preset}")

        # subset name
        subset_name = "{}{}".format(
            family, _variant_name.capitalize()
        )
        label = "{}_{}".format(
            shot_name,
            subset_name
        )

        instance_data.update({
            "family": family,
            "label": label,
            "variant": _variant_name,
            "subset": subset_name,
        })

        return label
    def _get_base_instance_data(
        self,
        otio_clip,
        instance_data,
        track_start_frame,
    ):
        """ Factoring basic set of instance data.

        Args:
            otio_clip (otio.Clip): otio clip object
            instance_data (dict): precreate instance data
            track_start_frame (int): track start frame

        Returns:
            dict: instance data
        """
        # get clip instance properties
        parent_asset_name = instance_data["parent_asset_name"]
        handle_start = instance_data["handle_start"]
        handle_end = instance_data["handle_end"]
        timeline_offset = instance_data["timeline_offset"]
        workfile_start_frame = instance_data["workfile_start_frame"]
        fps = instance_data["fps"]
        variant_name = instance_data["variant"]

        # basic unique asset name
        clip_name = os.path.splitext(otio_clip.name)[0].lower()
        project_doc = get_project(self.project_name)

        shot_name, shot_metadata = self._shot_metadata_solver.generate_data(
            clip_name,
            {
                "anatomy_data": {
                    "project": {
                        "name": self.project_name,
                        "code": project_doc["data"]["code"]
                    },
                    "parent": parent_asset_name,
                    "app": self.host_name
                },
                "selected_asset_doc": get_asset_by_name(
                    self.project_name, parent_asset_name),
                "project_doc": project_doc
            }
        )

        self._validate_name_uniqueness(shot_name)

        timing_data = self._get_timing_data(
            otio_clip,
            timeline_offset,
            track_start_frame,
            workfile_start_frame
        )

        # create creator attributes
        creator_attributes = {
            "asset_name": shot_name,
            "Parent hierarchy path": shot_metadata["hierarchy"],
            "workfile_start_frame": workfile_start_frame,
            "fps": fps,
            "handle_start": int(handle_start),
            "handle_end": int(handle_end)
        }
        creator_attributes.update(timing_data)

        # create shared new instance data
        base_instance_data = {
            "shotName": shot_name,
            "variant": variant_name,

            # HACK: just for temporal bug workaround
            # TODO: should loockup shot name for update
            "asset": parent_asset_name,
            "task": "",

            "newAssetPublishing": True,

            # parent time properties
            "trackStartFrame": track_start_frame,
            "timelineOffset": timeline_offset,
            # creator_attributes
            "creator_attributes": creator_attributes
        }
        # add hierarchy shot metadata
        base_instance_data.update(shot_metadata)

        return base_instance_data

    def _get_timing_data(
        self,
        otio_clip,
        timeline_offset,
        track_start_frame,
        workfile_start_frame
    ):
        """Returning available timing data

        Args:
            otio_clip (otio.Clip): otio clip object
            timeline_offset (int): offset value
            track_start_frame (int): starting frame input
            workfile_start_frame (int): start frame for shot's workfiles

        Returns:
            dict: timing metadata
        """
        # frame ranges data
        clip_in = otio_clip.range_in_parent().start_time.value
        clip_in += track_start_frame
        clip_out = otio_clip.range_in_parent().end_time_inclusive().value
        clip_out += track_start_frame
        self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}")

        # add offset in case there is any
        self.log.debug(f"__ timeline_offset: {timeline_offset}")
        if timeline_offset:
            clip_in += timeline_offset
            clip_out += timeline_offset

        clip_duration = otio_clip.duration().value
        self.log.info(f"clip duration: {clip_duration}")

        source_in = otio_clip.trimmed_range().start_time.value
        source_out = source_in + clip_duration

        # define starting frame for future shot
        frame_start = (
            clip_in if workfile_start_frame is None
            else workfile_start_frame
        )
        frame_end = frame_start + (clip_duration - 1)

        return {
            "frameStart": int(frame_start),
            "frameEnd": int(frame_end),
            "clipIn": int(clip_in),
            "clipOut": int(clip_out),
            "clipDuration": int(otio_clip.duration().value),
            "sourceIn": int(source_in),
            "sourceOut": int(source_out)
        }
    def _get_allowed_family_presets(self, pre_create_data):
        """ Filter out allowed family presets.

        Args:
            pre_create_data (dict): precreate attributes inputs

        Returns:
            list: lit of dict with preset items
        """
        self.log.debug(f"__ pre_create_data: {pre_create_data}")
        return [
            {"family": "shot"},
            *[
                preset for preset in self._creator_settings["family_presets"]
                if pre_create_data[preset["family"]]
            ]
        ]

    def _validate_clip_for_processing(self, otio_clip):
        """Validate otio clip attribues

        Args:
            otio_clip (otio.Clip): otio clip object

        Returns:
            bool: True if all passing conditions
        """
        if otio_clip.name is None:
            return False

        if isinstance(otio_clip, otio.schema.Gap):
            return False

        # skip all generators like black empty
        if isinstance(
                otio_clip.media_reference,
                otio.schema.GeneratorReference):
            return False

        # Transitions are ignored, because Clips have the full frame
        # range.
        if isinstance(otio_clip, otio.schema.Transition):
            return False

        return True

    def _validate_name_uniqueness(self, name):
        """ Validating name uniqueness.

        In context of other clip names in sequence file.

        Args:
            name (str): shot name string
        """
        if name not in self.asset_name_check:
            self.asset_name_check.append(name)
        else:
            self.log.warning(
                f"Duplicate shot name: {name}! "
                "Please check names in the input sequence files."
            )

    def _create_instance(self, family, subset_name, instance_data):
        """ CreatedInstance object creator

        Args:
            family (str): family name
            subset_name (str): subset name
            instance_data (dict): instance data
        """
        # Create new instance
        new_instance = CreatedInstance(
            family, subset_name, instance_data, self
        )
        # Host implementation of storing metadata about instance
        HostContext.add_instance(new_instance.data_to_store())
        # Add instance to current context
        self._add_instance_to_context(new_instance)

    def get_pre_create_attr_defs(self):
        """ Creating pre-create attributes at creator plugin.

        Returns:
            list: list of attribute object instances
        """
        # Use same attributes as for instance attrobites
        attr_defs = [
            FileDef(
                "sequence_filepath_data",
                folders=False,
                extensions=[
                    ".edl",
                    ".xml",
                    ".aaf",
                    ".fcpxml"
                ],
                allow_sequences=False,
                single_item=True,
                label="Sequence file",
            ),
            FileDef(
                "media_filepaths_data",
                folders=False,
                extensions=[
                    ".mov",
                    ".mp4",
                    ".wav"
                ],
                allow_sequences=False,
                single_item=False,
                label="Media files",
            ),
            # TODO: perhpas better would be timecode and fps input
            NumberDef(
                "timeline_offset",
                default=0,
                label="Timeline offset"
            ),
            UISeparatorDef(),
            UILabelDef("Clip instance attributes"),
            UISeparatorDef()
        ]
        # add variants swithers
        attr_defs.extend(
            BoolDef(_var["family"], label=_var["family"])
            for _var in self._creator_settings["family_presets"]
        )
        attr_defs.append(UISeparatorDef())

        attr_defs.extend(CLIP_ATTR_DEFS)
        return attr_defs
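The timing math in _get_timing_data is easy to check by hand; a standalone sketch with made-up numbers:

# clip placed at timeline frame 86400 for 24 frames, workfile starts at 1001
track_start_frame = 0
timeline_offset = 0
workfile_start_frame = 1001

clip_in = 86400 + track_start_frame + timeline_offset
clip_duration = 24
clip_out = clip_in + clip_duration - 1

frame_start = clip_in if workfile_start_frame is None else workfile_start_frame
frame_end = frame_start + (clip_duration - 1)

print(frame_start, frame_end)  # 1001 1024
print(clip_in, clip_out)       # 86400 86423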
@@ -1,6 +1,7 @@
 import os
-from openpype.api import get_project_settings, Logger
+from openpype.api import get_project_settings

+log = Logger.get_logger(__name__)

 def initialize():
@@ -13,6 +14,7 @@ def initialize():

     global_variables = globals()
     for item in simple_creators:
+
         dynamic_plugin = SettingsCreator.from_settings(item)
         global_variables[dynamic_plugin.__name__] = dynamic_plugin
@@ -0,0 +1,36 @@
from pprint import pformat
import pyblish.api


class CollectClipInstance(pyblish.api.InstancePlugin):
    """Collect clip instances and resolve its parent"""

    label = "Collect Clip Instances"
    order = pyblish.api.CollectorOrder - 0.081

    hosts = ["traypublisher"]
    families = ["plate", "review", "audio"]

    def process(self, instance):
        creator_identifier = instance.data["creator_identifier"]
        if creator_identifier not in [
            "editorial_plate",
            "editorial_audio",
            "editorial_review"
        ]:
            return

        instance.data["families"].append("clip")

        parent_instance_id = instance.data["parent_instance_id"]
        edit_shared_data = instance.context.data["editorialSharedData"]
        instance.data.update(
            edit_shared_data[parent_instance_id]
        )

        if "editorialSourcePath" in instance.context.data.keys():
            instance.data["editorialSourcePath"] = (
                instance.context.data["editorialSourcePath"])
            instance.data["families"].append("trimming")

        self.log.debug(pformat(instance.data))
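The parent lookup above is just a dictionary hop keyed by the hidden shot instance's id; a toy illustration (ids and values shortened for readability):

# context-level cache filled by the shot collector, keyed by instance id
editorial_shared_data = {
    "shot-001": {"asset": "sc010_sh030", "frameStart": 1001, "frameEnd": 1024},
}

# a plate/review/audio instance carries only the parent's id ...
plate_instance_data = {"subset": "plateMain", "parent_instance_id": "shot-001"}
# ... and inherits the shared keys from its parent shot
plate_instance_data.update(editorial_shared_data["shot-001"])
print(plate_instance_data["asset"])  # sc010_sh030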
@@ -0,0 +1,48 @@
import os
from pprint import pformat
import pyblish.api
import opentimelineio as otio


class CollectEditorialInstance(pyblish.api.InstancePlugin):
    """Collect data for instances created by settings creators."""

    label = "Collect Editorial Instances"
    order = pyblish.api.CollectorOrder - 0.1

    hosts = ["traypublisher"]
    families = ["editorial"]

    def process(self, instance):

        if "families" not in instance.data:
            instance.data["families"] = []

        if "representations" not in instance.data:
            instance.data["representations"] = []

        fpath = instance.data["sequenceFilePath"]
        otio_timeline_string = instance.data.pop("otioTimeline")
        otio_timeline = otio.adapters.read_from_string(
            otio_timeline_string)

        instance.context.data["otioTimeline"] = otio_timeline
        instance.context.data["editorialSourcePath"] = (
            instance.data["editorialSourcePath"])

        self.log.info(fpath)

        instance.data["stagingDir"] = os.path.dirname(fpath)

        _, ext = os.path.splitext(fpath)

        instance.data["representations"].append({
            "ext": ext[1:],
            "name": ext[1:],
            "stagingDir": instance.data["stagingDir"],
            "files": os.path.basename(fpath)
        })

        self.log.debug("Created Editorial Instance {}".format(
            pformat(instance.data)
        ))
@@ -0,0 +1,30 @@
import pyblish.api


class CollectEditorialReviewable(pyblish.api.InstancePlugin):
    """ Collect review input from user.

    Adds the input to instance data.
    """

    label = "Collect Editorial Reviewable"
    order = pyblish.api.CollectorOrder

    families = ["plate", "review", "audio"]
    hosts = ["traypublisher"]

    def process(self, instance):
        creator_identifier = instance.data["creator_identifier"]
        if creator_identifier not in [
            "editorial_plate",
            "editorial_audio",
            "editorial_review"
        ]:
            return

        creator_attributes = instance.data["creator_attributes"]

        if creator_attributes["add_review_family"]:
            instance.data["families"].append("review")

        self.log.debug("instance.data {}".format(instance.data))
@ -0,0 +1,213 @@
from pprint import pformat
import pyblish.api
import opentimelineio as otio

class CollectShotInstance(pyblish.api.InstancePlugin):
""" Collect shot instances.

Resolves user inputs from creator attributes
into instance data.
"""

label = "Collect Shot Instances"
order = pyblish.api.CollectorOrder - 0.09

hosts = ["traypublisher"]
families = ["shot"]

SHARED_KEYS = [
"asset",
"fps",
"handleStart",
"handleEnd",
"frameStart",
"frameEnd",
"clipIn",
"clipOut",
"clipDuration",
"sourceIn",
"sourceOut",
"otioClip",
"workfileFrameStart"
]

def process(self, instance):
self.log.debug(pformat(instance.data))

creator_identifier = instance.data["creator_identifier"]
if "editorial" not in creator_identifier:
return

# get otio clip object
otio_clip = self._get_otio_clip(instance)
instance.data["otioClip"] = otio_clip

# first solve the inputs from creator attr
data = self._solve_inputs_to_data(instance)
instance.data.update(data)

# distribute all shared keys to clip instances
self._distribute_shared_data(instance)
self._solve_hierarchy_context(instance)

self.log.debug(pformat(instance.data))

def _get_otio_clip(self, instance):
""" Converts otio string data.

Converts the string to a proper otio object
and finds its equivalent in the otio timeline.
This process is a workaround which also makes it
possible to resolve the parent range.

Args:
instance (obj): publishing instance

Returns:
otio.Clip: otio clip object
"""
context = instance.context
# convert otio clip from string to object
otio_clip_string = instance.data.pop("otioClip")
otio_clip = otio.adapters.read_from_string(
otio_clip_string)

otio_timeline = context.data["otioTimeline"]

clips = [
clip for clip in otio_timeline.each_child(
descended_from_type=otio.schema.Clip)
if clip.name == otio_clip.name
]

otio_clip = clips.pop()
self.log.debug(f"__ otioclip.parent: {otio_clip.parent}")

return otio_clip

def _distribute_shared_data(self, instance):
""" Distribute all defined keys.

All data are shared between all related
instances in context.

Args:
instance (obj): publishing instance
"""
context = instance.context

instance_id = instance.data["instance_id"]

if not context.data.get("editorialSharedData"):
context.data["editorialSharedData"] = {}

context.data["editorialSharedData"][instance_id] = {
_k: _v for _k, _v in instance.data.items()
if _k in self.SHARED_KEYS
}

def _solve_inputs_to_data(self, instance):
""" Resolve all user inputs into instance data.

Args:
instance (obj): publishing instance

Returns:
dict: data to update the instance data with
"""
_cr_attrs = instance.data["creator_attributes"]
workfile_start_frame = _cr_attrs["workfile_start_frame"]
frame_start = _cr_attrs["frameStart"]
frame_end = _cr_attrs["frameEnd"]
frame_dur = frame_end - frame_start

return {
"asset": _cr_attrs["asset_name"],
"fps": float(_cr_attrs["fps"]),
"handleStart": _cr_attrs["handle_start"],
"handleEnd": _cr_attrs["handle_end"],
"frameStart": workfile_start_frame,
"frameEnd": workfile_start_frame + frame_dur,
"clipIn": _cr_attrs["clipIn"],
"clipOut": _cr_attrs["clipOut"],
"clipDuration": _cr_attrs["clipDuration"],
"sourceIn": _cr_attrs["sourceIn"],
"sourceOut": _cr_attrs["sourceOut"],
"workfileFrameStart": workfile_start_frame
}

def _solve_hierarchy_context(self, instance):
""" Add hierarchy data to context shared data.

Args:
instance (obj): publishing instance
"""
context = instance.context

final_context = (
context.data["hierarchyContext"]
if context.data.get("hierarchyContext")
else {}
)

name = instance.data["asset"]

# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])

in_info = {
"entity_type": "Shot",
"custom_attributes": {
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
"fps": instance.data["fps"]
},
"tasks": instance.data["tasks"]
}

parents = instance.data.get('parents', [])
self.log.debug(f"parents: {pformat(parents)}")

actual = {name: in_info}

for parent in reversed(parents):
parent_name = parent["entity_name"]
next_dict = {
parent_name: {
"entity_type": parent["entity_type"],
"childs": actual
}
}
actual = next_dict

final_context = self._update_dict(final_context, actual)

# adding hierarchy context to instance
context.data["hierarchyContext"] = final_context
self.log.debug(pformat(final_context))

def _update_dict(self, ex_dict, new_dict):
""" Recursive function.

Updates nested data with another nested dict.

Args:
ex_dict (dict): nested data
new_dict (dict): nested data

Returns:
dict: updated nested data
"""
for key in ex_dict:
if key in new_dict and isinstance(ex_dict[key], dict):
new_dict[key] = self._update_dict(ex_dict[key], new_dict[key])
elif not ex_dict.get(key) or not new_dict.get(key):
new_dict[key] = ex_dict[key]

return new_dict

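For illustration, a hedged standalone sketch of the nested merge that '_solve_hierarchy_context' relies on; the function body mirrors '_update_dict' above, and the dictionaries are invented examples:

def update_dict(ex_dict, new_dict):
    # Keep keys from 'ex_dict' unless 'new_dict' carries a non-empty value,
    # descending into nested dicts that exist on both sides.
    for key in ex_dict:
        if key in new_dict and isinstance(ex_dict[key], dict):
            new_dict[key] = update_dict(ex_dict[key], new_dict[key])
        elif not ex_dict.get(key) or not new_dict.get(key):
            new_dict[key] = ex_dict[key]
    return new_dict

existing = {"Project": {"entity_type": "Project", "childs": {"sh010": {}}}}
incoming = {"Project": {"entity_type": "Project", "childs": {"sh020": {}}}}
merged = update_dict(existing, incoming)
# merged["Project"]["childs"] now contains both "sh010" and "sh020"
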
@ -1,10 +1,8 @@
import os

from openpype.client import get_project, get_asset_by_name
from openpype.lib import (
StringTemplate,
get_workfile_template_key_from_context,
get_workdir_data,
get_last_workfile_with_version,
)
from openpype.pipeline import (

@ -12,6 +10,7 @@ from openpype.pipeline import (
legacy_io,
Anatomy,
)
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.hosts.tvpaint.api import lib, pipeline, plugin

@ -54,9 +53,6 @@ class LoadWorkfile(plugin.Loader):
asset_name = legacy_io.Session["AVALON_ASSET"]
task_name = legacy_io.Session["AVALON_TASK"]

project_doc = get_project(project_name)
asset_doc = get_asset_by_name(project_name, asset_name)

template_key = get_workfile_template_key_from_context(
asset_name,
task_name,

@ -66,7 +62,9 @@ class LoadWorkfile(plugin.Loader):
)
anatomy = Anatomy(project_name)

data = get_workdir_data(project_doc, asset_doc, task_name, host_name)
data = get_template_data_with_names(
project_name, asset_name, task_name, host_name
)
data["root"] = anatomy.roots

file_template = anatomy.templates[template_key]["file"]

@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
"""Hook to launch Unreal and prepare projects."""
import os
import copy
from pathlib import Path

from openpype.lib import (
PreLaunchHook,
ApplicationLaunchFailed,
ApplicationNotFound,
get_workdir_data,
get_workfile_template_key
)
import openpype.hosts.unreal.lib as unreal_lib

@ -35,18 +35,13 @@ class UnrealPrelaunchHook(PreLaunchHook):
return last_workfile.name

# Prepare data for fill data and for getting workfile template key
task_name = self.data["task_name"]
anatomy = self.data["anatomy"]
asset_doc = self.data["asset_doc"]
project_doc = self.data["project_doc"]

asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
task_info = asset_tasks.get(task_name) or {}
task_type = task_info.get("type")
# Use already prepared workdir data
workdir_data = copy.deepcopy(self.data["workdir_data"])
task_type = workdir_data.get("task", {}).get("type")

workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, self.host_name
)
# QUESTION raise exception if version is part of filename template?
workdir_data["version"] = 1
workdir_data["ext"] = "uproject"

@ -63,7 +63,10 @@ from .execute import (
path_to_subprocess_arg,
CREATE_NO_WINDOW
)
from .log import PypeLogger, timeit
from .log import (
Logger,
PypeLogger,
)

from .path_templates import (
merge_dict,

@ -83,8 +86,9 @@ from .anatomy import (
Anatomy
)

from .config import (
from .dateutils import (
get_datetime_data,
get_timestamp,
get_formatted_current_time
)

@ -370,13 +374,13 @@ __all__ = [
"get_datetime_data",
"get_formatted_current_time",

"Logger",
"PypeLogger",

"get_default_components",
"validate_mongo_connection",
"OpenPypeMongoConnection",

"timeit",

"is_overlapping_otio_ranges",
"otio_range_with_handles",
"convert_to_padded_path",

@ -28,7 +28,6 @@ from . import PypeLogger
from .profiles_filtering import filter_profiles
from .local_settings import get_openpype_username
from .avalon_context import (
get_workdir_data,
get_workdir_with_workdir_data,
get_workfile_template_key,
get_last_workfile

@ -1576,6 +1575,9 @@ def prepare_context_environments(data, env_group=None):
data (EnvironmentPrepData): Dictionary where result and intermediate
result will be stored.
"""

from openpype.pipeline.template_data import get_template_data

# Context environments
log = data["log"]

@ -1596,7 +1598,9 @@ def prepare_context_environments(data, env_group=None):
# Load project specific environments
project_name = project_doc["name"]
project_settings = get_project_settings(project_name)
system_settings = get_system_settings()
data["project_settings"] = project_settings
data["system_settings"] = system_settings
# Apply project specific environments on current env value
apply_project_environments_value(
project_name, data["env"], project_settings, env_group

@ -1619,8 +1623,8 @@ def prepare_context_environments(data, env_group=None):
if not app.is_host:
return

workdir_data = get_workdir_data(
project_doc, asset_doc, task_name, app.host_name
workdir_data = get_template_data(
project_doc, asset_doc, task_name, app.host_name, system_settings
)
data["workdir_data"] = workdir_data

@ -13,22 +13,16 @@ from openpype.client import (
get_project,
get_assets,
get_asset_by_name,
get_subset_by_name,
get_subsets,
get_last_versions,
get_last_version_by_subset_id,
get_last_version_by_subset_name,
get_representations,
get_workfile_info,
)
from openpype.settings import (
get_project_settings,
get_system_settings
)
from openpype.settings import get_project_settings
from .profiles_filtering import filter_profiles
from .events import emit_event
from .path_templates import StringTemplate
from .local_settings import get_openpype_username

legacy_io = None

@ -188,6 +182,9 @@ def is_latest(representation):

Returns:
bool: Whether the representation is of latest version.

Deprecated:
Function will be removed after release version 3.14.*
"""

from openpype.pipeline.context_tools import is_representation_from_latest

@ -197,7 +194,11 @@ def is_latest(representation):

@deprecated("openpype.pipeline.load.any_outdated_containers")
def any_outdated():
"""Return whether the current scene has any outdated content"""
"""Return whether the current scene has any outdated content.

Deprecated:
Function will be removed after release version 3.14.*
"""

from openpype.pipeline.load import any_outdated_containers

@ -215,6 +216,9 @@ def get_asset(asset_name=None):

Returns:
(MongoDB document)

Deprecated:
Function will be removed after release version 3.14.*
"""

from openpype.pipeline.context_tools import get_current_project_asset

@ -222,17 +226,15 @@ def get_asset(asset_name=None):
return get_current_project_asset(asset_name=asset_name)

@deprecated("openpype.pipeline.template_data.get_general_template_data")
def get_system_general_anatomy_data(system_settings=None):
if not system_settings:
system_settings = get_system_settings()
studio_name = system_settings["general"]["studio_name"]
studio_code = system_settings["general"]["studio_code"]
return {
"studio": {
"name": studio_name,
"code": studio_code
}
}
"""
Deprecated:
Function will be removed after release version 3.14.*
"""
from openpype.pipeline.template_data import get_general_template_data

return get_general_template_data(system_settings)

def get_linked_asset_ids(asset_doc):

@ -297,7 +299,10 @@ def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None):

Returns:
None: If asset, subset or version were not found.
dict: Last version document for entered .
dict: Last version document for entered.

Deprecated:
Function will be removed after release version 3.14.*
"""

if not project_name:

@ -424,7 +429,7 @@ def get_workfile_template_key(
return default

# TODO rename function as is not just "work" specific
@deprecated("openpype.pipeline.template_data.get_template_data")
def get_workdir_data(project_doc, asset_doc, task_name, host_name):
"""Prepare data for workdir template filling from entered information.

@ -437,40 +442,16 @@ def get_workdir_data(project_doc, asset_doc, task_name, host_name):

Returns:
dict: Data prepared for filling workdir template.

Deprecated:
Function will be removed after release version 3.14.*
"""
task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type')

project_task_types = project_doc["config"]["tasks"]
task_code = project_task_types.get(task_type, {}).get("short_name")
from openpype.pipeline.template_data import get_template_data

asset_parents = asset_doc["data"]["parents"]
hierarchy = "/".join(asset_parents)

parent_name = project_doc["name"]
if asset_parents:
parent_name = asset_parents[-1]

data = {
"project": {
"name": project_doc["name"],
"code": project_doc["data"].get("code")
},
"task": {
"name": task_name,
"type": task_type,
"short": task_code,
},
"asset": asset_doc["name"],
"parent": parent_name,
"app": host_name,
"user": get_openpype_username(),
"hierarchy": hierarchy,
}

system_general_data = get_system_general_anatomy_data()
data.update(system_general_data)

return data
return get_template_data(
project_doc, asset_doc, task_name, host_name
)

def get_workdir_with_workdir_data(

@ -552,11 +533,13 @@ def get_workdir(
TemplateResult: Workdir path.
"""

from openpype.pipeline import Anatomy
from openpype.pipeline.template_data import get_template_data

if not anatomy:
from openpype.pipeline import Anatomy
anatomy = Anatomy(project_doc["name"])

workdir_data = get_workdir_data(
workdir_data = get_template_data(
project_doc, asset_doc, task_name, host_name
)
# Output is TemplateResult object which contain useful data

@ -565,27 +548,23 @@ def get_workdir(
)

@with_pipeline_io
@deprecated("openpype.pipeline.context_tools.get_template_data_from_session")
def template_data_from_session(session=None):
""" Return dictionary with template data from session keys.

Args:
session (dict, Optional): The Session to use. If not provided use the
currently active global Session.

Returns:
dict: All available data from session.

Deprecated:
Function will be removed after release version 3.14.*
"""

if session is None:
session = legacy_io.Session

project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
host_name = session["AVALON_APP"]
project_doc = get_project(project_name)
asset_doc = get_asset_by_name(project_name, asset_name)
return get_workdir_data(project_doc, asset_doc, task_name, host_name)
from openpype.pipeline.context_tools import get_template_data_from_session
return get_template_data_from_session(session)

@with_pipeline_io

@ -660,13 +639,14 @@ def compute_session_changes(
@with_pipeline_io
def get_workdir_from_session(session=None, template_key=None):
from openpype.pipeline import Anatomy
from openpype.pipeline.context_tools import get_template_data_from_session

if session is None:
session = legacy_io.Session
project_name = session["AVALON_PROJECT"]
host_name = session["AVALON_APP"]
anatomy = Anatomy(project_name)
template_data = template_data_from_session(session)
template_data = get_template_data_from_session(session)
anatomy_filled = anatomy.format(template_data)

if not template_key:

@ -695,8 +675,8 @@ def update_current_task(task=None, asset=None, app=None, template_key=None):

Returns:
dict: The changed key, values in the current Session.
"""

changes = compute_session_changes(
legacy_io.Session,
task=task,

@ -768,7 +748,9 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and
`legacy_io` is used if not entered.
"""

from openpype.pipeline import Anatomy
from openpype.pipeline.template_data import get_template_data

# Use legacy_io if dbcon is not entered
if not dbcon:

@ -787,7 +769,7 @@ def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None):
# Prepare project for workdir data
project_name = dbcon.active_project()
project_doc = get_project(project_name)
workdir_data = get_workdir_data(
workdir_data = get_template_data(
project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"]
)
# Prepare anatomy

@ -1,82 +1,41 @@
# -*- coding: utf-8 -*-
"""Get configuration data."""
import datetime
import warnings
import functools

def get_datetime_data(datetime_obj=None):
"""Returns current datetime data as dictionary.
class ConfigDeprecatedWarning(DeprecationWarning):
pass

Args:
datetime_obj (datetime): Specific datetime object

Returns:
dict: prepared date & time data
def deprecated(func):
"""Mark functions as deprecated.

Available keys:
"d" - <Day of month number> in shortest possible way.
"dd" - <Day of month number> with 2 digits.
"ddd" - <Week day name> shortened week day. e.g.: `Mon`, ...
"dddd" - <Week day name> full name of week day. e.g.: `Monday`, ...
"m" - <Month number> in shortest possible way. e.g.: `1` if January
"mm" - <Month number> with 2 digits.
"mmm" - <Month name> shortened month name. e.g.: `Jan`, ...
"mmmm" - <Month name> full month name. e.g.: `January`, ...
"yy" - <Year number> shortened year. e.g.: `19`, `20`, ...
"yyyy" - <Year number> full year. e.g.: `2019`, `2020`, ...
"H" - <Hours number 24-hour> shortened hours.
"HH" - <Hours number 24-hour> with 2 digits.
"h" - <Hours number 12-hour> shortened hours.
"hh" - <Hours number 12-hour> with 2 digits.
"ht" - <Midday type> AM or PM.
"M" - <Minutes number> shortened minutes.
"MM" - <Minutes number> with 2 digits.
"S" - <Seconds number> shortened seconds.
"SS" - <Seconds number> with 2 digits.
It will result in a warning being emitted when the function is used.
"""

if not datetime_obj:
datetime_obj = datetime.datetime.now()

year = datetime_obj.strftime("%Y")

month = datetime_obj.strftime("%m")
month_name_full = datetime_obj.strftime("%B")
month_name_short = datetime_obj.strftime("%b")
day = datetime_obj.strftime("%d")

weekday_full = datetime_obj.strftime("%A")
weekday_short = datetime_obj.strftime("%a")

hours = datetime_obj.strftime("%H")
hours_midday = datetime_obj.strftime("%I")
hour_midday_type = datetime_obj.strftime("%p")
minutes = datetime_obj.strftime("%M")
seconds = datetime_obj.strftime("%S")

return {
"d": str(int(day)),
"dd": str(day),
"ddd": weekday_short,
"dddd": weekday_full,
"m": str(int(month)),
"mm": str(month),
"mmm": month_name_short,
"mmmm": month_name_full,
"yy": str(year[2:]),
"yyyy": str(year),
"H": str(int(hours)),
"HH": str(hours),
"h": str(int(hours_midday)),
"hh": str(hours_midday),
"ht": hour_midday_type,
"M": str(int(minutes)),
"MM": str(minutes),
"S": str(int(seconds)),
"SS": str(seconds),
}
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", ConfigDeprecatedWarning)
warnings.warn(
(
"Deprecated import of function '{0}'."
" Function was moved to 'openpype.lib.dateutils.{0}'."
" Please change your imports."
).format(func.__name__),
category=ConfigDeprecatedWarning
)
return func(*args, **kwargs)
return new_func

@deprecated
def get_datetime_data(datetime_obj=None):
from .dateutils import get_datetime_data

return get_datetime_data(datetime_obj)

@deprecated
def get_formatted_current_time():
return datetime.datetime.now().strftime(
"%Y%m%dT%H%M%SZ"
)
from .dateutils import get_formatted_current_time

return get_formatted_current_time()

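As a hedged usage sketch (assuming the old 'openpype.lib.config' import path keeps working as the hunk above suggests), the wrapped functions still return data but now emit 'ConfigDeprecatedWarning':

import warnings

# import path assumed from the diff above; verify against your checkout
from openpype.lib.config import get_datetime_data

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    data = get_datetime_data()

# ConfigDeprecatedWarning subclasses DeprecationWarning, so it is caught here
assert any(
    issubclass(w.category, DeprecationWarning) for w in caught
), "the wrapper is expected to warn about the moved function"
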

95
openpype/lib/dateutils.py
Normal file

@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
"""Get configuration data."""
import datetime

def get_datetime_data(datetime_obj=None):
"""Returns current datetime data as dictionary.

Args:
datetime_obj (datetime): Specific datetime object

Returns:
dict: prepared date & time data

Available keys:
"d" - <Day of month number> in shortest possible way.
"dd" - <Day of month number> with 2 digits.
"ddd" - <Week day name> shortened week day. e.g.: `Mon`, ...
"dddd" - <Week day name> full name of week day. e.g.: `Monday`, ...
"m" - <Month number> in shortest possible way. e.g.: `1` if January
"mm" - <Month number> with 2 digits.
"mmm" - <Month name> shortened month name. e.g.: `Jan`, ...
"mmmm" - <Month name> full month name. e.g.: `January`, ...
"yy" - <Year number> shortened year. e.g.: `19`, `20`, ...
"yyyy" - <Year number> full year. e.g.: `2019`, `2020`, ...
"H" - <Hours number 24-hour> shortened hours.
"HH" - <Hours number 24-hour> with 2 digits.
"h" - <Hours number 12-hour> shortened hours.
"hh" - <Hours number 12-hour> with 2 digits.
"ht" - <Midday type> AM or PM.
"M" - <Minutes number> shortened minutes.
"MM" - <Minutes number> with 2 digits.
"S" - <Seconds number> shortened seconds.
"SS" - <Seconds number> with 2 digits.
"""

if not datetime_obj:
datetime_obj = datetime.datetime.now()

year = datetime_obj.strftime("%Y")

month = datetime_obj.strftime("%m")
month_name_full = datetime_obj.strftime("%B")
month_name_short = datetime_obj.strftime("%b")
day = datetime_obj.strftime("%d")

weekday_full = datetime_obj.strftime("%A")
weekday_short = datetime_obj.strftime("%a")

hours = datetime_obj.strftime("%H")
hours_midday = datetime_obj.strftime("%I")
hour_midday_type = datetime_obj.strftime("%p")
minutes = datetime_obj.strftime("%M")
seconds = datetime_obj.strftime("%S")

return {
"d": str(int(day)),
"dd": str(day),
"ddd": weekday_short,
"dddd": weekday_full,
"m": str(int(month)),
"mm": str(month),
"mmm": month_name_short,
"mmmm": month_name_full,
"yy": str(year[2:]),
"yyyy": str(year),
"H": str(int(hours)),
"HH": str(hours),
"h": str(int(hours_midday)),
"hh": str(hours_midday),
"ht": hour_midday_type,
"M": str(int(minutes)),
"MM": str(minutes),
"S": str(int(seconds)),
"SS": str(seconds),
}

def get_timestamp(datetime_obj=None):
"""Get standardized timestamp from datetime object.

Args:
datetime_obj (datetime.datetime): Object of datetime. Current time
is used if not passed.
"""

if datetime_obj is None:
datetime_obj = datetime.datetime.now()
return datetime_obj.strftime(
"%Y%m%dT%H%M%SZ"
)

def get_formatted_current_time():
return get_timestamp()

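A minimal usage sketch of the new helper; the template string below is an invented example, only the formatting keys come from the docstring above:

from openpype.lib.dateutils import get_datetime_data

fill_data = get_datetime_data()
# the documented keys can drive simple name templates
stamp = "review_{yyyy}{mm}{dd}_{HH}{MM}".format(**fill_data)
# e.g. "review_20220513_1407"
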
@ -1,86 +0,0 @@
import git
from tqdm import tqdm

class _GitProgress(git.remote.RemoteProgress):
""" Class handling displaying progress during git operations.

This is using **tqdm** for showing progress bars. As **GitPython**
is parsing progress directly from git command, it is somehow unreliable
as in some operations it is difficult to get total count of iterations
to display meaningful progress bar.

"""
_t = None
_code = 0
_current_status = ''
_current_max = ''

_description = {
256: "Checking out files",
4: "Counting objects",
128: "Finding sources",
32: "Receiving objects",
64: "Resolving deltas",
16: "Writing objects"
}

def __init__(self):
super().__init__()

def __del__(self):
if self._t is not None:
self._t.close()

def _detroy_tqdm(self):
""" Used to close tqdm when operation ended.

"""
if self._t is not None:
self._t.close()
self._t = None

def _check_mask(self, opcode: int) -> bool:
"""" Add meaningful description to **GitPython** opcodes.

:param opcode: OP_MASK opcode
:type opcode: int
:return: String description of opcode
:rtype: str

.. seealso:: For opcodes look at :class:`git.RemoteProgress`

"""
if opcode & self.COUNTING:
return self._description.get(self.COUNTING)
elif opcode & self.CHECKING_OUT:
return self._description.get(self.CHECKING_OUT)
elif opcode & self.WRITING:
return self._description.get(self.WRITING)
elif opcode & self.RECEIVING:
return self._description.get(self.RECEIVING)
elif opcode & self.RESOLVING:
return self._description.get(self.RESOLVING)
elif opcode & self.FINDING_SOURCES:
return self._description.get(self.FINDING_SOURCES)
else:
return "Processing"

def update(self, op_code, cur_count, max_count=None, message=''):
""" Called when git operation update progress.

.. seealso:: For more details see
:func:`git.objects.submodule.base.Submodule.update`
`Documentation <https://gitpython.readthedocs.io/en/\
stable/reference.html#git.objects.submodule.base.Submodule.update>`_

"""
code = self._check_mask(op_code)
if self._current_status != code or self._current_max != max_count:
self._current_max = max_count
self._current_status = code
self._detroy_tqdm()
self._t = tqdm(total=max_count)
self._t.set_description(" . {}".format(code))

self._t.update(cur_count)

@ -42,13 +42,13 @@ except ImportError:
USE_UNICODE = hasattr(__builtins__, "unicode")

class PypeStreamHandler(logging.StreamHandler):
class LogStreamHandler(logging.StreamHandler):
""" StreamHandler class designed to handle utf errors in python 2.x hosts.

"""

def __init__(self, stream=None):
super(PypeStreamHandler, self).__init__(stream)
super(LogStreamHandler, self).__init__(stream)
self.enabled = True

def enable(self):

@ -57,7 +57,6 @@ class PypeStreamHandler(logging.StreamHandler):
Used to silence output
"""
self.enabled = True
pass

def disable(self):
""" Disable StreamHandler

@ -108,13 +107,13 @@ class PypeStreamHandler(logging.StreamHandler):
self.handleError(record)

class PypeFormatter(logging.Formatter):
class LogFormatter(logging.Formatter):

DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]'
default_formatter = logging.Formatter(DFT)

def __init__(self, formats):
super(PypeFormatter, self).__init__()
super(LogFormatter, self).__init__()
self.formatters = {}
for loglevel in formats:
self.formatters[loglevel] = logging.Formatter(formats[loglevel])

@ -142,7 +141,7 @@ class PypeFormatter(logging.Formatter):
return out

class PypeMongoFormatter(logging.Formatter):
class MongoFormatter(logging.Formatter):

DEFAULT_PROPERTIES = logging.LogRecord(
'', '', '', '', '', '', '', '').__dict__.keys()

@ -162,7 +161,7 @@ class PypeMongoFormatter(logging.Formatter):
'method': record.funcName,
'lineNumber': record.lineno
}
document.update(PypeLogger.get_process_data())
document.update(Logger.get_process_data())

# Standard document decorated with exception info
if record.exc_info is not None:

@ -182,7 +181,7 @@ class PypeMongoFormatter(logging.Formatter):
return document

class PypeLogger:
class Logger:
DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] '
DBG = " - { %(name)s }: [ %(message)s ] "
INF = ">>> [ %(message)s ] "

@ -240,7 +239,7 @@ class PypeLogger:
for handler in logger.handlers:
if isinstance(handler, MongoHandler):
add_mongo_handler = False
elif isinstance(handler, PypeStreamHandler):
elif isinstance(handler, LogStreamHandler):
add_console_handler = False

if add_console_handler:

@ -293,7 +292,7 @@ class PypeLogger:
"username": components["username"],
"password": components["password"],
"capped": True,
"formatter": PypeMongoFormatter()
"formatter": MongoFormatter()
}
if components["port"] is not None:
kwargs["port"] = int(components["port"])

@ -304,10 +303,10 @@ class PypeLogger:

@classmethod
def _get_console_handler(cls):
formatter = PypeFormatter(cls.FORMAT_FILE)
console_handler = PypeStreamHandler()
formatter = LogFormatter(cls.FORMAT_FILE)
console_handler = LogStreamHandler()

console_handler.set_name("PypeStreamHandler")
console_handler.set_name("LogStreamHandler")
console_handler.setFormatter(formatter)
return console_handler

@ -418,9 +417,9 @@ class PypeLogger:
def get_process_name(cls):
"""Process name that is like "label" of a process.

Pype's logging can be used from pype itself or from hosts. Even in Pype
it's good to know if logs are from Pype tray or from pype's event
server. This should help to identify that information.
OpenPype's logging can be used from OpenPype itself or from hosts.
Even in OpenPype process it's good to know if logs are from tray or
from other cli commands. This should help to identify that information.
"""
if cls._process_name is not None:
return cls._process_name

@ -486,23 +485,13 @@ class PypeLogger:
return OpenPypeMongoConnection.get_mongo_client()

def timeit(method):
"""Print time in function.

For debugging.

"""
log = logging.getLogger()

def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
log.debug('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
return result
return timed
class PypeLogger(Logger):
@classmethod
def get_logger(cls, *args, **kwargs):
logger = Logger.get_logger(*args, **kwargs)
# TODO uncomment when replaced most of places
# logger.warning((
#     "'openpype.lib.PypeLogger' is deprecated class."
#     " Please use 'openpype.lib.Logger' instead."
# ))
return logger

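A short, hedged example of the renamed logger entry point; both names are exported during the transition, as the '__all__' hunk earlier shows:

from openpype.lib import Logger, PypeLogger

log = Logger.get_logger(__name__)
log.info("hello from the renamed logger")

# the deprecated alias still resolves to the same implementation
legacy_log = PypeLogger.get_logger(__name__)
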
@ -80,10 +80,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Using published scene for render {}".format(script_path)
)

# exception for slate workflow
if "slate" in instance.data["families"]:
submit_frame_start -= 1

response = self.payload_submit(
instance,
script_path,

@ -99,10 +95,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
instance.data["publishJobState"] = "Suspended"

if instance.data.get("bakingNukeScripts"):
# exception for slate workflow
if "slate" in instance.data["families"]:
submit_frame_start += 1

for baking_script in instance.data["bakingNukeScripts"]:
render_path = baking_script["bakeRenderPath"]
script_path = baking_script["bakeScriptPath"]

@ -365,7 +357,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
if not instance.data.get("expectedFiles"):
instance.data["expectedFiles"] = []

dir = os.path.dirname(path)
dirname = os.path.dirname(path)
file = os.path.basename(path)

if "#" in file:

@ -377,9 +369,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
instance.data["expectedFiles"].append(path)
return

if instance.data.get("slate"):
start_frame -= 1

for i in range(start_frame, (end_frame + 1)):
instance.data["expectedFiles"].append(
os.path.join(dir, (file % i)).replace("\\", "/"))
os.path.join(dirname, (file % i)).replace("\\", "/"))

def get_limit_groups(self):
"""Search for limit group nodes and return group name.

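An illustrative-only sketch of the expected-files logic above; the helper name and the hash-padding conversion are assumptions, not the plugin's exact code, but the slate behavior matches the hunk: with a slate, one extra frame is expected before the first frame of the sequence:

import os

def expected_files(path, start_frame, end_frame, slate=False):
    dirname = os.path.dirname(path)
    file = os.path.basename(path)
    if "#" in file:
        # convert "name.####.exr" into printf style "name.%04d.exr"
        padding = file.count("#")
        file = file.replace("#" * padding, "%0{}d".format(padding))
    if slate:
        start_frame -= 1
    return [
        os.path.join(dirname, file % i).replace("\\", "/")
        for i in range(start_frame, end_frame + 1)
    ]

# expected_files("/renders/shot.####.exr", 1001, 1002, slate=True)
# -> [".../shot.1000.exr", ".../shot.1001.exr", ".../shot.1002.exr"]
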
@ -158,7 +158,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# mapping of instance properties to be transferred to new instance for
# every specified family
instance_transfer = {
"slate": ["slateFrames"],
"slate": ["slateFrames", "slate"],
"review": ["lutPath"],
"render2d": ["bakingNukeScripts", "version"],
"renderlayer": ["convertToScanline"]

@ -585,11 +585,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
" This may cause issues on farm."
).format(staging))

frame_start = int(instance.get("frameStartHandle"))
if instance.get("slate"):
frame_start -= 1

rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(collection)],
"frameStart": int(instance.get("frameStartHandle")),
"frameStart": frame_start,
"frameEnd": int(instance.get("frameEndHandle")),
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,

@ -16,7 +16,7 @@ from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype_modules.ftrack.lib.custom_attributes import (
query_custom_attributes
)
from openpype.lib import config
from openpype.lib.dateutils import get_datetime_data
from openpype.lib.delivery import (
path_from_representation,
get_format_dict,

@ -555,7 +555,7 @@ class Delivery(BaseAction):

format_dict = get_format_dict(anatomy, location_path)

datetime_data = config.get_datetime_data()
datetime_data = get_datetime_data()
for repre in repres_to_deliver:
source_path = repre.get("data", {}).get("path")
debug_msg = "Processing representation {}".format(repre["_id"])

@ -11,13 +11,13 @@ from openpype.client import (
get_project,
get_assets,
)
from openpype.settings import get_project_settings
from openpype.settings import get_project_settings, get_system_settings
from openpype.lib import (
get_workfile_template_key,
get_workdir_data,
StringTemplate,
)
from openpype.pipeline import Anatomy
from openpype.pipeline.template_data import get_template_data
from openpype_modules.ftrack.lib import BaseAction, statics_icon
from openpype_modules.ftrack.lib.avalon_sync import create_chunks

@ -279,14 +279,19 @@ class FillWorkfileAttributeAction(BaseAction):
extension = "{ext}"
project_doc = get_project(project_name)
project_settings = get_project_settings(project_name)
system_settings = get_system_settings()
anatomy = Anatomy(project_name)
templates_by_key = {}

operations = []
for asset_doc, task_entities in asset_docs_with_task_entities:
for task_entity in task_entities:
workfile_data = get_workdir_data(
project_doc, asset_doc, task_entity["name"], host_name
workfile_data = get_template_data(
project_doc,
asset_doc,
task_entity["name"],
host_name,
system_settings
)
# Use version 1 for each workfile
workfile_data["version"] = 1

@ -26,8 +26,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
families = ["ftrack"]

def process(self, instance):
session = instance.context.data["ftrackSession"]
context = instance.context
component_list = instance.data.get("ftrackComponentsList")
if not component_list:
self.log.info(

@ -36,8 +34,8 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
)
return

session = instance.context.data["ftrackSession"]
context = instance.context
session = context.data["ftrackSession"]

parent_entity = None
default_asset_name = None

@ -13,7 +13,10 @@ class IntegrateFtrackComponentOverwrite(pyblish.api.InstancePlugin):
active = False

def process(self, instance):
component_list = instance.data['ftrackComponentsList']
component_list = instance.data.get('ftrackComponentsList')
if not component_list:
self.log.info("No component to overwrite...")
return

for cl in component_list:
cl['component_overwrite'] = True

@ -58,7 +58,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
version_number = int(instance_version)

family = instance.data["family"]
family_low = instance.data["family"].lower()
family_low = family.lower()

asset_type = instance.data.get("ftrackFamily")
if not asset_type and family_low in self.family_mapping:

@ -140,24 +140,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
first_thumbnail_component = None
first_thumbnail_component_repre = None
for repre in thumbnail_representations:
published_path = repre.get("published_path")
if not published_path:
comp_files = repre["files"]
if isinstance(comp_files, (tuple, list, set)):
filename = comp_files[0]
else:
filename = comp_files

published_path = os.path.join(
repre["stagingDir"], filename
repre_path = self._get_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
)
if not os.path.exists(published_path):
continue
repre["published_path"] = published_path
continue

# Create copy of base comp item and append it
thumbnail_item = copy.deepcopy(base_component_item)
thumbnail_item["component_path"] = repre["published_path"]
thumbnail_item["component_path"] = repre_path
thumbnail_item["component_data"] = {
"name": "thumbnail"
}

@ -216,6 +208,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
extended_asset_name = ""
multiple_reviewable = len(review_representations) > 1
for repre in review_representations:
repre_path = self._get_repre_path(instance, repre, False)
if not repre_path:
self.log.warning(
"Published path is not set and source was removed."
)
continue

# Create copy of base comp item and append it
review_item = copy.deepcopy(base_component_item)

@ -270,7 +269,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
fps = instance_fps

# Change location
review_item["component_path"] = repre["published_path"]
review_item["component_path"] = repre_path
# Change component data
review_item["component_data"] = {
# Default component name is "main".

@ -327,7 +326,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):

# Add other representations as component
for repre in other_representations:
published_path = repre.get("published_path")
published_path = self._get_repre_path(instance, repre, True)
if not published_path:
continue
# Create copy of base comp item and append it

@ -360,6 +359,51 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
))
instance.data["ftrackComponentsList"] = component_list

def _get_repre_path(self, instance, repre, only_published):
"""Get representation path that can be used for integration.

When 'only_published' is set to true the validation of the path is
not relevant. In that case we just need what is set in
'published_path' as a reference to where the file was published;
the value is not used to get or upload the file.

Args:
instance (pyblish.Instance): Processed instance object. Used
as source of staging dir if representation does not have
it filled.
repre (dict): Representation on instance which could be and
could not be integrated with main integrator.
only_published (bool): Care only about published paths and
ignore if filepath is not existing anymore.

Returns:
str: Path to representation file.
None: Path is not filled or does not exist.
"""

published_path = repre.get("published_path")
if published_path:
published_path = os.path.normpath(published_path)
if os.path.exists(published_path):
return published_path

if only_published:
return published_path

comp_files = repre["files"]
if isinstance(comp_files, (tuple, list, set)):
filename = comp_files[0]
else:
filename = comp_files

staging_dir = repre.get("stagingDir")
if not staging_dir:
staging_dir = instance.data["stagingDir"]
src_path = os.path.normpath(os.path.join(staging_dir, filename))
if os.path.exists(src_path):
return src_path
return None

def _get_asset_version_status_name(self, instance):
if not self.asset_versions_status_profiles:
return None

@ -65,7 +65,13 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["shot"]
hosts = ["hiero", "resolve", "standalonepublisher", "flame"]
hosts = [
"hiero",
"resolve",
"standalonepublisher",
"flame",
"traypublisher"
]
optional = False

def process(self, context):

@ -230,9 +230,9 @@ def update_op_assets(
if item_type in ["Shot", "Sequence"]:
# Name with parents hierarchy "({episode}_){sequence}_{shot}"
# to avoid duplicate name issue
item_name = "_".join(item_data["parents"] + [item_doc["name"]])
item_name = f"{item_data['parents'][-1]}_{item['name']}"
else:
item_name = item_doc["name"]
item_name = item["name"]

# Set root folders parents
item_data["parents"] = entity_parent_folders + item_data["parents"]

@ -19,7 +19,9 @@ from openpype.client import (
from openpype.modules import load_modules, ModulesManager
from openpype.settings import get_project_settings
from openpype.lib import filter_pyblish_plugins

from .anatomy import Anatomy
from .template_data import get_template_data_with_names
from . import (
legacy_io,
register_loader_plugin_path,

@ -336,6 +338,7 @@ def get_current_project_asset(asset_name=None, asset_id=None, fields=None):
return None
return get_asset_by_name(project_name, asset_name, fields=fields)

def is_representation_from_latest(representation):
"""Return whether the representation is from latest version

@ -348,3 +351,29 @@ def is_representation_from_latest(representation):

project_name = legacy_io.active_project()
return version_is_latest(project_name, representation["parent"])

def get_template_data_from_session(session=None, system_settings=None):
"""Template data for template fill from session keys.

Args:
session (Union[Dict[str, str], None]): The Session to use. If not
provided use the currently active global Session.
system_settings (Union[Dict[str, Any], Any]): Prepared system settings.
They are queried automatically if not passed.

Returns:
Dict[str, Any]: All available data from session.
"""

if session is None:
session = legacy_io.Session

project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
host_name = session["AVALON_APP"]

return get_template_data_with_names(
project_name, asset_name, task_name, host_name, system_settings
)

@ -7,6 +7,7 @@ from .creator_plugins import (
BaseCreator,
Creator,
AutoCreator,
HiddenCreator,

discover_creator_plugins,
discover_legacy_creator_plugins,

@ -35,6 +36,7 @@ __all__ = (
"BaseCreator",
"Creator",
"AutoCreator",
"HiddenCreator",

"discover_creator_plugins",
"discover_legacy_creator_plugins",

@ -416,6 +416,12 @@ class Creator(BaseCreator):
return self.pre_create_attr_defs

class HiddenCreator(BaseCreator):
@abstractmethod
def create(self, instance_data, source_data):
pass

class AutoCreator(BaseCreator):
"""Creator which is automatically triggered without user interaction.

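A hedged sketch of what a 'HiddenCreator' subclass could look like; the import path, identifier, family and body are invented for illustration, and only the abstract 'create(instance_data, source_data)' signature comes from the hunk above:

# import path assumed from the package __init__ shown earlier
from openpype.pipeline.create import HiddenCreator

class EditorialPlateCreator(HiddenCreator):
    identifier = "editorial_plate"
    family = "plate"

    def create(self, instance_data, source_data):
        # combine data prepared by a visible "editorial" creator with
        # the per-clip source data before the instance is stored
        instance_data.update(source_data)
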

228
openpype/pipeline/template_data.py
Normal file

@ -0,0 +1,228 @@
|
|||
from openpype.client import get_project, get_asset_by_name
|
||||
from openpype.settings import get_system_settings
|
||||
from openpype.lib.local_settings import get_openpype_username
|
||||
|
||||
|
||||
def get_general_template_data(system_settings=None):
|
||||
"""General template data based on system settings or machine.
|
||||
|
||||
Output contains formatting keys:
|
||||
- 'studio[name]' - Studio name filled from system settings
|
||||
- 'studio[code]' - Studio code filled from system settings
|
||||
- 'user' - User's name using 'get_openpype_username'
|
||||
|
||||
Args:
|
||||
system_settings (Dict[str, Any]): System settings.
|
||||
"""
|
||||
|
||||
if not system_settings:
|
||||
system_settings = get_system_settings()
|
||||
studio_name = system_settings["general"]["studio_name"]
|
||||
studio_code = system_settings["general"]["studio_code"]
|
||||
return {
|
||||
"studio": {
|
||||
"name": studio_name,
|
||||
"code": studio_code
|
||||
},
|
||||
"user": get_openpype_username()
|
||||
}
|
||||
|
||||
|
||||
def get_project_template_data(project_doc):
|
||||
"""Extract data from project document that are used in templates.
|
||||
|
||||
Project document must have 'name' and (at this moment) optional
|
||||
key 'data.code'.
|
||||
|
||||
Output contains formatting keys:
|
||||
- 'project[name]' - Project name
|
||||
- 'project[code]' - Project code
|
||||
|
||||
Args:
|
||||
project_doc (Dict[str, Any]): Queried project document.
|
||||
|
||||
Returns:
|
||||
Dict[str, Dict[str, str]]: Template data based on project document.
|
||||
"""
|
||||
|
||||
project_code = project_doc.get("data", {}).get("code")
|
||||
return {
|
||||
"project": {
|
||||
"name": project_doc["name"],
|
||||
"code": project_code
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def get_asset_template_data(asset_doc, project_name):
|
||||
"""Extract data from asset document that are used in templates.
|
||||
|
||||
Output dictionary contains keys:
|
||||
- 'asset' - asset name
|
||||
- 'hierarchy' - parent asset names joined with '/'
|
||||
- 'parent' - direct parent name, project name used if is under project
|
||||
|
||||
Required document fields:
|
||||
Asset: 'name', 'data.parents'
|
||||
|
||||
Args:
|
||||
asset_doc (Dict[str, Any]): Queried asset document.
|
||||
project_name (str): Is used for 'parent' key if asset doc does not have
|
||||
any.
|
||||
|
||||
Returns:
|
||||
Dict[str, str]: Data that are based on asset document and can be used
|
||||
in templates.
|
||||
"""
|
||||
|
||||
asset_parents = asset_doc["data"]["parents"]
|
||||
hierarchy = "/".join(asset_parents)
|
||||
if asset_parents:
|
||||
parent_name = asset_parents[-1]
|
||||
else:
|
||||
parent_name = project_name
|
||||
|
||||
return {
|
||||
"asset": asset_doc["name"],
|
||||
"hierarchy": hierarchy,
|
||||
"parent": parent_name
|
||||
}
|
||||
|
||||
|
||||
def get_task_type(asset_doc, task_name):
|
||||
"""Get task type based on asset document and task name.
|
||||
|
||||
Required document fields:
|
||||
Asset: 'data.tasks'
|
||||
|
||||
Args:
|
||||
asset_doc (Dict[str, Any]): Queried asset document.
|
||||
task_name (str): Task name which is under asset.
|
||||
|
||||
Returns:
|
||||
str: Task type name.
|
||||
None: Task was not found on asset document.
|
||||
"""
|
||||
|
||||
asset_tasks_info = asset_doc["data"]["tasks"]
|
||||
return asset_tasks_info.get(task_name, {}).get("type")
|
||||
|
||||
|
||||
def get_task_template_data(project_doc, asset_doc, task_name):
|
||||
""""Extract task specific data from project and asset documents.
|
||||
|
||||
Required document fields:
|
||||
Project: 'config.tasks'
|
||||
Asset: 'data.tasks'.
|
||||
|
||||
Args:
|
||||
project_doc (Dict[str, Any]): Queried project document.
|
||||
asset_doc (Dict[str, Any]): Queried asset document.
|
||||
tas_name (str): Name of task for which data should be returned.
|
||||
|
||||
Returns:
|
||||
Dict[str, Dict[str, str]]: Template data
|
||||
"""
|
||||
|
||||
project_task_types = project_doc["config"]["tasks"]
|
||||
task_type = get_task_type(asset_doc, task_name)
|
||||
task_code = project_task_types.get(task_type, {}).get("short_name")
|
||||
|
||||
return {
|
||||
"task": {
|
||||
"name": task_name,
|
||||
"type": task_type,
|
||||
"short": task_code,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def get_template_data(
    project_doc,
    asset_doc=None,
    task_name=None,
    host_name=None,
    system_settings=None
):
    """Prepare data for template filling from entered documents and info.

    This function does not "auto fill" any values except system settings,
    and that is on purpose.

    Universal function to receive template data from passed arguments. The
    only required argument is the project document; all other arguments are
    optional and their values won't be added to the template data if they
    are not passed.

    Required document fields:
        Project: 'name', 'data.code', 'config.tasks'
        Asset: 'name', 'data.parents', 'data.tasks'

    Args:
        project_doc (Dict[str, Any]): Mongo document of project from MongoDB.
        asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB.
        task_name (Union[str, None]): Task name under passed asset.
        host_name (Union[str, None]): Used to fill '{app}' key.
        system_settings (Union[Dict, None]): Prepared system settings.
            They're queried if not passed (may be slower).

    Returns:
        Dict[str, Any]: Data prepared for filling workdir template.
    """

    template_data = get_general_template_data(system_settings)
    template_data.update(get_project_template_data(project_doc))
    if asset_doc:
        template_data.update(get_asset_template_data(
            asset_doc, project_doc["name"]
        ))
        if task_name:
            template_data.update(get_task_template_data(
                project_doc, asset_doc, task_name
            ))

    if host_name:
        template_data["app"] = host_name

    return template_data
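
A hedged usage sketch with minimal hypothetical documents (only the required fields are filled; system settings are queried because none are passed):

    project_doc = {
        "name": "MyProject",
        "data": {"code": "mp"},
        "config": {"tasks": {"Compositing": {"short_name": "comp"}}},
    }
    asset_doc = {
        "name": "sh010",
        "data": {
            "parents": ["shots", "sq001"],
            "tasks": {"compositing": {"type": "Compositing"}},
        },
    }
    data = get_template_data(project_doc, asset_doc, "compositing", "nuke")
    # 'data' contains project, asset and task keys plus {"app": "nuke"}.
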
def get_template_data_with_names(
    project_name,
    asset_name=None,
    task_name=None,
    host_name=None,
    system_settings=None
):
    """Prepare data for template filling from entered entity names and info.

    Copy of 'get_template_data' but based on entity names instead of
    documents. The only difference is that the documents are queried.

    Args:
        project_name (str): Project name for which template data are
            calculated.
        asset_name (Union[str, None]): Asset name for which template data are
            calculated.
        task_name (Union[str, None]): Task name under passed asset.
        host_name (Union[str, None]): Used to fill '{app}' key because
            the workdir template may contain it.
        system_settings (Union[Dict, None]): Prepared system settings.
            They're queried if not passed.

    Returns:
        Dict[str, Any]: Data prepared for filling workdir template.
    """

    project_doc = get_project(
        project_name, fields=["name", "data.code", "config.tasks"]
    )
    asset_doc = None
    if asset_name:
        asset_doc = get_asset_by_name(
            project_name,
            asset_name,
            fields=["name", "data.parents", "data.tasks"]
        )
    return get_template_data(
        project_doc, asset_doc, task_name, host_name, system_settings
    )
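
A short usage sketch, assuming the named project, asset and task exist in the database (all names are hypothetical):

    # Documents are queried internally; only names are needed.
    data = get_template_data_with_names(
        project_name="MyProject",
        asset_name="sh010",
        task_name="compositing",
        host_name="nuke",
    )
    # 'data' is ready for filling anatomy templates such as the workdir.
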

@@ -4,10 +4,10 @@ from collections import defaultdict
from Qt import QtWidgets, QtCore, QtGui

from openpype.client import get_representations
from openpype.lib import config
from openpype.pipeline import load, Anatomy
from openpype import resources, style

from openpype.lib.dateutils import get_datetime_data
from openpype.lib.delivery import (
    sizeof_fmt,
    path_from_representation,

@@ -160,7 +160,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog):

        selected_repres = self._get_selected_repres()

        datetime_data = config.get_datetime_data()
        datetime_data = get_datetime_data()
        template_name = self.dropdown.currentText()
        format_dict = get_format_dict(self.anatomy, self.root_line_edit.text())
        for repre in self._representations:
@@ -15,10 +15,8 @@ Provides:
import json
import pyblish.api

from openpype.lib import (
    get_system_general_anatomy_data
)
from openpype.pipeline import legacy_io
from openpype.pipeline.template_data import get_template_data


class CollectAnatomyContextData(pyblish.api.ContextPlugin):

@@ -33,11 +31,15 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin):
        "asset": "AssetName",
        "hierarchy": "path/to/asset",
        "task": "Working",
        "user": "MeDespicable",
        # Duplicated entry
        "username": "MeDespicable",

        # Current host name
        "app": "maya"

        *** OPTIONAL ***
        "app": "maya"  # Current application base name
        + multiple keys from `datetimeData`  # see its collector
        + multiple keys from `datetimeData` (see its collector)
    }
    """
@@ -45,52 +47,26 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin):
    label = "Collect Anatomy Context Data"

    def process(self, context):
        host_name = context.data["hostName"]
        system_settings = context.data["system_settings"]
        project_entity = context.data["projectEntity"]
        context_data = {
            "project": {
                "name": project_entity["name"],
                "code": project_entity["data"].get("code")
            },
            "username": context.data["user"],
            "app": context.data["hostName"]
        }

        context.data["anatomyData"] = context_data

        # add system general settings anatomy data
        system_general_data = get_system_general_anatomy_data()
        context_data.update(system_general_data)

        datetime_data = context.data.get("datetimeData") or {}
        context_data.update(datetime_data)

        asset_entity = context.data.get("assetEntity")
        task_name = None
        if asset_entity:
            task_name = legacy_io.Session["AVALON_TASK"]

            asset_tasks = asset_entity["data"]["tasks"]
            task_type = asset_tasks.get(task_name, {}).get("type")
        anatomy_data = get_template_data(
            project_entity, asset_entity, task_name, host_name, system_settings
        )
        anatomy_data.update(context.data.get("datetimeData") or {})

        project_task_types = project_entity["config"]["tasks"]
        task_code = project_task_types.get(task_type, {}).get("short_name")
        username = context.data["user"]
        anatomy_data["user"] = username
        # Backwards compatibility for 'username' key
        anatomy_data["username"] = username

        asset_parents = asset_entity["data"]["parents"]
        hierarchy = "/".join(asset_parents)

        parent_name = project_entity["name"]
        if asset_parents:
            parent_name = asset_parents[-1]

        context_data.update({
            "asset": asset_entity["name"],
            "parent": parent_name,
            "hierarchy": hierarchy,
            "task": {
                "name": task_name,
                "type": task_type,
                "short": task_code,
            }
        })
        # Store
        context.data["anatomyData"] = anatomy_data

        self.log.info("Global anatomy data collected")
        self.log.debug(json.dumps(context_data, indent=4))
        self.log.debug(json.dumps(anatomy_data, indent=4))
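
For orientation, a hypothetical excerpt of the collected anatomy data as it could appear in the debug log (illustrative values only; real output also includes datetime and system settings keys):

    {
        "project": {"name": "MyProject", "code": "mp"},
        "asset": "sh010",
        "hierarchy": "shots/sq001",
        "parent": "sq001",
        "task": {"name": "compositing", "type": "Compositing", "short": "comp"},
        "user": "MeDespicable",
        "username": "MeDespicable",
        "app": "maya"
    }
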
@@ -5,7 +5,7 @@ Provides:
"""

import pyblish.api
from openpype.api import config
from openpype.lib.dateutils import get_datetime_data


class CollectDateTimeData(pyblish.api.ContextPlugin):

@@ -15,4 +15,4 @@ class CollectDateTimeData(pyblish.api.ContextPlugin):
    def process(self, context):
        key = "datetimeData"
        if key not in context.data:
            context.data[key] = config.get_datetime_data()
            context.data[key] = get_datetime_data()

@@ -44,7 +44,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
                "subset": subset,
                "asset": in_data["asset"],
                "task": in_data["task"],
                "label": subset,
                "label": in_data.get("label") or subset,
                "name": subset,
                "family": in_data["family"],
                "families": instance_families,
@@ -23,7 +23,7 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
    label = "Collect OTIO Frame Ranges"
    order = pyblish.api.CollectorOrder - 0.08
    families = ["shot", "clip"]
    hosts = ["resolve", "hiero", "flame"]
    hosts = ["resolve", "hiero", "flame", "traypublisher"]

    def process(self, instance):
        # get basic variables

@@ -116,8 +116,10 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
            # check in two ways if it is a sequence
            if hasattr(otio.schema, "ImageSequenceReference"):
                # for OpenTimelineIO 0.13 and newer
                if isinstance(media_ref,
                              otio.schema.ImageSequenceReference):
                if isinstance(
                    media_ref,
                    otio.schema.ImageSequenceReference
                ):
                    is_sequence = True
            else:
                # for OpenTimelineIO 0.12 and older

@@ -139,11 +141,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
                    padding=media_ref.frame_zero_padding
                )
                collection.indexes.update(
                    [i for i in range(a_frame_start_h, (a_frame_end_h + 1))])
                    list(range(a_frame_start_h, (a_frame_end_h + 1)))
                )

                self.log.debug(collection)
                repre = self._create_representation(
                    frame_start, frame_end, collection=collection)
            else:
                # in case it is a file sequence but not the new OTIO schema
                # `ImageSequenceReference`

@@ -152,9 +152,9 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
                    path, trimmed_media_range_h, metadata)
                self.staging_dir, collection = collection_data

                self.log.debug(collection)
                repre = self._create_representation(
                    frame_start, frame_end, collection=collection)
                self.log.debug(collection)
                repre = self._create_representation(
                    frame_start, frame_end, collection=collection)
        else:
            _trim = False
            dirname, filename = os.path.split(media_ref.target_url)

@@ -198,7 +198,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):

        if kwargs.get("collection"):
            collection = kwargs.get("collection")
            files = [f for f in collection]
            files = list(collection)
            ext = collection.format("{tail}")
            representation_data.update({
                "name": ext[1:],

@@ -220,7 +220,5 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
        })

        if kwargs.get("trim") is True:
            representation_data.update({
                "tags": ["trim"]
            })
            representation_data["tags"] = ["trim"]
        return representation_data
@@ -30,9 +30,15 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
        self.log.debug("__ hierarchy_context: {}".format(hierarchy_context))

        self.project = None
        self.import_to_avalon(project_name, hierarchy_context)
        self.import_to_avalon(context, project_name, hierarchy_context)

    def import_to_avalon(self, project_name, input_data, parent=None):
    def import_to_avalon(
        self,
        context,
        project_name,
        input_data,
        parent=None,
    ):
        for name in input_data:
            self.log.info("input_data[name]: {}".format(input_data[name]))
            entity_data = input_data[name]

@@ -127,12 +133,19 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
            if unarchive_entity is None:
                # Create entity if it doesn't exist
                entity = self.create_avalon_asset(
                    project_name, name, data
                    name, data
                )
            else:
                # Unarchive if entity was archived
                entity = self.unarchive_entity(unarchive_entity, data)

            # make sure all relative instances have correct avalon data
            self._set_avalon_data_to_relative_instances(
                context,
                project_name,
                entity
            )

            if update_data:
                # Update entity data with input data
                legacy_io.update_many(

@@ -142,7 +155,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):

            if "childs" in entity_data:
                self.import_to_avalon(
                    project_name, entity_data["childs"], entity
                    context, project_name, entity_data["childs"], entity
                )

    def unarchive_entity(self, entity, data):

@@ -159,20 +172,52 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
            {"_id": entity["_id"]},
            new_entity
        )

        return new_entity

    def create_avalon_asset(self, project_name, name, data):
        item = {
    def create_avalon_asset(self, name, data):
        asset_doc = {
            "schema": "openpype:asset-3.0",
            "name": name,
            "parent": self.project["_id"],
            "type": "asset",
            "data": data
        }
        self.log.debug("Creating asset: {}".format(item))
        entity_id = legacy_io.insert_one(item).inserted_id
        self.log.debug("Creating asset: {}".format(asset_doc))
        asset_doc["_id"] = legacy_io.insert_one(asset_doc).inserted_id

        return get_asset_by_id(project_name, entity_id)
        return asset_doc

    def _set_avalon_data_to_relative_instances(
        self,
        context,
        project_name,
        asset_doc
    ):
        for instance in context:
            # Skip instance if it has a filled asset entity
            if instance.data.get("assetEntity"):
                continue
            asset_name = asset_doc["name"]
            inst_asset_name = instance.data["asset"]

            if asset_name == inst_asset_name:
                instance.data["assetEntity"] = asset_doc

                # get parenting data
                parents = asset_doc["data"].get("parents") or list()

                # require only the relative parent
                parent_name = project_name
                if parents:
                    parent_name = parents[-1]

                # update avalon data on instance
                instance.data["anatomyData"].update({
                    "hierarchy": "/".join(parents),
                    "task": {},
                    "parent": parent_name
                })

    def _get_active_assets(self, context):
        """ Returns only asset dictionary.
@@ -12,7 +12,7 @@ class ExtractOTIOFile(openpype.api.Extractor):
    label = "Extract OTIO file"
    order = pyblish.api.ExtractorOrder - 0.45
    families = ["workfile"]
    hosts = ["resolve", "hiero"]
    hosts = ["resolve", "hiero", "traypublisher"]

    def process(self, instance):
        # create representation data

@@ -19,7 +19,7 @@ class ExtractThumbnail(pyblish.api.InstancePlugin):
    order = pyblish.api.ExtractorOrder
    families = [
        "imagesequence", "render", "render2d", "prerender",
        "source", "plate", "take"
        "source", "clip", "take"
    ]
    hosts = ["shell", "fusion", "resolve", "traypublisher"]
    enabled = False

@@ -14,7 +14,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
    # must be before `ExtractThumbnailSP`
    order = pyblish.api.ExtractorOrder - 0.01
    label = "Extract Trim Video/Audio"
    hosts = ["standalonepublisher"]
    hosts = ["standalonepublisher", "traypublisher"]
    families = ["clip", "trimming"]

    # make sure it is enabled only if at least both families are available
@@ -40,6 +40,21 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
        fps = instance.data["fps"]
        video_file_path = instance.data["editorialSourcePath"]
        extensions = instance.data.get("extensions", ["mov"])
        output_file_type = instance.data.get("outputFileType")
        reviewable = "review" in instance.data["families"]

        frame_start = int(instance.data["frameStart"])
        frame_end = int(instance.data["frameEnd"])
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]

        clip_start_h = float(instance.data["clipInH"])
        _dur = instance.data["clipDuration"]
        handle_dur = (handle_start + handle_end)
        clip_dur_h = float(_dur + handle_dur)

        if output_file_type:
            extensions = [output_file_type]

        for ext in extensions:
            self.log.info("Processing ext: `{}`".format(ext))

@@ -49,16 +64,10 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):

            clip_trimed_path = os.path.join(
                staging_dir, instance.data["name"] + ext)
            # # check video file metadata
            # input_data = plib.get_ffprobe_streams(video_file_path)[0]
            # self.log.debug(f"__ input_data: `{input_data}`")

            start = float(instance.data["clipInH"])
            dur = float(instance.data["clipDurationH"])

            if ext == ".wav":
                # offset time as ffmpeg has a bug
                start += 0.5
                clip_start_h += 0.5
                # remove "review" from families
                instance.data["families"] = [
                    fml for fml in instance.data["families"]

@@ -67,9 +76,9 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):

            ffmpeg_args = [
                ffmpeg_path,
                "-ss", str(start / fps),
                "-ss", str(clip_start_h / fps),
                "-i", video_file_path,
                "-t", str(dur / fps)
                "-t", str(clip_dur_h / fps)
            ]
            if ext in [".mov", ".mp4"]:
                ffmpeg_args.extend([

@@ -98,14 +107,15 @@ class ExtractTrimVideoAudio(openpype.api.Extractor):
                "ext": ext[1:],
                "files": os.path.basename(clip_trimed_path),
                "stagingDir": staging_dir,
                "frameStart": int(instance.data["frameStart"]),
                "frameEnd": int(instance.data["frameEnd"]),
                "frameStartFtrack": int(instance.data["frameStartH"]),
                "frameEndFtrack": int(instance.data["frameEndH"]),
                "frameStart": frame_start,
                "frameEnd": frame_end,
                "frameStartFtrack": frame_start - handle_start,
                "frameEndFtrack": frame_end + handle_end,
                "fps": fps,
                "tags": []
            }

            if ext in [".mov", ".mp4"]:
            if ext in [".mov", ".mp4"] and reviewable:
                repre.update({
                    "thumbnail": True,
                    "tags": ["review", "ftrackreview", "delete"]})
@@ -9,12 +9,12 @@ from bson.objectid import ObjectId
from pymongo import DeleteMany, ReplaceOne, InsertOne, UpdateOne
import pyblish.api

import openpype.api
from openpype.client import (
    get_representations,
    get_subset_by_name,
    get_version_by_name,
)
from openpype.lib import source_hash
from openpype.lib.profiles_filtering import filter_profiles
from openpype.lib.file_transaction import FileTransaction
from openpype.pipeline import legacy_io

@@ -78,12 +78,6 @@ def get_frame_padded(frame, padding):
    return "{frame:0{padding}d}".format(padding=padding, frame=frame)


def get_first_frame_padded(collection):
    """Return first frame as padded number from `clique.Collection`"""
    start_frame = next(iter(collection.indexes))
    return get_frame_padded(start_frame, padding=collection.padding)


class IntegrateAsset(pyblish.api.InstancePlugin):
    """Register publish in the database and transfer files to destinations.
@@ -168,7 +162,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
    # the database even if not used by the destination template
    db_representation_context_keys = [
        "project", "asset", "task", "subset", "version", "representation",
        "family", "hierarchy", "username"
        "family", "hierarchy", "username", "output"
    ]
    skip_host_families = []

@@ -426,7 +420,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
                   "".format(len(prepared_representations)))

    def prepare_subset(self, instance, project_name):
        asset_doc = instance.data.get("assetEntity")
        asset_doc = instance.data["assetEntity"]
        subset_name = instance.data["subset"]
        self.log.debug("Subset: {}".format(subset_name))
@@ -517,20 +511,22 @@ class IntegrateAsset(pyblish.api.InstancePlugin):

        # pre-flight validations
        if repre["ext"].startswith("."):
            raise ValueError("Extension must not start with a dot '.': "
                             "{}".format(repre["ext"]))
            raise KnownPublishError((
                "Extension must not start with a dot '.': {}"
            ).format(repre["ext"]))

        if repre.get("transfers"):
            raise ValueError("Representation is not allowed to have transfers"
                             "data before integration. They are computed in "
                             "the integrator"
                             "Got: {}".format(repre["transfers"]))
            raise KnownPublishError((
                "Representation is not allowed to have transfers "
                "data before integration. They are computed in "
                "the integrator. Got: {}"
            ).format(repre["transfers"]))

        # create template data for Anatomy
        template_data = copy.deepcopy(instance.data["anatomyData"])

        # required representation keys
        files = repre['files']
        files = repre["files"]
        template_data["representation"] = repre["name"]
        template_data["ext"] = repre["ext"]
@@ -546,68 +542,68 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
        }.items():
            # Allow to take value from representation
            # if not found also consider instance.data
            if key in repre:
                value = repre[key]
            elif key in instance.data:
                value = instance.data[key]
            else:
                continue
            template_data[anatomy_key] = value
            value = repre.get(key)
            if value is None:
                value = instance.data.get(key)

        if repre.get('stagingDir'):
            stagingdir = repre['stagingDir']
        else:
            if value is not None:
                template_data[anatomy_key] = value

        stagingdir = repre.get("stagingDir")
        if not stagingdir:
            # Fall back to instance staging dir if not explicitly
            # set for representation in the instance
            self.log.debug("Representation uses instance staging dir: "
                           "{}".format(instance_stagingdir))
            self.log.debug((
                "Representation uses instance staging dir: {}"
            ).format(instance_stagingdir))
            stagingdir = instance_stagingdir

        if not stagingdir:
            raise ValueError("No staging directory set for representation: "
                             "{}".format(repre))
            raise KnownPublishError(
                "No staging directory set for representation: {}".format(repre)
            )

        self.log.debug("Anatomy template name: {}".format(template_name))
        anatomy = instance.context.data['anatomy']
        template = os.path.normpath(anatomy.templates[template_name]["path"])
        anatomy = instance.context.data["anatomy"]
        publish_template_category = anatomy.templates[template_name]
        template = os.path.normpath(publish_template_category["path"])

        is_udim = bool(repre.get("udim"))

        is_sequence_representation = isinstance(files, (list, tuple))
        if is_sequence_representation:
            # Collection of files (sequence)
            assert not any(os.path.isabs(fname) for fname in files), (
                "Given file names contain full paths"
            )
            if any(os.path.isabs(fname) for fname in files):
                raise KnownPublishError("Given file names contain full paths")

            src_collection = assemble(files)

            # If the representation has `frameStart` set it renumbers the
            # frame indices of the published collection. It will start from
            # that `frameStart` index instead. Thus if that frame start
            # differs from the collection we want to shift the destination
            # frame indices from the source collection.
            destination_indexes = list(src_collection.indexes)
            destination_padding = len(get_first_frame_padded(src_collection))
            if repre.get("frameStart") is not None and not is_udim:
                index_frame_start = int(repre.get("frameStart"))

                render_template = anatomy.templates[template_name]
                # todo: should we ALWAYS manage the frame padding even when not
                # having `frameStart` set?
                frame_start_padding = int(
                    render_template.get(
                        "frame_padding",
                        render_template.get("padding")
                    )
            # Use last frame for minimum padding
            # - that should cover both 'udim' and 'frame' minimum padding
            destination_padding = len(str(destination_indexes[-1]))
            if not is_udim:
                # Change padding for frames if template has defined higher
                # padding.
                template_padding = int(
                    publish_template_category["frame_padding"]
                )
                if template_padding > destination_padding:
                    destination_padding = template_padding

                # Shift destination sequence to the start frame
                src_start_frame = next(iter(src_collection.indexes))
                shift = index_frame_start - src_start_frame
                if shift:
            # If the representation has `frameStart` set it renumbers the
            # frame indices of the published collection. It will start from
            # that `frameStart` index instead. Thus if that frame start
            # differs from the collection we want to shift the destination
            # frame indices from the source collection.
            repre_frame_start = repre.get("frameStart")
            if repre_frame_start is not None:
                index_frame_start = int(repre["frameStart"])
                # Shift destination sequence to the start frame
                destination_indexes = [
                    frame + shift for frame in destination_indexes
                    index_frame_start + idx
                    for idx in range(len(destination_indexes))
                ]
                destination_padding = frame_start_padding
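
As a quick illustration of the renumbering above (values are hypothetical):

    # Source frames 1001-1003 republished to start at repre["frameStart"] = 1.
    destination_indexes = [1001, 1002, 1003]
    index_frame_start = 1
    destination_indexes = [
        index_frame_start + idx
        for idx in range(len(destination_indexes))
    ]
    # destination_indexes == [1, 2, 3]; the padding then follows the
    # template's "frame_padding" when that value is higher.
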

            # To construct the destination template with anatomy we require
            # a Frame or UDIM tile set for the template data. We use the first
@@ -625,6 +621,13 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
            anatomy_filled = anatomy.format(template_data)
            template_filled = anatomy_filled[template_name]["path"]
            repre_context = template_filled.used_values

            # Make sure context contains frame
            # NOTE: Frame would not be available only if template does not
            #   contain '{frame}' in template -> Do we want to support it?
            if not is_udim:
                repre_context["frame"] = first_index_padded

            self.log.debug("Template filled: {}".format(str(template_filled)))
            dst_collection = assemble([os.path.normpath(template_filled)])

@@ -632,9 +635,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
            dst_collection.indexes.clear()
            dst_collection.indexes.update(set(destination_indexes))
            dst_collection.padding = destination_padding
            assert (
                len(src_collection.indexes) == len(dst_collection.indexes)
            ), "This is a bug"
            if len(src_collection.indexes) != len(dst_collection.indexes):
                raise KnownPublishError((
                    "This is a bug. Source sequence frames length"
                    " does not match integration frames length"
                ))

            # Multiple file transfers
            transfers = []

@@ -645,9 +650,13 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
        else:
            # Single file
            fname = files
            assert not os.path.isabs(fname), (
                "Given file name is a full path"
            )
            if os.path.isabs(fname):
                self.log.error(
                    "Filename in representation is filepath {}".format(fname)
                )
                raise KnownPublishError(
                    "This is a bug. Representation file name is full path"
                )

            # Manage anatomy template data
            template_data.pop("frame", None)
@@ -677,9 +686,8 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
            # Also add these values to the context even if not used by the
            # destination template
            value = template_data.get(key)
            if not value:
                continue
            repre_context[key] = template_data[key]
            if value is not None:
                repre_context[key] = value

        # Explicitly store the full list even though template data might
        # have a different value because it uses just a single udim tile

@@ -693,40 +701,30 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
        else:
            repre_id = ObjectId()

        # Backwards compatibility:
        # Store first transferred destination as published path data
        # todo: can we remove this?
        # todo: We shouldn't change data that makes its way back into
        #   instance.data[] until we know the publish actually succeeded
        #   otherwise `published_path` might not actually be valid?
        # - used primarily for reviews that are integrated to custom modules
        # TODO we should probably store all integrated files
        #   related to the representation?
        published_path = transfers[0][1]
        repre["published_path"] = published_path  # Backwards compatibility
        repre["published_path"] = published_path

        # todo: `repre` is not the actual `representation` entity
        #   we should simplify/clarify difference between data above
        #   and the actual representation entity for the database
        data = repre.get("data", {})
        data.update({'path': published_path, 'template': template})
        data.update({"path": published_path, "template": template})
        representation = {
            "_id": repre_id,
            "schema": "openpype:representation-2.0",
            "type": "representation",
            "parent": version["_id"],
            "name": repre['name'],
            "name": repre["name"],
            "data": data,

            # Imprint shortcut to context for performance reasons.
            "context": repre_context
        }

        # todo: simplify/streamline which additional data makes its way into
        #   the representation context
        if repre.get("outputName"):
            representation["context"]["output"] = repre['outputName']

        if is_sequence_representation and repre.get("frameStart") is not None:
            representation['context']['frame'] = template_data["frame"]

        return {
            "representation": representation,
            "anatomy_data": template_data,
@@ -786,7 +784,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
            version_data[key] = instance.data[key]

        # Include instance.data[versionData] directly
        version_data_instance = instance.data.get('versionData')
        version_data_instance = instance.data.get("versionData")
        if version_data_instance:
            version_data.update(version_data_instance)

@@ -826,6 +824,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):

    def get_profile_filter_criteria(self, instance):
        """Return filter criteria for `filter_profiles`"""

        # Anatomy data is pre-filled by Collectors
        anatomy_data = instance.data["anatomyData"]

@@ -856,6 +855,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
            path: modified path if possible, or unmodified path
                + warning logged
        """

        success, rootless_path = anatomy.find_root_template_from_path(path)
        if success:
            path = rootless_path

@@ -877,6 +877,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
            output_resources: array of dictionaries to be added to 'files' key
                in representation
        """

        file_infos = []
        for file_path in destinations:
            file_info = self.prepare_file_info(file_path, anatomy, sites=sites)

@@ -896,10 +897,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
        Returns:
            dict: file info dictionary
        """

        return {
            "_id": ObjectId(),
            "path": self.get_rootless_path(anatomy, path),
            "size": os.path.getsize(path),
            "hash": openpype.api.source_hash(path),
            "hash": source_hash(path),
            "sites": sites
        }
@@ -24,6 +24,10 @@ class ValidateAssetDocs(pyblish.api.InstancePlugin):
        if instance.data.get("assetEntity"):
            self.log.info("Instance has set asset document in its data.")

        elif instance.data.get("newAssetPublishing"):
            # skip if it is editorial
            self.log.info("Editorial instance does not need this check...")

        else:
            raise PublishValidationError((
                "Instance \"{}\" doesn't have asset document "

@@ -19,7 +19,8 @@ class ValidateEditorialAssetName(pyblish.api.ContextPlugin):
        "hiero",
        "standalonepublisher",
        "resolve",
        "flame"
        "flame",
        "traypublisher"
    ]

    def process(self, context):
@@ -301,7 +301,9 @@
                "traypublisher"
            ],
            "families": [
                "plate"
                "plate",
                "review",
                "audio"
            ],
            "task_types": [],
            "tasks": [],

@@ -447,6 +449,9 @@
            "enabled": false,
            "ftrack_custom_attributes": {}
        },
        "IntegrateFtrackComponentOverwrite": {
            "enabled": true
        },
        "IntegrateFtrackInstance": {
            "family_mapping": {
                "camera": "cam",
@@ -236,6 +236,63 @@
            "extensions": []
        }
    ],
    "editorial_creators": {
        "editorial_simple": {
            "default_variants": [
                "Main"
            ],
            "clip_name_tokenizer": {
                "_sequence_": "(sc\\d{3})",
                "_shot_": "(sh\\d{3})"
            },
            "shot_rename": {
                "enabled": true,
                "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}"
            },
            "shot_hierarchy": {
                "enabled": true,
                "parents_path": "{project}/{folder}/{sequence}",
                "parents": [
                    {
                        "type": "Project",
                        "name": "project",
                        "value": "{project[name]}"
                    },
                    {
                        "type": "Folder",
                        "name": "folder",
                        "value": "shots"
                    },
                    {
                        "type": "Sequence",
                        "name": "sequence",
                        "value": "{_sequence_}"
                    }
                ]
            },
            "shot_add_tasks": {},
            "family_presets": [
                {
                    "family": "review",
                    "variant": "Reference",
                    "review": true,
                    "output_file_type": ".mp4"
                },
                {
                    "family": "plate",
                    "variant": "",
                    "review": false,
                    "output_file_type": ".mov"
                },
                {
                    "family": "audio",
                    "variant": "",
                    "review": false,
                    "output_file_type": ".wav"
                }
            ]
        }
    },
    "BatchMovieCreator": {
        "default_variants": ["Main"],
        "default_tasks": ["Compositing"],
@@ -930,6 +930,21 @@
            }
        ]
    },
    {
        "type": "dict",
        "collapsible": true,
        "checkbox_key": "enabled",
        "key": "IntegrateFtrackComponentOverwrite",
        "label": "IntegrateFtrackComponentOverwrite",
        "is_group": true,
        "children": [
            {
                "type": "boolean",
                "key": "enabled",
                "label": "Enabled"
            }
        ]
    },
    {
        "type": "dict",
        "key": "IntegrateFtrackInstance",
@@ -84,7 +84,197 @@
        ]
    }
},
{
{
    "type": "dict",
    "collapsible": true,
    "key": "editorial_creators",
    "label": "Editorial creator plugins",
    "use_label_wrap": true,
    "collapsible_key": true,
    "children": [
        {
            "type": "dict",
            "collapsible": true,
            "key": "editorial_simple",
            "label": "Editorial simple creator",
            "use_label_wrap": true,
            "collapsible_key": true,
            "children": [

                {
                    "type": "list",
                    "key": "default_variants",
                    "label": "Default variants",
                    "object_type": {
                        "type": "text"
                    }
                },
                {
                    "type": "splitter"
                },
                {
                    "type": "collapsible-wrap",
                    "label": "Shot metadata creator",
                    "collapsible": true,
                    "collapsed": true,
                    "children": [
                        {
                            "key": "clip_name_tokenizer",
                            "label": "Clip name tokenizer",
                            "type": "dict-modifiable",
                            "highlight_content": true,
                            "tooltip": "Using Regex expressions to create tokens. \nThose can be used later in \"Shot rename\" creator \nor \"Shot hierarchy\". \n\nTokens should be decorated with \"_\" on each side",
                            "object_type": {
                                "type": "text"
                            }
                        },
                        {
                            "type": "dict",
                            "key": "shot_rename",
                            "label": "Shot rename",
                            "checkbox_key": "enabled",
                            "children": [
                                {
                                    "type": "boolean",
                                    "key": "enabled",
                                    "label": "Enabled"
                                },
                                {
                                    "type": "text",
                                    "key": "shot_rename_template",
                                    "label": "Shot rename template",
                                    "tooltip": "Template only supports Anatomy keys and Tokens \nfrom \"Clip name tokenizer\""
                                }
                            ]
                        },
                        {
                            "type": "dict",
                            "key": "shot_hierarchy",
                            "label": "Shot hierarchy",
                            "checkbox_key": "enabled",
                            "children": [
                                {
                                    "type": "boolean",
                                    "key": "enabled",
                                    "label": "Enabled"
                                },
                                {
                                    "type": "text",
                                    "key": "parents_path",
                                    "label": "Parents path template",
                                    "tooltip": "Using keys from \"Token to parent convertor\" or tokens directly"
                                },
                                {
                                    "key": "parents",
                                    "label": "Token to parent convertor",
                                    "type": "list",
                                    "highlight_content": true,
                                    "tooltip": "The left side is the key to be used in the template. \nThe right side is a value built from Tokens coming from \n\"Clip name tokenizer\"",
                                    "object_type": {
                                        "type": "dict",
                                        "children": [
                                            {
                                                "type": "enum",
                                                "key": "type",
                                                "label": "Parent type",
                                                "enum_items": [
                                                    {"Project": "Project"},
                                                    {"Folder": "Folder"},
                                                    {"Episode": "Episode"},
                                                    {"Sequence": "Sequence"}
                                                ]
                                            },
                                            {
                                                "type": "text",
                                                "key": "name",
                                                "label": "Parent token name",
                                                "tooltip": "Unique name used in \"Parent path template\""
                                            },
                                            {
                                                "type": "text",
                                                "key": "value",
                                                "label": "Parent name value",
                                                "tooltip": "Template where any text, Anatomy keys and Tokens could be used"
                                            }
                                        ]
                                    }
                                }
                            ]
                        },
                        {
                            "key": "shot_add_tasks",
                            "label": "Add tasks to shot",
                            "type": "dict-modifiable",
                            "highlight_content": true,
                            "object_type": {
                                "type": "dict",
                                "children": [
                                    {
                                        "type": "task-types-enum",
                                        "key": "type",
                                        "label": "Task type",
                                        "multiselection": false
                                    }
                                ]
                            }
                        }
                    ]
                },
                {
                    "type": "collapsible-wrap",
                    "label": "Shot's subset creator",
                    "collapsible": true,
                    "collapsed": true,
                    "children": [
                        {
                            "type": "list",
                            "key": "family_presets",
                            "label": "Family presets",
                            "object_type": {
                                "type": "dict",
                                "children": [
                                    {
                                        "type": "enum",
                                        "key": "family",
                                        "label": "Family",
                                        "enum_items": [
                                            {"review": "review"},
                                            {"plate": "plate"},
                                            {"audio": "audio"}
                                        ]
                                    },
                                    {
                                        "type": "text",
                                        "key": "variant",
                                        "label": "Variant",
                                        "placeholder": "< Inherited >"
                                    },
                                    {
                                        "type": "boolean",
                                        "key": "review",
                                        "label": "Review",
                                        "default": true
                                    },
                                    {
                                        "type": "enum",
                                        "key": "output_file_type",
                                        "label": "Integrating file type",
                                        "enum_items": [
                                            {".mp4": "MP4"},
                                            {".mov": "MOV"},
                                            {".wav": "WAV"}
                                        ]
                                    }
                                ]
                            }
                        }
                    ]
                }
            ]
        }
    ]
},
{
    "type": "dict",
    "collapsible": true,
    "key": "BatchMovieCreator",
@@ -5,18 +5,12 @@ import logging

from Qt import QtWidgets, QtCore

from openpype.client import (
    get_project,
    get_asset_by_name,
)
from openpype.lib import (
    get_last_workfile_with_version,
    get_workdir_data,
)
from openpype.lib import get_last_workfile_with_version
from openpype.pipeline import (
    registered_host,
    legacy_io,
)
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.tools.utils import PlaceholderLineEdit

log = logging.getLogger(__name__)

@@ -30,16 +24,10 @@ def build_workfile_data(session):
    asset_name = session["AVALON_ASSET"]
    task_name = session["AVALON_TASK"]
    host_name = session["AVALON_APP"]
    project_doc = get_project(
        project_name, fields=["name", "data.code", "config.tasks"]
    )
    asset_doc = get_asset_by_name(
        project_name,
        asset_name,
        fields=["name", "data.tasks", "data.parents"]
    )

    data = get_workdir_data(project_doc, asset_doc, task_name, host_name)
    data = get_template_data_with_names(
        project_name, asset_name, task_name, host_name
    )
    data.update({
        "version": 1,
        "comment": "",
vendor/configs/OpenColorIO-Configs (vendored submodule)

@@ -0,0 +1 @@
Subproject commit 0bb079c08be410030669cbf5f19ff869b88af953