Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 05:14:40 +01:00)

Merge pull request #2928 from pypeclub/feature/OP-1566_Flame-Create-Batch-Group

Commit e7c491f86e: 14 changed files with 1246 additions and 248 deletions
@@ -11,10 +11,8 @@ from .constants import (
from .lib import (
    CTX,
    FlameAppFramework,
    get_project_manager,
    get_current_project,
    get_current_sequence,
    create_bin,
    create_segment_data_marker,
    get_segment_data_marker,
    set_segment_data_marker,

@@ -29,7 +27,10 @@ from .lib import (
    get_frame_from_filename,
    get_padding_from_filename,
    maintained_object_duplication,
    get_clip_segment
    maintained_temp_file_path,
    get_clip_segment,
    get_batch_group_from_desktop,
    MediaInfoFile
)
from .utils import (
    setup,

@@ -56,7 +57,6 @@ from .plugin import (
    PublishableClip,
    ClipLoader,
    OpenClipSolver

)
from .workio import (
    open_file,

@@ -71,6 +71,10 @@ from .render_utils import (
    get_preset_path_by_xml_name,
    modify_preset_file
)
from .batch_utils import (
    create_batch_group,
    create_batch_group_conent
)

__all__ = [
    # constants

@@ -83,10 +87,8 @@ __all__ = [
    # lib
    "CTX",
    "FlameAppFramework",
    "get_project_manager",
    "get_current_project",
    "get_current_sequence",
    "create_bin",
    "create_segment_data_marker",
    "get_segment_data_marker",
    "set_segment_data_marker",

@@ -101,7 +103,10 @@ __all__ = [
    "get_frame_from_filename",
    "get_padding_from_filename",
    "maintained_object_duplication",
    "maintained_temp_file_path",
    "get_clip_segment",
    "get_batch_group_from_desktop",
    "MediaInfoFile",

    # pipeline
    "install",

@@ -142,5 +147,9 @@ __all__ = [
    # render utils
    "export_clip",
    "get_preset_path_by_xml_name",
    "modify_preset_file"
    "modify_preset_file",

    # batch utils
    "create_batch_group",
    "create_batch_group_conent"
]
openpype/hosts/flame/api/batch_utils.py (new file, 151 lines)

@@ -0,0 +1,151 @@
import flame


def create_batch_group(
    name,
    frame_start,
    frame_duration,
    update_batch_group=None,
    **kwargs
):
    """Create Batch Group in active project's Desktop

    Args:
        name (str): name of batch group to be created
        frame_start (int): start frame of batch
        frame_end (int): end frame of batch
        update_batch_group (PyBatch)[optional]: batch group to update

    Return:
        PyBatch: active flame batch group
    """
    # make sure some batch obj is present
    batch_group = update_batch_group or flame.batch

    schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1']
    shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1']

    handle_start = kwargs.get("handleStart") or 0
    handle_end = kwargs.get("handleEnd") or 0

    frame_start -= handle_start
    frame_duration += handle_start + handle_end

    if not update_batch_group:
        # Create batch group with name, start_frame value, duration value,
        # set of schematic reel names, set of shelf reel names
        batch_group = batch_group.create_batch_group(
            name,
            start_frame=frame_start,
            duration=frame_duration,
            reels=schematic_reels,
            shelf_reels=shelf_reels
        )
    else:
        batch_group.name = name
        batch_group.start_frame = frame_start
        batch_group.duration = frame_duration

        # add reels to batch group
        _add_reels_to_batch_group(
            batch_group, schematic_reels, shelf_reels)

        # TODO: also update write node if there is any
        # TODO: also update loaders to start from correct frameStart

    if kwargs.get("switch_batch_tab"):
        # use this command to switch to the batch tab
        batch_group.go_to()

    return batch_group


def _add_reels_to_batch_group(batch_group, reels, shelf_reels):
    # update or create defined reels
    # helper variables
    reel_names = [
        r.name.get_value()
        for r in batch_group.reels
    ]
    shelf_reel_names = [
        r.name.get_value()
        for r in batch_group.shelf_reels
    ]
    # add schematic reels
    for _r in reels:
        if _r in reel_names:
            continue
        batch_group.create_reel(_r)

    # add shelf reels
    for _sr in shelf_reels:
        if _sr in shelf_reel_names:
            continue
        batch_group.create_shelf_reel(_sr)


def create_batch_group_conent(batch_nodes, batch_links, batch_group=None):
    """Creating batch group with links

    Args:
        batch_nodes (list of dict): each dict is node definition
        batch_links (list of dict): each dict is link definition
        batch_group (PyBatch, optional): batch group. Defaults to None.

    Return:
        dict: all batch nodes {name or id: PyNode}
    """
    # make sure some batch obj is present
    batch_group = batch_group or flame.batch
    all_batch_nodes = {
        b.name.get_value(): b
        for b in batch_group.nodes
    }
    for node in batch_nodes:
        # NOTE: node_props needs to be ideally OrederDict type
        node_id, node_type, node_props = (
            node["id"], node["type"], node["properties"])

        # get node name for checking if exists
        node_name = node_props.pop("name", None) or node_id

        if all_batch_nodes.get(node_name):
            # update existing batch node
            batch_node = all_batch_nodes[node_name]
        else:
            # create new batch node
            batch_node = batch_group.create_node(node_type)

            # set name
            batch_node.name.set_value(node_name)

        # set attributes found in node props
        for key, value in node_props.items():
            if not hasattr(batch_node, key):
                continue
            setattr(batch_node, key, value)

        # add created node for possible linking
        all_batch_nodes[node_id] = batch_node

    # link nodes to each other
    for link in batch_links:
        _from_n, _to_n = link["from_node"], link["to_node"]

        # check if all linking nodes are available
        if not all([
            all_batch_nodes.get(_from_n["id"]),
            all_batch_nodes.get(_to_n["id"])
        ]):
            continue

        # link nodes in defined link
        batch_group.connect_nodes(
            all_batch_nodes[_from_n["id"]], _from_n["connector"],
            all_batch_nodes[_to_n["id"]], _to_n["connector"]
        )

    # sort batch nodes
    batch_group.organize()

    return all_batch_nodes
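For orientation, a minimal usage sketch of the two new helpers, mirroring how integrate_batch_group.py (further below) calls them; the batch group name and node ids are illustrative only:

import openpype.hosts.flame.api as opfapi

# create (or later update) a batch group on the active Desktop
bgroup = opfapi.create_batch_group(
    "sh010_compositing",              # illustrative name
    1001, 100,                        # frame_start, frame_duration
    shematic_reels=["OP_LoadedReel"],
    handleStart=10, handleEnd=10
)

# wire a comp node into a Write File node inside that group
nodes = [
    {"id": "comp_node01", "type": "comp", "properties": {}},
    {"id": "write_file_node01", "type": "Write File", "properties": {}},
]
links = [
    {"from_node": {"id": "comp_node01", "connector": "Result"},
     "to_node": {"id": "write_file_node01", "connector": "Front"}},
]
all_nodes = opfapi.create_batch_group_conent(nodes, links, bgroup)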
@@ -3,7 +3,12 @@ import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
    MARKER_COLOR,

@@ -12,9 +17,10 @@ from .constants import (
    COLOR_MAP,
    MARKER_PUBLISH_DEFAULT
)
from openpype.api import Logger

log = Logger.get_logger(__name__)
import openpype.api as openpype

log = openpype.Logger.get_logger(__name__)

FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")

@@ -227,16 +233,6 @@ class FlameAppFramework(object):
        return True


def get_project_manager():
    # TODO: get_project_manager
    return


def get_media_storage():
    # TODO: get_media_storage
    return


def get_current_project():
    import flame
    return flame.project.current_project

@@ -266,11 +262,6 @@ def get_current_sequence(selection):
    return process_timeline


def create_bin(name, root=None):
    # TODO: create_bin
    return


def rescan_hooks():
    import flame
    try:

@@ -280,6 +271,7 @@ def rescan_hooks():


def get_metadata(project_name, _log=None):
    # TODO: can be replaced by MediaInfoFile class method
    from adsk.libwiretapPythonClientAPI import (
        WireTapClient,
        WireTapServerHandle,

@@ -704,6 +696,25 @@ def maintained_object_duplication(item):
        flame.delete(duplicate)


@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
    _suffix = suffix or ""

    try:
        # Store dumped json to temporary file
        temporary_file = tempfile.mktemp(
            suffix=_suffix, prefix="flame_maintained_")
        yield temporary_file.replace("\\", "/")

    except IOError as _error:
        raise IOError(
            "Not able to create temp json file: {}".format(_error))

    finally:
        # Remove the temporary json
        os.remove(temporary_file)


def get_clip_segment(flame_clip):
    name = flame_clip.name.get_value()
    version = flame_clip.versions[0]

@@ -717,3 +728,213 @@ def get_clip_segment(flame_clip):
        raise ValueError("Clip `{}` has too many segments!".format(name))

    return segments[0]


def get_batch_group_from_desktop(name):
    project = get_current_project()
    project_desktop = project.current_workspace.desktop

    for bgroup in project_desktop.batch_groups:
        if bgroup.name.get_value() in name:
            return bgroup


class MediaInfoFile(object):
    """Class to get media info file clip data

    Raises:
        IOError: MEDIA_SCRIPT_PATH path doesn't exists
        TypeError: Not able to generate clip xml data file
        ET.ParseError: Missing clip in xml clip data
        IOError: Not able to save xml clip data to file

    Attributes:
        str: `MEDIA_SCRIPT_PATH` path to flame binary
        logging.Logger: `log` logger

    TODO: add method for getting metadata to dict
    """
    MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"

    log = log

    _clip_data = None
    _start_frame = None
    _fps = None
    _drop_mode = None

    def __init__(self, path, **kwargs):

        # replace log if any
        if kwargs.get("logger"):
            self.log = kwargs["logger"]

        # test if `dl_get_media_info` paht exists
        self._validate_media_script_path()

        # derivate other feed variables
        self.feed_basename = os.path.basename(path)
        self.feed_dir = os.path.dirname(path)
        self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()

        with maintained_temp_file_path(".clip") as tmp_path:
            self.log.info("Temp File: {}".format(tmp_path))
            self._generate_media_info_file(tmp_path)

            # get clip data and make them single if there is multiple
            # clips data
            xml_data = self._make_single_clip_media_info(tmp_path)
            self.log.debug("xml_data: {}".format(xml_data))
            self.log.debug("type: {}".format(type(xml_data)))

            # get all time related data and assign them
            self._get_time_info_from_origin(xml_data)
            self.log.debug("start_frame: {}".format(self.start_frame))
            self.log.debug("fps: {}".format(self.fps))
            self.log.debug("drop frame: {}".format(self.drop_mode))
            self.clip_data = xml_data

    @property
    def clip_data(self):
        """Clip's xml clip data

        Returns:
            xml.etree.ElementTree: xml data
        """
        return self._clip_data

    @clip_data.setter
    def clip_data(self, data):
        self._clip_data = data

    @property
    def start_frame(self):
        """ Clip's starting frame found in timecode

        Returns:
            int: number of frames
        """
        return self._start_frame

    @start_frame.setter
    def start_frame(self, number):
        self._start_frame = int(number)

    @property
    def fps(self):
        """ Clip's frame rate

        Returns:
            float: frame rate
        """
        return self._fps

    @fps.setter
    def fps(self, fl_number):
        self._fps = float(fl_number)

    @property
    def drop_mode(self):
        """ Clip's drop frame mode

        Returns:
            str: drop frame flag
        """
        return self._drop_mode

    @drop_mode.setter
    def drop_mode(self, text):
        self._drop_mode = str(text)

    def _validate_media_script_path(self):
        if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
            raise IOError("Media Scirpt does not exist: `{}`".format(
                self.MEDIA_SCRIPT_PATH))

    def _generate_media_info_file(self, fpath):
        # Create cmd arguments for gettig xml file info file
        cmd_args = [
            self.MEDIA_SCRIPT_PATH,
            "-e", self.feed_ext,
            "-o", fpath,
            self.feed_dir
        ]

        try:
            # execute creation of clip xml template data
            openpype.run_subprocess(cmd_args)
        except TypeError as error:
            raise TypeError(
                "Error creating `{}` due: {}".format(fpath, error))

    def _make_single_clip_media_info(self, fpath):
        with open(fpath) as f:
            lines = f.readlines()
            _added_root = itertools.chain(
                "<root>", deepcopy(lines)[1:], "</root>")
            new_root = ET.fromstringlist(_added_root)

        # find the clip which is matching to my input name
        xml_clips = new_root.findall("clip")
        matching_clip = None
        for xml_clip in xml_clips:
            if xml_clip.find("name").text in self.feed_basename:
                matching_clip = xml_clip

        if matching_clip is None:
            # return warning there is missing clip
            raise ET.ParseError(
                "Missing clip in `{}`. Available clips {}".format(
                    self.feed_basename, [
                        xml_clip.find("name").text
                        for xml_clip in xml_clips
                    ]
                ))

        return matching_clip

    def _get_time_info_from_origin(self, xml_data):
        try:
            for out_track in xml_data.iter('track'):
                for out_feed in out_track.iter('feed'):
                    # start frame
                    out_feed_nb_ticks_obj = out_feed.find(
                        'startTimecode/nbTicks')
                    self.start_frame = out_feed_nb_ticks_obj.text

                    # fps
                    out_feed_fps_obj = out_feed.find(
                        'startTimecode/rate')
                    self.fps = out_feed_fps_obj.text

                    # drop frame mode
                    out_feed_drop_mode_obj = out_feed.find(
                        'startTimecode/dropMode')
                    self.drop_mode = out_feed_drop_mode_obj.text
                    break
                else:
                    continue
        except Exception as msg:
            self.log.warning(msg)

    @staticmethod
    def write_clip_data_to_file(fpath, xml_element_data):
        """ Write xml element of clip data to file

        Args:
            fpath (string): file path
            xml_element_data (xml.etree.ElementTree.Element): xml data

        Raises:
            IOError: If data could not be written to file
        """
        try:
            # save it as new file
            tree = cET.ElementTree(xml_element_data)
            tree.write(
                fpath, xml_declaration=True,
                method='xml', encoding='UTF-8'
            )
        except IOError as error:
            raise IOError(
                "Not able to write data to file: {}".format(error))
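For reference, a brief sketch of how the new MediaInfoFile helper is consumed elsewhere in this commit (OTIO export and OpenClipSolver); the path is an example only:

from openpype.hosts.flame.api import MediaInfoFile

media_info = MediaInfoFile("/mnt/projects/sh010/plate.0001.exr")  # example path
print(media_info.start_frame)   # int parsed from startTimecode/nbTicks
print(media_info.fps)           # float parsed from startTimecode/rate
print(media_info.drop_mode)     # str parsed from startTimecode/dropMode
clip_xml = media_info.clip_data  # matched <clip> element kept for further edits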
@@ -1,24 +1,19 @@
import os
import re
import shutil
import sys
from xml.etree import ElementTree as ET
import six
import qargparse
from Qt import QtWidgets, QtCore
import openpype.api as openpype
from openpype.pipeline import (
    LegacyCreator,
    LoaderPlugin,
)
from openpype import style
from . import (
    lib as flib,
    pipeline as fpipeline,
    constants
)

from copy import deepcopy
from xml.etree import ElementTree as ET

from Qt import QtCore, QtWidgets

import openpype.api as openpype
import qargparse
from openpype import style
from openpype.pipeline import LegacyCreator, LoaderPlugin

from . import constants
from . import lib as flib
from . import pipeline as fpipeline

log = openpype.Logger.get_logger(__name__)

@@ -660,8 +655,8 @@ class PublishableClip:


# Publishing plugin functions
# Loader plugin functions

# Loader plugin functions
class ClipLoader(LoaderPlugin):
    """A basic clip loader for Flame

@@ -681,50 +676,52 @@ class ClipLoader(LoaderPlugin):
    ]


class OpenClipSolver:
    media_script_path = "/opt/Autodesk/mio/current/dl_get_media_info"
    tmp_name = "_tmp.clip"
    tmp_file = None
class OpenClipSolver(flib.MediaInfoFile):
    create_new_clip = False

    out_feed_nb_ticks = None
    out_feed_fps = None
    out_feed_drop_mode = None

    log = log

    def __init__(self, openclip_file_path, feed_data):
        # test if media script paht exists
        self._validate_media_script_path()
        self.out_file = openclip_file_path

        # new feed variables:
        feed_path = feed_data["path"]
        feed_path = feed_data.pop("path")

        # initialize parent class
        super(OpenClipSolver, self).__init__(
            feed_path,
            **feed_data
        )

        # get other metadata
        self.feed_version_name = feed_data["version"]
        self.feed_colorspace = feed_data.get("colorspace")

        if feed_data.get("logger"):
            self.log = feed_data["logger"]
        self.log.debug("feed_version_name: {}".format(self.feed_version_name))

        # derivate other feed variables
        self.feed_basename = os.path.basename(feed_path)
        self.feed_dir = os.path.dirname(feed_path)
        self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()

        if not os.path.isfile(openclip_file_path):
            # openclip does not exist yet and will be created
            self.tmp_file = self.out_file = openclip_file_path
        self.log.debug("feed_ext: {}".format(self.feed_ext))
        self.log.debug("out_file: {}".format(self.out_file))
        if not self._is_valid_tmp_file(self.out_file):
            self.create_new_clip = True

        else:
            # output a temp file
            self.out_file = openclip_file_path
            self.tmp_file = os.path.join(self.feed_dir, self.tmp_name)
            self._clear_tmp_file()
    def _is_valid_tmp_file(self, file):
        # check if file exists
        if os.path.isfile(file):
            # test also if file is not empty
            with open(file) as f:
                lines = f.readlines()

            self.log.info("Temp File: {}".format(self.tmp_file))
            if len(lines) > 2:
                return True

            # file is probably corrupted
            os.remove(file)
            return False

    def make(self):
        self._generate_media_info_file()

        if self.create_new_clip:
            # New openClip

@@ -732,42 +729,17 @@ class OpenClipSolver:
        else:
            self._update_open_clip()

    def _validate_media_script_path(self):
        if not os.path.isfile(self.media_script_path):
            raise IOError("Media Scirpt does not exist: `{}`".format(
                self.media_script_path))

    def _generate_media_info_file(self):
        # Create cmd arguments for gettig xml file info file
        cmd_args = [
            self.media_script_path,
            "-e", self.feed_ext,
            "-o", self.tmp_file,
            self.feed_dir
        ]

        # execute creation of clip xml template data
        try:
            openpype.run_subprocess(cmd_args)
        except TypeError:
            self.log.error("Error creating self.tmp_file")
            six.reraise(*sys.exc_info())

    def _clear_tmp_file(self):
        if os.path.isfile(self.tmp_file):
            os.remove(self.tmp_file)

    def _clear_handler(self, xml_object):
        for handler in xml_object.findall("./handler"):
            self.log.debug("Handler found")
            self.log.info("Handler found")
            xml_object.remove(handler)

    def _create_new_open_clip(self):
        self.log.info("Building new openClip")
        self.log.debug(">> self.clip_data: {}".format(self.clip_data))

        tmp_xml = ET.parse(self.tmp_file)

        tmp_xml_feeds = tmp_xml.find('tracks/track/feeds')
        # clip data comming from MediaInfoFile
        tmp_xml_feeds = self.clip_data.find('tracks/track/feeds')
        tmp_xml_feeds.set('currentVersion', self.feed_version_name)
        for tmp_feed in tmp_xml_feeds:
            tmp_feed.set('vuid', self.feed_version_name)

@@ -778,46 +750,48 @@ class OpenClipSolver:

            self._clear_handler(tmp_feed)

        tmp_xml_versions_obj = tmp_xml.find('versions')
        tmp_xml_versions_obj = self.clip_data.find('versions')
        tmp_xml_versions_obj.set('currentVersion', self.feed_version_name)
        for xml_new_version in tmp_xml_versions_obj:
            xml_new_version.set('uid', self.feed_version_name)
            xml_new_version.set('type', 'version')

        xml_data = self._fix_xml_data(tmp_xml)
        self._clear_handler(self.clip_data)
        self.log.info("Adding feed version: {}".format(self.feed_basename))

        self._write_result_xml_to_file(xml_data)

        self.log.info("openClip Updated: {}".format(self.tmp_file))
        self.write_clip_data_to_file(self.out_file, self.clip_data)

    def _update_open_clip(self):
        self.log.info("Updating openClip ..")

        out_xml = ET.parse(self.out_file)
        tmp_xml = ET.parse(self.tmp_file)
        out_xml = out_xml.getroot()

        self.log.debug(">> out_xml: {}".format(out_xml))
        self.log.debug(">> tmp_xml: {}".format(tmp_xml))
        self.log.debug(">> self.clip_data: {}".format(self.clip_data))

        # Get new feed from tmp file
        tmp_xml_feed = tmp_xml.find('tracks/track/feeds/feed')
        tmp_xml_feed = self.clip_data.find('tracks/track/feeds/feed')

        self._clear_handler(tmp_xml_feed)
        self._get_time_info_from_origin(out_xml)

        if self.out_feed_fps:
        # update fps from MediaInfoFile class
        if self.fps:
            tmp_feed_fps_obj = tmp_xml_feed.find(
                "startTimecode/rate")
            tmp_feed_fps_obj.text = self.out_feed_fps
        if self.out_feed_nb_ticks:
            tmp_feed_fps_obj.text = str(self.fps)

        # update start_frame from MediaInfoFile class
        if self.start_frame:
            tmp_feed_nb_ticks_obj = tmp_xml_feed.find(
                "startTimecode/nbTicks")
            tmp_feed_nb_ticks_obj.text = self.out_feed_nb_ticks
        if self.out_feed_drop_mode:
            tmp_feed_nb_ticks_obj.text = str(self.start_frame)

        # update drop_mode from MediaInfoFile class
        if self.drop_mode:
            tmp_feed_drop_mode_obj = tmp_xml_feed.find(
                "startTimecode/dropMode")
            tmp_feed_drop_mode_obj.text = self.out_feed_drop_mode
            tmp_feed_drop_mode_obj.text = str(self.drop_mode)

        new_path_obj = tmp_xml_feed.find(
            "spans/span/path")

@@ -850,7 +824,7 @@ class OpenClipSolver:
            "version", {"type": "version", "uid": self.feed_version_name})
        out_xml_versions_obj.insert(0, new_version_obj)

        xml_data = self._fix_xml_data(out_xml)
        self._clear_handler(out_xml)

        # fist create backup
        self._create_openclip_backup_file(self.out_file)

@@ -858,30 +832,9 @@ class OpenClipSolver:
        self.log.info("Adding feed version: {}".format(
            self.feed_version_name))

        self._write_result_xml_to_file(xml_data)
        self.write_clip_data_to_file(self.out_file, out_xml)

        self.log.info("openClip Updated: {}".format(self.out_file))

        self._clear_tmp_file()

    def _get_time_info_from_origin(self, xml_data):
        try:
            for out_track in xml_data.iter('track'):
                for out_feed in out_track.iter('feed'):
                    out_feed_nb_ticks_obj = out_feed.find(
                        'startTimecode/nbTicks')
                    self.out_feed_nb_ticks = out_feed_nb_ticks_obj.text
                    out_feed_fps_obj = out_feed.find(
                        'startTimecode/rate')
                    self.out_feed_fps = out_feed_fps_obj.text
                    out_feed_drop_mode_obj = out_feed.find(
                        'startTimecode/dropMode')
                    self.out_feed_drop_mode = out_feed_drop_mode_obj.text
                    break
                else:
                    continue
        except Exception as msg:
            self.log.warning(msg)
        self.log.debug("OpenClip Updated: {}".format(self.out_file))

    def _feed_exists(self, xml_data, path):
        # loop all available feed paths and check if

@@ -892,15 +845,6 @@ class OpenClipSolver:
            "Not appending file as it already is in .clip file")
            return True

    def _fix_xml_data(self, xml_data):
        xml_root = xml_data.getroot()
        self._clear_handler(xml_root)
        return ET.tostring(xml_root).decode('utf-8')

    def _write_result_xml_to_file(self, xml_data):
        with open(self.out_file, "w") as f:
            f.write(xml_data)

    def _create_openclip_backup_file(self, file):
        bck_file = "{}.bak".format(file)
        # if backup does not exist
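A hedged sketch of the refactored OpenClipSolver call pattern, as used by the batch loader added below; all paths and values are illustrative:

import openpype.hosts.flame.api as opfapi

loading_context = {
    "path": "/mnt/publish/sh010/plate/v003/sh010_plate_v003.0001.exr",  # example
    "colorspace": "ACES - ACEScg",
    "version": "v003",
}
# creates the .clip file when missing, otherwise appends a new feed version
opfapi.OpenClipSolver("/mnt/work/sh010/sh010_plate.clip", loading_context).make()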
@@ -185,7 +185,9 @@ class WireTapCom(object):

        exit_code = subprocess.call(
            project_create_cmd,
            cwd=os.path.expanduser('~'))
            cwd=os.path.expanduser('~'),
            preexec_fn=_subprocess_preexec_fn
        )

        if exit_code != 0:
            RuntimeError("Cannot create project in flame db")

@@ -254,7 +256,7 @@ class WireTapCom(object):
        filtered_users = [user for user in used_names if user_name in user]

        if filtered_users:
            # todo: need to find lastly created following regex pattern for
            # TODO: need to find lastly created following regex pattern for
            # date used in name
            return filtered_users.pop()

@@ -448,7 +450,9 @@ class WireTapCom(object):

        exit_code = subprocess.call(
            project_colorspace_cmd,
            cwd=os.path.expanduser('~'))
            cwd=os.path.expanduser('~'),
            preexec_fn=_subprocess_preexec_fn
        )

        if exit_code != 0:
            RuntimeError("Cannot set colorspace {} on project {}".format(

@@ -456,6 +460,15 @@ class WireTapCom(object):
            ))


def _subprocess_preexec_fn():
    """ Helper function

    Setting permission mask to 0777
    """
    os.setpgrp()
    os.umask(0o000)


if __name__ == "__main__":
    # get json exchange data
    json_path = sys.argv[-1]
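For context, a minimal sketch of the subprocess pattern introduced here, assuming a POSIX host (preexec_fn is not available on Windows); the command is a placeholder:

import os
import subprocess

def _subprocess_preexec_fn():
    # detach into own process group and relax the umask for created files
    os.setpgrp()
    os.umask(0o000)

exit_code = subprocess.call(
    ["true"],                          # placeholder command
    cwd=os.path.expanduser("~"),
    preexec_fn=_subprocess_preexec_fn,
)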
@@ -11,8 +11,6 @@ from . import utils
import flame
from pprint import pformat

reload(utils)  # noqa

log = logging.getLogger(__name__)


@@ -260,24 +258,15 @@ def create_otio_markers(otio_item, item):
    otio_item.markers.append(otio_marker)


def create_otio_reference(clip_data):
def create_otio_reference(clip_data, fps=None):
    metadata = _get_metadata(clip_data)

    # get file info for path and start frame
    frame_start = 0
    fps = CTX.get_fps()
    fps = fps or CTX.get_fps()

    path = clip_data["fpath"]

    reel_clip = None
    match_reel_clip = [
        clip for clip in CTX.clips
        if clip["fpath"] == path
    ]
    if match_reel_clip:
        reel_clip = match_reel_clip.pop()
        fps = reel_clip["fps"]

    file_name = os.path.basename(path)
    file_head, extension = os.path.splitext(file_name)

@@ -339,13 +328,22 @@ def create_otio_reference(clip_data):


def create_otio_clip(clip_data):
    from openpype.hosts.flame.api import MediaInfoFile

    segment = clip_data["PySegment"]

    # create media reference
    media_reference = create_otio_reference(clip_data)

    # calculate source in
    first_frame = utils.get_frame_from_filename(clip_data["fpath"]) or 0
    media_info = MediaInfoFile(clip_data["fpath"])
    media_timecode_start = media_info.start_frame
    media_fps = media_info.fps

    # create media reference
    media_reference = create_otio_reference(clip_data, media_fps)

    # define first frame
    first_frame = media_timecode_start or utils.get_frame_from_filename(
        clip_data["fpath"]) or 0

    source_in = int(clip_data["source_in"]) - int(first_frame)

    # creatae source range

@@ -378,38 +376,6 @@ def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    )


def get_clips_in_reels(project):
    output_clips = []
    project_desktop = project.current_workspace.desktop

    for reel_group in project_desktop.reel_groups:
        for reel in reel_group.reels:
            for clip in reel.clips:
                clip_data = {
                    "PyClip": clip,
                    "fps": float(str(clip.frame_rate)[:-4])
                }

                attrs = [
                    "name", "width", "height",
                    "ratio", "sample_rate", "bit_depth"
                ]

                for attr in attrs:
                    val = getattr(clip, attr)
                    clip_data[attr] = val

                version = clip.versions[-1]
                track = version.tracks[-1]
                for segment in track.segments:
                    segment_data = _get_segment_attributes(segment)
                    clip_data.update(segment_data)

                output_clips.append(clip_data)

    return output_clips


def _get_colourspace_policy():

    output = {}

@@ -493,9 +459,6 @@ def _get_shot_tokens_values(clip, tokens):
    old_value = None
    output = {}

    if not clip.shot_name:
        return output

    old_value = clip.shot_name.get_value()

    for token in tokens:

@@ -513,15 +476,21 @@ def _get_shot_tokens_values(clip, tokens):


def _get_segment_attributes(segment):
    # log.debug(dir(segment))

    if str(segment.name)[1:-1] == "":
    log.debug("Segment name|hidden: {}|{}".format(
        segment.name.get_value(), segment.hidden
    ))
    if (
        segment.name.get_value() == ""
        or segment.hidden.get_value()
    ):
        return None

    # Add timeline segment to tree
    clip_data = {
        "segment_name": segment.name.get_value(),
        "segment_comment": segment.comment.get_value(),
        "shot_name": segment.shot_name.get_value(),
        "tape_name": segment.tape_name,
        "source_name": segment.source_name,
        "fpath": segment.file_path,

@@ -529,9 +498,10 @@ def _get_segment_attributes(segment):
    }

    # add all available shot tokens
    shot_tokens = _get_shot_tokens_values(segment, [
        "<colour space>", "<width>", "<height>", "<depth>",
    ])
    shot_tokens = _get_shot_tokens_values(
        segment,
        ["<colour space>", "<width>", "<height>", "<depth>"]
    )
    clip_data.update(shot_tokens)

    # populate shot source metadata

@@ -561,11 +531,6 @@ def create_otio_timeline(sequence):
    log.info(sequence.attributes)

    CTX.project = get_current_flame_project()
    CTX.clips = get_clips_in_reels(CTX.project)

    log.debug(pformat(
        CTX.clips
    ))

    # get current timeline
    CTX.set_fps(

@@ -583,8 +548,13 @@ def create_otio_timeline(sequence):
    # create otio tracks and clips
    for ver in sequence.versions:
        for track in ver.tracks:
            if len(track.segments) == 0 and track.hidden:
                return None
            # avoid all empty tracks
            # or hidden tracks
            if (
                len(track.segments) == 0
                or track.hidden.get_value()
            ):
                continue

            # convert track to otio
            otio_track = create_otio_track(

@@ -597,11 +567,7 @@ def create_otio_timeline(sequence):
                continue
            all_segments.append(clip_data)

        segments_ordered = {
            itemindex: clip_data
            for itemindex, clip_data in enumerate(
                all_segments)
        }
        segments_ordered = dict(enumerate(all_segments))
        log.debug("_ segments_ordered: {}".format(
            pformat(segments_ordered)
        ))

@@ -612,15 +578,11 @@ def create_otio_timeline(sequence):
            log.debug("_ itemindex: {}".format(itemindex))

            # Add Gap if needed
            if itemindex == 0:
                # if it is first track item at track then add
                # it to previous item
                prev_item = segment_data

            else:
                # get previous item
                prev_item = segments_ordered[itemindex - 1]

            prev_item = (
                segment_data
                if itemindex == 0
                else segments_ordered[itemindex - 1]
            )
            log.debug("_ segment_data: {}".format(segment_data))

            # calculate clip frame range difference from each other
@@ -22,7 +22,7 @@ class LoadClip(opfapi.ClipLoader):
    # settings
    reel_group_name = "OpenPype_Reels"
    reel_name = "Loaded"
    clip_name_template = "{asset}_{subset}_{representation}"
    clip_name_template = "{asset}_{subset}_{output}"

    def load(self, context, name, namespace, options):

@@ -39,7 +39,7 @@ class LoadClip(opfapi.ClipLoader):
        clip_name = self.clip_name_template.format(
            **context["representation"]["context"])

        # todo: settings in imageio
        # TODO: settings in imageio
        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = colorspace
openpype/hosts/flame/plugins/load/load_clip_batch.py (new file, 139 lines)

@@ -0,0 +1,139 @@
import os
import flame
from pprint import pformat
import openpype.hosts.flame.api as opfapi


class LoadClipBatch(opfapi.ClipLoader):
    """Load a subset to timeline as clip

    Place clip to timeline on its asset origin timings collected
    during conforming to project
    """

    families = ["render2d", "source", "plate", "render", "review"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]

    label = "Load as clip to current batch"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_name = "OP_LoadedReel"
    clip_name_template = "{asset}_{subset}_{output}"

    def load(self, context, name, namespace, options):

        # get flame objects
        self.batch = options.get("batch") or flame.batch

        # load clip to timeline and get main variables
        namespace = namespace
        version = context['version']
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)

        # in case output is not in context replace key to representation
        if not context["representation"]["context"].get("output"):
            self.clip_name_template.replace("output", "representation")

        clip_name = self.clip_name_template.format(
            **context["representation"]["context"])

        # TODO: settings in imageio
        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = colorspace

        # create workfile path
        workfile_dir = options.get("workdir") or os.environ["AVALON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )
        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context ad send it to openClipLoader
        loading_context = {
            "path": self.fname.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "logger": self.log

        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make openpype clip file
        opfapi.OpenClipSolver(openclip_path, loading_context).make()

        # prepare Reel group in actual desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_data.get(key, str(None))
            for key in add_keys
        }
        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = opfapi.get_clip_segment(opc)

        # return opfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        reel = self._get_reel()

        # with maintained openclip as opc
        matching_clip = None
        for cl in reel.clips:
            if cl.name.get_value() != name:
                continue
            matching_clip = cl

        if not matching_clip:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

        return matching_clip

    def _get_reel(self):

        matching_reel = [
            rg for rg in self.batch.reels
            if rg.name.get_value() == self.reel_name
        ]

        return (
            matching_reel.pop()
            if matching_reel
            else self.batch.create_reel(str(self.reel_name))
        )
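A small sketch of how the clip_name_template resolves against a representation context; the context values are illustrative:

clip_name_template = "{asset}_{subset}_{output}"

repre_context = {"asset": "sh010", "subset": "plateMain", "output": "exr"}  # example
clip_name = clip_name_template.format(**repre_context)
# -> "sh010_plateMain_exr"

# when the context carries no "output" key the loader is meant to fall back
# to the "{representation}" token instead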
@@ -21,19 +21,12 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):

    audio_track_items = []

    # TODO: add to settings
    # settings
    xml_preset_attrs_from_comments = {
        "width": "number",
        "height": "number",
        "pixelRatio": "float",
        "resizeType": "string",
        "resizeFilter": "string"
    }
    xml_preset_attrs_from_comments = []
    add_tasks = []

    def process(self, context):
        project = context.data["flameProject"]
        sequence = context.data["flameSequence"]
        selected_segments = context.data["flameSelectedSegments"]
        self.log.debug("__ selected_segments: {}".format(selected_segments))

@@ -79,9 +72,9 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):

        # solve handles length
        marker_data["handleStart"] = min(
            marker_data["handleStart"], head)
            marker_data["handleStart"], abs(head))
        marker_data["handleEnd"] = min(
            marker_data["handleEnd"], tail)
            marker_data["handleEnd"], abs(tail))

        with_audio = bool(marker_data.pop("audio"))

@@ -112,7 +105,11 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
            "fps": self.fps,
            "flameSourceClip": source_clip,
            "sourceFirstFrame": int(first_frame),
            "path": file_path
            "path": file_path,
            "flameAddTasks": self.add_tasks,
            "tasks": {
                task["name"]: {"type": task["type"]}
                for task in self.add_tasks}
        })

        # get otio clip data

@@ -187,7 +184,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
            # split to key and value
            key, value = split.split(":")

            for a_name, a_type in self.xml_preset_attrs_from_comments.items():
            for attr_data in self.xml_preset_attrs_from_comments:
                a_name = attr_data["name"]
                a_type = attr_data["type"]

                # exclude all not related attributes
                if a_name.lower() not in key.lower():
                    continue

@@ -247,6 +247,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
        head = clip_data.get("segment_head")
        tail = clip_data.get("segment_tail")

        # HACK: it is here to serve for versions bellow 2021.1
        if not head:
            head = int(clip_data["source_in"]) - int(first_frame)
        if not tail:
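A hedged sketch of the comment parsing this collector performs with the new list-style setting (defaults appear later in this commit); the comment string and the exact type coercion are illustrative:

xml_preset_attrs_from_comments = [
    {"name": "width", "type": "number"},
    {"name": "height", "type": "number"},
]

comment = "width:1920, height:1080"  # illustrative segment comment
parsed = {}
for split in comment.split(","):
    key, value = split.split(":")
    for attr_data in xml_preset_attrs_from_comments:
        a_name = attr_data["name"]
        a_type = attr_data["type"]
        if a_name.lower() not in key.lower():
            continue
        parsed[a_name] = int(value) if a_type == "number" else value.strip()
# parsed -> {"width": 1920, "height": 1080}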
@@ -61,9 +61,13 @@ class ExtractSubsetResources(openpype.api.Extractor):

        # flame objects
        segment = instance.data["item"]
        segment_name = segment.name.get_value()
        sequence_clip = instance.context.data["flameSequence"]
        clip_data = instance.data["flameSourceClip"]
        clip = clip_data["PyClip"]

        reel_clip = None
        if clip_data:
            reel_clip = clip_data["PyClip"]

        # segment's parent track name
        s_track_name = segment.parent.name.get_value()

@@ -108,6 +112,16 @@ class ExtractSubsetResources(openpype.api.Extractor):
            ignore_comment_attrs = preset_config["ignore_comment_attrs"]
            color_out = preset_config["colorspace_out"]

            # get attribures related loading in integrate_batch_group
            load_to_batch_group = preset_config.get(
                "load_to_batch_group")
            batch_group_loader_name = preset_config.get(
                "batch_group_loader_name")

            # convert to None if empty string
            if batch_group_loader_name == "":
                batch_group_loader_name = None

            # get frame range with handles for representation range
            frame_start_handle = frame_start - handle_start
            source_duration_handles = (

@@ -117,8 +131,20 @@ class ExtractSubsetResources(openpype.api.Extractor):
            in_mark = (source_start_handles - source_first_frame) + 1
            out_mark = in_mark + source_duration_handles

            # make test for type of preset and available reel_clip
            if (
                not reel_clip
                and export_type != "Sequence Publish"
            ):
                self.log.warning((
                    "Skipping preset {}. Not available "
                    "reel clip for {}").format(
                        preset_file, segment_name
                ))
                continue

            # by default export source clips
            exporting_clip = clip
            exporting_clip = reel_clip

            if export_type == "Sequence Publish":
                # change export clip to sequence

@@ -150,7 +176,7 @@ class ExtractSubsetResources(openpype.api.Extractor):

                if export_type == "Sequence Publish":
                    # only keep visible layer where instance segment is child
                    self.hide_other_tracks(duplclip, s_track_name)
                    self.hide_others(duplclip, segment_name, s_track_name)

                # validate xml preset file is filled
                if preset_file == "":

@@ -211,7 +237,9 @@ class ExtractSubsetResources(openpype.api.Extractor):
                "tags": repre_tags,
                "data": {
                    "colorspace": color_out
                }
                },
                "load_to_batch_group": load_to_batch_group,
                "batch_group_loader_name": batch_group_loader_name
            }

            # collect all available content of export dir

@@ -322,18 +350,26 @@ class ExtractSubsetResources(openpype.api.Extractor):

        return new_stage_dir, new_files_list

    def hide_other_tracks(self, sequence_clip, track_name):
    def hide_others(self, sequence_clip, segment_name, track_name):
        """Helper method used only if sequence clip is used

        Args:
            sequence_clip (flame.Clip): sequence clip
            segment_name (str): segment name
            track_name (str): track name
        """
        # create otio tracks and clips
        for ver in sequence_clip.versions:
            for track in ver.tracks:
                if len(track.segments) == 0 and track.hidden:
                if len(track.segments) == 0 and track.hidden.get_value():
                    continue

                # hide tracks which are not parent track
                if track.name.get_value() != track_name:
                    track.hidden = True
                    continue

                # hidde all other segments
                for segment in track.segments:
                    if segment.name.get_value() != segment_name:
                        segment.hidden = True
openpype/hosts/flame/plugins/publish/integrate_batch_group.py (new file, 328 lines)

@@ -0,0 +1,328 @@
import os
import copy
from collections import OrderedDict
from pprint import pformat
import pyblish
from openpype.lib import get_workdir
import openpype.hosts.flame.api as opfapi
import openpype.pipeline as op_pipeline


class IntegrateBatchGroup(pyblish.api.InstancePlugin):
    """Integrate published shot to batch group"""

    order = pyblish.api.IntegratorOrder + 0.45
    label = "Integrate Batch Groups"
    hosts = ["flame"]
    families = ["clip"]

    # settings
    default_loader = "LoadClip"

    def process(self, instance):
        add_tasks = instance.data["flameAddTasks"]

        # iterate all tasks from settings
        for task_data in add_tasks:
            # exclude batch group
            if not task_data["create_batch_group"]:
                continue

            # create or get already created batch group
            bgroup = self._get_batch_group(instance, task_data)

            # add batch group content
            all_batch_nodes = self._add_nodes_to_batch_with_links(
                instance, task_data, bgroup)

            for name, node in all_batch_nodes.items():
                self.log.debug("name: {}, dir: {}".format(
                    name, dir(node)
                ))
                self.log.debug("__ node.attributes: {}".format(
                    node.attributes
                ))

            # load plate to batch group
            self.log.info("Loading subset `{}` into batch `{}`".format(
                instance.data["subset"], bgroup.name.get_value()
            ))
            self._load_clip_to_context(instance, bgroup)

    def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
        # get write file node properties > OrederDict because order does mater
        write_pref_data = self._get_write_prefs(instance, task_data)

        batch_nodes = [
            {
                "type": "comp",
                "properties": {},
                "id": "comp_node01"
            },
            {
                "type": "Write File",
                "properties": write_pref_data,
                "id": "write_file_node01"
            }
        ]
        batch_links = [
            {
                "from_node": {
                    "id": "comp_node01",
                    "connector": "Result"
                },
                "to_node": {
                    "id": "write_file_node01",
                    "connector": "Front"
                }
            }
        ]

        # add nodes into batch group
        return opfapi.create_batch_group_conent(
            batch_nodes, batch_links, batch_group)

    def _load_clip_to_context(self, instance, bgroup):
        # get all loaders for host
        loaders_by_name = {
            loader.__name__: loader
            for loader in op_pipeline.discover_loader_plugins()
        }

        # get all published representations
        published_representations = instance.data["published_representations"]
        repres_db_id_by_name = {
            repre_info["representation"]["name"]: repre_id
            for repre_id, repre_info in published_representations.items()
        }

        # get all loadable representations
        repres_by_name = {
            repre["name"]: repre for repre in instance.data["representations"]
        }

        # get repre_id for the loadable representations
        loader_name_by_repre_id = {
            repres_db_id_by_name[repr_name]: {
                "loader": repr_data["batch_group_loader_name"],
                # add repre data for exception logging
                "_repre_data": repr_data
            }
            for repr_name, repr_data in repres_by_name.items()
            if repr_data.get("load_to_batch_group")
        }

        self.log.debug("__ loader_name_by_repre_id: {}".format(pformat(
            loader_name_by_repre_id)))

        # get representation context from the repre_id
        repre_contexts = op_pipeline.load.get_repres_contexts(
            loader_name_by_repre_id.keys())

        self.log.debug("__ repre_contexts: {}".format(pformat(
            repre_contexts)))

        # loop all returned repres from repre_context dict
        for repre_id, repre_context in repre_contexts.items():
            self.log.debug("__ repre_id: {}".format(repre_id))
            # get loader name by representation id
            loader_name = (
                loader_name_by_repre_id[repre_id]["loader"]
                # if nothing was added to settings fallback to default
                or self.default_loader
            )

            # get loader plugin
            loader_plugin = loaders_by_name.get(loader_name)
            if loader_plugin:
                # load to flame by representation context
                try:
                    op_pipeline.load.load_with_repre_context(
                        loader_plugin, repre_context, **{
                            "data": {
                                "workdir": self.task_workdir,
                                "batch": bgroup
                            }
                        })
                except op_pipeline.load.IncompatibleLoaderError as msg:
                    self.log.error(
                        "Check allowed representations for Loader `{}` "
                        "in settings > error: {}".format(
                            loader_plugin.__name__, msg))
                    self.log.error(
                        "Representaton context >>{}<< is not compatible "
                        "with loader `{}`".format(
                            pformat(repre_context), loader_plugin.__name__
                        )
                    )
            else:
                self.log.warning(
                    "Something got wrong and there is not Loader found for "
                    "following data: {}".format(
                        pformat(loader_name_by_repre_id))
                )

    def _get_batch_group(self, instance, task_data):
        frame_start = instance.data["frameStart"]
        frame_end = instance.data["frameEnd"]
        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        frame_duration = (frame_end - frame_start) + 1
        asset_name = instance.data["asset"]

        task_name = task_data["name"]
        batchgroup_name = "{}_{}".format(asset_name, task_name)

        batch_data = {
            "shematic_reels": [
                "OP_LoadedReel"
            ],
            "handleStart": handle_start,
            "handleEnd": handle_end
        }
        self.log.debug(
            "__ batch_data: {}".format(pformat(batch_data)))

        # check if the batch group already exists
        bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name)

        if not bgroup:
            self.log.info(
                "Creating new batch group: {}".format(batchgroup_name))
            # create batch with utils
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                **batch_data
            )

        else:
            self.log.info(
                "Updating batch group: {}".format(batchgroup_name))
            # update already created batch group
            bgroup = opfapi.create_batch_group(
                batchgroup_name,
                frame_start,
                frame_duration,
                update_batch_group=bgroup,
                **batch_data
            )

        return bgroup

    def _get_anamoty_data_with_current_task(self, instance, task_data):
        anatomy_data = copy.deepcopy(instance.data["anatomyData"])
        task_name = task_data["name"]
        task_type = task_data["type"]
        anatomy_obj = instance.context.data["anatomy"]

        # update task data in anatomy data
        project_task_types = anatomy_obj["tasks"]
        task_code = project_task_types.get(task_type, {}).get("short_name")
        anatomy_data.update({
            "task": {
                "name": task_name,
                "type": task_type,
                "short": task_code
            }
        })
        return anatomy_data

    def _get_write_prefs(self, instance, task_data):
        # update task in anatomy data
        anatomy_data = self._get_anamoty_data_with_current_task(
            instance, task_data)

        self.task_workdir = self._get_shot_task_dir_path(
            instance, task_data)
        self.log.debug("__ task_workdir: {}".format(
            self.task_workdir))

        # TODO: this might be done with template in settings
        render_dir_path = os.path.join(
            self.task_workdir, "render", "flame")

        if not os.path.exists(render_dir_path):
            os.makedirs(render_dir_path, mode=0o777)

        # TODO: add most of these to `imageio/flame/batch/write_node`
        name = "{project[code]}_{asset}_{task[name]}".format(
            **anatomy_data
        )

        # The path attribute where the rendered clip is exported
        # /path/to/file.[0001-0010].exr
        media_path = render_dir_path
        # name of file represented by tokens
        media_path_pattern = (
            "<name>_v<iteration###>/<name>_v<iteration###>.<frame><ext>")
        # The Create Open Clip attribute of the Write File node. \
        # Determines if an Open Clip is created by the Write File node.
        create_clip = True
        # The Include Setup attribute of the Write File node.
        # Determines if a Batch Setup file is created by the Write File node.
        include_setup = True
        # The path attribute where the Open Clip file is exported by
        # the Write File node.
        create_clip_path = "<name>"
        # The path attribute where the Batch setup file
        # is exported by the Write File node.
        include_setup_path = "./<name>_v<iteration###>"
        # The file type for the files written by the Write File node.
        # Setting this attribute also overwrites format_extension,
        # bit_depth and compress_mode to match the defaults for
        # this file type.
        file_type = "OpenEXR"
        # The file extension for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type
        # is set. If you require a specific extension, you must
        # set format_extension after setting file_type.
        format_extension = "exr"
        # The bit depth for the files written by the Write File node.
        # This attribute resets to match file_type whenever file_type is set.
        bit_depth = "16"
        # The compressing attribute for the files exported by the Write
        # File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff'
        compress = True
        # The compression format attribute for the specific File Types
        # export by the Write File node. You must set compress_mode
        # after setting file_type.
        compress_mode = "DWAB"
        # The frame index mode attribute of the Write File node.
        # Value range: `Use Timecode` or `Use Start Frame`
        frame_index_mode = "Use Start Frame"
        frame_padding = 6
        # The versioning mode of the Open Clip exported by the Write File node.
        # Only available if create_clip = True.
        version_mode = "Follow Iteration"
        version_name = "v<version>"
        version_padding = 3

        # need to make sure the order of keys is correct
        return OrderedDict((
            ("name", name),
            ("media_path", media_path),
            ("media_path_pattern", media_path_pattern),
            ("create_clip", create_clip),
            ("include_setup", include_setup),
            ("create_clip_path", create_clip_path),
            ("include_setup_path", include_setup_path),
            ("file_type", file_type),
            ("format_extension", format_extension),
            ("bit_depth", bit_depth),
            ("compress", compress),
            ("compress_mode", compress_mode),
            ("frame_index_mode", frame_index_mode),
            ("frame_padding", frame_padding),
            ("version_mode", version_mode),
            ("version_name", version_name),
            ("version_padding", version_padding)
        ))

    def _get_shot_task_dir_path(self, instance, task_data):
        project_doc = instance.data["projectEntity"]
        asset_entity = instance.data["assetEntity"]

        return get_workdir(
            project_doc, asset_entity, task_data["name"], "flame")
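For orientation, a sketch of the per-task data this integrator consumes, matching the add_tasks defaults added to the settings below; the asset name is illustrative:

# instance.data["flameAddTasks"], filled by CollectTimelineInstances
flame_add_tasks = [
    {
        "name": "compositing",
        "type": "Compositing",
        "create_batch_group": True,
    }
]
# for each enabled entry the plugin creates or updates a "<asset>_<task>"
# batch group (e.g. "sh010_compositing"), wires comp -> Write File, and then
# loads matching representations through the loader named in
# "batch_group_loader_name" (falling back to default_loader = "LoadClip").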
@@ -9,6 +9,8 @@ class ValidateSourceClip(pyblish.api.InstancePlugin):
    label = "Validate Source Clip"
    hosts = ["flame"]
    families = ["clip"]
    optional = True
    active = False

    def process(self, instance):
        flame_source_clip = instance.data["flameSourceClip"]
@@ -20,6 +20,37 @@
        }
    },
    "publish": {
        "CollectTimelineInstances": {
            "xml_preset_attrs_from_comments": [
                {
                    "name": "width",
                    "type": "number"
                },
                {
                    "name": "height",
                    "type": "number"
                },
                {
                    "name": "pixelRatio",
                    "type": "float"
                },
                {
                    "name": "resizeType",
                    "type": "string"
                },
                {
                    "name": "resizeFilter",
                    "type": "string"
                }
            ],
            "add_tasks": [
                {
                    "name": "compositing",
                    "type": "Compositing",
                    "create_batch_group": true
                }
            ]
        },
        "ExtractSubsetResources": {
            "keep_original_representation": false,
            "export_presets_mapping": {

@@ -31,7 +62,9 @@
                "ignore_comment_attrs": false,
                "colorspace_out": "ACES - ACEScg",
                "representation_add_range": true,
                "representation_tags": []
                "representation_tags": [],
                "load_to_batch_group": true,
                "batch_group_loader_name": "LoadClip"
            }
        }
    }

@@ -58,7 +91,29 @@
        ],
        "reel_group_name": "OpenPype_Reels",
        "reel_name": "Loaded",
        "clip_name_template": "{asset}_{subset}_{representation}"
        "clip_name_template": "{asset}_{subset}_{output}"
    },
    "LoadClipBatch": {
        "enabled": true,
        "families": [
            "render2d",
            "source",
            "plate",
            "render",
            "review"
        ],
        "representations": [
            "exr",
            "dpx",
            "jpg",
            "jpeg",
            "png",
            "h264",
            "mov",
            "mp4"
        ],
        "reel_name": "OP_LoadedReel",
        "clip_name_template": "{asset}_{subset}_{output}"
    }
}
@@ -136,6 +136,87 @@
    "key": "publish",
    "label": "Publish plugins",
    "children": [
        {
            "type": "dict",
            "collapsible": true,
            "key": "CollectTimelineInstances",
            "label": "Collect Timeline Instances",
            "is_group": true,
            "children": [
                {
                    "type": "collapsible-wrap",
                    "label": "XML presets attributes parsable from segment comments",
                    "collapsible": true,
                    "collapsed": true,
                    "children": [
                        {
                            "type": "list",
                            "key": "xml_preset_attrs_from_comments",
                            "object_type": {
                                "type": "dict",
                                "children": [
                                    {
                                        "type": "text",
                                        "key": "name",
                                        "label": "Attribute name"
                                    },
                                    {
                                        "key": "type",
                                        "label": "Attribute type",
                                        "type": "enum",
                                        "default": "number",
                                        "enum_items": [
                                            {
                                                "number": "number"
                                            },
                                            {
                                                "float": "float"
                                            },
                                            {
                                                "string": "string"
                                            }
                                        ]
                                    }
                                ]
                            }
                        }
                    ]
                },
                {
                    "type": "collapsible-wrap",
                    "label": "Add tasks",
                    "collapsible": true,
                    "collapsed": true,
                    "children": [
                        {
                            "type": "list",
                            "key": "add_tasks",
                            "object_type": {
                                "type": "dict",
                                "children": [
                                    {
                                        "type": "text",
                                        "key": "name",
                                        "label": "Task name"
                                    },
                                    {
                                        "key": "type",
                                        "label": "Task type",
                                        "multiselection": false,
                                        "type": "task-types-enum"
                                    },
                                    {
                                        "type": "boolean",
                                        "key": "create_batch_group",
                                        "label": "Create batch group"
                                    }
                                ]
                            }
                        }
                    ]
                }
            ]
        },
        {
            "type": "dict",
            "collapsible": true,

@@ -221,6 +302,20 @@
            "type": "text",
            "multiline": false
        }
    },
    {
        "type": "separator"
    },
    {
        "type": "boolean",
        "key": "load_to_batch_group",
        "label": "Load to batch group reel",
        "default": false
    },
    {
        "type": "text",
        "key": "batch_group_loader_name",
        "label": "Use loader name"
    }
]
}

@@ -281,6 +376,48 @@
            "label": "Clip name template"
        }
    ]
},
{
    "type": "dict",
    "collapsible": true,
    "key": "LoadClipBatch",
    "label": "Load as clip to current batch",
    "checkbox_key": "enabled",
    "children": [
        {
            "type": "boolean",
            "key": "enabled",
            "label": "Enabled"
        },
        {
            "type": "list",
            "key": "families",
            "label": "Families",
            "object_type": "text"
        },
        {
            "type": "list",
            "key": "representations",
            "label": "Representations",
            "object_type": "text"
        },
        {
            "type": "separator"
        },
        {
            "type": "text",
            "key": "reel_name",
            "label": "Reel name"
        },
        {
            "type": "separator"
        },
        {
            "type": "text",
            "key": "clip_name_template",
            "label": "Clip name template"
        }
    ]
}
]
}