Merge pull request #842 from pypeclub/feature/resolve-create-publish-new-way-convert-pype3

DaVinci Resolve with OTIO publishing (pype3)
Milan Kolar, 2021-01-08 11:33:06 +01:00, committed by GitHub
commit a1041a3182
43 changed files with 3555 additions and 275 deletions

View file

@@ -11,7 +11,7 @@ class LaunchWithWindowsShell(PreLaunchHook):
"""
order = 10
app_groups = ["nuke", "nukex", "hiero", "nukestudio"]
app_groups = ["resolve", "nuke", "nukex", "hiero", "nukestudio"]
platforms = ["windows"]
def execute(self):

View file

@@ -14,8 +14,10 @@ class ResolvePrelaunch(PreLaunchHook):
app_groups = ["resolve"]
def execute(self):
# TODO: add OTIO installation from `pype/requirements.py`
# making sure python 3.6 is installed at provided path
py36_dir = os.path.normpath(self.env.get("PYTHON36_RESOLVE", ""))
py36_dir = os.path.normpath(
self.launch_context.env.get("PYTHON36_RESOLVE", ""))
assert os.path.isdir(py36_dir), (
"Python 3.6 is not installed at the provided folder path. Either "
"make sure the `environments\resolve.json` is having correctly "
@@ -23,11 +25,10 @@ class ResolvePrelaunch(PreLaunchHook):
f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`"
)
self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...")
self.env["PYTHON36_RESOLVE"] = py36_dir
# setting utility scripts dir for scripts syncing
us_dir = os.path.normpath(
self.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
)
assert os.path.isdir(us_dir), (
"Resolve utility script dir does not exists. Either make sure "
@@ -38,8 +39,9 @@ class ResolvePrelaunch(PreLaunchHook):
self.log.debug(f"-- us_dir: `{us_dir}`")
# correctly format path for pre python script
pre_py_sc = os.path.normpath(self.env.get("PRE_PYTHON_SCRIPT", ""))
self.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
pre_py_sc = os.path.normpath(
self.launch_context.env.get("PRE_PYTHON_SCRIPT", ""))
self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...")
try:
__import__("pype.hosts.resolve")
@@ -55,4 +57,4 @@ class ResolvePrelaunch(PreLaunchHook):
# Resolve Setup integration
importlib.reload(utils)
self.log.debug(f"-- utils.__file__: `{utils.__file__}`")
utils.setup(self.env)
utils.setup(self.launch_context.env)

View file

@@ -14,20 +14,32 @@ from .pipeline import (
)
from .lib import (
publish_clip_color,
get_project_manager,
get_current_project,
get_current_sequence,
get_video_track_names,
get_current_track_items,
get_track_item_pype_tag,
set_track_item_pype_tag,
imprint,
set_publish_attribute,
get_publish_attribute,
create_current_sequence_media_bin,
create_compound_clip,
swap_clips,
get_pype_clip_metadata,
set_project_manager_to_folder_name
set_project_manager_to_folder_name,
get_reformated_path,
get_otio_clip_instance_data
)
from .menu import launch_pype_menu
from .plugin import Creator
from .plugin import (
Creator,
PublishClip
)
from .workio import (
open_file,
@@ -57,21 +69,31 @@ __all__ = [
"get_resolve_module",
# lib
"publish_clip_color",
"get_project_manager",
"get_current_project",
"get_current_sequence",
"get_video_track_names",
"get_current_track_items",
"get_track_item_pype_tag",
"set_track_item_pype_tag",
"imprint",
"set_publish_attribute",
"get_publish_attribute",
"create_current_sequence_media_bin",
"create_compound_clip",
"swap_clips",
"get_pype_clip_metadata",
"set_project_manager_to_folder_name",
"get_reformated_path",
"get_otio_clip_instance_data",
# menu
"launch_pype_menu",
# plugin
"Creator",
"PublishClip",
# workio
"open_file",

View file

@@ -1,31 +1,47 @@
import sys
import json
import re
from opentimelineio import opentime
from pprint import pformat
import pype
from .otio import davinci_export as otio_export
from pype.api import Logger
log = Logger().get_logger(__name__, "resolve")
self = sys.modules[__name__]
self.pm = None
self.project_manager = None
# Pype sequential rename variables
self.rename_index = 0
self.rename_add = 0
self.pype_metadata_key = "VFX Notes"
self.publish_clip_color = "Pink"
self.pype_marker_workflow = True
# Pype compound clip workflow variable
self.pype_tag_name = "VFX Notes"
# Pype marker workflow variables
self.pype_marker_name = "PYPEDATA"
self.pype_marker_duration = 1
self.pype_marker_color = "Mint"
self.temp_marker_frame = None
def get_project_manager():
from . import bmdvr
if not self.pm:
self.pm = bmdvr.GetProjectManager()
return self.pm
if not self.project_manager:
self.project_manager = bmdvr.GetProjectManager()
return self.project_manager
def get_current_project():
# initialize project manager
get_project_manager()
return self.pm.GetCurrentProject()
return self.project_manager.GetCurrentProject()
def get_current_sequence():
@@ -35,6 +51,22 @@ def get_current_sequence():
return project.GetCurrentTimeline()
def get_video_track_names():
tracks = list()
track_type = "video"
sequence = get_current_sequence()
# get all tracks count filtered by track type
selected_track_count = sequence.GetTrackCount(track_type)
# loop all tracks and get items
for track_index in range(1, (int(selected_track_count) + 1)):
track_name = sequence.GetTrackName("video", track_index)
tracks.append(track_name)
return tracks
def get_current_track_items(
filter=False,
track_type=None,
@@ -77,13 +109,168 @@ def get_current_track_items(
if filter is True:
if selecting_color in ti_color:
selected_clips.append(data)
# ti.ClearClipColor()
else:
selected_clips.append(data)
return selected_clips
def get_track_item_pype_tag(track_item):
"""
Get pype track item tag created by creator or loader plugin.
Attributes:
track_item (resolve.TimelineItem): resolve api object
Returns:
dict: pype tag data (hierarchy, orig clip attributes)
"""
return_tag = None
if self.pype_marker_workflow:
return_tag = get_pype_marker(track_item)
else:
media_pool_item = track_item.GetMediaPoolItem()
# get all tags from track item
_tags = media_pool_item.GetMetadata()
if not _tags:
return None
for key, data in _tags.items():
# return only correct tag defined by global name
if key in self.pype_tag_name:
return_tag = json.loads(data)
return return_tag
def set_track_item_pype_tag(track_item, data=None):
"""
Set pype track item tag to input track_item.
Attributes:
track_item (resolve.TimelineItem): resolve api object
data (dict): data to store in the pype tag
Returns:
dict: json loaded data
"""
data = data or dict()
# get available pype tag if any
tag_data = get_track_item_pype_tag(track_item)
if self.pype_marker_workflow:
# delete tag as it is not updatable
if tag_data:
delete_pype_marker(track_item)
tag_data.update(data)
set_pype_marker(track_item, tag_data)
else:
if tag_data:
media_pool_item = track_item.GetMediaPoolItem()
# if pype tag exists then update it with input data
tag_data.update(data)
media_pool_item.SetMetadata(
self.pype_tag_name, json.dumps(tag_data))
else:
tag_data = data
# if no pype tag then create one from input data
# and add it to the input track item
track_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))
return tag_data
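# A minimal usage sketch of the tag round trip (values are illustrative,
# not part of the commit):
# set_track_item_pype_tag(track_item, {"asset": "sh010"})
# assert get_track_item_pype_tag(track_item)["asset"] == "sh010"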
def imprint(track_item, data=None):
"""
Adding `Avalon data` into a resolve track item tag.
Also including publish attribute into tag.
Arguments:
track_item (resolve.TimelineItem): resolve track item object
data (dict): Any data which needs to be imprinted
Examples:
data = {
'asset': 'sq020sh0280',
'family': 'render',
'subset': 'subsetMain'
}
"""
data = data or {}
set_track_item_pype_tag(track_item, data)
# add publish attribute
set_publish_attribute(track_item, True)
def set_publish_attribute(track_item, value):
""" Set Publish attribute in input Tag object
Attribute:
tag (hiero.core.Tag): a tag object
value (bool): True or False
"""
tag_data = get_track_item_pype_tag(track_item)
tag_data["publish"] = value
# set data to the publish attribute
set_track_item_pype_tag(track_item, tag_data)
def get_publish_attribute(track_item):
""" Get Publish attribute from input Tag object
Attribute:
tag (hiero.core.Tag): a tag object
value (bool): True or False
"""
tag_data = get_track_item_pype_tag(track_item)
return tag_data["publish"]
def set_pype_marker(track_item, tag_data):
source_start = track_item.GetLeftOffset()
item_duration = track_item.GetDuration()
frame = int(source_start + (item_duration / 2))
# marker attributes
frameId = (frame / 10) * 10
color = self.pype_marker_color
name = self.pype_marker_name
note = json.dumps(tag_data)
duration = (self.pype_marker_duration / 10) * 10
track_item.AddMarker(
frameId,
color,
name,
note,
duration
)
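# Illustrative numbers: a clip with source_start=0 and item_duration=100
# gets its pype marker placed at frame 50 (the clip midpoint) with a
# duration of 1 frame.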
def get_pype_marker(track_item):
track_item_markers = track_item.GetMarkers()
for marker_frame in track_item_markers:
note = track_item_markers[marker_frame]["note"]
color = track_item_markers[marker_frame]["color"]
name = track_item_markers[marker_frame]["name"]
print(f"_ marker data: {marker_frame} | {name} | {color} | {note}")
if name == self.pype_marker_name and color == self.pype_marker_color:
self.temp_marker_frame = marker_frame
return json.loads(note)
return dict()
def delete_pype_marker(track_item):
track_item.DeleteMarkerAtFrame(self.temp_marker_frame)
self.temp_marker_frame = None
def create_current_sequence_media_bin(sequence):
seq_name = sequence.GetName()
media_pool = get_current_project().GetMediaPool()
@@ -178,7 +365,7 @@ def get_name_with_data(clip_data, presets):
})
def create_compound_clip(clip_data, folder, rename=False, **kwargs):
def create_compound_clip(clip_data, name, folder):
"""
Convert timeline object into nested timeline object
@@ -186,8 +373,7 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
clip_data (dict): timeline item object packed into dict
with project, timeline (sequence)
folder (resolve.MediaPool.Folder): media pool folder object,
rename (bool)[optional]: renaming in sequence or not
kwargs (optional): additional data needed for rename=True (presets)
name (str): name for compound clip
Returns:
resolve.MediaPoolItem: media pool item with compound clip timeline(cct)
@@ -199,34 +385,12 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
# get details of objects
clip_item = clip["item"]
track = clip_data["track"]
mp = project.GetMediaPool()
# get clip attributes
clip_attributes = get_clip_attributes(clip_item)
print(f"_ clip_attributes: {pformat(clip_attributes)}")
if rename:
presets = kwargs.get("presets")
if presets:
name, data = get_name_with_data(clip_data, presets)
# add hierarchy data to clip attributes
clip_attributes.update(data)
else:
name = "{:0>3}_{:0>4}".format(
int(track["index"]), int(clip["index"]))
else:
# build name
clip_name_split = clip_item.GetName().split(".")
name = "_".join([
track["name"],
str(track["index"]),
clip_name_split[0],
str(clip["index"])]
)
# get metadata
mp_item = clip_item.GetMediaPoolItem()
mp_props = mp_item.GetClipProperty()
@@ -283,9 +447,9 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
project.SetCurrentTimeline(sq_origin)
# Add collected metadata and attributes to the compound clip:
if mp_item.GetMetadata(self.pype_metadata_key):
clip_attributes[self.pype_metadata_key] = mp_item.GetMetadata(
self.pype_metadata_key)[self.pype_metadata_key]
if mp_item.GetMetadata(self.pype_tag_name):
clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
self.pype_tag_name)[self.pype_tag_name]
# stringify
clip_attributes = json.dumps(clip_attributes)
@@ -295,7 +459,7 @@ def create_compound_clip(clip_data, folder, rename=False, **kwargs):
cct.SetMetadata(k, v)
# add metadata to cct
cct.SetMetadata(self.pype_metadata_key, clip_attributes)
cct.SetMetadata(self.pype_tag_name, clip_attributes)
# reset start timecode of the compound clip
cct.SetClipProperty("Start TC", mp_props["Start TC"])
@@ -314,7 +478,7 @@ def swap_clips(from_clip, to_clip, to_clip_name, to_in_frame, to_out_frame):
It will add a take and activate it for the input frame range
Args:
from_clip (resolve.mediaPoolItem)
from_clip (resolve.TimelineItem)
to_clip (resolve.mediaPoolItem)
to_clip_name (str): name of to_clip
to_in_frame (float): cut in frame, usually `GetLeftOffset()`
@@ -373,7 +537,7 @@ def get_pype_clip_metadata(clip):
mp_item = clip.GetMediaPoolItem()
metadata = mp_item.GetMetadata()
return metadata.get(self.pype_metadata_key)
return metadata.get(self.pype_tag_name)
def get_clip_attributes(clip):
@@ -424,16 +588,16 @@ def set_project_manager_to_folder_name(folder_name):
set_folder = False
# go back to root folder
if self.pm.GotoRootFolder():
if self.project_manager.GotoRootFolder():
log.info(f"Testing existing folder: {folder_name}")
folders = convert_resolve_list_type(
self.pm.GetFoldersInCurrentFolder())
self.project_manager.GetFoldersInCurrentFolder())
log.info(f"Testing existing folders: {folders}")
# get the first available folder object
# with the same name as in `folder_name` else return False
if next((f for f in folders if f in folder_name), False):
log.info(f"Found existing folder: {folder_name}")
set_folder = self.pm.OpenFolder(folder_name)
set_folder = self.project_manager.OpenFolder(folder_name)
if set_folder:
return True
@@ -441,11 +605,11 @@ def set_project_manager_to_folder_name(folder_name):
# if folder by name is not existent then create one
# go back to root folder
log.info(f"Folder `{folder_name}` not found and will be created")
if self.pm.GotoRootFolder():
if self.project_manager.GotoRootFolder():
try:
# create folder by given name
self.pm.CreateFolder(folder_name)
self.pm.OpenFolder(folder_name)
self.project_manager.CreateFolder(folder_name)
self.project_manager.OpenFolder(folder_name)
return True
except NameError as e:
log.error((f"Folder with name `{folder_name}` cannot be created!"
@@ -462,3 +626,80 @@ def convert_resolve_list_type(resolve_list):
"Input argument should be dict() type")
return [resolve_list[i] for i in sorted(resolve_list.keys())]
def get_reformated_path(path, padded=True):
"""
Return the path with its frame-range token converted to a printf-style
padding expression
Args:
path (str): path url or simple file name
padded (bool): return a padded expression (%04d) if True, else %d
Returns:
str: reformatted path
Example:
get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
"""
num_pattern = "(\\[\\d+\\-\\d+\\])"
padding_pattern = "(\\d+)(?=-)"
if "[" in path:
padding = len(re.findall(padding_pattern, path).pop())
if padded:
path = re.sub(num_pattern, f"%0{padding}d", path)
else:
path = re.sub(num_pattern, "%d", path)
return path
def create_otio_time_range_from_track_item_data(track_item_data):
track_item = track_item_data["clip"]["item"]
project = track_item_data["project"]
timeline = track_item_data["sequence"]
timeline_start = timeline.GetStartFrame()
frame_start = int(track_item.GetStart() - timeline_start)
frame_duration = int(track_item.GetDuration())
fps = project.GetSetting("timelineFrameRate")
return otio_export.create_otio_time_range(
frame_start, frame_duration, fps)
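# Expected shape of `track_item_data`, inferred from its usage in this
# module (illustrative only):
# {
#     "project": <resolve project>,
#     "sequence": <resolve timeline>,
#     "track": {"name": str, "index": int},
#     "clip": {"item": <resolve TimelineItem>, "index": int},
# }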
def get_otio_clip_instance_data(otio_timeline, track_item_data):
"""
Return otio objects for timeline, track and clip
Args:
track_item_data (dict): track_item_data from list returned by
resolve.get_current_track_items()
otio_timeline (otio.schema.Timeline): otio object
Returns:
dict: otio clip object
"""
track_item = track_item_data["clip"]["item"]
track_name = track_item_data["track"]["name"]
timeline_range = create_otio_time_range_from_track_item_data(
track_item_data)
for otio_clip in otio_timeline.each_clip():
parent_track_name = otio_clip.parent().name
parent_range = otio_clip.range_in_parent()
# compare against a distinctly named variable so the resolve track
# name from track_item_data is not shadowed
if parent_track_name != track_name:
continue
if otio_clip.name not in track_item.GetName():
continue
if pype.lib.is_overlapping_otio_ranges(
parent_range, timeline_range, strict=True):
# add pypedata marker to otio_clip metadata
for marker in otio_clip.markers:
if self.pype_marker_name in marker.name:
otio_clip.metadata.update(marker.metadata)
return {"otioClip": otio_clip}
return None

View file

@@ -4,6 +4,17 @@ QWidget {
font-size: 13px;
}
QComboBox {
border: 1px solid #090909;
background-color: #201f1f;
color: #ffffff;
}
QComboBox QAbstractItemView
{
color: white;
}
QPushButton {
border: 1px solid #090909;
background-color: #201f1f;

View file

View file

@@ -0,0 +1,324 @@
""" compatibility OpenTimelineIO 0.12.0 and older
"""
import os
import re
import sys
import json
import opentimelineio as otio
from . import utils
import clique
self = sys.modules[__name__]
self.track_types = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
self.project_fps = None
def create_otio_rational_time(frame, fps):
return otio.opentime.RationalTime(
float(frame),
float(fps)
)
def create_otio_time_range(start_frame, frame_duration, fps):
return otio.opentime.TimeRange(
start_time=create_otio_rational_time(start_frame, fps),
duration=create_otio_rational_time(frame_duration, fps)
)
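# e.g. a 50 frame range starting at frame 1001 in a 25 fps timeline
# (illustrative values):
# tr = create_otio_time_range(1001, 50, 25.0)
# tr.start_time.to_frames() == 1001 and tr.duration.to_frames() == 50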
def create_otio_reference(media_pool_item):
metadata = _get_metadata_media_pool_item(media_pool_item)
mp_clip_property = media_pool_item.GetClipProperty()
path = mp_clip_property["File Path"]
reformat_path = utils.get_reformated_path(path, padded=True)
padding = utils.get_padding_from_path(path)
if padding:
metadata.update({
"isSequence": True,
"padding": padding
})
# get clip property according to media type
mp_clip_property = media_pool_item.GetClipProperty()
fps = float(mp_clip_property["FPS"])
if mp_clip_property["Type"] == "Video":
frame_start = int(mp_clip_property["Start"])
frame_duration = int(mp_clip_property["Frames"])
else:
audio_duration = str(mp_clip_property["Duration"])
frame_start = 0
frame_duration = int(utils.timecode_to_frames(
audio_duration, float(fps)))
otio_ex_ref_item = None
if padding:
# if it is file sequence try to create `ImageSequenceReference`
# the installed OTIO might not support it, so fall back to the old way
try:
dirname, filename = os.path.split(path)
collection = clique.parse(filename, '{head}[{ranges}]{tail}')
padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
otio_ex_ref_item = otio.schema.ImageSequenceReference(
target_url_base=dirname + os.sep,
name_prefix=collection.format("{head}"),
name_suffix=collection.format("{tail}"),
start_frame=frame_start,
frame_zero_padding=padding_num,
rate=fps,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
except AttributeError:
pass
if not otio_ex_ref_item:
# in case old OTIO or video file create `ExternalReference`
otio_ex_ref_item = otio.schema.ExternalReference(
target_url=reformat_path,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
# add metadata to otio item
add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)
return otio_ex_ref_item
def create_otio_markers(track_item, fps):
track_item_markers = track_item.GetMarkers()
markers = []
for marker_frame in track_item_markers:
note = track_item_markers[marker_frame]["note"]
if "{" in note and "}" in note:
metadata = json.loads(note)
else:
metadata = {"note": note}
markers.append(
otio.schema.Marker(
name=track_item_markers[marker_frame]["name"],
marked_range=create_otio_time_range(
marker_frame,
track_item_markers[marker_frame]["duration"],
fps
),
color=track_item_markers[marker_frame]["color"].upper(),
metadata=metadata
)
)
return markers
def create_otio_clip(track_item):
media_pool_item = track_item.GetMediaPoolItem()
mp_clip_property = media_pool_item.GetClipProperty()
if not self.project_fps:
fps = mp_clip_property["FPS"]
else:
fps = self.project_fps
name = track_item.GetName()
media_reference = create_otio_reference(media_pool_item)
source_range = create_otio_time_range(
int(track_item.GetLeftOffset()),
int(track_item.GetDuration()),
fps
)
if mp_clip_property["Type"] == "Audio":
return_clips = list()
audio_channels = mp_clip_property["Audio Ch"]
for channel in range(0, int(audio_channels)):
clip = otio.schema.Clip(
name=f"{name}_{channel}",
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return_clips.append(clip)
return return_clips
else:
clip = otio.schema.Clip(
name=name,
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return clip
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
return otio.schema.Gap(
source_range=create_otio_time_range(
gap_start,
(clip_start - tl_start_frame) - gap_start,
fps
)
)
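# Illustrative numbers: with a timeline starting at frame 86400, a track
# already filled up to 50 frames (gap_start=50) and a clip starting at
# frame 86500 (clip_start), the created gap is (86500 - 86400) - 50 = 50
# frames long.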
def _create_otio_timeline(project, timeline, fps):
metadata = _get_timeline_metadata(project, timeline)
start_time = create_otio_rational_time(
timeline.GetStartFrame(), fps)
otio_timeline = otio.schema.Timeline(
name=timeline.GetName(),
global_start_time=start_time,
metadata=metadata
)
return otio_timeline
def _get_timeline_metadata(project, timeline):
media_pool = project.GetMediaPool()
root_folder = media_pool.GetRootFolder()
ls_folder = root_folder.GetClipList()
timeline = project.GetCurrentTimeline()
timeline_name = timeline.GetName()
for tl in ls_folder:
if tl.GetName() not in timeline_name:
continue
return _get_metadata_media_pool_item(tl)
def _get_metadata_media_pool_item(media_pool_item):
data = dict()
data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
clip_property = media_pool_item.GetClipProperty() or {}
for name, value in clip_property.items():
if "Resolution" in name and "" != value:
width, height = value.split("x")
data.update({
"width": int(width),
"height": int(height)
})
if "PAR" in name and "" != value:
try:
data.update({"pixelAspect": float(value)})
except ValueError:
if "Square" in value:
data.update({"pixelAspect": float(1)})
else:
data.update({"pixelAspect": float(1)})
return data
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=self.track_types[track_type]
)
def add_otio_gap(clip_start, otio_track, track_item, timeline):
# if gap between track start and clip start
if clip_start > otio_track.available_range().duration.value:
# create gap and add it to track
otio_track.append(
create_otio_gap(
otio_track.available_range().duration.value,
track_item.GetStart(),
timeline.GetStartFrame(),
self.project_fps
)
)
def add_otio_metadata(otio_item, media_pool_item, **kwargs):
mp_metadata = media_pool_item.GetMetadata()
# add additional metadata from kwargs
if kwargs:
mp_metadata.update(kwargs)
# add metadata to otio item metadata
for key, value in mp_metadata.items():
otio_item.metadata.update({key: value})
def create_otio_timeline(resolve_project):
# get current timeline
self.project_fps = resolve_project.GetSetting("timelineFrameRate")
timeline = resolve_project.GetCurrentTimeline()
# convert timeline to otio
otio_timeline = _create_otio_timeline(
resolve_project, timeline, self.project_fps)
# loop all defined track types
for track_type in list(self.track_types.keys()):
# get total track count
track_count = timeline.GetTrackCount(track_type)
# loop all tracks by track indexes
for track_index in range(1, int(track_count) + 1):
# get current track name
track_name = timeline.GetTrackName(track_type, track_index)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
# get all track items in current track
current_track_items = timeline.GetItemListInTrack(
track_type, track_index)
# loop available track items in current track items
for track_item in current_track_items:
# skip offline track items
if track_item.GetMediaPoolItem() is None:
continue
# calculate real clip start
clip_start = track_item.GetStart() - timeline.GetStartFrame()
add_otio_gap(
clip_start, otio_track, track_item, timeline)
# create otio clip and add it to track
otio_clip = create_otio_clip(track_item)
if not isinstance(otio_clip, list):
otio_track.append(otio_clip)
else:
for index, clip in enumerate(otio_clip):
if index == 0:
otio_track.append(clip)
else:
# add previous otio track to timeline
otio_timeline.tracks.append(otio_track)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
add_otio_gap(
clip_start, otio_track,
track_item, timeline)
otio_track.append(clip)
# add track to otio timeline
otio_timeline.tracks.append(otio_track)
return otio_timeline
def write_to_file(otio_timeline, path):
otio.adapters.write_to_file(otio_timeline, path)
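# A minimal export sketch, assuming a live Resolve scripting session
# (the output path is illustrative):
# import DaVinciResolveScript
# resolve = DaVinciResolveScript.scriptapp("Resolve")
# project = resolve.GetProjectManager().GetCurrentProject()
# otio_timeline = create_otio_timeline(project)
# write_to_file(otio_timeline, "/tmp/timeline.otio")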

View file

@@ -0,0 +1,108 @@
import sys
import json
import DaVinciResolveScript
import opentimelineio as otio
self = sys.modules[__name__]
self.resolve = DaVinciResolveScript.scriptapp('Resolve')
self.fusion = DaVinciResolveScript.scriptapp('Fusion')
self.project_manager = self.resolve.GetProjectManager()
self.current_project = self.project_manager.GetCurrentProject()
self.media_pool = self.current_project.GetMediaPool()
self.track_types = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
self.project_fps = None
def build_timeline(otio_timeline):
# TODO: build timeline in mediapool `otioImport` folder
# TODO: loop otio tracks and build them in the new timeline
for clip in otio_timeline.each_clip():
# TODO: create track item
print(clip.name)
print(clip.parent().name)
print(clip.range_in_parent())
def _build_track(otio_track):
# TODO: _build_track
pass
def _build_media_pool_item(otio_media_reference):
# TODO: _build_media_pool_item
pass
def _build_track_item(otio_clip):
# TODO: _build_track_item
pass
def _build_gap(otio_clip):
# TODO: _build_gap
pass
def _build_marker(track_item, otio_marker):
frame_start = otio_marker.marked_range.start_time.value
frame_duration = otio_marker.marked_range.duration.value
# marker attributes
frameId = (frame_start / 10) * 10
color = otio_marker.color
name = otio_marker.name
note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata)
duration = (frame_duration / 10) * 10
track_item.AddMarker(
frameId,
color,
name,
note,
duration
)
def _build_media_pool_folder(name):
"""
Returns folder with input name and sets it as current folder.
It will create new media bin if none is found in root media bin
Args:
name (str): name of bin
Returns:
resolve.api.MediaPool.Folder: media pool folder object
"""
root_folder = self.media_pool.GetRootFolder()
sub_folders = root_folder.GetSubFolderList()
testing_names = list()
for subfolder in sub_folders:
subf_name = subfolder.GetName()
if name in subf_name:
testing_names.append(subfolder)
else:
testing_names.append(False)
matching = next((f for f in testing_names if f is not False), None)
if not matching:
new_folder = self.media_pool.AddSubFolder(root_folder, name)
self.media_pool.SetCurrentFolder(new_folder)
else:
self.media_pool.SetCurrentFolder(matching)
return self.media_pool.GetCurrentFolder()
def read_from_file(otio_file):
otio_timeline = otio.adapters.read_from_file(otio_file)
build_timeline(otio_timeline)

View file

@@ -0,0 +1,63 @@
import re
import opentimelineio as otio
def timecode_to_frames(timecode, framerate):
rt = otio.opentime.from_timecode(timecode, framerate)
return int(otio.opentime.to_frames(rt))
def frames_to_timecode(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_timecode(rt)
def frames_to_secons(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_seconds(rt)
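# Hypothetical round trip of the helpers above at 24 fps:
# timecode_to_frames("00:00:01:00", 24.0)  -> 24
# frames_to_timecode(48, 24.0)             -> "00:00:02:00"
# frames_to_secons(48, 24.0)               -> 2.0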
def get_reformated_path(path, padded=True):
"""
Return the path with its frame-range token converted to a printf-style
padding expression
Args:
path (str): path url or simple file name
padded (bool): return a padded expression (%04d) if True, else %d
Returns:
str: reformatted path
Example:
get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
"""
num_pattern = "(\\[\\d+\\-\\d+\\])"
padding_pattern = "(\\d+)(?=-)"
if "[" in path:
padding = len(re.findall(padding_pattern, path).pop())
if padded:
path = re.sub(num_pattern, f"%0{padding}d", path)
else:
path = re.sub(num_pattern, "%d", path)
return path
def get_padding_from_path(path):
"""
Return padding number from DaVinci Resolve sequence path style
Args:
path (str): path url or simple file name
Returns:
int: padding number
Example:
get_padding_from_path("plate.[0001-1008].exr") > 4
"""
padding_pattern = "(\\d+)(?=-)"
if "[" in path:
return len(re.findall(padding_pattern, path).pop())
return None

View file

@@ -3,11 +3,15 @@ Basic avalon integration
"""
import os
import contextlib
from collections import OrderedDict
from avalon.tools import workfiles
from avalon import api as avalon
from avalon import schema
from avalon.pipeline import AVALON_CONTAINER_ID
from pyblish import api as pyblish
import pype
from pype.api import Logger
from . import lib
log = Logger().get_logger(__name__, "resolve")
@@ -57,6 +61,9 @@ def install():
avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# register callback for switching publishable
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
get_resolve_module()
@@ -79,30 +86,50 @@ def uninstall():
avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
avalon.deregister_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
# deregister callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def containerise(obj,
def containerise(track_item,
name,
namespace,
context,
loader=None,
data=None):
"""Bundle Resolve's object into an assembly and imprint it with metadata
"""Bundle Hiero's object into an assembly and imprint it with metadata
Containerisation enables a tracking of version, author and origin
for loaded assets.
Arguments:
obj (obj): Resolve's object to imprint as container
track_item (resolve.TimelineItem): object to imprint as container
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
context (dict): Asset information
loader (str, optional): Name of node used to produce this container.
Returns:
obj (obj): containerised object
track_item (resolve.TimelineItem): containerised object
"""
pass
data_imprint = OrderedDict({
"schema": "avalon-core:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": str(name),
"namespace": str(namespace),
"loader": str(loader),
"representation": str(context["representation"]["_id"]),
})
if data:
for k, v in data.items():
data_imprint.update({k: v})
print("_ data_imprint: {}".format(data_imprint))
lib.set_track_item_pype_tag(track_item, data_imprint)
return track_item
def ls():
@@ -115,20 +142,77 @@ def ls():
See the `container.json` schema for details on how it should look,
and the Maya equivalent, which is in `avalon.maya.pipeline`
"""
pass
# get all track items from current timeline
all_track_items = lib.get_current_track_items(filter=False)
for track_item_data in all_track_items:
track_item = track_item_data["clip"]["item"]
container = parse_container(track_item)
if container:
yield container
def parse_container(container):
"""Return the container node's full container data.
def parse_container(track_item, validate=True):
"""Return container data from track_item's pype tag.
Args:
container (str): A container node name.
track_item (resolve.TimelineItem): A containerised track item.
validate (bool, optional): validate data against the avalon schema
Returns:
dict: The container schema data for this container node.
dict: The container schema data for the containerised track item.
"""
pass
# convert tag metadata to normal keys names
data = lib.get_track_item_pype_tag(track_item)
if validate and data and data.get("schema"):
schema.validate(data)
if not isinstance(data, dict):
return
# if any required key is missing then return nothing
required = ['schema', 'id', 'name',
'namespace', 'loader', 'representation']
if not all(key in data for key in required):
return
container = {key: data[key] for key in required}
container["objectName"] = track_item.name()
# Store reference to the node object
container["_track_item"] = track_item
return container
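# Illustrative container dict returned by parse_container (all values
# hypothetical):
# {"schema": "avalon-core:container-2.0", "id": AVALON_CONTAINER_ID,
#  "name": "plateMain", "namespace": "sh010", "loader": "LoadClip",
#  "representation": "5f2c1e...", "objectName": "sh010_plate",
#  "_track_item": <resolve.TimelineItem>}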
def update_container(track_item, data=None):
"""Update container data to input track_item's pype tag.
Args:
track_item (resolve.TimelineItem): A containerised track item.
data (dict, optional): dictionary with data to be updated
Returns:
bool: True if container was updated correctly
"""
data = data or dict()
container = lib.get_track_item_pype_tag(track_item)
for _key, _value in container.items():
try:
container[_key] = data[_key]
except KeyError:
pass
log.info("Updating container: `{}`".format(track_item))
return bool(lib.set_track_item_pype_tag(track_item, container))
def launch_workfiles_app(*args):
@@ -163,3 +247,18 @@ def reset_selection():
"""Deselect all selected nodes
"""
pass
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""
log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
from pype.hosts.resolve import (
set_publish_attribute
)
# Whether instances should be passthrough based on new value
track_item = instance.data["item"]
set_publish_attribute(track_item, new_value)

View file

@@ -2,7 +2,7 @@ import re
from avalon import api
from pype.hosts import resolve
from avalon.vendor import qargparse
from pype.api import config
from . import lib
from Qt import QtWidgets, QtCore
@@ -12,7 +12,7 @@ class CreatorWidget(QtWidgets.QDialog):
# output items
items = dict()
def __init__(self, name, info, presets, parent=None):
def __init__(self, name, info, ui_inputs, parent=None):
super(CreatorWidget, self).__init__(parent)
self.setObjectName(name)
@@ -25,6 +25,7 @@ class CreatorWidget(QtWidgets.QDialog):
| QtCore.Qt.WindowStaysOnTopHint
)
self.setWindowTitle(name or "Pype Creator Input")
self.resize(500, 700)
# Where inputs and labels are set
self.content_widget = [QtWidgets.QWidget(self)]
@@ -35,14 +36,25 @@ class CreatorWidget(QtWidgets.QDialog):
# first add widget tag line
top_layout.addWidget(QtWidgets.QLabel(info))
top_layout.addWidget(Spacer(5, self))
# main dynamic layout
self.content_widget.append(QtWidgets.QWidget(self))
content_layout = QtWidgets.QFormLayout(self.content_widget[-1])
self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
self.scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAsNeeded)
self.scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_area.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOff)
self.scroll_area.setWidgetResizable(True)
self.content_widget.append(self.scroll_area)
scroll_widget = QtWidgets.QWidget(self)
in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
self.content_layout = [in_scroll_area]
# add preset data into input widget layout
self.items = self.add_presets_to_layout(content_layout, presets)
self.items = self.populate_widgets(ui_inputs)
self.scroll_area.setWidget(scroll_widget)
# Confirmation buttons
btns_widget = QtWidgets.QWidget(self)
@@ -79,20 +91,33 @@ class CreatorWidget(QtWidgets.QDialog):
self.result = None
self.close()
def value(self, data):
def value(self, data, new_data=None):
new_data = new_data or dict()
for k, v in data.items():
if isinstance(v, dict):
print(f"nested: {k}")
data[k] = self.value(v)
elif getattr(v, "value", None):
print(f"normal int: {k}")
result = v.value()
data[k] = result()
else:
print(f"normal text: {k}")
result = v.text()
data[k] = result()
return data
new_data[k] = {
"target": None,
"value": None
}
if v["type"] == "dict":
new_data[k]["target"] = v["target"]
new_data[k]["value"] = self.value(v["value"])
if v["type"] == "section":
new_data.pop(k)
new_data = self.value(v["value"], new_data)
elif getattr(v["value"], "currentText", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].currentText()
elif getattr(v["value"], "isChecked", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].isChecked()
elif getattr(v["value"], "value", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].value()
elif getattr(v["value"], "text", None):
new_data[k]["target"] = v["target"]
new_data[k]["value"] = v["value"].text()
return new_data
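# Illustrative shape of the dict returned by value() (keys and values
# hypothetical):
# {"clipName": {"target": "tag", "value": "shot_####"},
#  "vSyncOn": {"target": "tag", "value": True}}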
def camel_case_split(self, text):
matches = re.finditer(
@@ -124,41 +149,115 @@ class CreatorWidget(QtWidgets.QDialog):
for func, val in kwargs.items():
if getattr(item, func):
func_attr = getattr(item, func)
func_attr(val)
if isinstance(val, tuple):
func_attr(*val)
else:
func_attr(val)
# add to layout
layout.addRow(label, item)
return item
def add_presets_to_layout(self, content_layout, data):
def populate_widgets(self, data, content_layout=None):
"""
Populate widget from input dict.
Each plugin has its own set of widget rows defined in a dictionary;
each row value should have the following keys: `type`, `target`,
`label`, `order`, `value` and optionally also `toolTip`.
Args:
data (dict): widget rows or organized groups defined
by types `dict` or `section`
content_layout (QtWidgets.QFormLayout)[optional]: used when nesting
Returns:
dict: redefined data dict updated with created widgets
"""
content_layout = content_layout or self.content_layout[-1]
# fix order of process by defined order value
ordered_keys = list(data.keys())
for k, v in data.items():
if isinstance(v, dict):
try:
# try removing a key from index which should
# be filled with new
ordered_keys.pop(v["order"])
except IndexError:
pass
# add key into correct order
ordered_keys.insert(v["order"], k)
# process ordered
for k in ordered_keys:
v = data[k]
tool_tip = v.get("toolTip", "")
if v["type"] == "dict":
# adding spacer between sections
self.content_widget.append(QtWidgets.QWidget(self))
devider = QtWidgets.QVBoxLayout(self.content_widget[-1])
devider.addWidget(Spacer(5, self))
devider.setObjectName("Devider")
self.content_layout.append(QtWidgets.QWidget(self))
content_layout.addWidget(self.content_layout[-1])
self.content_layout[-1].setObjectName("sectionHeadline")
headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
headline.addWidget(Spacer(20, self))
headline.addWidget(QtWidgets.QLabel(v["label"]))
# adding nested layout with label
self.content_widget.append(QtWidgets.QWidget(self))
self.content_layout.append(QtWidgets.QWidget(self))
self.content_layout[-1].setObjectName("sectionContent")
nested_content_layout = QtWidgets.QFormLayout(
self.content_widget[-1])
self.content_layout[-1])
nested_content_layout.setObjectName("NestedContentLayout")
content_layout.addWidget(self.content_layout[-1])
# add nested key as label
self.create_row(nested_content_layout, "QLabel", k)
data[k] = self.add_presets_to_layout(nested_content_layout, v)
elif isinstance(v, str):
print(f"layout.str: {k}")
print(f"content_layout: {content_layout}")
data[k] = self.create_row(
content_layout, "QLineEdit", k, setText=v)
elif isinstance(v, int):
print(f"layout.int: {k}")
print(f"content_layout: {content_layout}")
data[k] = self.create_row(
content_layout, "QSpinBox", k, setValue=v)
data[k]["value"] = self.populate_widgets(
v["value"], nested_content_layout)
if v["type"] == "section":
# adding spacer between sections
self.content_layout.append(QtWidgets.QWidget(self))
content_layout.addWidget(self.content_layout[-1])
self.content_layout[-1].setObjectName("sectionHeadline")
headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
headline.addWidget(Spacer(20, self))
headline.addWidget(QtWidgets.QLabel(v["label"]))
# adding nested layout with label
self.content_layout.append(QtWidgets.QWidget(self))
self.content_layout[-1].setObjectName("sectionContent")
nested_content_layout = QtWidgets.QFormLayout(
self.content_layout[-1])
nested_content_layout.setObjectName("NestedContentLayout")
content_layout.addWidget(self.content_layout[-1])
# add nested key as label
data[k]["value"] = self.populate_widgets(
v["value"], nested_content_layout)
elif v["type"] == "QLineEdit":
data[k]["value"] = self.create_row(
content_layout, "QLineEdit", v["label"],
setText=v["value"], setToolTip=tool_tip)
elif v["type"] == "QComboBox":
data[k]["value"] = self.create_row(
content_layout, "QComboBox", v["label"],
addItems=v["value"], setToolTip=tool_tip)
elif v["type"] == "QCheckBox":
data[k]["value"] = self.create_row(
content_layout, "QCheckBox", v["label"],
setChecked=v["value"], setToolTip=tool_tip)
elif v["type"] == "QSpinBox":
data[k]["value"] = self.create_row(
content_layout, "QSpinBox", v["label"],
setRange=(0, 99999),
setValue=v["value"],
setToolTip=tool_tip)
return data
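# Illustrative ui_inputs row consumed by populate_widgets (values are
# hypothetical; keys per the docstring above):
# {"clipName": {"type": "QLineEdit", "label": "Clip name", "order": 0,
#               "target": "tag", "toolTip": "Name template",
#               "value": "shot_####"}}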
@@ -179,20 +278,6 @@ class Spacer(QtWidgets.QWidget):
self.setLayout(layout)
def get_reference_node_parents(ref):
"""Return all parent reference nodes of reference node
Args:
ref (str): reference node.
Returns:
list: The upstream parent reference nodes.
"""
parents = []
return parents
class SequenceLoader(api.Loader):
"""A basic SequenceLoader for Resolve
@@ -258,8 +343,12 @@ class Creator(api.Creator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
self.presets = config.get_presets()['plugins']["resolve"][
"create"].get(self.__class__.__name__, {})
from pype.api import get_current_project_settings
resolve_p_settings = get_current_project_settings().get("resolve")
self.presets = dict()
if resolve_p_settings:
self.presets = resolve_p_settings["create"].get(
self.__class__.__name__, {})
# adding basic current context resolve objects
self.project = resolve.get_current_project()
@@ -271,3 +360,310 @@ class Creator(api.Creator):
self.selected = resolve.get_current_track_items(filter=False)
self.widget = CreatorWidget
class PublishClip:
"""
Convert a track item to publishable instance
Args:
track_item (resolve.TimelineItem): resolve track item object
kwargs (optional): additional data needed for rename=True (presets)
Returns:
resolve.TimelineItem: resolve track item object with pype tag
"""
vertical_clip_match = dict()
tag_data = dict()
types = {
"shot": "shot",
"folder": "folder",
"episode": "episode",
"sequence": "sequence",
"track": "sequence",
}
# parents search pattern
parents_search_patern = r"\{([a-z]*?)\}"
# default templates for non-ui use
rename_default = False
hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
subset_name_default = "<track_name>"
review_track_default = "< none >"
subset_family_default = "plate"
count_from_default = 10
count_steps_default = 10
vertical_sync_default = False
driving_layer_default = ""
def __init__(self, cls, track_item_data, **kwargs):
# populate input cls attribute onto self.[attr]
self.__dict__.update(cls.__dict__)
# get main parent objects
self.track_item_data = track_item_data
self.track_item = track_item_data["clip"]["item"]
sequence_name = track_item_data["sequence"].GetName()
self.sequence_name = str(sequence_name).replace(" ", "_")
# track item (clip) main attributes
self.ti_name = self.track_item.GetName()
self.ti_index = int(track_item_data["clip"]["index"])
# get track name and index
track_name = track_item_data["track"]["name"]
self.track_name = str(track_name).replace(" ", "_")
self.track_index = int(track_item_data["track"]["index"])
# adding tag.family into tag
if kwargs.get("avalon"):
self.tag_data.update(kwargs["avalon"])
# adding ui inputs if any
self.ui_inputs = kwargs.get("ui_inputs", {})
# adding media pool folder if any
self.mp_folder = kwargs.get("mp_folder")
# populate default data before we get other attributes
self._populate_track_item_default_data()
# use all populated default data to create all important attributes
self._populate_attributes()
# create parents with correct types
self._create_parents()
def convert(self):
# solve track item data and add them to tag data
self._convert_to_tag_data()
# if track name is in review track name and also if driving track name
# is not in review track name: skip tag creation
if (self.track_name in self.review_layer) and (
self.driving_layer not in self.review_layer):
return
# deal with clip name
new_name = self.tag_data.pop("newClipName")
if self.rename:
self.tag_data["asset"] = new_name
else:
self.tag_data["asset"] = self.ti_name
if not lib.pype_marker_workflow:
# create compound clip workflow
lib.create_compound_clip(
self.track_item_data,
self.tag_data["asset"],
self.mp_folder
)
# add track_item_data selection to tag
self.tag_data.update({
"track_data": self.track_item_data["track"]
})
# create pype tag on track_item and add data
lib.imprint(self.track_item, self.tag_data)
return self.track_item
def _populate_track_item_default_data(self):
""" Populate default formating data from track item. """
self.track_item_default_data = {
"_folder_": "shots",
"_sequence_": self.sequence_name,
"_track_": self.track_name,
"_clip_": self.ti_name,
"_trackIndex_": self.track_index,
"_clipIndex_": self.ti_index
}
def _populate_attributes(self):
""" Populate main object attributes. """
# track item frame range and parent track name for vertical sync check
self.clip_in = int(self.track_item.GetStart())
self.clip_out = int(self.track_item.GetEnd())
# define ui inputs if non gui mode was used
self.shot_num = self.ti_index
print(
"____ self.shot_num: {}".format(self.shot_num))
# ui_inputs data or default values if gui was not used
self.rename = self.ui_inputs.get(
"clipRename", {}).get("value") or self.rename_default
self.clip_name = self.ui_inputs.get(
"clipName", {}).get("value") or self.clip_name_default
self.hierarchy = self.ui_inputs.get(
"hierarchy", {}).get("value") or self.hierarchy_default
self.hierarchy_data = self.ui_inputs.get(
"hierarchyData", {}).get("value") or \
self.track_item_default_data.copy()
self.count_from = self.ui_inputs.get(
"countFrom", {}).get("value") or self.count_from_default
self.count_steps = self.ui_inputs.get(
"countSteps", {}).get("value") or self.count_steps_default
self.subset_name = self.ui_inputs.get(
"subsetName", {}).get("value") or self.subset_name_default
self.subset_family = self.ui_inputs.get(
"subsetFamily", {}).get("value") or self.subset_family_default
self.vertical_sync = self.ui_inputs.get(
"vSyncOn", {}).get("value") or self.vertical_sync_default
self.driving_layer = self.ui_inputs.get(
"vSyncTrack", {}).get("value") or self.driving_layer_default
self.review_track = self.ui_inputs.get(
"reviewTrack", {}).get("value") or self.review_track_default
# build subset name from layer name
if self.subset_name == "<track_name>":
self.subset_name = self.track_name
# create subset for publishing
self.subset = self.subset_family + self.subset_name.capitalize()
def _replace_hash_to_expression(self, name, text):
""" Replace hash with number in correct padding. """
_spl = text.split("#")
_len = (len(_spl) - 1)
_repl = "{{{0}:0>{1}}}".format(name, _len)
new_text = text.replace(("#" * _len), _repl)
return new_text
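# Illustrative example:
# _replace_hash_to_expression("shot", "sh####") -> "sh{shot:0>4}"
# which later formats as "sh{shot:0>4}".format(shot=10) -> "sh0010"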
def _convert_to_tag_data(self):
""" Convert internal data to tag data.
Populating the tag data into internal variable self.tag_data
"""
# define vertical sync attributes
master_layer = True
self.review_layer = ""
if self.vertical_sync:
# check if track name is not in driving layer
if self.track_name not in self.driving_layer:
# if it is not, then this is not the master (driving) layer
master_layer = False
# increasing steps by index of rename iteration
self.count_steps *= self.rename_index
hierarchy_formating_data = dict()
_data = self.track_item_default_data.copy()
if self.ui_inputs:
# adding tag metadata from ui
for _k, _v in self.ui_inputs.items():
if _v["target"] == "tag":
self.tag_data[_k] = _v["value"]
# driving layer is set as positive match
if master_layer or self.vertical_sync:
# mark review layer
if self.review_track and (
self.review_track not in self.review_track_default):
# if review layer is defined and not the same as default
self.review_layer = self.review_track
# shot num calculate
if self.rename_index == 0:
self.shot_num = self.count_from
else:
self.shot_num = self.count_from + self.count_steps
# clip name sequence number
_data.update({"shot": self.shot_num})
# convert any # in text to a pythonic padding expression
for _k, _v in self.hierarchy_data.items():
if "#" not in _v["value"]:
continue
self.hierarchy_data[
_k]["value"] = self._replace_hash_to_expression(
_k, _v["value"])
# fill up pythonic expressions in hierarchy data
for k, _v in self.hierarchy_data.items():
hierarchy_formating_data[k] = _v["value"].format(**_data)
else:
# if no gui mode then just pass default data
hierarchy_formating_data = self.hierarchy_data
tag_hierarchy_data = self._solve_tag_hierarchy_data(
hierarchy_formating_data
)
tag_hierarchy_data.update({"masterLayer": True})
if master_layer and self.vertical_sync:
# tag_hierarchy_data.update({"masterLayer": True})
self.vertical_clip_match.update({
(self.clip_in, self.clip_out): tag_hierarchy_data
})
if not master_layer and self.vertical_sync:
# driving layer is set as negative match
for (_in, _out), master_data in self.vertical_clip_match.items():
master_data.update({"masterLayer": False})
if _in == self.clip_in and _out == self.clip_out:
data_subset = master_data["subset"]
# add track index in case duplicity of names in master data
if self.subset in data_subset:
master_data["subset"] = self.subset + str(
self.track_index)
# in case track name and subset name is the same then add
if self.subset_name == self.track_name:
master_data["subset"] = self.subset
# assign data to return hierarchy data to tag
tag_hierarchy_data = master_data
# add data to return data dict
self.tag_data.update(tag_hierarchy_data)
if master_layer and self.review_layer:
self.tag_data.update({"reviewTrack": self.review_layer})
def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
""" Solve tag data from hierarchy data and templates. """
# fill up clip name and hierarchy keys
hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
clip_name_filled = self.clip_name.format(**hierarchy_formating_data)
return {
"newClipName": clip_name_filled,
"hierarchy": hierarchy_filled,
"parents": self.parents,
"hierarchyData": hierarchy_formating_data,
"subset": self.subset,
"family": self.subset_family,
"families": ["clip"]
}
def _convert_to_entity(self, key):
""" Converting input key to key with type. """
# convert to entity type
entity_type = self.types.get(key, None)
assert entity_type, "Missing entity type for `{}`".format(
key
)
return {
"entity_type": entity_type,
"entity_name": self.hierarchy_data[key]["value"].format(
**self.track_item_default_data
)
}
def _create_parents(self):
""" Create parents and return it in list. """
self.parents = list()
patern = re.compile(self.parents_search_patern)
par_split = [patern.findall(t).pop()
for t in self.hierarchy.split("/")]
for key in par_split:
parent = self._convert_to_entity(key)
self.parents.append(parent)

View file

@@ -0,0 +1,134 @@
#!/usr/bin/env python
# TODO: convert this script to be usable with PYPE
"""
Example DaVinci Resolve script:
Load a still from DRX file, apply the still to all clips in all timelines.
Set render format and codec, add render jobs for all timelines, render
to specified path and wait for rendering completion.
Once render is complete, delete all jobs
"""
# cloned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py # noqa
from python_get_resolve import GetResolve
import sys
import time
def AddTimelineToRender(project, timeline, presetName,
targetDirectory, renderFormat, renderCodec):
project.SetCurrentTimeline(timeline)
project.LoadRenderPreset(presetName)
if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec):
return False
project.SetRenderSettings(
{"SelectAllFrames": 1, "TargetDir": targetDirectory})
return project.AddRenderJob()
def RenderAllTimelines(resolve, presetName, targetDirectory,
renderFormat, renderCodec):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
if not project:
return False
resolve.OpenPage("Deliver")
timelineCount = project.GetTimelineCount()
for index in range(0, int(timelineCount)):
if not AddTimelineToRender(
project,
project.GetTimelineByIndex(index + 1),
presetName,
targetDirectory,
renderFormat,
renderCodec):
return False
return project.StartRendering()
def IsRenderingInProgress(resolve):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
if not project:
return False
return project.IsRenderingInProgress()
def WaitForRenderingCompletion(resolve):
while IsRenderingInProgress(resolve):
time.sleep(1)
return
def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0):
trackCount = timeline.GetTrackCount("video")
clips = {}
for index in range(1, int(trackCount) + 1):
clips.update(timeline.GetItemsInTrack("video", index))
return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips)
def ApplyDRXToAllTimelines(resolve, path, gradeMode=0):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
if not project:
return False
timelineCount = project.GetTimelineCount()
for index in range(0, int(timelineCount)):
timeline = project.GetTimelineByIndex(index + 1)
project.SetCurrentTimeline(timeline)
if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode):
return False
return True
def DeleteAllRenderJobs(resolve):
projectManager = resolve.GetProjectManager()
project = projectManager.GetCurrentProject()
project.DeleteAllRenderJobs()
return
# Inputs:
# - DRX file to import grade still and apply it for clips
# - grade mode (0, 1 or 2)
# - preset name for rendering
# - render path
# - render format
# - render codec
if len(sys.argv) < 7:
print(
"input parameters for scripts are [drx file path] [grade mode] "
"[render preset name] [render path] [render format] [render codec]")
sys.exit()
drxPath = sys.argv[1]
gradeMode = sys.argv[2]
renderPresetName = sys.argv[3]
renderPath = sys.argv[4]
renderFormat = sys.argv[5]
renderCodec = sys.argv[6]
# Get currently open project
resolve = GetResolve()
if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode):
print("Unable to apply a still from drx file to all timelines")
sys.exit()
if not RenderAllTimelines(resolve, renderPresetName, renderPath,
renderFormat, renderCodec):
print("Unable to set all timelines for rendering")
sys.exit()
WaitForRenderingCompletion(resolve)
DeleteAllRenderJobs(resolve)
print("Rendering is completed.")

View file

@@ -0,0 +1,84 @@
#!/usr/bin/env python
import os
from pype.hosts.resolve.otio import davinci_export as otio_export
resolve = bmd.scriptapp("Resolve") # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager) # noqa
title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
{
"WindowTitle": "Export OTIO",
"ID": "OTIOwin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "exportfilebttn",
"Text": "Select Destination",
"Weight": 1.25,
"ToolTip": "Choose where to save the otio",
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "exportbttn",
"Text": "Export",
"Weight": 2,
"ToolTip": "Export the current timeline",
"Flat": False
}
)
]
)
]
)
itm = dlg.GetItems()
def _close_window(event):
disp.ExitLoop()
def _export_button(event):
pm = resolve.GetProjectManager()
project = pm.GetCurrentProject()
timeline = project.GetCurrentTimeline()
# create_otio_timeline expects the project object and reads the
# timeline frame rate itself (see davinci_export above)
otio_timeline = otio_export.create_otio_timeline(project)
otio_path = os.path.join(
itm["exportfilebttn"].Text,
timeline.GetName() + ".otio")
print(otio_path)
otio_export.write_to_file(
otio_timeline,
otio_path)
_close_window(None)
def _export_file_pressed(event):
selectedPath = fu.RequestDir(os.path.expanduser("~/Documents"))
itm["exportfilebttn"].Text = selectedPath
dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

View file

@@ -0,0 +1,72 @@
#!/usr/bin/env python
import os
from pype.hosts.resolve.otio import davinci_import as otio_import
resolve = bmd.scriptapp("Resolve") # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager) # noqa
title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
{
"WindowTitle": "Import OTIO",
"ID": "OTIOwin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "importOTIOfileButton",
"Text": "Select OTIO File Path",
"Weight": 1.25,
"ToolTip": "Choose otio file to import from",
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "importButton",
"Text": "Import",
"Weight": 2,
"ToolTip": "Import otio to new timeline",
"Flat": False
}
)
]
)
]
)
itm = dlg.GetItems()
def _close_window(event):
disp.ExitLoop()
def _import_button(event):
otio_import.read_from_file(itm["importOTIOfileButton"].Text)
_close_window(None)
def _import_file_pressed(event):
selected_path = fu.RequestFile(os.path.expanduser("~/Documents"))
itm["importOTIOfileButton"].Text = selected_path
dlg.On.OTIOwin.Close = _close_window
dlg.On.importOTIOfileButton.Clicked = _import_file_pressed
dlg.On.importButton.Clicked = _import_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

View file

@@ -0,0 +1,16 @@
#!/usr/bin/env python
import os
import sys
import pype
def main(env):
import pype.hosts.resolve as bmdvr
# Registers pype's Global pyblish plugins
pype.install()
bmdvr.setup(env)
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

View file

@@ -0,0 +1,22 @@
#!/usr/bin/env python
def main():
import pype.hosts.resolve as bmdvr
bmdvr.utils.get_resolve_module()
tracks = list()
track_type = "video"
sequence = bmdvr.get_current_sequence()
    # get count of all tracks of the given track type
    selected_track_count = sequence.GetTrackCount(track_type)
    # loop over all tracks and collect their names
    for track_index in range(1, (int(selected_track_count) + 1)):
        track_name = sequence.GetTrackName(track_type, track_index)
        tracks.append(track_name)
    print(tracks)
if __name__ == "__main__":
main()

View file

@ -1,19 +1,24 @@
#! python3
import sys
from pype.api import Logger
import DaVinciResolveScript as bmdvr
log = Logger().get_logger(__name__)
def main():
import pype.hosts.resolve as bmdvr
bm = bmdvr.utils.get_resolve_module()
log.info(f"blackmagicmodule: {bm}")
print(f"_>> bmdvr.scriptapp(Resolve): {bmdvr.scriptapp('Resolve')}")
resolve = bmdvr.scriptapp('Resolve')
print(f"resolve: {resolve}")
project_manager = resolve.GetProjectManager()
project = project_manager.GetCurrentProject()
media_pool = project.GetMediaPool()
root_folder = media_pool.GetRootFolder()
ls_folder = root_folder.GetClipList()
timeline = project.GetCurrentTimeline()
timeline_name = timeline.GetName()
for tl in ls_folder:
if tl.GetName() not in timeline_name:
continue
print(tl.GetName())
print(tl.GetMetadata())
print(tl.GetClipProperty())
if __name__ == "__main__":
    main()

View file

@ -76,6 +76,17 @@ from .ffmpeg_utils import (
ffprobe_streams
)
from .editorial import (
is_overlapping_otio_ranges,
otio_range_to_frame_range,
otio_range_with_handles,
convert_to_padded_path,
trim_media_range,
range_from_frames,
frames_to_secons,
make_sequence_collection
)
terminal = Terminal
__all__ = [
@ -136,5 +147,14 @@ __all__ = [
"IniSettingRegistry",
"JSONSettingRegistry",
"PypeSettingsRegistry",
"timeit"
"timeit",
"is_overlapping_otio_ranges",
"otio_range_with_handles",
"convert_to_padded_path",
"otio_range_to_frame_range",
"trim_media_range",
"range_from_frames",
"frames_to_secons",
"make_sequence_collection"
]

pype/lib/editorial.py Normal file
View file

@ -0,0 +1,160 @@
import os
import re
import clique
from opentimelineio import opentime
from opentimelineio.opentime import (
to_frames, RationalTime, TimeRange)
def otio_range_to_frame_range(otio_range):
start = to_frames(
otio_range.start_time, otio_range.start_time.rate)
end = start + to_frames(
otio_range.duration, otio_range.duration.rate) - 1
return start, end
def otio_range_with_handles(otio_range, instance):
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
handles_duration = handle_start + handle_end
fps = float(otio_range.start_time.rate)
start = to_frames(otio_range.start_time, fps)
duration = to_frames(otio_range.duration, fps)
return TimeRange(
start_time=RationalTime((start - handle_start), fps),
duration=RationalTime((duration + handles_duration), fps)
)
def is_overlapping_otio_ranges(test_otio_range, main_otio_range, strict=False):
test_start, test_end = otio_range_to_frame_range(test_otio_range)
main_start, main_end = otio_range_to_frame_range(main_otio_range)
covering_exp = bool(
(test_start <= main_start) and (test_end >= main_end)
)
inside_exp = bool(
(test_start >= main_start) and (test_end <= main_end)
)
overlaying_right_exp = bool(
(test_start <= main_end) and (test_end >= main_end)
)
overlaying_left_exp = bool(
(test_end >= main_start) and (test_start <= main_start)
)
if not strict:
return any((
covering_exp,
inside_exp,
overlaying_right_exp,
overlaying_left_exp
))
else:
return covering_exp
def convert_to_padded_path(path, padding):
"""
    Return correct padding in sequence string.
    Args:
        path (str): path url or simple file name
        padding (int): number of padding digits
    Returns:
        str: string with reformatted path
    Example:
        convert_to_padded_path("plate.%d.exr", 4) > plate.%04d.exr
"""
if "%d" in path:
path = re.sub("%d", "%0{padding}d".format(padding=padding), path)
return path
def trim_media_range(media_range, source_range):
"""
Trim input media range with clip source range.
Args:
media_range (otio.opentime.TimeRange): available range of media
source_range (otio.opentime.TimeRange): clip required range
Returns:
otio.opentime.TimeRange: trimmed media range
"""
rw_media_start = RationalTime(
media_range.start_time.value + source_range.start_time.value,
media_range.start_time.rate
)
rw_media_duration = RationalTime(
source_range.duration.value,
media_range.duration.rate
)
return TimeRange(
rw_media_start, rw_media_duration)
def range_from_frames(start, duration, fps):
"""
Returns otio time range.
Args:
start (int): frame start
duration (int): frame duration
        fps (float): frame rate
    Returns:
        otio.opentime.TimeRange: created range
"""
return TimeRange(
RationalTime(start, fps),
RationalTime(duration, fps)
)
def frames_to_secons(frames, framerate):
"""
    Returns seconds.
    Args:
        frames (int): frame count
        framerate (float): frame rate
Returns:
float: second value
"""
rt = opentime.from_frames(frames, framerate)
return opentime.to_seconds(rt)
def make_sequence_collection(path, otio_range, metadata):
"""
    Make collection from path, otio range and otio metadata.
Args:
path (str): path to image sequence with `%d`
otio_range (otio.opentime.TimeRange): range to be used
metadata (dict): data where padding value can be found
Returns:
        tuple: (dir_path, collection) - sequence directory path (str) and clique collection object
"""
if "%" not in path:
return None
file_name = os.path.basename(path)
dir_path = os.path.dirname(path)
head = file_name.split("%")[0]
tail = os.path.splitext(file_name)[-1]
first, last = otio_range_to_frame_range(otio_range)
collection = clique.Collection(
head=head, tail=tail, padding=metadata["padding"])
collection.indexes.update([i for i in range(first, (last + 1))])
return dir_path, collection
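
A short usage sketch of the helpers above, assuming the module is importable as `pype.lib.editorial` (values are illustrative):

from opentimelineio.opentime import RationalTime, TimeRange
from pype.lib.editorial import (
    otio_range_to_frame_range, frames_to_secons, convert_to_padded_path)

fps = 24.0
rng = TimeRange(RationalTime(100, fps), RationalTime(50, fps))
print(otio_range_to_frame_range(rng))             # (100, 149)
print(frames_to_secons(48, fps))                  # 2.0
print(convert_to_padded_path("plate.%d.exr", 4))  # plate.%04d.exr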

View file

@ -36,7 +36,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["shot"]
hosts = ["hiero"]
hosts = ["hiero", "resolve"]
optional = False
def process(self, context):

View file

@ -0,0 +1,113 @@
import pyblish.api
import avalon.api as avalon
class CollectHierarchy(pyblish.api.ContextPlugin):
"""Collecting hierarchy from `parents`.
present in `clip` family instances coming from the request json data file
It will add `hierarchical_context` into each instance for integrate
plugins to be able to create needed parents for the context if they
don't exist yet
"""
label = "Collect Hierarchy"
order = pyblish.api.CollectorOrder - 0.57
families = ["shot"]
hosts = ["resolve"]
def process(self, context):
temp_context = {}
project_name = avalon.Session["AVALON_PROJECT"]
final_context = {}
final_context[project_name] = {}
final_context[project_name]['entity_type'] = 'Project'
for instance in context:
self.log.info("Processing instance: `{}` ...".format(instance))
# shot data dict
shot_data = {}
family = instance.data.get("family")
            # filter out all inappropriate instances
if not instance.data["publish"]:
continue
            # exclude families other than self.families (by intersection)
if not set(self.families).intersection([family]):
continue
            # exclude if masterLayer is not True
if not instance.data.get("masterLayer"):
continue
# get asset build data if any available
shot_data["inputs"] = [
x["_id"] for x in instance.data.get("assetbuilds", [])
]
# suppose that all instances are Shots
shot_data['entity_type'] = 'Shot'
shot_data['tasks'] = instance.data.get("tasks") or []
shot_data["comments"] = instance.data.get("comments", [])
shot_data['custom_attributes'] = {
"handleStart": instance.data["handleStart"],
"handleEnd": instance.data["handleEnd"],
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.context.data["fps"],
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
}
actual = {instance.data["asset"]: shot_data}
for parent in reversed(instance.data["parents"]):
next_dict = {}
parent_name = parent["entity_name"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent[
"entity_type"].capitalize()
next_dict[parent_name]["childs"] = actual
actual = next_dict
temp_context = self._update_dict(temp_context, actual)
# skip if nothing for hierarchy available
if not temp_context:
return
final_context[project_name]['childs'] = temp_context
# adding hierarchy context to context
context.data["hierarchyContext"] = final_context
self.log.debug("context.data[hierarchyContext] is: {}".format(
context.data["hierarchyContext"]))
def _update_dict(self, parent_dict, child_dict):
"""
        Nest each child into its parent.
        Args:
            parent_dict (dict): parent dict which should be nested with children
            child_dict (dict): children dict which should be ingested
"""
for key in parent_dict:
if key in child_dict and isinstance(parent_dict[key], dict):
child_dict[key] = self._update_dict(
parent_dict[key], child_dict[key]
)
else:
if parent_dict.get(key) and child_dict.get(key):
continue
else:
child_dict[key] = parent_dict[key]
return child_dict
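
For illustration, the `_update_dict` recursion above merges two shot branches under a shared parent; a standalone re-implementation of the same logic:

def update_dict(parent_dict, child_dict):
    # recursively merge parent keys into the child (same logic as above)
    for key in parent_dict:
        if key in child_dict and isinstance(parent_dict[key], dict):
            child_dict[key] = update_dict(parent_dict[key], child_dict[key])
        elif not (parent_dict.get(key) and child_dict.get(key)):
            child_dict[key] = parent_dict[key]
    return child_dict

a = {"ep01": {"childs": {"sh010": {"entity_type": "Shot"}}}}
b = {"ep01": {"childs": {"sh020": {"entity_type": "Shot"}}}}
print(update_dict(a, b))
# {'ep01': {'childs': {'sh020': {...}, 'sh010': {...}}}}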

View file

@ -0,0 +1,70 @@
"""
Requires:
    otioClip -> instance data attribute
    workfileFrameStart -> instance data attribute
    handleStart -> instance data attribute
    handleEnd -> instance data attribute
"""
# import os
import opentimelineio as otio
import pyblish.api
import pype.lib
from pprint import pformat
class CollectOcioFrameRanges(pyblish.api.InstancePlugin):
"""Getting otio ranges from otio_clip
Adding timeline and source ranges to instance data"""
label = "Collect OTIO Frame Ranges"
order = pyblish.api.CollectorOrder - 0.58
families = ["shot", "clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_clip = instance.data["otioClip"]
workfile_start = instance.data["workfileFrameStart"]
# get ranges
otio_tl_range = otio_clip.range_in_parent()
otio_src_range = otio_clip.source_range
        otio_available_range = otio_clip.available_range()
otio_tl_range_handles = pype.lib.otio_range_with_handles(
otio_tl_range, instance)
otio_src_range_handles = pype.lib.otio_range_with_handles(
otio_src_range, instance)
        # get source available start frame
        src_starting_from = otio.opentime.to_frames(
            otio_available_range.start_time,
            otio_available_range.start_time.rate)
# convert to frames
range_convert = pype.lib.otio_range_to_frame_range
tl_start, tl_end = range_convert(otio_tl_range)
tl_start_h, tl_end_h = range_convert(otio_tl_range_handles)
src_start, src_end = range_convert(otio_src_range)
src_start_h, src_end_h = range_convert(otio_src_range_handles)
frame_start = workfile_start
frame_end = frame_start + otio.opentime.to_frames(
otio_tl_range.duration, otio_tl_range.duration.rate) - 1
data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"clipIn": tl_start,
"clipOut": tl_end,
"clipInH": tl_start_h,
"clipOutH": tl_end_h,
"sourceStart": src_starting_from + src_start,
"sourceEnd": src_starting_from + src_end,
"sourceStartH": src_starting_from + src_start_h,
"sourceEndH": src_starting_from + src_end_h,
}
instance.data.update(data)
self.log.debug(
"_ data: {}".format(pformat(data)))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))

View file

@ -0,0 +1,99 @@
"""
Requires:
instance -> otioClip
context -> otioTimeline
Optional:
    otioClip.metadata -> reviewTrack
Provides:
instance -> otioReviewClips
instance -> families (adding ["review", "ftrack"])
"""
import opentimelineio as otio
import pyblish.api
from pprint import pformat
class CollectOcioReview(pyblish.api.InstancePlugin):
"""Get matching otio track from defined review layer"""
label = "Collect OTIO Review"
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_review_clips = list()
otio_timeline = instance.context.data["otioTimeline"]
otio_clip = instance.data["otioClip"]
# optionally get `reviewTrack`
review_track_name = otio_clip.metadata.get("reviewTrack")
# generate range in parent
otio_tl_range = otio_clip.range_in_parent()
# calculate real timeline end needed for the clip
clip_end_frame = int(
otio_tl_range.start_time.value + otio_tl_range.duration.value)
# skip if no review track available
if not review_track_name:
return
# loop all tracks and match with name in `reviewTrack`
for track in otio_timeline.tracks:
if review_track_name not in track.name:
continue
# process correct track
# establish gap
otio_gap = None
# get track parent range
track_rip = track.range_in_parent()
# calculate real track end frame
track_end_frame = int(
track_rip.start_time.value + track_rip.duration.value)
            # check if the end of track is not lower than the clip requires
            if clip_end_frame > track_end_frame:
                # calculate difference duration
gap_duration = clip_end_frame - track_end_frame
# create rational time range for gap
otio_gap_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
float(0),
track_rip.start_time.rate
),
duration=otio.opentime.RationalTime(
float(gap_duration),
track_rip.start_time.rate
)
)
                # create gap
otio_gap = otio.schema.Gap(source_range=otio_gap_range)
            # trim available clips from the defined track as reviewable source
otio_review_clips = otio.algorithms.track_trimmed_to_range(
track,
otio_tl_range
)
            # add gap at the end if track end is shorter than needed
if otio_gap:
otio_review_clips.append(otio_gap)
if otio_review_clips:
instance.data["families"] += ["review", "ftrack"]
instance.data["otioReviewClips"] = otio_review_clips
self.log.info(
"Creating review track: {}".format(otio_review_clips))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
self.log.debug(
"_ families: {}".format(instance.data["families"]))

View file

@ -0,0 +1,182 @@
# TODO: this head doc string
"""
Requires:
    instance -> otioClip
Provides:
    instance -> representations
"""
import os
import clique
import opentimelineio as otio
import pyblish.api
import pype
class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
"""Get Resources for a subset version"""
label = "Collect OTIO Subset Resources"
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
if not instance.data.get("representations"):
instance.data["representations"] = list()
version_data = dict()
# get basic variables
otio_clip = instance.data["otioClip"]
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
        # get source and available ranges
        otio_src_range = otio_clip.source_range
        otio_available_range = otio_clip.available_range()
        trimmed_media_range = pype.lib.trim_media_range(
            otio_available_range, otio_src_range)
        # calculate with handles
        otio_src_range_handles = pype.lib.otio_range_with_handles(
            otio_src_range, instance)
        trimmed_media_range_h = pype.lib.trim_media_range(
            otio_available_range, otio_src_range_handles)
# frame start and end from media
s_frame_start, s_frame_end = pype.lib.otio_range_to_frame_range(
trimmed_media_range)
        a_frame_start, a_frame_end = pype.lib.otio_range_to_frame_range(
            otio_available_range)
a_frame_start_h, a_frame_end_h = pype.lib.otio_range_to_frame_range(
trimmed_media_range_h)
# fix frame_start and frame_end frame to be in range of media
if a_frame_start_h < a_frame_start:
a_frame_start_h = a_frame_start
if a_frame_end_h > a_frame_end:
a_frame_end_h = a_frame_end
# count the difference for frame_start and frame_end
diff_start = s_frame_start - a_frame_start_h
diff_end = a_frame_end_h - s_frame_end
# add to version data start and end range data
# for loader plugins to be correctly displayed and loaded
version_data.update({
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": diff_start,
"handleEnd": diff_end,
"fps": otio_avalable_range.start_time.rate
})
# change frame_start and frame_end values
# for representation to be correctly renumbered in integrate_new
frame_start -= diff_start
frame_end += diff_end
media_ref = otio_clip.media_reference
metadata = media_ref.metadata
        # check in two ways if it is a sequence
        is_sequence = None
        if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
self.log.info(
"frame_start-frame_end: {}-{}".format(frame_start, frame_end))
if is_sequence:
# file sequence way
if hasattr(media_ref, "target_url_base"):
self.staging_dir = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
collection = clique.Collection(
head=head,
tail=tail,
padding=media_ref.frame_zero_padding
)
collection.indexes.update(
[i for i in range(a_frame_start_h, (a_frame_end_h + 1))])
self.log.debug(collection)
repre = self._create_representation(
frame_start, frame_end, collection=collection)
else:
# in case it is file sequence but not new OTIO schema
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = pype.lib.make_sequence_collection(
path, trimmed_media_range, metadata)
self.staging_dir, collection = collection_data
self.log.debug(collection)
repre = self._create_representation(
frame_start, frame_end, collection=collection)
else:
            dirname, filename = os.path.split(media_ref.target_url)
            self.staging_dir = dirname
            self.log.debug(filename)
repre = self._create_representation(
frame_start, frame_end, file=filename)
if repre:
instance.data["versionData"] = version_data
self.log.debug(">>>>>>>> version data {}".format(version_data))
# add representation to instance data
instance.data["representations"].append(repre)
self.log.debug(">>>>>>>> {}".format(repre))
def _create_representation(self, start, end, **kwargs):
"""
Creating representation data.
Args:
start (int): start frame
end (int): end frame
kwargs (dict): optional data
Returns:
dict: representation data
"""
# create default representation data
representation_data = {
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir
}
if kwargs.get("collection"):
collection = kwargs.get("collection")
files = [f for f in collection]
ext = collection.format("{tail}")
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": files,
"frameStart": start,
"frameEnd": end,
})
return representation_data
if kwargs.get("file"):
file = kwargs.get("file")
ext = os.path.splitext(file)[-1]
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": file,
"frameStart": start,
"frameEnd": end,
})
return representation_data
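
The sequence branch above leans on `clique` to represent the frame set; a standalone sketch with illustrative names:

import clique

collection = clique.Collection(head="plate.", tail=".exr", padding=4)
collection.indexes.update(range(1001, 1004))
print(list(collection))
# ['plate.1001.exr', 'plate.1002.exr', 'plate.1003.exr']
print(collection.format("{tail}"))  # .exr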

View file

@ -32,7 +32,8 @@ class ExtractBurnin(pype.api.Extractor):
"standalonepublisher",
"harmony",
"fusion",
"aftereffects"
"aftereffects",
# "resolve"
]
optional = True

View file

@ -12,9 +12,12 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Jpeg EXR"
hosts = ["shell", "fusion"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "render2d", "source"]
families = [
"imagesequence", "render", "render2d",
"source", "plate", "take"
]
hosts = ["shell", "fusion", "resolve"]
enabled = False
# presetable attribute
@ -50,7 +53,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if not isinstance(repre['files'], (list, tuple)):
input_file = repre['files']
else:
input_file = repre['files'][0]
file_index = int(float(len(repre['files'])) * 0.5)
input_file = repre['files'][file_index]
stagingdir = os.path.normpath(repre.get("stagingDir"))

View file

@ -0,0 +1,41 @@
import os
import pyblish.api
import pype.api
import opentimelineio as otio
class ExtractOTIOFile(pype.api.Extractor):
"""
    Extractor exporting an OTIO file.
"""
label = "Extract OTIO file"
order = pyblish.api.ExtractorOrder - 0.45
families = ["workfile"]
hosts = ["resolve"]
def process(self, instance):
# create representation data
if "representations" not in instance.data:
instance.data["representations"] = []
name = instance.data["name"]
staging_dir = self.staging_dir(instance)
otio_timeline = instance.context.data["otioTimeline"]
# create otio timeline representation
otio_file_name = name + ".otio"
otio_file_path = os.path.join(staging_dir, otio_file_name)
otio.adapters.write_to_file(otio_timeline, otio_file_path)
representation_otio = {
'name': "otio",
'ext': "otio",
'files': otio_file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation_otio)
self.log.info("Added OTIO file representation: {}".format(
representation_otio))
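
The extractor above is essentially one adapter call; for reference, the OTIO round trip looks like this (hypothetical path):

import opentimelineio as otio

timeline = otio.schema.Timeline(name="exampleTimeline")
otio.adapters.write_to_file(timeline, "/tmp/exampleTimeline.otio")
restored = otio.adapters.read_from_file("/tmp/exampleTimeline.otio")
print(restored.name)  # exampleTimeline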

View file

@ -0,0 +1,426 @@
"""
Requires:
instance -> handleStart
instance -> handleEnd
instance -> otioClip
instance -> otioReviewClips
Optional:
instance -> workfileFrameStart
instance -> resolutionWidth
instance -> resolutionHeight
Provides:
instance -> otioReviewClips
"""
import os
import clique
import opentimelineio as otio
from pyblish import api
import pype
class ExtractOTIOReview(pype.api.Extractor):
"""
    Extract OTIO timeline into one concatenated image sequence file.
    The `otioReviewClip` holds the trimmed ranges of clips relative to
    the `otioClip`. Handles are added while looping over the available
    list of Gaps and Clips in the track. The start handle (head) is added
    before the first Gap or Clip and the end handle (tail) after the last
    Clip or Gap. In case source material is missing after the handles are
    added, a Gap is inserted. Finally all Gaps are converted to black
    frames, the available material is converted to image sequence frames,
    and a representation is created and added to the instance.
    At the moment only image sequence output is supported.
"""
order = api.ExtractorOrder - 0.45
label = "Extract OTIO review"
hosts = ["resolve"]
families = ["review"]
# plugin default attributes
temp_file_head = "tempFile."
to_width = 1280
to_height = 720
output_ext = ".jpg"
def process(self, instance):
# TODO: convert resulting image sequence to mp4
        # TODO: add audio output to the mp4 if audio in review is on.
# get otio clip and other time info from instance clip
# TODO: what if handles are different in `versionData`?
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
otio_review_clips = instance.data["otioReviewClips"]
# add plugin wide attributes
self.representation_files = list()
self.used_frames = list()
self.workfile_start = int(instance.data.get(
"workfileFrameStart", 1001)) - handle_start
self.padding = len(str(self.workfile_start))
self.used_frames.append(self.workfile_start)
self.to_width = instance.data.get(
"resolutionWidth") or self.to_width
self.to_height = instance.data.get(
"resolutionHeight") or self.to_height
# skip instance if no reviewable data available
if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
and (len(otio_review_clips) == 1):
self.log.warning(
"Instance `{}` has nothing to process".format(instance))
return
else:
self.staging_dir = self.staging_dir(instance)
if not instance.data.get("representations"):
instance.data["representations"] = list()
# loop available clips in otio track
for index, r_otio_cl in enumerate(otio_review_clips):
# QUESTION: what if transition on clip?
# get frame range values
src_range = r_otio_cl.source_range
start = src_range.start_time.value
duration = src_range.duration.value
available_range = None
self.actual_fps = src_range.duration.rate
# add available range only if not gap
if isinstance(r_otio_cl, otio.schema.Clip):
available_range = r_otio_cl.available_range()
self.actual_fps = available_range.duration.rate
# reframing handles conditions
if (len(otio_review_clips) > 1) and (index == 0):
# more clips | first clip reframing with handle
start -= handle_start
duration += handle_start
elif len(otio_review_clips) > 1 \
and (index == len(otio_review_clips) - 1):
# more clips | last clip reframing with handle
duration += handle_end
elif len(otio_review_clips) == 1:
# one clip | add both handles
start -= handle_start
duration += (handle_start + handle_end)
if available_range:
available_range = self._trim_available_range(
available_range, start, duration, self.actual_fps)
# process all track items of the track
if isinstance(r_otio_cl, otio.schema.Clip):
# process Clip
media_ref = r_otio_cl.media_reference
metadata = media_ref.metadata
is_sequence = None
# check in two way if it is sequence
if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
if is_sequence:
# file sequence way
if hasattr(media_ref, "target_url_base"):
dirname = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
first, last = pype.lib.otio_range_to_frame_range(
available_range)
collection = clique.Collection(
head=head,
tail=tail,
padding=media_ref.frame_zero_padding
)
collection.indexes.update(
[i for i in range(first, (last + 1))])
# render segment
                        self._render_segment(
sequence=[dirname, collection])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
else:
# in case it is file sequence but not new OTIO schema
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = pype.lib.make_sequence_collection(
path, available_range, metadata)
dir_path, collection = collection_data
# render segment
                        self._render_segment(
sequence=[dir_path, collection])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
else:
# single video file way
path = media_ref.target_url
# render video file to sequence
                    self._render_segment(
video=[path, available_range])
# generate used frames
self._generate_used_frames(
available_range.duration.value)
# QUESTION: what if nested track composition is in place?
else:
# at last process a Gap
                self._render_segment(gap=duration)
# generate used frames
self._generate_used_frames(duration)
# creating and registering representation
representation = self._create_representation(start, duration)
instance.data["representations"].append(representation)
self.log.info(f"Adding representation: {representation}")
def _create_representation(self, start, duration):
"""
Creating representation data.
Args:
start (int): start frame
duration (int): duration frames
Returns:
dict: representation data
"""
end = start + duration
# create default representation data
representation_data = {
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir,
"tags": ["review", "ftrackreview", "delete"]
}
collection = clique.Collection(
self.temp_file_head,
tail=self.output_ext,
padding=self.padding,
indexes=set(self.used_frames)
)
start = min(collection.indexes)
end = max(collection.indexes)
files = [f for f in collection]
ext = collection.format("{tail}")
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": files,
"frameStart": start,
"frameEnd": end,
})
return representation_data
    def _trim_available_range(self, avl_range, start, duration, fps):
        """
        Trim available media range to source range.
        If a missing media range is detected, it will be converted into
        black-frame gaps.
        Args:
            avl_range (otio.time.TimeRange): media available time range
            start (int): start frame
            duration (int): duration frames
            fps (float): frame rate
        Returns:
            otio.time.TimeRange: trimmed available range
        """
        avl_start = int(avl_range.start_time.value)
        src_start = int(avl_start + start)
        avl_duration = int(avl_range.duration.value)
        # if media start is less than the clip requires
        if src_start < avl_start:
            # calculate gap
            gap_duration = avl_start - src_start
            # create gap data to disk
            self._render_segment(gap=gap_duration)
            # generate used frames
            self._generate_used_frames(gap_duration)
            # fix start and end to correct values
            start = 0
            duration -= gap_duration
        # if media duration is shorter than the clip requires
        if duration > avl_duration:
            # calculate gap
            gap_start = int(src_start + avl_duration)
            gap_end = int(src_start + duration)
            gap_duration = gap_end - gap_start
            # create gap data to disk
            self._render_segment(gap=gap_duration, end_offset=avl_duration)
            # generate used frames
            self._generate_used_frames(gap_duration, end_offset=avl_duration)
            # fix duration length
            duration = avl_duration
        # return correct trimmed range
        return pype.lib.trim_media_range(
            avl_range, pype.lib.range_from_frames(start, duration, fps)
        )
    def _render_segment(self, sequence=None,
                        video=None, gap=None, end_offset=None):
        """
        Render segment into image sequence frames.
        Uses ffmpeg to convert a compatible video or image source
        to the defined image sequence format.
        Args:
            sequence (list): input dir path string, collection object in list
            video (list)[optional]: video_path string, otio_range in list
            gap (int)[optional]: gap duration
            end_offset (int)[optional]: offset gap frame start in frames
        """
# get rendering app path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# create path and frame start to destination
output_path, out_frame_start = self._get_ffmpeg_output()
if end_offset:
out_frame_start += end_offset
# start command list
command = [ffmpeg_path]
if sequence:
input_dir, collection = sequence
in_frame_start = min(collection.indexes)
# converting image sequence to image sequence
input_file = collection.format("{head}{padding}{tail}")
input_path = os.path.join(input_dir, input_file)
# form command for rendering gap files
command.extend([
"-start_number {}".format(in_frame_start),
"-i {}".format(input_path)
])
elif video:
video_path, otio_range = video
frame_start = otio_range.start_time.value
input_fps = otio_range.start_time.rate
frame_duration = otio_range.duration.value
sec_start = pype.lib.frames_to_secons(frame_start, input_fps)
sec_duration = pype.lib.frames_to_secons(frame_duration, input_fps)
# form command for rendering gap files
command.extend([
"-ss {}".format(sec_start),
"-t {}".format(sec_duration),
"-i {}".format(video_path)
])
elif gap:
sec_duration = pype.lib.frames_to_secons(
gap, self.actual_fps)
# form command for rendering gap files
command.extend([
"-t {} -r {}".format(sec_duration, self.actual_fps),
"-f lavfi",
"-i color=c=black:s={}x{}".format(self.to_width,
self.to_height),
"-tune stillimage"
])
# add output attributes
command.extend([
"-start_number {}".format(out_frame_start),
output_path
])
# execute
self.log.debug("Executing: {}".format(" ".join(command)))
output = pype.api.subprocess(" ".join(command), shell=True)
self.log.debug("Output: {}".format(output))
def _generate_used_frames(self, duration, end_offset=None):
"""
        Generate used frames into the plugin attribute `used_frames`.
        The attribute `used_frames` is used for checking the next available
        frame to start with during rendering of sequence segments.
        Args:
            duration (int): duration of frames needed to be generated
            end_offset (int)[optional]: in case frames need to be offset
"""
padding = "{{:0{}d}}".format(self.padding)
if end_offset:
new_frames = list()
start_frame = self.used_frames[-1]
for index in range((end_offset + 1),
(int(end_offset + duration) + 1)):
seq_number = padding.format(start_frame + index)
self.log.debug(
f"index: `{index}` | seq_number: `{seq_number}`")
new_frames.append(int(seq_number))
new_frames += self.used_frames
self.used_frames = new_frames
else:
for _i in range(1, (int(duration) + 1)):
if self.used_frames[-1] == self.workfile_start:
seq_number = padding.format(self.used_frames[-1])
self.workfile_start -= 1
else:
seq_number = padding.format(self.used_frames[-1] + 1)
self.used_frames.append(int(seq_number))
def _get_ffmpeg_output(self):
"""
Returning ffmpeg output command arguments.
Returns:
str: output_path is path for image sequence output
int: out_frame_start is starting sequence frame
"""
output_file = "{}{}{}".format(
self.temp_file_head,
"%0{}d".format(self.padding),
self.output_ext
)
# create path to destination
output_path = os.path.join(self.staging_dir, output_file)
# generate frame start
out_frame_start = self.used_frames[-1] + 1
if self.used_frames[-1] == self.workfile_start:
out_frame_start = self.used_frames[-1]
return output_path, out_frame_start
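
For clarity, the gap branch of `_render_segment` above assembles an ffmpeg command along these lines; a sketch with illustrative values (`ffmpeg` assumed on PATH):

import subprocess

to_width, to_height, fps = 1280, 720, 24.0
gap_frames = 10
command = [
    "ffmpeg",
    "-t", str(gap_frames / fps), "-r", str(fps),
    "-f", "lavfi",
    "-i", "color=c=black:s={}x{}".format(to_width, to_height),
    "-start_number", "991",
    "/tmp/tempFile.%04d.jpg",
]
subprocess.run(command, check=True)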

View file

@ -33,7 +33,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"harmony",
"standalonepublisher",
"fusion",
"tvpaint"
"tvpaint",
"resolve"
]
# Supported extensions

View file

@ -0,0 +1,38 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Clip Resoluton"
hosts = ["resolve"]
families = ["clip"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
item = instance.data["item"]
source_resolution = instance.data.get("sourceResolution", None)
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
# source exception
if source_resolution:
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = item.source().mediaSource().pixelAspect()
resolution_data = {
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
}
        # add to instance data
instance.data.update(resolution_data)
self.log.info("Resolution of instance '{}' is: {}".format(
instance,
resolution_data
))

View file

@ -1,4 +1,4 @@
from pprint import pformat
# from pprint import pformat
from pype.hosts import resolve
from pype.hosts.resolve import lib
@ -6,45 +6,216 @@ from pype.hosts.resolve import lib
class CreateShotClip(resolve.Creator):
"""Publishable clip"""
label = "Shot"
label = "Create Publishable Clip"
family = "clip"
icon = "film"
defaults = ["Main"]
gui_name = "Pype sequencial rename with hirerarchy"
gui_info = "Define sequencial rename and fill hierarchy data."
gui_tracks = resolve.get_video_track_names()
gui_name = "Pype publish attributes creator"
gui_info = "Define sequential rename and fill hierarchy data."
gui_inputs = {
"clipName": "{episode}{sequence}{shot}",
"hierarchy": "{folder}/{sequence}/{shot}",
"countFrom": 10,
"steps": 10,
"renameHierarchy": {
"type": "section",
"label": "Shot Hierarchy And Rename Settings",
"target": "ui",
"order": 0,
"value": {
"hierarchy": {
"value": "{folder}/{sequence}",
"type": "QLineEdit",
"label": "Shot Parent Hierarchy",
"target": "tag",
"toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa
"order": 0},
"clipRename": {
"value": False,
"type": "QCheckBox",
"label": "Rename clips",
"target": "ui",
"toolTip": "Renaming selected clips on fly", # noqa
"order": 1},
"clipName": {
"value": "{sequence}{shot}",
"type": "QLineEdit",
"label": "Clip Name Template",
"target": "ui",
"toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa
"order": 2},
"countFrom": {
"value": 10,
"type": "QSpinBox",
"label": "Count sequence from",
"target": "ui",
"toolTip": "Set when the sequence number stafrom", # noqa
"order": 3},
"countSteps": {
"value": 10,
"type": "QSpinBox",
"label": "Stepping number",
"target": "ui",
"toolTip": "What number is adding every new step", # noqa
"order": 4},
}
},
"hierarchyData": {
"folder": "shots",
"shot": "sh####",
"track": "{track}",
"sequence": "sc010",
"episode": "ep01"
"type": "dict",
"label": "Shot Template Keywords",
"target": "tag",
"order": 1,
"value": {
"folder": {
"value": "shots",
"type": "QLineEdit",
"label": "{folder}",
"target": "tag",
"toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 0},
"episode": {
"value": "ep01",
"type": "QLineEdit",
"label": "{episode}",
"target": "tag",
"toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 1},
"sequence": {
"value": "sq01",
"type": "QLineEdit",
"label": "{sequence}",
"target": "tag",
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 2},
"track": {
"value": "{_track_}",
"type": "QLineEdit",
"label": "{track}",
"target": "tag",
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 3},
"shot": {
"value": "sh###",
"type": "QLineEdit",
"label": "{shot}",
"target": "tag",
"toolTip": "Name of shot. `#` is converted to paded number. \nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 4}
}
},
"verticalSync": {
"type": "section",
"label": "Vertical Synchronization Of Attributes",
"target": "ui",
"order": 2,
"value": {
"vSyncOn": {
"value": True,
"type": "QCheckBox",
"label": "Enable Vertical Sync",
"target": "ui",
"toolTip": "Switch on if you want clips above each other to share its attributes", # noqa
"order": 0},
"vSyncTrack": {
"value": gui_tracks, # noqa
"type": "QComboBox",
"label": "Master track",
"target": "ui",
"toolTip": "Select driving track name which should be mastering all others", # noqa
"order": 1}
}
},
"publishSettings": {
"type": "section",
"label": "Publish Settings",
"target": "ui",
"order": 3,
"value": {
"subsetName": {
"value": ["<track_name>", "main", "bg", "fg", "bg",
"animatic"],
"type": "QComboBox",
"label": "Subset Name",
"target": "ui",
"toolTip": "chose subset name patern, if <track_name> is selected, name of track layer will be used", # noqa
"order": 0},
"subsetFamily": {
"value": ["plate", "take"],
"type": "QComboBox",
"label": "Subset Family",
"target": "ui", "toolTip": "What use of this subset is for", # noqa
"order": 1},
"reviewTrack": {
"value": ["< none >"] + gui_tracks,
"type": "QComboBox",
"label": "Use Review Track",
"target": "ui",
"toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa
"order": 2},
"audio": {
"value": False,
"type": "QCheckBox",
"label": "Include audio",
"target": "tag",
"toolTip": "Process subsets with corresponding audio", # noqa
"order": 3},
"sourceResolution": {
"value": False,
"type": "QCheckBox",
"label": "Source resolution",
"target": "tag",
"toolTip": "Is resloution taken from timeline or source?", # noqa
"order": 4},
}
},
"shotAttr": {
"type": "section",
"label": "Shot Attributes",
"target": "ui",
"order": 4,
"value": {
"workfileFrameStart": {
"value": 1001,
"type": "QSpinBox",
"label": "Workfiles Start Frame",
"target": "tag",
"toolTip": "Set workfile starting frame number", # noqa
"order": 0},
"handleStart": {
"value": 0,
"type": "QSpinBox",
"label": "Handle start (head)",
"target": "tag",
"toolTip": "Handle at start of clip", # noqa
"order": 1},
"handleEnd": {
"value": 0,
"type": "QSpinBox",
"label": "Handle end (tail)",
"target": "tag",
"toolTip": "Handle at end of clip", # noqa
"order": 2},
}
}
}
presets = None
def process(self):
# solve gui inputs overwrites from presets
# overwrite gui inputs from presets
        # get key pairs from presets and match them to ui inputs
for k, v in self.gui_inputs.items():
if isinstance(v, dict):
# nested dictionary (only one level allowed)
for _k, _v in v.items():
if self.presets.get(_k):
self.gui_inputs[k][_k] = self.presets[_k]
if v["type"] in ("dict", "section"):
# nested dictionary (only one level allowed
# for sections and dict)
for _k, _v in v["value"].items():
if self.presets.get(_k) is not None:
self.gui_inputs[k][
"value"][_k]["value"] = self.presets[_k]
if self.presets.get(k):
self.gui_inputs[k] = self.presets[k]
self.gui_inputs[k]["value"] = self.presets[k]
# open widget for plugins inputs
widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
widget.exec_()
print(f"__ selected_clips: {self.selected}")
if len(self.selected) < 1:
return
@ -52,28 +223,41 @@ class CreateShotClip(resolve.Creator):
print("Operation aborted")
return
self.rename_add = 0
# get ui output for track name for vertical sync
v_sync_track = widget.result["vSyncTrack"]["value"]
# sort selected trackItems by
sorted_selected_track_items = list()
unsorted_selected_track_items = list()
for track_item_data in self.selected:
if track_item_data["track"]["name"] in v_sync_track:
sorted_selected_track_items.append(track_item_data)
else:
unsorted_selected_track_items.append(track_item_data)
sorted_selected_track_items.extend(unsorted_selected_track_items)
# sequence attrs
sq_frame_start = self.sequence.GetStartFrame()
sq_markers = self.sequence.GetMarkers()
print(f"__ sq_frame_start: {pformat(sq_frame_start)}")
print(f"__ seq_markers: {pformat(sq_markers)}")
# create media bin for compound clips (trackItems)
mp_folder = resolve.create_current_sequence_media_bin(self.sequence)
print(f"_ mp_folder: {mp_folder.GetName()}")
lib.rename_add = 0
for i, t_data in enumerate(self.selected):
lib.rename_index = i
kwargs = {
"ui_inputs": widget.result,
"avalon": self.data,
"mp_folder": mp_folder,
"sq_frame_start": sq_frame_start,
"sq_markers": sq_markers
}
# clear color after it is done
t_data["clip"]["item"].ClearClipColor()
for i, track_item_data in enumerate(sorted_selected_track_items):
self.rename_index = i
# convert track item to timeline media pool item
resolve.create_compound_clip(
t_data,
mp_folder,
rename=True,
**dict(
{"presets": widget.result})
)
track_item = resolve.PublishClip(
self, track_item_data, **kwargs).convert()
track_item.SetClipColor(lib.publish_clip_color)
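
The `widget.result` structure mirrors `gui_inputs`: every key resolves to a dict carrying a `value` entry, which is how `vSyncTrack` is read above. A hypothetical snapshot:

# hypothetical `widget.result` after the dialog closes
result = {
    "vSyncTrack": {"value": "Video 1"},
    "workfileFrameStart": {"value": 1001},
    "handleStart": {"value": 10},
}
print(result["vSyncTrack"]["value"])          # Video 1
print(result["workfileFrameStart"]["value"])  # 1001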

View file

@ -0,0 +1,129 @@
import pyblish
from pype.hosts import resolve
# developer reload modules
from pprint import pformat
class CollectInstances(pyblish.api.ContextPlugin):
"""Collect all Track items selection."""
order = pyblish.api.CollectorOrder - 0.59
label = "Collect Instances"
hosts = ["resolve"]
def process(self, context):
otio_timeline = context.data["otioTimeline"]
selected_track_items = resolve.get_current_track_items(
filter=True, selecting_color=resolve.publish_clip_color)
self.log.info(
"Processing enabled track items: {}".format(
len(selected_track_items)))
for track_item_data in selected_track_items:
data = dict()
track_item = track_item_data["clip"]["item"]
# get pype tag data
tag_data = resolve.get_track_item_pype_tag(track_item)
self.log.debug(f"__ tag_data: {pformat(tag_data)}")
if not tag_data:
continue
if tag_data.get("id") != "pyblish.avalon.instance":
continue
media_pool_item = track_item.GetMediaPoolItem()
clip_property = media_pool_item.GetClipProperty()
self.log.debug(f"clip_property: {clip_property}")
# add tag data to instance data
data.update({
k: v for k, v in tag_data.items()
if k not in ("id", "applieswhole", "label")
})
asset = tag_data["asset"]
subset = tag_data["subset"]
# insert family into families
family = tag_data["family"]
families = [str(f) for f in tag_data["families"]]
families.insert(0, str(family))
data.update({
"name": "{} {} {}".format(asset, subset, families),
"asset": asset,
"item": track_item,
"families": families,
"publish": resolve.get_publish_attribute(track_item),
"fps": context.data["fps"]
})
# otio clip data
otio_data = resolve.get_otio_clip_instance_data(
otio_timeline, track_item_data) or {}
data.update(otio_data)
# add resolution
self.get_resolution_to_data(data, context)
# create instance
instance = context.create_instance(**data)
# create shot instance for shot attributes create/update
self.create_shot_instance(context, track_item, **data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
def get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"
# solve source resolution option
if data.get("sourceResolution", None):
otio_clip_metadata = data[
"otioClip"].media_reference.metadata
data.update({
"resolutionWidth": otio_clip_metadata["width"],
"resolutionHeight": otio_clip_metadata["height"],
"pixelAspect": otio_clip_metadata["pixelAspect"]
})
else:
otio_tl_metadata = context.data["otioTimeline"].metadata
data.update({
"resolutionWidth": otio_tl_metadata["width"],
"resolutionHeight": otio_tl_metadata["height"],
"pixelAspect": otio_tl_metadata["pixelAspect"]
})
def create_shot_instance(self, context, track_item, **data):
master_layer = data.get("masterLayer")
hierarchy_data = data.get("hierarchyData")
if not master_layer:
return
if not hierarchy_data:
return
asset = data["asset"]
subset = "shotMain"
# insert family into families
family = "shot"
data.update({
"name": "{} {} {}".format(asset, subset, family),
"subset": subset,
"asset": asset,
"family": family,
"families": [],
"publish": resolve.get_publish_attribute(track_item)
})
context.create_instance(**data)

View file

@ -1,29 +0,0 @@
import os
import pyblish.api
from pype.hosts.resolve.utils import get_resolve_module
class CollectProject(pyblish.api.ContextPlugin):
"""Collect Project object"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Project"
hosts = ["resolve"]
def process(self, context):
exported_projet_ext = ".drp"
current_dir = os.getenv("AVALON_WORKDIR")
resolve = get_resolve_module()
PM = resolve.GetProjectManager()
P = PM.GetCurrentProject()
name = P.GetName()
fname = name + exported_projet_ext
current_file = os.path.join(current_dir, fname)
normalised = os.path.normpath(current_file)
context.data["project"] = P
context.data["currentFile"] = normalised
self.log.info(name)
self.log.debug(normalised)

View file

@ -0,0 +1,55 @@
import pyblish.api
from pype.hosts import resolve
from avalon import api as avalon
from pprint import pformat
# dev
from importlib import reload
from pype.hosts.resolve.otio import davinci_export
reload(davinci_export)
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
label = "Collect Workfile"
order = pyblish.api.CollectorOrder - 0.6
def process(self, context):
asset = avalon.Session["AVALON_ASSET"]
subset = "workfile"
project = resolve.get_current_project()
fps = project.GetSetting("timelineFrameRate")
active_sequence = resolve.get_current_sequence()
video_tracks = resolve.get_video_track_names()
# adding otio timeline to context
otio_timeline = davinci_export.create_otio_timeline(project)
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile"
}
# create instance with workfile
instance = context.create_instance(**instance_data)
# update context with main project attributes
context_data = {
"activeProject": project,
"activeSequence": active_sequence,
"otioTimeline": otio_timeline,
"videoTracks": video_tracks,
"currentFile": project.GetName(),
"fps": fps,
}
context.data.update(context_data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
self.log.debug("__ context_data: {}".format(pformat(context_data)))

View file

@ -0,0 +1,50 @@
import os
import pyblish.api
import pype.api
from pype.hosts import resolve
class ExtractWorkfile(pype.api.Extractor):
"""
    Extractor exporting a DRP workfile representation.
"""
label = "Extract Workfile"
order = pyblish.api.ExtractorOrder
families = ["workfile"]
hosts = ["resolve"]
def process(self, instance):
# create representation data
if "representations" not in instance.data:
instance.data["representations"] = []
name = instance.data["name"]
project = instance.context.data["activeProject"]
staging_dir = self.staging_dir(instance)
resolve_workfile_ext = ".drp"
drp_file_name = name + resolve_workfile_ext
drp_file_path = os.path.normpath(
os.path.join(staging_dir, drp_file_name))
# write out the drp workfile
resolve.get_project_manager().ExportProject(
project.GetName(), drp_file_path)
# create drp workfile representation
representation_drp = {
'name': resolve_workfile_ext[1:],
'ext': resolve_workfile_ext[1:],
'files': drp_file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation_drp)
# add sourcePath attribute to instance
if not instance.data.get("sourcePath"):
instance.data["sourcePath"] = drp_file_path
self.log.info("Added Resolve file representation: {}".format(
representation_drp))

View file

@ -1,9 +1,20 @@
{
"create": {
"CreateShotClip": {
"hierarchy": "{folder}/{sequence}",
"clipRename": true,
"clipName": "{track}{sequence}{shot}",
"folder": "takes",
"steps": 20
"countFrom": 10,
"countSteps": 10,
"folder": "shots",
"episode": "ep01",
"sequence": "sq01",
"track": "{_track_}",
"shot": "sh###",
"vSyncOn": false,
"workfileFrameStart": 1001,
"handleStart": 10,
"handleEnd": 10
}
}
}

View file

@ -788,9 +788,7 @@
"RESOLVE_DEV"
]
},
"RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [
"{STUDIO_SOFT}/davinci_resolve/scripts/python"
],
"RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [],
"RESOLVE_SCRIPT_API": {
"windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting",
"darvin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting",
@ -834,7 +832,12 @@
"variant_label": "16",
"icon": "",
"executables": {
"windows": [],
"windows": [
[
"C:/Program Files/Blackmagic Design/DaVinci Resolve/Resolve.exe",
""
]
],
"darwin": [],
"linux": []
},

View file

@ -19,19 +19,102 @@
"is_group": true,
"children": [
{
"type": "text",
"key": "clipName",
"label": "Clip name template"
"type": "collapsible-wrap",
"label": "Shot Hierarchy And Rename Settings",
"collapsable": false,
"children": [
{
"type": "text",
"key": "hierarchy",
"label": "Shot parent hierarchy"
},
{
"type": "boolean",
"key": "clipRename",
"label": "Rename clips"
},
{
"type": "text",
"key": "clipName",
"label": "Clip name template"
},
{
"type": "number",
"key": "countFrom",
"label": "Count sequence from"
},
{
"type": "number",
"key": "countSteps",
"label": "Stepping number"
}
]
},
{
"type": "text",
"key": "folder",
"label": "Folder"
"type": "collapsible-wrap",
"label": "Shot Template Keywords",
"collapsable": false,
"children": [
{
"type": "text",
"key": "folder",
"label": "{folder}"
},
{
"type": "text",
"key": "episode",
"label": "{episode}"
},
{
"type": "text",
"key": "sequence",
"label": "{sequence}"
},
{
"type": "text",
"key": "track",
"label": "{track}"
},
{
"type": "text",
"key": "shot",
"label": "{shot}"
}
]
},
{
"type": "number",
"key": "steps",
"label": "Steps"
"type": "collapsible-wrap",
"label": "Vertical Synchronization Of Attributes",
"collapsable": false,
"children": [
{
"type": "boolean",
"key": "vSyncOn",
"label": "Enable Vertical Sync"
}
]
},
{
"type": "collapsible-wrap",
"label": "Shot Attributes",
"collapsable": false,
"children": [
{
"type": "number",
"key": "workfileFrameStart",
"label": "Workfiles Start Frame"
},
{
"type": "number",
"key": "handleStart",
"label": "Handle start (head)"
},
{
"type": "number",
"key": "handleEnd",
"label": "Handle end (tail)"
}
]
}
]
}

View file

@ -14,7 +14,7 @@ google-api-python-client
jsonschema
keyring
log4mongo
OpenTimelineIO
git+https://github.com/pypeclub/OpenTimelineIO.git@develop
pathlib2
Pillow
pynput

View file

@ -49,6 +49,9 @@ class OTIOExportTask(hiero.core.TaskBase):
return str(type(self))
def get_rate(self, item):
if not hasattr(item, 'framerate'):
item = item.sequence()
num, den = item.framerate().toRational()
rate = float(num) / float(den)
@ -58,12 +61,12 @@ class OTIOExportTask(hiero.core.TaskBase):
return round(rate, 2)
def get_clip_ranges(self, trackitem):
# Is clip an audio file? Use sequence frame rate
if not trackitem.source().mediaSource().hasVideo():
rate_item = trackitem.sequence()
# Get rate from source or sequence
if trackitem.source().mediaSource().hasVideo():
rate_item = trackitem.source()
else:
rate_item = trackitem.source()
rate_item = trackitem.sequence()
source_rate = self.get_rate(rate_item)
@ -88,9 +91,10 @@ class OTIOExportTask(hiero.core.TaskBase):
duration=source_duration
)
available_range = None
hiero_clip = trackitem.source()
if not hiero_clip.mediaSource().isOffline():
available_range = None
if hiero_clip.mediaSource().isMediaPresent():
start_time = otio.opentime.RationalTime(
hiero_clip.mediaSource().startTime(),
source_rate
@ -123,7 +127,7 @@ class OTIOExportTask(hiero.core.TaskBase):
def get_marker_color(self, tag):
icon = tag.icon()
pat = 'icons:Tag(?P<color>\w+)\.\w+'
pat = r'icons:Tag(?P<color>\w+)\.\w+'
res = re.search(pat, icon)
if res:
@ -155,13 +159,17 @@ class OTIOExportTask(hiero.core.TaskBase):
)
)
metadata = dict(
Hiero=tag.metadata().dict()
)
# Store the source item for future import assignment
metadata['Hiero']['source_type'] = hiero_item.__class__.__name__
marker = otio.schema.Marker(
name=tag.name(),
color=self.get_marker_color(tag),
marked_range=marked_range,
metadata={
'Hiero': tag.metadata().dict()
}
metadata=metadata
)
otio_item.markers.append(marker)
@ -170,37 +178,44 @@ class OTIOExportTask(hiero.core.TaskBase):
hiero_clip = trackitem.source()
# Add Gap if needed
prev_item = (
itemindex and trackitem.parent().items()[itemindex - 1] or
trackitem
)
if itemindex == 0:
prev_item = trackitem
if prev_item == trackitem and trackitem.timelineIn() > 0:
else:
prev_item = trackitem.parent().items()[itemindex - 1]
clip_diff = trackitem.timelineIn() - prev_item.timelineOut()
if itemindex == 0 and trackitem.timelineIn() > 0:
self.add_gap(trackitem, otio_track, 0)
elif (
prev_item != trackitem and
prev_item.timelineOut() != trackitem.timelineIn()
):
elif itemindex and clip_diff != 1:
self.add_gap(trackitem, otio_track, prev_item.timelineOut())
# Create Clip
source_range, available_range = self.get_clip_ranges(trackitem)
otio_clip = otio.schema.Clip()
otio_clip.name = trackitem.name()
otio_clip.source_range = source_range
otio_clip = otio.schema.Clip(
name=trackitem.name(),
source_range=source_range
)
# Add media reference
media_reference = otio.schema.MissingReference()
if not hiero_clip.mediaSource().isOffline():
if hiero_clip.mediaSource().isMediaPresent():
source = hiero_clip.mediaSource()
media_reference = otio.schema.ExternalReference()
media_reference.available_range = available_range
first_file = source.fileinfos()[0]
path = first_file.filename()
path, name = os.path.split(source.fileinfos()[0].filename())
media_reference.target_url = os.path.join(path, name)
media_reference.name = name
if "%" in path:
path = re.sub(r"%\d+d", "%d", path)
if "#" in path:
path = re.sub(r"#+", "%d", path)
media_reference = otio.schema.ExternalReference(
target_url=u'{}'.format(path),
available_range=available_range
)
otio_clip.media_reference = media_reference
@ -218,6 +233,7 @@ class OTIOExportTask(hiero.core.TaskBase):
# Add tags as markers
if self._preset.properties()["includeTags"]:
self.add_markers(trackitem, otio_clip)
self.add_markers(trackitem.source(), otio_clip)
otio_track.append(otio_clip)
@ -273,16 +289,16 @@ class OTIOExportTask(hiero.core.TaskBase):
name=alignment, # Consider placing Hiero name in metadata
transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve,
in_offset=in_time,
out_offset=out_time,
metadata={}
out_offset=out_time
)
if alignment == 'kFadeIn':
otio_track.insert(-2, otio_transition)
otio_track.insert(-1, otio_transition)
else:
otio_track.append(otio_transition)
def add_tracks(self):
for track in self._sequence.items():
if isinstance(track, hiero.core.AudioTrack):
@ -291,8 +307,7 @@ class OTIOExportTask(hiero.core.TaskBase):
else:
kind = otio.schema.TrackKind.Video
otio_track = otio.schema.Track(kind=kind)
otio_track.name = track.name()
otio_track = otio.schema.Track(name=track.name(), kind=kind)
for itemindex, trackitem in enumerate(track):
if isinstance(trackitem.source(), hiero.core.Clip):
@ -306,6 +321,12 @@ class OTIOExportTask(hiero.core.TaskBase):
def create_OTIO(self):
self.otio_timeline = otio.schema.Timeline()
# Set global start time based on sequence
self.otio_timeline.global_start_time = otio.opentime.RationalTime(
self._sequence.timecodeStart(),
self._sequence.framerate().toFloat()
)
self.otio_timeline.name = self._sequence.name()
self.add_tracks()

View file

@ -202,7 +202,8 @@ marker_color_map = {
"PURPLE": "Magenta",
"MAGENTA": "Magenta",
"BLACK": "Blue",
"WHITE": "Green"
"WHITE": "Green",
"MINT": "Cyan"
}
@ -259,7 +260,7 @@ def add_markers(otio_item, hiero_item, tagsbin):
marker.marked_range.duration.value
)
tag = hiero_item.addTagToRange(_tag, start, end)
tag = hiero_item.addTag(_tag)
tag.setName(marker.name or marker_color_map[marker_color])
# Add metadata
@ -285,7 +286,7 @@ def create_track(otio_track, tracknum, track_kind):
return track
def create_clip(otio_clip, tagsbin):
def create_clip(otio_clip):
# Create MediaSource
otio_media = otio_clip.media_reference
if isinstance(otio_media, otio.schema.ExternalReference):
@ -300,13 +301,10 @@ def create_clip(otio_clip, tagsbin):
# Create Clip
clip = hiero.core.Clip(media)
# Add markers
add_markers(otio_clip, clip, tagsbin)
return clip
def create_trackitem(playhead, track, otio_clip, clip):
def create_trackitem(playhead, track, otio_clip, clip, tagsbin):
source_range = otio_clip.source_range
trackitem = track.createTrackItem(otio_clip.name)
@ -352,22 +350,44 @@ def create_trackitem(playhead, track, otio_clip, clip):
trackitem.setTimelineIn(timeline_in)
trackitem.setTimelineOut(timeline_out)
# Add markers
add_markers(otio_clip, trackitem, tagsbin)
return trackitem
def build_sequence(otio_timeline, project=None, track_kind=None):
def build_sequence(
otio_timeline, project=None, sequence=None, track_kind=None):
if project is None:
# TODO: Find a proper way for active project
project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1]
if sequence:
project = sequence.project()
# Create a Sequence
sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence')
else:
# Per version 12.1v2 there is no way of getting active project
project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1]
# Create a Bin to hold clips
projectbin = project.clipsBin()
projectbin.addItem(hiero.core.BinItem(sequence))
sequencebin = hiero.core.Bin(sequence.name())
projectbin.addItem(sequencebin)
if not sequence:
# Create a Sequence
sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence')
# Set sequence settings from otio timeline if available
if hasattr(otio_timeline, 'global_start_time'):
if otio_timeline.global_start_time:
start_time = otio_timeline.global_start_time
sequence.setFramerate(start_time.rate)
sequence.setTimecodeStart(start_time.value)
# Create a Bin to hold clips
projectbin.addItem(hiero.core.BinItem(sequence))
sequencebin = hiero.core.Bin(sequence.name())
projectbin.addItem(sequencebin)
else:
sequencebin = projectbin
# Get tagsBin
tagsbin = hiero.core.project("Tag Presets").tagsBin()
@ -375,13 +395,11 @@ def build_sequence(otio_timeline, project=None, track_kind=None):
# Add timeline markers
add_markers(otio_timeline, sequence, tagsbin)
# TODO: Set sequence settings from otio timeline if available
if isinstance(otio_timeline, otio.schema.Timeline):
tracks = otio_timeline.tracks
else:
# otio.schema.Stack
tracks = otio_timeline
tracks = [otio_timeline]
for tracknum, otio_track in enumerate(tracks):
playhead = 0
@ -403,7 +421,7 @@ def build_sequence(otio_timeline, project=None, track_kind=None):
elif isinstance(otio_clip, otio.schema.Clip):
# Create a Clip
clip = create_clip(otio_clip, tagsbin)
clip = create_clip(otio_clip)
# Add Clip to a Bin
sequencebin.addItem(hiero.core.BinItem(clip))
@ -413,7 +431,8 @@ def build_sequence(otio_timeline, project=None, track_kind=None):
playhead,
track,
otio_clip,
clip
clip,
tagsbin
)
# Add trackitem to track