Merge branch 'develop' into feature/get_rid_of_subprocess

This commit is contained in:
Milan Kolar 2021-01-08 22:44:19 +01:00
commit 5f98463e40
179 changed files with 6949 additions and 12757 deletions


@@ -0,0 +1,113 @@
import pyblish.api
import avalon.api as avalon
class CollectHierarchy(pyblish.api.ContextPlugin):
"""Collecting hierarchy from `parents`.
present in `clip` family instances coming from the request json data file
It will add `hierarchical_context` into each instance for integrate
plugins to be able to create needed parents for the context if they
don't exist yet
"""
label = "Collect Hierarchy"
order = pyblish.api.CollectorOrder - 0.57
families = ["shot"]
hosts = ["resolve"]
def process(self, context):
temp_context = {}
project_name = avalon.Session["AVALON_PROJECT"]
final_context = {}
final_context[project_name] = {}
final_context[project_name]['entity_type'] = 'Project'
for instance in context:
self.log.info("Processing instance: `{}` ...".format(instance))
# shot data dict
shot_data = {}
family = instance.data.get("family")
# filter out all inappropriate instances
if not instance.data["publish"]:
continue
# exclude families other than self.families via intersection
if not set(self.families).intersection([family]):
continue
# exclude if not masterLayer True
if not instance.data.get("masterLayer"):
continue
# get asset build data if any available
shot_data["inputs"] = [
x["_id"] for x in instance.data.get("assetbuilds", [])
]
# assume that all instances are Shots
shot_data['entity_type'] = 'Shot'
shot_data['tasks'] = instance.data.get("tasks") or []
shot_data["comments"] = instance.data.get("comments", [])
shot_data['custom_attributes'] = {
"handleStart": instance.data["handleStart"],
"handleEnd": instance.data["handleEnd"],
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.context.data["fps"],
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
}
actual = {instance.data["asset"]: shot_data}
for parent in reversed(instance.data["parents"]):
next_dict = {}
parent_name = parent["entity_name"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent[
"entity_type"].capitalize()
next_dict[parent_name]["childs"] = actual
actual = next_dict
temp_context = self._update_dict(temp_context, actual)
# skip if nothing is available for the hierarchy
if not temp_context:
return
final_context[project_name]['childs'] = temp_context
# adding hierarchy context to context
context.data["hierarchyContext"] = final_context
self.log.debug("context.data[hierarchyContext] is: {}".format(
context.data["hierarchyContext"]))
def _update_dict(self, parent_dict, child_dict):
"""
Nest each child dict into its parent.
Args:
parent_dict (dict): parent dict which should be nested with children
child_dict (dict): child dict which should be ingested
"""
for key in parent_dict:
if key in child_dict and isinstance(parent_dict[key], dict):
child_dict[key] = self._update_dict(
parent_dict[key], child_dict[key]
)
else:
if parent_dict.get(key) and child_dict.get(key):
continue
else:
child_dict[key] = parent_dict[key]
return child_dict
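
For reference, a minimal standalone sketch of the recursive merge this plugin performs, with hypothetical episode and shot names, showing how two per-instance hierarchies combine into the single tree stored in `hierarchyContext`:

# Sketch of the _update_dict merge; all names are illustrative only.
def update_dict(parent_dict, child_dict):
    for key in parent_dict:
        if key in child_dict and isinstance(parent_dict[key], dict):
            child_dict[key] = update_dict(parent_dict[key], child_dict[key])
        elif not (parent_dict.get(key) and child_dict.get(key)):
            child_dict[key] = parent_dict[key]
    return child_dict

shot_a = {"ep01": {"entity_type": "Episode",
                   "childs": {"sh010": {"entity_type": "Shot"}}}}
shot_b = {"ep01": {"entity_type": "Episode",
                   "childs": {"sh020": {"entity_type": "Shot"}}}}
merged = update_dict(shot_a, shot_b)
# merged["ep01"]["childs"] now contains both sh010 and sh020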


@@ -0,0 +1,70 @@
"""
Requires:
otioClip -> instance data attribute
workfileFrameStart -> instance data attribute
"""
# import os
import opentimelineio as otio
import pyblish.api
import pype.lib
from pprint import pformat
class CollectOcioFrameRanges(pyblish.api.InstancePlugin):
"""Getting otio ranges from otio_clip
Adding timeline and source ranges to instance data"""
label = "Collect OTIO Frame Ranges"
order = pyblish.api.CollectorOrder - 0.58
families = ["shot", "clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_clip = instance.data["otioClip"]
workfile_start = instance.data["workfileFrameStart"]
# get ranges
otio_tl_range = otio_clip.range_in_parent()
otio_src_range = otio_clip.source_range
otio_avalable_range = otio_clip.available_range()
otio_tl_range_handles = pype.lib.otio_range_with_handles(
otio_tl_range, instance)
otio_src_range_handles = pype.lib.otio_range_with_handles(
otio_src_range, instance)
# get source available start frame
src_starting_from = otio.opentime.to_frames(
otio_avalable_range.start_time,
otio_avalable_range.start_time.rate)
# convert to frames
range_convert = pype.lib.otio_range_to_frame_range
tl_start, tl_end = range_convert(otio_tl_range)
tl_start_h, tl_end_h = range_convert(otio_tl_range_handles)
src_start, src_end = range_convert(otio_src_range)
src_start_h, src_end_h = range_convert(otio_src_range_handles)
frame_start = workfile_start
frame_end = frame_start + otio.opentime.to_frames(
otio_tl_range.duration, otio_tl_range.duration.rate) - 1
data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"clipIn": tl_start,
"clipOut": tl_end,
"clipInH": tl_start_h,
"clipOutH": tl_end_h,
"sourceStart": src_starting_from + src_start,
"sourceEnd": src_starting_from + src_end,
"sourceStartH": src_starting_from + src_start_h,
"sourceEndH": src_starting_from + src_end_h,
}
instance.data.update(data)
self.log.debug(
"_ data: {}".format(pformat(data)))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))


@@ -0,0 +1,99 @@
"""
Requires:
instance -> otioClip
context -> otioTimeline
Optional:
otioClip.metadata -> reviewTrack
Provides:
instance -> otioReviewClips
instance -> families (adding ["review", "ftrack"])
"""
import opentimelineio as otio
import pyblish.api
from pprint import pformat
class CollectOcioReview(pyblish.api.InstancePlugin):
"""Get matching otio track from defined review layer"""
label = "Collect OTIO Review"
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
# get basic variables
otio_review_clips = list()
otio_timeline = instance.context.data["otioTimeline"]
otio_clip = instance.data["otioClip"]
# optionally get `reviewTrack`
review_track_name = otio_clip.metadata.get("reviewTrack")
# generate range in parent
otio_tl_range = otio_clip.range_in_parent()
# calculate real timeline end needed for the clip
clip_end_frame = int(
otio_tl_range.start_time.value + otio_tl_range.duration.value)
# skip if no review track available
if not review_track_name:
return
# loop all tracks and match with name in `reviewTrack`
for track in otio_timeline.tracks:
if review_track_name not in track.name:
continue
# process correct track
# establish gap
otio_gap = None
# get track parent range
track_rip = track.range_in_parent()
# calculate real track end frame
track_end_frame = int(
track_rip.start_time.value + track_rip.duration.value)
# check that the track does not end earlier than the clip requires
if clip_end_frame > track_end_frame:
# calculate the difference duration
gap_duration = clip_end_frame - track_end_frame
# create rational time range for gap
otio_gap_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
float(0),
track_rip.start_time.rate
),
duration=otio.opentime.RationalTime(
float(gap_duration),
track_rip.start_time.rate
)
)
# create gap
otio_gap = otio.schema.Gap(source_range=otio_gap_range)
# trim available clips from the defined track as reviewable source
otio_review_clips = otio.algorithms.track_trimmed_to_range(
track,
otio_tl_range
)
# add gap at the end if track end is shorter than needed
if otio_gap:
otio_review_clips.append(otio_gap)
if otio_review_clips:
instance.data["families"] += ["review", "ftrack"]
instance.data["otioReviewClips"] = otio_review_clips
self.log.info(
"Creating review track: {}".format(otio_review_clips))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
self.log.debug(
"_ families: {}".format(instance.data["families"]))


@@ -0,0 +1,182 @@
# TODO: write this module docstring
"""
Requires:
instance -> otioClip
Provides:
instance -> representations
instance -> versionData
"""
import os
import clique
import opentimelineio as otio
import pyblish.api
import pype
class CollectOcioSubsetResources(pyblish.api.InstancePlugin):
"""Get Resources for a subset version"""
label = "Collect OTIO Subset Resources"
order = pyblish.api.CollectorOrder - 0.57
families = ["clip"]
hosts = ["resolve"]
def process(self, instance):
if not instance.data.get("representations"):
instance.data["representations"] = list()
version_data = dict()
# get basic variables
otio_clip = instance.data["otioClip"]
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
# generate range in parent
otio_src_range = otio_clip.source_range
otio_avalable_range = otio_clip.available_range()
trimmed_media_range = pype.lib.trim_media_range(
otio_avalable_range, otio_src_range)
# calculate with handles
otio_src_range_handles = pype.lib.otio_range_with_handles(
otio_src_range, instance)
trimmed_media_range_h = pype.lib.trim_media_range(
otio_avalable_range, otio_src_range_handles)
# frame start and end from media
s_frame_start, s_frame_end = pype.lib.otio_range_to_frame_range(
trimmed_media_range)
a_frame_start, a_frame_end = pype.lib.otio_range_to_frame_range(
otio_avalable_range)
a_frame_start_h, a_frame_end_h = pype.lib.otio_range_to_frame_range(
trimmed_media_range_h)
# clamp the handle-extended range to stay within the available media
if a_frame_start_h < a_frame_start:
a_frame_start_h = a_frame_start
if a_frame_end_h > a_frame_end:
a_frame_end_h = a_frame_end
# count the difference for frame_start and frame_end
diff_start = s_frame_start - a_frame_start_h
diff_end = a_frame_end_h - s_frame_end
# add to version data start and end range data
# for loader plugins to be correctly displayed and loaded
version_data.update({
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": diff_start,
"handleEnd": diff_end,
"fps": otio_avalable_range.start_time.rate
})
# change frame_start and frame_end values
# for representation to be correctly renumbered in integrate_new
frame_start -= diff_start
frame_end += diff_end
media_ref = otio_clip.media_reference
metadata = media_ref.metadata
# check in two ways if it is a sequence
is_sequence = None
if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
self.log.info(
"frame_start-frame_end: {}-{}".format(frame_start, frame_end))
if is_sequence:
# file sequence way
if hasattr(media_ref, "target_url_base"):
self.staging_dir = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
collection = clique.Collection(
head=head,
tail=tail,
padding=media_ref.frame_zero_padding
)
collection.indexes.update(
[i for i in range(a_frame_start_h, (a_frame_end_h + 1))])
self.log.debug(collection)
repre = self._create_representation(
frame_start, frame_end, collection=collection)
else:
# in case it is file sequence but not new OTIO schema
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = pype.lib.make_sequence_collection(
path, trimmed_media_range, metadata)
self.staging_dir, collection = collection_data
self.log.debug(collection)
repre = self._create_representation(
frame_start, frame_end, collection=collection)
else:
dirname, filename = os.path.split(media_ref.target_url)
self.staging_dir = dirname
self.log.debug(media_ref.target_url)
repre = self._create_representation(
frame_start, frame_end, file=filename)
if repre:
instance.data["versionData"] = version_data
self.log.debug(">>>>>>>> version data {}".format(version_data))
# add representation to instance data
instance.data["representations"].append(repre)
self.log.debug(">>>>>>>> {}".format(repre))
def _create_representation(self, start, end, **kwargs):
"""
Creating representation data.
Args:
start (int): start frame
end (int): end frame
kwargs (dict): optional data
Returns:
dict: representation data
"""
# create default representation data
representation_data = {
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir
}
if kwargs.get("collection"):
collection = kwargs.get("collection")
files = [f for f in collection]
ext = collection.format("{tail}")
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": files,
"frameStart": start,
"frameEnd": end,
})
return representation_data
if kwargs.get("file"):
file = kwargs.get("file")
ext = os.path.splitext(file)[-1]
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": file,
"frameStart": start,
"frameEnd": end,
})
return representation_data
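
A short sketch, with a hypothetical head, tail and frame range, of how the `clique` collection above expands into the representation's `files` list and dot-less `ext`:

import clique

collection = clique.Collection(head="plateMain.", tail=".exr", padding=4)
collection.indexes.update(range(1001, 1006))
files = [f for f in collection]
# ['plateMain.1001.exr', ..., 'plateMain.1005.exr']
ext = collection.format("{tail}")[1:]
assert ext == "exr"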


@@ -32,7 +32,8 @@ class ExtractBurnin(pype.api.Extractor):
"standalonepublisher",
"harmony",
"fusion",
"aftereffects"
"aftereffects",
# "resolve"
]
optional = True


@@ -12,9 +12,12 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Jpeg EXR"
hosts = ["shell", "fusion"]
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "render2d", "source"]
families = [
"imagesequence", "render", "render2d",
"source", "plate", "take"
]
hosts = ["shell", "fusion", "resolve"]
enabled = False
# presetable attribute
@@ -50,7 +53,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
if not isinstance(repre['files'], (list, tuple)):
input_file = repre['files']
else:
input_file = repre['files'][0]
file_index = int(float(len(repre['files'])) * 0.5)
input_file = repre['files'][file_index]
stagingdir = os.path.normpath(repre.get("stagingDir"))
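
The hunk above switches the thumbnail source from the first file to the middle of the sequence; a tiny sketch with made-up file names:

files = ["plateMain.%04d.exr" % i for i in range(1001, 1011)]
file_index = int(float(len(files)) * 0.5)
# 10 files -> index 5 -> a mid-sequence frame instead of the first one
assert files[file_index] == "plateMain.1006.exr"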


@@ -0,0 +1,41 @@
import os
import pyblish.api
import pype.api
import opentimelineio as otio
class ExtractOTIOFile(pype.api.Extractor):
"""
Extractor exporting an OTIO file.
"""
label = "Extract OTIO file"
order = pyblish.api.ExtractorOrder - 0.45
families = ["workfile"]
hosts = ["resolve"]
def process(self, instance):
# create representation data
if "representations" not in instance.data:
instance.data["representations"] = []
name = instance.data["name"]
staging_dir = self.staging_dir(instance)
otio_timeline = instance.context.data["otioTimeline"]
# create otio timeline representation
otio_file_name = name + ".otio"
otio_file_path = os.path.join(staging_dir, otio_file_name)
otio.adapters.write_to_file(otio_timeline, otio_file_path)
representation_otio = {
'name': "otio",
'ext': "otio",
'files': otio_file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation_otio)
self.log.info("Added OTIO file representation: {}".format(
representation_otio))
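
As a usage note, a minimal round-trip sketch of the OTIO adapter calls this extractor relies on (the path is hypothetical):

import opentimelineio as otio

timeline = otio.schema.Timeline(name="workfileMain")
otio.adapters.write_to_file(timeline, "/tmp/workfileMain.otio")
reloaded = otio.adapters.read_from_file("/tmp/workfileMain.otio")
assert reloaded.name == "workfileMain"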


@@ -0,0 +1,426 @@
"""
Requires:
instance -> handleStart
instance -> handleEnd
instance -> otioClip
instance -> otioReviewClips
Optional:
instance -> workfileFrameStart
instance -> resolutionWidth
instance -> resolutionHeight
Provides:
instance -> representations
"""
import os
import clique
import opentimelineio as otio
from pyblish import api
import pype
class ExtractOTIOReview(pype.api.Extractor):
"""
Extract OTIO timeline into one concatenated image sequence file.
The `otioReviewClip` holds the trimmed ranges of clips relative to
the `otioClip`. Handles are added while looping through the available
Gaps and Clips in the track. The start handle (head) is added before
the first Gap or Clip and the end handle (tail) is added after the last
Clip or Gap. In case source material is missing after the handles are
added, a Gap is inserted. Finally all Gaps are converted to black
frames, the available material is converted to image sequence frames,
and a representation is created and added to the instance.
At the moment only image sequence output is supported.
"""
order = api.ExtractorOrder - 0.45
label = "Extract OTIO review"
hosts = ["resolve"]
families = ["review"]
# plugin default attributes
temp_file_head = "tempFile."
to_width = 1280
to_height = 720
output_ext = ".jpg"
def process(self, instance):
# TODO: convert resulting image sequence to mp4
# TODO: add audio output to the mp4 if audio in review is on.
# get otio clip and other time info from instance clip
# TODO: what if handles are different in `versionData`?
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
otio_review_clips = instance.data["otioReviewClips"]
# add plugin wide attributes
self.representation_files = list()
self.used_frames = list()
self.workfile_start = int(instance.data.get(
"workfileFrameStart", 1001)) - handle_start
self.padding = len(str(self.workfile_start))
self.used_frames.append(self.workfile_start)
self.to_width = instance.data.get(
"resolutionWidth") or self.to_width
self.to_height = instance.data.get(
"resolutionHeight") or self.to_height
# skip instance if no reviewable data available
if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
and (len(otio_review_clips) == 1):
self.log.warning(
"Instance `{}` has nothing to process".format(instance))
return
else:
self.staging_dir = self.staging_dir(instance)
if not instance.data.get("representations"):
instance.data["representations"] = list()
# loop available clips in otio track
for index, r_otio_cl in enumerate(otio_review_clips):
# QUESTION: what if transition on clip?
# get frame range values
src_range = r_otio_cl.source_range
start = src_range.start_time.value
duration = src_range.duration.value
available_range = None
self.actual_fps = src_range.duration.rate
# add available range only if not gap
if isinstance(r_otio_cl, otio.schema.Clip):
available_range = r_otio_cl.available_range()
self.actual_fps = available_range.duration.rate
# reframing handles conditions
if (len(otio_review_clips) > 1) and (index == 0):
# more clips | first clip reframing with handle
start -= handle_start
duration += handle_start
elif len(otio_review_clips) > 1 \
and (index == len(otio_review_clips) - 1):
# more clips | last clip reframing with handle
duration += handle_end
elif len(otio_review_clips) == 1:
# one clip | add both handles
start -= handle_start
duration += (handle_start + handle_end)
if available_range:
available_range = self._trim_available_range(
available_range, start, duration, self.actual_fps)
# process all track items of the track
if isinstance(r_otio_cl, otio.schema.Clip):
# process Clip
media_ref = r_otio_cl.media_reference
metadata = media_ref.metadata
is_sequence = None
# check in two ways if it is a sequence
if hasattr(otio.schema, "ImageSequenceReference"):
# for OpenTimelineIO 0.13 and newer
if isinstance(media_ref,
otio.schema.ImageSequenceReference):
is_sequence = True
else:
# for OpenTimelineIO 0.12 and older
if metadata.get("padding"):
is_sequence = True
if is_sequence:
# file sequence way
if hasattr(media_ref, "target_url_base"):
dirname = media_ref.target_url_base
head = media_ref.name_prefix
tail = media_ref.name_suffix
first, last = pype.lib.otio_range_to_frame_range(
available_range)
collection = clique.Collection(
head=head,
tail=tail,
padding=media_ref.frame_zero_padding
)
collection.indexes.update(
[i for i in range(first, (last + 1))])
# render segment
self._render_seqment(
sequence=[dirname, collection])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
else:
# in case it is file sequence but not new OTIO schema
# `ImageSequenceReference`
path = media_ref.target_url
collection_data = pype.lib.make_sequence_collection(
path, available_range, metadata)
dir_path, collection = collection_data
# render segment
self._render_seqment(
sequence=[dir_path, collection])
# generate used frames
self._generate_used_frames(
len(collection.indexes))
else:
# single video file way
path = media_ref.target_url
# render video file to sequence
self._render_seqment(
video=[path, available_range])
# generate used frames
self._generate_used_frames(
available_range.duration.value)
# QUESTION: what if nested track composition is in place?
else:
# otherwise process a Gap
self._render_seqment(gap=duration)
# generate used frames
self._generate_used_frames(duration)
# creating and registering representation
representation = self._create_representation(start, duration)
instance.data["representations"].append(representation)
self.log.info(f"Adding representation: {representation}")
def _create_representation(self, start, duration):
"""
Creating representation data.
Args:
start (int): start frame
duration (int): duration frames
Returns:
dict: representation data
"""
end = start + duration
# create default representation data
representation_data = {
"frameStart": start,
"frameEnd": end,
"stagingDir": self.staging_dir,
"tags": ["review", "ftrackreview", "delete"]
}
collection = clique.Collection(
self.temp_file_head,
tail=self.output_ext,
padding=self.padding,
indexes=set(self.used_frames)
)
start = min(collection.indexes)
end = max(collection.indexes)
files = [f for f in collection]
ext = collection.format("{tail}")
representation_data.update({
"name": ext[1:],
"ext": ext[1:],
"files": files,
"frameStart": start,
"frameEnd": end,
})
return representation_data
def _trim_available_range(self, avl_range, start, duration, fps):
"""
Trim available media range to source range.
If a missing media range is detected, it is converted into
black-frame gaps.
Args:
avl_range (otio.time.TimeRange): media available time range
start (int): start frame
duration (int): duration frames
fps (float): frame rate
Returns:
otio.time.TimeRange: trimmed available range
"""
avl_start = int(avl_range.start_time.value)
src_start = int(avl_start + start)
avl_durtation = int(avl_range.duration.value)
# if the media starts later than the clip (with handles) requires
if src_start < avl_start:
# calculate gap
gap_duration = avl_start - src_start
# create gap data to disk
self._render_seqment(gap=gap_duration)
# generate used frames
self._generate_used_frames(gap_duration)
# fix start and end to correct values
start = 0
duration -= gap_duration
# if media duration is shorter than the clip requires
if duration > avl_durtation:
# calculate gap
gap_start = int(src_start + avl_durtation)
gap_end = int(src_start + duration)
gap_duration = gap_end - gap_start
# create gap data to disk
self._render_seqment(gap=gap_duration, end_offset=avl_durtation)
# generate used frames
self._generate_used_frames(gap_duration, end_offset=avl_durtation)
# fix duration length
duration = avl_durtation
# return correct trimmed range
return pype.lib.trim_media_range(
avl_range, pype.lib.range_from_frames(start, duration, fps)
)
def _render_seqment(self, sequence=None,
video=None, gap=None, end_offset=None):
"""
Render a segment into image sequence frames.
Using ffmpeg to convert compatible video and image sources
to the defined image sequence format.
Args:
sequence (list): input dir path string, collection object in list
video (list)[optional]: video_path string, otio_range in list
gap (int)[optional]: gap duration
end_offset (int)[optional]: offset gap frame start in frames
"""
# get rendering app path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# create path and frame start to destination
output_path, out_frame_start = self._get_ffmpeg_output()
if end_offset:
out_frame_start += end_offset
# start command list
command = [ffmpeg_path]
if sequence:
input_dir, collection = sequence
in_frame_start = min(collection.indexes)
# converting image sequence to image sequence
input_file = collection.format("{head}{padding}{tail}")
input_path = os.path.join(input_dir, input_file)
# form command for rendering gap files
command.extend([
"-start_number {}".format(in_frame_start),
"-i {}".format(input_path)
])
elif video:
video_path, otio_range = video
frame_start = otio_range.start_time.value
input_fps = otio_range.start_time.rate
frame_duration = otio_range.duration.value
sec_start = pype.lib.frames_to_secons(frame_start, input_fps)
sec_duration = pype.lib.frames_to_secons(frame_duration, input_fps)
# form command for rendering gap files
command.extend([
"-ss {}".format(sec_start),
"-t {}".format(sec_duration),
"-i {}".format(video_path)
])
elif gap:
sec_duration = pype.lib.frames_to_secons(
gap, self.actual_fps)
# form command for rendering gap files
command.extend([
"-t {} -r {}".format(sec_duration, self.actual_fps),
"-f lavfi",
"-i color=c=black:s={}x{}".format(self.to_width,
self.to_height),
"-tune stillimage"
])
# add output attributes
command.extend([
"-start_number {}".format(out_frame_start),
output_path
])
# execute
self.log.debug("Executing: {}".format(" ".join(command)))
output = pype.api.subprocess(" ".join(command), shell=True)
self.log.debug("Output: {}".format(output))
def _generate_used_frames(self, duration, end_offset=None):
"""
Generate used frames into the plugin attribute `used_frames`.
The attribute `used_frames` is used for finding the next available
frame to start with when rendering sequence segments.
Args:
duration (int): duration of frames needed to be generated
end_offset (int)[optional]: in case frames need to be offset
"""
padding = "{{:0{}d}}".format(self.padding)
if end_offset:
new_frames = list()
start_frame = self.used_frames[-1]
for index in range((end_offset + 1),
(int(end_offset + duration) + 1)):
seq_number = padding.format(start_frame + index)
self.log.debug(
f"index: `{index}` | seq_number: `{seq_number}`")
new_frames.append(int(seq_number))
new_frames += self.used_frames
self.used_frames = new_frames
else:
for _i in range(1, (int(duration) + 1)):
if self.used_frames[-1] == self.workfile_start:
seq_number = padding.format(self.used_frames[-1])
self.workfile_start -= 1
else:
seq_number = padding.format(self.used_frames[-1] + 1)
self.used_frames.append(int(seq_number))
def _get_ffmpeg_output(self):
"""
Return ffmpeg output command arguments.
Returns:
tuple: output_path (str) is the path for the image sequence output,
out_frame_start (int) is the starting sequence frame
"""
output_file = "{}{}{}".format(
self.temp_file_head,
"%0{}d".format(self.padding),
self.output_ext
)
# create path to destination
output_path = os.path.join(self.staging_dir, output_file)
# generate frame start
out_frame_start = self.used_frames[-1] + 1
if self.used_frames[-1] == self.workfile_start:
out_frame_start = self.used_frames[-1]
return output_path, out_frame_start
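
The gap branch of `_render_seqment` converts a frame count to seconds before handing it to ffmpeg. A standalone stand-in for `pype.lib.frames_to_secons`, assuming it is a plain frames/fps division:

def frames_to_seconds(frames, fps):
    # assumed equivalent of pype.lib.frames_to_secons
    return float(frames) / float(fps)

# e.g. a 25-frame gap at 25 fps renders as a 1.0 s black segment:
# ffmpeg -t 1.0 -r 25.0 -f lavfi -i color=c=black:s=1280x720 ...
assert frames_to_seconds(25, 25.0) == 1.0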


@@ -33,7 +33,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
"harmony",
"standalonepublisher",
"fusion",
"tvpaint"
"tvpaint",
"resolve"
]
# Supported extensions


@@ -329,6 +329,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if repre.get("outputName"):
template_data["output"] = repre['outputName']
template_data["representation"] = repre["name"]
ext = repre["ext"]
if ext.startswith("."):
self.log.warning((
"Implementaion warning: <\"{}\">"
" Representation's extension stored under \"ext\" key "
" started with dot (\"{}\")."
).format(repre["name"], ext))
ext = ext[1:]
repre["ext"] = ext
template_data["ext"] = ext
template = os.path.normpath(
anatomy.templates[template_name]["path"])
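
A small sketch of the extension normalization this hunk introduces (the file name is illustrative):

import os

ext = os.path.splitext("plateMain.1001.exr")[-1]  # ".exr"
if ext.startswith("."):
    ext = ext[1:]
assert ext == "exr"
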
@@ -355,7 +368,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['ext']
template_data["frame"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
@@ -376,6 +388,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None
# TODO use frame padding from right template group
if repre.get("frameStart") is not None:
frame_start_padding = int(
anatomy.templates["render"].get(
@@ -411,7 +424,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst = "{0}{1}{2}".format(
dst_head,
dst_padding,
dst_tail).replace("..", ".")
dst_tail
)
self.log.debug("destination: `{}`".format(dst))
src = os.path.join(stagingdir, src_file_name)
@@ -431,7 +445,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_head,
dst_start_frame,
dst_tail
).replace("..", ".")
)
repre['published_path'] = dst
else:
@@ -449,13 +463,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"Given file name is a full path"
)
template_data["representation"] = repre['ext']
src = os.path.join(stagingdir, fname)
anatomy_filled = anatomy.format(template_data)
template_filled = anatomy_filled[template_name]["path"]
repre_context = template_filled.used_values
dst = os.path.normpath(template_filled).replace("..", ".")
dst = os.path.normpath(template_filled)
instance.data["transfers"].append([src, dst])


@@ -95,7 +95,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
template_data.update({
"_id": str(thumbnail_id),
"thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),
"ext": file_extension,
"ext": file_extension[1:],
"thumbnail_type": "thumbnail"
})

File diff suppressed because it is too large