Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-25 05:14:40 +01:00)

Merge pull request #1687 from pypeclub/bugfix/1587-hiero-published-whole-edit-mov

Commit 05da26bee9: 3 changed files with 144 additions and 5 deletions
@@ -41,10 +41,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

        # process all selected timeline track items
        for track_item in selected_timeline_items:
            data = {}
            clip_name = track_item.name()
            source_clip = track_item.source()
            self.log.debug("clip_name: {}".format(clip_name))

            # get clip's subtracks and annotations
            annotations = self.clip_annotations(source_clip)
@@ -128,7 +128,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):

                 "_ instance.data: {}".format(pformat(instance.data)))

             if not with_audio:
-                return
+                continue

             # create audio subset instance
             self.create_audio_instance(context, **data)
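The one-line change above is the core of the precollect fix: inside the loop over selected track items, `return` bailed out of the whole collection as soon as one clip had no audio, while `continue` only skips creating the audio instance for that clip. A minimal sketch of the difference, using made-up clip data rather than the plugin's own Hiero objects:

# Sketch only: hypothetical clip data standing in for Hiero track items.
clips = [
    {"name": "sh010", "with_audio": True},
    {"name": "sh020", "with_audio": False},
    {"name": "sh030", "with_audio": True},
]


def collect_audio_names(clips):
    collected = []
    for clip in clips:
        if not clip["with_audio"]:
            # a `return` here would also drop sh030; `continue` skips only sh020
            continue
        collected.append(clip["name"])
    return collected


print(collect_audio_names(clips))  # ['sh010', 'sh030']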
@@ -40,6 +40,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):

        otio_clip = instance.data["otioClip"]
        otio_avalable_range = otio_clip.available_range()
        media_fps = otio_avalable_range.start_time.rate
        available_duration = otio_avalable_range.duration.value

        # get available range trimmed with processed retimes
        retimed_attributes = editorial.get_media_range_with_retimes(
@@ -68,6 +69,8 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):

            a_frame_start_h, (a_frame_end_h - a_frame_start_h + 1),
            media_fps
        )
        trimmed_duration = trimmed_media_range_h.duration.value

        self.log.debug("trimmed_media_range_h: {}".format(
            trimmed_media_range_h))
        self.log.debug("a_frame_start_h: {}".format(
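The handle arithmetic above turns a start frame and an inclusive frame count, (a_frame_end_h - a_frame_start_h + 1), into an OTIO time range at the media fps, whose duration.value is compared further down. A small sketch of that conversion done directly with opentimelineio; the frame numbers are invented, and the plugin itself builds the range through OpenPype's editorial helpers rather than this way:

import opentimelineio as otio

media_fps = 25.0
a_frame_start_h = 1001                                   # hypothetical start frame incl. handles
a_frame_end_h = 1049                                     # hypothetical end frame incl. handles
duration_frames = a_frame_end_h - a_frame_start_h + 1    # inclusive frame count

trimmed_media_range_h = otio.opentime.TimeRange(
    start_time=otio.opentime.RationalTime(a_frame_start_h, media_fps),
    duration=otio.opentime.RationalTime(duration_frames, media_fps),
)
print(trimmed_media_range_h.duration.value)  # 49 frames at 25 fps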
@@ -150,12 +153,18 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):

             repre = self._create_representation(
                 frame_start, frame_end, collection=collection)
         else:
+            _trim = False
             dirname, filename = os.path.split(media_ref.target_url)
             self.staging_dir = dirname
+            if trimmed_duration < available_duration:
+                self.log.debug("Ready for Trimming")
+                instance.data["families"].append("trim")
+                instance.data["otioTrimmingRange"] = trimmed_media_range_h
+                _trim = True

             self.log.debug(filename)
             repre = self._create_representation(
-                frame_start, frame_end, file=filename)
+                frame_start, frame_end, file=filename, trim=_trim)

         if repre:
             # add representation to instance data
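The new branch only marks an instance for trimming when the clip needs less than the full source media: it appends the trim family, stores the trimmed range, and passes trim=_trim on to the representation. A compact sketch of that decision with hypothetical frame durations:

# Hypothetical values: the source media is much longer than the edited clip.
available_duration = 250.0    # full length of the published source file
trimmed_duration = 48.0       # clip length incl. handles after retimes

instance_data = {"families": ["clip"]}

_trim = False
if trimmed_duration < available_duration:
    # mirrors the diff: mark the instance so the trimming extractor picks it up
    instance_data["families"].append("trim")
    _trim = True

print(instance_data["families"], _trim)  # ['clip', 'trim'] True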
@@ -196,7 +205,7 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):

                "frameStart": start,
                "frameEnd": end,
            })
            return representation_data

        if kwargs.get("file"):
            file = kwargs.get("file")
            ext = os.path.splitext(file)[-1]
@@ -207,4 +216,9 @@ class CollectOcioSubsetResources(pyblish.api.InstancePlugin):

                 "frameStart": start,
                 "frameEnd": end,
             })
-            return representation_data
+
+            if kwargs.get("trim") is True:
+                representation_data.update({
+                    "tags": ["trim"]
+                })
+            return representation_data
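With the new keyword, _create_representation tags the representation with "trim" so the extractor added below can find it. A reduced stand-in for that builder, not the real plugin method, showing a few typical representation keys alongside the ones the diff touches; the file name and frame values are illustrative:

import os


def build_representation(filename, start, end, trim=False):
    # simplified stand-in for _create_representation
    ext = os.path.splitext(filename)[-1].lstrip(".")
    representation_data = {
        "name": ext,
        "ext": ext,
        "files": filename,
        "frameStart": start,
        "frameEnd": end,
    }
    if trim is True:
        # only trim-flagged representations get the tag the extractor filters on
        representation_data.update({"tags": ["trim"]})
    return representation_data


print(build_representation("sh010_plate.mov", 1001, 1049, trim=True))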
openpype/plugins/publish/extract_otio_trimming_video.py (new file, 125 lines)

@@ -0,0 +1,125 @@
"""
|
||||
Requires:
|
||||
instance -> otioTrimmingRange
|
||||
instance -> representations
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
from pyblish import api
|
||||
import openpype
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
class ExtractOTIOTrimmingVideo(openpype.api.Extractor):
|
||||
"""
|
||||
Trimming video file longer then required lenght
|
||||
|
||||
"""
|
||||
order = api.ExtractorOrder
|
||||
label = "Extract OTIO trim longer video"
|
||||
families = ["trim"]
|
||||
hosts = ["resolve", "hiero"]
|
||||
|
||||
def process(self, instance):
|
||||
self.staging_dir = self.staging_dir(instance)
|
||||
otio_trim_range = instance.data["otioTrimmingRange"]
|
||||
representations = instance.data["representations"]
|
||||
self.log.debug("otio_trim_range: {}".format(otio_trim_range))
|
||||
self.log.debug("self.staging_dir: {}".format(self.staging_dir))
|
||||
|
||||
        # get corresponding representation
        for _repre in representations:
            if "trim" not in _repre.get("tags", []):
                continue

            input_file = _repre["files"]
            input_file_path = os.path.normpath(os.path.join(
                _repre["stagingDir"], input_file
            ))
            self.log.debug("input_file_path: {}".format(input_file_path))

            # trim via ffmpeg
            new_file = self._ffmpeg_trim_seqment(
                input_file_path, otio_trim_range)

            # prepare new representation data
            repre_data = deepcopy(_repre)
            # remove tags as we don't need them anymore
            repre_data.pop("tags")
            repre_data["stagingDir"] = self.staging_dir
            repre_data["files"] = new_file

            # replace the `trim` tagged representation with the trimmed one
            representations.remove(_repre)
            representations.append(repre_data)
            self.log.debug(repre_data)

        self.log.debug("representations: {}".format(representations))

    def _ffmpeg_trim_seqment(self, input_file_path, otio_range):
        """
        Trim a segment of a video file.

        Uses ffmpeg to trim the video to the desired length.

        Args:
            input_file_path (str): path string
            otio_range (opentime.TimeRange): range to trim to

        """
        # get rendering app path
        ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg")

        # create path to destination
        output_path = self._get_ffmpeg_output(input_file_path)

        # start command list
        command = ['"{}"'.format(ffmpeg_path)]

        video_path = input_file_path
        frame_start = otio_range.start_time.value
        input_fps = otio_range.start_time.rate
        frame_duration = (otio_range.duration.value + 1)
        sec_start = openpype.lib.frames_to_secons(frame_start, input_fps)
        sec_duration = openpype.lib.frames_to_secons(frame_duration, input_fps)

        # form the trimming command arguments
        command.extend([
            "-ss {}".format(sec_start),
            "-t {}".format(sec_duration),
            "-i \"{}\"".format(video_path),
            "-c copy",
            output_path
        ])

        # execute
        self.log.debug("Executing: {}".format(" ".join(command)))
        output = openpype.api.run_subprocess(
            " ".join(command), logger=self.log
        )
        self.log.debug("Output: {}".format(output))

        return os.path.basename(output_path)

    def _get_ffmpeg_output(self, file_path):
        """
        Build the output file path for the trimmed ffmpeg render.

        Args:
            file_path (str): path string

        Returns:
            str: output_path

        """
        basename = os.path.basename(file_path)
        name, ext = os.path.splitext(basename)

        output_file = "{}_{}{}".format(
            name,
            "trimmed",
            ext
        )
        # create path to destination
        return os.path.join(self.staging_dir, output_file)
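For reference, the command this extractor assembles is a plain stream-copy trim: seek to the clip start with -ss, keep only the needed duration with -t, and copy streams with -c copy instead of re-encoding. A standalone sketch of the same arithmetic and argument list, assuming frames_to_secons is a simple frames / fps conversion and using made-up paths and frame values:

# Sketch of the ffmpeg invocation built by _ffmpeg_trim_seqment, with invented inputs.
ffmpeg_path = "ffmpeg"                    # assumed to be on PATH here
video_path = "/tmp/sh010_plate.mov"       # hypothetical source file
output_path = "/tmp/sh010_plate_trimmed.mov"

frame_start = 86400.0                     # trim-in point in frames
frame_duration = 49.0                     # frames to keep
input_fps = 25.0

sec_start = frame_start / input_fps       # assumed equivalent of frames_to_secons
sec_duration = frame_duration / input_fps

command = [
    '"{}"'.format(ffmpeg_path),
    "-ss {}".format(sec_start),           # seek before the input, keyframe-based
    "-t {}".format(sec_duration),         # duration to keep, in seconds
    '-i "{}"'.format(video_path),
    "-c copy",                            # no re-encode, just cut the container
    output_path,
]
print(" ".join(command))

Because the cut uses stream copy, it snaps to keyframes rather than being frame-accurate; that is a general property of ffmpeg stream copy, not something this sketch adds, and it is usually acceptable when the source is only somewhat longer than the edit needs.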