mirror of
https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00
Merge pull request #1222 from pypeclub/3.0/feature/python2-OpenTimelineIO-vendor
OTIO: adding python-2 version to `pype.vendor`
This commit is contained in:
commit
4c44d42804
66 changed files with 18223 additions and 0 deletions
51
pype/vendor/python/python_2/opentimelineio/__init__.py
vendored
Normal file
51
pype/vendor/python/python_2/opentimelineio/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""An editorial interchange format and library.
|
||||
|
||||
see: http://opentimeline.io
|
||||
|
||||
.. moduleauthor:: Pixar Animation Studios <opentimelineio@pixar.com>
|
||||
"""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
# in dependency hierarchy
|
||||
from . import (
|
||||
opentime,
|
||||
exceptions,
|
||||
core,
|
||||
schema,
|
||||
schemadef,
|
||||
plugins,
|
||||
media_linker,
|
||||
adapters,
|
||||
hooks,
|
||||
algorithms,
|
||||
)
|
||||
|
||||
__version__ = "0.11.0"
|
||||
__author__ = "Pixar Animation Studios"
|
||||
__author_email__ = "opentimelineio@pixar.com"
|
||||
__license__ = "Modified Apache 2.0 License"
|
||||
213
pype/vendor/python/python_2/opentimelineio/adapters/__init__.py
vendored
Normal file
213
pype/vendor/python/python_2/opentimelineio/adapters/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,213 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Expose the adapter interface to developers.
|
||||
|
||||
To read from an existing representation, use the read_from_string and
|
||||
read_from_file functions. To query the list of adapters, use the
|
||||
available_adapter_names function.
|
||||
|
||||
The otio_json adapter is provided as a the canonical, lossless, serialization
|
||||
of the in-memory otio schema. Other adapters are to varying degrees lossy.
|
||||
For more information, consult the documentation in the individual adapter
|
||||
modules.
|
||||
"""
|
||||
|
||||
import os
|
||||
import itertools
|
||||
|
||||
from .. import (
|
||||
exceptions,
|
||||
plugins,
|
||||
media_linker
|
||||
)
|
||||
|
||||
from .adapter import Adapter # noqa
|
||||
|
||||
# OTIO Json adapter is always available
|
||||
from . import otio_json # noqa
|
||||
|
||||
|
||||
def suffixes_with_defined_adapters(read=False, write=False):
|
||||
"""Return a set of all the suffixes that have adapters defined for them."""
|
||||
|
||||
if not read and not write:
|
||||
read = True
|
||||
write = True
|
||||
|
||||
positive_adapters = []
|
||||
for adp in plugins.ActiveManifest().adapters:
|
||||
if read and adp.has_feature("read"):
|
||||
positive_adapters.append(adp)
|
||||
continue
|
||||
|
||||
if write and adp.has_feature("write"):
|
||||
positive_adapters.append(adp)
|
||||
|
||||
return set(
|
||||
itertools.chain.from_iterable(
|
||||
adp.suffixes for adp in positive_adapters
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def available_adapter_names():
|
||||
"""Return a string list of the available adapters."""
|
||||
|
||||
return [str(adp.name) for adp in plugins.ActiveManifest().adapters]
|
||||
|
||||
|
||||
def _from_filepath_or_name(filepath, adapter_name):
|
||||
if adapter_name is not None:
|
||||
return plugins.ActiveManifest().from_name(adapter_name)
|
||||
else:
|
||||
return from_filepath(filepath)
|
||||
|
||||
|
||||
def from_filepath(filepath):
|
||||
"""Guess the adapter object to use for a given filepath.
|
||||
|
||||
example:
|
||||
"foo.otio" returns the "otio_json" adapter.
|
||||
"""
|
||||
|
||||
outext = os.path.splitext(filepath)[1][1:]
|
||||
|
||||
try:
|
||||
return plugins.ActiveManifest().from_filepath(outext)
|
||||
except exceptions.NoKnownAdapterForExtensionError:
|
||||
raise exceptions.NoKnownAdapterForExtensionError(
|
||||
"No adapter for suffix '{}' on file '{}'".format(
|
||||
outext,
|
||||
filepath
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def from_name(name):
|
||||
"""Fetch the adapter object by the name of the adapter directly."""
|
||||
|
||||
try:
|
||||
return plugins.ActiveManifest().from_name(name)
|
||||
except exceptions.NotSupportedError:
|
||||
raise exceptions.NotSupportedError(
|
||||
"adapter not supported: {}, available: {}".format(
|
||||
name,
|
||||
available_adapter_names()
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def read_from_file(
|
||||
filepath,
|
||||
adapter_name=None,
|
||||
media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker,
|
||||
media_linker_argument_map=None,
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Read filepath using adapter_name.
|
||||
|
||||
If adapter_name is None, try and infer the adapter name from the filepath.
|
||||
|
||||
For example:
|
||||
timeline = read_from_file("example_trailer.otio")
|
||||
timeline = read_from_file("file_with_no_extension", "cmx_3600")
|
||||
"""
|
||||
|
||||
adapter = _from_filepath_or_name(filepath, adapter_name)
|
||||
|
||||
return adapter.read_from_file(
|
||||
filepath=filepath,
|
||||
media_linker_name=media_linker_name,
|
||||
media_linker_argument_map=media_linker_argument_map,
|
||||
**adapter_argument_map
|
||||
)
|
||||
|
||||
|
||||
def read_from_string(
|
||||
input_str,
|
||||
adapter_name='otio_json',
|
||||
media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker,
|
||||
media_linker_argument_map=None,
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Read a timeline from input_str using adapter_name.
|
||||
|
||||
This is useful if you obtain a timeline from someplace other than the
|
||||
filesystem.
|
||||
|
||||
Example:
|
||||
raw_text = urlopen(my_url).read()
|
||||
timeline = read_from_string(raw_text, "otio_json")
|
||||
"""
|
||||
|
||||
adapter = plugins.ActiveManifest().from_name(adapter_name)
|
||||
return adapter.read_from_string(
|
||||
input_str=input_str,
|
||||
media_linker_name=media_linker_name,
|
||||
media_linker_argument_map=media_linker_argument_map,
|
||||
**adapter_argument_map
|
||||
)
|
||||
|
||||
|
||||
def write_to_file(
|
||||
input_otio,
|
||||
filepath,
|
||||
adapter_name=None,
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Write input_otio to filepath using adapter_name.
|
||||
|
||||
If adapter_name is None, infer the adapter_name to use based on the
|
||||
filepath.
|
||||
|
||||
Example:
|
||||
otio.adapters.write_to_file(my_timeline, "output.otio")
|
||||
"""
|
||||
|
||||
adapter = _from_filepath_or_name(filepath, adapter_name)
|
||||
|
||||
return adapter.write_to_file(
|
||||
input_otio=input_otio,
|
||||
filepath=filepath,
|
||||
**adapter_argument_map
|
||||
)
|
||||
|
||||
|
||||
def write_to_string(
|
||||
input_otio,
|
||||
adapter_name='otio_json',
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Return input_otio written to a string using adapter_name.
|
||||
|
||||
Example:
|
||||
raw_text = otio.adapters.write_to_string(my_timeline, "otio_json")
|
||||
"""
|
||||
|
||||
adapter = plugins.ActiveManifest().from_name(adapter_name)
|
||||
return adapter.write_to_string(
|
||||
input_otio=input_otio,
|
||||
**adapter_argument_map
|
||||
)
|
||||
317
pype/vendor/python/python_2/opentimelineio/adapters/adapter.py
vendored
Normal file
317
pype/vendor/python/python_2/opentimelineio/adapters/adapter.py
vendored
Normal file
|
|
@ -0,0 +1,317 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implementation of the OTIO internal `Adapter` system.
|
||||
|
||||
For information on writing adapters, please consult:
|
||||
https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa
|
||||
"""
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
plugins,
|
||||
media_linker,
|
||||
hooks,
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Adapter(plugins.PythonPlugin):
|
||||
"""Adapters convert between OTIO and other formats.
|
||||
|
||||
Note that this class is not subclassed by adapters. Rather, an adapter is
|
||||
a python module that implements at least one of the following functions:
|
||||
|
||||
write_to_string(input_otio)
|
||||
write_to_file(input_otio, filepath) (optionally inferred)
|
||||
read_from_string(input_str)
|
||||
read_from_file(filepath) (optionally inferred)
|
||||
|
||||
...as well as a small json file that advertises the features of the adapter
|
||||
to OTIO. This class serves as the wrapper around these modules internal
|
||||
to OTIO. You should not need to extend this class to create new adapters
|
||||
for OTIO.
|
||||
|
||||
For more information:
|
||||
https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa
|
||||
"""
|
||||
_serializable_label = "Adapter.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
execution_scope=None,
|
||||
filepath=None,
|
||||
suffixes=None
|
||||
):
|
||||
plugins.PythonPlugin.__init__(
|
||||
self,
|
||||
name,
|
||||
execution_scope,
|
||||
filepath
|
||||
)
|
||||
|
||||
self.suffixes = suffixes or []
|
||||
|
||||
suffixes = core.serializable_field(
|
||||
"suffixes",
|
||||
type([]),
|
||||
doc="File suffixes associated with this adapter."
|
||||
)
|
||||
|
||||
def has_feature(self, feature_string):
|
||||
"""
|
||||
return true if adapter supports feature_string, which must be a key
|
||||
of the _FEATURE_MAP dictionary.
|
||||
|
||||
Will trigger a call to self.module(), which imports the plugin.
|
||||
"""
|
||||
|
||||
if feature_string.lower() not in _FEATURE_MAP:
|
||||
return False
|
||||
|
||||
search_strs = _FEATURE_MAP[feature_string]
|
||||
|
||||
try:
|
||||
return any(hasattr(self.module(), s) for s in search_strs)
|
||||
except ImportError:
|
||||
# @TODO: should issue a warning that the plugin was not importable?
|
||||
return False
|
||||
|
||||
def read_from_file(
|
||||
self,
|
||||
filepath,
|
||||
media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker,
|
||||
media_linker_argument_map=None,
|
||||
hook_function_argument_map={},
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Execute the read_from_file function on this adapter.
|
||||
|
||||
If read_from_string exists, but not read_from_file, execute that with
|
||||
a trivial file object wrapper.
|
||||
"""
|
||||
|
||||
if media_linker_argument_map is None:
|
||||
media_linker_argument_map = {}
|
||||
|
||||
result = None
|
||||
|
||||
if (
|
||||
not self.has_feature("read_from_file") and
|
||||
self.has_feature("read_from_string")
|
||||
):
|
||||
with open(filepath, 'r') as fo:
|
||||
contents = fo.read()
|
||||
result = self._execute_function(
|
||||
"read_from_string",
|
||||
input_str=contents,
|
||||
**adapter_argument_map
|
||||
)
|
||||
else:
|
||||
result = self._execute_function(
|
||||
"read_from_file",
|
||||
filepath=filepath,
|
||||
**adapter_argument_map
|
||||
)
|
||||
|
||||
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
|
||||
hook_function_argument_map['media_linker_argument_map'] = \
|
||||
media_linker_argument_map
|
||||
result = hooks.run("post_adapter_read", result,
|
||||
extra_args=hook_function_argument_map)
|
||||
|
||||
if media_linker_name and (
|
||||
media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia
|
||||
):
|
||||
_with_linked_media_references(
|
||||
result,
|
||||
media_linker_name,
|
||||
media_linker_argument_map
|
||||
)
|
||||
|
||||
result = hooks.run("post_media_linker", result,
|
||||
extra_args=media_linker_argument_map)
|
||||
|
||||
return result
|
||||
|
||||
def write_to_file(
|
||||
self,
|
||||
input_otio,
|
||||
filepath,
|
||||
hook_function_argument_map={},
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Execute the write_to_file function on this adapter.
|
||||
|
||||
If write_to_string exists, but not write_to_file, execute that with
|
||||
a trivial file object wrapper.
|
||||
"""
|
||||
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
|
||||
input_otio = hooks.run("pre_adapter_write", input_otio,
|
||||
extra_args=hook_function_argument_map)
|
||||
|
||||
if (
|
||||
not self.has_feature("write_to_file") and
|
||||
self.has_feature("write_to_string")
|
||||
):
|
||||
result = self.write_to_string(input_otio, **adapter_argument_map)
|
||||
with open(filepath, 'w') as fo:
|
||||
fo.write(result)
|
||||
return filepath
|
||||
|
||||
return self._execute_function(
|
||||
"write_to_file",
|
||||
input_otio=input_otio,
|
||||
filepath=filepath,
|
||||
**adapter_argument_map
|
||||
)
|
||||
|
||||
def read_from_string(
|
||||
self,
|
||||
input_str,
|
||||
media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker,
|
||||
media_linker_argument_map=None,
|
||||
hook_function_argument_map={},
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Call the read_from_string function on this adapter."""
|
||||
|
||||
result = self._execute_function(
|
||||
"read_from_string",
|
||||
input_str=input_str,
|
||||
**adapter_argument_map
|
||||
)
|
||||
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
|
||||
hook_function_argument_map['media_linker_argument_map'] = \
|
||||
media_linker_argument_map
|
||||
|
||||
result = hooks.run("post_adapter_read", result,
|
||||
extra_args=hook_function_argument_map)
|
||||
|
||||
if media_linker_name and (
|
||||
media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia
|
||||
):
|
||||
_with_linked_media_references(
|
||||
result,
|
||||
media_linker_name,
|
||||
media_linker_argument_map
|
||||
)
|
||||
|
||||
# @TODO: Should this run *ONLY* if the media linker ran?
|
||||
result = hooks.run("post_media_linker", result,
|
||||
extra_args=hook_function_argument_map)
|
||||
|
||||
return result
|
||||
|
||||
def write_to_string(
|
||||
self,
|
||||
input_otio,
|
||||
hook_function_argument_map={},
|
||||
**adapter_argument_map
|
||||
):
|
||||
"""Call the write_to_string function on this adapter."""
|
||||
|
||||
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
|
||||
input_otio = hooks.run("pre_adapter_write", input_otio,
|
||||
extra_args=hook_function_argument_map)
|
||||
|
||||
return self._execute_function(
|
||||
"write_to_string",
|
||||
input_otio=input_otio,
|
||||
**adapter_argument_map
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
"Adapter("
|
||||
"{}, "
|
||||
"{}, "
|
||||
"{}, "
|
||||
"{}"
|
||||
")".format(
|
||||
repr(self.name),
|
||||
repr(self.execution_scope),
|
||||
repr(self.filepath),
|
||||
repr(self.suffixes),
|
||||
)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.adapter.Adapter("
|
||||
"name={}, "
|
||||
"execution_scope={}, "
|
||||
"filepath={}, "
|
||||
"suffixes={}"
|
||||
")".format(
|
||||
repr(self.name),
|
||||
repr(self.execution_scope),
|
||||
repr(self.filepath),
|
||||
repr(self.suffixes),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _with_linked_media_references(
|
||||
read_otio,
|
||||
media_linker_name,
|
||||
media_linker_argument_map
|
||||
):
|
||||
"""Link media references in the read_otio if possible.
|
||||
|
||||
Makes changes in place and returns the read_otio structure back.
|
||||
"""
|
||||
|
||||
if not read_otio or not media_linker.from_name(media_linker_name):
|
||||
return read_otio
|
||||
|
||||
# not every object the adapter reads has an "each_clip" method, so this
|
||||
# skips objects without one.
|
||||
clpfn = getattr(read_otio, "each_clip", None)
|
||||
if clpfn is None:
|
||||
return read_otio
|
||||
|
||||
for cl in read_otio.each_clip():
|
||||
new_mr = media_linker.linked_media_reference(
|
||||
cl,
|
||||
media_linker_name,
|
||||
# @TODO: should any context get wired in at this point?
|
||||
media_linker_argument_map
|
||||
)
|
||||
if new_mr is not None:
|
||||
cl.media_reference = new_mr
|
||||
|
||||
return read_otio
|
||||
|
||||
|
||||
# map of attr to look for vs feature name in the adapter plugin
|
||||
_FEATURE_MAP = {
|
||||
'read_from_file': ['read_from_file'],
|
||||
'read_from_string': ['read_from_string'],
|
||||
'read': ['read_from_file', 'read_from_string'],
|
||||
'write_to_file': ['write_to_file'],
|
||||
'write_to_string': ['write_to_string'],
|
||||
'write': ['write_to_file', 'write_to_string']
|
||||
}
|
||||
31
pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json
vendored
Normal file
31
pype/vendor/python/python_2/opentimelineio/adapters/builtin_adapters.plugin_manifest.json
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
{
|
||||
"OTIO_SCHEMA" : "PluginManifest.1",
|
||||
"adapters": [
|
||||
{
|
||||
"OTIO_SCHEMA": "Adapter.1",
|
||||
"name": "fcp_xml",
|
||||
"execution_scope": "in process",
|
||||
"filepath": "fcp_xml.py",
|
||||
"suffixes": ["xml"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "otio_json",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "otio_json.py",
|
||||
"suffixes" : ["otio"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "cmx_3600",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "cmx_3600.py",
|
||||
"suffixes" : ["edl"]
|
||||
}
|
||||
],
|
||||
"hooks": {
|
||||
"post_adapter_read" : [],
|
||||
"post_media_linker" : [],
|
||||
"pre_adapter_write" : []
|
||||
}
|
||||
}
|
||||
1306
pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py
vendored
Normal file
1306
pype/vendor/python/python_2/opentimelineio/adapters/cmx_3600.py
vendored
Normal file
File diff suppressed because it is too large
Load diff
1941
pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py
vendored
Normal file
1941
pype/vendor/python/python_2/opentimelineio/adapters/fcp_xml.py
vendored
Normal file
File diff suppressed because it is too large
Load diff
48
pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py
vendored
Normal file
48
pype/vendor/python/python_2/opentimelineio/adapters/otio_json.py
vendored
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""This adapter lets you read and write native .otio files"""
|
||||
|
||||
from .. import (
|
||||
core
|
||||
)
|
||||
|
||||
|
||||
# @TODO: Implement out of process plugins that hand around JSON
|
||||
|
||||
|
||||
def read_from_file(filepath):
|
||||
return core.deserialize_json_from_file(filepath)
|
||||
|
||||
|
||||
def read_from_string(input_str):
|
||||
return core.deserialize_json_from_string(input_str)
|
||||
|
||||
|
||||
def write_to_string(input_otio):
|
||||
return core.serialize_json_to_string(input_otio)
|
||||
|
||||
|
||||
def write_to_file(input_otio, filepath):
|
||||
return core.serialize_json_to_file(input_otio, filepath)
|
||||
44
pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py
vendored
Normal file
44
pype/vendor/python/python_2/opentimelineio/algorithms/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Algorithms for OTIO objects."""
|
||||
|
||||
# flake8: noqa
|
||||
from .track_algo import (
|
||||
track_trimmed_to_range,
|
||||
track_with_expanded_transitions
|
||||
)
|
||||
|
||||
from .stack_algo import (
|
||||
flatten_stack,
|
||||
top_clip_at_time,
|
||||
)
|
||||
|
||||
from .filter import (
|
||||
filtered_composition,
|
||||
filtered_with_sequence_context
|
||||
)
|
||||
from .timeline_algo import (
|
||||
timeline_trimmed_to_range
|
||||
)
|
||||
275
pype/vendor/python/python_2/opentimelineio/algorithms/filter.py
vendored
Normal file
275
pype/vendor/python/python_2/opentimelineio/algorithms/filter.py
vendored
Normal file
|
|
@ -0,0 +1,275 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2018 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Algorithms for filtering OTIO files. """
|
||||
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
schema
|
||||
)
|
||||
|
||||
|
||||
def _is_in(thing, container):
|
||||
return any(thing is item for item in container)
|
||||
|
||||
|
||||
def _isinstance_in(child, typelist):
|
||||
return any(isinstance(child, t) for t in typelist)
|
||||
|
||||
|
||||
def filtered_composition(
|
||||
root,
|
||||
unary_filter_fn,
|
||||
types_to_prune=None,
|
||||
):
|
||||
"""Filter a deep copy of root (and children) with unary_filter_fn.
|
||||
|
||||
types_to_prune:: tuple of types, example: (otio.schema.Gap,...)
|
||||
|
||||
1. Make a deep copy of root
|
||||
2. Starting with root, perform a depth first traversal
|
||||
3. For each item (including root):
|
||||
a. if types_to_prune is not None and item is an instance of a type
|
||||
in types_to_prune, prune it from the copy, continue.
|
||||
b. Otherwise, pass the copy to unary_filter_fn. If unary_filter_fn:
|
||||
I. returns an object: add it to the copy, replacing original
|
||||
II. returns a tuple: insert it into the list, replacing original
|
||||
III. returns None: prune it
|
||||
4. If an item is pruned, do not traverse its children
|
||||
5. Return the new deep copy.
|
||||
|
||||
EXAMPLE 1 (filter):
|
||||
If your unary function is:
|
||||
def fn(thing):
|
||||
if thing.name == B:
|
||||
return thing' # some transformation of B
|
||||
else:
|
||||
return thing
|
||||
|
||||
If you have a track: [A,B,C]
|
||||
|
||||
filtered_composition(track, fn) => [A,B',C]
|
||||
|
||||
EXAMPLE 2 (prune):
|
||||
If your unary function is:
|
||||
def fn(thing):
|
||||
if thing.name == B:
|
||||
return None
|
||||
else:
|
||||
return thing
|
||||
|
||||
filtered_composition(track, fn) => [A,C]
|
||||
|
||||
EXAMPLE 3 (expand):
|
||||
If your unary function is:
|
||||
def fn(thing):
|
||||
if thing.name == B:
|
||||
return tuple(B_1,B_2,B_3)
|
||||
else:
|
||||
return thing
|
||||
|
||||
filtered_composition(track, fn) => [A,B_1,B_2,B_3,C]
|
||||
|
||||
EXAMPLE 4 (prune gaps):
|
||||
track :: [Gap, A, Gap]
|
||||
filtered_composition(
|
||||
track, lambda _:_, types_to_prune=(otio.schema.Gap,)) => [A]
|
||||
"""
|
||||
|
||||
# deep copy everything
|
||||
mutable_object = copy.deepcopy(root)
|
||||
|
||||
prune_list = set()
|
||||
|
||||
header_list = [mutable_object]
|
||||
|
||||
if isinstance(mutable_object, schema.Timeline):
|
||||
header_list.append(mutable_object.tracks)
|
||||
|
||||
iter_list = header_list + list(mutable_object.each_child())
|
||||
|
||||
for child in iter_list:
|
||||
if _safe_parent(child) is not None and _is_in(child.parent(), prune_list):
|
||||
prune_list.add(child)
|
||||
continue
|
||||
|
||||
parent = None
|
||||
child_index = None
|
||||
if _safe_parent(child) is not None:
|
||||
child_index = child.parent().index(child)
|
||||
parent = child.parent()
|
||||
del child.parent()[child_index]
|
||||
|
||||
# first try to prune
|
||||
if (types_to_prune and _isinstance_in(child, types_to_prune)):
|
||||
result = None
|
||||
# finally call the user function
|
||||
else:
|
||||
result = unary_filter_fn(child)
|
||||
|
||||
if child is mutable_object:
|
||||
mutable_object = result
|
||||
|
||||
if result is None:
|
||||
prune_list.add(child)
|
||||
continue
|
||||
|
||||
if type(result) is not tuple:
|
||||
result = [result]
|
||||
|
||||
if parent is not None:
|
||||
parent[child_index:child_index] = result
|
||||
|
||||
return mutable_object
|
||||
|
||||
|
||||
def _safe_parent(child):
|
||||
if hasattr(child, 'parent'):
|
||||
return child.parent()
|
||||
return None
|
||||
|
||||
|
||||
def filtered_with_sequence_context(
|
||||
root,
|
||||
reduce_fn,
|
||||
types_to_prune=None,
|
||||
):
|
||||
"""Filter a deep copy of root (and children) with reduce_fn.
|
||||
|
||||
reduce_fn::function(previous_item, current, next_item) (see below)
|
||||
types_to_prune:: tuple of types, example: (otio.schema.Gap,...)
|
||||
|
||||
1. Make a deep copy of root
|
||||
2. Starting with root, perform a depth first traversal
|
||||
3. For each item (including root):
|
||||
a. if types_to_prune is not None and item is an instance of a type
|
||||
in types_to_prune, prune it from the copy, continue.
|
||||
b. Otherwise, pass (prev, copy, and next) to reduce_fn. If reduce_fn:
|
||||
I. returns an object: add it to the copy, replacing original
|
||||
II. returns a tuple: insert it into the list, replacing original
|
||||
III. returns None: prune it
|
||||
|
||||
** note that reduce_fn is always passed objects from the original
|
||||
deep copy, not what prior calls return. See below for examples
|
||||
4. If an item is pruned, do not traverse its children
|
||||
5. Return the new deep copy.
|
||||
|
||||
EXAMPLE 1 (filter):
|
||||
>>> track = [A,B,C]
|
||||
>>> def fn(prev_item, thing, next_item):
|
||||
... if prev_item.name == A:
|
||||
... return D # some new clip
|
||||
... else:
|
||||
... return thing
|
||||
>>> filtered_with_sequence_context(track, fn) => [A,D,C]
|
||||
|
||||
order of calls to fn:
|
||||
fn(None, A, B) => A
|
||||
fn(A, B, C) => D
|
||||
fn(B, C, D) => C # !! note that it was passed B instead of D.
|
||||
|
||||
EXAMPLE 2 (prune):
|
||||
>>> track = [A,B,C]
|
||||
>>> def fn(prev_item, thing, next_item):
|
||||
... if prev_item.name == A:
|
||||
... return None # prune the clip
|
||||
... else:
|
||||
... return thing
|
||||
>>> filtered_with_sequence_context(track, fn) => [A,C]
|
||||
|
||||
order of calls to fn:
|
||||
fn(None, A, B) => A
|
||||
fn(A, B, C) => None
|
||||
fn(B, C, D) => C # !! note that it was passed B instead of D.
|
||||
|
||||
EXAMPLE 3 (expand):
|
||||
>>> def fn(prev_item, thing, next_item):
|
||||
... if prev_item.name == A:
|
||||
... return (D, E) # tuple of new clips
|
||||
... else:
|
||||
... return thing
|
||||
>>> filtered_with_sequence_context(track, fn) => [A, D, E, C]
|
||||
|
||||
the order of calls to fn will be:
|
||||
fn(None, A, B) => A
|
||||
fn(A, B, C) => (D, E)
|
||||
fn(B, C, D) => C # !! note that it was passed B instead of D.
|
||||
"""
|
||||
|
||||
# deep copy everything
|
||||
mutable_object = copy.deepcopy(root)
|
||||
|
||||
prune_list = set()
|
||||
|
||||
header_list = [mutable_object]
|
||||
|
||||
if isinstance(mutable_object, schema.Timeline):
|
||||
header_list.append(mutable_object.tracks)
|
||||
|
||||
iter_list = header_list + list(mutable_object.each_child())
|
||||
|
||||
# expand to include prev, next when appropriate
|
||||
expanded_iter_list = []
|
||||
for child in iter_list:
|
||||
if _safe_parent(child) and isinstance(child.parent(), schema.Track):
|
||||
prev_item, next_item = child.parent().neighbors_of(child)
|
||||
expanded_iter_list.append((prev_item, child, next_item))
|
||||
else:
|
||||
expanded_iter_list.append((None, child, None))
|
||||
|
||||
for prev_item, child, next_item in expanded_iter_list:
|
||||
if _safe_parent(child) is not None and _is_in(child.parent(), prune_list):
|
||||
prune_list.add(child)
|
||||
continue
|
||||
|
||||
parent = None
|
||||
child_index = None
|
||||
if _safe_parent(child) is not None:
|
||||
child_index = child.parent().index(child)
|
||||
parent = child.parent()
|
||||
del child.parent()[child_index]
|
||||
|
||||
# first try to prune
|
||||
if types_to_prune and _isinstance_in(child, types_to_prune):
|
||||
result = None
|
||||
# finally call the user function
|
||||
else:
|
||||
result = reduce_fn(prev_item, child, next_item)
|
||||
|
||||
if child is mutable_object:
|
||||
mutable_object = result
|
||||
|
||||
if result is None:
|
||||
prune_list.add(child)
|
||||
continue
|
||||
|
||||
if type(result) is not tuple:
|
||||
result = [result]
|
||||
|
||||
if parent is not None:
|
||||
parent[child_index:child_index] = result
|
||||
|
||||
return mutable_object
|
||||
138
pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py
vendored
Normal file
138
pype/vendor/python/python_2/opentimelineio/algorithms/stack_algo.py
vendored
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
__doc__ = """ Algorithms for stack objects. """
|
||||
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
schema,
|
||||
opentime,
|
||||
)
|
||||
from . import (
|
||||
track_algo
|
||||
)
|
||||
|
||||
|
||||
def top_clip_at_time(in_stack, t):
|
||||
"""Return the topmost visible child that overlaps with time t.
|
||||
|
||||
Example:
|
||||
tr1: G1, A, G2
|
||||
tr2: [B------]
|
||||
G1, and G2 are gaps, A and B are clips.
|
||||
|
||||
If t is within A, a will be returned. If t is within G1 or G2, B will be
|
||||
returned.
|
||||
"""
|
||||
|
||||
# ensure that it only runs on stacks
|
||||
if not isinstance(in_stack, schema.Stack):
|
||||
raise ValueError(
|
||||
"Argument in_stack must be of type otio.schema.Stack, "
|
||||
"not: '{}'".format(
|
||||
type(in_stack)
|
||||
)
|
||||
)
|
||||
|
||||
# build a range to use the `each_child`method.
|
||||
search_range = opentime.TimeRange(
|
||||
start_time=t,
|
||||
# 0 duration so we are just sampling a point in time.
|
||||
# XXX Should this duration be equal to the length of one sample?
|
||||
# opentime.RationalTime(1, rate)?
|
||||
duration=opentime.RationalTime(0, t.rate)
|
||||
)
|
||||
|
||||
# walk through the children of the stack in reverse order.
|
||||
for track in reversed(in_stack):
|
||||
valid_results = []
|
||||
if hasattr(track, "each_child"):
|
||||
valid_results = list(
|
||||
c for c in track.each_clip(search_range, shallow_search=True)
|
||||
if c.visible()
|
||||
)
|
||||
|
||||
# XXX doesn't handle nested tracks/stacks at the moment
|
||||
|
||||
for result in valid_results:
|
||||
return result
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def flatten_stack(in_stack):
|
||||
"""Flatten a Stack, or a list of Tracks, into a single Track.
|
||||
Note that the 1st Track is the bottom one, and the last is the top.
|
||||
"""
|
||||
|
||||
flat_track = schema.Track()
|
||||
flat_track.name = "Flattened"
|
||||
|
||||
# map of track to track.range_of_all_children
|
||||
range_track_map = {}
|
||||
|
||||
def _get_next_item(
|
||||
in_stack,
|
||||
track_index=None,
|
||||
trim_range=None
|
||||
):
|
||||
if track_index is None:
|
||||
# start with the top-most track
|
||||
track_index = len(in_stack) - 1
|
||||
if track_index < 0:
|
||||
# if you get to the bottom, you're done
|
||||
return
|
||||
|
||||
track = in_stack[track_index]
|
||||
if trim_range is not None:
|
||||
track = track_algo.track_trimmed_to_range(track, trim_range)
|
||||
|
||||
track_map = range_track_map.get(track)
|
||||
if track_map is None:
|
||||
track_map = track.range_of_all_children()
|
||||
range_track_map[track] = track_map
|
||||
|
||||
for item in track:
|
||||
if (
|
||||
item.visible()
|
||||
or track_index == 0
|
||||
or isinstance(item, schema.Transition)
|
||||
):
|
||||
yield item
|
||||
else:
|
||||
trim = track_map[item]
|
||||
if trim_range is not None:
|
||||
trim = opentime.TimeRange(
|
||||
start_time=trim.start_time + trim_range.start_time,
|
||||
duration=trim.duration
|
||||
)
|
||||
track_map[item] = trim
|
||||
for more in _get_next_item(in_stack, track_index - 1, trim):
|
||||
yield more
|
||||
|
||||
for item in _get_next_item(in_stack):
|
||||
flat_track.append(copy.deepcopy(item))
|
||||
|
||||
return flat_track
|
||||
56
pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
vendored
Normal file
56
pype/vendor/python/python_2/opentimelineio/algorithms/timeline_algo.py
vendored
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
#
|
||||
# Copyright 2019 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Algorithms for timeline objects."""
|
||||
|
||||
import copy
|
||||
|
||||
from . import (
|
||||
track_algo
|
||||
)
|
||||
|
||||
|
||||
def timeline_trimmed_to_range(in_timeline, trim_range):
|
||||
"""Returns a new timeline that is a copy of the in_timeline, but with items
|
||||
outside the trim_range removed and items on the ends trimmed to the
|
||||
trim_range. Note that the timeline is never expanded, only shortened.
|
||||
Please note that you could do nearly the same thing non-destructively by
|
||||
just setting the Track's source_range but sometimes you want to really cut
|
||||
away the stuff outside and that's what this function is meant for."""
|
||||
new_timeline = copy.deepcopy(in_timeline)
|
||||
|
||||
for track_num, child_track in enumerate(in_timeline.tracks):
|
||||
# @TODO: put the trim_range into the space of the tracks
|
||||
# new_range = new_timeline.tracks.transformed_time_range(
|
||||
# trim_range,
|
||||
# child_track
|
||||
# )
|
||||
|
||||
# trim the track and assign it to the new stack.
|
||||
new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range(
|
||||
child_track,
|
||||
trim_range
|
||||
)
|
||||
|
||||
return new_timeline
|
||||
236
pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py
vendored
Normal file
236
pype/vendor/python/python_2/opentimelineio/algorithms/track_algo.py
vendored
Normal file
|
|
@ -0,0 +1,236 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Algorithms for track objects."""
|
||||
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
schema,
|
||||
exceptions,
|
||||
opentime,
|
||||
)
|
||||
|
||||
|
||||
def track_trimmed_to_range(in_track, trim_range):
|
||||
"""Returns a new track that is a copy of the in_track, but with items
|
||||
outside the trim_range removed and items on the ends trimmed to the
|
||||
trim_range. Note that the track is never expanded, only shortened.
|
||||
Please note that you could do nearly the same thing non-destructively by
|
||||
just setting the Track's source_range but sometimes you want to really cut
|
||||
away the stuff outside and that's what this function is meant for."""
|
||||
new_track = copy.deepcopy(in_track)
|
||||
|
||||
track_map = new_track.range_of_all_children()
|
||||
|
||||
# iterate backwards so we can delete items
|
||||
for c, child in reversed(list(enumerate(new_track))):
|
||||
child_range = track_map[child]
|
||||
if not trim_range.overlaps(child_range):
|
||||
# completely outside the trim range, so we discard it
|
||||
del new_track[c]
|
||||
elif trim_range.contains(child_range):
|
||||
# completely contained, keep the whole thing
|
||||
pass
|
||||
else:
|
||||
if isinstance(child, schema.Transition):
|
||||
raise exceptions.CannotTrimTransitionsError(
|
||||
"Cannot trim in the middle of a Transition."
|
||||
)
|
||||
|
||||
# we need to clip the end(s)
|
||||
child_source_range = child.trimmed_range()
|
||||
|
||||
# should we trim the start?
|
||||
if trim_range.start_time > child_range.start_time:
|
||||
trim_amount = trim_range.start_time - child_range.start_time
|
||||
child_source_range = opentime.TimeRange(
|
||||
start_time=child_source_range.start_time + trim_amount,
|
||||
duration=child_source_range.duration - trim_amount
|
||||
|
||||
)
|
||||
|
||||
# should we trim the end?
|
||||
trim_end = trim_range.end_time_exclusive()
|
||||
child_end = child_range.end_time_exclusive()
|
||||
if trim_end < child_end:
|
||||
trim_amount = child_end - trim_end
|
||||
child_source_range = opentime.TimeRange(
|
||||
start_time=child_source_range.start_time,
|
||||
duration=child_source_range.duration - trim_amount
|
||||
|
||||
)
|
||||
|
||||
# set the new child's trims
|
||||
child.source_range = child_source_range
|
||||
|
||||
return new_track
|
||||
|
||||
|
||||
def track_with_expanded_transitions(in_track):
|
||||
"""Expands transitions such that neighboring clips are trimmed into
|
||||
regions of overlap.
|
||||
|
||||
For example, if your track is:
|
||||
Clip1, T, Clip2
|
||||
|
||||
will return:
|
||||
Clip1', Clip1_t, T, Clip2_t, Clip2'
|
||||
|
||||
Where Clip1' is the part of Clip1 not in the transition, Clip1_t is the
|
||||
part inside the transition and so on.
|
||||
"""
|
||||
|
||||
result_track = []
|
||||
|
||||
seq_iter = iter(in_track)
|
||||
prev_thing = None
|
||||
thing = next(seq_iter, None)
|
||||
next_thing = next(seq_iter, None)
|
||||
|
||||
while thing is not None:
|
||||
if isinstance(thing, schema.Transition):
|
||||
result_track.append(_expand_transition(thing, in_track))
|
||||
else:
|
||||
# not a transition, but might be trimmed by one before or after
|
||||
# in the track
|
||||
pre_transition = None
|
||||
next_transition = None
|
||||
|
||||
if isinstance(prev_thing, schema.Transition):
|
||||
pre_transition = prev_thing
|
||||
|
||||
if isinstance(next_thing, schema.Transition):
|
||||
next_transition = next_thing
|
||||
|
||||
result_track.append(
|
||||
_trim_from_transitions(
|
||||
thing,
|
||||
pre=pre_transition,
|
||||
post=next_transition
|
||||
)
|
||||
)
|
||||
|
||||
# loop
|
||||
prev_thing = thing
|
||||
thing = next_thing
|
||||
next_thing = next(seq_iter, None)
|
||||
|
||||
return result_track
|
||||
|
||||
|
||||
def _expand_transition(target_transition, from_track):
|
||||
""" Expand transitions into the portions of pre-and-post clips that
|
||||
overlap with the transition.
|
||||
"""
|
||||
|
||||
result = from_track.neighbors_of(
|
||||
target_transition,
|
||||
schema.NeighborGapPolicy.around_transitions
|
||||
)
|
||||
|
||||
trx_duration = target_transition.in_offset + target_transition.out_offset
|
||||
|
||||
# make copies of the before and after, and modify their in/out points
|
||||
pre = copy.deepcopy(result.previous)
|
||||
|
||||
if isinstance(pre, schema.Transition):
|
||||
raise exceptions.TransitionFollowingATransitionError(
|
||||
"cannot put two transitions next to each other in a track: "
|
||||
"{}, {}".format(
|
||||
pre,
|
||||
target_transition
|
||||
)
|
||||
)
|
||||
if target_transition.in_offset is None:
|
||||
raise RuntimeError(
|
||||
"in_offset is None on: {}".format(target_transition)
|
||||
)
|
||||
|
||||
if target_transition.out_offset is None:
|
||||
raise RuntimeError(
|
||||
"out_offset is None on: {}".format(target_transition)
|
||||
)
|
||||
|
||||
pre.name = (pre.name or "") + "_transition_pre"
|
||||
|
||||
# ensure that pre.source_range is set, because it will get manipulated
|
||||
tr = pre.trimmed_range()
|
||||
|
||||
pre.source_range = opentime.TimeRange(
|
||||
start_time=(
|
||||
tr.end_time_exclusive() - target_transition.in_offset
|
||||
),
|
||||
duration=trx_duration.rescaled_to(
|
||||
tr.start_time
|
||||
)
|
||||
)
|
||||
|
||||
post = copy.deepcopy(result.next)
|
||||
if isinstance(post, schema.Transition):
|
||||
raise exceptions.TransitionFollowingATransitionError(
|
||||
"cannot put two transitions next to each other in a track: "
|
||||
"{}, {}".format(
|
||||
target_transition,
|
||||
post
|
||||
)
|
||||
)
|
||||
|
||||
post.name = (post.name or "") + "_transition_post"
|
||||
|
||||
# ensure that post.source_range is set, because it will get manipulated
|
||||
tr = post.trimmed_range()
|
||||
|
||||
post.source_range = opentime.TimeRange(
|
||||
start_time=(
|
||||
tr.start_time - target_transition.in_offset
|
||||
).rescaled_to(tr.start_time),
|
||||
duration=trx_duration.rescaled_to(tr.start_time)
|
||||
)
|
||||
|
||||
return pre, target_transition, post
|
||||
|
||||
|
||||
def _trim_from_transitions(thing, pre=None, post=None):
|
||||
""" Trim clips next to transitions. """
|
||||
|
||||
result = copy.deepcopy(thing)
|
||||
|
||||
# We might not have a source_range yet,
|
||||
# We can trim to the computed trimmed_range to
|
||||
# ensure we have something.
|
||||
new_range = result.trimmed_range()
|
||||
start_time = new_range.start_time
|
||||
duration = new_range.duration
|
||||
|
||||
if pre:
|
||||
start_time += pre.out_offset
|
||||
duration -= pre.out_offset
|
||||
|
||||
if post:
|
||||
duration -= post.in_offset
|
||||
|
||||
result.source_range = opentime.TimeRange(start_time, duration)
|
||||
|
||||
return result
|
||||
40
pype/vendor/python/python_2/opentimelineio/console/__init__.py
vendored
Normal file
40
pype/vendor/python/python_2/opentimelineio/console/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
#
|
||||
# Copyright 2018 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Console scripts for OpenTimelineIO
|
||||
|
||||
.. moduleauthor:: Pixar Animation Studios <opentimelineio@pixar.com>
|
||||
"""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
# in dependency hierarchy
|
||||
from . import (
|
||||
otioconvert,
|
||||
otiocat,
|
||||
otiostat,
|
||||
console_utils,
|
||||
autogen_serialized_datamodel,
|
||||
)
|
||||
|
||||
302
pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py
vendored
Normal file
302
pype/vendor/python/python_2/opentimelineio/console/autogen_serialized_datamodel.py
vendored
Normal file
|
|
@ -0,0 +1,302 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2019 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
|
||||
"""Generates documentation of the serialized data model for OpenTimelineIO."""
|
||||
|
||||
import argparse
|
||||
import inspect
|
||||
import json
|
||||
import tempfile
|
||||
import sys
|
||||
|
||||
try:
|
||||
# python2
|
||||
import StringIO as io
|
||||
except ImportError:
|
||||
# python3
|
||||
import io
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
|
||||
DOCUMENT_HEADER = """# OpenTimelineIO Serialized Data Documentation
|
||||
|
||||
This document is a list of all the OpenTimelineIO classes that serialize to and
|
||||
from JSON, omitting SchemaDef plugins.
|
||||
|
||||
This document is automatically generated by running
|
||||
docs/autogen_serialized_datamodel.py, or by running `make doc-model`. It is
|
||||
part of the unit tests suite and should be updated whenever the schema changes.
|
||||
If it needs to be updated, run: `make doc-model-update` and this file should be
|
||||
regenerated.
|
||||
|
||||
# Classes
|
||||
|
||||
"""
|
||||
|
||||
FIELDS_ONLY_HEADER = """# OpenTimelineIO Serialized Data Documentation
|
||||
|
||||
This document is a list of all the OpenTimelineIO classes that serialize to and
|
||||
from JSON, omitting plugins classes and docstrings.
|
||||
|
||||
This document is automatically generated by running
|
||||
docs/autogen_serialized_datamodel.py, or by running `make doc-model`. It is
|
||||
part of the unit tests suite and should be updated whenever the schema changes.
|
||||
If it needs to be updated, run: `make doc-model-update` and this file should be
|
||||
regenerated.
|
||||
|
||||
# Classes
|
||||
|
||||
"""
|
||||
|
||||
CLASS_HEADER_WITH_DOCS = """
|
||||
### {classname}
|
||||
|
||||
*full module path*: `{modpath}`
|
||||
|
||||
*documentation*:
|
||||
|
||||
```
|
||||
{docstring}
|
||||
```
|
||||
|
||||
parameters:
|
||||
"""
|
||||
|
||||
CLASS_HEADER_ONLY_FIELDS = """
|
||||
### {classname}
|
||||
|
||||
parameters:
|
||||
"""
|
||||
|
||||
MODULE_HEADER = """
|
||||
## Module: {modname}
|
||||
"""
|
||||
|
||||
PROP_HEADER = """- *{propkey}*: {prophelp}
|
||||
"""
|
||||
|
||||
# @TODO: having type information here would be awesome
|
||||
PROP_HEADER_NO_HELP = """- *{propkey}*
|
||||
"""
|
||||
|
||||
# three ways to try and get the property + docstring
|
||||
PROP_FETCHERS = (
|
||||
lambda cl, k: inspect.getdoc(getattr(cl, k)),
|
||||
lambda cl, k: inspect.getdoc(getattr(cl, "_" + k)),
|
||||
lambda cl, k: inspect.getdoc(getattr(cl(), k)) and "" or "",
|
||||
)
|
||||
|
||||
|
||||
def _parsed_args():
|
||||
""" parse commandline arguments with argparse """
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter
|
||||
)
|
||||
group = parser.add_mutually_exclusive_group()
|
||||
group.add_argument(
|
||||
"-d",
|
||||
"--dryrun",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Dryrun mode - print out instead of perform actions"
|
||||
)
|
||||
group.add_argument(
|
||||
"-o",
|
||||
"--output",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Update the baseline with the current version"
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
# things to skip
|
||||
SKIP_CLASSES = [otio.core.SerializableObject, otio.core.UnknownSchema]
|
||||
SKIP_KEYS = ["OTIO_SCHEMA"] # not data, just for the backing format
|
||||
SKIP_MODULES = ["opentimelineio.schemadef"] # because these are plugins
|
||||
|
||||
|
||||
def _generate_model_for_module(mod, classes, modules):
|
||||
modules.add(mod)
|
||||
|
||||
# fetch the classes from this module
|
||||
serializeable_classes = [
|
||||
thing for thing in mod.__dict__.values()
|
||||
if (
|
||||
inspect.isclass(thing)
|
||||
and thing not in classes
|
||||
and issubclass(thing, otio.core.SerializableObject)
|
||||
or thing in (
|
||||
otio.opentime.RationalTime,
|
||||
otio.opentime.TimeRange,
|
||||
otio.opentime.TimeTransform,
|
||||
)
|
||||
)
|
||||
]
|
||||
|
||||
# serialize/deserialize the classes to capture their serialized parameters
|
||||
model = {}
|
||||
for cl in serializeable_classes:
|
||||
if cl in SKIP_CLASSES:
|
||||
continue
|
||||
|
||||
model[cl] = {}
|
||||
field_dict = json.loads(otio.adapters.otio_json.write_to_string(cl()))
|
||||
for k in field_dict.keys():
|
||||
if k in SKIP_KEYS:
|
||||
continue
|
||||
|
||||
for fetcher in PROP_FETCHERS:
|
||||
try:
|
||||
model[cl][k] = fetcher(cl, k)
|
||||
break
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
sys.stderr.write("ERROR: could not fetch property: {}".format(k))
|
||||
|
||||
# Stashing the OTIO_SCHEMA back into the dictionary since the
|
||||
# documentation uses this information in its header.
|
||||
model[cl]["OTIO_SCHEMA"] = field_dict["OTIO_SCHEMA"]
|
||||
|
||||
classes.update(model)
|
||||
|
||||
# find new modules to recurse into
|
||||
new_mods = sorted(
|
||||
(
|
||||
thing for thing in mod.__dict__.values()
|
||||
if (
|
||||
inspect.ismodule(thing)
|
||||
and thing not in modules
|
||||
and all(not thing.__name__.startswith(t) for t in SKIP_MODULES)
|
||||
)
|
||||
),
|
||||
key=lambda mod: str(mod)
|
||||
)
|
||||
|
||||
# recurse into the new modules and update the classes and modules values
|
||||
[_generate_model_for_module(m, classes, modules) for m in new_mods]
|
||||
|
||||
|
||||
def _generate_model():
|
||||
classes = {}
|
||||
modules = set()
|
||||
_generate_model_for_module(otio, classes, modules)
|
||||
return classes
|
||||
|
||||
|
||||
def _write_documentation(model):
|
||||
md_with_helpstrings = io.StringIO()
|
||||
md_only_fields = io.StringIO()
|
||||
|
||||
md_with_helpstrings.write(DOCUMENT_HEADER)
|
||||
md_only_fields.write(FIELDS_ONLY_HEADER)
|
||||
|
||||
modules = {}
|
||||
for cl in model:
|
||||
modules.setdefault(cl.__module__, []).append(cl)
|
||||
|
||||
CURRENT_MODULE = None
|
||||
for module_list in sorted(modules):
|
||||
this_mod = ".".join(module_list.split('.')[:2])
|
||||
if this_mod != CURRENT_MODULE:
|
||||
CURRENT_MODULE = this_mod
|
||||
md_with_helpstrings.write(MODULE_HEADER.format(modname=this_mod))
|
||||
md_only_fields.write(MODULE_HEADER.format(modname=this_mod))
|
||||
|
||||
# because these are classes, they need to sort on their stringified
|
||||
# names
|
||||
for cl in sorted(modules[module_list], key=lambda cl: str(cl)):
|
||||
modname = inspect.getmodule(cl).__name__
|
||||
label = model[cl]["OTIO_SCHEMA"]
|
||||
md_with_helpstrings.write(
|
||||
CLASS_HEADER_WITH_DOCS.format(
|
||||
classname=label,
|
||||
modpath=modname + "." + cl.__name__,
|
||||
docstring=cl.__doc__
|
||||
)
|
||||
)
|
||||
md_only_fields.write(
|
||||
CLASS_HEADER_ONLY_FIELDS.format(
|
||||
classname=label,
|
||||
)
|
||||
)
|
||||
|
||||
for key, helpstr in sorted(model[cl].items()):
|
||||
if key in SKIP_KEYS:
|
||||
continue
|
||||
md_with_helpstrings.write(
|
||||
PROP_HEADER.format(propkey=key, prophelp=helpstr)
|
||||
)
|
||||
md_only_fields.write(
|
||||
PROP_HEADER_NO_HELP.format(propkey=key)
|
||||
)
|
||||
|
||||
return md_with_helpstrings.getvalue(), md_only_fields.getvalue()
|
||||
|
||||
|
||||
def main():
|
||||
""" main entry point """
|
||||
args = _parsed_args()
|
||||
with_docs, without_docs = generate_and_write_documentation()
|
||||
|
||||
# print it out somewhere
|
||||
if args.dryrun:
|
||||
print(with_docs)
|
||||
return
|
||||
|
||||
output = args.output
|
||||
if not output:
|
||||
output = tempfile.NamedTemporaryFile(
|
||||
'w',
|
||||
suffix="otio_serialized_schema.md",
|
||||
delete=False
|
||||
).name
|
||||
|
||||
with open(output, 'w') as fo:
|
||||
fo.write(with_docs)
|
||||
|
||||
# write version without docstrings
|
||||
prefix, suffix = output.rsplit('.', 1)
|
||||
output_only_fields = prefix + "-only-fields." + suffix
|
||||
|
||||
with open(output_only_fields, 'w') as fo:
|
||||
fo.write(without_docs)
|
||||
|
||||
print("wrote documentation to {} and {}".format(output, output_only_fields))
|
||||
|
||||
|
||||
def generate_and_write_documentation():
|
||||
model = _generate_model()
|
||||
return _write_documentation(model)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
72
pype/vendor/python/python_2/opentimelineio/console/console_utils.py
vendored
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
#
|
||||
# Copyright 2019 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
import ast
|
||||
|
||||
from .. import (
|
||||
media_linker,
|
||||
)
|
||||
|
||||
"""Utilities for OpenTimelineIO commandline modules."""
|
||||
|
||||
|
||||
def arg_list_to_map(arg_list, label):
|
||||
"""
|
||||
Convert an argument of the form -A foo=bar from the parsed result to a map.
|
||||
"""
|
||||
|
||||
argument_map = {}
|
||||
for pair in arg_list:
|
||||
if '=' not in pair:
|
||||
raise ValueError(
|
||||
"error: {} arguments must be in the form key=value"
|
||||
" got: {}".format(label, pair)
|
||||
)
|
||||
|
||||
key, val = pair.split('=', 1) # only split on the 1st '='
|
||||
try:
|
||||
# Sometimes we need to pass a bool, int, list, etc.
|
||||
parsed_value = ast.literal_eval(val)
|
||||
except (ValueError, SyntaxError):
|
||||
# Fall back to a simple string
|
||||
parsed_value = val
|
||||
argument_map[key] = parsed_value
|
||||
|
||||
return argument_map
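A minimal usage sketch (not part of the vendored file) of how `arg_list_to_map` coerces values with `ast.literal_eval`, assuming the module is importable as `opentimelineio.console.console_utils`:

from opentimelineio.console.console_utils import arg_list_to_map

# literal values are parsed; anything else falls back to a plain string
print(arg_list_to_map(["rate=24", "embed=True", "name=burrito"], "adapter"))
# -> {'rate': 24, 'embed': True, 'name': 'burrito'}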
|
||||
|
||||
|
||||
def media_linker_name(ml_name_arg):
|
||||
"""
|
||||
Parse commandline arguments for the media linker, which can be not set
|
||||
(fall back to default), "" or "none" (don't link media) or the name of a
|
||||
media linker to use.
|
||||
"""
|
||||
if ml_name_arg.lower() == 'default':
|
||||
media_linker_name = media_linker.MediaLinkingPolicy.ForceDefaultLinker
|
||||
elif ml_name_arg.lower() in ['none', '']:
|
||||
media_linker_name = media_linker.MediaLinkingPolicy.DoNotLinkMedia
|
||||
else:
|
||||
media_linker_name = ml_name_arg
|
||||
|
||||
return media_linker_name
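A small sketch (not part of the vendored file) of the three possible outcomes, assuming `opentimelineio` is importable:

from opentimelineio import media_linker
from opentimelineio.console.console_utils import media_linker_name

assert media_linker_name("Default") == media_linker.MediaLinkingPolicy.ForceDefaultLinker
assert media_linker_name("") == media_linker.MediaLinkingPolicy.DoNotLinkMedia
assert media_linker_name("my_studio_linker") == "my_studio_linker"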
|
||||
138
pype/vendor/python/python_2/opentimelineio/console/otiocat.py
vendored
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Print the contents of an OTIO file to stdout."""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
|
||||
def _parsed_args():
|
||||
""" parse commandline arguments with argparse """
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter
|
||||
)
|
||||
parser.add_argument(
|
||||
'filepath',
|
||||
type=str,
|
||||
nargs='+',
|
||||
help='files to print the contents of'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-a',
|
||||
'--adapter-arg',
|
||||
type=str,
|
||||
default=[],
|
||||
action='append',
|
||||
help='Extra arguments to be passed to input adapter in the form of '
|
||||
'key=value. Values are strings, numbers or Python literals: True, '
|
||||
'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-m',
|
||||
'--media-linker',
|
||||
type=str,
|
||||
default="Default",
|
||||
help=(
|
||||
"Specify a media linker. 'Default' means use the "
|
||||
"$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly "
|
||||
"disable the linker, and anything else is interpreted as the name"
|
||||
" of the media linker to use."
|
||||
)
|
||||
)
|
||||
parser.add_argument(
|
||||
'-M',
|
||||
'--media-linker-arg',
|
||||
type=str,
|
||||
default=[],
|
||||
action='append',
|
||||
help='Extra arguments to be passed to the media linker in the form of '
|
||||
'key=value. Values are strings, numbers or Python literals: True, '
|
||||
'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.'
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def _otio_compatible_file_to_json_string(
|
||||
fpath,
|
||||
media_linker_name,
|
||||
media_linker_argument_map,
|
||||
adapter_argument_map
|
||||
):
|
||||
"""Read the file at fpath with the default otio adapter and return the json
|
||||
as a string.
|
||||
"""
|
||||
|
||||
adapter = otio.adapters.from_name("otio_json")
|
||||
return adapter.write_to_string(
|
||||
otio.adapters.read_from_file(
|
||||
fpath,
|
||||
media_linker_name=media_linker_name,
|
||||
media_linker_argument_map=media_linker_argument_map,
|
||||
**adapter_argument_map
|
||||
)
|
||||
)
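For illustration only (not part of the vendored file), how the helper above might be driven directly; `my_edit.otio` is a hypothetical input file:

import opentimelineio as otio

json_text = _otio_compatible_file_to_json_string(
    "my_edit.otio",
    media_linker_name=otio.media_linker.MediaLinkingPolicy.DoNotLinkMedia,
    media_linker_argument_map={},
    adapter_argument_map={},
)
print(json_text)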
|
||||
|
||||
|
||||
def main():
|
||||
"""Parse arguments and call _otio_compatible_file_to_json_string."""
|
||||
|
||||
args = _parsed_args()
|
||||
|
||||
media_linker_name = otio.console.console_utils.media_linker_name(
|
||||
args.media_linker
|
||||
)
|
||||
|
||||
try:
|
||||
read_adapter_arg_map = otio.console.console_utils.arg_list_to_map(
|
||||
args.adapter_arg,
|
||||
"adapter"
|
||||
)
|
||||
media_linker_argument_map = otio.console.console_utils.arg_list_to_map(
|
||||
args.media_linker_arg,
|
||||
"media linker"
|
||||
)
|
||||
except ValueError as exc:
|
||||
sys.stderr.write("\n" + str(exc) + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
for fpath in args.filepath:
|
||||
print(
|
||||
_otio_compatible_file_to_json_string(
|
||||
fpath,
|
||||
media_linker_name,
|
||||
media_linker_argument_map,
|
||||
read_adapter_arg_map
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
259
pype/vendor/python/python_2/opentimelineio/console/otioconvert.py
vendored
Normal file
|
|
@ -0,0 +1,259 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import copy
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
__doc__ = """ Python wrapper around OTIO to convert timeline files between \
|
||||
formats.
|
||||
|
||||
Available adapters: {}
|
||||
""".format(otio.adapters.available_adapter_names())
|
||||
|
||||
|
||||
def _parsed_args():
|
||||
""" parse commandline arguments with argparse """
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter
|
||||
)
|
||||
parser.add_argument(
|
||||
'-i',
|
||||
'--input',
|
||||
type=str,
|
||||
required=True,
|
||||
help='path to input file',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-o',
|
||||
'--output',
|
||||
type=str,
|
||||
required=True,
|
||||
help='path to output file',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-I',
|
||||
'--input-adapter',
|
||||
type=str,
|
||||
default=None,
|
||||
help="Explicitly use this adapter for reading the input file",
|
||||
)
|
||||
parser.add_argument(
|
||||
'-O',
|
||||
'--output-adapter',
|
||||
type=str,
|
||||
default=None,
|
||||
help="Explicitly use this adapter for writing the output file",
|
||||
)
|
||||
parser.add_argument(
|
||||
'-T',
|
||||
'--tracks',
|
||||
type=str,
|
||||
default=None,
|
||||
help="Pick one or more tracks, by 0-based index, separated by commas.",
|
||||
)
|
||||
parser.add_argument(
|
||||
'-m',
|
||||
'--media-linker',
|
||||
type=str,
|
||||
default="Default",
|
||||
help=(
|
||||
"Specify a media linker. 'Default' means use the "
|
||||
"$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly "
|
||||
"disable the linker, and anything else is interpreted as the name"
|
||||
" of the media linker to use."
|
||||
)
|
||||
)
|
||||
parser.add_argument(
|
||||
'-M',
|
||||
'--media-linker-arg',
|
||||
type=str,
|
||||
default=[],
|
||||
action='append',
|
||||
help='Extra arguments to be passed to the media linker in the form of '
|
||||
'key=value. Values are strings, numbers or Python literals: True, '
|
||||
'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-a',
|
||||
'--adapter-arg',
|
||||
type=str,
|
||||
default=[],
|
||||
action='append',
|
||||
help='Extra arguments to be passed to input adapter in the form of '
|
||||
'key=value. Values are strings, numbers or Python literals: True, '
|
||||
'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-A',
|
||||
'--output-adapter-arg',
|
||||
type=str,
|
||||
default=[],
|
||||
action='append',
|
||||
help='Extra arguments to be passed to output adapter in the form of '
|
||||
'key=value. Values are strings, numbers or Python literals: True, '
|
||||
'False, etc. Can be used multiple times: -A burrito="bar" -A taco=12.'
|
||||
)
|
||||
trim_args = parser.add_argument_group(
|
||||
title="Trim Arguments",
|
||||
description="Arguments that allow you to trim the OTIO file."
|
||||
)
|
||||
trim_args.add_argument(
|
||||
'--begin',
|
||||
type=str,
|
||||
default=None,
|
||||
help=(
|
||||
"Trim out everything in the timeline before this time, in the "
|
||||
"global time frame of the timeline. Argument should be in the form"
|
||||
' "VALUE,RATE", eg: --begin "10,24". Requires --end argument.'
|
||||
),
|
||||
)
|
||||
trim_args.add_argument(
|
||||
'--end',
|
||||
type=str,
|
||||
default=None,
|
||||
help=(
|
||||
"Trim out everything in the timeline after this time, in the "
|
||||
"global time frame of the timeline. Argument should be in the form"
|
||||
' "VALUE,RATE", eg: --begin "10,24". Requires --begin argument.'
|
||||
),
|
||||
)
|
||||
|
||||
result = parser.parse_args()
|
||||
|
||||
if result.begin is not None and result.end is None:
|
||||
parser.error("--begin requires --end.")
|
||||
if result.end is not None and result.begin is None:
|
||||
parser.error("--end requires --begin.")
|
||||
|
||||
if result.begin is not None:
|
||||
try:
|
||||
value, rate = result.begin.split(",")
|
||||
result.begin = otio.opentime.RationalTime(float(value), float(rate))
|
||||
except ValueError:
|
||||
parser.error(
|
||||
"--begin argument needs to be of the form: VALUE,RATE where "
|
||||
"VALUE is the (float) time value of the resulting RationalTime "
|
||||
"and RATE is the (float) time rate of the resulting RationalTime,"
|
||||
" not '{}'".format(result.begin)
|
||||
)
|
||||
|
||||
if result.end is not None:
|
||||
try:
|
||||
value, rate = result.end.split(",")
|
||||
result.end = otio.opentime.RationalTime(float(value), float(rate))
|
||||
except ValueError:
|
||||
parser.error(
|
||||
"--end argument needs to be of the form: VALUE,RATE where "
|
||||
"VALUE is the (float) time value of the resulting RationalTime "
|
||||
"and RATE is the (float) time rate of the resulting RationalTime,"
|
||||
" not '{}'".format(result.begin)
|
||||
)
|
||||
|
||||
return result
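A minimal sketch (not part of the vendored file) of the `VALUE,RATE` parsing performed above:

import opentimelineio as otio

value, rate = "10,24".split(",")
begin = otio.opentime.RationalTime(float(value), float(rate))
assert begin.value == 10.0 and begin.rate == 24.0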
|
||||
|
||||
|
||||
def main():
|
||||
"""Parse arguments and convert the files."""
|
||||
|
||||
args = _parsed_args()
|
||||
|
||||
in_adapter = args.input_adapter
|
||||
if in_adapter is None:
|
||||
in_adapter = otio.adapters.from_filepath(args.input).name
|
||||
|
||||
out_adapter = args.output_adapter
|
||||
if out_adapter is None:
|
||||
out_adapter = otio.adapters.from_filepath(args.output).name
|
||||
|
||||
media_linker_name = otio.console.console_utils.media_linker_name(
|
||||
args.media_linker
|
||||
)
|
||||
|
||||
try:
|
||||
read_adapter_arg_map = otio.console.console_utils.arg_list_to_map(
|
||||
args.adapter_arg,
|
||||
"input adapter"
|
||||
)
|
||||
ml_args = otio.console.console_utils.arg_list_to_map(
|
||||
args.media_linker_arg,
|
||||
"media linker"
|
||||
)
|
||||
except ValueError as exc:
|
||||
sys.stderr.write("\n" + str(exc) + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
result_tl = otio.adapters.read_from_file(
|
||||
args.input,
|
||||
in_adapter,
|
||||
media_linker_name=media_linker_name,
|
||||
media_linker_argument_map=ml_args,
|
||||
**read_adapter_arg_map
|
||||
)
|
||||
|
||||
if args.tracks:
|
||||
result_tracks = copy.deepcopy(otio.schema.Stack())
|
||||
del result_tracks[:]
|
||||
for track in args.tracks.split(","):
|
||||
tr = result_tl.tracks[int(track)]
|
||||
del result_tl.tracks[int(track)]
|
||||
print("track {0} is of kind: '{1}'".format(track, tr.kind))
|
||||
result_tracks.append(tr)
|
||||
result_tl.tracks = result_tracks
|
||||
|
||||
# handle trim arguments
|
||||
if args.begin is not None and args.end is not None:
|
||||
result_tl = otio.algorithms.timeline_trimmed_to_range(
|
||||
result_tl,
|
||||
otio.opentime.range_from_start_end_time(args.begin, args.end)
|
||||
)
|
||||
|
||||
try:
|
||||
write_adapter_arg_map = otio.console.console_utils.arg_list_to_map(
|
||||
args.output_adapter_arg,
|
||||
"output adapter"
|
||||
)
|
||||
except ValueError as exc:
|
||||
sys.stderr.write("\n" + str(exc) + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
otio.adapters.write_to_file(
|
||||
result_tl,
|
||||
args.output,
|
||||
out_adapter,
|
||||
**write_adapter_arg_map
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
main()
|
||||
except otio.exceptions.OTIOError as err:
|
||||
sys.stderr.write("ERROR: " + str(err) + "\n")
|
||||
sys.exit(1)
|
||||
193
pype/vendor/python/python_2/opentimelineio/console/otiostat.py
vendored
Normal file
|
|
@ -0,0 +1,193 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Print statistics about the otio file, including validation information."""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
|
||||
def _parsed_args():
|
||||
""" parse commandline arguments with argparse """
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=__doc__,
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter
|
||||
)
|
||||
parser.add_argument(
|
||||
'filepath',
|
||||
type=str,
|
||||
nargs='+',
|
||||
help='files to operate on'
|
||||
)
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
TESTS = []
|
||||
|
||||
|
||||
def stat_check(name):
|
||||
def real_stat_check(fn):
|
||||
TESTS.append((name, fn))
|
||||
return fn
|
||||
return real_stat_check
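A hypothetical extra check (not part of the vendored file), showing how the decorator registers a `(name, function)` pair in `TESTS` for `_stat_otio` to run later:

@stat_check("number of markers")
def _num_markers(input):
    # count markers across every clip in the parsed timeline
    return sum(len(clip.markers) for clip in input.each_clip())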
|
||||
|
||||
|
||||
@stat_check("parsed")
|
||||
def _did_parse(input):
|
||||
return input and True or False
|
||||
|
||||
|
||||
@stat_check("top level object")
|
||||
def _top_level_object(input):
|
||||
return input._serializable_label
|
||||
|
||||
|
||||
@stat_check("number of tracks")
|
||||
def _num_tracks(input):
|
||||
try:
|
||||
return len(input.tracks)
|
||||
except AttributeError:
|
||||
return 0
|
||||
|
||||
|
||||
@stat_check("Tracks are the same length")
|
||||
def _equal_length_tracks(tl):
|
||||
if not tl.tracks:
|
||||
return True
|
||||
for i, track in enumerate(tl.tracks):
|
||||
if track.duration() != tl.tracks[0].duration():
|
||||
raise RuntimeError(
|
||||
"track {} is not the same duration as the other tracks."
|
||||
" Track {} duration, vs: {}".format(
|
||||
i,
|
||||
track.duration(),
|
||||
tl.tracks[0].duration()
|
||||
)
|
||||
)
|
||||
return True
|
||||
|
||||
|
||||
@stat_check("deepest nesting")
|
||||
def _deepest_nesting(input):
|
||||
def depth(parent):
|
||||
if not isinstance(parent, otio.core.Composition):
|
||||
return 1
|
||||
d = 0
|
||||
for child in parent:
|
||||
d = max(d, depth(child) + 1)
|
||||
return d
|
||||
if isinstance(input, otio.schema.Timeline):
|
||||
return depth(input.tracks) + 1
|
||||
else:
|
||||
return depth(input)
|
||||
|
||||
|
||||
@stat_check("number of clips")
|
||||
def _num_clips(input):
|
||||
return len(list(input.each_clip()))
|
||||
|
||||
|
||||
@stat_check("total duration")
|
||||
def _total_duration(input):
|
||||
try:
|
||||
return input.tracks.duration()
|
||||
except AttributeError:
|
||||
return "n/a"
|
||||
|
||||
|
||||
@stat_check("total duration in timecode")
|
||||
def _total_duration_timecode(input):
|
||||
try:
|
||||
d = input.tracks.duration()
|
||||
return otio.opentime.to_timecode(d, d.rate)
|
||||
except AttributeError:
|
||||
return "n/a"
|
||||
|
||||
|
||||
@stat_check("top level rate")
|
||||
def _top_level_rate(input):
|
||||
try:
|
||||
return input.tracks.duration().rate
|
||||
except AttributeError:
|
||||
return "n/a"
|
||||
|
||||
|
||||
@stat_check("clips with cdl data")
|
||||
def _clips_with_cdl_data(input):
|
||||
return len(list(c for c in input.each_clip() if 'cdl' in c.metadata))
|
||||
|
||||
|
||||
@stat_check("Tracks with non standard types")
|
||||
def _sequences_with_non_standard_types(input):
|
||||
return len(
|
||||
list(
|
||||
c
|
||||
for c in input.each_child(descended_from_type=otio.schema.Track)
|
||||
if c.kind not in (otio.schema.TrackKind.__dict__)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _stat_otio(input_otio):
|
||||
for (test, testfunc) in TESTS:
|
||||
try:
|
||||
print("{}: {}".format(test, testfunc(input_otio)))
|
||||
except (otio.exceptions.OTIOError) as e:
|
||||
sys.stderr.write(
|
||||
"There was an OTIO Error: "
|
||||
" {}\n".format(e),
|
||||
)
|
||||
continue
|
||||
except (Exception) as e:
|
||||
sys.stderr.write("There was a system error: {}\n".format(e))
|
||||
continue
|
||||
|
||||
|
||||
def main():
|
||||
""" main entry point """
|
||||
args = _parsed_args()
|
||||
|
||||
for fp in args.filepath:
|
||||
try:
|
||||
parsed_otio = otio.adapters.read_from_file(fp)
|
||||
except (otio.exceptions.OTIOError) as e:
|
||||
sys.stderr.write(
|
||||
"The file did not successfully parse, with error:"
|
||||
" {}\n".format(e),
|
||||
)
|
||||
continue
|
||||
except (Exception) as e:
|
||||
sys.stderr.write("There was a system error: {}\n".format(e))
|
||||
continue
|
||||
|
||||
_stat_otio(parsed_otio)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
67
pype/vendor/python/python_2/opentimelineio/core/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Internal implementation details of OpenTimelineIO."""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
from . import (
|
||||
serializable_object
|
||||
)
|
||||
from .serializable_object import (
|
||||
SerializableObject,
|
||||
serializable_field,
|
||||
deprecated_field,
|
||||
)
|
||||
from .composable import (
|
||||
Composable
|
||||
)
|
||||
from .item import (
|
||||
Item
|
||||
)
|
||||
from . import composition
|
||||
from .composition import (
|
||||
Composition,
|
||||
)
|
||||
from . import type_registry
|
||||
from .type_registry import (
|
||||
register_type,
|
||||
upgrade_function_for,
|
||||
schema_name_from_label,
|
||||
schema_version_from_label,
|
||||
instance_from_schema,
|
||||
)
|
||||
from .json_serializer import (
|
||||
serialize_json_to_string,
|
||||
serialize_json_to_file,
|
||||
deserialize_json_from_string,
|
||||
deserialize_json_from_file,
|
||||
)
|
||||
from .media_reference import (
|
||||
MediaReference,
|
||||
)
|
||||
from . import unknown_schema
|
||||
from .unknown_schema import (
|
||||
UnknownSchema
|
||||
)
|
||||
141
pype/vendor/python/python_2/opentimelineio/core/composable.py
vendored
Normal file
|
|
@ -0,0 +1,141 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Composable class definition.
|
||||
|
||||
An object that can be composed by tracks.
|
||||
"""
|
||||
|
||||
import weakref
|
||||
|
||||
from . import serializable_object
|
||||
from . import type_registry
|
||||
|
||||
import copy
|
||||
|
||||
|
||||
@type_registry.register_type
|
||||
class Composable(serializable_object.SerializableObject):
|
||||
"""An object that can be composed by tracks.
|
||||
|
||||
Base class of:
|
||||
Item
|
||||
Transition
|
||||
"""
|
||||
|
||||
name = serializable_object.serializable_field(
|
||||
"name",
|
||||
doc="Composable name."
|
||||
)
|
||||
metadata = serializable_object.serializable_field(
|
||||
"metadata",
|
||||
doc="Metadata dictionary for this Composable."
|
||||
)
|
||||
|
||||
_serializable_label = "Composable.1"
|
||||
_class_path = "core.Composable"
|
||||
|
||||
def __init__(self, name=None, metadata=None):
|
||||
super(Composable, self).__init__()
|
||||
self._parent = None
|
||||
|
||||
# initialize the serializable fields
|
||||
self.name = name
|
||||
self.metadata = copy.deepcopy(metadata) if metadata else {}
|
||||
|
||||
@staticmethod
|
||||
def visible():
|
||||
"""Return the visibility of the Composable. By default True."""
|
||||
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def overlapping():
|
||||
"""Return whether an Item is overlapping. By default False."""
|
||||
|
||||
return False
|
||||
|
||||
# @{ functions to express the composable hierarchy
|
||||
def _root_parent(self):
|
||||
return ([self] + self._ancestors())[-1]
|
||||
|
||||
def _ancestors(self):
|
||||
ancestors = []
|
||||
seqi = self
|
||||
while seqi.parent() is not None:
|
||||
seqi = seqi.parent()
|
||||
ancestors.append(seqi)
|
||||
return ancestors
|
||||
|
||||
def parent(self):
|
||||
"""Return the parent Composable, or None if self has no parent."""
|
||||
|
||||
return self._parent() if self._parent is not None else None
|
||||
|
||||
def _set_parent(self, new_parent):
|
||||
if new_parent is not None and self.parent() is not None:
|
||||
raise ValueError(
|
||||
"Composable named '{}' is already in a composition named '{}',"
|
||||
" remove from previous parent before adding to new one."
|
||||
" Composable: {}, Composition: {}".format(
|
||||
self.name,
|
||||
self.parent() is not None and self.parent().name or None,
|
||||
self,
|
||||
self.parent()
|
||||
)
|
||||
)
|
||||
self._parent = weakref.ref(new_parent) if new_parent is not None else None
|
||||
|
||||
def is_parent_of(self, other):
|
||||
"""Returns true if self is a parent or ancestor of other."""
|
||||
|
||||
visited = set([])
|
||||
while other.parent() is not None and other.parent() not in visited:
|
||||
if other.parent() is self:
|
||||
return True
|
||||
visited.add(other)
|
||||
other = other.parent()
|
||||
|
||||
return False
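A minimal sketch (not part of the vendored file) of the parent bookkeeping, assuming `opentimelineio` is importable:

import opentimelineio as otio

track = otio.schema.Track(name="V1")
clip = otio.schema.Clip(name="shot_010")
track.append(clip)

# appending sets the weak parent pointer on the child
assert clip.parent() is track
assert track.is_parent_of(clip)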
|
||||
|
||||
# @}
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.{}("
|
||||
"name={}, "
|
||||
"metadata={}"
|
||||
")".format(
|
||||
self._class_path,
|
||||
repr(self.name),
|
||||
repr(self.metadata)
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "{}({}, {})".format(
|
||||
self._class_path.split('.')[-1],
|
||||
self.name,
|
||||
str(self.metadata)
|
||||
)
|
||||
718
pype/vendor/python/python_2/opentimelineio/core/composition.py
vendored
Normal file
|
|
@ -0,0 +1,718 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Composition base class. An object that contains `Items`."""
|
||||
|
||||
import collections
|
||||
|
||||
from . import (
|
||||
serializable_object,
|
||||
type_registry,
|
||||
item,
|
||||
composable,
|
||||
)
|
||||
|
||||
from .. import (
|
||||
opentime,
|
||||
exceptions
|
||||
)
|
||||
|
||||
|
||||
def _bisect_right(
|
||||
seq,
|
||||
tgt,
|
||||
key_func,
|
||||
lower_search_bound=0,
|
||||
upper_search_bound=None
|
||||
):
|
||||
"""Return the index of the last item in seq such that all e in seq[:index]
|
||||
have key_func(e) <= tgt, and all e in seq[index:] have key_func(e) > tgt.
|
||||
|
||||
Thus, seq.insert(index, value) will insert value after the rightmost item
|
||||
that meets the above condition.
|
||||
|
||||
lower_search_bound and upper_search_bound bound the slice to be searched.
|
||||
|
||||
Assumes that seq is already sorted.
|
||||
"""
|
||||
|
||||
if lower_search_bound < 0:
|
||||
raise ValueError('lower_search_bound must be non-negative')
|
||||
|
||||
if upper_search_bound is None:
|
||||
upper_search_bound = len(seq)
|
||||
|
||||
while lower_search_bound < upper_search_bound:
|
||||
midpoint_index = (lower_search_bound + upper_search_bound) // 2
|
||||
|
||||
if tgt < key_func(seq[midpoint_index]):
|
||||
upper_search_bound = midpoint_index
|
||||
else:
|
||||
lower_search_bound = midpoint_index + 1
|
||||
|
||||
return lower_search_bound
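A small sketch (not part of the vendored file) of `_bisect_right` with a key function:

children = [{"start": 0}, {"start": 5}, {"start": 10}]
index = _bisect_right(children, 5, key_func=lambda child: child["start"])
assert index == 2  # every element before index 2 has key <= 5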
|
||||
|
||||
|
||||
def _bisect_left(
|
||||
seq,
|
||||
tgt,
|
||||
key_func,
|
||||
lower_search_bound=0,
|
||||
upper_search_bound=None
|
||||
):
|
||||
"""Return the index of the last item in seq such that all e in seq[:index]
|
||||
have key_func(e) < tgt, and all e in seq[index:] have key_func(e) >= tgt.
|
||||
|
||||
Thus, seq.insert(index, value) will insert value before the leftmost item
|
||||
that meets the above condition.
|
||||
|
||||
lower_search_bound and upper_search_bound bound the slice to be searched.
|
||||
|
||||
Assumes that seq is already sorted.
|
||||
"""
|
||||
|
||||
if lower_search_bound < 0:
|
||||
raise ValueError('lower_search_bound must be non-negative')
|
||||
|
||||
if upper_search_bound is None:
|
||||
upper_search_bound = len(seq)
|
||||
|
||||
while lower_search_bound < upper_search_bound:
|
||||
midpoint_index = (lower_search_bound + upper_search_bound) // 2
|
||||
|
||||
if key_func(seq[midpoint_index]) < tgt:
|
||||
lower_search_bound = midpoint_index + 1
|
||||
else:
|
||||
upper_search_bound = midpoint_index
|
||||
|
||||
return lower_search_bound
|
||||
|
||||
|
||||
@type_registry.register_type
|
||||
class Composition(item.Item, collections.MutableSequence):
|
||||
"""Base class for an OTIO Item that contains other Items.
|
||||
|
||||
Should be subclassed (for example by Track and Stack), not used
|
||||
directly.
|
||||
"""
|
||||
|
||||
_serializable_label = "Composition.1"
|
||||
_composition_kind = "Composition"
|
||||
_modname = "core"
|
||||
_composable_base_class = composable.Composable
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
children=None,
|
||||
source_range=None,
|
||||
markers=None,
|
||||
effects=None,
|
||||
metadata=None
|
||||
):
|
||||
item.Item.__init__(
|
||||
self,
|
||||
name=name,
|
||||
source_range=source_range,
|
||||
markers=markers,
|
||||
effects=effects,
|
||||
metadata=metadata
|
||||
)
|
||||
collections.MutableSequence.__init__(self)
|
||||
|
||||
# Because we know that all children are unique, we store a set
|
||||
# of all the children as well to speed up __contain__ checks.
|
||||
self._child_lookup = set()
|
||||
|
||||
self._children = []
|
||||
if children:
|
||||
# cannot simply set ._children to children since __setitem__ runs
|
||||
# extra logic (assigning ._parent pointers) and populates the
|
||||
# internal membership set _child_lookup.
|
||||
self.extend(children)
|
||||
|
||||
_children = serializable_object.serializable_field(
|
||||
"children",
|
||||
list,
|
||||
"Items contained by this composition."
|
||||
)
|
||||
|
||||
@property
|
||||
def composition_kind(self):
|
||||
"""Returns a label specifying the kind of composition."""
|
||||
|
||||
return self._composition_kind
|
||||
|
||||
def __str__(self):
|
||||
return "{}({}, {}, {}, {})".format(
|
||||
self._composition_kind,
|
||||
str(self.name),
|
||||
str(self._children),
|
||||
str(self.source_range),
|
||||
str(self.metadata)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.{}.{}("
|
||||
"name={}, "
|
||||
"children={}, "
|
||||
"source_range={}, "
|
||||
"metadata={}"
|
||||
")".format(
|
||||
self._modname,
|
||||
self._composition_kind,
|
||||
repr(self.name),
|
||||
repr(self._children),
|
||||
repr(self.source_range),
|
||||
repr(self.metadata)
|
||||
)
|
||||
)
|
||||
|
||||
transform = serializable_object.deprecated_field()
|
||||
|
||||
def child_at_time(
|
||||
self,
|
||||
search_time,
|
||||
shallow_search=False,
|
||||
):
|
||||
"""Return the child that overlaps with time search_time.
|
||||
|
||||
search_time is in the space of self.
|
||||
|
||||
If shallow_search is false, will recurse into compositions.
|
||||
"""
|
||||
|
||||
range_map = self.range_of_all_children()
|
||||
|
||||
# find the first item whose end_time_exclusive is after the search_time
|
||||
first_inside_range = _bisect_left(
|
||||
seq=self._children,
|
||||
tgt=search_time,
|
||||
key_func=lambda child: range_map[child].end_time_exclusive(),
|
||||
)
|
||||
|
||||
# find the last item whose start_time is before the search_time
|
||||
last_in_range = _bisect_right(
|
||||
seq=self._children,
|
||||
tgt=search_time,
|
||||
key_func=lambda child: range_map[child].start_time,
|
||||
lower_search_bound=first_inside_range,
|
||||
)
|
||||
|
||||
# limit the search to children who are in the search_range
|
||||
possible_matches = self._children[first_inside_range:last_in_range]
|
||||
|
||||
result = None
|
||||
for thing in possible_matches:
|
||||
if range_map[thing].overlaps(search_time):
|
||||
result = thing
|
||||
break
|
||||
|
||||
# if the search cannot or should not continue
|
||||
if (
|
||||
result is None
|
||||
or shallow_search
|
||||
or not hasattr(result, "child_at_time")
|
||||
):
|
||||
return result
|
||||
|
||||
# before you recurse, you have to transform the time into the
|
||||
# space of the child
|
||||
child_search_time = self.transformed_time(search_time, result)
|
||||
|
||||
return result.child_at_time(child_search_time, shallow_search)
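A minimal sketch (not part of the vendored file) of `child_at_time` on a track of two one-second clips, assuming `opentimelineio` is importable:

import opentimelineio as otio

track = otio.schema.Track(name="V1")
for name in ("A", "B"):
    track.append(
        otio.schema.Clip(
            name=name,
            source_range=otio.opentime.TimeRange(
                start_time=otio.opentime.RationalTime(0, 24),
                duration=otio.opentime.RationalTime(24, 24),
            ),
        )
    )

# frame 30 falls inside the second clip
assert track.child_at_time(otio.opentime.RationalTime(30, 24)).name == "B"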
|
||||
|
||||
def each_child(
|
||||
self,
|
||||
search_range=None,
|
||||
descended_from_type=composable.Composable,
|
||||
shallow_search=False,
|
||||
):
|
||||
""" Generator that returns each child contained in the composition in
|
||||
the order in which it is found.
|
||||
|
||||
Arguments:
|
||||
search_range: if specified, only children whose range overlaps with
|
||||
the search range will be yielded.
|
||||
descended_from_type: if specified, only children who are a
|
||||
descendent of the descended_from_type will be yielded.
|
||||
shallow_search: if True, will only search children of self, and
|
||||
not recurse into children of children.
|
||||
"""
|
||||
if search_range:
|
||||
range_map = self.range_of_all_children()
|
||||
|
||||
# find the first item whose end_time_inclusive is after the
|
||||
# start_time of the search range
|
||||
first_inside_range = _bisect_left(
|
||||
seq=self._children,
|
||||
tgt=search_range.start_time,
|
||||
key_func=lambda child: range_map[child].end_time_inclusive(),
|
||||
)
|
||||
|
||||
# find the last item whose start_time is before the
|
||||
# end_time_inclusive of the search_range
|
||||
last_in_range = _bisect_right(
|
||||
seq=self._children,
|
||||
tgt=search_range.end_time_inclusive(),
|
||||
key_func=lambda child: range_map[child].start_time,
|
||||
lower_search_bound=first_inside_range,
|
||||
)
|
||||
|
||||
# limit the search to children who are in the search_range
|
||||
children = self._children[first_inside_range:last_in_range]
|
||||
else:
|
||||
# otherwise search all the children
|
||||
children = self._children
|
||||
|
||||
for child in children:
|
||||
# filter out children who are not descended from the specified type
|
||||
# shortcut the isinstance if descended_from_type is composable
|
||||
# (since all objects in compositions are already composables)
|
||||
is_descendant = descended_from_type == composable.Composable
|
||||
if is_descendant or isinstance(child, descended_from_type):
|
||||
yield child
|
||||
|
||||
# if not a shallow_search, for children that are compositions,
|
||||
# recurse into their children
|
||||
if not shallow_search and hasattr(child, "each_child"):
|
||||
|
||||
if search_range is not None:
|
||||
search_range = self.transformed_time_range(search_range, child)
|
||||
|
||||
for valid_child in child.each_child(
|
||||
search_range,
|
||||
descended_from_type,
|
||||
shallow_search
|
||||
):
|
||||
yield valid_child
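A minimal sketch (not part of the vendored file) of filtering the recursive walk by type, assuming `opentimelineio` is importable:

import opentimelineio as otio

timeline = otio.schema.Timeline(name="cut")
track = otio.schema.Track(name="V1")
track.append(otio.schema.Clip(name="shot_010"))
timeline.tracks.append(track)

# only Clip instances are yielded; the walk still descends through the track
clip_names = [
    child.name
    for child in timeline.tracks.each_child(descended_from_type=otio.schema.Clip)
]
assert clip_names == ["shot_010"]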
|
||||
|
||||
def range_of_child_at_index(self, index):
|
||||
"""Return the range of a child item in the time range of this
|
||||
composition.
|
||||
|
||||
For example, with a track:
|
||||
[ClipA][ClipB][ClipC]
|
||||
|
||||
The self.range_of_child_at_index(2) will return:
|
||||
TimeRange(ClipA.duration + ClipB.duration, ClipC.duration)
|
||||
|
||||
To be implemented by subclass of Composition.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
def trimmed_range_of_child_at_index(self, index):
|
||||
"""Return the trimmed range of the child item at index in the time
|
||||
range of this composition.
|
||||
|
||||
For example, with a track:
|
||||
|
||||
[ ]
|
||||
|
||||
[ClipA][ClipB][ClipC]
|
||||
|
||||
The range of index 2 (ClipC) will be just like
|
||||
range_of_child_at_index() but trimmed based on this Composition's
|
||||
source_range.
|
||||
|
||||
To be implemented by child.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
def range_of_all_children(self):
|
||||
"""Return a dict mapping children to their range in this object."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
def __copy__(self):
|
||||
result = super(Composition, self).__copy__()
|
||||
|
||||
# Children are *not* copied with a shallow copy since the meaning is
|
||||
# ambiguous - they have a parent pointer which would need to be flipped
|
||||
# or they would need to be copied, which implies a deepcopy().
|
||||
#
|
||||
# This follows from the python documentation on copy/deepcopy:
|
||||
# https://docs.python.org/2/library/copy.html
|
||||
#
|
||||
# """
|
||||
# - A shallow copy constructs a new compound object and then (to the
|
||||
# extent possible) inserts references into it to the objects found in
|
||||
# the original.
|
||||
# - A deep copy constructs a new compound object and then, recursively,
|
||||
# inserts copies into it of the objects found in the original.
|
||||
# """
|
||||
result._children = []
|
||||
|
||||
return result
|
||||
|
||||
def __deepcopy__(self, md):
|
||||
result = super(Composition, self).__deepcopy__(md)
|
||||
|
||||
# deepcopy should have already copied the children, so only parent
|
||||
# pointers need to be updated.
|
||||
[c._set_parent(result) for c in result._children]
|
||||
|
||||
# we also need to reconstruct the membership set of _child_lookup.
|
||||
result._child_lookup.update(result._children)
|
||||
|
||||
return result
|
||||
|
||||
def _path_to_child(self, child):
|
||||
if not isinstance(child, composable.Composable):
|
||||
raise TypeError(
|
||||
"An object child of 'Composable' is required,"
|
||||
" not type '{}'".format(
|
||||
type(child)
|
||||
)
|
||||
)
|
||||
|
||||
current = child
|
||||
parents = []
|
||||
|
||||
while(current is not self):
|
||||
try:
|
||||
current = current.parent()
|
||||
except AttributeError:
|
||||
raise exceptions.NotAChildError(
|
||||
"Item '{}' is not a child of '{}'.".format(child, self)
|
||||
)
|
||||
|
||||
parents.append(current)
|
||||
|
||||
return parents
|
||||
|
||||
def range_of_child(self, child, reference_space=None):
|
||||
"""The range of the child in relation to another item
|
||||
(reference_space), not trimmed based on this
|
||||
composition's source_range.
|
||||
|
||||
Note that reference_space must be in the same timeline as self.
|
||||
|
||||
For example:
|
||||
|
||||
| [-----] | seq
|
||||
|
||||
[-----------------] Clip A
|
||||
|
||||
If ClipA has duration 17, and seq has source_range: 5, duration 15,
|
||||
seq.range_of_child(Clip A) will return (0, 17)
|
||||
ignoring the source range of seq.
|
||||
|
||||
To get the range of the child with the source_range applied, use the
|
||||
trimmed_range_of_child() method.
|
||||
"""
|
||||
|
||||
if not reference_space:
|
||||
reference_space = self
|
||||
|
||||
parents = self._path_to_child(child)
|
||||
|
||||
current = child
|
||||
result_range = None
|
||||
|
||||
for parent in parents:
|
||||
index = parent.index(current)
|
||||
parent_range = parent.range_of_child_at_index(index)
|
||||
|
||||
if not result_range:
|
||||
result_range = parent_range
|
||||
current = parent
|
||||
continue
|
||||
|
||||
result_range = opentime.TimeRange(
|
||||
start_time=result_range.start_time + parent_range.start_time,
|
||||
duration=result_range.duration
|
||||
)
|
||||
current = parent
|
||||
|
||||
if reference_space is not self:
|
||||
result_range = self.transformed_time_range(
|
||||
result_range,
|
||||
reference_space
|
||||
)
|
||||
|
||||
return result_range
|
||||
|
||||
def handles_of_child(self, child):
|
||||
"""If media beyond the ends of this child are visible due to adjacent
|
||||
Transitions (only applicable in a Track) then this will return the
|
||||
head and tail offsets as a tuple of RationalTime objects. If no handles
|
||||
are present on either side, then None is returned instead of a
|
||||
RationalTime.
|
||||
|
||||
Example usage:
|
||||
>>> head, tail = track.handles_of_child(clip)
|
||||
>>> if head:
|
||||
... print('Do something')
|
||||
>>> if tail:
|
||||
... print('Do something else')
|
||||
"""
|
||||
return (None, None)
|
||||
|
||||
def trimmed_range_of_child(self, child, reference_space=None):
|
||||
"""Get range of the child in reference_space coordinates, after the
|
||||
self.source_range is applied.
|
||||
|
||||
Example
|
||||
| [-----] | seq
|
||||
[-----------------] Clip A
|
||||
|
||||
If ClipA has duration 17, and seq has source_range: 5, duration 10,
|
||||
seq.trimmed_range_of_child(Clip A) will return (5, 10)
|
||||
Which is trimming the range according to the source_range of seq.
|
||||
|
||||
To get the range of the child without the source_range applied, use the
|
||||
range_of_child() method.
|
||||
|
||||
Another example
|
||||
| [-----] | seq source range starts on frame 4 and goes to frame 8
|
||||
[ClipA][ClipB] (each 6 frames long)
|
||||
|
||||
>>> seq.range_of_child(ClipA)
|
||||
0, duration 6
|
||||
>>> seq.trimmed_range_of_child(ClipA):
|
||||
4, duration 2
|
||||
"""
|
||||
|
||||
if not reference_space:
|
||||
reference_space = self
|
||||
|
||||
if not reference_space == self:
|
||||
raise NotImplementedError
|
||||
|
||||
parents = self._path_to_child(child)
|
||||
|
||||
current = child
|
||||
result_range = None
|
||||
|
||||
for parent in parents:
|
||||
index = parent.index(current)
|
||||
parent_range = parent.trimmed_range_of_child_at_index(index)
|
||||
|
||||
if not result_range:
|
||||
result_range = parent_range
|
||||
current = parent
|
||||
continue
|
||||
|
||||
result_range.start_time += parent_range.start_time
|
||||
current = parent
|
||||
|
||||
if not self.source_range or not result_range:
|
||||
return result_range
|
||||
|
||||
new_start_time = max(
|
||||
self.source_range.start_time,
|
||||
result_range.start_time
|
||||
)
|
||||
|
||||
# trimmed out
|
||||
if new_start_time >= result_range.end_time_exclusive():
|
||||
return None
|
||||
|
||||
# compute duration
|
||||
new_duration = min(
|
||||
result_range.end_time_exclusive(),
|
||||
self.source_range.end_time_exclusive()
|
||||
) - new_start_time
|
||||
|
||||
if new_duration.value < 0:
|
||||
return None
|
||||
|
||||
return opentime.TimeRange(new_start_time, new_duration)
|
||||
|
||||
def trim_child_range(self, child_range):
|
||||
if not self.source_range:
|
||||
return child_range
|
||||
|
||||
# cropped out entirely
|
||||
past_end_time = self.source_range.start_time >= child_range.end_time_exclusive()
|
||||
before_start_time = \
|
||||
self.source_range.end_time_exclusive() <= child_range.start_time
|
||||
|
||||
if past_end_time or before_start_time:
|
||||
return None
|
||||
|
||||
if child_range.start_time < self.source_range.start_time:
|
||||
child_range = opentime.range_from_start_end_time(
|
||||
self.source_range.start_time,
|
||||
child_range.end_time_exclusive()
|
||||
)
|
||||
|
||||
if (
|
||||
child_range.end_time_exclusive() >
|
||||
self.source_range.end_time_exclusive()
|
||||
):
|
||||
child_range = opentime.range_from_start_end_time(
|
||||
child_range.start_time,
|
||||
self.source_range.end_time_exclusive()
|
||||
)
|
||||
|
||||
return child_range
|
||||
|
||||
# @{ SerializableObject override.
|
||||
def _update(self, d):
|
||||
"""Like the dictionary .update() method.
|
||||
|
||||
Update the data dictionary of this SerializableObject with the .data
|
||||
of d if d is a SerializableObject or if d is a dictionary, d itself.
|
||||
"""
|
||||
|
||||
# use the parent update function
|
||||
super(Composition, self)._update(d)
|
||||
|
||||
# ...except for the 'children' field, which needs to run through the
|
||||
# insert method so that _parent pointers are correctly set on children.
|
||||
self._children = []
|
||||
self.extend(d.get('children', []))
|
||||
# @}
|
||||
|
||||
# @{ collections.MutableSequence implementation
|
||||
def __getitem__(self, item):
|
||||
return self._children[item]
|
||||
|
||||
def _setitem_slice(self, key, value):
|
||||
set_value = set(value)
|
||||
|
||||
# check if any members in the new slice are repeated
|
||||
if len(set_value) != len(value):
|
||||
raise ValueError(
|
||||
"Instancing not allowed in Compositions, {} contains repeated"
|
||||
" items.".format(value)
|
||||
)
|
||||
|
||||
old = self._children[key]
|
||||
if old:
|
||||
set_old = set(old)
|
||||
set_outside_old = set(self._children).difference(set_old)
|
||||
|
||||
isect = set_outside_old.intersection(set_value)
|
||||
if isect:
|
||||
raise ValueError(
|
||||
"Attempting to insert duplicates of items {} already "
|
||||
"present in container, instancing not allowed in "
|
||||
"Compositions".format(isect)
|
||||
)
|
||||
|
||||
# update old parent
|
||||
for val in old:
|
||||
val._set_parent(None)
|
||||
self._child_lookup.remove(val)
|
||||
|
||||
# insert into _children
|
||||
self._children[key] = value
|
||||
|
||||
# update new parent
|
||||
if value:
|
||||
for val in value:
|
||||
val._set_parent(self)
|
||||
self._child_lookup.add(val)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
# fetch the current thing at that index/slice
|
||||
old = self._children[key]
|
||||
|
||||
# in the case of key being a slice, old and value are both sequences
|
||||
if old is value:
|
||||
return
|
||||
|
||||
if isinstance(key, slice):
|
||||
return self._setitem_slice(key, value)
|
||||
|
||||
if value in self:
|
||||
raise ValueError(
|
||||
"Composable {} already present in this container, instancing"
|
||||
" not allowed in otio compositions.".format(value)
|
||||
)
|
||||
|
||||
# unset the old child's parent and delete the membership entry.
|
||||
if old is not None:
|
||||
old._set_parent(None)
|
||||
self._child_lookup.remove(old)
|
||||
|
||||
# put it into our list of children
|
||||
self._children[key] = value
|
||||
|
||||
# set the new parent
|
||||
if value is not None:
|
||||
value._set_parent(self)
|
||||
|
||||
# put it into our membership tracking set
|
||||
self._child_lookup.add(value)
|
||||
|
||||
def insert(self, index, item):
|
||||
"""Insert an item into the composition at location `index`."""
|
||||
|
||||
if not isinstance(item, self._composable_base_class):
|
||||
raise TypeError(
|
||||
"Not allowed to insert an object of type {0} into a {1}, only"
|
||||
" objects descending from {2}. Tried to insert: {3}".format(
|
||||
type(item),
|
||||
type(self),
|
||||
self._composable_base_class,
|
||||
str(item)
|
||||
)
|
||||
)
|
||||
|
||||
if item in self:
|
||||
raise ValueError(
|
||||
"Composable {} already present in this container, instancing"
|
||||
" not allowed in otio compositions.".format(item)
|
||||
)
|
||||
|
||||
# set the item's parent and add it to our membership tracking and list
|
||||
# of children
|
||||
item._set_parent(self)
|
||||
self._child_lookup.add(item)
|
||||
self._children.insert(index, item)
|
||||
|
||||
def __contains__(self, item):
|
||||
"""Use our internal membership tracking set to speed up searches."""
|
||||
return item in self._child_lookup
|
||||
|
||||
def __len__(self):
|
||||
"""The len() of a Composition is the # of children in it.
|
||||
Note that this also means that a Composition with no children
|
||||
is considered False, so take care to test for "if foo is not None"
|
||||
versus just "if foo" when the difference matters."""
|
||||
return len(self._children)
|
||||
|
||||
def __delitem__(self, key):
|
||||
# grab the old value
|
||||
old = self._children[key]
|
||||
|
||||
# remove it from the membership tracking set and clear parent
|
||||
if old is not None:
|
||||
if isinstance(key, slice):
|
||||
for val in old:
|
||||
self._child_lookup.remove(val)
|
||||
val._set_parent(None)
|
||||
else:
|
||||
self._child_lookup.remove(old)
|
||||
old._set_parent(None)
|
||||
|
||||
# remove it from our list of children
|
||||
del self._children[key]
|
||||
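# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# The container methods above (__setitem__ / insert) refuse to hold the same
# Composable twice. A quick demonstration, assuming the vendored package is
# importable as `opentimelineio`; the track/clip names are made up.
import opentimelineio as otio

track = otio.schema.Track(name="V1")
clip = otio.schema.Clip(name="shot_010")
track.append(clip)

try:
    track.append(clip)  # same object again -> "instancing not allowed"
except ValueError as err:
    print(err)

# a deep copy is a distinct object, so it can be added alongside the original
track.append(clip.deepcopy())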
243
pype/vendor/python/python_2/opentimelineio/core/item.py
vendored
Normal file
|
|
@ -0,0 +1,243 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implementation of the Item base class. OTIO Objects that contain media."""
|
||||
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
opentime,
|
||||
exceptions,
|
||||
)
|
||||
|
||||
from . import (
|
||||
serializable_object,
|
||||
composable,
|
||||
)
|
||||
|
||||
|
||||
class Item(composable.Composable):
|
||||
"""An Item is a Composable that can be part of a Composition or Timeline.
|
||||
|
||||
More specifically, it is a Composable that has meaningful duration.
|
||||
|
||||
Can also hold effects and markers.
|
||||
|
||||
Base class of:
|
||||
- Composition (and children)
|
||||
- Clip
|
||||
- Gap
|
||||
"""
|
||||
|
||||
_serializable_label = "Item.1"
|
||||
_class_path = "core.Item"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
source_range=None,
|
||||
effects=None,
|
||||
markers=None,
|
||||
metadata=None,
|
||||
):
|
||||
super(Item, self).__init__(name=name, metadata=metadata)
|
||||
|
||||
self.source_range = copy.deepcopy(source_range)
|
||||
self.effects = copy.deepcopy(effects) if effects else []
|
||||
self.markers = copy.deepcopy(markers) if markers else []
|
||||
|
||||
name = serializable_object.serializable_field("name", doc="Item name.")
|
||||
source_range = serializable_object.serializable_field(
|
||||
"source_range",
|
||||
opentime.TimeRange,
|
||||
doc="Range of source to trim to. Can be None or a TimeRange."
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def visible():
|
||||
"""Return the visibility of the Item. By default True."""
|
||||
|
||||
return True
|
||||
|
||||
def duration(self):
|
||||
"""Convience wrapper for the trimmed_range.duration of the item."""
|
||||
|
||||
return self.trimmed_range().duration
|
||||
|
||||
def available_range(self):
|
||||
"""Implemented by child classes, available range of media."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
def trimmed_range(self):
|
||||
"""The range after applying the source range."""
|
||||
if self.source_range is not None:
|
||||
return copy.copy(self.source_range)
|
||||
|
||||
return self.available_range()
|
||||
|
||||
def visible_range(self):
|
||||
"""The range of this item's media visible to its parent.
|
||||
Includes handles revealed by adjacent transitions (if any).
|
||||
This will always be larger or equal to trimmed_range()."""
|
||||
result = self.trimmed_range()
|
||||
if self.parent():
|
||||
head, tail = self.parent().handles_of_child(self)
|
||||
if head:
|
||||
result = opentime.TimeRange(
|
||||
start_time=result.start_time - head,
|
||||
duration=result.duration + head
|
||||
)
|
||||
if tail:
|
||||
result = opentime.TimeRange(
|
||||
start_time=result.start_time,
|
||||
duration=result.duration + tail
|
||||
)
|
||||
return result
|
||||
|
||||
def trimmed_range_in_parent(self):
|
||||
"""Find and return the trimmed range of this item in the parent."""
|
||||
if not self.parent():
|
||||
raise exceptions.NotAChildError(
|
||||
"No parent of {}, cannot compute range in parent.".format(self)
|
||||
)
|
||||
|
||||
return self.parent().trimmed_range_of_child(self)
|
||||
|
||||
def range_in_parent(self):
|
||||
"""Find and return the untrimmed range of this item in the parent."""
|
||||
if not self.parent():
|
||||
raise exceptions.NotAChildError(
|
||||
"No parent of {}, cannot compute range in parent.".format(self)
|
||||
)
|
||||
|
||||
return self.parent().range_of_child(self)
|
||||
|
||||
def transformed_time(self, t, to_item):
|
||||
"""Converts time t in the coordinate system of self to coordinate
|
||||
system of to_item.
|
||||
|
||||
Note that self and to_item must be part of the same timeline (they must
|
||||
have a common ancestor).
|
||||
|
||||
Example:
|
||||
|
||||
0 20
|
||||
[------t----D----------]
|
||||
[--A-][t----B---][--C--]
|
||||
100 101 110
|
||||
101 in B = 6 in D
|
||||
|
||||
t = t argument
|
||||
"""
|
||||
|
||||
if not isinstance(t, opentime.RationalTime):
|
||||
raise ValueError(
|
||||
"transformed_time only operates on RationalTime, not {}".format(
|
||||
type(t)
|
||||
)
|
||||
)
|
||||
|
||||
# does not operate in place
|
||||
result = copy.copy(t)
|
||||
|
||||
if to_item is None:
|
||||
return result
|
||||
|
||||
root = self._root_parent()
|
||||
|
||||
# transform t to root parent's coordinate system
|
||||
item = self
|
||||
while item != root and item != to_item:
|
||||
|
||||
parent = item.parent()
|
||||
result -= item.trimmed_range().start_time
|
||||
result += parent.range_of_child(item).start_time
|
||||
|
||||
item = parent
|
||||
|
||||
ancestor = item
|
||||
|
||||
# transform from root parent's coordinate system to to_item
|
||||
item = to_item
|
||||
while item != root and item != ancestor:
|
||||
|
||||
parent = item.parent()
|
||||
result += item.trimmed_range().start_time
|
||||
result -= parent.range_of_child(item).start_time
|
||||
|
||||
item = parent
|
||||
|
||||
assert(item is ancestor)
|
||||
|
||||
return result
|
||||
|
||||
def transformed_time_range(self, tr, to_item):
|
||||
"""Transforms the timerange tr to the range of child or self to_item."""
|
||||
|
||||
return opentime.TimeRange(
|
||||
self.transformed_time(tr.start_time, to_item),
|
||||
tr.duration
|
||||
)
|
||||
|
||||
markers = serializable_object.serializable_field(
|
||||
"markers",
|
||||
doc="List of markers on this item."
|
||||
)
|
||||
effects = serializable_object.serializable_field(
|
||||
"effects",
|
||||
doc="List of effects on this item."
|
||||
)
|
||||
metadata = serializable_object.serializable_field(
|
||||
"metadata",
|
||||
doc="Metadata dictionary for this item."
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.{}("
|
||||
"name={}, "
|
||||
"source_range={}, "
|
||||
"effects={}, "
|
||||
"markers={}, "
|
||||
"metadata={}"
|
||||
")".format(
|
||||
self._class_path,
|
||||
repr(self.name),
|
||||
repr(self.source_range),
|
||||
repr(self.effects),
|
||||
repr(self.markers),
|
||||
repr(self.metadata)
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "{}({}, {}, {}, {}, {})".format(
|
||||
self._class_path.split('.')[-1],
|
||||
self.name,
|
||||
str(self.source_range),
|
||||
str(self.effects),
|
||||
str(self.markers),
|
||||
str(self.metadata)
|
||||
)
|
||||
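# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# How Item.trimmed_range() and duration() relate to source_range, using a
# hypothetical Item subclass (Item itself leaves available_range() abstract).
# Assumes the vendored package is importable as `opentimelineio`.
import opentimelineio as otio


class _DemoItem(otio.core.Item):
    _serializable_label = "DemoItem.1"

    def available_range(self):
        # pretend the underlying media is 100 frames at 24 fps
        return otio.opentime.TimeRange(
            otio.opentime.RationalTime(0, 24),
            otio.opentime.RationalTime(100, 24)
        )


item = _DemoItem(name="demo")
print(item.trimmed_range())   # no source_range yet -> falls back to available_range()

item.source_range = otio.opentime.TimeRange(
    otio.opentime.RationalTime(10, 24),
    otio.opentime.RationalTime(20, 24)
)
print(item.trimmed_range())   # now a copy of source_range
print(item.duration())        # 20 frames at 24 fps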
218
pype/vendor/python/python_2/opentimelineio/core/json_serializer.py
vendored
Normal file
|
|
@ -0,0 +1,218 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Serializer for SerializableObjects to JSON
|
||||
|
||||
Used for the otio_json adapter as well as for plugins and manifests.
|
||||
"""
|
||||
|
||||
import json
|
||||
|
||||
from . import (
|
||||
SerializableObject,
|
||||
type_registry,
|
||||
)
|
||||
|
||||
from .unknown_schema import UnknownSchema
|
||||
|
||||
from .. import (
|
||||
exceptions,
|
||||
opentime,
|
||||
)
|
||||
|
||||
|
||||
# @TODO: Handle file version drifting
|
||||
|
||||
|
||||
class _SerializableObjectEncoder(json.JSONEncoder):
|
||||
|
||||
""" Encoder for the SerializableObject OTIO Class and its descendents. """
|
||||
|
||||
def default(self, obj):
|
||||
for typename, encfn in _ENCODER_LIST:
|
||||
if isinstance(obj, typename):
|
||||
return encfn(obj)
|
||||
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def serialize_json_to_string(root, indent=4):
|
||||
"""Serialize a tree of SerializableObject to JSON.
|
||||
|
||||
Returns a JSON string.
|
||||
"""
|
||||
|
||||
return _SerializableObjectEncoder(
|
||||
sort_keys=True,
|
||||
indent=indent
|
||||
).encode(root)
|
||||
|
||||
|
||||
def serialize_json_to_file(root, to_file):
|
||||
"""
|
||||
Serialize a tree of SerializableObject to JSON.
|
||||
|
||||
Writes the result to the given file path.
|
||||
"""
|
||||
|
||||
content = serialize_json_to_string(root)
|
||||
|
||||
with open(to_file, 'w') as file_contents:
|
||||
file_contents.write(content)
|
||||
|
||||
# @{ Encoders
|
||||
|
||||
|
||||
def _encoded_serializable_object(input_otio):
|
||||
if not input_otio._serializable_label:
|
||||
raise exceptions.InvalidSerializableLabelError(
|
||||
input_otio._serializable_label
|
||||
)
|
||||
result = {
|
||||
"OTIO_SCHEMA": input_otio._serializable_label,
|
||||
}
|
||||
result.update(input_otio._data)
|
||||
return result
|
||||
|
||||
|
||||
def _encoded_unknown_schema_object(input_otio):
|
||||
orig_label = input_otio.data.get(UnknownSchema._original_label)
|
||||
if not orig_label:
|
||||
raise exceptions.InvalidSerializableLabelError(
|
||||
orig_label
|
||||
)
|
||||
# result is just a dict, not a SerializableObject
|
||||
result = {}
|
||||
result.update(input_otio.data)
|
||||
result["OTIO_SCHEMA"] = orig_label # override the UnknownSchema label
|
||||
del result[UnknownSchema._original_label]
|
||||
return result
|
||||
|
||||
|
||||
def _encoded_time(input_otio):
|
||||
return {
|
||||
"OTIO_SCHEMA": "RationalTime.1",
|
||||
'value': input_otio.value,
|
||||
'rate': input_otio.rate
|
||||
}
|
||||
|
||||
|
||||
def _encoded_time_range(input_otio):
|
||||
return {
|
||||
"OTIO_SCHEMA": "TimeRange.1",
|
||||
'start_time': _encoded_time(input_otio.start_time),
|
||||
'duration': _encoded_time(input_otio.duration)
|
||||
}
|
||||
|
||||
|
||||
def _encoded_transform(input_otio):
|
||||
return {
|
||||
"OTIO_SCHEMA": "TimeTransform.1",
|
||||
'offset': _encoded_time(input_otio.offset),
|
||||
'scale': input_otio.scale,
|
||||
'rate': input_otio.rate
|
||||
}
|
||||
# @}
|
||||
|
||||
|
||||
# Ordered list of functions for encoding OTIO objects to JSON.
|
||||
# More particular cases should precede more general cases.
|
||||
_ENCODER_LIST = [
|
||||
(opentime.RationalTime, _encoded_time),
|
||||
(opentime.TimeRange, _encoded_time_range),
|
||||
(opentime.TimeTransform, _encoded_transform),
|
||||
(UnknownSchema, _encoded_unknown_schema_object),
|
||||
(SerializableObject, _encoded_serializable_object)
|
||||
]
|
||||
|
||||
# @{ Decoders
|
||||
|
||||
|
||||
def _decoded_time(input_otio):
|
||||
return opentime.RationalTime(
|
||||
input_otio['value'],
|
||||
input_otio['rate']
|
||||
)
|
||||
|
||||
|
||||
def _decoded_time_range(input_otio):
|
||||
return opentime.TimeRange(
|
||||
input_otio['start_time'],
|
||||
input_otio['duration']
|
||||
)
|
||||
|
||||
|
||||
def _decoded_transform(input_otio):
|
||||
return opentime.TimeTransform(
|
||||
input_otio['offset'],
|
||||
input_otio['scale']
|
||||
)
|
||||
# @}
|
||||
|
||||
|
||||
# Map of explicit decoder functions to schema labels (for opentime)
|
||||
# because opentime is implemented with no knowledge of OTIO, it doesn't use the
|
||||
# same pattern as SerializableObject.
|
||||
_DECODER_FUNCTION_MAP = {
|
||||
'RationalTime.1': _decoded_time,
|
||||
'TimeRange.1': _decoded_time_range,
|
||||
'TimeTransform.1': _decoded_transform,
|
||||
}
|
||||
|
||||
|
||||
def _as_otio(dct):
|
||||
""" Specialized JSON decoder for OTIO base Objects. """
|
||||
|
||||
if "OTIO_SCHEMA" in dct:
|
||||
schema_label = dct["OTIO_SCHEMA"]
|
||||
|
||||
if schema_label in _DECODER_FUNCTION_MAP:
|
||||
return _DECODER_FUNCTION_MAP[schema_label](dct)
|
||||
|
||||
schema_name = type_registry.schema_name_from_label(schema_label)
|
||||
schema_version = type_registry.schema_version_from_label(schema_label)
|
||||
del dct["OTIO_SCHEMA"]
|
||||
|
||||
return type_registry.instance_from_schema(
|
||||
schema_name,
|
||||
schema_version,
|
||||
dct
|
||||
)
|
||||
|
||||
return dct
|
||||
|
||||
|
||||
def deserialize_json_from_string(otio_string):
|
||||
""" Deserialize a string containing JSON to OTIO objects. """
|
||||
|
||||
return json.loads(otio_string, object_hook=_as_otio)
|
||||
|
||||
|
||||
def deserialize_json_from_file(otio_filepath):
|
||||
""" Deserialize the file at otio_filepath containing JSON to OTIO. """
|
||||
|
||||
with open(otio_filepath, 'r') as file_contents:
|
||||
result = deserialize_json_from_string(file_contents.read())
|
||||
result._json_path = otio_filepath
|
||||
return result
|
||||
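# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# Round-tripping an OTIO object through the serializer defined above. Assumes
# the vendored package is importable as `opentimelineio`; the clip name is
# arbitrary.
import opentimelineio as otio
from opentimelineio.core import json_serializer

clip = otio.schema.Clip(name="shot_010")
as_text = json_serializer.serialize_json_to_string(clip)
restored = json_serializer.deserialize_json_from_string(as_text)

# equality on SerializableObject is identity, so compare contents instead
assert restored.is_equivalent_to(clip)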
102
pype/vendor/python/python_2/opentimelineio/core/media_reference.py
vendored
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Media Reference Classes and Functions."""
|
||||
|
||||
from .. import (
|
||||
opentime,
|
||||
)
|
||||
from . import (
|
||||
type_registry,
|
||||
serializable_object,
|
||||
)
|
||||
|
||||
import copy
|
||||
|
||||
|
||||
@type_registry.register_type
|
||||
class MediaReference(serializable_object.SerializableObject):
|
||||
"""Base Media Reference Class.
|
||||
|
||||
Currently handles string printing of the child classes, which expose an
|
||||
interface into its data dictionary.
|
||||
|
||||
The requirement is that the schema is named so that external systems can
|
||||
fetch the required information correctly.
|
||||
"""
|
||||
_serializable_label = "MediaReference.1"
|
||||
_name = "MediaReference"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
available_range=None,
|
||||
metadata=None
|
||||
):
|
||||
super(MediaReference, self).__init__()
|
||||
|
||||
self.name = name
|
||||
self.available_range = copy.deepcopy(available_range)
|
||||
self.metadata = copy.deepcopy(metadata) or {}
|
||||
|
||||
name = serializable_object.serializable_field(
|
||||
"name",
|
||||
doc="Name of this media reference."
|
||||
)
|
||||
available_range = serializable_object.serializable_field(
|
||||
"available_range",
|
||||
opentime.TimeRange,
|
||||
doc="Available range of media in this media reference."
|
||||
)
|
||||
metadata = serializable_object.serializable_field(
|
||||
"metadata",
|
||||
dict,
|
||||
doc="Metadata dictionary."
|
||||
)
|
||||
|
||||
@property
|
||||
def is_missing_reference(self):
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
return "{}({}, {}, {})".format(
|
||||
self._name,
|
||||
repr(self.name),
|
||||
repr(self.available_range),
|
||||
repr(self.metadata)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.schema.{}("
|
||||
"name={},"
|
||||
" available_range={},"
|
||||
" metadata={}"
|
||||
")"
|
||||
).format(
|
||||
self._name,
|
||||
repr(self.name),
|
||||
repr(self.available_range),
|
||||
repr(self.metadata)
|
||||
)
|
||||
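# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# Building a bare MediaReference with an available_range, as described above.
# Assumes the vendored package is importable as `opentimelineio`; the values
# and metadata keys are made up.
import opentimelineio as otio

ref = otio.core.MediaReference(
    name="plate_v001",
    available_range=otio.opentime.TimeRange(
        otio.opentime.RationalTime(0, 24),
        otio.opentime.RationalTime(48, 24)
    ),
    metadata={"facility": {"path_hint": "plates/plate_v001"}}
)
print(ref.is_missing_reference)  # False for the base class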
219
pype/vendor/python/python_2/opentimelineio/core/serializable_object.py
vendored
Normal file
|
|
@ -0,0 +1,219 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implements the otio.core.SerializableObject"""
|
||||
|
||||
import copy
|
||||
|
||||
from . import (
|
||||
type_registry,
|
||||
)
|
||||
|
||||
|
||||
class SerializableObject(object):
|
||||
"""Base object for things that can be [de]serialized to/from .otio files.
|
||||
|
||||
To define a new child class of this, you inherit from it and also use the
|
||||
register_type decorator. Then you use the serializable_field function
|
||||
in this module to create attributes that can be serialized/deserialized.
|
||||
|
||||
You can use the upgrade_function_for decorator to upgrade older schemas
|
||||
to newer ones.
|
||||
|
||||
Finally, if you're in the process of upgrading schemas and you want to
|
||||
catch code that refers to old attribute names, you can use the
|
||||
deprecated_field function. This raises an exception if code attempts to
|
||||
read or write to that attribute. After testing and before pushing, please
|
||||
remove references to deprecated_field.
|
||||
|
||||
For example
|
||||
|
||||
>>> import opentimelineio as otio
|
||||
|
||||
>>> @otio.core.register_type
|
||||
... class ExampleChild(otio.core.SerializableObject):
|
||||
... _serializable_label = "ExampleChild.7"
|
||||
... child_data = otio.core.serializable_field("child_data", int)
|
||||
|
||||
# @TODO: delete once testing shows nothing is referencing this.
|
||||
>>> old_child_data_name = otio.core.deprecated_field()
|
||||
|
||||
>>> @otio.core.upgrade_function_for(ExampleChild, 3)
|
||||
... def upgrade_child_to_three(_data):
|
||||
... return {"child_data" : _data["old_child_data_name"]}
|
||||
"""
|
||||
|
||||
# Every child must define a _serializable_label attribute.
|
||||
# This attribute is a string in the form of: "SchemaName.VersionNumber"
|
||||
# Where VersionNumber is an integer.
|
||||
# You can use the classmethods .schema_name() and .schema_version() to
|
||||
# query these fields.
|
||||
_serializable_label = None
|
||||
_class_path = "core.SerializableObject"
|
||||
|
||||
def __init__(self):
|
||||
self._data = {}
|
||||
|
||||
# @{ "Reference Type" semantics for SerializableObject
|
||||
# We think of the SerializableObject as a reference type - by default
|
||||
# comparison is pointer comparison, but you can use 'is_equivalent_to' to
|
||||
# check if the contents of the SerializableObject are the same as some
|
||||
# other SerializableObject's contents.
|
||||
#
|
||||
# Implicitly:
|
||||
# def __eq__(self, other):
|
||||
# return self is other
|
||||
|
||||
def is_equivalent_to(self, other):
|
||||
"""Returns true if the contents of self and other match."""
|
||||
|
||||
try:
|
||||
if self._data == other._data:
|
||||
return True
|
||||
|
||||
# XXX: Gross hack takes OTIO->JSON String->Python Dictionaries
|
||||
#
|
||||
# using the serializer ensures that we only compare fields that are
|
||||
# serializable, which is how we define equivalence.
|
||||
#
|
||||
# we use json.loads() to turn the string back into dictionaries
|
||||
# so we can use python's equivalence for things like floating
|
||||
# point numbers (ie 5.0 == 5) without having to do string
|
||||
# processing.
|
||||
|
||||
from . import json_serializer
|
||||
import json
|
||||
|
||||
lhs_str = json_serializer.serialize_json_to_string(self)
|
||||
lhs = json.loads(lhs_str)
|
||||
|
||||
rhs_str = json_serializer.serialize_json_to_string(other)
|
||||
rhs = json.loads(rhs_str)
|
||||
|
||||
return (lhs == rhs)
|
||||
except AttributeError:
|
||||
return False
|
||||
# @}
|
||||
|
||||
def _update(self, d):
|
||||
"""Like the dictionary .update() method.
|
||||
|
||||
Update the _data dictionary of this SerializableObject with the ._data
|
||||
of d if d is a SerializableObject, or with d itself if d is a dictionary.
|
||||
"""
|
||||
|
||||
if isinstance(d, SerializableObject):
|
||||
self._data.update(d._data)
|
||||
else:
|
||||
self._data.update(d)
|
||||
|
||||
@classmethod
|
||||
def schema_name(cls):
|
||||
return type_registry.schema_name_from_label(
|
||||
cls._serializable_label
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def schema_version(cls):
|
||||
return type_registry.schema_version_from_label(
|
||||
cls._serializable_label
|
||||
)
|
||||
|
||||
@property
|
||||
def is_unknown_schema(self):
|
||||
# in general, SerializableObject will have a known schema
|
||||
# but UnknownSchema subclass will redefine this property to be True
|
||||
return False
|
||||
|
||||
def __copy__(self):
|
||||
raise NotImplementedError(
|
||||
"Shallow copying is not permitted. Use a deep copy."
|
||||
)
|
||||
|
||||
def __deepcopy__(self, md):
|
||||
result = type(self)()
|
||||
result._data = copy.deepcopy(self._data, md)
|
||||
|
||||
return result
|
||||
|
||||
def deepcopy(self):
|
||||
return self.__deepcopy__({})
|
||||
|
||||
|
||||
def serializable_field(name, required_type=None, doc=None):
|
||||
"""Create a serializable_field for child classes of SerializableObject.
|
||||
|
||||
Convenience function for adding attributes to child classes of
|
||||
SerializableObject in such a way that they will be serialized/deserialized
|
||||
automatically.
|
||||
|
||||
Use it like this:
|
||||
class foo(SerializableObject):
|
||||
bar = serializable_field("bar", required_type=int, doc="example")
|
||||
|
||||
This would indicate that class "foo" has a serializable field "bar". So:
|
||||
f = foo()
|
||||
f.bar = "stuff"
|
||||
|
||||
# serialize & deserialize
|
||||
otio_json = otio.adapters.from_name("otio")
|
||||
f2 = otio_json.read_from_string(otio_json.write_to_string(f))
|
||||
|
||||
# fields should be equal
|
||||
f.bar == f2.bar
|
||||
|
||||
Additionally, the "doc" field will become the documentation for the
|
||||
property.
|
||||
"""
|
||||
|
||||
def getter(self):
|
||||
return self._data[name]
|
||||
|
||||
def setter(self, val):
|
||||
# always allow None values regardless of value of required_type
|
||||
if required_type is not None and val is not None:
|
||||
if not isinstance(val, required_type):
|
||||
raise TypeError(
|
||||
"attribute '{}' must be an instance of '{}', not: {}".format(
|
||||
name,
|
||||
required_type,
|
||||
type(val)
|
||||
)
|
||||
)
|
||||
|
||||
self._data[name] = val
|
||||
|
||||
return property(getter, setter, doc=doc)
|
||||
|
||||
|
||||
def deprecated_field():
|
||||
""" For marking attributes on a SerializableObject deprecated. """
|
||||
|
||||
def getter(self):
|
||||
raise DeprecationWarning
|
||||
|
||||
def setter(self, val):
|
||||
raise DeprecationWarning
|
||||
|
||||
return property(getter, setter, doc="Deprecated field, do not use.")
|
||||
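# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# Defining a custom SerializableObject with serializable_field(), including
# the TypeError raised when required_type does not match. The class is made
# up; assumes the vendored package is importable as `opentimelineio`.
import opentimelineio as otio


@otio.core.register_type
class _Note(otio.core.SerializableObject):
    _serializable_label = "Note.1"
    text = otio.core.serializable_field("text", required_type=str, doc="Body text.")


note = _Note()
note.text = "check colors on shot 12"   # ok
try:
    note.text = 42                       # wrong type -> TypeError from the setter
except TypeError as err:
    print(err)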
152
pype/vendor/python/python_2/opentimelineio/core/type_registry.py
vendored
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Core type registry system for registering OTIO types for serialization."""
|
||||
|
||||
from .. import (
|
||||
exceptions
|
||||
)
|
||||
|
||||
|
||||
# Types use the register_type() decorator to insert themselves into this map
|
||||
_OTIO_TYPES = {}
|
||||
|
||||
# maps types to a map of versions to upgrade functions
|
||||
_UPGRADE_FUNCTIONS = {}
|
||||
|
||||
|
||||
def schema_name_from_label(label):
|
||||
"""Return the schema name from the label name."""
|
||||
|
||||
return label.split(".")[0]
|
||||
|
||||
|
||||
def schema_version_from_label(label):
|
||||
"""Return the schema version from the label name."""
|
||||
|
||||
return int(label.split(".")[1])
|
||||
|
||||
|
||||
def schema_label_from_name_version(schema_name, schema_version):
|
||||
"""Return the serializeable object schema label given the name and version."""
|
||||
|
||||
return "{}.{}".format(schema_name, schema_version)
|
||||
|
||||
|
||||
def register_type(classobj, schemaname=None):
|
||||
""" Register a class to a Schema Label.
|
||||
|
||||
Normally this is used as a decorator. However, in special cases where a
|
||||
type has been renamed, you might need to register the new type to multiple
|
||||
schema names. To do this:
|
||||
|
||||
>>> @core.register_type
|
||||
... class MyNewClass(...):
|
||||
... pass
|
||||
|
||||
>>> core.register_type(MyNewClass, "MyOldName")
|
||||
|
||||
This will map the old schema name to the new class type. You may also
|
||||
need to write an upgrade function if the schema itself has changed.
|
||||
"""
|
||||
|
||||
if schemaname is None:
|
||||
schemaname = schema_name_from_label(classobj._serializable_label)
|
||||
|
||||
_OTIO_TYPES[schemaname] = classobj
|
||||
|
||||
return classobj
|
||||
|
||||
|
||||
def upgrade_function_for(cls, version_to_upgrade_to):
|
||||
"""Decorator for identifying schema class upgrade functions.
|
||||
|
||||
Example
|
||||
>>> @upgrade_function_for(MyClass, 5)
|
||||
... def upgrade_to_version_five(data):
|
||||
... pass
|
||||
|
||||
This will get called to upgrade a schema of MyClass to version 5. My class
|
||||
must be a class deriving from otio.core.SerializableObject.
|
||||
|
||||
The upgrade function should take a single argument - the dictionary to
|
||||
upgrade, and return a dictionary with the fields upgraded.
|
||||
|
||||
Remember that you don't need to provide an upgrade function for upgrades
|
||||
that add or remove fields, only for schema versions that change the field
|
||||
names.
|
||||
"""
|
||||
|
||||
def decorator_func(func):
|
||||
""" Decorator for marking upgrade functions """
|
||||
|
||||
_UPGRADE_FUNCTIONS.setdefault(cls, {})[version_to_upgrade_to] = func
|
||||
|
||||
return func
|
||||
|
||||
return decorator_func
|
||||
|
||||
|
||||
def instance_from_schema(schema_name, schema_version, data_dict):
|
||||
"""Return an instance, of the schema from data in the data_dict."""
|
||||
|
||||
if schema_name not in _OTIO_TYPES:
|
||||
from .unknown_schema import UnknownSchema
|
||||
|
||||
# create an object of UnknownSchema type to represent the data
|
||||
schema_label = schema_label_from_name_version(schema_name, schema_version)
|
||||
data_dict[UnknownSchema._original_label] = schema_label
|
||||
unknown_label = UnknownSchema._serializable_label
|
||||
schema_name = schema_name_from_label(unknown_label)
|
||||
schema_version = schema_version_from_label(unknown_label)
|
||||
|
||||
cls = _OTIO_TYPES[schema_name]
|
||||
|
||||
schema_version = int(schema_version)
|
||||
if cls.schema_version() < schema_version:
|
||||
raise exceptions.UnsupportedSchemaError(
|
||||
"Schema '{}' has highest version available '{}', which is lower "
|
||||
"than requested schema version '{}'".format(
|
||||
schema_name,
|
||||
cls.schema_version(),
|
||||
schema_version
|
||||
)
|
||||
)
|
||||
|
||||
if cls.schema_version() != schema_version:
|
||||
# since the keys are the versions to upgrade to, sorting the keys
|
||||
# before iterating through them should ensure that upgrade functions
|
||||
# are called in order.
|
||||
for version, upgrade_func in sorted(
|
||||
_UPGRADE_FUNCTIONS[cls].items()
|
||||
):
|
||||
if version < schema_version:
|
||||
continue
|
||||
|
||||
data_dict = upgrade_func(data_dict)
|
||||
|
||||
obj = cls()
|
||||
obj._update(data_dict)
|
||||
|
||||
return obj
|
||||
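# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# How register_type(), upgrade_function_for() and instance_from_schema()
# cooperate. The schema and field names are invented; assumes the vendored
# package is importable as `opentimelineio`.
import opentimelineio as otio
from opentimelineio.core import type_registry


@otio.core.register_type
class _Shot(otio.core.SerializableObject):
    _serializable_label = "Shot.2"
    code = otio.core.serializable_field("code", required_type=str)


# pretend version 2 renamed the old "shot_name" field to "code"
@otio.core.upgrade_function_for(_Shot, 2)
def _upgrade_shot_to_two(data):
    data["code"] = data.pop("shot_name", "")
    return data


# data tagged with schema version 1 is upgraded before the instance is built
obj = type_registry.instance_from_schema("Shot", 1, {"shot_name": "sh010"})
print(obj.code)  # "sh010"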
50
pype/vendor/python/python_2/opentimelineio/core/unknown_schema.py
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""
|
||||
Implementation of the UnknownSchema schema.
|
||||
"""
|
||||
|
||||
from .serializable_object import SerializableObject
|
||||
from .type_registry import register_type
|
||||
|
||||
|
||||
@register_type
|
||||
class UnknownSchema(SerializableObject):
|
||||
"""Represents an object whose schema is unknown to us."""
|
||||
|
||||
_serializable_label = "UnknownSchema.1"
|
||||
_name = "UnknownSchema"
|
||||
_original_label = "UnknownSchemaOriginalLabel"
|
||||
|
||||
@property
|
||||
def is_unknown_schema(self):
|
||||
return True
|
||||
|
||||
@property
|
||||
def data(self):
|
||||
"""Exposes the data dictionary of the underlying SerializableObject
|
||||
directly.
|
||||
"""
|
||||
return self._data
|
||||
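# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# How an unregistered schema survives a read/write cycle as UnknownSchema,
# per the decoder and encoders above. The schema name and field are made up.
from opentimelineio.core import json_serializer

text = '{"OTIO_SCHEMA": "SomeVendorThing.1", "foo": "bar"}'
obj = json_serializer.deserialize_json_from_string(text)

print(obj.is_unknown_schema)   # True
print(obj.data["foo"])         # "bar"

# serializing writes the original label back out, so nothing is lost
print(json_serializer.serialize_json_to_string(obj, indent=None))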
89
pype/vendor/python/python_2/opentimelineio/exceptions.py
vendored
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Exception classes for OpenTimelineIO"""
|
||||
|
||||
|
||||
class OTIOError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CouldNotReadFileError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class NoKnownAdapterForExtensionError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class ReadingNotSupportedError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class WritingNotSupportedError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class NotSupportedError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class InvalidSerializableLabelError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class CannotComputeAvailableRangeError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class AdapterDoesntSupportFunctionError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class UnsupportedSchemaError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class NotAChildError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class InstancingNotAllowedError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class TransitionFollowingATransitionError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class MisconfiguredPluginError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class CannotTrimTransitionsError(OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class NoDefaultMediaLinkerError(OTIOError):
|
||||
pass
|
||||
174
pype/vendor/python/python_2/opentimelineio/hooks.py
vendored
Normal file
|
|
@ -0,0 +1,174 @@
|
|||
#
|
||||
# Copyright 2018 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
from . import (
|
||||
plugins,
|
||||
core,
|
||||
)
|
||||
|
||||
__doc__ = """
|
||||
HookScripts are plugins that run at defined points ("Hooks").
|
||||
|
||||
They expose a hook_function with signature:
|
||||
hook_function :: otio.schema.Timeline, Dict -> otio.schema.Timeline
|
||||
|
||||
Both hook scripts and the hooks they attach to are defined in the plugin
|
||||
manifest.
|
||||
|
||||
You can attach multiple hook scripts to a hook. They will be executed in list
|
||||
order, first to last.
|
||||
|
||||
They are defined by the manifest's 'hook_scripts' and 'hooks' areas.
|
||||
|
||||
>>>
|
||||
{
|
||||
"OTIO_SCHEMA" : "PluginManifest.1",
|
||||
"hook_scripts" : [
|
||||
{
|
||||
"OTIO_SCHEMA" : "HookScript.1",
|
||||
"name" : "example hook",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "example.py"
|
||||
}
|
||||
],
|
||||
"hooks" : {
|
||||
"pre_adapter_write" : ["example hook"],
|
||||
"post_adapter_read" : []
|
||||
}
|
||||
}
|
||||
|
||||
The 'hook_scripts' area loads the python modules with the 'hook_function's to
|
||||
call in them. The 'hooks' area defines the hooks (and any associated
|
||||
scripts). You can further query and modify these from python.
|
||||
|
||||
>>> import opentimelineio as otio
|
||||
... hook_list = otio.hooks.scripts_attached_to("some_hook") # -> ['a','b','c']
|
||||
...
|
||||
... # to run the hook scripts:
|
||||
... otio.hooks.run("some_hook", some_timeline, optional_argument_dict)
|
||||
|
||||
This will pass (some_timeline, optional_argument_dict) to 'a', which will return
|
||||
a new timeline that will get passed into 'b' with optional_argument_dict,
|
||||
etc.
|
||||
|
||||
To edit the order, change the order in the list:
|
||||
|
||||
>>> hook_list[0], hook_list[2] = hook_list[2], hook_list[0]
|
||||
... print hook_list # ['c','b','a']
|
||||
|
||||
Now c will run, then b, then a.
|
||||
|
||||
To delete a function from the list:
|
||||
|
||||
>>> del hook_list[1]
|
||||
"""
|
||||
|
||||
|
||||
@core.register_type
|
||||
class HookScript(plugins.PythonPlugin):
|
||||
_serializable_label = "HookScript.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
execution_scope=None,
|
||||
filepath=None,
|
||||
):
|
||||
"""HookScript plugin constructor."""
|
||||
|
||||
super(HookScript, self).__init__(name, execution_scope, filepath)
|
||||
|
||||
def run(self, in_timeline, argument_map={}):
|
||||
"""Run the hook_function associated with this plugin."""
|
||||
|
||||
# @TODO: should in_timeline be passed in place? or should a copy be
|
||||
# made?
|
||||
return self._execute_function(
|
||||
"hook_function",
|
||||
in_timeline=in_timeline,
|
||||
argument_map=argument_map
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "HookScript({}, {}, {})".format(
|
||||
repr(self.name),
|
||||
repr(self.execution_scope),
|
||||
repr(self.filepath)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.hooks.HookScript("
|
||||
"name={}, "
|
||||
"execution_scope={}, "
|
||||
"filepath={}"
|
||||
")".format(
|
||||
repr(self.name),
|
||||
repr(self.execution_scope),
|
||||
repr(self.filepath)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def names():
|
||||
"""Return a list of all the registered hooks."""
|
||||
|
||||
return plugins.ActiveManifest().hooks.keys()
|
||||
|
||||
|
||||
def available_hookscript_names():
|
||||
"""Return the names of HookScripts that have been registered."""
|
||||
|
||||
return [hs.name for hs in plugins.ActiveManifest().hook_scripts]
|
||||
|
||||
|
||||
def available_hookscripts():
|
||||
"""Return the HookScripts objects that have been registered."""
|
||||
return plugins.ActiveManifest().hook_scripts
|
||||
|
||||
|
||||
def scripts_attached_to(hook):
|
||||
"""Return an editable list of all the hook scriptss that are attached to
|
||||
the specified hook, in execution order. Changing this list will change the
|
||||
order that scripts run in, and deleting a script will remove it from
|
||||
executing.
|
||||
"""
|
||||
|
||||
# @TODO: Should this return a copy?
|
||||
return plugins.ActiveManifest().hooks[hook]
|
||||
|
||||
|
||||
def run(hook, tl, extra_args=None):
|
||||
"""Run all the scripts associated with hook, passing in tl and extra_args.
|
||||
|
||||
Will return the return value of the last hook script.
|
||||
|
||||
If no hookscripts are defined, returns tl.
|
||||
"""
|
||||
|
||||
hook_scripts = plugins.ActiveManifest().hooks[hook]
|
||||
for name in hook_scripts:
|
||||
hs = plugins.ActiveManifest().from_name(name, "hook_scripts")
|
||||
tl = hs.run(tl, extra_args)
|
||||
return tl
|
||||
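# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# What a hook script module might look like. The file would be referenced by a
# "HookScript.1" entry in the plugin manifest (see the docstring above); the
# metadata key used here is invented.
def hook_function(in_timeline, argument_map=None):
    argument_map = argument_map or {}

    # e.g. stamp every timeline that passes through this hook
    in_timeline.metadata.setdefault("studio", {})["hooked"] = True

    # hand a timeline back so the next script in the chain receives one
    return in_timeline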
169
pype/vendor/python/python_2/opentimelineio/media_linker.py
vendored
Normal file
|
|
@ -0,0 +1,169 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
""" MediaLinker plugins fire after an adapter has read a file in order to
|
||||
produce MediaReferences that point at valid, site specific media.
|
||||
|
||||
They expose a "link_media_reference" function with the signature:
|
||||
link_media_reference :: otio.schema.Clip -> otio.core.MediaReference
|
||||
|
||||
or:
|
||||
def linked_media_reference(from_clip):
|
||||
result = otio.core.MediaReference() # whichever subclass
|
||||
# do stuff
|
||||
return result
|
||||
|
||||
To get context information, they can inspect the metadata on the clip and on
|
||||
the media reference. The .parent() method can be used to find the containing
|
||||
track if metadata is stored there.
|
||||
|
||||
Please raise an instance (or child instance) of
|
||||
otio.exceptions.CannotLinkMediaError() if there is a problem linking the media.
|
||||
|
||||
For example:
|
||||
for clip in timeline.each_clip():
|
||||
try:
|
||||
new_mr = otio.media_linker.linked_media_reference(clip)
|
||||
clip.media_reference = new_mr
|
||||
except otio.exceptions.CannotLinkMediaError:
|
||||
# or report the error
|
||||
pass
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from . import (
|
||||
exceptions,
|
||||
plugins,
|
||||
core,
|
||||
)
|
||||
|
||||
|
||||
# Enum describing different media linker policies
|
||||
class MediaLinkingPolicy:
|
||||
DoNotLinkMedia = "__do_not_link_media"
|
||||
ForceDefaultLinker = "__default"
|
||||
|
||||
|
||||
# @TODO: wrap this up in the plugin system somehow? automatically generate?
|
||||
def available_media_linker_names():
|
||||
"""Return a string list of the available media linker plugins."""
|
||||
|
||||
return [str(adp.name) for adp in plugins.ActiveManifest().media_linkers]
|
||||
|
||||
|
||||
def from_name(name):
|
||||
"""Fetch the media linker object by the name of the adapter directly."""
|
||||
|
||||
if name == MediaLinkingPolicy.ForceDefaultLinker or not name:
|
||||
name = os.environ.get("OTIO_DEFAULT_MEDIA_LINKER", None)
|
||||
|
||||
if not name:
|
||||
return None
|
||||
|
||||
# @TODO: make this handle the enums
|
||||
try:
|
||||
return plugins.ActiveManifest().from_name(
|
||||
name,
|
||||
kind_list="media_linkers"
|
||||
)
|
||||
except exceptions.NotSupportedError:
|
||||
raise exceptions.NotSupportedError(
|
||||
"media linker not supported: {}, available: {}".format(
|
||||
name,
|
||||
available_media_linker_names()
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def default_media_linker():
|
||||
try:
|
||||
return os.environ['OTIO_DEFAULT_MEDIA_LINKER']
|
||||
except KeyError:
|
||||
raise exceptions.NoDefaultMediaLinkerError(
|
||||
"No default Media Linker set in $OTIO_DEFAULT_MEDIA_LINKER"
|
||||
)
|
||||
|
||||
|
||||
def linked_media_reference(
|
||||
target_clip,
|
||||
media_linker_name=MediaLinkingPolicy.ForceDefaultLinker,
|
||||
media_linker_argument_map=None
|
||||
):
|
||||
media_linker = from_name(media_linker_name)
|
||||
|
||||
if not media_linker:
|
||||
return target_clip
|
||||
|
||||
# @TODO: connect this argument map up to the function call through to the
|
||||
# real linker
|
||||
if not media_linker_argument_map:
|
||||
media_linker_argument_map = {}
|
||||
|
||||
return media_linker.link_media_reference(
|
||||
target_clip,
|
||||
media_linker_argument_map
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class MediaLinker(plugins.PythonPlugin):
|
||||
_serializable_label = "MediaLinker.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
execution_scope=None,
|
||||
filepath=None,
|
||||
):
|
||||
super(MediaLinker, self).__init__(name, execution_scope, filepath)
|
||||
|
||||
def link_media_reference(self, in_clip, media_linker_argument_map=None):
|
||||
media_linker_argument_map = media_linker_argument_map or {}
|
||||
|
||||
return self._execute_function(
|
||||
"link_media_reference",
|
||||
in_clip=in_clip,
|
||||
media_linker_argument_map=media_linker_argument_map
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "MediaLinker({}, {}, {})".format(
|
||||
repr(self.name),
|
||||
repr(self.execution_scope),
|
||||
repr(self.filepath)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.media_linker.MediaLinker("
|
||||
"name={}, "
|
||||
"execution_scope={}, "
|
||||
"filepath={}"
|
||||
")".format(
|
||||
repr(self.name),
|
||||
repr(self.execution_scope),
|
||||
repr(self.filepath)
|
||||
)
|
||||
)
|
||||
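# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# What a media linker plugin module might provide. The root path and file
# naming are invented; a real linker would resolve clips against site-specific
# storage and raise the error described in the module docstring on failure.
import os

import opentimelineio as otio


def link_media_reference(in_clip, media_linker_argument_map):
    root = media_linker_argument_map.get("media_root", "/mnt/projects")
    candidate = os.path.join(root, "{0}.mov".format(in_clip.name))

    if not os.path.exists(candidate):
        # a real linker would raise CannotLinkMediaError here (see docstring)
        return otio.schema.MissingReference()

    return otio.schema.ExternalReference(target_url="file://" + candidate)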
856
pype/vendor/python/python_2/opentimelineio/opentime.py
vendored
Normal file
|
|
@ -0,0 +1,856 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Library for expressing and transforming time.
|
||||
|
||||
NOTE: This module is written specifically with a future port to C in mind.
|
||||
When ported to C, Time will be a struct and these functions should be very
|
||||
simple.
|
||||
"""
|
||||
|
||||
import math
|
||||
import copy
|
||||
|
||||
|
||||
VALID_NON_DROPFRAME_TIMECODE_RATES = (
|
||||
1,
|
||||
12,
|
||||
23.976,
|
||||
23.98,
|
||||
(24000 / 1001.0),
|
||||
24,
|
||||
25,
|
||||
30,
|
||||
29.97,
|
||||
(30000 / 1001.0),
|
||||
48,
|
||||
50,
|
||||
59.94,
|
||||
(60000 / 1001.0),
|
||||
60,
|
||||
)
|
||||
|
||||
VALID_DROPFRAME_TIMECODE_RATES = (
|
||||
29.97,
|
||||
(30000 / 1001.0),
|
||||
59.94,
|
||||
(60000 / 1001.0),
|
||||
)
|
||||
|
||||
VALID_TIMECODE_RATES = (
|
||||
VALID_NON_DROPFRAME_TIMECODE_RATES + VALID_DROPFRAME_TIMECODE_RATES)
|
||||
|
||||
_fn_cache = object.__setattr__
|
||||
|
||||
|
||||
class RationalTime(object):
|
||||
""" Represents an instantaneous point in time, value * (1/rate) seconds
|
||||
from time 0 seconds.
|
||||
"""
|
||||
|
||||
# Locks RationalTime instances to only these attributes
|
||||
__slots__ = ['value', 'rate']
|
||||
|
||||
def __init__(self, value=0.0, rate=1.0):
|
||||
_fn_cache(self, "value", value)
|
||||
_fn_cache(self, "rate", rate)
|
||||
|
||||
def __setattr__(self, key, val):
|
||||
"""Enforces immutability """
|
||||
raise AttributeError("RationalTime is Immutable.")
|
||||
|
||||
def __copy__(self, memodict=None):
|
||||
return RationalTime(self.value, self.rate)
|
||||
|
||||
# Always deepcopy, since we want this class to behave like a value type
|
||||
__deepcopy__ = __copy__
|
||||
|
||||
def rescaled_to(self, new_rate):
|
||||
"""Returns the time for this time converted to new_rate"""
|
||||
|
||||
try:
|
||||
new_rate = new_rate.rate
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if self.rate == new_rate:
|
||||
return copy.copy(self)
|
||||
|
||||
return RationalTime(
|
||||
self.value_rescaled_to(new_rate),
|
||||
new_rate
|
||||
)
|
||||
|
||||
def value_rescaled_to(self, new_rate):
|
||||
"""Returns the time value for self converted to new_rate"""
|
||||
|
||||
try:
|
||||
new_rate = new_rate.rate
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if new_rate == self.rate:
|
||||
return self.value
|
||||
|
||||
# TODO: This math probably needs some overrun protection
|
||||
try:
|
||||
return float(self.value) * float(new_rate) / float(self.rate)
|
||||
except (AttributeError, TypeError, ValueError):
|
||||
raise TypeError(
|
||||
"Sorry, RationalTime cannot be rescaled to a value of type "
|
||||
"'{}', only RationalTime and numbers are supported.".format(
|
||||
type(new_rate)
|
||||
)
|
||||
)
|
||||
|
||||
def almost_equal(self, other, delta=0.0):
|
||||
try:
|
||||
rescaled_value = self.value_rescaled_to(other.rate)
|
||||
return abs(rescaled_value - other.value) <= delta
|
||||
|
||||
except AttributeError:
|
||||
return False
|
||||
|
||||
def __add__(self, other):
|
||||
"""Returns a RationalTime object that is the sum of self and other.
|
||||
|
||||
If self and other have differing time rates, the result will have the
|
||||
rate of the faster time.
|
||||
"""
|
||||
|
||||
try:
|
||||
if self.rate == other.rate:
|
||||
return RationalTime(self.value + other.value, self.rate)
|
||||
except AttributeError:
|
||||
if not isinstance(other, RationalTime):
|
||||
raise TypeError(
|
||||
"RationalTime may only be added to other objects of type "
|
||||
"RationalTime, not {}.".format(type(other))
|
||||
)
|
||||
raise
|
||||
|
||||
if self.rate > other.rate:
|
||||
scale = self.rate
|
||||
value = self.value + other.value_rescaled_to(scale)
|
||||
else:
|
||||
scale = other.rate
|
||||
value = self.value_rescaled_to(scale) + other.value
|
||||
|
||||
return RationalTime(value, scale)
|
||||
|
||||
# because RationalTime is immutable, += is sugar around +
|
||||
__iadd__ = __add__
|
||||
|
||||
def __sub__(self, other):
|
||||
"""Returns a RationalTime object that is self - other.
|
||||
|
||||
If self and other have differing time rates, the result will have the
|
||||
rate of the faster time.
|
||||
"""
|
||||
|
||||
try:
|
||||
if self.rate == other.rate:
|
||||
return RationalTime(self.value - other.value, self.rate)
|
||||
except AttributeError:
|
||||
if not isinstance(other, RationalTime):
|
||||
raise TypeError(
|
||||
"RationalTime may only be added to other objects of type "
|
||||
"RationalTime, not {}.".format(type(other))
|
||||
)
|
||||
raise
|
||||
|
||||
if self.rate > other.rate:
|
||||
scale = self.rate
|
||||
value = self.value - other.value_rescaled_to(scale)
|
||||
else:
|
||||
scale = other.rate
|
||||
value = self.value_rescaled_to(scale) - other.value
|
||||
|
||||
return RationalTime(value=value, rate=scale)
|
||||
|
||||
def _comparable_floats(self, other):
|
||||
"""Returns a tuple of two floats, (self, other), which are suitable
|
||||
for comparison.
|
||||
|
||||
If other is not of a type that can be compared, TypeError is raised
|
||||
"""
|
||||
try:
|
||||
return (
|
||||
float(self.value) / self.rate,
|
||||
float(other.value) / other.rate
|
||||
)
|
||||
except AttributeError:
|
||||
if not isinstance(other, RationalTime):
|
||||
raise TypeError(
|
||||
"RationalTime can only be compared to other objects of type "
|
||||
"RationalTime, not {}".format(type(other))
|
||||
)
|
||||
raise
|
||||
|
||||
def __gt__(self, other):
|
||||
f_self, f_other = self._comparable_floats(other)
|
||||
return f_self > f_other
|
||||
|
||||
def __lt__(self, other):
|
||||
f_self, f_other = self._comparable_floats(other)
|
||||
return f_self < f_other
|
||||
|
||||
def __le__(self, other):
|
||||
f_self, f_other = self._comparable_floats(other)
|
||||
return f_self <= f_other
|
||||
|
||||
def __ge__(self, other):
|
||||
f_self, f_other = self._comparable_floats(other)
|
||||
return f_self >= f_other
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.opentime.RationalTime(value={value},"
|
||||
" rate={rate})".format(
|
||||
value=repr(self.value),
|
||||
rate=repr(self.rate),
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "RationalTime({}, {})".format(
|
||||
str(self.value),
|
||||
str(self.rate)
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return self.value_rescaled_to(other.rate) == other.value
|
||||
except AttributeError:
|
||||
return False
|
||||
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.value, self.rate))
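# --- Editor's note: illustrative sketch, not part of the vendored file. ---
# RationalTime arithmetic with mixed rates, per the __add__/__sub__ docstrings:
# the result takes the rate of the faster operand. Values are arbitrary.
_a = RationalTime(24, 24)        # 1 second at 24 fps
_b = RationalTime(30, 30)        # 1 second at 30 fps
assert (_a + _b) == RationalTime(60, 30)
assert (_a + _b).rescaled_to(24) == RationalTime(48, 24)
assert _a == RationalTime(30, 30)   # equality compares in seconds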
|
||||
|
||||
|
||||
class TimeTransform(object):
|
||||
"""1D Transform for RationalTime. Has offset and scale."""
|
||||
|
||||
def __init__(self, offset=RationalTime(), scale=1.0, rate=None):
|
||||
self.offset = copy.copy(offset)
|
||||
self.scale = float(scale)
|
||||
self.rate = float(rate) if rate else None
|
||||
|
||||
def applied_to(self, other):
|
||||
if isinstance(other, TimeRange):
|
||||
return range_from_start_end_time(
|
||||
start_time=self.applied_to(other.start_time),
|
||||
end_time_exclusive=self.applied_to(other.end_time_exclusive())
|
||||
)
|
||||
|
||||
target_rate = self.rate if self.rate is not None else other.rate
|
||||
if isinstance(other, TimeTransform):
|
||||
return TimeTransform(
|
||||
offset=self.offset + other.offset,
|
||||
scale=self.scale * other.scale,
|
||||
rate=target_rate
|
||||
)
|
||||
elif isinstance(other, RationalTime):
|
||||
value = other.value * self.scale
|
||||
result = RationalTime(value, other.rate) + self.offset
|
||||
if target_rate is not None:
|
||||
result = result.rescaled_to(target_rate)
|
||||
|
||||
return result
|
||||
else:
|
||||
raise TypeError(
|
||||
"TimeTransform can only be applied to a TimeTransform or "
|
||||
"RationalTime, not a {}".format(type(other))
|
||||
)
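# Usage sketch (editor's note, not part of the vendored file): a TimeTransform
# scales first, then adds its offset; the result keeps the transform's rate
# when one is set, otherwise the input's rate.
#
#     xform = TimeTransform(offset=RationalTime(6, 24), scale=2.0)
#     xform.applied_to(RationalTime(3, 24))   # -> value 12.0 at rate 24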
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.opentime.TimeTransform(offset={}, scale={}, rate={})".format(
|
||||
repr(self.offset),
|
||||
repr(self.scale),
|
||||
repr(self.rate)
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
"TimeTransform({}, {}, {})".format(
|
||||
str(self.offset),
|
||||
str(self.scale),
|
||||
str(self.rate)
|
||||
)
|
||||
)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return (
|
||||
(self.offset, self.scale, self.rate) ==
|
||||
(other.offset, other.scale, other.rate)
|
||||
)
|
||||
except AttributeError:
|
||||
return False
|
||||
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.offset, self.scale, self.rate))
|
||||
|
||||
|
||||
class BoundStrategy(object):
|
||||
"""Different bounding strategies for TimeRange """
|
||||
|
||||
Free = 1
|
||||
Clamp = 2
|
||||
|
||||
|
||||
class TimeRange(object):
|
||||
"""Contains a range of time, starting (and including) start_time and
|
||||
lasting duration.value * (1/duration.rate) seconds.
|
||||
|
||||
A 0 duration TimeRange is the same as a RationalTime, and contains only the
|
||||
start_time of the TimeRange.
|
||||
"""
|
||||
|
||||
__slots__ = ['start_time', 'duration']
|
||||
|
||||
def __init__(self, start_time=None, duration=None):
|
||||
if not isinstance(start_time, RationalTime) and start_time is not None:
|
||||
raise TypeError(
|
||||
"start_time must be a RationalTime, not "
|
||||
"'{}'".format(start_time)
|
||||
)
|
||||
if (
|
||||
duration is not None and (
|
||||
not isinstance(duration, RationalTime)
|
||||
or duration.value < 0.0
|
||||
)
|
||||
):
|
||||
raise TypeError(
|
||||
"duration must be a RationalTime with value >= 0, not "
|
||||
"'{}'".format(duration)
|
||||
)
|
||||
|
||||
# if the start time has not been passed in
|
||||
if not start_time:
|
||||
if duration:
|
||||
# ...get the rate from the duration
|
||||
start_time = RationalTime(rate=duration.rate)
|
||||
else:
|
||||
# otherwise use the default
|
||||
start_time = RationalTime()
|
||||
_fn_cache(self, "start_time", copy.copy(start_time))
|
||||
|
||||
if not duration:
|
||||
# ...get the rate from the start_time
|
||||
duration = RationalTime(rate=start_time.rate)
|
||||
_fn_cache(self, "duration", copy.copy(duration))
|
||||
|
||||
def __setattr__(self, key, val):
|
||||
raise AttributeError("TimeRange is Immutable.")
|
||||
|
||||
def __copy__(self, memodict=None):
|
||||
# Construct a new one directly to avoid the overhead of deepcopy
|
||||
return TimeRange(
|
||||
copy.copy(self.start_time),
|
||||
copy.copy(self.duration)
|
||||
)
|
||||
|
||||
# Always deepcopy, since we want this class to behave like a value type
|
||||
__deepcopy__ = __copy__
|
||||
|
||||
def end_time_inclusive(self):
|
||||
"""The time of the last sample that contains data in the TimeRange.
|
||||
|
||||
If the TimeRange goes from (0, 24) w/ duration (10, 24), this will be
|
||||
(9, 24)
|
||||
|
||||
If the TimeRange goes from (0, 24) w/ duration (10.5, 24):
|
||||
(10, 24)
|
||||
|
||||
In other words, the last frame with data (however fractional).
|
||||
"""
|
||||
|
||||
if (
|
||||
self.end_time_exclusive() - self.start_time.rescaled_to(self.duration)
|
||||
).value > 1:
|
||||
|
||||
result = (
|
||||
self.end_time_exclusive() - RationalTime(1, self.start_time.rate)
|
||||
)
|
||||
|
||||
# if the duration's value has a fractional component
|
||||
if self.duration.value != math.floor(self.duration.value):
|
||||
result = RationalTime(
|
||||
math.floor(self.end_time_exclusive().value),
|
||||
result.rate
|
||||
)
|
||||
|
||||
return result
|
||||
else:
|
||||
return copy.deepcopy(self.start_time)
|
||||
|
||||
def end_time_exclusive(self):
|
||||
""""Time of the first sample outside the time range.
|
||||
|
||||
If Start Frame is 10 and duration is 5, then end_time_exclusive is 15,
|
||||
even though the last time with data in this range is 14.
|
||||
|
||||
If Start Frame is 10 and duration is 5.5, then end_time_exclusive is
|
||||
15.5, even though the last time with data in this range is 15.
|
||||
"""
|
||||
|
||||
return self.duration + self.start_time.rescaled_to(self.duration)
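# Usage sketch (editor's note, not part of the vendored file): for a range
# starting at frame 10 with a 5-frame duration at 24 fps,
#
#     tr = TimeRange(RationalTime(10, 24), RationalTime(5, 24))
#     tr.end_time_exclusive()   # -> RationalTime(15, 24)
#     tr.end_time_inclusive()   # -> RationalTime(14, 24), the last frame with data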
|
||||
|
||||
def extended_by(self, other):
|
||||
"""Construct a new TimeRange that is this one extended by another."""
|
||||
|
||||
if not isinstance(other, TimeRange):
|
||||
raise TypeError(
|
||||
"extended_by requires rtime be a TimeRange, not a '{}'".format(
|
||||
type(other)
|
||||
)
|
||||
)
|
||||
|
||||
start_time = min(self.start_time, other.start_time)
|
||||
new_end_time = max(
|
||||
self.end_time_exclusive(),
|
||||
other.end_time_exclusive()
|
||||
)
|
||||
duration = duration_from_start_end_time(start_time, new_end_time)
|
||||
return TimeRange(start_time, duration)
|
||||
|
||||
# @TODO: remove?
|
||||
def clamped(
|
||||
self,
|
||||
other,
|
||||
start_bound=BoundStrategy.Free,
|
||||
end_bound=BoundStrategy.Free
|
||||
):
|
||||
"""Clamp 'other' (either a RationalTime or a TimeRange), according to
|
||||
self.start_time/end_time_exclusive and the bound arguments.
|
||||
"""
|
||||
|
||||
if isinstance(other, RationalTime):
|
||||
if start_bound == BoundStrategy.Clamp:
|
||||
other = max(other, self.start_time)
|
||||
if end_bound == BoundStrategy.Clamp:
|
||||
# @TODO: this should probably be the end_time_inclusive,
|
||||
# not exclusive
|
||||
other = min(other, self.end_time_exclusive())
|
||||
return other
|
||||
elif isinstance(other, TimeRange):
|
||||
start_time = other.start_time
|
||||
end = other.end_time_exclusive()
|
||||
if start_bound == BoundStrategy.Clamp:
|
||||
start_time = max(other.start_time, self.start_time)
|
||||
if end_bound == BoundStrategy.Clamp:
|
||||
end = min(self.end_time_exclusive(), end)
|
||||
duration = duration_from_start_end_time(start_time, end)
|
||||
return TimeRange(start_time, duration)
|
||||
else:
|
||||
raise TypeError(
|
||||
"TimeRange can only be applied to RationalTime objects, not "
|
||||
"{}".format(type(other))
|
||||
)
|
||||
return self
|
||||
|
||||
def contains(self, other):
|
||||
"""Return true if self completely contains other.
|
||||
|
||||
(RationalTime or TimeRange)
|
||||
"""
|
||||
|
||||
if isinstance(other, RationalTime):
|
||||
return (
|
||||
self.start_time <= other and other < self.end_time_exclusive())
|
||||
elif isinstance(other, TimeRange):
|
||||
return (
|
||||
self.start_time <= other.start_time and
|
||||
self.end_time_exclusive() >= other.end_time_exclusive()
|
||||
)
|
||||
raise TypeError(
|
||||
"contains only accepts on otio.opentime.RationalTime or "
|
||||
"otio.opentime.TimeRange, not {}".format(type(other))
|
||||
)
|
||||
|
||||
def overlaps(self, other):
|
||||
"""Return true if self overlaps any part of other.
|
||||
|
||||
(RationalTime or TimeRange)
|
||||
"""
|
||||
|
||||
if isinstance(other, RationalTime):
|
||||
return self.contains(other)
|
||||
elif isinstance(other, TimeRange):
|
||||
return (
|
||||
(
|
||||
self.start_time < other.end_time_exclusive() and
|
||||
other.start_time < self.end_time_exclusive()
|
||||
)
|
||||
)
|
||||
raise TypeError(
|
||||
"overlaps only accepts on otio.opentime.RationalTime or "
|
||||
"otio.opentime.TimeRange, not {}".format(type(other))
|
||||
)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.start_time, self.duration))
|
||||
|
||||
def __eq__(self, rhs):
|
||||
try:
|
||||
return (
|
||||
(self.start_time, self.duration) ==
|
||||
(rhs.start_time, rhs.duration)
|
||||
)
|
||||
except AttributeError:
|
||||
return False
|
||||
|
||||
def __ne__(self, rhs):
|
||||
return not (self == rhs)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.opentime.TimeRange(start_time={}, duration={})".format(
|
||||
repr(self.start_time),
|
||||
repr(self.duration),
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
"TimeRange({}, {})".format(
|
||||
str(self.start_time),
|
||||
str(self.duration),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def from_frames(frame, fps):
|
||||
"""Turn a frame number and fps into a time object.
|
||||
:param frame: (:class:`int`) Frame number.
|
||||
:param fps: (:class:`float`) Frame-rate for the (:class:`RationalTime`) instance.
|
||||
|
||||
:return: (:class:`RationalTime`) Instance for the frame and fps provided.
|
||||
"""
|
||||
|
||||
return RationalTime(int(frame), fps)
|
||||
|
||||
|
||||
def to_frames(time_obj, fps=None):
|
||||
"""Turn a RationalTime into a frame number."""
|
||||
|
||||
if not fps or time_obj.rate == fps:
|
||||
return int(time_obj.value)
|
||||
|
||||
return int(time_obj.value_rescaled_to(fps))
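# Usage sketch (editor's note, not part of the vendored file): round-tripping a
# frame number through RationalTime,
#
#     t = from_frames(100, 24)   # -> RationalTime(100, 24)
#     to_frames(t)               # -> 100
#     to_frames(t, fps=48)       # -> 200, rescaled to the requested rate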
|
||||
|
||||
|
||||
def validate_timecode_rate(rate):
|
||||
"""Check if rate is of valid type and value.
|
||||
Raises (:class:`TypeError`) for wrong type of rate.
|
||||
Raises (:class:`ValueError`) for invalid rate value.
|
||||
|
||||
:param rate: (:class:`int`) or (:class:`float`) The frame rate in question
|
||||
"""
|
||||
if not isinstance(rate, (int, float)):
|
||||
raise TypeError(
|
||||
"rate must be <float> or <int> not {t}".format(t=type(rate)))
|
||||
|
||||
if rate not in VALID_TIMECODE_RATES:
|
||||
raise ValueError(
|
||||
'{rate} is not a valid frame rate, '
|
||||
'Please use one of these: {valid}'.format(
|
||||
rate=rate,
|
||||
valid=VALID_TIMECODE_RATES))
|
||||
|
||||
|
||||
def from_timecode(timecode_str, rate):
|
||||
"""Convert a timecode string into a RationalTime.
|
||||
|
||||
:param timecode_str: (:class:`str`) A colon-delimited timecode.
|
||||
:param rate: (:class:`float`) The frame-rate to calculate timecode in
|
||||
terms of.
|
||||
|
||||
:return: (:class:`RationalTime`) Instance for the timecode provided.
|
||||
"""
|
||||
# Validate rate
|
||||
validate_timecode_rate(rate)
|
||||
|
||||
# Check if rate is drop frame
|
||||
rate_is_dropframe = rate in VALID_DROPFRAME_TIMECODE_RATES
|
||||
|
||||
# Make sure only DF timecodes are treated as such
|
||||
treat_as_df = rate_is_dropframe and ';' in timecode_str
|
||||
|
||||
# Check if timecode indicates drop frame
|
||||
if ';' in timecode_str:
|
||||
if not rate_is_dropframe:
|
||||
raise ValueError(
|
||||
'Timecode "{}" indicates drop-frame rate '
|
||||
'due to the ";" frame divider. '
|
||||
'Passed rate ({}) is of non-drop-frame rate. '
|
||||
'Valid drop-frame rates are: {}'.format(
|
||||
timecode_str,
|
||||
rate,
|
||||
VALID_DROPFRAME_TIMECODE_RATES))
|
||||
else:
|
||||
timecode_str = timecode_str.replace(';', ':')
|
||||
|
||||
hours, minutes, seconds, frames = timecode_str.split(":")
|
||||
|
||||
# Timecode is declared in terms of nominal fps
|
||||
nominal_fps = int(math.ceil(rate))
|
||||
|
||||
if int(frames) >= nominal_fps:
|
||||
raise ValueError(
|
||||
'Frame rate mismatch. Timecode "{}" has frames beyond {}.'.format(
|
||||
timecode_str, nominal_fps - 1))
|
||||
|
||||
dropframes = 0
|
||||
if treat_as_df:
|
||||
if rate == 29.97:
|
||||
dropframes = 2
|
||||
|
||||
elif rate == 59.94:
|
||||
dropframes = 4
|
||||
|
||||
# To use for drop frame compensation
|
||||
total_minutes = int(hours) * 60 + int(minutes)
|
||||
|
||||
# convert to frames
|
||||
value = (
|
||||
((total_minutes * 60) + int(seconds)) * nominal_fps + int(frames)) - \
|
||||
(dropframes * (total_minutes - (total_minutes // 10)))
|
||||
|
||||
return RationalTime(value, rate)
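# Usage sketch (editor's note, not part of the vendored file; assumes 24 and
# 29.97 are in the valid rate tables): a non-drop and a drop-frame parse,
#
#     from_timecode("00:00:01:00", 24)       # -> RationalTime(24, 24)
#     from_timecode("00:01:00;02", 29.97)    # -> RationalTime(1800, 29.97)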
|
||||
|
||||
|
||||
def to_timecode(time_obj, rate=None, drop_frame=None):
|
||||
"""Convert a RationalTime into a timecode string.
|
||||
|
||||
:param time_obj: (:class:`RationalTime`) instance to express as timecode.
|
||||
:param rate: (:class:`float`) The frame-rate to calculate timecode in
|
||||
terms of. (Default time_obj.rate)
|
||||
:param drop_frame: (:class:`bool`) ``True`` to make drop-frame timecode,
|
||||
``False`` for non-drop. If left ``None``, a format will be guessed
|
||||
based on rate.
|
||||
|
||||
:return: (:class:`str`) The timecode.
|
||||
"""
|
||||
if time_obj is None:
|
||||
return None
|
||||
|
||||
rate = rate or time_obj.rate
|
||||
|
||||
# Validate rate
|
||||
validate_timecode_rate(rate)
|
||||
|
||||
# Check if rate is drop frame
|
||||
rate_is_dropframe = rate in VALID_DROPFRAME_TIMECODE_RATES
|
||||
if drop_frame and not rate_is_dropframe:
|
||||
raise ValueError(
|
||||
"Invalid rate for drop-frame timecode {}".format(time_obj.rate)
|
||||
)
|
||||
|
||||
# if in auto-detect for DFTC, use the rate to decide
|
||||
if drop_frame is None:
|
||||
drop_frame = rate_is_dropframe
|
||||
|
||||
dropframes = 0
|
||||
if drop_frame:
|
||||
if rate in (29.97, (30000 / 1001.0)):
|
||||
dropframes = 2
|
||||
|
||||
elif rate == 59.94:
|
||||
dropframes = 4
|
||||
|
||||
# For non-dftc, use the integral frame rate
|
||||
if not drop_frame:
|
||||
rate = round(rate)
|
||||
|
||||
# Number of frames in an hour
|
||||
frames_per_hour = int(round(rate * 60 * 60))
|
||||
# Number of frames in a day - timecode rolls over after 24 hours
|
||||
frames_per_24_hours = frames_per_hour * 24
|
||||
# Number of frames per ten minutes
|
||||
frames_per_10_minutes = int(round(rate * 60 * 10))
|
||||
# Number of frames per minute is the round of the framerate * 60 minus
|
||||
# the number of dropped frames
|
||||
frames_per_minute = int(round(rate) * 60) - dropframes
|
||||
|
||||
value = time_obj.value
|
||||
|
||||
if value < 0:
|
||||
raise ValueError(
|
||||
"Negative values are not supported for converting to timecode.")
|
||||
|
||||
# If frame_number is greater than 24 hrs, next operation will rollover
|
||||
# clock
|
||||
value %= frames_per_24_hours
|
||||
|
||||
if drop_frame:
|
||||
d = value // frames_per_10_minutes
|
||||
m = value % frames_per_10_minutes
|
||||
if m > dropframes:
|
||||
value += (dropframes * 9 * d) + \
|
||||
dropframes * ((m - dropframes) // frames_per_minute)
|
||||
else:
|
||||
value += dropframes * 9 * d
|
||||
|
||||
nominal_fps = int(math.ceil(rate))
|
||||
|
||||
frames = value % nominal_fps
|
||||
seconds = (value // nominal_fps) % 60
|
||||
minutes = ((value // nominal_fps) // 60) % 60
|
||||
hours = (((value // nominal_fps) // 60) // 60)
|
||||
|
||||
tc = "{HH:02d}:{MM:02d}:{SS:02d}{div}{FF:02d}"
|
||||
|
||||
return tc.format(
|
||||
HH=int(hours),
|
||||
MM=int(minutes),
|
||||
SS=int(seconds),
|
||||
div=drop_frame and ";" or ":",
|
||||
FF=int(frames))
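# Usage sketch (editor's note, not part of the vendored file): the inverse of
# the from_timecode example above,
#
#     to_timecode(RationalTime(24, 24))        # -> "00:00:01:00"
#     to_timecode(RationalTime(1800, 29.97))   # -> "00:01:00;02" (drop-frame)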
|
||||
|
||||
|
||||
def from_time_string(time_str, rate):
|
||||
"""Convert a time with microseconds string into a RationalTime.
|
||||
|
||||
:param time_str: (:class:`str`) A HH:MM:ss.ms time.
|
||||
:param rate: (:class:`float`) The frame-rate to calculate timecode in
|
||||
terms of.
|
||||
|
||||
:return: (:class:`RationalTime`) Instance for the timecode provided.
|
||||
"""
|
||||
|
||||
if ';' in time_str:
|
||||
raise ValueError('Drop-Frame timecodes not supported.')
|
||||
|
||||
hours, minutes, seconds = time_str.split(":")
|
||||
microseconds = "0"
|
||||
if '.' in seconds:
|
||||
seconds, microseconds = str(seconds).split('.')
|
||||
microseconds = microseconds[0:6]
|
||||
seconds = '.'.join([seconds, microseconds])
|
||||
time_obj = from_seconds(
|
||||
float(seconds) +
|
||||
(int(minutes) * 60) +
|
||||
(int(hours) * 60 * 60)
|
||||
)
|
||||
return time_obj.rescaled_to(rate)
|
||||
|
||||
|
||||
def to_time_string(time_obj):
|
||||
"""
|
||||
Convert a RationalTime into a time string with microseconds, as formatted in FFmpeg.
|
||||
|
||||
:return: (:class:`str`) Time string with microseconds.
|
||||
"""
|
||||
if time_obj is None:
|
||||
return None
|
||||
# convert time object to seconds
|
||||
seconds = to_seconds(time_obj)
|
||||
|
||||
# reformat in time string
|
||||
time_units_per_minute = 60
|
||||
time_units_per_hour = time_units_per_minute * 60
|
||||
time_units_per_day = time_units_per_hour * 24
|
||||
|
||||
days, hour_units = divmod(seconds, time_units_per_day)
|
||||
hours, minute_units = divmod(hour_units, time_units_per_hour)
|
||||
minutes, seconds = divmod(minute_units, time_units_per_minute)
|
||||
microseconds = "0"
|
||||
seconds = str(seconds)
|
||||
if '.' in seconds:
|
||||
seconds, microseconds = str(seconds).split('.')
|
||||
|
||||
# TODO: There are some rollover policy issues for days and hours,
|
||||
# We need to research these
|
||||
|
||||
return "{hours}:{minutes}:{seconds}.{microseconds}".format(
|
||||
hours="{n:0{width}d}".format(n=int(hours), width=2),
|
||||
minutes="{n:0{width}d}".format(n=int(minutes), width=2),
|
||||
seconds="{n:0{width}d}".format(n=int(seconds), width=2),
|
||||
microseconds=microseconds[0:6]
|
||||
)
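# Usage sketch (editor's note, not part of the vendored file): round-tripping an
# FFmpeg-style time string,
#
#     t = from_time_string("00:00:01.5", 24)   # 1.5 seconds -> RationalTime(36.0, 24)
#     to_time_string(t)                        # -> "00:00:01.5"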
|
||||
|
||||
|
||||
def from_seconds(seconds):
|
||||
"""Convert a number of seconds into RationalTime"""
|
||||
|
||||
# Note: in the future we may consider adding a preferred rate arg
|
||||
time_obj = RationalTime(value=seconds, rate=1)
|
||||
|
||||
return time_obj
|
||||
|
||||
|
||||
def to_seconds(time_obj):
|
||||
""" Convert a RationalTime into float seconds """
|
||||
return time_obj.value_rescaled_to(1)
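# Usage sketch (editor's note, not part of the vendored file):
#
#     to_seconds(RationalTime(48, 24))   # -> 2.0
#     from_seconds(2.0)                  # -> RationalTime(2.0, 1)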
|
||||
|
||||
|
||||
def from_footage(footage):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def to_footage(time_obj):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def duration_from_start_end_time(start_time, end_time_exclusive):
|
||||
"""Compute duration of samples from first to last. This is not the same as
|
||||
distance. For example, the duration of a clip from frame 10 to frame 15
|
||||
is 6 frames. Result in the rate of start_time.
|
||||
"""
|
||||
|
||||
# @TODO: what to do when start_time > end_time_exclusive?
|
||||
|
||||
if start_time.rate == end_time_exclusive.rate:
|
||||
return RationalTime(
|
||||
end_time_exclusive.value - start_time.value,
|
||||
start_time.rate
|
||||
)
|
||||
else:
|
||||
return RationalTime(
|
||||
(
|
||||
end_time_exclusive.value_rescaled_to(start_time)
|
||||
- start_time.value
|
||||
),
|
||||
start_time.rate
|
||||
)
|
||||
|
||||
|
||||
# @TODO: create range from start/end [in,ex]clusive
|
||||
def range_from_start_end_time(start_time, end_time_exclusive):
|
||||
"""Create a TimeRange from start and end RationalTimes."""
|
||||
|
||||
return TimeRange(
|
||||
start_time,
|
||||
duration=duration_from_start_end_time(start_time, end_time_exclusive)
|
||||
)
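# Usage sketch (editor's note, not part of the vendored file): a clip covering
# frames 10..15 inclusive has a 6-frame duration, so the exclusive end is 16,
#
#     duration_from_start_end_time(RationalTime(10, 24), RationalTime(16, 24))
#     # -> RationalTime(6, 24)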
|
||||
33
pype/vendor/python/python_2/opentimelineio/plugins/__init__.py
vendored
Normal file
|
|
@@ -0,0 +1,33 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Plugin system for OTIO"""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
from .python_plugin import PythonPlugin
|
||||
from .manifest import (
|
||||
manifest_from_file,
|
||||
ActiveManifest,
|
||||
)
|
||||
282
pype/vendor/python/python_2/opentimelineio/plugins/manifest.py
vendored
Normal file
|
|
@@ -0,0 +1,282 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implementation of an adapter registry system for OTIO."""
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
import os
|
||||
|
||||
# on some python interpreters, pkg_resources is not available
|
||||
try:
|
||||
import pkg_resources
|
||||
except ImportError:
|
||||
pkg_resources = None
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
exceptions,
|
||||
)
|
||||
|
||||
|
||||
def manifest_from_file(filepath):
|
||||
"""Read the .json file at filepath into a Manifest object."""
|
||||
|
||||
result = core.deserialize_json_from_file(filepath)
|
||||
result.source_files.append(filepath)
|
||||
result._update_plugin_source(filepath)
|
||||
return result
|
||||
|
||||
|
||||
def manifest_from_string(input_string):
|
||||
"""Deserialize the json string into a manifest object."""
|
||||
|
||||
result = core.deserialize_json_from_string(input_string)
|
||||
|
||||
# try and get the caller's name
|
||||
name = "unknown"
|
||||
stack = inspect.stack()
|
||||
if len(stack) > 1 and len(stack[1]) > 3:
|
||||
# filename function name
|
||||
name = "{}:{}".format(stack[1][1], stack[1][3])
|
||||
|
||||
# set the value in the manifest
|
||||
src_string = "call to manifest_from_string() in " + name
|
||||
result.source_files.append(src_string)
|
||||
result._update_plugin_source(src_string)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Manifest(core.SerializableObject):
|
||||
"""Defines an OTIO plugin Manifest.
|
||||
|
||||
This is an internal OTIO implementation detail. A manifest tracks a
|
||||
collection of adapters and allows finding specific adapters by suffix.
|
||||
|
||||
For writing your own adapters, consult:
|
||||
https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html#
|
||||
"""
|
||||
_serializable_label = "PluginManifest.1"
|
||||
|
||||
def __init__(self):
|
||||
super(Manifest, self).__init__()
|
||||
self.adapters = []
|
||||
self.schemadefs = []
|
||||
self.media_linkers = []
|
||||
self.source_files = []
|
||||
|
||||
# hook system stuff
|
||||
self.hooks = {}
|
||||
self.hook_scripts = []
|
||||
|
||||
adapters = core.serializable_field(
|
||||
"adapters",
|
||||
type([]),
|
||||
"Adapters this manifest describes."
|
||||
)
|
||||
schemadefs = core.serializable_field(
|
||||
"schemadefs",
|
||||
type([]),
|
||||
"Schemadefs this manifest describes."
|
||||
)
|
||||
media_linkers = core.serializable_field(
|
||||
"media_linkers",
|
||||
type([]),
|
||||
"Media Linkers this manifest describes."
|
||||
)
|
||||
hooks = core.serializable_field(
|
||||
"hooks",
|
||||
type({}),
|
||||
"Hooks that hooks scripts can be attached to."
|
||||
)
|
||||
hook_scripts = core.serializable_field(
|
||||
"hook_scripts",
|
||||
type([]),
|
||||
"Scripts that can be attached to hooks."
|
||||
)
|
||||
|
||||
def extend(self, another_manifest):
|
||||
"""
|
||||
Extend the adapters, schemadefs, media_linkers, and hook_scripts lists of this manifest
|
||||
by appending the contents of the corresponding lists of another_manifest.
|
||||
"""
|
||||
if another_manifest:
|
||||
self.adapters.extend(another_manifest.adapters)
|
||||
self.schemadefs.extend(another_manifest.schemadefs)
|
||||
self.media_linkers.extend(another_manifest.media_linkers)
|
||||
self.hook_scripts.extend(another_manifest.hook_scripts)
|
||||
|
||||
for trigger_name, hooks in another_manifest.hooks.items():
|
||||
if trigger_name in self.hooks:
|
||||
self.hooks[trigger_name].extend(hooks)
|
||||
|
||||
def _update_plugin_source(self, path):
|
||||
"""Track the source .json for a given adapter."""
|
||||
|
||||
for thing in (self.adapters + self.schemadefs
|
||||
+ self.media_linkers + self.hook_scripts):
|
||||
thing._json_path = path
|
||||
|
||||
def from_filepath(self, suffix):
|
||||
"""Return the adapter object associated with a given file suffix."""
|
||||
|
||||
for adapter in self.adapters:
|
||||
if suffix.lower() in adapter.suffixes:
|
||||
return adapter
|
||||
raise exceptions.NoKnownAdapterForExtensionError(suffix)
|
||||
|
||||
def adapter_module_from_suffix(self, suffix):
|
||||
"""Return the adapter module associated with a given file suffix."""
|
||||
|
||||
adp = self.from_filepath(suffix)
|
||||
return adp.module()
|
||||
|
||||
def from_name(self, name, kind_list="adapters"):
|
||||
"""Return the adapter object associated with a given adapter name."""
|
||||
|
||||
for thing in getattr(self, kind_list):
|
||||
if name == thing.name:
|
||||
return thing
|
||||
|
||||
raise exceptions.NotSupportedError(
|
||||
"Could not find plugin: '{}' in kind_list: '{}'."
|
||||
" options: {}".format(
|
||||
name,
|
||||
kind_list,
|
||||
getattr(self, kind_list)
|
||||
)
|
||||
)
|
||||
|
||||
def adapter_module_from_name(self, name):
|
||||
"""Return the adapter module associated with a given adapter name."""
|
||||
|
||||
adp = self.from_name(name)
|
||||
return adp.module()
|
||||
|
||||
def schemadef_module_from_name(self, name):
|
||||
"""Return the schemadef module associated with a given schemadef name."""
|
||||
|
||||
adp = self.from_name(name, kind_list="schemadefs")
|
||||
return adp.module()
|
||||
|
||||
|
||||
_MANIFEST = None
|
||||
|
||||
|
||||
def load_manifest():
|
||||
# build the manifest of adapters, starting with builtin adapters
|
||||
result = manifest_from_file(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.dirname(inspect.getsourcefile(core))),
|
||||
"adapters",
|
||||
"builtin_adapters.plugin_manifest.json"
|
||||
)
|
||||
)
|
||||
|
||||
# layer contrib plugins after built in ones
|
||||
try:
|
||||
import opentimelineio_contrib as otio_c
|
||||
|
||||
contrib_manifest = manifest_from_file(
|
||||
os.path.join(
|
||||
os.path.dirname(inspect.getsourcefile(otio_c)),
|
||||
"adapters",
|
||||
"contrib_adapters.plugin_manifest.json"
|
||||
)
|
||||
)
|
||||
result.extend(contrib_manifest)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Discover setuptools-based plugins
|
||||
if pkg_resources:
|
||||
for plugin in pkg_resources.iter_entry_points(
|
||||
"opentimelineio.plugins"
|
||||
):
|
||||
plugin_name = plugin.name
|
||||
try:
|
||||
plugin_entry_point = plugin.load()
|
||||
try:
|
||||
plugin_manifest = plugin_entry_point.plugin_manifest()
|
||||
except AttributeError:
|
||||
if not pkg_resources.resource_exists(
|
||||
plugin.module_name,
|
||||
'plugin_manifest.json'
|
||||
):
|
||||
raise
|
||||
manifest_stream = pkg_resources.resource_stream(
|
||||
plugin.module_name,
|
||||
'plugin_manifest.json'
|
||||
)
|
||||
plugin_manifest = core.deserialize_json_from_string(
|
||||
manifest_stream.read().decode('utf-8')
|
||||
)
|
||||
manifest_stream.close()
|
||||
filepath = pkg_resources.resource_filename(
|
||||
plugin.module_name,
|
||||
'plugin_manifest.json'
|
||||
)
|
||||
plugin_manifest._update_plugin_source(filepath)
|
||||
|
||||
except Exception:
|
||||
logging.exception(
|
||||
"could not load plugin: {}".format(plugin_name)
|
||||
)
|
||||
continue
|
||||
|
||||
result.extend(plugin_manifest)
|
||||
else:
|
||||
# XXX: Should we print some kind of warning that pkg_resources isn't
|
||||
# available?
|
||||
pass
|
||||
|
||||
# read local adapter manifests, if they exist
|
||||
_local_manifest_path = os.environ.get("OTIO_PLUGIN_MANIFEST_PATH", None)
|
||||
if _local_manifest_path is not None:
|
||||
for json_path in _local_manifest_path.split(":"):
|
||||
if not os.path.exists(json_path):
|
||||
# XXX: In case error reporting is requested
|
||||
# print(
|
||||
# "Warning: OpenTimelineIO cannot access path '{}' from "
|
||||
# "$OTIO_PLUGIN_MANIFEST_PATH".format(json_path)
|
||||
# )
|
||||
continue
|
||||
|
||||
LOCAL_MANIFEST = manifest_from_file(json_path)
|
||||
result.extend(LOCAL_MANIFEST)
|
||||
|
||||
# force the schemadefs to load and add to schemadef module namespace
|
||||
for s in result.schemadefs:
|
||||
s.module()
|
||||
return result
|
||||
|
||||
|
||||
def ActiveManifest(force_reload=False):
|
||||
global _MANIFEST
|
||||
if not _MANIFEST or force_reload:
|
||||
_MANIFEST = load_manifest()
|
||||
|
||||
return _MANIFEST
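# Usage sketch (editor's note, not part of the vendored file; the "otio"
# suffix is illustrative and must be registered by an installed adapter):
#
#     manifest = ActiveManifest()
#     adapter = manifest.from_filepath("otio")   # adapter handling *.otio files
#     module = adapter.module()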
|
||||
128
pype/vendor/python/python_2/opentimelineio/plugins/python_plugin.py
vendored
Normal file
|
|
@@ -0,0 +1,128 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Base class for OTIO plugins that are exposed by manifests."""
|
||||
|
||||
import os
|
||||
import imp
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
exceptions,
|
||||
)
|
||||
|
||||
|
||||
class PythonPlugin(core.SerializableObject):
|
||||
"""A class of plugin that is encoded in a python module, exposed via a
|
||||
manifest.
|
||||
"""
|
||||
|
||||
_serializable_label = "PythonPlugin.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
execution_scope=None,
|
||||
filepath=None,
|
||||
):
|
||||
super(PythonPlugin, self).__init__()
|
||||
self.name = name
|
||||
self.execution_scope = execution_scope
|
||||
self.filepath = filepath
|
||||
self._json_path = None
|
||||
self._module = None
|
||||
|
||||
name = core.serializable_field("name", doc="Adapter name.")
|
||||
execution_scope = core.serializable_field(
|
||||
"execution_scope",
|
||||
str,
|
||||
doc=(
|
||||
"Describes whether this adapter is executed in the current python"
|
||||
" process or in a subshell. Options are: "
|
||||
"['in process', 'out of process']."
|
||||
)
|
||||
)
|
||||
filepath = core.serializable_field(
|
||||
"filepath",
|
||||
str,
|
||||
doc=(
|
||||
"Absolute path or relative path to adapter module from location of"
|
||||
" json."
|
||||
)
|
||||
)
|
||||
|
||||
def module_abs_path(self):
|
||||
"""Return an absolute path to the module implementing this adapter."""
|
||||
|
||||
filepath = self.filepath
|
||||
if not os.path.isabs(filepath):
|
||||
if not self._json_path:
|
||||
raise exceptions.MisconfiguredPluginError(
|
||||
"{} plugin is misconfigured, missing json path. "
|
||||
"plugin: {}".format(
|
||||
self.name,
|
||||
repr(self)
|
||||
)
|
||||
)
|
||||
|
||||
filepath = os.path.join(os.path.dirname(self._json_path), filepath)
|
||||
|
||||
return filepath
|
||||
|
||||
def _imported_module(self, namespace):
|
||||
"""Load the module this plugin points at."""
|
||||
|
||||
pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0]
|
||||
pydir = os.path.dirname(self.module_abs_path())
|
||||
|
||||
(file_obj, pathname, description) = imp.find_module(pyname, [pydir])
|
||||
|
||||
with file_obj:
|
||||
# this will reload the module if it has already been loaded.
|
||||
mod = imp.load_module(
|
||||
"opentimelineio.{}.{}".format(namespace, self.name),
|
||||
file_obj,
|
||||
pathname,
|
||||
description
|
||||
)
|
||||
|
||||
return mod
|
||||
|
||||
def module(self):
|
||||
"""Return the module object for this adapter. """
|
||||
|
||||
if not self._module:
|
||||
self._module = self._imported_module("adapters")
|
||||
|
||||
return self._module
|
||||
|
||||
def _execute_function(self, func_name, **kwargs):
|
||||
"""Execute func_name on this adapter with error checking."""
|
||||
|
||||
# collects the error handling into a common place.
|
||||
if not hasattr(self.module(), func_name):
|
||||
raise exceptions.AdapterDoesntSupportFunctionError(
|
||||
"Sorry, {} doesn't support {}.".format(self.name, func_name)
|
||||
)
|
||||
return (getattr(self.module(), func_name)(**kwargs))
|
||||
75
pype/vendor/python/python_2/opentimelineio/schema/__init__.py
vendored
Normal file
|
|
@@ -0,0 +1,75 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
"""User facing classes."""
|
||||
|
||||
from .missing_reference import (
|
||||
MissingReference
|
||||
)
|
||||
from .external_reference import (
|
||||
ExternalReference
|
||||
)
|
||||
from .clip import (
|
||||
Clip,
|
||||
)
|
||||
from .track import (
|
||||
Track,
|
||||
TrackKind,
|
||||
NeighborGapPolicy,
|
||||
)
|
||||
from .stack import (
|
||||
Stack,
|
||||
)
|
||||
from .timeline import (
|
||||
Timeline,
|
||||
timeline_from_clips,
|
||||
)
|
||||
from .marker import (
|
||||
Marker,
|
||||
MarkerColor,
|
||||
)
|
||||
from .gap import (
|
||||
Gap,
|
||||
)
|
||||
from .effect import (
|
||||
Effect,
|
||||
TimeEffect,
|
||||
LinearTimeWarp,
|
||||
FreezeFrame,
|
||||
)
|
||||
from .transition import (
|
||||
Transition,
|
||||
TransitionTypes,
|
||||
)
|
||||
from .serializable_collection import (
|
||||
SerializableCollection
|
||||
)
|
||||
from .generator_reference import (
|
||||
GeneratorReference
|
||||
)
|
||||
from .schemadef import (
|
||||
SchemaDef
|
||||
)
|
||||
130
pype/vendor/python/python_2/opentimelineio/schema/clip.py
vendored
Normal file
|
|
@@ -0,0 +1,130 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implementation of the Clip class, for pointing at media."""
|
||||
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
exceptions,
|
||||
)
|
||||
from . import (
|
||||
missing_reference
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Clip(core.Item):
|
||||
"""The base editable object in OTIO.
|
||||
|
||||
Contains a media reference and a trim on that media reference.
|
||||
"""
|
||||
|
||||
_serializable_label = "Clip.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
media_reference=None,
|
||||
source_range=None,
|
||||
markers=[],
|
||||
effects=[],
|
||||
metadata=None,
|
||||
):
|
||||
core.Item.__init__(
|
||||
self,
|
||||
name=name,
|
||||
source_range=source_range,
|
||||
markers=markers,
|
||||
effects=effects,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
if not media_reference:
|
||||
media_reference = missing_reference.MissingReference()
|
||||
self._media_reference = copy.deepcopy(media_reference)
|
||||
|
||||
name = core.serializable_field("name", doc="Name of this clip.")
|
||||
transform = core.deprecated_field()
|
||||
_media_reference = core.serializable_field(
|
||||
"media_reference",
|
||||
core.MediaReference,
|
||||
"Media reference to the media this clip represents."
|
||||
)
|
||||
|
||||
@property
|
||||
def media_reference(self):
|
||||
if self._media_reference is None:
|
||||
self._media_reference = missing_reference.MissingReference()
|
||||
return self._media_reference
|
||||
|
||||
@media_reference.setter
|
||||
def media_reference(self, val):
|
||||
if val is None:
|
||||
val = missing_reference.MissingReference()
|
||||
self._media_reference = val
|
||||
|
||||
def available_range(self):
|
||||
if not self.media_reference:
|
||||
raise exceptions.CannotComputeAvailableRangeError(
|
||||
"No media reference set on clip: {}".format(self)
|
||||
)
|
||||
|
||||
if not self.media_reference.available_range:
|
||||
raise exceptions.CannotComputeAvailableRangeError(
|
||||
"No available_range set on media reference on clip: {}".format(
|
||||
self
|
||||
)
|
||||
)
|
||||
|
||||
return copy.copy(self.media_reference.available_range)
|
||||
|
||||
def __str__(self):
|
||||
return 'Clip("{}", {}, {}, {})'.format(
|
||||
self.name,
|
||||
self.media_reference,
|
||||
self.source_range,
|
||||
self.metadata
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'otio.schema.Clip('
|
||||
'name={}, '
|
||||
'media_reference={}, '
|
||||
'source_range={}, '
|
||||
'metadata={}'
|
||||
')'.format(
|
||||
repr(self.name),
|
||||
repr(self.media_reference),
|
||||
repr(self.source_range),
|
||||
repr(self.metadata),
|
||||
)
|
||||
)
|
||||
|
||||
def each_clip(self, search_range=None):
|
||||
"""Yields self."""
|
||||
|
||||
yield self
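# Usage sketch (editor's note, not part of the vendored file; the clip name
# and media URL are illustrative):
#
#     clip = Clip(
#         name="shot_010",
#         media_reference=otio.schema.ExternalReference(
#             target_url="file:///tmp/shot_010.mov"
#         ),
#         source_range=otio.opentime.TimeRange(
#             otio.opentime.RationalTime(0, 24),
#             otio.opentime.RationalTime(48, 24),
#         ),
#     )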
|
||||
130
pype/vendor/python/python_2/opentimelineio/schema/effect.py
vendored
Normal file
|
|
@@ -0,0 +1,130 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implementation of Effect OTIO class."""
|
||||
|
||||
from .. import (
|
||||
core
|
||||
)
|
||||
|
||||
import copy
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Effect(core.SerializableObject):
|
||||
_serializable_label = "Effect.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
effect_name=None,
|
||||
metadata=None
|
||||
):
|
||||
super(Effect, self).__init__()
|
||||
self.name = name
|
||||
self.effect_name = effect_name
|
||||
self.metadata = copy.deepcopy(metadata) if metadata else {}
|
||||
|
||||
name = core.serializable_field(
|
||||
"name",
|
||||
doc="Name of this effect object. Example: 'BlurByHalfEffect'."
|
||||
)
|
||||
effect_name = core.serializable_field(
|
||||
"effect_name",
|
||||
doc="Name of the kind of effect (example: 'Blur', 'Crop', 'Flip')."
|
||||
)
|
||||
metadata = core.serializable_field(
|
||||
"metadata",
|
||||
dict,
|
||||
doc="Metadata dictionary."
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
"Effect("
|
||||
"{}, "
|
||||
"{}, "
|
||||
"{}"
|
||||
")".format(
|
||||
str(self.name),
|
||||
str(self.effect_name),
|
||||
str(self.metadata),
|
||||
)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.schema.Effect("
|
||||
"name={}, "
|
||||
"effect_name={}, "
|
||||
"metadata={}"
|
||||
")".format(
|
||||
repr(self.name),
|
||||
repr(self.effect_name),
|
||||
repr(self.metadata),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class TimeEffect(Effect):
|
||||
"Base Time Effect Class"
|
||||
_serializable_label = "TimeEffect.1"
|
||||
pass
|
||||
|
||||
|
||||
@core.register_type
|
||||
class LinearTimeWarp(TimeEffect):
|
||||
"A time warp that applies a linear scale across the entire clip"
|
||||
_serializable_label = "LinearTimeWarp.1"
|
||||
|
||||
def __init__(self, name=None, time_scalar=1, metadata=None):
|
||||
Effect.__init__(
|
||||
self,
|
||||
name=name,
|
||||
effect_name="LinearTimeWarp",
|
||||
metadata=metadata
|
||||
)
|
||||
self.time_scalar = time_scalar
|
||||
|
||||
time_scalar = core.serializable_field(
|
||||
"time_scalar",
|
||||
doc="Linear time scalar applied to clip. "
|
||||
"2.0 = double speed, 0.5 = half speed."
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class FreezeFrame(LinearTimeWarp):
|
||||
"Hold the first frame of the clip for the duration of the clip."
|
||||
_serializable_label = "FreezeFrame.1"
|
||||
|
||||
def __init__(self, name=None, metadata=None):
|
||||
LinearTimeWarp.__init__(
|
||||
self,
|
||||
name=name,
|
||||
time_scalar=0,
|
||||
metadata=metadata
|
||||
)
|
||||
self.effect_name = "FreezeFrame"
|
||||
69
pype/vendor/python/python_2/opentimelineio/schema/external_reference.py
vendored
Normal file
|
|
@@ -0,0 +1,69 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""
|
||||
Implementation of the ExternalReference media reference schema.
|
||||
"""
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class ExternalReference(core.MediaReference):
|
||||
"""Reference to media via a url, for example "file:///var/tmp/foo.mov" """
|
||||
|
||||
_serializable_label = "ExternalReference.1"
|
||||
_name = "ExternalReference"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
target_url=None,
|
||||
available_range=None,
|
||||
metadata=None,
|
||||
):
|
||||
core.MediaReference.__init__(
|
||||
self,
|
||||
available_range=available_range,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
self.target_url = target_url
|
||||
|
||||
target_url = core.serializable_field(
|
||||
"target_url",
|
||||
doc=(
|
||||
"URL at which this media lives. For local references, use the "
|
||||
"'file://' format."
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return 'ExternalReference("{}")'.format(self.target_url)
|
||||
|
||||
def __repr__(self):
|
||||
return 'otio.schema.ExternalReference(target_url={})'.format(
|
||||
repr(self.target_url)
|
||||
)
|
||||
82
pype/vendor/python/python_2/opentimelineio/schema/gap.py
vendored
Normal file
|
|
@@ -0,0 +1,82 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
opentime,
|
||||
)
|
||||
|
||||
"""Gap Item - represents a transparent gap in content."""
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Gap(core.Item):
|
||||
_serializable_label = "Gap.1"
|
||||
_class_path = "schema.Gap"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
# note - only one of the following two arguments is accepted
|
||||
# if neither is provided, source_range will be set to an empty
|
||||
# TimeRange
|
||||
# Duration is provided as a convenience for creating a gap of a certain
|
||||
# length. IE: Gap(duration=otio.opentime.RationalTime(300, 24))
|
||||
duration=None,
|
||||
source_range=None,
|
||||
effects=None,
|
||||
markers=None,
|
||||
metadata=None,
|
||||
):
|
||||
if duration and source_range:
|
||||
raise RuntimeError(
|
||||
"Cannot instantiate with both a source range and a duration."
|
||||
)
|
||||
|
||||
if duration:
|
||||
source_range = opentime.TimeRange(
|
||||
opentime.RationalTime(0, duration.rate),
|
||||
duration
|
||||
)
|
||||
elif source_range is None:
|
||||
# if neither is provided, seed TimeRange as an empty Source Range.
|
||||
source_range = opentime.TimeRange()
|
||||
|
||||
core.Item.__init__(
|
||||
self,
|
||||
name=name,
|
||||
source_range=source_range,
|
||||
effects=effects,
|
||||
markers=markers,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def visible():
|
||||
return False
|
||||
|
||||
|
||||
# the original name for "gap" was "filler" - this will turn "Filler" found in
|
||||
# OTIO files into Gap automatically.
|
||||
core.register_type(Gap, "Filler")
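# Usage sketch (editor's note, not part of the vendored file): the duration
# convenience argument builds the source_range automatically,
#
#     gap = Gap(duration=otio.opentime.RationalTime(300, 24))
#     gap.source_range   # -> TimeRange starting at 0 with a 300-frame duration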
|
||||
76
pype/vendor/python/python_2/opentimelineio/schema/generator_reference.py
vendored
Normal file
|
|
@@ -0,0 +1,76 @@
|
|||
"""
|
||||
Generators are media references that _produce_ media rather than refer to it.
|
||||
"""
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class GeneratorReference(core.MediaReference):
|
||||
"""
|
||||
Base class for Generators.
|
||||
|
||||
Generators are media references that become "generators" in editorial
|
||||
systems. For example, color bars or a solid color.
|
||||
"""
|
||||
|
||||
_serializable_label = "GeneratorReference.1"
|
||||
_name = "GeneratorReference"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
generator_kind=None,
|
||||
available_range=None,
|
||||
parameters=None,
|
||||
metadata=None
|
||||
):
|
||||
super(GeneratorReference, self).__init__(
|
||||
name,
|
||||
available_range,
|
||||
metadata
|
||||
)
|
||||
|
||||
if parameters is None:
|
||||
parameters = {}
|
||||
self.parameters = parameters
|
||||
self.generator_kind = generator_kind
|
||||
|
||||
parameters = core.serializable_field(
|
||||
"parameters",
|
||||
dict,
|
||||
doc="Dictionary of parameters for generator."
|
||||
)
|
||||
generator_kind = core.serializable_field(
|
||||
"generator_kind",
|
||||
required_type=type(""),
|
||||
# @TODO: need to clarify if this also has an enum of supported types
|
||||
# / generic
|
||||
doc="Kind of generator reference, as defined by the "
|
||||
"schema.generator_reference.GeneratorReferenceTypes enum."
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return 'GeneratorReference("{}", "{}", {}, {})'.format(
|
||||
self.name,
|
||||
self.generator_kind,
|
||||
self.parameters,
|
||||
self.metadata
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'otio.schema.GeneratorReference('
|
||||
'name={}, '
|
||||
'generator_kind={}, '
|
||||
'parameters={}, '
|
||||
'metadata={}'
|
||||
')'.format(
|
||||
repr(self.name),
|
||||
repr(self.generator_kind),
|
||||
repr(self.parameters),
|
||||
repr(self.metadata),
|
||||
)
|
||||
)
|
||||
128
pype/vendor/python/python_2/opentimelineio/schema/marker.py
vendored
Normal file
|
|
@@ -0,0 +1,128 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Marker class. Holds metadata over regions of time."""
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
opentime,
|
||||
)
|
||||
|
||||
|
||||
class MarkerColor:
|
||||
""" Enum encoding colors of markers as strings. """
|
||||
|
||||
PINK = "PINK"
|
||||
RED = "RED"
|
||||
ORANGE = "ORANGE"
|
||||
YELLOW = "YELLOW"
|
||||
GREEN = "GREEN"
|
||||
CYAN = "CYAN"
|
||||
BLUE = "BLUE"
|
||||
PURPLE = "PURPLE"
|
||||
MAGENTA = "MAGENTA"
|
||||
BLACK = "BLACK"
|
||||
WHITE = "WHITE"
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Marker(core.SerializableObject):
|
||||
|
||||
""" Holds metadata over time on a timeline """
|
||||
|
||||
_serializable_label = "Marker.2"
|
||||
_class_path = "marker.Marker"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
marked_range=None,
|
||||
color=MarkerColor.RED,
|
||||
metadata=None,
|
||||
):
|
||||
core.SerializableObject.__init__(
|
||||
self,
|
||||
)
|
||||
self.name = name
|
||||
self.marked_range = marked_range
|
||||
self.color = color
|
||||
self.metadata = metadata or {}
|
||||
|
||||
name = core.serializable_field("name", doc="Name of this marker.")
|
||||
|
||||
marked_range = core.serializable_field(
|
||||
"marked_range",
|
||||
opentime.TimeRange,
|
||||
"Range this marker applies to, relative to the Item this marker is "
|
||||
"attached to (e.g. the Clip or Track that owns this marker)."
|
||||
)
|
||||
|
||||
color = core.serializable_field(
|
||||
"color",
|
||||
required_type=type(MarkerColor.RED),
|
||||
doc="Color string for this marker (for example: 'RED'), based on the "
|
||||
"otio.schema.marker.MarkerColor enum."
|
||||
)
|
||||
|
||||
# old name
|
||||
range = core.deprecated_field()
|
||||
|
||||
metadata = core.serializable_field(
|
||||
"metadata",
|
||||
dict,
|
||||
"Metadata dictionary."
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.schema.Marker("
|
||||
"name={}, "
|
||||
"marked_range={}, "
|
||||
"metadata={}"
|
||||
")".format(
|
||||
repr(self.name),
|
||||
repr(self.marked_range),
|
||||
repr(self.metadata),
|
||||
)
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
"Marker("
|
||||
"{}, "
|
||||
"{}, "
|
||||
"{}"
|
||||
")".format(
|
||||
str(self.name),
|
||||
str(self.marked_range),
|
||||
str(self.metadata),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@core.upgrade_function_for(Marker, 2)
|
||||
def _version_one_to_two(data):
|
||||
data["marked_range"] = data["range"]
|
||||
del data["range"]
|
||||
return data
|
||||
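A minimal usage sketch for the Marker schema above, assuming the vendored package is importable as `opentimelineio` (names such as "review note" and "shot_010" are illustrative only):

import opentimelineio as otio

# Annotate one second of a clip, starting at frame 24 (24 fps).
marker = otio.schema.Marker(
    name="review note",
    marked_range=otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(24, 24),
        duration=otio.opentime.RationalTime(24, 24),
    ),
    color=otio.schema.MarkerColor.GREEN,
)

clip = otio.schema.Clip(name="shot_010")
clip.markers.append(marker)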
43 pype/vendor/python/python_2/opentimelineio/schema/missing_reference.py vendored Normal file
@@ -0,0 +1,43 @@
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""
|
||||
Implementation of the MissingReference media reference schema.
|
||||
"""
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class MissingReference(core.MediaReference):
|
||||
"""Represents media for which a concrete reference is missing."""
|
||||
|
||||
_serializable_label = "MissingReference.1"
|
||||
_name = "MissingReference"
|
||||
|
||||
@property
|
||||
def is_missing_reference(self):
|
||||
return True
|
||||
65 pype/vendor/python/python_2/opentimelineio/schema/schemadef.py vendored Normal file
@@ -0,0 +1,65 @@
from .. import (
    core,
    exceptions,
    plugins,
    schemadef
)


@core.register_type
class SchemaDef(plugins.PythonPlugin):
    _serializable_label = "SchemaDef.1"

    def __init__(
        self,
        name=None,
        execution_scope=None,
        filepath=None,
    ):
        super(SchemaDef, self).__init__(name, execution_scope, filepath)

    def module(self):
        """
        Return the module object for this schemadef plugin.
        If the module hasn't already been imported, it is imported and
        injected into the otio.schemadefs namespace as a side-effect.
        (redefines PythonPlugin.module())
        """

        if not self._module:
            self._module = self._imported_module("schemadef")
            if self.name:
                schemadef._add_schemadef_module(self.name, self._module)

        return self._module


def available_schemadef_names():
    """Return a string list of the available schemadefs."""

    return [str(sd.name) for sd in plugins.ActiveManifest().schemadefs]


def from_name(name):
    """Fetch the schemadef plugin object by the name of the schema directly."""

    try:
        return plugins.ActiveManifest().from_name(name, kind_list="schemadefs")
    except exceptions.NotSupportedError:
        raise exceptions.NotSupportedError(
            "schemadef not supported: {}, available: {}".format(
                name,
                available_schemadef_names()
            )
        )


def module_from_name(name):
    """Fetch the plugin's module by the name of the schemadef.

    Will load the plugin if it has not already been loaded. Reading a file that
    contains the schemadef will also trigger a load of the plugin.
    """
    plugin = from_name(name)
    return plugin.module()
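A brief sketch of how the lookup helpers above are typically used; the plugin name below is hypothetical and only illustrates the failure path:

from opentimelineio import exceptions
from opentimelineio.schema import schemadef

# Names come from the active plugin manifest (empty if no schemadef plugins exist).
print(schemadef.available_schemadef_names())

try:
    # "my_custom_schema" is a hypothetical plugin name used only for illustration.
    plugin_module = schemadef.module_from_name("my_custom_schema")
except exceptions.NotSupportedError:
    plugin_module = None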
149 pype/vendor/python/python_2/opentimelineio/schema/serializable_collection.py vendored Normal file
@@ -0,0 +1,149 @@
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""A serializable collection of SerializableObjects."""
|
||||
|
||||
import collections
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
core
|
||||
)
|
||||
|
||||
from . import (
|
||||
clip
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class SerializableCollection(
|
||||
core.SerializableObject,
|
||||
collections.MutableSequence
|
||||
):
|
||||
"""A kind of composition which can hold any serializable object.
|
||||
|
||||
This composition approximates the concept of a `bin` - a collection of
|
||||
SerializableObjects that do not have any compositing meaning, but can
|
||||
serialize to/from OTIO correctly, with metadata and a named collection.
|
||||
"""
|
||||
|
||||
_serializable_label = "SerializableCollection.1"
|
||||
_class_path = "schema.SerializableCollection"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
children=None,
|
||||
metadata=None,
|
||||
):
|
||||
super(SerializableCollection, self).__init__()
|
||||
|
||||
self.name = name
|
||||
self._children = children or []
|
||||
self.metadata = copy.deepcopy(metadata) if metadata else {}
|
||||
|
||||
name = core.serializable_field(
|
||||
"name",
|
||||
doc="SerializableCollection name."
|
||||
)
|
||||
_children = core.serializable_field(
|
||||
"children",
|
||||
list,
|
||||
"SerializableObject contained by this container."
|
||||
)
|
||||
metadata = core.serializable_field(
|
||||
"metadata",
|
||||
dict,
|
||||
doc="Metadata dictionary for this SerializableCollection."
|
||||
)
|
||||
|
||||
# @{ Stringification
|
||||
def __str__(self):
|
||||
return "SerializableCollection({}, {}, {})".format(
|
||||
str(self.name),
|
||||
str(self._children),
|
||||
str(self.metadata)
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.{}("
|
||||
"name={}, "
|
||||
"children={}, "
|
||||
"metadata={}"
|
||||
")".format(
|
||||
self._class_path,
|
||||
repr(self.name),
|
||||
repr(self._children),
|
||||
repr(self.metadata)
|
||||
)
|
||||
)
|
||||
# @}
|
||||
|
||||
# @{ collections.MutableSequence implementation
|
||||
def __getitem__(self, item):
|
||||
return self._children[item]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self._children[key] = value
|
||||
|
||||
def insert(self, index, item):
|
||||
self._children.insert(index, item)
|
||||
|
||||
def __len__(self):
|
||||
return len(self._children)
|
||||
|
||||
def __delitem__(self, item):
|
||||
del self._children[item]
|
||||
# @}
|
||||
|
||||
def each_child(
|
||||
self,
|
||||
search_range=None,
|
||||
descended_from_type=core.composable.Composable
|
||||
):
|
||||
for i, child in enumerate(self._children):
|
||||
# filter out children who are not descended from the specified type
|
||||
is_descendant = descended_from_type == core.composable.Composable
|
||||
if is_descendant or isinstance(child, descended_from_type):
|
||||
yield child
|
||||
|
||||
# for children that are compositions, recurse into their children
|
||||
if hasattr(child, "each_child"):
|
||||
for valid_child in (
|
||||
c for c in child.each_child(
|
||||
search_range,
|
||||
descended_from_type
|
||||
)
|
||||
):
|
||||
yield valid_child
|
||||
|
||||
def each_clip(self, search_range=None):
|
||||
return self.each_child(search_range, clip.Clip)
|
||||
|
||||
|
||||
# the original name for "SerializableCollection" was "SerializeableCollection"
|
||||
# this will turn this misspelling found in OTIO files into the correct instance
|
||||
# automatically.
|
||||
core.register_type(SerializableCollection, 'SerializeableCollection')
|
||||
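A short sketch of the bin-like behaviour described in the class docstring above (names are illustrative):

import opentimelineio as otio

bin_collection = otio.schema.SerializableCollection(
    name="dailies",
    children=[
        otio.schema.Clip(name="shot_010"),
        otio.schema.Timeline(name="cut_v001"),
    ],
)

# each_clip() recurses into any child that itself exposes each_child().
for found_clip in bin_collection.each_clip():
    print(found_clip.name)  # shot_010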
120 pype/vendor/python/python_2/opentimelineio/schema/stack.py vendored Normal file
@@ -0,0 +1,120 @@
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""A stack represents a series of composable.Composables that are arranged such
|
||||
that their start times are at the same point.
|
||||
|
||||
Most commonly, this would be a series of schema.Track objects that then
|
||||
contain clips. The 0 time of those tracks would coincide with the 0-time of
|
||||
the stack.
|
||||
|
||||
Stacks are in compositing order, with later children obscuring earlier
|
||||
children. In other words, from bottom to top. If a stack has three children,
|
||||
[A, B, C], C is above B which is above A.
|
||||
|
||||
A stack is the length of its longest child. If a child ends before the other
|
||||
children, then an earlier index child would be visible before it.
|
||||
"""
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
opentime,
|
||||
exceptions
|
||||
)
|
||||
|
||||
from . import (
|
||||
clip
|
||||
)
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Stack(core.Composition):
|
||||
_serializable_label = "Stack.1"
|
||||
_composition_kind = "Stack"
|
||||
_modname = "schema"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
children=None,
|
||||
source_range=None,
|
||||
markers=None,
|
||||
effects=None,
|
||||
metadata=None
|
||||
):
|
||||
core.Composition.__init__(
|
||||
self,
|
||||
name=name,
|
||||
children=children,
|
||||
source_range=source_range,
|
||||
markers=markers,
|
||||
effects=effects,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
def range_of_child_at_index(self, index):
|
||||
try:
|
||||
child = self[index]
|
||||
except IndexError:
|
||||
raise exceptions.NoSuchChildAtIndex(index)
|
||||
|
||||
dur = child.duration()
|
||||
|
||||
return opentime.TimeRange(
|
||||
start_time=opentime.RationalTime(0, dur.rate),
|
||||
duration=dur
|
||||
)
|
||||
|
||||
def each_clip(self, search_range=None):
|
||||
return self.each_child(search_range, clip.Clip)
|
||||
|
||||
def available_range(self):
|
||||
if len(self) == 0:
|
||||
return opentime.TimeRange()
|
||||
|
||||
duration = max(child.duration() for child in self)
|
||||
|
||||
return opentime.TimeRange(
|
||||
opentime.RationalTime(0, duration.rate),
|
||||
duration=duration
|
||||
)
|
||||
|
||||
def range_of_all_children(self):
|
||||
child_map = {}
|
||||
for i, c in enumerate(self._children):
|
||||
child_map[c] = self.range_of_child_at_index(i)
|
||||
return child_map
|
||||
|
||||
def trimmed_range_of_child_at_index(self, index, reference_space=None):
|
||||
range = self.range_of_child_at_index(index)
|
||||
|
||||
if not self.source_range:
|
||||
return range
|
||||
|
||||
range = opentime.TimeRange(
|
||||
start_time=self.source_range.start_time,
|
||||
duration=min(range.duration, self.source_range.duration)
|
||||
)
|
||||
|
||||
return range
|
||||
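A sketch of the stacking behaviour described in the module docstring above: two tracks start at the same time, and the stack's available range spans the longest one (values are illustrative):

import opentimelineio as otio
from opentimelineio import opentime


def make_clip(name, frames):
    return otio.schema.Clip(
        name=name,
        source_range=opentime.TimeRange(
            start_time=opentime.RationalTime(0, 24),
            duration=opentime.RationalTime(frames, 24),
        ),
    )


stack = otio.schema.Stack(children=[
    otio.schema.Track(name="background", children=[make_clip("bg", 48)]),
    otio.schema.Track(name="foreground", children=[make_clip("fg", 24)]),
])

# The stack is as long as its longest child: 48 frames at 24 fps.
print(stack.available_range().duration)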
133 pype/vendor/python/python_2/opentimelineio/schema/timeline.py vendored Normal file
@@ -0,0 +1,133 @@
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implementation of the OTIO built in schema, Timeline object."""
|
||||
|
||||
import copy
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
opentime,
|
||||
)
|
||||
|
||||
from . import stack, track
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Timeline(core.SerializableObject):
|
||||
_serializable_label = "Timeline.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
tracks=None,
|
||||
global_start_time=None,
|
||||
metadata=None,
|
||||
):
|
||||
super(Timeline, self).__init__()
|
||||
self.name = name
|
||||
self.global_start_time = copy.deepcopy(global_start_time)
|
||||
|
||||
if tracks is None:
|
||||
tracks = []
|
||||
self.tracks = stack.Stack(name="tracks", children=tracks)
|
||||
|
||||
self.metadata = copy.deepcopy(metadata) if metadata else {}
|
||||
|
||||
name = core.serializable_field("name", doc="Name of this timeline.")
|
||||
tracks = core.serializable_field(
|
||||
"tracks",
|
||||
core.Composition,
|
||||
doc="Stack of tracks containing items."
|
||||
)
|
||||
metadata = core.serializable_field(
|
||||
"metadata",
|
||||
dict,
|
||||
"Metadata dictionary."
|
||||
)
|
||||
global_start_time = core.serializable_field(
|
||||
"global_start_time",
|
||||
opentime.RationalTime,
|
||||
doc="Global starting time value and rate of the timeline."
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return 'Timeline("{}", {})'.format(str(self.name), str(self.tracks))
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"otio.schema.Timeline(name={}, tracks={})".format(
|
||||
repr(self.name),
|
||||
repr(self.tracks)
|
||||
)
|
||||
)
|
||||
|
||||
def each_child(self, search_range=None, descended_from_type=core.Composable):
|
||||
return self.tracks.each_child(search_range, descended_from_type)
|
||||
|
||||
def each_clip(self, search_range=None):
|
||||
"""Return a flat list of each clip, limited to the search_range."""
|
||||
|
||||
return self.tracks.each_clip(search_range)
|
||||
|
||||
def duration(self):
|
||||
"""Duration of this timeline."""
|
||||
|
||||
return self.tracks.duration()
|
||||
|
||||
def range_of_child(self, child):
|
||||
"""Range of the child object contained in this timeline."""
|
||||
|
||||
return self.tracks.range_of_child(child)
|
||||
|
||||
def video_tracks(self):
|
||||
"""
|
||||
This convenience method returns a list of the top-level video tracks in
|
||||
this timeline.
|
||||
"""
|
||||
return [
|
||||
trck for trck
|
||||
in self.tracks
|
||||
if (isinstance(trck, track.Track) and
|
||||
trck.kind == track.TrackKind.Video)
|
||||
]
|
||||
|
||||
def audio_tracks(self):
|
||||
"""
|
||||
This convenience method returns a list of the top-level audio tracks in
|
||||
this timeline.
|
||||
"""
|
||||
return [
|
||||
trck for trck
|
||||
in self.tracks
|
||||
if (isinstance(trck, track.Track) and
|
||||
trck.kind == track.TrackKind.Audio)
|
||||
]
|
||||
|
||||
|
||||
def timeline_from_clips(clips):
|
||||
"""Convenience for making a single track timeline from a list of clips."""
|
||||
|
||||
trck = track.Track(children=clips)
|
||||
return Timeline(tracks=[trck])
|
||||
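A sketch of the timeline_from_clips() convenience defined above (clip names and rates are illustrative):

import opentimelineio as otio
from opentimelineio import opentime
from opentimelineio.schema import timeline as schema_timeline

clips = [
    otio.schema.Clip(
        name="shot_{0:03d}".format(index),
        source_range=opentime.TimeRange(
            start_time=opentime.RationalTime(0, 24),
            duration=opentime.RationalTime(24, 24),
        ),
    )
    for index in (10, 20)
]

tl = schema_timeline.timeline_from_clips(clips)
print(len(tl.video_tracks()))  # 1: a single Video track wrapping both clips
print(tl.duration())           # 48 frames at 24 fps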
242 pype/vendor/python/python_2/opentimelineio/schema/track.py vendored Normal file
@@ -0,0 +1,242 @@
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Implement Track sublcass of composition."""
|
||||
|
||||
import collections
|
||||
|
||||
from .. import (
|
||||
core,
|
||||
opentime,
|
||||
)
|
||||
|
||||
from . import (
|
||||
gap,
|
||||
transition,
|
||||
clip,
|
||||
)
|
||||
|
||||
|
||||
class TrackKind:
|
||||
Video = "Video"
|
||||
Audio = "Audio"
|
||||
|
||||
|
||||
class NeighborGapPolicy:
|
||||
""" enum for deciding how to add gap when asking for neighbors """
|
||||
never = 0
|
||||
around_transitions = 1
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Track(core.Composition):
|
||||
_serializable_label = "Track.1"
|
||||
_composition_kind = "Track"
|
||||
_modname = "schema"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
children=None,
|
||||
kind=TrackKind.Video,
|
||||
source_range=None,
|
||||
markers=None,
|
||||
effects=None,
|
||||
metadata=None,
|
||||
):
|
||||
core.Composition.__init__(
|
||||
self,
|
||||
name=name,
|
||||
children=children,
|
||||
source_range=source_range,
|
||||
markers=markers,
|
||||
effects=effects,
|
||||
metadata=metadata
|
||||
)
|
||||
self.kind = kind
|
||||
|
||||
kind = core.serializable_field(
|
||||
"kind",
|
||||
doc="Composition kind (Stack, Track)"
|
||||
)
|
||||
|
||||
def range_of_child_at_index(self, index):
|
||||
child = self[index]
|
||||
|
||||
# sum the durations of all the children leading up to the chosen one
|
||||
start_time = sum(
|
||||
(
|
||||
o_c.duration()
|
||||
for o_c in (c for c in self[:index] if not c.overlapping())
|
||||
),
|
||||
opentime.RationalTime(value=0, rate=child.duration().rate)
|
||||
)
|
||||
if isinstance(child, transition.Transition):
|
||||
start_time -= child.in_offset
|
||||
|
||||
return opentime.TimeRange(start_time, child.duration())
|
||||
|
||||
def trimmed_range_of_child_at_index(self, index, reference_space=None):
|
||||
child_range = self.range_of_child_at_index(index)
|
||||
|
||||
return self.trim_child_range(child_range)
|
||||
|
||||
def handles_of_child(self, child):
|
||||
"""If media beyond the ends of this child are visible due to adjacent
|
||||
Transitions (only applicable in a Track) then this will return the
|
||||
head and tail offsets as a tuple of RationalTime objects. If no handles
|
||||
are present on either side, then None is returned instead of a
|
||||
RationalTime.
|
||||
|
||||
Example usage
|
||||
|
||||
>>> head, tail = track.handles_of_child(clip)
|
||||
>>> if head:
|
||||
... print('do something')
|
||||
>>> if tail:
|
||||
... print('do something else')
|
||||
"""
|
||||
head, tail = None, None
|
||||
before, after = self.neighbors_of(child)
|
||||
if isinstance(before, transition.Transition):
|
||||
head = before.in_offset
|
||||
if isinstance(after, transition.Transition):
|
||||
tail = after.out_offset
|
||||
|
||||
return head, tail
|
||||
|
||||
def available_range(self):
|
||||
# Sum up our child items' durations
|
||||
duration = sum(
|
||||
(c.duration() for c in self if isinstance(c, core.Item)),
|
||||
opentime.RationalTime()
|
||||
)
|
||||
|
||||
# Add the implicit gap when a Transition is at the start/end
|
||||
if self and isinstance(self[0], transition.Transition):
|
||||
duration += self[0].in_offset
|
||||
if self and isinstance(self[-1], transition.Transition):
|
||||
duration += self[-1].out_offset
|
||||
|
||||
result = opentime.TimeRange(
|
||||
start_time=opentime.RationalTime(0, duration.rate),
|
||||
duration=duration
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
def each_clip(self, search_range=None, shallow_search=False):
|
||||
return self.each_child(search_range, clip.Clip, shallow_search)
|
||||
|
||||
def neighbors_of(self, item, insert_gap=NeighborGapPolicy.never):
|
||||
"""Returns the neighbors of the item as a namedtuple, (previous, next).
|
||||
|
||||
Can optionally fill in gaps when transitions have no gaps next to them.
|
||||
|
||||
with insert_gap == NeighborGapPolicy.never:
|
||||
[A, B, C] :: neighbors_of(B) -> (A, C)
|
||||
[A, B, C] :: neighbors_of(A) -> (None, B)
|
||||
[A, B, C] :: neighbors_of(C) -> (B, None)
|
||||
[A] :: neighbors_of(A) -> (None, None)
|
||||
|
||||
with insert_gap == NeighborGapPolicy.around_transitions:
|
||||
(assuming A and C are transitions)
|
||||
[A, B, C] :: neighbors_of(B) -> (A, C)
|
||||
[A, B, C] :: neighbors_of(A) -> (Gap, B)
|
||||
[A, B, C] :: neighbors_of(C) -> (B, Gap)
|
||||
[A] :: neighbors_of(A) -> (Gap, Gap)
|
||||
"""
|
||||
|
||||
try:
|
||||
index = self.index(item)
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
"item: {} is not in composition: {}".format(
|
||||
item,
|
||||
self
|
||||
)
|
||||
)
|
||||
|
||||
previous, next_item = None, None
|
||||
|
||||
# look before index
|
||||
if index == 0:
|
||||
if insert_gap == NeighborGapPolicy.around_transitions:
|
||||
if isinstance(item, transition.Transition):
|
||||
previous = gap.Gap(
|
||||
source_range=opentime.TimeRange(duration=item.in_offset))
|
||||
elif index > 0:
|
||||
previous = self[index - 1]
|
||||
|
||||
if index == len(self) - 1:
|
||||
if insert_gap == NeighborGapPolicy.around_transitions:
|
||||
if isinstance(item, transition.Transition):
|
||||
next_item = gap.Gap(
|
||||
source_range=opentime.TimeRange(duration=item.out_offset))
|
||||
elif index < len(self) - 1:
|
||||
next_item = self[index + 1]
|
||||
|
||||
return collections.namedtuple('neighbors', ('previous', 'next'))(
|
||||
previous,
|
||||
next_item
|
||||
)
|
||||
|
||||
def range_of_all_children(self):
|
||||
"""Return a dict mapping children to their range in this track."""
|
||||
|
||||
if not self._children:
|
||||
return {}
|
||||
|
||||
result_map = {}
|
||||
|
||||
# Heuristic to guess what the rate should be set to based on the first
|
||||
# thing in the track.
|
||||
first_thing = self._children[0]
|
||||
if isinstance(first_thing, transition.Transition):
|
||||
rate = first_thing.in_offset.rate
|
||||
else:
|
||||
rate = first_thing.trimmed_range().duration.rate
|
||||
|
||||
last_end_time = opentime.RationalTime(0, rate)
|
||||
|
||||
for thing in self._children:
|
||||
if isinstance(thing, transition.Transition):
|
||||
result_map[thing] = opentime.TimeRange(
|
||||
last_end_time - thing.in_offset,
|
||||
thing.out_offset + thing.in_offset,
|
||||
)
|
||||
else:
|
||||
last_range = opentime.TimeRange(
|
||||
last_end_time,
|
||||
thing.trimmed_range().duration
|
||||
)
|
||||
result_map[thing] = last_range
|
||||
last_end_time = last_range.end_time_exclusive()
|
||||
|
||||
return result_map
|
||||
|
||||
|
||||
# the original name for "track" was "sequence" - this will turn "Sequence"
|
||||
# found in OTIO files into Track automatically.
|
||||
core.register_type(Track, "Sequence")
|
||||
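A sketch exercising neighbors_of() and range_of_all_children() from the Track schema above (clip names are illustrative):

import opentimelineio as otio
from opentimelineio import opentime

track = otio.schema.Track(children=[
    otio.schema.Clip(
        name=name,
        source_range=opentime.TimeRange(
            start_time=opentime.RationalTime(0, 24),
            duration=opentime.RationalTime(24, 24),
        ),
    )
    for name in ("A", "B")
])

previous_item, next_item = track.neighbors_of(track[1])
print(previous_item.name)  # A
print(next_item)           # None, because "B" is the last child

child_ranges = track.range_of_all_children()
print(child_ranges[track[1]].start_time)  # frame 24: "B" starts right after "A"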
159 pype/vendor/python/python_2/opentimelineio/schema/transition.py vendored Normal file
@@ -0,0 +1,159 @@
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Transition base class"""
|
||||
|
||||
from .. import (
|
||||
opentime,
|
||||
core,
|
||||
exceptions,
|
||||
)
|
||||
|
||||
import copy
|
||||
|
||||
|
||||
class TransitionTypes:
|
||||
"""Enum encoding types of transitions.
|
||||
|
||||
This is for representing "Dissolves" and "Wipes" defined by the
|
||||
multi-source effect as defined by SMPTE 258M-2004 7.6.3.2
|
||||
|
||||
Other effects are handled by the `schema.Effect` class.
|
||||
"""
|
||||
|
||||
# @{ SMPTE transitions.
|
||||
SMPTE_Dissolve = "SMPTE_Dissolve"
|
||||
# SMPTE_Wipe = "SMPTE_Wipe" -- @TODO
|
||||
# @}
|
||||
|
||||
# Non SMPTE transitions.
|
||||
Custom = "Custom_Transition"
|
||||
|
||||
|
||||
@core.register_type
|
||||
class Transition(core.Composable):
|
||||
"""Represents a transition between two items."""
|
||||
|
||||
_serializable_label = "Transition.1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name=None,
|
||||
transition_type=None,
|
||||
# @TODO: parameters will be added later as needed (SMPTE_Wipe will
|
||||
# probably require it)
|
||||
# parameters=None,
|
||||
in_offset=None,
|
||||
out_offset=None,
|
||||
metadata=None
|
||||
):
|
||||
core.Composable.__init__(
|
||||
self,
|
||||
name=name,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
# init everything as None first, so that we will catch uninitialized
|
||||
# values via exceptions
|
||||
# if parameters is None:
|
||||
# parameters = {}
|
||||
# self.parameters = parameters
|
||||
self.transition_type = transition_type
|
||||
self.in_offset = copy.deepcopy(in_offset)
|
||||
self.out_offset = copy.deepcopy(out_offset)
|
||||
|
||||
transition_type = core.serializable_field(
|
||||
"transition_type",
|
||||
required_type=type(TransitionTypes.SMPTE_Dissolve),
|
||||
doc="Kind of transition, as defined by the "
|
||||
"schema.transition.TransitionTypes enum."
|
||||
)
|
||||
# parameters = core.serializable_field(
|
||||
# "parameters",
|
||||
# doc="Parameters of the transition."
|
||||
# )
|
||||
in_offset = core.serializable_field(
|
||||
"in_offset",
|
||||
required_type=opentime.RationalTime,
|
||||
doc="Amount of the previous clip this transition overlaps, exclusive."
|
||||
)
|
||||
out_offset = core.serializable_field(
|
||||
"out_offset",
|
||||
required_type=opentime.RationalTime,
|
||||
doc="Amount of the next clip this transition overlaps, exclusive."
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return 'Transition("{}", "{}", {}, {}, {})'.format(
|
||||
self.name,
|
||||
self.transition_type,
|
||||
self.in_offset,
|
||||
self.out_offset,
|
||||
# self.parameters,
|
||||
self.metadata
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
'otio.schema.Transition('
|
||||
'name={}, '
|
||||
'transition_type={}, '
|
||||
'in_offset={}, '
|
||||
'out_offset={}, '
|
||||
# 'parameters={}, '
|
||||
'metadata={}'
|
||||
')'.format(
|
||||
repr(self.name),
|
||||
repr(self.transition_type),
|
||||
repr(self.in_offset),
|
||||
repr(self.out_offset),
|
||||
# repr(self.parameters),
|
||||
repr(self.metadata),
|
||||
)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def overlapping():
|
||||
return True
|
||||
|
||||
def duration(self):
|
||||
return self.in_offset + self.out_offset
|
||||
|
||||
def range_in_parent(self):
|
||||
"""Find and return the range of this item in the parent."""
|
||||
if not self.parent():
|
||||
raise exceptions.NotAChildError(
|
||||
"No parent of {}, cannot compute range in parent.".format(self)
|
||||
)
|
||||
|
||||
return self.parent().range_of_child(self)
|
||||
|
||||
def trimmed_range_in_parent(self):
|
||||
"""Find and return the timmed range of this item in the parent."""
|
||||
if not self.parent():
|
||||
raise exceptions.NotAChildError(
|
||||
"No parent of {}, cannot compute range in parent.".format(self)
|
||||
)
|
||||
|
||||
return self.parent().trimmed_range_of_child(self)
|
||||
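A sketch of constructing the SMPTE dissolve described above (offsets are illustrative):

import opentimelineio as otio
from opentimelineio import opentime

# A dissolve taking 12 frames from the outgoing clip and 12 from the incoming one.
dissolve = otio.schema.Transition(
    name="cross dissolve",
    transition_type=otio.schema.transition.TransitionTypes.SMPTE_Dissolve,
    in_offset=opentime.RationalTime(12, 24),
    out_offset=opentime.RationalTime(12, 24),
)

print(dissolve.duration())  # in_offset + out_offset == 24 frames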
5 pype/vendor/python/python_2/opentimelineio/schemadef/__init__.py vendored Normal file
@@ -0,0 +1,5 @@
def _add_schemadef_module(name, mod):
    """Insert a new module name and module object into schemadef namespace."""
    ns = globals()  # the namespace dict of the schemadef package
    ns[name] = mod
54 pype/vendor/python/python_2/opentimelineio/test_utils.py vendored Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2018 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Utility assertions for OTIO Unit tests."""
|
||||
|
||||
import re
|
||||
|
||||
from . import (
|
||||
adapters
|
||||
)
|
||||
|
||||
|
||||
class OTIOAssertions(object):
|
||||
def assertJsonEqual(self, known, test_result):
|
||||
"""Convert to json and compare that (more readable)."""
|
||||
self.maxDiff = None
|
||||
|
||||
known_str = adapters.write_to_string(known, 'otio_json')
|
||||
test_str = adapters.write_to_string(test_result, 'otio_json')
|
||||
|
||||
def strip_trailing_decimal_zero(s):
|
||||
return re.sub(r'"(value|rate)": (\d+)\.0', r'"\1": \2', s)
|
||||
|
||||
self.assertMultiLineEqual(
|
||||
strip_trailing_decimal_zero(known_str),
|
||||
strip_trailing_decimal_zero(test_str)
|
||||
)
|
||||
|
||||
def assertIsOTIOEquivalentTo(self, known, test_result):
|
||||
"""Test using the 'is equivalent to' method on SerializableObject"""
|
||||
|
||||
self.assertTrue(known.is_equivalent_to(test_result))
|
||||
37 pype/vendor/python/python_2/opentimelineio_contrib/__init__.py vendored Normal file
@@ -0,0 +1,37 @@
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright 2018 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Unsupported contrib code for OpenTimelineIO."""
|
||||
|
||||
# flake8: noqa
|
||||
|
||||
from . import (
|
||||
adapters
|
||||
)
|
||||
|
||||
__version__ = "0.11.0"
|
||||
__author__ = "Pixar Animation Studios"
|
||||
__author_email__ = "opentimelineio@pixar.com"
|
||||
__license__ = "Modified Apache 2.0 License"
|
||||
0 pype/vendor/python/python_2/opentimelineio_contrib/adapters/__init__.py vendored Normal file
0 pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/__init__.py vendored Normal file
764 pype/vendor/python/python_2/opentimelineio_contrib/adapters/aaf_adapter/aaf_writer.py vendored Normal file
@@ -0,0 +1,764 @@
#
|
||||
# Copyright 2019 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""AAF Adapter Transcriber
|
||||
|
||||
Specifies how to transcribe an OpenTimelineIO file into an AAF file.
|
||||
"""
|
||||
|
||||
import aaf2
|
||||
import abc
|
||||
import uuid
|
||||
import opentimelineio as otio
|
||||
import os
|
||||
import copy
|
||||
import re
|
||||
|
||||
|
||||
AAF_PARAMETERDEF_PAN = aaf2.auid.AUID("e4962322-2267-11d3-8a4c-0050040ef7d2")
|
||||
AAF_OPERATIONDEF_MONOAUDIOPAN = aaf2.auid.AUID("9d2ea893-0968-11d3-8a38-0050040ef7d2")
|
||||
AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER = uuid.UUID(
|
||||
"c0038672-a8cf-11d3-a05b-006094eb75cb")
|
||||
AAF_PARAMETERDEF_AVIDEFFECTID = uuid.UUID(
|
||||
"93994bd6-a81d-11d3-a05b-006094eb75cb")
|
||||
AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U = uuid.UUID(
|
||||
"8d56813d-847e-11d5-935a-50f857c10000")
|
||||
AAF_PARAMETERDEF_LEVEL = uuid.UUID("e4962320-2267-11d3-8a4c-0050040ef7d2")
|
||||
AAF_VVAL_EXTRAPOLATION_ID = uuid.UUID("0e24dd54-66cd-4f1a-b0a0-670ac3a7a0b3")
|
||||
AAF_OPERATIONDEF_SUBMASTER = uuid.UUID("f1db0f3d-8d64-11d3-80df-006008143e6f")
|
||||
|
||||
|
||||
class AAFAdapterError(otio.exceptions.OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class AAFValidationError(AAFAdapterError):
|
||||
pass
|
||||
|
||||
|
||||
class AAFFileTranscriber(object):
|
||||
"""
|
||||
AAFFileTranscriber
|
||||
|
||||
AAFFileTranscriber manages the file-level knowledge during a conversion from
|
||||
otio to aaf. This includes keeping track of unique tapemobs and mastermobs.
|
||||
"""
|
||||
|
||||
def __init__(self, input_otio, aaf_file, **kwargs):
|
||||
"""
|
||||
AAFFileTranscriber requires an input timeline and an output pyaaf2 file handle.
|
||||
|
||||
Args:
|
||||
input_otio: an input OpenTimelineIO timeline
|
||||
aaf_file: a pyaaf2 file handle to an output file
|
||||
"""
|
||||
self.aaf_file = aaf_file
|
||||
self.compositionmob = self.aaf_file.create.CompositionMob()
|
||||
self.compositionmob.name = input_otio.name
|
||||
self.compositionmob.usage = "Usage_TopLevel"
|
||||
self.aaf_file.content.mobs.append(self.compositionmob)
|
||||
self._unique_mastermobs = {}
|
||||
self._unique_tapemobs = {}
|
||||
self._clip_mob_ids_map = _gather_clip_mob_ids(input_otio, **kwargs)
|
||||
|
||||
def _unique_mastermob(self, otio_clip):
|
||||
"""Get a unique mastermob, identified by clip metadata mob id."""
|
||||
mob_id = self._clip_mob_ids_map.get(otio_clip)
|
||||
mastermob = self._unique_mastermobs.get(mob_id)
|
||||
if not mastermob:
|
||||
mastermob = self.aaf_file.create.MasterMob()
|
||||
mastermob.name = otio_clip.name
|
||||
mastermob.mob_id = aaf2.mobid.MobID(mob_id)
|
||||
self.aaf_file.content.mobs.append(mastermob)
|
||||
self._unique_mastermobs[mob_id] = mastermob
|
||||
return mastermob
|
||||
|
||||
def _unique_tapemob(self, otio_clip):
|
||||
"""Get a unique tapemob, identified by clip metadata mob id."""
|
||||
mob_id = self._clip_mob_ids_map.get(otio_clip)
|
||||
tapemob = self._unique_tapemobs.get(mob_id)
|
||||
if not tapemob:
|
||||
tapemob = self.aaf_file.create.SourceMob()
|
||||
tapemob.name = otio_clip.name
|
||||
tapemob.descriptor = self.aaf_file.create.ImportDescriptor()
|
||||
# If the edit_rate is not an integer, we need
|
||||
# to use drop frame with a nominal integer fps.
|
||||
edit_rate = otio_clip.visible_range().duration.rate
|
||||
timecode_fps = round(edit_rate)
|
||||
tape_timecode_slot = tapemob.create_timecode_slot(
|
||||
edit_rate=edit_rate,
|
||||
timecode_fps=timecode_fps,
|
||||
drop_frame=(edit_rate != timecode_fps)
|
||||
)
|
||||
timecode_start = (
|
||||
otio_clip.media_reference.available_range.start_time.value)
|
||||
timecode_length = (
|
||||
otio_clip.media_reference.available_range.duration.value)
|
||||
|
||||
tape_timecode_slot.segment.start = timecode_start
|
||||
tape_timecode_slot.segment.length = timecode_length
|
||||
self.aaf_file.content.mobs.append(tapemob)
|
||||
self._unique_tapemobs[mob_id] = tapemob
|
||||
return tapemob
|
||||
|
||||
def track_transcriber(self, otio_track):
|
||||
"""Return an appropriate _TrackTranscriber given an otio track."""
|
||||
if otio_track.kind == otio.schema.TrackKind.Video:
|
||||
transcriber = VideoTrackTranscriber(self, otio_track)
|
||||
elif otio_track.kind == otio.schema.TrackKind.Audio:
|
||||
transcriber = AudioTrackTranscriber(self, otio_track)
|
||||
else:
|
||||
raise otio.exceptions.NotSupportedError(
|
||||
"Unsupported track kind: {}".format(otio_track.kind))
|
||||
return transcriber
|
||||
|
||||
|
||||
def validate_metadata(timeline):
|
||||
"""Print a check of necessary metadata requirements for an otio timeline."""
|
||||
|
||||
all_checks = [__check(timeline, "duration().rate")]
|
||||
edit_rate = __check(timeline, "duration().rate").value
|
||||
|
||||
for child in timeline.each_child():
|
||||
checks = []
|
||||
if isinstance(child, otio.schema.Gap):
|
||||
checks = [
|
||||
__check(child, "duration().rate").equals(edit_rate)
|
||||
]
|
||||
if isinstance(child, otio.schema.Clip):
|
||||
checks = [
|
||||
__check(child, "duration().rate").equals(edit_rate),
|
||||
__check(child, "media_reference.available_range.duration.rate"
|
||||
).equals(edit_rate),
|
||||
__check(child, "media_reference.available_range.start_time.rate"
|
||||
).equals(edit_rate)
|
||||
]
|
||||
if isinstance(child, otio.schema.Transition):
|
||||
checks = [
|
||||
__check(child, "duration().rate").equals(edit_rate),
|
||||
__check(child, "metadata['AAF']['PointList']"),
|
||||
__check(child, "metadata['AAF']['OperationGroup']['Operation']"
|
||||
"['DataDefinition']['Name']"),
|
||||
__check(child, "metadata['AAF']['OperationGroup']['Operation']"
|
||||
"['Description']"),
|
||||
__check(child, "metadata['AAF']['OperationGroup']['Operation']"
|
||||
"['Name']"),
|
||||
__check(child, "metadata['AAF']['CutPoint']")
|
||||
]
|
||||
all_checks.extend(checks)
|
||||
|
||||
if any(check.errors for check in all_checks):
|
||||
raise AAFValidationError("\n" + "\n".join(
|
||||
sum([check.errors for check in all_checks], [])))
|
||||
|
||||
|
||||
def _gather_clip_mob_ids(input_otio,
|
||||
prefer_file_mob_id=False,
|
||||
use_empty_mob_ids=False,
|
||||
**kwargs):
|
||||
"""
|
||||
Create dictionary of otio clips with their corresponding mob ids.
|
||||
"""
|
||||
|
||||
def _from_clip_metadata(clip):
|
||||
"""Get the MobID from the clip.metadata."""
|
||||
return clip.metadata.get("AAF", {}).get("SourceID")
|
||||
|
||||
def _from_media_reference_metadata(clip):
|
||||
"""Get the MobID from the media_reference.metadata."""
|
||||
return (clip.media_reference.metadata.get("AAF", {}).get("MobID") or
|
||||
clip.media_reference.metadata.get("AAF", {}).get("SourceID"))
|
||||
|
||||
def _from_aaf_file(clip):
|
||||
""" Get the MobID from the AAF file itself."""
|
||||
mob_id = None
|
||||
target_url = clip.media_reference.target_url
|
||||
if os.path.isfile(target_url) and target_url.endswith("aaf"):
|
||||
with aaf2.open(clip.media_reference.target_url) as aaf_file:
|
||||
mastermobs = list(aaf_file.content.mastermobs())
|
||||
if len(mastermobs) == 1:
|
||||
mob_id = mastermobs[0].mob_id
|
||||
return mob_id
|
||||
|
||||
def _generate_empty_mobid(clip):
|
||||
"""Generate a meaningless MobID."""
|
||||
return aaf2.mobid.MobID.new()
|
||||
|
||||
strategies = [
|
||||
_from_clip_metadata,
|
||||
_from_media_reference_metadata,
|
||||
_from_aaf_file
|
||||
]
|
||||
|
||||
if prefer_file_mob_id:
|
||||
strategies.remove(_from_aaf_file)
|
||||
strategies.insert(0, _from_aaf_file)
|
||||
|
||||
if use_empty_mob_ids:
|
||||
strategies.append(_generate_empty_mobid)
|
||||
|
||||
clip_mob_ids = {}
|
||||
|
||||
for otio_clip in input_otio.each_clip():
|
||||
for strategy in strategies:
|
||||
mob_id = strategy(otio_clip)
|
||||
if mob_id:
|
||||
clip_mob_ids[otio_clip] = mob_id
|
||||
break
|
||||
else:
|
||||
raise AAFAdapterError("Cannot find mob ID for clip {}".format(otio_clip))
|
||||
|
||||
return clip_mob_ids
|
||||
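# Illustration only (not part of the vendored module): the simplest way to
# satisfy the lookup above is to stamp a MobID onto the clip metadata so that
# _from_clip_metadata() succeeds before any file probing, e.g.
#
#     clip.metadata["AAF"] = {"SourceID": some_mob_id_string}
#
# where `some_mob_id_string` is a placeholder for a real MobID value. With
# use_empty_mob_ids=True, _generate_empty_mobid() supplies a fresh
# aaf2.mobid.MobID.new() as the final fallback.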
|
||||
|
||||
def _stackify_nested_groups(timeline):
|
||||
"""
|
||||
Ensure that all nesting in a given timeline is in a stack container.
|
||||
This conforms with how AAF thinks about nesting; there needs
|
||||
to be an outer container, even if it's just one object.
|
||||
"""
|
||||
copied = copy.deepcopy(timeline)
|
||||
for track in copied.tracks:
|
||||
for i, child in enumerate(track.each_child()):
|
||||
is_nested = isinstance(child, otio.schema.Track)
|
||||
is_parent_in_stack = isinstance(child.parent(), otio.schema.Stack)
|
||||
if is_nested and not is_parent_in_stack:
|
||||
stack = otio.schema.Stack()
|
||||
track.remove(child)
|
||||
stack.append(child)
|
||||
track.insert(i, stack)
|
||||
return copied
|
||||
|
||||
|
||||
class _TrackTranscriber(object):
|
||||
"""
|
||||
_TrackTranscriber is the base class for the conversion of a given otio track.
|
||||
|
||||
_TrackTranscriber is not meant to be used by itself. It provides the common
|
||||
functionality to inherit from. We need an abstract base class because Audio and
|
||||
Video are handled differently.
|
||||
"""
|
||||
__metaclass__ = abc.ABCMeta
|
||||
|
||||
def __init__(self, root_file_transcriber, otio_track):
|
||||
"""
|
||||
_TrackTranscriber
|
||||
|
||||
Args:
|
||||
root_file_transcriber: the corresponding 'parent' AAFFileTranscriber object
|
||||
otio_track: the given otio_track to convert
|
||||
"""
|
||||
self.root_file_transcriber = root_file_transcriber
|
||||
self.compositionmob = root_file_transcriber.compositionmob
|
||||
self.aaf_file = root_file_transcriber.aaf_file
|
||||
self.otio_track = otio_track
|
||||
self.edit_rate = next(self.otio_track.each_child()).duration().rate
|
||||
self.timeline_mobslot, self.sequence = self._create_timeline_mobslot()
|
||||
self.timeline_mobslot.name = self.otio_track.name
|
||||
|
||||
def transcribe(self, otio_child):
|
||||
"""Transcribe otio child to corresponding AAF object"""
|
||||
if isinstance(otio_child, otio.schema.Gap):
|
||||
filler = self.aaf_filler(otio_child)
|
||||
return filler
|
||||
elif isinstance(otio_child, otio.schema.Transition):
|
||||
transition = self.aaf_transition(otio_child)
|
||||
return transition
|
||||
elif isinstance(otio_child, otio.schema.Clip):
|
||||
source_clip = self.aaf_sourceclip(otio_child)
|
||||
return source_clip
|
||||
elif isinstance(otio_child, otio.schema.Track):
|
||||
sequence = self.aaf_sequence(otio_child)
|
||||
return sequence
|
||||
elif isinstance(otio_child, otio.schema.Stack):
|
||||
operation_group = self.aaf_operation_group(otio_child)
|
||||
return operation_group
|
||||
else:
|
||||
raise otio.exceptions.NotSupportedError(
|
||||
"Unsupported otio child type: {}".format(type(otio_child)))
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def media_kind(self):
|
||||
"""Return the string for what kind of track this is."""
|
||||
pass
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def _master_mob_slot_id(self):
|
||||
"""
|
||||
Return the MasterMob Slot ID for the corresponding track media kind
|
||||
"""
|
||||
# MasterMob's and MasterMob slots have to be unique. We handle unique
|
||||
# MasterMob's with _unique_mastermob(). We also need to protect against
|
||||
# duplicate MasterMob slots. As of now, we mandate all picture clips to
|
||||
# be created in MasterMob slot 1 and all sound clips to be created in
|
||||
# MasterMob slot 2. While this is a little inadequate, it works for now
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def _create_timeline_mobslot(self):
|
||||
"""
|
||||
Return a timeline_mobslot and sequence for this track.
|
||||
|
||||
In AAF, a TimelineMobSlot is a container for the Sequence. A Sequence is
|
||||
analogous to an otio track.
|
||||
|
||||
Returns:
|
||||
Returns a tuple of (TimelineMobSlot, Sequence)
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def default_descriptor(self, otio_clip):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def _transition_parameters(self):
|
||||
pass
|
||||
|
||||
def aaf_filler(self, otio_gap):
|
||||
"""Convert an otio Gap into an aaf Filler"""
|
||||
length = otio_gap.visible_range().duration.value
|
||||
filler = self.aaf_file.create.Filler(self.media_kind, length)
|
||||
return filler
|
||||
|
||||
def aaf_sourceclip(self, otio_clip):
|
||||
"""Convert an otio Clip into an aaf SourceClip"""
|
||||
tapemob, tapemob_slot = self._create_tapemob(otio_clip)
|
||||
filemob, filemob_slot = self._create_filemob(otio_clip, tapemob, tapemob_slot)
|
||||
mastermob, mastermob_slot = self._create_mastermob(otio_clip,
|
||||
filemob,
|
||||
filemob_slot)
|
||||
|
||||
# We need both `start_time` and `duration`
|
||||
# Here `start` is the offset between `first` and `in` values.
|
||||
|
||||
offset = (otio_clip.visible_range().start_time -
|
||||
otio_clip.available_range().start_time)
|
||||
start = offset.value
|
||||
length = otio_clip.visible_range().duration.value
|
||||
|
||||
compmob_clip = self.compositionmob.create_source_clip(
|
||||
slot_id=self.timeline_mobslot.slot_id,
|
||||
start=start,
|
||||
length=length,
|
||||
media_kind=self.media_kind)
|
||||
compmob_clip.mob = mastermob
|
||||
compmob_clip.slot = mastermob_slot
|
||||
compmob_clip.slot_id = mastermob_slot.slot_id
|
||||
return compmob_clip
|
||||
|
||||
def aaf_transition(self, otio_transition):
|
||||
"""Convert an otio Transition into an aaf Transition"""
|
||||
if (otio_transition.transition_type !=
|
||||
otio.schema.transition.TransitionTypes.SMPTE_Dissolve):
|
||||
print(
|
||||
"Unsupported transition type: {}".format(
|
||||
otio_transition.transition_type))
|
||||
return None
|
||||
|
||||
transition_params, varying_value = self._transition_parameters()
|
||||
|
||||
interpolation_def = self.aaf_file.create.InterpolationDef(
|
||||
aaf2.misc.LinearInterp, "LinearInterp", "Linear keyframe interpolation")
|
||||
self.aaf_file.dictionary.register_def(interpolation_def)
|
||||
varying_value["Interpolation"].value = (
|
||||
self.aaf_file.dictionary.lookup_interperlationdef("LinearInterp"))
|
||||
|
||||
pointlist = otio_transition.metadata["AAF"]["PointList"]
|
||||
|
||||
c1 = self.aaf_file.create.ControlPoint()
|
||||
c1["EditHint"].value = "Proportional"
|
||||
c1.value = pointlist[0]["Value"]
|
||||
c1.time = pointlist[0]["Time"]
|
||||
|
||||
c2 = self.aaf_file.create.ControlPoint()
|
||||
c2["EditHint"].value = "Proportional"
|
||||
c2.value = pointlist[1]["Value"]
|
||||
c2.time = pointlist[1]["Time"]
|
||||
|
||||
varying_value["PointList"].extend([c1, c2])
|
||||
|
||||
op_group_metadata = otio_transition.metadata["AAF"]["OperationGroup"]
|
||||
effect_id = op_group_metadata["Operation"].get("Identification")
|
||||
is_time_warp = op_group_metadata["Operation"].get("IsTimeWarp")
|
||||
by_pass = op_group_metadata["Operation"].get("Bypass")
|
||||
number_inputs = op_group_metadata["Operation"].get("NumberInputs")
|
||||
operation_category = op_group_metadata["Operation"].get("OperationCategory")
|
||||
data_def_name = op_group_metadata["Operation"]["DataDefinition"]["Name"]
|
||||
data_def = self.aaf_file.dictionary.lookup_datadef(str(data_def_name))
|
||||
description = op_group_metadata["Operation"]["Description"]
|
||||
op_def_name = otio_transition.metadata["AAF"][
|
||||
"OperationGroup"
|
||||
]["Operation"]["Name"]
|
||||
|
||||
# Create OperationDefinition
|
||||
op_def = self.aaf_file.create.OperationDef(uuid.UUID(effect_id), op_def_name)
|
||||
self.aaf_file.dictionary.register_def(op_def)
|
||||
op_def.media_kind = self.media_kind
|
||||
datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind)
|
||||
op_def["IsTimeWarp"].value = is_time_warp
|
||||
op_def["Bypass"].value = by_pass
|
||||
op_def["NumberInputs"].value = number_inputs
|
||||
op_def["OperationCategory"].value = str(operation_category)
|
||||
op_def["ParametersDefined"].extend(transition_params)
|
||||
op_def["DataDefinition"].value = data_def
|
||||
op_def["Description"].value = str(description)
|
||||
|
||||
# Create OperationGroup
|
||||
length = otio_transition.duration().value
|
||||
operation_group = self.aaf_file.create.OperationGroup(op_def, length)
|
||||
operation_group["DataDefinition"].value = datadef
|
||||
operation_group["Parameters"].append(varying_value)
|
||||
|
||||
# Create Transition
|
||||
transition = self.aaf_file.create.Transition(self.media_kind, length)
|
||||
transition["OperationGroup"].value = operation_group
|
||||
transition["CutPoint"].value = otio_transition.metadata["AAF"]["CutPoint"]
|
||||
transition["DataDefinition"].value = datadef
|
||||
return transition
|
||||
|
||||
def aaf_sequence(self, otio_track):
|
||||
"""Convert an otio Track into an aaf Sequence"""
|
||||
sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind)
|
||||
length = 0
|
||||
for nested_otio_child in otio_track:
|
||||
result = self.transcribe(nested_otio_child)
|
||||
length += result.length
|
||||
sequence.components.append(result)
|
||||
sequence.length = length
|
||||
return sequence
|
||||
|
||||
def aaf_operation_group(self, otio_stack):
|
||||
"""
|
||||
Create and return an OperationGroup which will contain other AAF objects
|
||||
to support OTIO nesting
|
||||
"""
|
||||
# Create OperationDefinition
|
||||
op_def = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_SUBMASTER,
|
||||
"Submaster")
|
||||
self.aaf_file.dictionary.register_def(op_def)
|
||||
op_def.media_kind = self.media_kind
|
||||
datadef = self.aaf_file.dictionary.lookup_datadef(self.media_kind)
|
||||
|
||||
# These values are necessary for pyaaf2 OperationDefinitions
|
||||
op_def["IsTimeWarp"].value = False
|
||||
op_def["Bypass"].value = 0
|
||||
op_def["NumberInputs"].value = -1
|
||||
op_def["OperationCategory"].value = "OperationCategory_Effect"
|
||||
op_def["DataDefinition"].value = datadef
|
||||
|
||||
# Create OperationGroup
|
||||
operation_group = self.aaf_file.create.OperationGroup(op_def)
|
||||
operation_group.media_kind = self.media_kind
|
||||
operation_group["DataDefinition"].value = datadef
|
||||
|
||||
length = 0
|
||||
for nested_otio_child in otio_stack:
|
||||
result = self.transcribe(nested_otio_child)
|
||||
length += result.length
|
||||
operation_group.segments.append(result)
|
||||
operation_group.length = length
|
||||
return operation_group
|
||||
|
||||
def _create_tapemob(self, otio_clip):
|
||||
"""
|
||||
Return a physical sourcemob for an otio Clip based on the MobID.
|
||||
|
||||
Returns:
|
||||
Returns a tuple of (TapeMob, TapeMobSlot)
|
||||
"""
|
||||
tapemob = self.root_file_transcriber._unique_tapemob(otio_clip)
|
||||
tapemob_slot = tapemob.create_empty_slot(self.edit_rate, self.media_kind)
|
||||
tapemob_slot.segment.length = (
|
||||
otio_clip.media_reference.available_range.duration.value)
|
||||
return tapemob, tapemob_slot
|
||||
|
||||
def _create_filemob(self, otio_clip, tapemob, tapemob_slot):
|
||||
"""
|
||||
Return a file sourcemob for an otio Clip. Needs a tapemob and tapemob slot.
|
||||
|
||||
Returns:
|
||||
Returns a tuple of (FileMob, FileMobSlot)
|
||||
"""
|
||||
filemob = self.aaf_file.create.SourceMob()
|
||||
self.aaf_file.content.mobs.append(filemob)
|
||||
|
||||
filemob.descriptor = self.default_descriptor(otio_clip)
|
||||
filemob_slot = filemob.create_timeline_slot(self.edit_rate)
|
||||
filemob_clip = filemob.create_source_clip(
|
||||
slot_id=filemob_slot.slot_id,
|
||||
length=tapemob_slot.segment.length,
|
||||
media_kind=tapemob_slot.segment.media_kind)
|
||||
filemob_clip.mob = tapemob
|
||||
filemob_clip.slot = tapemob_slot
|
||||
filemob_clip.slot_id = tapemob_slot.slot_id
|
||||
filemob_slot.segment = filemob_clip
|
||||
return filemob, filemob_slot
|
||||
|
||||
def _create_mastermob(self, otio_clip, filemob, filemob_slot):
|
||||
"""
|
||||
Return a mastermob for an otio Clip. Needs a filemob and filemob slot.
|
||||
|
||||
Returns:
|
||||
Returns a tuple of (MasterMob, MasterMobSlot)
|
||||
"""
|
||||
mastermob = self.root_file_transcriber._unique_mastermob(otio_clip)
|
||||
timecode_length = otio_clip.media_reference.available_range.duration.value
|
||||
|
||||
try:
|
||||
mastermob_slot = mastermob.slot_at(self._master_mob_slot_id)
|
||||
except IndexError:
|
||||
mastermob_slot = (
|
||||
mastermob.create_timeline_slot(edit_rate=self.edit_rate,
|
||||
slot_id=self._master_mob_slot_id))
|
||||
mastermob_clip = mastermob.create_source_clip(
|
||||
slot_id=mastermob_slot.slot_id,
|
||||
length=timecode_length,
|
||||
media_kind=self.media_kind)
|
||||
mastermob_clip.mob = filemob
|
||||
mastermob_clip.slot = filemob_slot
|
||||
mastermob_clip.slot_id = filemob_slot.slot_id
|
||||
mastermob_slot.segment = mastermob_clip
|
||||
return mastermob, mastermob_slot
|
||||
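# A rough sketch of how the three mob helpers above are typically chained
# together when transcribing a clip (a hedged outline; the exact call site
# lives in aaf_sourceclip() and may differ slightly in detail):
#
#   tapemob, tapemob_slot = self._create_tapemob(otio_clip)
#   filemob, filemob_slot = self._create_filemob(otio_clip, tapemob, tapemob_slot)
#   mastermob, mastermob_slot = self._create_mastermob(otio_clip, filemob, filemob_slot)
#
# The resulting chain is CompositionMob -> MasterMob -> FileMob -> TapeMob,
# mirroring the physical-source hierarchy that AAF expects.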
|
||||
|
||||
class VideoTrackTranscriber(_TrackTranscriber):
|
||||
"""Video track kind specialization of TrackTranscriber."""
|
||||
|
||||
@property
|
||||
def media_kind(self):
|
||||
return "picture"
|
||||
|
||||
@property
|
||||
def _master_mob_slot_id(self):
|
||||
return 1
|
||||
|
||||
def _create_timeline_mobslot(self):
|
||||
"""
|
||||
Create a Sequence container (TimelineMobSlot) and Sequence.
|
||||
|
||||
TimelineMobSlot --> Sequence
|
||||
"""
|
||||
timeline_mobslot = self.compositionmob.create_timeline_slot(
|
||||
edit_rate=self.edit_rate)
|
||||
sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind)
|
||||
timeline_mobslot.segment = sequence
|
||||
return timeline_mobslot, sequence
|
||||
|
||||
def default_descriptor(self, otio_clip):
|
||||
# TODO: Determine if these values are correct, and if so,
|
||||
# maybe they should be in the AAF metadata
|
||||
descriptor = self.aaf_file.create.CDCIDescriptor()
|
||||
descriptor["ComponentWidth"].value = 8
|
||||
descriptor["HorizontalSubsampling"].value = 2
|
||||
descriptor["ImageAspectRatio"].value = "16/9"
|
||||
descriptor["StoredWidth"].value = 1920
|
||||
descriptor["StoredHeight"].value = 1080
|
||||
descriptor["FrameLayout"].value = "FullFrame"
|
||||
descriptor["VideoLineMap"].value = [42, 0]
|
||||
descriptor["SampleRate"].value = 24
|
||||
descriptor["Length"].value = 1
|
||||
return descriptor
|
||||
|
||||
def _transition_parameters(self):
|
||||
"""
|
||||
Return video transition parameters
|
||||
"""
|
||||
# Create ParameterDef for AvidParameterByteOrder
|
||||
byteorder_typedef = self.aaf_file.dictionary.lookup_typedef("aafUInt16")
|
||||
param_byteorder = self.aaf_file.create.ParameterDef(
|
||||
AAF_PARAMETERDEF_AVIDPARAMETERBYTEORDER,
|
||||
"AvidParameterByteOrder",
|
||||
"",
|
||||
byteorder_typedef)
|
||||
self.aaf_file.dictionary.register_def(param_byteorder)
|
||||
|
||||
# Create ParameterDef for AvidEffectID
|
||||
avid_effect_typdef = self.aaf_file.dictionary.lookup_typedef("AvidBagOfBits")
|
||||
param_effect_id = self.aaf_file.create.ParameterDef(
|
||||
AAF_PARAMETERDEF_AVIDEFFECTID,
|
||||
"AvidEffectID",
|
||||
"",
|
||||
avid_effect_typdef)
|
||||
self.aaf_file.dictionary.register_def(param_effect_id)
|
||||
|
||||
# Create ParameterDef for AFX_FG_KEY_OPACITY_U
|
||||
opacity_param_def = self.aaf_file.dictionary.lookup_typedef("Rational")
|
||||
opacity_param = self.aaf_file.create.ParameterDef(
|
||||
AAF_PARAMETERDEF_AFX_FG_KEY_OPACITY_U,
|
||||
"AFX_FG_KEY_OPACITY_U",
|
||||
"",
|
||||
opacity_param_def)
|
||||
self.aaf_file.dictionary.register_def(opacity_param)
|
||||
|
||||
# Create VaryingValue
|
||||
opacity_u = self.aaf_file.create.VaryingValue()
|
||||
opacity_u.parameterdef = self.aaf_file.dictionary.lookup_parameterdef(
|
||||
"AFX_FG_KEY_OPACITY_U")
|
||||
opacity_u["VVal_Extrapolation"].value = AAF_VVAL_EXTRAPOLATION_ID
|
||||
opacity_u["VVal_FieldCount"].value = 1
|
||||
|
||||
return [param_byteorder, param_effect_id], opacity_u
|
||||
|
||||
|
||||
class AudioTrackTranscriber(_TrackTranscriber):
|
||||
"""Audio track kind specialization of TrackTranscriber."""
|
||||
|
||||
@property
|
||||
def media_kind(self):
|
||||
return "sound"
|
||||
|
||||
@property
|
||||
def _master_mob_slot_id(self):
|
||||
return 2
|
||||
|
||||
def aaf_sourceclip(self, otio_clip):
|
||||
# Parameter Definition
|
||||
typedef = self.aaf_file.dictionary.lookup_typedef("Rational")
|
||||
param_def = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_PAN,
|
||||
"Pan",
|
||||
"Pan",
|
||||
typedef)
|
||||
self.aaf_file.dictionary.register_def(param_def)
|
||||
interp_def = self.aaf_file.create.InterpolationDef(aaf2.misc.LinearInterp,
|
||||
"LinearInterp",
|
||||
"LinearInterp")
|
||||
self.aaf_file.dictionary.register_def(interp_def)
|
||||
# PointList
|
||||
length = otio_clip.duration().value
|
||||
c1 = self.aaf_file.create.ControlPoint()
|
||||
c1["ControlPointSource"].value = 2
|
||||
c1["Time"].value = aaf2.rational.AAFRational("0/{}".format(length))
|
||||
c1["Value"].value = 0
|
||||
c2 = self.aaf_file.create.ControlPoint()
|
||||
c2["ControlPointSource"].value = 2
|
||||
c2["Time"].value = aaf2.rational.AAFRational("{}/{}".format(length - 1, length))
|
||||
c2["Value"].value = 0
|
||||
varying_value = self.aaf_file.create.VaryingValue()
|
||||
varying_value.parameterdef = param_def
|
||||
varying_value["Interpolation"].value = interp_def
|
||||
varying_value["PointList"].extend([c1, c2])
|
||||
opgroup = self.timeline_mobslot.segment
|
||||
opgroup.parameters.append(varying_value)
|
||||
|
||||
return super(AudioTrackTranscriber, self).aaf_sourceclip(otio_clip)
|
||||
|
||||
def _create_timeline_mobslot(self):
|
||||
"""
|
||||
Create a Sequence container (TimelineMobSlot) and Sequence.
|
||||
Sequence needs to be in an OperationGroup.
|
||||
|
||||
TimelineMobSlot --> OperationGroup --> Sequence
|
||||
"""
|
||||
# TimelineMobSlot
|
||||
timeline_mobslot = self.compositionmob.create_sound_slot(
|
||||
edit_rate=self.edit_rate)
|
||||
# OperationDefinition
|
||||
opdef = self.aaf_file.create.OperationDef(AAF_OPERATIONDEF_MONOAUDIOPAN,
|
||||
"Audio Pan")
|
||||
opdef.media_kind = self.media_kind
|
||||
opdef["NumberInputs"].value = 1
|
||||
self.aaf_file.dictionary.register_def(opdef)
|
||||
# OperationGroup
|
||||
total_length = sum([t.duration().value for t in self.otio_track])
|
||||
opgroup = self.aaf_file.create.OperationGroup(opdef)
|
||||
opgroup.media_kind = self.media_kind
|
||||
opgroup.length = total_length
|
||||
timeline_mobslot.segment = opgroup
|
||||
# Sequence
|
||||
sequence = self.aaf_file.create.Sequence(media_kind=self.media_kind)
|
||||
sequence.length = total_length
|
||||
opgroup.segments.append(sequence)
|
||||
return timeline_mobslot, sequence
|
||||
|
||||
def default_descriptor(self, otio_clip):
|
||||
descriptor = self.aaf_file.create.PCMDescriptor()
|
||||
descriptor["AverageBPS"].value = 96000
|
||||
descriptor["BlockAlign"].value = 2
|
||||
descriptor["QuantizationBits"].value = 16
|
||||
descriptor["AudioSamplingRate"].value = 48000
|
||||
descriptor["Channels"].value = 1
|
||||
descriptor["SampleRate"].value = 48000
|
||||
descriptor["Length"].value = (
|
||||
otio_clip.media_reference.available_range.duration.value)
|
||||
return descriptor
|
||||
|
||||
def _transition_parameters(self):
|
||||
"""
|
||||
Return audio transition parameters
|
||||
"""
|
||||
# Create ParameterDef for ParameterDef_Level
|
||||
def_level_typedef = self.aaf_file.dictionary.lookup_typedef("Rational")
|
||||
param_def_level = self.aaf_file.create.ParameterDef(AAF_PARAMETERDEF_LEVEL,
|
||||
"ParameterDef_Level",
|
||||
"",
|
||||
def_level_typedef)
|
||||
self.aaf_file.dictionary.register_def(param_def_level)
|
||||
|
||||
# Create VaryingValue
|
||||
level = self.aaf_file.create.VaryingValue()
|
||||
level.parameterdef = (
|
||||
self.aaf_file.dictionary.lookup_parameterdef("ParameterDef_Level"))
|
||||
|
||||
return [param_def_level], level
|
||||
|
||||
|
||||
class __check(object):
|
||||
"""
|
||||
__check is a private helper class that safely retrieves values so they can be checked
|
||||
for existence and equality
|
||||
"""
|
||||
|
||||
def __init__(self, obj, tokenpath):
|
||||
self.orig = obj
|
||||
self.value = obj
|
||||
self.errors = []
|
||||
self.tokenpath = tokenpath
|
||||
try:
|
||||
for token in re.split(r"[\.\[]", tokenpath):
|
||||
if token.endswith("()"):
|
||||
self.value = getattr(self.value, token.replace("()", ""))()
|
||||
elif "]" in token:
|
||||
self.value = self.value[token.strip("[]'\"")]
|
||||
else:
|
||||
self.value = getattr(self.value, token)
|
||||
except Exception as e:
|
||||
self.value = None
|
||||
self.errors.append("{}{} {}.{} does not exist, {}".format(
|
||||
self.orig.name if hasattr(self.orig, "name") else "",
|
||||
type(self.orig),
|
||||
type(self.orig).__name__,
|
||||
self.tokenpath, e))
|
||||
|
||||
def equals(self, val):
|
||||
"""Check if the retrieved value is equal to a given value."""
|
||||
if self.value is not None and self.value != val:
|
||||
self.errors.append(
|
||||
"{}{} {}.{} not equal to {} (expected) != {} (actual)".format(
|
||||
self.orig.name if hasattr(self.orig, "name") else "",
|
||||
type(self.orig),
|
||||
type(self.orig).__name__, self.tokenpath, val, self.value))
|
||||
return self
|
||||
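# A minimal usage sketch for __check (the object and token path below are
# purely illustrative): chain attribute/key lookups in a single token path
# and collect problems instead of raising.
#
#   errors = __check(
#       otio_clip, "media_reference.available_range.duration.value"
#   ).equals(100).errors
#
# If any token in the path is missing, or the final value differs from 100,
# a message is appended to `errors` rather than an exception being raised.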
979
pype/vendor/python/python_2/opentimelineio_contrib/adapters/advanced_authoring_format.py
vendored
Normal file
|
|
@ -0,0 +1,979 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""OpenTimelineIO Advanced Authoring Format (AAF) Adapter
|
||||
|
||||
Depending on if/where PyAAF is installed, you may need to set this env var:
|
||||
OTIO_AAF_PYTHON_LIB - should point at the PyAAF module.
|
||||
"""
|
||||
|
||||
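# A minimal sketch of pointing this adapter at an external PyAAF build before
# importing OTIO (the path below is purely illustrative):
#
#   export OTIO_AAF_PYTHON_LIB=/path/to/pyaaf2
#
# or, from Python, before `import opentimelineio`:
#
#   os.environ["OTIO_AAF_PYTHON_LIB"] = "/path/to/pyaaf2"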
import os
|
||||
import sys
|
||||
import numbers
|
||||
import copy
|
||||
from collections import Iterable
|
||||
import opentimelineio as otio
|
||||
|
||||
lib_path = os.environ.get("OTIO_AAF_PYTHON_LIB")
|
||||
if lib_path and lib_path not in sys.path:
|
||||
sys.path.insert(0, lib_path)
|
||||
|
||||
import aaf2 # noqa: E402
|
||||
import aaf2.content # noqa: E402
|
||||
import aaf2.mobs # noqa: E402
|
||||
import aaf2.components # noqa: E402
|
||||
import aaf2.core # noqa: E402
|
||||
from opentimelineio_contrib.adapters.aaf_adapter import aaf_writer # noqa: E402
|
||||
|
||||
|
||||
debug = False
|
||||
__names = set()
|
||||
|
||||
|
||||
def _get_parameter(item, parameter_name):
|
||||
values = dict((value.name, value) for value in item.parameters.value)
|
||||
return values.get(parameter_name)
|
||||
|
||||
|
||||
def _get_name(item):
|
||||
if isinstance(item, aaf2.components.SourceClip):
|
||||
try:
|
||||
return item.mob.name or "Untitled SourceClip"
|
||||
except AttributeError:
|
||||
# Some AAFs produce this error:
|
||||
# RuntimeError: failed with [-2146303738]: mob not found
|
||||
return "SourceClip Missing Mob?"
|
||||
if hasattr(item, 'name'):
|
||||
name = item.name
|
||||
if name:
|
||||
return name
|
||||
return _get_class_name(item)
|
||||
|
||||
|
||||
def _get_class_name(item):
|
||||
if hasattr(item, "class_name"):
|
||||
return item.class_name
|
||||
else:
|
||||
return item.__class__.__name__
|
||||
|
||||
|
||||
def _transcribe_property(prop):
|
||||
# XXX: The unicode type doesn't exist in Python 3 (all strings are unicode)
|
||||
# so we have to use type(u"") which works in both Python 2 and 3.
|
||||
if isinstance(prop, (str, type(u""), numbers.Integral, float)):
|
||||
return prop
|
||||
|
||||
elif isinstance(prop, list):
|
||||
result = {}
|
||||
for child in prop:
|
||||
if hasattr(child, "name") and hasattr(child, "value"):
|
||||
result[child.name] = _transcribe_property(child.value)
|
||||
else:
|
||||
# @TODO: There may be more properties that we might want also.
|
||||
# If you want to see what is being skipped, turn on debug.
|
||||
if debug:
|
||||
debug_message = \
|
||||
"Skipping unrecognized property: {} of parent {}"
|
||||
print(debug_message.format(child, prop))
|
||||
return result
|
||||
elif hasattr(prop, "properties"):
|
||||
result = {}
|
||||
for child in prop.properties():
|
||||
result[child.name] = _transcribe_property(child.value)
|
||||
return result
|
||||
else:
|
||||
return str(prop)
|
||||
|
||||
|
||||
def _find_timecode_mobs(item):
|
||||
mobs = [item.mob]
|
||||
|
||||
for c in item.walk():
|
||||
if isinstance(c, aaf2.components.SourceClip):
|
||||
mob = c.mob
|
||||
if mob:
|
||||
mobs.append(mob)
|
||||
else:
|
||||
continue
|
||||
else:
|
||||
# This could be 'EssenceGroup', 'Pulldown' or other segment
|
||||
# subclasses
|
||||
# See also: https://jira.pixar.com/browse/SE-3457
|
||||
# For example:
|
||||
# An EssenceGroup is a Segment that has one or more
|
||||
# alternate choices, each of which represent different variations
|
||||
# of one actual piece of content.
|
||||
# According to the AAF Object Specification and Edit Protocol
|
||||
# documents:
|
||||
# "Typically the different representations vary in essence format,
|
||||
# compression, or frame size. The application is responsible for
|
||||
# choosing the appropriate implementation of the essence."
|
||||
# It also says they should all have the same length, but
|
||||
# there might be nested Sequences inside which we're not attempting
|
||||
# to handle here (yet). We'll need a concrete example to ensure
|
||||
# we're doing the right thing.
|
||||
# TODO: Is the Timecode for an EssenceGroup correct?
|
||||
# TODO: Try CountChoices() and ChoiceAt(i)
|
||||
# For now, lets just skip it.
|
||||
continue
|
||||
|
||||
return mobs
|
||||
|
||||
|
||||
def _extract_timecode_info(mob):
|
||||
"""Given a mob with a single timecode slot, return the timecode and length
|
||||
in that slot as a tuple
|
||||
"""
|
||||
timecodes = [slot.segment for slot in mob.slots
|
||||
if isinstance(slot.segment, aaf2.components.Timecode)]
|
||||
|
||||
if len(timecodes) == 1:
|
||||
timecode = timecodes[0]
|
||||
timecode_start = timecode.getvalue('Start')
|
||||
timecode_length = timecode.getvalue('Length')
|
||||
|
||||
if timecode_start is None or timecode_length is None:
|
||||
raise otio.exceptions.NotSupportedError(
|
||||
"Unexpected timecode value(s) in mob named: `{}`."
|
||||
" `Start`: {}, `Length`: {}".format(mob.name,
|
||||
timecode_start,
|
||||
timecode_length)
|
||||
)
|
||||
|
||||
return timecode_start, timecode_length
|
||||
elif len(timecodes) > 1:
|
||||
raise otio.exceptions.NotSupportedError(
|
||||
"Error: mob has more than one timecode slots, this is not"
|
||||
" currently supported by the AAF adapter. found: {} slots, "
|
||||
" mob name is: '{}'".format(len(timecodes), mob.name)
|
||||
)
|
||||
else:
|
||||
return None
|
||||
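# A worked example (values are illustrative): a mob whose single Timecode
# slot has Start=86400 and Length=1000 at 24fps makes this function return
# (86400, 1000), i.e. a 01:00:00:00 start. Those values later seed the
# clip's media available_range in _transcribe().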
|
||||
|
||||
def _add_child(parent, child, source):
|
||||
if child is None:
|
||||
if debug:
|
||||
print("Adding null child? {}".format(source))
|
||||
elif isinstance(child, otio.schema.Marker):
|
||||
parent.markers.append(child)
|
||||
else:
|
||||
parent.append(child)
|
||||
|
||||
|
||||
def _transcribe(item, parents, editRate, masterMobs):
|
||||
result = None
|
||||
metadata = {}
|
||||
|
||||
# First lets grab some standard properties that are present on
|
||||
# many types of AAF objects...
|
||||
metadata["Name"] = _get_name(item)
|
||||
metadata["ClassName"] = _get_class_name(item)
|
||||
|
||||
# Some AAF objects (like TimelineMobSlot) have an edit rate
|
||||
# which should be used for all of the object's children.
|
||||
# We will pass it on to any recursive calls to _transcribe()
|
||||
if hasattr(item, "edit_rate"):
|
||||
editRate = float(item.edit_rate)
|
||||
|
||||
if isinstance(item, aaf2.components.Component):
|
||||
metadata["Length"] = item.length
|
||||
|
||||
if isinstance(item, aaf2.core.AAFObject):
|
||||
for prop in item.properties():
|
||||
if hasattr(prop, 'name') and hasattr(prop, 'value'):
|
||||
key = str(prop.name)
|
||||
value = prop.value
|
||||
metadata[key] = _transcribe_property(value)
|
||||
|
||||
# Now we will use the item's class to determine which OTIO type
|
||||
# to transcribe into. Note that the order of this if/elif/... chain
|
||||
# is important, because the class hierarchy of AAF objects is more
|
||||
# complex than OTIO.
|
||||
|
||||
if isinstance(item, aaf2.content.ContentStorage):
|
||||
result = otio.schema.SerializableCollection()
|
||||
|
||||
# Gather all the Master Mobs, so we can find them later by MobID
|
||||
# when we parse the SourceClips in the composition
|
||||
if masterMobs is None:
|
||||
masterMobs = {}
|
||||
for mob in item.mastermobs():
|
||||
child = _transcribe(mob, parents + [item], editRate, masterMobs)
|
||||
if child is not None:
|
||||
mobID = child.metadata.get("AAF", {}).get("MobID")
|
||||
masterMobs[mobID] = child
|
||||
|
||||
for mob in item.compositionmobs():
|
||||
child = _transcribe(mob, parents + [item], editRate, masterMobs)
|
||||
_add_child(result, child, mob)
|
||||
|
||||
elif isinstance(item, aaf2.mobs.Mob):
|
||||
result = otio.schema.Timeline()
|
||||
|
||||
for slot in item.slots:
|
||||
track = _transcribe(slot, parents + [item], editRate, masterMobs)
|
||||
_add_child(result.tracks, track, slot)
|
||||
|
||||
# Use a heuristic to find the starting timecode from
|
||||
# this track and use it for the Timeline's global_start_time
|
||||
start_time = _find_timecode_track_start(track)
|
||||
if start_time:
|
||||
result.global_start_time = start_time
|
||||
|
||||
elif isinstance(item, aaf2.components.SourceClip):
|
||||
result = otio.schema.Clip()
|
||||
|
||||
# Evidently the last mob is the one with the timecode
|
||||
mobs = _find_timecode_mobs(item)
|
||||
# Get the Timecode start and length values
|
||||
last_mob = mobs[-1] if mobs else None
|
||||
timecode_info = _extract_timecode_info(last_mob) if last_mob else None
|
||||
|
||||
source_start = int(metadata.get("StartTime", "0"))
|
||||
source_length = item.length
|
||||
media_start = source_start
|
||||
media_length = item.length
|
||||
|
||||
if timecode_info:
|
||||
media_start, media_length = timecode_info
|
||||
source_start += media_start
|
||||
|
||||
# The goal here is to find a source range. Actual editorial opinions are found on SourceClips in the
|
||||
# CompositionMobs. To figure out whether this clip is directly in the CompositionMob, we detect if our
|
||||
# parent mobs are only CompositionMobs. If they were anything else - a MasterMob, a SourceMob, we would
|
||||
# know that this is in some indirect relationship.
|
||||
parent_mobs = filter(lambda parent: isinstance(parent, aaf2.mobs.Mob), parents)
|
||||
is_directly_in_composition = all(isinstance(mob, aaf2.mobs.CompositionMob) for mob in parent_mobs)
|
||||
if is_directly_in_composition:
|
||||
result.source_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(source_start, editRate),
|
||||
otio.opentime.RationalTime(source_length, editRate)
|
||||
)
|
||||
|
||||
# The goal here is to find an available range. Media ranges are stored in the related MasterMob, and there
|
||||
# should only be one - hence the name "Master" mob. Somewhere down our chain (either a child or our parents)
|
||||
# is a MasterMob. For SourceClips in the CompositionMob, it is our child. For everything else, it is a
|
||||
# previously encountered parent. Find the MasterMob in our chain, and then extract the information from that.
|
||||
child_mastermob = item.mob if isinstance(item.mob, aaf2.mobs.MasterMob) else None
|
||||
parent_mastermobs = [parent for parent in parents if isinstance(parent, aaf2.mobs.MasterMob)]
|
||||
parent_mastermob = parent_mastermobs[0] if len(parent_mastermobs) > 1 else None
|
||||
mastermob = child_mastermob or parent_mastermob or None
|
||||
|
||||
if mastermob:
|
||||
media = otio.schema.MissingReference()
|
||||
media.available_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(media_start, editRate),
|
||||
otio.opentime.RationalTime(media_length, editRate)
|
||||
)
|
||||
# copy the metadata from the master into the media_reference
|
||||
mastermob_child = masterMobs.get(str(mastermob.mob_id))
|
||||
media.metadata["AAF"] = mastermob_child.metadata.get("AAF", {})
|
||||
result.media_reference = media
|
||||
|
||||
elif isinstance(item, aaf2.components.Transition):
|
||||
result = otio.schema.Transition()
|
||||
|
||||
# Does AAF support anything else?
|
||||
result.transition_type = otio.schema.TransitionTypes.SMPTE_Dissolve
|
||||
|
||||
# Extract value and time attributes of both ControlPoints used for
|
||||
# creating AAF Transition objects
|
||||
varying_value = None
|
||||
for param in item.getvalue('OperationGroup').parameters:
|
||||
if isinstance(param, aaf2.misc.VaryingValue):
|
||||
varying_value = param
|
||||
break
|
||||
|
||||
if varying_value is not None:
|
||||
for control_point in varying_value.getvalue('PointList'):
|
||||
value = control_point.value
|
||||
time = control_point.time
|
||||
metadata.setdefault('PointList', []).append({'Value': value,
|
||||
'Time': time})
|
||||
|
||||
in_offset = int(metadata.get("CutPoint", "0"))
|
||||
out_offset = item.length - in_offset
|
||||
result.in_offset = otio.opentime.RationalTime(in_offset, editRate)
|
||||
result.out_offset = otio.opentime.RationalTime(out_offset, editRate)
|
||||
|
||||
elif isinstance(item, aaf2.components.Filler):
|
||||
result = otio.schema.Gap()
|
||||
|
||||
length = item.length
|
||||
result.source_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(0, editRate),
|
||||
otio.opentime.RationalTime(length, editRate)
|
||||
)
|
||||
|
||||
elif isinstance(item, aaf2.components.NestedScope):
|
||||
# TODO: Is this the right class?
|
||||
result = otio.schema.Stack()
|
||||
|
||||
for slot in item.slots:
|
||||
child = _transcribe(slot, parents + [item], editRate, masterMobs)
|
||||
_add_child(result, child, slot)
|
||||
|
||||
elif isinstance(item, aaf2.components.Sequence):
|
||||
result = otio.schema.Track()
|
||||
|
||||
for component in item.components:
|
||||
child = _transcribe(component, parents + [item], editRate, masterMobs)
|
||||
_add_child(result, child, component)
|
||||
|
||||
elif isinstance(item, aaf2.components.OperationGroup):
|
||||
result = _transcribe_operation_group(
|
||||
item, parents, metadata, editRate, masterMobs
|
||||
)
|
||||
|
||||
elif isinstance(item, aaf2.mobslots.TimelineMobSlot):
|
||||
result = otio.schema.Track()
|
||||
|
||||
child = _transcribe(item.segment, parents + [item], editRate, masterMobs)
|
||||
_add_child(result, child, item.segment)
|
||||
|
||||
elif isinstance(item, aaf2.mobslots.MobSlot):
|
||||
result = otio.schema.Track()
|
||||
|
||||
child = _transcribe(item.segment, parents + [item], editRate, masterMobs)
|
||||
_add_child(result, child, item.segment)
|
||||
|
||||
elif isinstance(item, aaf2.components.Timecode):
|
||||
pass
|
||||
|
||||
elif isinstance(item, aaf2.components.Pulldown):
|
||||
pass
|
||||
|
||||
elif isinstance(item, aaf2.components.EdgeCode):
|
||||
pass
|
||||
|
||||
elif isinstance(item, aaf2.components.ScopeReference):
|
||||
# TODO: is this like FILLER?
|
||||
|
||||
result = otio.schema.Gap()
|
||||
|
||||
length = item.length
|
||||
result.source_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(0, editRate),
|
||||
otio.opentime.RationalTime(length, editRate)
|
||||
)
|
||||
|
||||
elif isinstance(item, aaf2.components.DescriptiveMarker):
|
||||
|
||||
# Markers come in on their own separate Track.
|
||||
# TODO: We should consolidate them onto the same track(s) as the clips
|
||||
# result = otio.schema.Marker()
|
||||
pass
|
||||
|
||||
elif isinstance(item, aaf2.components.Selector):
|
||||
# If you mute a clip in Media Composer, it becomes one of these in the
|
||||
# AAF.
|
||||
result = _transcribe(
|
||||
item.getvalue("Selected"),
|
||||
parents + [item],
|
||||
editRate,
|
||||
masterMobs
|
||||
)
|
||||
|
||||
alternates = [
|
||||
_transcribe(alt, parents + [item], editRate, masterMobs)
|
||||
for alt in item.getvalue("Alternates")
|
||||
]
|
||||
|
||||
# muted case -- if there is only one item it's muted, otherwise it's
|
||||
# a multi-cam thing
|
||||
if alternates and len(alternates) == 1:
|
||||
metadata['muted_clip'] = True
|
||||
result.name = str(alternates[0].name) + "_MUTED"
|
||||
|
||||
metadata['alternates'] = alternates
|
||||
|
||||
# @TODO: There are a bunch of other AAF object types that we will
|
||||
# likely need to add support for. I'm leaving this code here to help
|
||||
# future efforts to extract the useful information out of these.
|
||||
|
||||
# elif isinstance(item, aaf.storage.File):
|
||||
# self.extendChildItems([item.header])
|
||||
|
||||
# elif isinstance(item, aaf.storage.Header):
|
||||
# self.extendChildItems([item.storage()])
|
||||
# self.extendChildItems([item.dictionary()])
|
||||
|
||||
# elif isinstance(item, aaf.dictionary.Dictionary):
|
||||
# l = []
|
||||
# l.append(DummyItem(list(item.class_defs()), 'ClassDefs'))
|
||||
# l.append(DummyItem(list(item.codec_defs()), 'CodecDefs'))
|
||||
# l.append(DummyItem(list(item.container_defs()), 'ContainerDefs'))
|
||||
# l.append(DummyItem(list(item.data_defs()), 'DataDefs'))
|
||||
# l.append(DummyItem(list(item.interpolation_defs()),
|
||||
# 'InterpolationDefs'))
|
||||
# l.append(DummyItem(list(item.klvdata_defs()), 'KLVDataDefs'))
|
||||
# l.append(DummyItem(list(item.operation_defs()), 'OperationDefs'))
|
||||
# l.append(DummyItem(list(item.parameter_defs()), 'ParameterDefs'))
|
||||
# l.append(DummyItem(list(item.plugin_defs()), 'PluginDefs'))
|
||||
# l.append(DummyItem(list(item.taggedvalue_defs()), 'TaggedValueDefs'))
|
||||
# l.append(DummyItem(list(item.type_defs()), 'TypeDefs'))
|
||||
# self.extendChildItems(l)
|
||||
#
|
||||
# elif isinstance(item, pyaaf.AxSelector):
|
||||
# self.extendChildItems(list(item.EnumAlternateSegments()))
|
||||
#
|
||||
# elif isinstance(item, pyaaf.AxScopeReference):
|
||||
# #print item, item.GetRelativeScope(),item.GetRelativeSlot()
|
||||
# pass
|
||||
#
|
||||
# elif isinstance(item, pyaaf.AxEssenceGroup):
|
||||
# segments = []
|
||||
#
|
||||
# for i in xrange(item.CountChoices()):
|
||||
# choice = item.GetChoiceAt(i)
|
||||
# segments.append(choice)
|
||||
# self.extendChildItems(segments)
|
||||
#
|
||||
# elif isinstance(item, pyaaf.AxProperty):
|
||||
# self.properties['Value'] = str(item.GetValue())
|
||||
|
||||
elif isinstance(item, Iterable):
|
||||
result = otio.schema.SerializableCollection()
|
||||
for child in item:
|
||||
result.append(
|
||||
_transcribe(
|
||||
child,
|
||||
parents + [item],
|
||||
editRate,
|
||||
masterMobs
|
||||
)
|
||||
)
|
||||
else:
|
||||
# For everything else, we just ignore it.
|
||||
# To see what is being ignored, turn on the debug flag
|
||||
if debug:
|
||||
print("SKIPPING: {}: {} -- {}".format(type(item), item, result))
|
||||
|
||||
# Did we get anything? If not, we're done
|
||||
if result is None:
|
||||
return None
|
||||
|
||||
# Okay, now we've turned the AAF thing into an OTIO result
|
||||
# There's a bit more we can do before we're ready to return the result.
|
||||
|
||||
# If we didn't get a name yet, use the one we have in metadata
|
||||
if result.name is None:
|
||||
result.name = metadata["Name"]
|
||||
|
||||
# Attach the AAF metadata
|
||||
if not result.metadata:
|
||||
result.metadata = {}
|
||||
result.metadata["AAF"] = metadata
|
||||
|
||||
# Double check that we got the length we expected
|
||||
if isinstance(result, otio.core.Item):
|
||||
length = metadata.get("Length")
|
||||
if (
|
||||
length
|
||||
and result.source_range is not None
|
||||
and result.source_range.duration.value != length
|
||||
):
|
||||
raise otio.exceptions.OTIOError(
|
||||
"Wrong duration? {} should be {} in {}".format(
|
||||
result.source_range.duration.value,
|
||||
length,
|
||||
result
|
||||
)
|
||||
)
|
||||
|
||||
# Did we find a Track?
|
||||
if isinstance(result, otio.schema.Track):
|
||||
# Try to figure out the kind of Track it is
|
||||
if hasattr(item, 'media_kind'):
|
||||
media_kind = str(item.media_kind)
|
||||
result.metadata["AAF"]["MediaKind"] = media_kind
|
||||
if media_kind == "Picture":
|
||||
result.kind = otio.schema.TrackKind.Video
|
||||
elif media_kind in ("SoundMasterTrack", "Sound"):
|
||||
result.kind = otio.schema.TrackKind.Audio
|
||||
else:
|
||||
# Timecode, Edgecode, others?
|
||||
result.kind = None
|
||||
|
||||
# Done!
|
||||
return result
|
||||
|
||||
|
||||
def _find_timecode_track_start(track):
|
||||
# See if we can find a starting timecode in here...
|
||||
aaf_metadata = track.metadata.get("AAF", {})
|
||||
|
||||
# Is this a Timecode track?
|
||||
if aaf_metadata.get("MediaKind") == "Timecode":
|
||||
edit_rate = aaf_metadata.get("EditRate", "0")
|
||||
fps = aaf_metadata.get("Segment", {}).get("FPS", 0)
|
||||
start = aaf_metadata.get("Segment", {}).get("Start", "0")
|
||||
|
||||
# Oftentimes there are several timecode tracks, so
|
||||
# we use a heuristic to only pay attention to Timecode
|
||||
# tracks with a FPS that matches the edit rate.
|
||||
if edit_rate == str(fps):
|
||||
return otio.opentime.RationalTime(
|
||||
value=int(start),
|
||||
rate=float(edit_rate)
|
||||
)
|
||||
|
||||
# We didn't find anything useful
|
||||
return None
|
||||
|
||||
|
||||
def _transcribe_linear_timewarp(item, parameters):
|
||||
# this is a linear time warp
|
||||
effect = otio.schema.LinearTimeWarp()
|
||||
|
||||
offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U')
|
||||
|
||||
# If we have a LinearInterp with just 2 control points, then
|
||||
# we can compute the time_scalar. Note that the SpeedRatio is
|
||||
# NOT correct in many AAFs - we aren't sure why, but luckily we
|
||||
# can compute the correct value this way.
|
||||
points = offset_map.get("PointList")
|
||||
if len(points) > 2:
|
||||
# This is something complicated... try the fancy version
|
||||
return _transcribe_fancy_timewarp(item, parameters)
|
||||
elif (
|
||||
len(points) == 2
|
||||
and float(points[0].time) == 0
|
||||
and float(points[0].value) == 0
|
||||
):
|
||||
# With just two points, we can compute the slope
|
||||
effect.time_scalar = float(points[1].value) / float(points[1].time)
|
||||
else:
|
||||
# Fall back to the SpeedRatio if we didn't understand the points
|
||||
ratio = parameters.get("SpeedRatio")
|
||||
if ratio == str(item.length):
|
||||
# If the SpeedRatio == the length, this is a freeze frame
|
||||
effect.time_scalar = 0
|
||||
elif '/' in ratio:
|
||||
numerator, denominator = map(float, ratio.split('/'))
|
||||
# OTIO time_scalar is 1/x from AAF's SpeedRatio
|
||||
effect.time_scalar = denominator / numerator
|
||||
else:
|
||||
effect.time_scalar = 1.0 / float(ratio)
|
||||
|
||||
# Is this a freeze frame?
|
||||
if effect.time_scalar == 0:
|
||||
# Note: we might end up here if any of the code paths above
|
||||
# produced a 0 time_scalar.
|
||||
# Use the FreezeFrame class instead of LinearTimeWarp
|
||||
effect = otio.schema.FreezeFrame()
|
||||
|
||||
return effect
|
||||
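# A worked example of the slope computation above (illustrative numbers):
# with control points (time=0, value=0) and (time=25, value=50), the clip
# advances 50 source frames over 25 output frames, so
#
#   effect.time_scalar = 50.0 / 25.0  # == 2.0, i.e. 200% speed
#
# and a SpeedRatio of "1/2" would yield the same 2.0 via denominator/numerator.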
|
||||
|
||||
def _transcribe_fancy_timewarp(item, parameters):
|
||||
|
||||
# For now, this is an unsupported time effect...
|
||||
effect = otio.schema.TimeEffect()
|
||||
effect.effect_name = None # Unsupported
|
||||
effect.name = item.get("Name")
|
||||
|
||||
return effect
|
||||
|
||||
# TODO: Here is some sample code that pulls out the full
|
||||
# details of a non-linear speed map.
|
||||
|
||||
# speed_map = item.parameter['PARAM_SPEED_MAP_U']
|
||||
# offset_map = item.parameter['PARAM_SPEED_OFFSET_MAP_U']
|
||||
# Also? PARAM_OFFSET_MAP_U (without the word "SPEED" in it?)
|
||||
# print(speed_map['PointList'].value)
|
||||
# print(speed_map.count())
|
||||
# print(speed_map.interpolation_def().name)
|
||||
#
|
||||
# for p in speed_map.points():
|
||||
# print(" ", float(p.time), float(p.value), p.edit_hint)
|
||||
# for prop in p.point_properties():
|
||||
# print(" ", prop.name, prop.value, float(prop.value))
|
||||
#
|
||||
# print(offset_map.interpolation_def().name)
|
||||
# for p in offset_map.points():
|
||||
# edit_hint = p.edit_hint
|
||||
# time = p.time
|
||||
# value = p.value
|
||||
#
|
||||
# pass
|
||||
# # print " ", float(p.time), float(p.value)
|
||||
#
|
||||
# for i in range(100):
|
||||
# float(offset_map.value_at("%i/100" % i))
|
||||
#
|
||||
# # Test file PARAM_SPEED_MAP_U is AvidBezierInterpolator
|
||||
# # currently no implementation of value_at
|
||||
# try:
|
||||
# speed_map.value_at(.25)
|
||||
# except NotImplementedError:
|
||||
# pass
|
||||
# else:
|
||||
# raise
|
||||
|
||||
|
||||
def _transcribe_operation_group(item, parents, metadata, editRate, masterMobs):
|
||||
result = otio.schema.Stack()
|
||||
|
||||
operation = metadata.get("Operation", {})
|
||||
parameters = metadata.get("Parameters", {})
|
||||
result.name = operation.get("Name")
|
||||
|
||||
# Trust the length that is specified in the AAF
|
||||
length = metadata.get("Length")
|
||||
result.source_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(0, editRate),
|
||||
otio.opentime.RationalTime(length, editRate)
|
||||
)
|
||||
|
||||
# Look for speed effects...
|
||||
effect = None
|
||||
if operation.get("IsTimeWarp"):
|
||||
if operation.get("Name") == "Motion Control":
|
||||
|
||||
offset_map = _get_parameter(item, 'PARAM_SPEED_OFFSET_MAP_U')
|
||||
# TODO: We should also check the PARAM_OFFSET_MAP_U which has
|
||||
# an interpolation_def().name as well.
|
||||
if offset_map is not None:
|
||||
interpolation = offset_map.interpolation.name
|
||||
else:
|
||||
interpolation = None
|
||||
|
||||
if interpolation == "LinearInterp":
|
||||
effect = _transcribe_linear_timewarp(item, parameters)
|
||||
else:
|
||||
effect = _transcribe_fancy_timewarp(item, parameters)
|
||||
|
||||
else:
|
||||
# Unsupported time effect
|
||||
effect = otio.schema.TimeEffect()
|
||||
effect.effect_name = None # Unsupported
|
||||
effect.name = operation.get("Name")
|
||||
else:
|
||||
# Unsupported effect
|
||||
effect = otio.schema.Effect()
|
||||
effect.effect_name = None # Unsupported
|
||||
effect.name = operation.get("Name")
|
||||
|
||||
if effect is not None:
|
||||
result.effects.append(effect)
|
||||
effect.metadata = {
|
||||
"AAF": {
|
||||
"Operation": operation,
|
||||
"Parameters": parameters
|
||||
}
|
||||
}
|
||||
|
||||
for segment in item.getvalue("InputSegments"):
|
||||
child = _transcribe(segment, parents + [item], editRate, masterMobs)
|
||||
if child:
|
||||
_add_child(result, child, segment)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _fix_transitions(thing):
|
||||
if isinstance(thing, otio.schema.Timeline):
|
||||
_fix_transitions(thing.tracks)
|
||||
elif (
|
||||
isinstance(thing, otio.core.Composition)
|
||||
or isinstance(thing, otio.schema.SerializableCollection)
|
||||
):
|
||||
if isinstance(thing, otio.schema.Track):
|
||||
for c, child in enumerate(thing):
|
||||
|
||||
# Don't touch the Transitions themselves,
|
||||
# only the Clips & Gaps next to them.
|
||||
if not isinstance(child, otio.core.Item):
|
||||
continue
|
||||
|
||||
# Was the item before us a Transition?
|
||||
if c > 0 and isinstance(
|
||||
thing[c - 1],
|
||||
otio.schema.Transition
|
||||
):
|
||||
pre_trans = thing[c - 1]
|
||||
|
||||
if child.source_range is None:
|
||||
child.source_range = child.trimmed_range()
|
||||
csr = child.source_range
|
||||
child.source_range = otio.opentime.TimeRange(
|
||||
start_time=csr.start_time + pre_trans.in_offset,
|
||||
duration=csr.duration - pre_trans.in_offset
|
||||
)
|
||||
|
||||
# Is the item after us a Transition?
|
||||
if c < len(thing) - 1 and isinstance(
|
||||
thing[c + 1],
|
||||
otio.schema.Transition
|
||||
):
|
||||
post_trans = thing[c + 1]
|
||||
|
||||
if child.source_range is None:
|
||||
child.source_range = child.trimmed_range()
|
||||
csr = child.source_range
|
||||
child.source_range = otio.opentime.TimeRange(
|
||||
start_time=csr.start_time,
|
||||
duration=csr.duration - post_trans.out_offset
|
||||
)
|
||||
|
||||
for child in thing:
|
||||
_fix_transitions(child)
|
||||
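# A worked example of the trimming above (illustrative numbers): for a
# 12-frame dissolve with in_offset=6 and out_offset=6, the clip after the
# transition has its source_range start pushed later by 6 frames and its
# duration shortened by 6, while the clip before the transition only loses
# 6 frames off the end of its duration. The Transition itself is untouched.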
|
||||
|
||||
def _simplify(thing):
|
||||
if isinstance(thing, otio.schema.SerializableCollection):
|
||||
if len(thing) == 1:
|
||||
return _simplify(thing[0])
|
||||
else:
|
||||
for c, child in enumerate(thing):
|
||||
thing[c] = _simplify(child)
|
||||
return thing
|
||||
|
||||
elif isinstance(thing, otio.schema.Timeline):
|
||||
result = _simplify(thing.tracks)
|
||||
|
||||
# Only replace the Timeline's stack if the simplified result
|
||||
# was also a Stack. Otherwise leave it (the contents will have
|
||||
# been simplified in place).
|
||||
if isinstance(result, otio.schema.Stack):
|
||||
thing.tracks = result
|
||||
|
||||
return thing
|
||||
|
||||
elif isinstance(thing, otio.core.Composition):
|
||||
# simplify our children
|
||||
for c, child in enumerate(thing):
|
||||
thing[c] = _simplify(child)
|
||||
|
||||
# remove empty children of Stacks
|
||||
if isinstance(thing, otio.schema.Stack):
|
||||
for c in reversed(range(len(thing))):
|
||||
child = thing[c]
|
||||
if not _contains_something_valuable(child):
|
||||
# TODO: We're discarding metadata... should we retain it?
|
||||
del thing[c]
|
||||
|
||||
# Look for Stacks within Stacks
|
||||
c = len(thing) - 1
|
||||
while c >= 0:
|
||||
child = thing[c]
|
||||
# Is my child a Stack also? (with no effects)
|
||||
if (
|
||||
not _has_effects(child)
|
||||
and
|
||||
(
|
||||
isinstance(child, otio.schema.Stack)
|
||||
or (
|
||||
isinstance(child, otio.schema.Track)
|
||||
and len(child) == 1
|
||||
and isinstance(child[0], otio.schema.Stack)
|
||||
and child[0]
|
||||
and isinstance(child[0][0], otio.schema.Track)
|
||||
)
|
||||
)
|
||||
):
|
||||
if isinstance(child, otio.schema.Track):
|
||||
child = child[0]
|
||||
|
||||
# Pull the child's children into the parent
|
||||
num = len(child)
|
||||
children_of_child = child[:]
|
||||
# clear out the ownership of 'child'
|
||||
del child[:]
|
||||
thing[c:c + 1] = children_of_child
|
||||
|
||||
# TODO: We may be discarding metadata, should we merge it?
|
||||
# TODO: Do we need to offset the markers in time?
|
||||
thing.markers.extend(child.markers)
|
||||
# Note: we don't merge effects, because we already made
|
||||
# sure the child had no effects in the if statement above.
|
||||
|
||||
c = c + num
|
||||
c = c - 1
|
||||
|
||||
# skip redundant containers
|
||||
if _is_redundant_container(thing):
|
||||
# TODO: We may be discarding metadata here, should we merge it?
|
||||
result = thing[0].deepcopy()
|
||||
# TODO: Do we need to offset the markers in time?
|
||||
result.markers.extend(thing.markers)
|
||||
# TODO: The order of the effects is probably important...
|
||||
# should they be added to the end or the front?
|
||||
# Intuitively it seems like the child's effects should come before
|
||||
# the parent's effects. This will need to be solidified when we
|
||||
# add more effects support.
|
||||
result.effects.extend(thing.effects)
|
||||
# Keep the parent's length, if it has one
|
||||
if thing.source_range:
|
||||
# make sure it has a source_range first
|
||||
if not result.source_range:
|
||||
try:
|
||||
result.source_range = result.trimmed_range()
|
||||
except otio.exceptions.CannotComputeAvailableRangeError:
|
||||
result.source_range = copy.copy(thing.source_range)
|
||||
# modify the duration, but leave the start_time as is
|
||||
result.source_range = otio.opentime.TimeRange(
|
||||
result.source_range.start_time,
|
||||
thing.source_range.duration
|
||||
)
|
||||
return result
|
||||
|
||||
# if thing is the top level stack, all of its children must be in tracks
|
||||
if isinstance(thing, otio.schema.Stack) and thing.parent() is None:
|
||||
children_needing_tracks = []
|
||||
for child in thing:
|
||||
if isinstance(child, otio.schema.Track):
|
||||
continue
|
||||
children_needing_tracks.append(child)
|
||||
|
||||
for child in children_needing_tracks:
|
||||
orig_index = thing.index(child)
|
||||
del thing[orig_index]
|
||||
new_track = otio.schema.Track()
|
||||
new_track.append(child)
|
||||
thing.insert(orig_index, new_track)
|
||||
|
||||
return thing
|
||||
|
||||
|
||||
def _has_effects(thing):
|
||||
if isinstance(thing, otio.core.Item):
|
||||
if len(thing.effects) > 0:
|
||||
return True
|
||||
|
||||
|
||||
def _is_redundant_container(thing):
|
||||
|
||||
is_composition = isinstance(thing, otio.core.Composition)
|
||||
if not is_composition:
|
||||
return False
|
||||
|
||||
has_one_child = len(thing) == 1
|
||||
if not has_one_child:
|
||||
return False
|
||||
|
||||
am_top_level_track = (
|
||||
type(thing) is otio.schema.Track
|
||||
and type(thing.parent()) is otio.schema.Stack
|
||||
and thing.parent().parent() is None
|
||||
)
|
||||
|
||||
return (
|
||||
not am_top_level_track
|
||||
# am a top level track but my only child is a track
|
||||
or (
|
||||
type(thing) is otio.schema.Track
|
||||
and type(thing[0]) is otio.schema.Track
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _contains_something_valuable(thing):
|
||||
if isinstance(thing, otio.core.Item):
|
||||
if len(thing.effects) > 0 or len(thing.markers) > 0:
|
||||
return True
|
||||
|
||||
if isinstance(thing, otio.core.Composition):
|
||||
|
||||
if len(thing) == 0:
|
||||
# NOT valuable because it is empty
|
||||
return False
|
||||
|
||||
for child in thing:
|
||||
if _contains_something_valuable(child):
|
||||
# valuable because this child is valuable
|
||||
return True
|
||||
|
||||
# none of the children were valuable, so thing is NOT valuable
|
||||
return False
|
||||
|
||||
if isinstance(thing, otio.schema.Gap):
|
||||
# TODO: Are there other valuable things we should look for on a Gap?
|
||||
return False
|
||||
|
||||
# anything else is presumed to be valuable
|
||||
return True
|
||||
|
||||
|
||||
def read_from_file(filepath, simplify=True):
|
||||
|
||||
with aaf2.open(filepath) as aaf_file:
|
||||
|
||||
storage = aaf_file.content
|
||||
|
||||
# Note: We're skipping: f.header
|
||||
# Is there something valuable in there?
|
||||
|
||||
__names.clear()
|
||||
masterMobs = {}
|
||||
|
||||
result = _transcribe(storage, parents=list(), editRate=None, masterMobs=masterMobs)
|
||||
top = storage.toplevel()
|
||||
if top:
|
||||
# re-transcribe just the top-level mobs
|
||||
# but use all the master mobs we found in the 1st pass
|
||||
__names.clear() # reset the names back to 0
|
||||
result = _transcribe(top, parents=list(), editRate=None, masterMobs=masterMobs)
|
||||
|
||||
# AAF is typically more deeply nested than OTIO.
|
||||
# Lets try to simplify the structure by collapsing or removing
|
||||
# unnecessary stuff.
|
||||
if simplify:
|
||||
result = _simplify(result)
|
||||
|
||||
# OTIO represents transitions a bit different than AAF, so
|
||||
# we need to iterate over them and modify the items on either side.
|
||||
# Note that we do this *after* simplifying, since the structure
|
||||
# may change during simplification.
|
||||
_fix_transitions(result)
|
||||
|
||||
return result
|
||||
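# A minimal usage sketch (the filename is illustrative); this adapter is
# normally reached through the generic OTIO adapter entry point:
#
#   import opentimelineio as otio
#   timeline = otio.adapters.read_from_file("conformed_cut.aaf")
#   raw = otio.adapters.read_from_file("conformed_cut.aaf", simplify=False)
#
# Extra keyword arguments such as `simplify` are forwarded to this
# adapter's read_from_file().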
|
||||
|
||||
def write_to_file(input_otio, filepath, **kwargs):
|
||||
with aaf2.open(filepath, "w") as f:
|
||||
|
||||
timeline = aaf_writer._stackify_nested_groups(input_otio)
|
||||
|
||||
aaf_writer.validate_metadata(timeline)
|
||||
|
||||
otio2aaf = aaf_writer.AAFFileTranscriber(timeline, f, **kwargs)
|
||||
|
||||
if not isinstance(timeline, otio.schema.Timeline):
|
||||
raise otio.exceptions.NotSupportedError(
|
||||
"Currently only supporting top level Timeline")
|
||||
|
||||
for otio_track in timeline.tracks:
|
||||
# Ensure the track has at least one clip so we can get the edit_rate
|
||||
if len(otio_track) == 0:
|
||||
continue
|
||||
|
||||
transcriber = otio2aaf.track_transcriber(otio_track)
|
||||
|
||||
for otio_child in otio_track:
|
||||
result = transcriber.transcribe(otio_child)
|
||||
if result:
|
||||
transcriber.sequence.components.append(result)
|
||||
318
pype/vendor/python/python_2/opentimelineio_contrib/adapters/ale.py
vendored
Normal file
|
|
@ -0,0 +1,318 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""OpenTimelineIO Avid Log Exchange (ALE) Adapter"""
|
||||
import re
|
||||
import opentimelineio as otio
|
||||
|
||||
DEFAULT_VIDEO_FORMAT = '1080'
|
||||
|
||||
|
||||
def AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(width, height):
|
||||
"""Utility function to map a width and height to an Avid Project Format"""
|
||||
|
||||
format_map = {
|
||||
'1080': "1080",
|
||||
'720': "720",
|
||||
'576': "PAL",
|
||||
'486': "NTSC",
|
||||
}
|
||||
mapped = format_map.get(str(height), "CUSTOM")
|
||||
# check for the 2K DCI 1080 format
|
||||
if mapped == '1080' and width > 1920:
|
||||
mapped = "CUSTOM"
|
||||
return mapped
|
||||
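# A few illustrative mappings for the helper above:
#
#   AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(1920, 1080)  # -> "1080"
#   AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(2048, 1080)  # -> "CUSTOM" (2K DCI)
#   AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(1280, 720)   # -> "720"
#   AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(720, 576)    # -> "PAL"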
|
||||
|
||||
class ALEParseError(otio.exceptions.OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
def _parse_data_line(line, columns, fps):
|
||||
row = line.split("\t")
|
||||
|
||||
if len(row) < len(columns):
|
||||
# Fill in blanks for any missing fields in this row
|
||||
row.extend([""] * (len(columns) - len(row)))
|
||||
|
||||
if len(row) > len(columns):
|
||||
raise ALEParseError("Too many values on row: " + line)
|
||||
|
||||
try:
|
||||
|
||||
# Gather all the columns into a dictionary
|
||||
# For expected columns, like Name, Start, etc. we will pop (remove)
|
||||
# those from metadata, leaving the rest alone.
|
||||
metadata = dict(zip(columns, row))
|
||||
|
||||
clip = otio.schema.Clip()
|
||||
clip.name = metadata.pop("Name", None)
|
||||
|
||||
# When looking for Start, Duration and End, they might be missing
|
||||
# or blank. Treat None and "" as the same via: get(k,"")!=""
|
||||
# To have a valid source range, you need Start and either Duration
|
||||
# or End. If all three are provided, we check to make sure they match.
|
||||
if metadata.get("Start", "") != "":
|
||||
value = metadata.pop("Start")
|
||||
try:
|
||||
start = otio.opentime.from_timecode(value, fps)
|
||||
except (ValueError, TypeError):
|
||||
raise ALEParseError("Invalid Start timecode: {}".format(value))
|
||||
duration = None
|
||||
end = None
|
||||
if metadata.get("Duration", "") != "":
|
||||
value = metadata.pop("Duration")
|
||||
try:
|
||||
duration = otio.opentime.from_timecode(value, fps)
|
||||
except (ValueError, TypeError):
|
||||
raise ALEParseError("Invalid Duration timecode: {}".format(
|
||||
value
|
||||
))
|
||||
if metadata.get("End", "") != "":
|
||||
value = metadata.pop("End")
|
||||
try:
|
||||
end = otio.opentime.from_timecode(value, fps)
|
||||
except (ValueError, TypeError):
|
||||
raise ALEParseError("Invalid End timecode: {}".format(
|
||||
value
|
||||
))
|
||||
if duration is None:
|
||||
duration = end - start
|
||||
if end is None:
|
||||
end = start + duration
|
||||
if end != start + duration:
|
||||
raise ALEParseError(
|
||||
"Inconsistent Start, End, Duration: " + line
|
||||
)
|
||||
clip.source_range = otio.opentime.TimeRange(
|
||||
start,
|
||||
duration
|
||||
)
|
||||
|
||||
if metadata.get("Source File"):
|
||||
source = metadata.pop("Source File")
|
||||
clip.media_reference = otio.schema.ExternalReference(
|
||||
target_url=source
|
||||
)
|
||||
|
||||
# We've pulled out the key/value pairs that we treat specially.
|
||||
# Put the remaining key/values into clip.metadata["ALE"]
|
||||
clip.metadata["ALE"] = metadata
|
||||
|
||||
return clip
|
||||
except Exception as ex:
|
||||
raise ALEParseError("Error parsing line: {}\n{}".format(
|
||||
line, repr(ex)
|
||||
))
|
||||
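# A worked example of the parsing above (tab-separated; values illustrative):
# with columns ["Name", "Start", "End", "Source File"] and fps=24, the row
#
#   "A001_C001\t01:00:00:00\t01:00:10:00\t/media/A001_C001.mov"
#
# produces a Clip named "A001_C001" with a 240-frame source_range starting
# at 01:00:00:00 and an ExternalReference pointing at the source file path;
# any remaining columns end up in clip.metadata["ALE"].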
|
||||
|
||||
def _video_format_from_metadata(clips):
|
||||
# Look for clips with Image Size metadata set
|
||||
max_height = 0
|
||||
max_width = 0
|
||||
for clip in clips:
|
||||
fields = clip.metadata.get("ALE", {})
|
||||
res = fields.get("Image Size", "")
|
||||
m = re.search(r'([0-9]{1,})\s*[xX]\s*([0-9]{1,})', res)
|
||||
if m and len(m.groups()) >= 2:
|
||||
width = int(m.group(1))
|
||||
height = int(m.group(2))
|
||||
if height > max_height:
|
||||
max_height = height
|
||||
if width > max_width:
|
||||
max_width = width
|
||||
|
||||
# We don't have any image size information, use the default
|
||||
if max_height == 0:
|
||||
return DEFAULT_VIDEO_FORMAT
|
||||
else:
|
||||
return AVID_VIDEO_FORMAT_FROM_WIDTH_HEIGHT(max_width, max_height)
|
||||
|
||||
|
||||
def read_from_string(input_str, fps=24):
|
||||
|
||||
collection = otio.schema.SerializableCollection()
|
||||
header = {}
|
||||
columns = []
|
||||
|
||||
def nextline(lines):
|
||||
return lines.pop(0)
|
||||
|
||||
lines = input_str.splitlines()
|
||||
while len(lines):
|
||||
line = nextline(lines)
|
||||
|
||||
# skip blank lines
|
||||
if line.strip() == "":
|
||||
continue
|
||||
|
||||
if line.strip() == "Heading":
|
||||
while len(lines):
|
||||
line = nextline(lines)
|
||||
|
||||
if line.strip() == "":
|
||||
break
|
||||
|
||||
if "\t" not in line:
|
||||
raise ALEParseError("Invalid Heading line: " + line)
|
||||
|
||||
segments = line.split("\t")
|
||||
while len(segments) >= 2:
|
||||
key, val = segments.pop(0), segments.pop(0)
|
||||
header[key] = val
|
||||
if len(segments) != 0:
|
||||
raise ALEParseError("Invalid Heading line: " + line)
|
||||
|
||||
if "FPS" in header:
|
||||
fps = float(header["FPS"])
|
||||
|
||||
if line.strip() == "Column":
|
||||
if len(lines) == 0:
|
||||
raise ALEParseError("Unexpected end of file after: " + line)
|
||||
|
||||
line = nextline(lines)
|
||||
columns = line.split("\t")
|
||||
|
||||
if line.strip() == "Data":
|
||||
while len(lines):
|
||||
line = nextline(lines)
|
||||
|
||||
if line.strip() == "":
|
||||
continue
|
||||
|
||||
clip = _parse_data_line(line, columns, fps)
|
||||
|
||||
collection.append(clip)
|
||||
|
||||
collection.metadata["ALE"] = {
|
||||
"header": header,
|
||||
"columns": columns
|
||||
}
|
||||
|
||||
return collection
|
||||
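# A minimal ALE document accepted by the parser above (tabs shown as "<TAB>";
# values illustrative):
#
#   Heading
#   FIELD_DELIM<TAB>TABS
#   FPS<TAB>24
#
#   Column
#   Name<TAB>Start<TAB>End
#
#   Data
#   A001_C001<TAB>01:00:00:00<TAB>01:00:10:00
#
# read_from_string() on such text yields a SerializableCollection with one
# Clip, with the header and columns stored under metadata["ALE"].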
|
||||
|
||||
def write_to_string(input_otio, columns=None, fps=None, video_format=None):
|
||||
|
||||
# Get all the clips we're going to export
|
||||
clips = list(input_otio.each_clip())
|
||||
|
||||
result = ""
|
||||
|
||||
result += "Heading\n"
|
||||
header = dict(input_otio.metadata.get("ALE", {}).get("header", {}))
|
||||
|
||||
# Force this, since we've hard coded tab delimiters
|
||||
header["FIELD_DELIM"] = "TABS"
|
||||
|
||||
if fps is None:
|
||||
# If we weren't given a FPS, is there one in the header metadata?
|
||||
if "FPS" in header:
|
||||
fps = float(header["FPS"])
|
||||
else:
|
||||
# Would it be better to infer this by inspecting the input clips?
|
||||
fps = 24
|
||||
header["FPS"] = str(fps)
|
||||
else:
|
||||
# Put the value we were given into the header
|
||||
header["FPS"] = str(fps)
|
||||
|
||||
# Check if we have been supplied a VIDEO_FORMAT; if not, let's set one
|
||||
if video_format is None:
|
||||
# Do we already have it in the header? If so, let's leave that as is
|
||||
if "VIDEO_FORMAT" not in header:
|
||||
header["VIDEO_FORMAT"] = _video_format_from_metadata(clips)
|
||||
else:
|
||||
header["VIDEO_FORMAT"] = str(video_format)
|
||||
|
||||
headers = list(header.items())
|
||||
headers.sort() # make the output predictable
|
||||
for key, val in headers:
|
||||
result += "{}\t{}\n".format(key, val)
|
||||
|
||||
# If the caller passed in a list of columns, use that, otherwise
|
||||
# we need to discover the columns that should be output.
|
||||
if columns is None:
|
||||
# Is there a hint about the columns we want (and column ordering)
|
||||
# at the top level?
|
||||
columns = input_otio.metadata.get("ALE", {}).get("columns", [])
|
||||
|
||||
# Scan all the clips for any extra columns
|
||||
for clip in clips:
|
||||
fields = clip.metadata.get("ALE", {})
|
||||
for key in fields.keys():
|
||||
if key not in columns:
|
||||
columns.append(key)
|
||||
|
||||
# Always output these
|
||||
for c in ["Duration", "End", "Start", "Name", "Source File"]:
|
||||
if c not in columns:
|
||||
columns.insert(0, c)
|
||||
|
||||
result += "\nColumn\n{}\n".format("\t".join(columns))
|
||||
|
||||
result += "\nData\n"
|
||||
|
||||
def val_for_column(column, clip):
|
||||
if column == "Name":
|
||||
return clip.name
|
||||
elif column == "Source File":
|
||||
if (
|
||||
clip.media_reference and
|
||||
hasattr(clip.media_reference, 'target_url') and
|
||||
clip.media_reference.target_url
|
||||
):
|
||||
return clip.media_reference.target_url
|
||||
else:
|
||||
return ""
|
||||
elif column == "Start":
|
||||
if not clip.source_range:
|
||||
return ""
|
||||
return otio.opentime.to_timecode(
|
||||
clip.source_range.start_time, fps
|
||||
)
|
||||
elif column == "Duration":
|
||||
if not clip.source_range:
|
||||
return ""
|
||||
return otio.opentime.to_timecode(
|
||||
clip.source_range.duration, fps
|
||||
)
|
||||
elif column == "End":
|
||||
if not clip.source_range:
|
||||
return ""
|
||||
return otio.opentime.to_timecode(
|
||||
clip.source_range.end_time_exclusive(), fps
|
||||
)
|
||||
else:
|
||||
return clip.metadata.get("ALE", {}).get(column)
|
||||
|
||||
for clip in clips:
|
||||
row = []
|
||||
for column in columns:
|
||||
val = str(val_for_column(column, clip) or "")
|
||||
val.replace("\t", " ") # don't allow tabs inside a value
|
||||
row.append(val)
|
||||
result += "\t".join(row) + "\n"
|
||||
|
||||
return result
|
||||
93
pype/vendor/python/python_2/opentimelineio_contrib/adapters/burnins.py
vendored
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
"""FFMPEG Burnins Adapter"""
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def build_burnins(input_otio):
|
||||
"""
|
||||
Generates the burnin objects for each clip within the otio container
|
||||
|
||||
:param input_otio: OTIO container
|
||||
:rtype: [ffmpeg_burnins.Burnins(), ...]
|
||||
"""
|
||||
|
||||
if os.path.dirname(__file__) not in sys.path:
|
||||
sys.path.append(os.path.dirname(__file__))
|
||||
|
||||
import ffmpeg_burnins
|
||||
key = 'burnins'
|
||||
|
||||
burnins = []
|
||||
for clip in input_otio.each_clip():
|
||||
|
||||
# per clip burnin data
|
||||
burnin_data = clip.media_reference.metadata.get(key)
|
||||
if not burnin_data:
|
||||
# otherwise default to global burnin
|
||||
burnin_data = input_otio.metadata.get(key)
|
||||
|
||||
if not burnin_data:
|
||||
continue
|
||||
|
||||
media = clip.media_reference.target_url
|
||||
if media.startswith('file://'):
|
||||
media = media[7:]
|
||||
streams = burnin_data.get('streams')
|
||||
burnins.append(ffmpeg_burnins.Burnins(media,
|
||||
streams=streams))
|
||||
burnins[-1].otio_media = media
|
||||
burnins[-1].otio_overwrite = burnin_data.get('overwrite')
|
||||
burnins[-1].otio_args = burnin_data.get('args')
|
||||
|
||||
for burnin in burnin_data.get('burnins', []):
|
||||
align = burnin.pop('align')
|
||||
function = burnin.pop('function')
|
||||
if function == 'text':
|
||||
text = burnin.pop('text')
|
||||
options = ffmpeg_burnins.TextOptions()
|
||||
options.update(burnin)
|
||||
burnins[-1].add_text(text, align, options=options)
|
||||
elif function == 'frame_number':
|
||||
options = ffmpeg_burnins.FrameNumberOptions()
|
||||
options.update(burnin)
|
||||
burnins[-1].add_frame_numbers(align, options=options)
|
||||
elif function == 'timecode':
|
||||
options = ffmpeg_burnins.TimeCodeOptions()
|
||||
options.update(burnin)
|
||||
burnins[-1].add_timecode(align, options=options)
|
||||
else:
|
||||
raise RuntimeError("Unknown function '%s'" % function)
|
||||
|
||||
return burnins
|
||||
|
||||
|
||||
def write_to_file(input_otio, filepath):
|
||||
"""required OTIO function hook"""
|
||||
|
||||
for burnin in build_burnins(input_otio):
|
||||
burnin.render(os.path.join(filepath, burnin.otio_media),
|
||||
args=burnin.otio_args,
|
||||
overwrite=burnin.otio_overwrite)
|
||||
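For orientation, here is a hedged sketch of the metadata shape that build_burnins() above consumes. The "burnins" key can sit on a clip's media_reference metadata or, as a fallback, on the container's metadata; every value shown is an example, not a requirement.

# Hypothetical burnin description read by build_burnins() above.
example_burnin_data = {
    "overwrite": True,   # forwarded to Burnins.render() as overwrite=
    "args": None,        # extra FFMPEG arguments, if any
    "streams": None,     # optional pre-probed ffprobe stream data
    "burnins": [
        {"function": "text", "align": "top_left", "text": "SHOT 010"},
        {"function": "frame_number", "align": "bottom_right", "frame_offset": 1001},
        {"function": "timecode", "align": "bottom_left", "fps": 24},
    ],
}
# e.g. clip.media_reference.metadata["burnins"] = example_burnin_data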
|
|
@ -0,0 +1,61 @@
|
|||
{
|
||||
"OTIO_SCHEMA" : "PluginManifest.1",
|
||||
"adapters": [
|
||||
{
|
||||
"OTIO_SCHEMA": "Adapter.1",
|
||||
"name": "fcpx_xml",
|
||||
"execution_scope": "in process",
|
||||
"filepath": "fcpx_xml.py",
|
||||
"suffixes": ["fcpxml"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA": "Adapter.1",
|
||||
"name": "hls_playlist",
|
||||
"execution_scope": "in process",
|
||||
"filepath": "hls_playlist.py",
|
||||
"suffixes": ["m3u8"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "rv_session",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "rv.py",
|
||||
"suffixes" : ["rv"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "maya_sequencer",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "maya_sequencer.py",
|
||||
"suffixes" : ["ma","mb"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "ale",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "ale.py",
|
||||
"suffixes" : ["ale"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "burnins",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "burnins.py",
|
||||
"suffixes" : []
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA" : "Adapter.1",
|
||||
"name" : "AAF",
|
||||
"execution_scope" : "in process",
|
||||
"filepath" : "advanced_authoring_format.py",
|
||||
"suffixes" : ["aaf"]
|
||||
},
|
||||
{
|
||||
"OTIO_SCHEMA": "Adapter.1",
|
||||
"name": "xges",
|
||||
"execution_scope": "in process",
|
||||
"filepath": "xges.py",
|
||||
"suffixes": ["xges"]
|
||||
}
|
||||
]
|
||||
}
|
||||
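The manifest above is how the plugin system maps a file suffix to one of these contrib adapters. A hedged sketch of that resolution, using the suffixes declared above (file paths are placeholders):

import opentimelineio as otio

# ".ale" resolves to the "ale" entry above, ".fcpxml" to "fcpx_xml", and so on.
ale_adapter = otio.adapters.from_filepath("/tmp/editorial/cut_v001.ale")
print(ale_adapter.name)   # expected: "ale"

# Adapters can also be looked up by name directly.
rv_adapter = otio.adapters.from_name("rv_session")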
261
pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_maya_sequencer.py
vendored
Normal file
|
|
@ -0,0 +1,261 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# deal with renaming of default library from python 2 / 3
|
||||
try:
|
||||
import urlparse as urllib_parse
|
||||
except ImportError:
|
||||
import urllib.parse as urllib_parse
|
||||
|
||||
# import maya and handle standalone mode
|
||||
from maya import cmds
|
||||
|
||||
try:
|
||||
cmds.ls
|
||||
except AttributeError:
|
||||
from maya import standalone
|
||||
standalone.initialize(name='python')
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
# Mapping of Maya FPS Enum to rate.
|
||||
FPS = {
|
||||
'game': 15,
|
||||
'film': 24,
|
||||
'pal': 25,
|
||||
'ntsc': 30,
|
||||
'show': 48,
|
||||
'palf': 50,
|
||||
'ntscf': 60
|
||||
}
|
||||
|
||||
|
||||
def _url_to_path(url):
|
||||
if url is None:
|
||||
return None
|
||||
|
||||
return urllib_parse.urlparse(url).path
|
||||
|
||||
|
||||
def _video_url_for_shot(shot):
|
||||
current_file = os.path.normpath(cmds.file(q=True, sn=True))
|
||||
return os.path.join(
|
||||
os.path.dirname(current_file),
|
||||
'playblasts',
|
||||
'{base_name}_{shot_name}.mov'.format(
|
||||
base_name=os.path.basename(os.path.splitext(current_file)[0]),
|
||||
shot_name=cmds.shot(shot, q=True, shotName=True)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _match_existing_shot(item, existing_shots):
|
||||
if existing_shots is None:
|
||||
return None
|
||||
|
||||
if item.media_reference.is_missing_reference:
|
||||
return None
|
||||
|
||||
url_path = _url_to_path(item.media_reference.target_url)
|
||||
return next(
|
||||
(
|
||||
shot for shot in existing_shots
|
||||
if _video_url_for_shot(shot) == url_path
|
||||
),
|
||||
None
|
||||
)
|
||||
|
||||
|
||||
# ------------------------
|
||||
# building single track
|
||||
# ------------------------
|
||||
|
||||
def _build_shot(item, track_no, track_range, existing_shot=None):
|
||||
camera = None
|
||||
if existing_shot is None:
|
||||
camera = cmds.camera(name=item.name.split('.')[0] + '_cam')[0]
|
||||
cmds.shot(
|
||||
existing_shot or item.name.split('.')[0],
|
||||
e=existing_shot is not None,
|
||||
shotName=item.name,
|
||||
track=track_no,
|
||||
currentCamera=camera,
|
||||
startTime=item.trimmed_range().start_time.value,
|
||||
endTime=item.trimmed_range().end_time_inclusive().value,
|
||||
sequenceStartTime=track_range.start_time.value,
|
||||
sequenceEndTime=track_range.end_time_inclusive().value
|
||||
)
|
||||
|
||||
|
||||
def _build_track(track, track_no, existing_shots=None):
|
||||
for n, item in enumerate(track):
|
||||
if not isinstance(item, otio.schema.Clip):
|
||||
continue
|
||||
|
||||
track_range = track.range_of_child_at_index(n)
|
||||
if existing_shots is not None:
|
||||
existing_shot = _match_existing_shot(item, existing_shots)
|
||||
else:
|
||||
existing_shot = None
|
||||
|
||||
_build_shot(item, track_no, track_range, existing_shot)
|
||||
|
||||
|
||||
def build_sequence(timeline, clean=False):
|
||||
existing_shots = cmds.ls(type='shot') or []
|
||||
if clean:
|
||||
cmds.delete(existing_shots)
|
||||
existing_shots = []
|
||||
|
||||
tracks = [
|
||||
track for track in timeline.tracks
|
||||
if track.kind == otio.schema.TrackKind.Video
|
||||
]
|
||||
|
||||
for track_no, track in enumerate(reversed(tracks)):
|
||||
_build_track(track, track_no, existing_shots=existing_shots)
|
||||
|
||||
|
||||
def read_from_file(path, clean=True):
|
||||
timeline = otio.adapters.read_from_file(path)
|
||||
build_sequence(timeline, clean=clean)
|
||||
|
||||
|
||||
# -----------------------
|
||||
# parsing single track
|
||||
# -----------------------
|
||||
|
||||
def _get_gap(duration):
|
||||
rate = FPS.get(cmds.currentUnit(q=True, time=True), 25)
|
||||
gap_range = otio.opentime.TimeRange(
|
||||
duration=otio.opentime.RationalTime(duration, rate)
|
||||
)
|
||||
return otio.schema.Gap(source_range=gap_range)
|
||||
|
||||
|
||||
def _read_shot(shot):
|
||||
rate = FPS.get(cmds.currentUnit(q=True, time=True), 25)
|
||||
start = int(cmds.shot(shot, q=True, startTime=True))
|
||||
end = int(cmds.shot(shot, q=True, endTime=True)) + 1
|
||||
|
||||
video_reference = otio.schema.ExternalReference(
|
||||
target_url=_video_url_for_shot(shot),
|
||||
available_range=otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(value=start, rate=rate),
|
||||
otio.opentime.RationalTime(value=end - start, rate=rate)
|
||||
)
|
||||
)
|
||||
|
||||
return otio.schema.Clip(
|
||||
name=cmds.shot(shot, q=True, shotName=True),
|
||||
media_reference=video_reference,
|
||||
source_range=otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(value=start, rate=rate),
|
||||
otio.opentime.RationalTime(value=end - start, rate=rate)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _read_track(shots):
|
||||
v = otio.schema.Track(kind=otio.schema.track.TrackKind.Video)
|
||||
|
||||
last_clip_end = 0
|
||||
for shot in shots:
|
||||
seq_start = int(cmds.shot(shot, q=True, sequenceStartTime=True))
|
||||
seq_end = int(cmds.shot(shot, q=True, sequenceEndTime=True))
|
||||
|
||||
# add gap if necessary
|
||||
fill_time = seq_start - last_clip_end
|
||||
last_clip_end = seq_end + 1
|
||||
if fill_time:
|
||||
v.append(_get_gap(fill_time))
|
||||
|
||||
# add clip
|
||||
v.append(_read_shot(shot))
|
||||
|
||||
return v
|
||||
|
||||
|
||||
def read_sequence():
|
||||
rate = FPS.get(cmds.currentUnit(q=True, time=True), 25)
|
||||
shots = cmds.ls(type='shot') or []
|
||||
per_track = {}
|
||||
|
||||
for shot in shots:
|
||||
track_no = cmds.shot(shot, q=True, track=True)
|
||||
if track_no not in per_track:
|
||||
per_track[track_no] = []
|
||||
per_track[track_no].append(shot)
|
||||
|
||||
timeline = otio.schema.Timeline()
|
||||
timeline.global_start_time = otio.opentime.RationalTime(0, rate)
|
||||
|
||||
for track_no in reversed(sorted(per_track.keys())):
|
||||
track_shots = per_track[track_no]
|
||||
timeline.tracks.append(_read_track(track_shots))
|
||||
|
||||
return timeline
|
||||
|
||||
|
||||
def write_to_file(path):
|
||||
timeline = read_sequence()
|
||||
otio.adapters.write_to_file(timeline, path)
|
||||
|
||||
|
||||
def main():
|
||||
read_write_arg = sys.argv[1]
|
||||
filepath = sys.argv[2]
|
||||
|
||||
write = False
|
||||
if read_write_arg == "write":
|
||||
write = True
|
||||
|
||||
if write:
|
||||
# read the input OTIO off stdin
|
||||
input_otio = otio.adapters.read_from_string(
|
||||
sys.stdin.read(),
|
||||
'otio_json'
|
||||
)
|
||||
build_sequence(input_otio, clean=True)
|
||||
cmds.file(rename=filepath)
|
||||
cmds.file(save=True, type="mayaAscii")
|
||||
else:
|
||||
cmds.file(filepath, o=True)
|
||||
sys.stdout.write(
|
||||
"\nOTIO_JSON_BEGIN\n" +
|
||||
otio.adapters.write_to_string(
|
||||
read_sequence(),
|
||||
"otio_json"
|
||||
) +
|
||||
"\nOTIO_JSON_END\n"
|
||||
)
|
||||
|
||||
cmds.quit(force=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
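main() above expects to run inside mayapy with a read/write mode and a scene path on the command line, exchanging OTIO JSON over stdin/stdout. A hedged sketch of that calling convention follows (the mayapy and scene paths are placeholders); the maya_sequencer harness later in this diff makes the equivalent call through subprocess.

import subprocess

# Illustrative invocation only; adjust the mayapy path for a real install.
proc = subprocess.Popen(
    [
        "/usr/autodesk/maya/bin/mayapy",
        "-m", "extern_maya_sequencer",
        "read",
        "/path/to/scene.ma",
    ],
    stdout=subprocess.PIPE,
)
out = proc.communicate()[0]
# The OTIO JSON payload sits between the OTIO_JSON_BEGIN / OTIO_JSON_END
# sentinels printed by main() above.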
327
pype/vendor/python/python_2/opentimelineio_contrib/adapters/extern_rv.py
vendored
Normal file
|
|
@ -0,0 +1,327 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""RV External Adapter component.
|
||||
|
||||
Because the rv adapter requires being run from within the RV py-interp to take
|
||||
advantage of modules inside of RV, this script gets shelled out to from the
|
||||
RV OTIO adapter.
|
||||
|
||||
Requires that you set the environment variables:
|
||||
OTIO_RV_PYTHON_LIB - should point at the parent directory of rvSession
|
||||
OTIO_RV_PYTHON_BIN - should point at py-interp from within rv
|
||||
"""
|
||||
|
||||
# python
|
||||
import sys
|
||||
import os
|
||||
|
||||
# otio
|
||||
import opentimelineio as otio
|
||||
|
||||
# rv import
|
||||
sys.path += [os.path.join(os.environ["OTIO_RV_PYTHON_LIB"], "rvSession")]
|
||||
import rvSession # noqa
|
||||
|
||||
|
||||
def main():
|
||||
""" entry point, should be called from the rv adapter in otio """
|
||||
|
||||
session_file = rvSession.Session()
|
||||
|
||||
output_fname = sys.argv[1]
|
||||
|
||||
# read the input OTIO off stdin
|
||||
input_otio = otio.adapters.read_from_string(sys.stdin.read(), 'otio_json')
|
||||
|
||||
result = write_otio(input_otio, session_file)
|
||||
session_file.setViewNode(result)
|
||||
session_file.write(output_fname)
|
||||
|
||||
|
||||
# exception class @{
|
||||
class NoMappingForOtioTypeError(otio.exceptions.OTIOError):
|
||||
pass
|
||||
# @}
|
||||
|
||||
|
||||
def write_otio(otio_obj, to_session, track_kind=None):
|
||||
WRITE_TYPE_MAP = {
|
||||
otio.schema.Timeline: _write_timeline,
|
||||
otio.schema.Stack: _write_stack,
|
||||
otio.schema.Track: _write_track,
|
||||
otio.schema.Clip: _write_item,
|
||||
otio.schema.Gap: _write_item,
|
||||
otio.schema.Transition: _write_transition,
|
||||
otio.schema.SerializableCollection: _write_collection,
|
||||
}
|
||||
|
||||
if type(otio_obj) in WRITE_TYPE_MAP:
|
||||
return WRITE_TYPE_MAP[type(otio_obj)](otio_obj, to_session, track_kind)
|
||||
|
||||
raise NoMappingForOtioTypeError(
|
||||
str(type(otio_obj)) + " on object: {}".format(otio_obj)
|
||||
)
|
||||
|
||||
|
||||
def _write_dissolve(pre_item, in_dissolve, post_item, to_session, track_kind=None):
|
||||
rv_trx = to_session.newNode("CrossDissolve", str(in_dissolve.name))
|
||||
|
||||
rate = pre_item.trimmed_range().duration.rate
|
||||
rv_trx.setProperty(
|
||||
"CrossDissolve",
|
||||
"",
|
||||
"parameters",
|
||||
"startFrame",
|
||||
rvSession.gto.FLOAT,
|
||||
1.0
|
||||
)
|
||||
rv_trx.setProperty(
|
||||
"CrossDissolve",
|
||||
"",
|
||||
"parameters",
|
||||
"numFrames",
|
||||
rvSession.gto.FLOAT,
|
||||
int(
|
||||
(
|
||||
in_dissolve.in_offset
|
||||
+ in_dissolve.out_offset
|
||||
).rescaled_to(rate).value
|
||||
)
|
||||
)
|
||||
|
||||
rv_trx.setProperty(
|
||||
"CrossDissolve",
|
||||
"",
|
||||
"output",
|
||||
"fps",
|
||||
rvSession.gto.FLOAT,
|
||||
rate
|
||||
)
|
||||
|
||||
pre_item_rv = write_otio(pre_item, to_session, track_kind)
|
||||
rv_trx.addInput(pre_item_rv)
|
||||
|
||||
post_item_rv = write_otio(post_item, to_session, track_kind)
|
||||
|
||||
node_to_insert = post_item_rv
|
||||
|
||||
if (
|
||||
hasattr(pre_item, "media_reference")
|
||||
and pre_item.media_reference
|
||||
and pre_item.media_reference.available_range
|
||||
and hasattr(post_item, "media_reference")
|
||||
and post_item.media_reference
|
||||
and post_item.media_reference.available_range
|
||||
and (
|
||||
post_item.media_reference.available_range.start_time.rate !=
|
||||
pre_item.media_reference.available_range.start_time.rate
|
||||
)
|
||||
):
|
||||
# write a retime to make sure post_item is in the timebase of pre_item
|
||||
rt_node = to_session.newNode("Retime", "transition_retime")
|
||||
rt_node.setTargetFps(
|
||||
pre_item.media_reference.available_range.start_time.rate
|
||||
)
|
||||
|
||||
post_item_rv = write_otio(post_item, to_session, track_kind)
|
||||
|
||||
rt_node.addInput(post_item_rv)
|
||||
node_to_insert = rt_node
|
||||
|
||||
rv_trx.addInput(node_to_insert)
|
||||
|
||||
return rv_trx
|
||||
|
||||
|
||||
def _write_transition(
|
||||
pre_item,
|
||||
in_trx,
|
||||
post_item,
|
||||
to_session,
|
||||
track_kind=None
|
||||
):
|
||||
trx_map = {
|
||||
otio.schema.TransitionTypes.SMPTE_Dissolve: _write_dissolve,
|
||||
}
|
||||
|
||||
if in_trx.transition_type not in trx_map:
|
||||
return
|
||||
|
||||
return trx_map[in_trx.transition_type](
|
||||
pre_item,
|
||||
in_trx,
|
||||
post_item,
|
||||
to_session,
|
||||
track_kind
|
||||
)
|
||||
|
||||
|
||||
def _write_stack(in_stack, to_session, track_kind=None):
|
||||
new_stack = to_session.newNode("Stack", str(in_stack.name) or "tracks")
|
||||
|
||||
for seq in in_stack:
|
||||
result = write_otio(seq, to_session, track_kind)
|
||||
if result:
|
||||
new_stack.addInput(result)
|
||||
|
||||
return new_stack
|
||||
|
||||
|
||||
def _write_track(in_seq, to_session, _=None):
|
||||
new_seq = to_session.newNode("Sequence", str(in_seq.name) or "track")
|
||||
|
||||
items_to_serialize = otio.algorithms.track_with_expanded_transitions(
|
||||
in_seq
|
||||
)
|
||||
|
||||
track_kind = in_seq.kind
|
||||
|
||||
for thing in items_to_serialize:
|
||||
if isinstance(thing, tuple):
|
||||
result = _write_transition(*thing, to_session=to_session,
|
||||
track_kind=track_kind)
|
||||
elif thing.duration().value == 0:
|
||||
continue
|
||||
else:
|
||||
result = write_otio(thing, to_session, track_kind)
|
||||
|
||||
if result:
|
||||
new_seq.addInput(result)
|
||||
|
||||
return new_seq
|
||||
|
||||
|
||||
def _write_timeline(tl, to_session, _=None):
|
||||
result = write_otio(tl.tracks, to_session)
|
||||
return result
|
||||
|
||||
|
||||
def _write_collection(collection, to_session, track_kind=None):
|
||||
results = []
|
||||
for item in collection:
|
||||
result = write_otio(item, to_session, track_kind)
|
||||
if result:
|
||||
results.append(result)
|
||||
|
||||
if results:
|
||||
return results[0]
|
||||
|
||||
|
||||
def _create_media_reference(item, src, track_kind=None):
|
||||
if hasattr(item, "media_reference") and item.media_reference:
|
||||
if isinstance(item.media_reference, otio.schema.ExternalReference):
|
||||
media = [str(item.media_reference.target_url)]
|
||||
|
||||
if track_kind == otio.schema.TrackKind.Audio:
|
||||
# Create blank video media to accompany audio for valid source
|
||||
blank = "{},start={},end={},fps={}.movieproc".format(
|
||||
"blank",
|
||||
item.available_range().start_time.value,
|
||||
item.available_range().end_time_inclusive().value,
|
||||
item.available_range().duration.rate
|
||||
)
|
||||
# Inserting blank media here forces all content to only
|
||||
# produce audio. We do it twice in case we look at this in
|
||||
# stereo
|
||||
media = [blank, blank] + media
|
||||
|
||||
src.setMedia(media)
|
||||
return True
|
||||
|
||||
elif isinstance(item.media_reference, otio.schema.GeneratorReference):
|
||||
if item.media_reference.generator_kind == "SMPTEBars":
|
||||
kind = "smptebars"
|
||||
src.setMedia(
|
||||
[
|
||||
"{},start={},end={},fps={}.movieproc".format(
|
||||
kind,
|
||||
item.available_range().start_time.value,
|
||||
item.available_range().end_time_inclusive().value,
|
||||
item.available_range().duration.rate
|
||||
)
|
||||
]
|
||||
)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _write_item(it, to_session, track_kind=None):
|
||||
src = to_session.newNode("Source", str(it.name) or "clip")
|
||||
|
||||
src.setProperty(
|
||||
"RVSourceGroup",
|
||||
"source",
|
||||
"attributes",
|
||||
"otio_metadata",
|
||||
rvSession.gto.STRING, str(it.metadata)
|
||||
)
|
||||
|
||||
range_to_read = it.trimmed_range()
|
||||
|
||||
if not range_to_read:
|
||||
raise otio.exceptions.OTIOError(
|
||||
"No valid range on clip: {0}.".format(
|
||||
str(it)
|
||||
)
|
||||
)
|
||||
|
||||
# because OTIO has no global concept of FPS, the rate of the duration is
|
||||
# used as the rate for the range of the source.
|
||||
# RationalTime.value_rescaled_to returns the time value of the object in
|
||||
# time rate of the argument.
|
||||
src.setCutIn(
|
||||
range_to_read.start_time.value_rescaled_to(
|
||||
range_to_read.duration
|
||||
)
|
||||
)
|
||||
src.setCutOut(
|
||||
range_to_read.end_time_inclusive().value_rescaled_to(
|
||||
range_to_read.duration
|
||||
)
|
||||
)
|
||||
src.setFPS(range_to_read.duration.rate)
|
||||
|
||||
# if the media reference is missing
|
||||
if not _create_media_reference(it, src, track_kind):
|
||||
kind = "smptebars"
|
||||
if isinstance(it, otio.schema.Gap):
|
||||
kind = "blank"
|
||||
src.setMedia(
|
||||
[
|
||||
"{},start={},end={},fps={}.movieproc".format(
|
||||
kind,
|
||||
range_to_read.start_time.value,
|
||||
range_to_read.end_time_inclusive().value,
|
||||
range_to_read.duration.rate
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
return src
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
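Both _write_item() and _create_media_reference() above fall back to RV's procedural "movieproc" sources for gaps, generators and audio-only clips. A small hedged sketch of the string they assemble, with example values:

# For a 48-frame gap starting at frame 0 at 24 fps, the generated source is
# roughly "blank,start=0,end=47,fps=24.movieproc"; generators use "smptebars".
kind = "blank"
start, end, fps = 0, 47, 24
movieproc_source = "{},start={},end={},fps={}.movieproc".format(kind, start, end, fps)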
1182
pype/vendor/python/python_2/opentimelineio_contrib/adapters/fcpx_xml.py
vendored
Normal file
File diff suppressed because it is too large
424
pype/vendor/python/python_2/opentimelineio_contrib/adapters/ffmpeg_burnins.py
vendored
Normal file
|
|
@ -0,0 +1,424 @@
|
|||
# MIT License
|
||||
#
|
||||
# Copyright (c) 2017 Ed Caspersen
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
"""
|
||||
This module provides an interface to allow users to easily
|
||||
build out an FFMPEG command with all the correct filters
|
||||
for applying text (with a background) to the rendered media.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
from subprocess import Popen, PIPE
|
||||
from PIL import ImageFont
|
||||
|
||||
|
||||
def _is_windows():
|
||||
"""
|
||||
queries if the current operating system is Windows
|
||||
|
||||
:rtype: bool
|
||||
"""
|
||||
return sys.platform.startswith('win') or \
|
||||
sys.platform.startswith('cygwin')
|
||||
|
||||
|
||||
def _system_font():
|
||||
"""
|
||||
attempts to determine a default system font
|
||||
|
||||
:rtype: str
|
||||
"""
|
||||
if _is_windows():
|
||||
font_path = os.path.join(os.environ['WINDIR'], 'Fonts')
|
||||
fonts = ('arial.ttf', 'calibri.ttf', 'times.ttf')
|
||||
elif sys.platform.startswith('darwin'):
|
||||
font_path = '/System/Library/Fonts'
|
||||
fonts = ('Menlo.ttc',)
|
||||
else:
|
||||
# assuming linux
|
||||
font_path = '/usr/share/fonts/msttcorefonts'
|
||||
fonts = ('arial.ttf', 'times.ttf', 'couri.ttf')
|
||||
|
||||
system_font = None
|
||||
backup = None
|
||||
for font in fonts:
|
||||
font = os.path.join(font_path, font)
|
||||
if os.path.exists(font):
|
||||
system_font = font
|
||||
break
|
||||
else:
|
||||
if os.path.exists(font_path):
|
||||
for each in os.listdir(font_path):
|
||||
ext = os.path.splitext(each)[-1]
|
||||
if ext[1:].startswith('tt'):
|
||||
system_font = os.path.join(font_path, each)
|
||||
return system_font or backup
|
||||
|
||||
|
||||
# Default values
|
||||
FONT = _system_font()
|
||||
FONT_SIZE = 16
|
||||
FONT_COLOR = 'white'
|
||||
BG_COLOR = 'black'
|
||||
BG_PADDING = 5
|
||||
|
||||
# FFMPEG command strings
|
||||
FFMPEG = ('ffmpeg -loglevel panic -i %(input)s '
|
||||
'%(filters)s %(args)s%(output)s')
|
||||
FFPROBE = ('ffprobe -v quiet -print_format json -show_format '
|
||||
'-show_streams %(source)s')
|
||||
BOX = 'box=1:boxborderw=%(border)d:boxcolor=%(color)s@%(opacity).1f'
|
||||
DRAWTEXT = ("drawtext=text='%(text)s':x=%(x)s:y=%(y)s:fontcolor="
|
||||
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'")
|
||||
TIMECODE = ("drawtext=timecode='%(text)s':timecode_rate=%(fps).2f"
|
||||
":x=%(x)s:y=%(y)s:fontcolor="
|
||||
"%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'")
|
||||
|
||||
|
||||
# Valid alignment parameters.
|
||||
TOP_CENTERED = 'top_centered'
|
||||
BOTTOM_CENTERED = 'bottom_centered'
|
||||
TOP_LEFT = 'top_left'
|
||||
BOTTOM_LEFT = 'bottom_left'
|
||||
TOP_RIGHT = 'top_right'
|
||||
BOTTOM_RIGHT = 'bottom_right'
|
||||
|
||||
|
||||
class Options(dict):
|
||||
"""
|
||||
Base options class.
|
||||
"""
|
||||
_params = {
|
||||
'opacity': 1,
|
||||
'x_offset': 0,
|
||||
'y_offset': 0,
|
||||
'font': FONT,
|
||||
'font_size': FONT_SIZE,
|
||||
'bg_color': BG_COLOR,
|
||||
'bg_padding': BG_PADDING,
|
||||
'font_color': FONT_COLOR
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(Options, self).__init__()
|
||||
params = self._params.copy()
|
||||
params.update(kwargs)
|
||||
super(Options, self).update(**params)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key not in self._params:
|
||||
raise KeyError("Not a valid option key '%s'" % key)
|
||||
super(Options, self).update({key: value})
|
||||
|
||||
|
||||
class FrameNumberOptions(Options):
|
||||
"""
|
||||
:key int frame_offset: offset the frame numbers
|
||||
:key float opacity: opacity value (0-1)
|
||||
:key str expression: expression that would be used instead of text
|
||||
:key bool x_offset: X position offset
|
||||
(does not apply to centered alignments)
|
||||
:key bool y_offset: Y position offset
|
||||
:key str font: path to the font file
|
||||
:key int font_size: size to render the font in
|
||||
:key str bg_color: background color of the box
|
||||
:key int bg_padding: padding between the font and box
|
||||
:key str font_color: color to render
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self._params.update({
|
||||
'frame_offset': 0,
|
||||
'expression': None
|
||||
})
|
||||
super(FrameNumberOptions, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class TextOptions(Options):
|
||||
"""
|
||||
:key float opacity: opacity value (0-1)
|
||||
:key str expression: expression that would be used instead of text
|
||||
:key bool x_offset: X position offset
|
||||
(does not apply to centered alignments)
|
||||
:key bool y_offset: Y position offset
|
||||
:key str font: path to the font file
|
||||
:key int font_size: size to render the font in
|
||||
:key str bg_color: background color of the box
|
||||
:key int bg_padding: padding between the font and box
|
||||
:key str font_color: color to render
|
||||
"""
|
||||
|
||||
|
||||
class TimeCodeOptions(Options):
|
||||
"""
|
||||
:key int frame_offset: offset the frame numbers
|
||||
:key float fps: frame rate to calculate the timecode by
|
||||
:key float opacity: opacity value (0-1)
|
||||
:key bool x_offset: X position offset
|
||||
(does not apply to centered alignments)
|
||||
:key bool y_offset: Y position offset
|
||||
:key str font: path to the font file
|
||||
:key int font_size: size to render the font in
|
||||
:key str bg_color: background color of the box
|
||||
:key int bg_padding: padding between the font and box
|
||||
:key str font_color: color to render
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self._params.update({
|
||||
'frame_offset': 0,
|
||||
'fps': 24
|
||||
})
|
||||
super(TimeCodeOptions, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class Burnins(object):
|
||||
"""
|
||||
Class that provides convenience API for building filter
|
||||
flags for the FFMPEG command.
|
||||
"""
|
||||
|
||||
def __init__(self, source, streams=None):
|
||||
"""
|
||||
:param str source: source media file
|
||||
:param [] streams: ffprobe stream data if parsed as a pre-process
|
||||
"""
|
||||
self.source = source
|
||||
self.filters = {
|
||||
'drawtext': []
|
||||
}
|
||||
self._streams = streams or _streams(self.source)
|
||||
|
||||
def __repr__(self):
|
||||
return '<Overlayout - %s>' % os.path.basename(self.source)
|
||||
|
||||
@property
|
||||
def start_frame(self):
|
||||
"""
|
||||
:rtype: int
|
||||
"""
|
||||
start_time = float(self._video_stream['start_time'])
|
||||
return round(start_time * self.frame_rate)
|
||||
|
||||
@property
|
||||
def end_frame(self):
|
||||
"""
|
||||
:rtype: int
|
||||
"""
|
||||
end_time = float(self._video_stream['duration'])
|
||||
return round(end_time * self.frame_rate)
|
||||
|
||||
@property
|
||||
def frame_rate(self):
|
||||
"""
|
||||
:rtype: int
|
||||
"""
|
||||
data = self._video_stream
|
||||
tokens = data['r_frame_rate'].split('/')
|
||||
return int(tokens[0]) / int(tokens[1])
|
||||
|
||||
@property
|
||||
def _video_stream(self):
|
||||
video_stream = None
|
||||
for each in self._streams:
|
||||
if each.get('codec_type') == 'video':
|
||||
video_stream = each
|
||||
break
|
||||
else:
|
||||
raise RuntimeError("Failed to locate video stream "
|
||||
"from '%s'" % self.source)
|
||||
return video_stream
|
||||
|
||||
@property
|
||||
def resolution(self):
|
||||
"""
|
||||
:rtype: (int, int)
|
||||
"""
|
||||
data = self._video_stream
|
||||
return data['width'], data['height']
|
||||
|
||||
@property
|
||||
def filter_string(self):
|
||||
"""
|
||||
Generates the filter string that would be applied
|
||||
to the `-vf` argument
|
||||
|
||||
:rtype: str
|
||||
"""
|
||||
return ','.join(self.filters['drawtext'])
|
||||
|
||||
def add_timecode(self, align, options=None):
|
||||
"""
|
||||
Convenience method to create the timecode expression.
|
||||
|
||||
:param enum align: alignment, must use provided enum flags
|
||||
:param dict options: recommended to use TimeCodeOptions
|
||||
"""
|
||||
options = options or TimeCodeOptions()
|
||||
timecode = _frames_to_timecode(options['frame_offset'],
|
||||
self.frame_rate)
|
||||
options = options.copy()
|
||||
if not options.get('fps'):
|
||||
options['fps'] = self.frame_rate
|
||||
self._add_burnin(timecode.replace(':', r'\:'),
|
||||
align,
|
||||
options,
|
||||
TIMECODE)
|
||||
|
||||
def add_frame_numbers(self, align, options=None):
|
||||
"""
|
||||
Convenience method to create the frame number expression.
|
||||
|
||||
:param enum align: alignment, must use provided enum flags
|
||||
:param dict options: recommended to use FrameNumberOptions
|
||||
"""
|
||||
options = options or FrameNumberOptions()
|
||||
options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset']
|
||||
text = str(int(self.end_frame + options['frame_offset']))
|
||||
self._add_burnin(text, align, options, DRAWTEXT)
|
||||
|
||||
def add_text(self, text, align, options=None):
|
||||
"""
|
||||
Adding static text to a filter.
|
||||
|
||||
:param str text: text to apply to the drawtext
|
||||
:param enum align: alignment, must use provided enum flags
|
||||
:param dict options: recommended to use TextOptions
|
||||
"""
|
||||
options = options or TextOptions()
|
||||
self._add_burnin(text, align, options, DRAWTEXT)
|
||||
|
||||
def _add_burnin(self, text, align, options, draw):
|
||||
"""
|
||||
Generic method for building the filter flags.
|
||||
|
||||
:param str text: text to apply to the drawtext
|
||||
:param enum align: alignment, must use provided enum flags
|
||||
:param dict options:
|
||||
"""
|
||||
resolution = self.resolution
|
||||
data = {
|
||||
'text': options.get('expression') or text,
|
||||
'color': options['font_color'],
|
||||
'size': options['font_size']
|
||||
}
|
||||
data.update(options)
|
||||
data.update(_drawtext(align, resolution, text, options))
|
||||
if 'font' in data and _is_windows():
|
||||
data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
|
||||
data['font'] = data['font'].replace(':', r'\:')
|
||||
self.filters['drawtext'].append(draw % data)
|
||||
|
||||
if options.get('bg_color') is not None:
|
||||
box = BOX % {
|
||||
'border': options['bg_padding'],
|
||||
'color': options['bg_color'],
|
||||
'opacity': options['opacity']
|
||||
}
|
||||
self.filters['drawtext'][-1] += ':%s' % box
|
||||
|
||||
def command(self, output=None, args=None, overwrite=False):
|
||||
"""
|
||||
Generate the entire FFMPEG command.
|
||||
|
||||
:param str output: output file
|
||||
:param str args: additional FFMPEG arguments
|
||||
:param bool overwrite: overwrite the output if it exists
|
||||
:returns: completed command
|
||||
:rtype: str
|
||||
"""
|
||||
output = output or ''
|
||||
if overwrite:
|
||||
output = '-y %s' % output
|
||||
return (FFMPEG % {
|
||||
'input': self.source,
|
||||
'output': output,
|
||||
'args': '%s ' % args if args else '',
|
||||
'filters': '-vf "%s"' % self.filter_string
|
||||
}).strip()
|
||||
|
||||
def render(self, output, args=None, overwrite=False):
|
||||
"""
|
||||
Render the media to a specified destination.
|
||||
|
||||
:param str output: output file
|
||||
:param str args: additional FFMPEG arguments
|
||||
:param bool overwrite: overwrite the output if it exists
|
||||
"""
|
||||
if not overwrite and os.path.exists(output):
|
||||
raise RuntimeError("Destination '%s' exists, please "
|
||||
"use overwrite" % output)
|
||||
command = self.command(output=output,
|
||||
args=args,
|
||||
overwrite=overwrite)
|
||||
proc = Popen(command, shell=True)
|
||||
proc.communicate()
|
||||
if proc.returncode != 0:
|
||||
raise RuntimeError("Failed to render '%s': %s'"
|
||||
% (output, command))
|
||||
if not os.path.exists(output):
|
||||
raise RuntimeError("Failed to generate '%s'" % output)
|
||||
|
||||
|
||||
def _streams(source):
|
||||
"""
|
||||
:param str source: source media file
|
||||
:rtype: [{}, ...]
|
||||
"""
|
||||
command = FFPROBE % {'source': source}
|
||||
proc = Popen(command, shell=True, stdout=PIPE)
|
||||
out = proc.communicate()[0]
|
||||
if proc.returncode != 0:
|
||||
raise RuntimeError("Failed to run: %s" % command)
|
||||
return json.loads(out)['streams']
|
||||
|
||||
|
||||
def _drawtext(align, resolution, text, options):
|
||||
"""
|
||||
:rtype: {'x': int, 'y': int}
|
||||
"""
|
||||
x_pos = '0'
|
||||
if align in (TOP_CENTERED, BOTTOM_CENTERED):
|
||||
x_pos = 'w/2-tw/2'
|
||||
elif align in (TOP_RIGHT, BOTTOM_RIGHT):
|
||||
ifont = ImageFont.truetype(options['font'],
|
||||
options['font_size'])
|
||||
box_size = ifont.getsize(text)
|
||||
x_pos = resolution[0] - (box_size[0] + options['x_offset'])
|
||||
elif align in (TOP_LEFT, BOTTOM_LEFT):
|
||||
x_pos = options['x_offset']
|
||||
|
||||
if align in (TOP_CENTERED,
|
||||
TOP_RIGHT,
|
||||
TOP_LEFT):
|
||||
y_pos = '%d' % options['y_offset']
|
||||
else:
|
||||
y_pos = 'h-text_h-%d' % (options['y_offset'])
|
||||
return {'x': x_pos, 'y': y_pos}
|
||||
|
||||
|
||||
def _frames_to_timecode(frames, framerate):
|
||||
return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format(
|
||||
int(frames / (3600 * framerate)),
|
||||
int(frames / (60 * framerate) % 60),
|
||||
int(frames / framerate % 60),
|
||||
int(frames % framerate))
|
||||
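A minimal usage sketch of the Burnins helper above, assuming ffmpeg and ffprobe are on PATH and that "input.mov" exists; every path and value is illustrative.

# Hypothetical usage of the Burnins class defined above.
burnins = Burnins("input.mov")
burnins.add_text("SHOT 010", TOP_LEFT, options=TextOptions(font_size=24))
burnins.add_frame_numbers(BOTTOM_RIGHT, options=FrameNumberOptions(frame_offset=1001))
burnins.add_timecode(BOTTOM_LEFT)

# command() only assembles the ffmpeg invocation; render() actually runs it.
print(burnins.command(output="burned.mov", overwrite=True))
# burnins.render("burned.mov", overwrite=True)

As a worked example of the helper at the end of the file, _frames_to_timecode(90000, 24) evaluates to "01:02:30:00".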
1781
pype/vendor/python/python_2/opentimelineio_contrib/adapters/hls_playlist.py
vendored
Normal file
File diff suppressed because it is too large
132
pype/vendor/python/python_2/opentimelineio_contrib/adapters/maya_sequencer.py
vendored
Normal file
|
|
@ -0,0 +1,132 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""Maya Sequencer Adapter Harness"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from .. import adapters
|
||||
|
||||
|
||||
def write_to_file(input_otio, filepath):
|
||||
if "OTIO_MAYA_PYTHON_BIN" not in os.environ:
|
||||
raise RuntimeError(
|
||||
"'OTIO_MAYA_PYTHON_BIN' not set, please set this to path to "
|
||||
"mayapy within the Maya installation."
|
||||
)
|
||||
maya_python_path = os.environ["OTIO_MAYA_PYTHON_BIN"]
|
||||
if not os.path.exists(maya_python_path):
|
||||
raise RuntimeError(
|
||||
'Cannot access file at OTIO_MAYA_PYTHON_BIN: "{}"'.format(
|
||||
maya_python_path
|
||||
)
|
||||
)
|
||||
if os.path.isdir(maya_python_path):
|
||||
raise RuntimeError(
|
||||
"OTIO_MAYA_PYTHON_BIN contains a path to a directory, not to an "
|
||||
"executable file: {}".format(maya_python_path)
|
||||
)
|
||||
|
||||
input_data = adapters.write_to_string(input_otio, "otio_json")
|
||||
|
||||
os.environ['PYTHONPATH'] = (
|
||||
os.pathsep.join(
|
||||
[
|
||||
os.environ.setdefault('PYTHONPATH', ''),
|
||||
os.path.dirname(__file__)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
proc = subprocess.Popen(
|
||||
[
|
||||
os.environ["OTIO_MAYA_PYTHON_BIN"],
|
||||
'-m',
|
||||
'extern_maya_sequencer',
|
||||
'write',
|
||||
filepath
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
env=os.environ
|
||||
)
|
||||
proc.stdin.write(input_data)
|
||||
out, err = proc.communicate()
|
||||
|
||||
if proc.returncode:
|
||||
raise RuntimeError(
|
||||
"ERROR: extern_maya_sequencer (called through the maya sequencer "
|
||||
"file adapter) failed. stderr output: " + err
|
||||
)
|
||||
|
||||
|
||||
def read_from_file(filepath):
|
||||
if "OTIO_MAYA_PYTHON_BIN" not in os.environ:
|
||||
raise RuntimeError(
|
||||
"'OTIO_MAYA_PYTHON_BIN' not set, please set this to path to "
|
||||
"mayapy within the Maya installation."
|
||||
)
|
||||
|
||||
os.environ['PYTHONPATH'] = (
|
||||
os.pathsep.join(
|
||||
[
|
||||
os.environ.setdefault('PYTHONPATH', ''),
|
||||
os.path.dirname(__file__)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
proc = subprocess.Popen(
|
||||
[
|
||||
os.environ["OTIO_MAYA_PYTHON_BIN"],
|
||||
'-m',
|
||||
'extern_maya_sequencer',
|
||||
'read',
|
||||
filepath
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
env=os.environ
|
||||
)
|
||||
out, err = proc.communicate()
|
||||
|
||||
# maya probably puts a bunch of crap on the stdout
|
||||
sentinel_str = "OTIO_JSON_BEGIN\n"
|
||||
end_sentinel_str = "\nOTIO_JSON_END\n"
|
||||
start = out.find(sentinel_str)
|
||||
end = out.find(end_sentinel_str)
|
||||
result = adapters.read_from_string(
|
||||
out[start + len(sentinel_str):end],
|
||||
"otio_json"
|
||||
)
|
||||
|
||||
if proc.returncode:
|
||||
raise RuntimeError(
|
||||
"ERROR: extern_maya_sequencer (called through the maya sequencer "
|
||||
"file adapter) failed. stderr output: " + err
|
||||
)
|
||||
return result
|
||||
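The harness above is normally reached through the adapter registry via the ".ma"/".mb" suffixes in the manifest earlier in this diff. A hedged sketch of that use (the mayapy path and scene paths are placeholders):

import os
import opentimelineio as otio

# Example path only; point this at a real mayapy.
os.environ["OTIO_MAYA_PYTHON_BIN"] = "/usr/autodesk/maya/bin/mayapy"

# Reading builds a Timeline from the scene's camera sequencer shots...
timeline = otio.adapters.read_from_file("/path/to/scene.ma")

# ...and writing builds shot nodes and saves a new mayaAscii scene.
otio.adapters.write_to_file(timeline, "/path/to/out.ma")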
84
pype/vendor/python/python_2/opentimelineio_contrib/adapters/rv.py
vendored
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
#
|
||||
# Copyright 2017 Pixar Animation Studios
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""RvSession Adapter harness"""
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import copy
|
||||
|
||||
from .. import adapters
|
||||
|
||||
|
||||
def write_to_file(input_otio, filepath):
|
||||
if "OTIO_RV_PYTHON_BIN" not in os.environ:
|
||||
raise RuntimeError(
|
||||
"'OTIO_RV_PYTHON_BIN' not set, please set this to path to "
|
||||
"py-interp within the RV installation."
|
||||
)
|
||||
|
||||
if "OTIO_RV_PYTHON_LIB" not in os.environ:
|
||||
raise RuntimeError(
|
||||
"'OTIO_RV_PYTHON_LIB' not set, please set this to path to python "
|
||||
"directory within the RV installation."
|
||||
)
|
||||
|
||||
input_data = adapters.write_to_string(input_otio, "otio_json")
|
||||
|
||||
base_environment = copy.deepcopy(os.environ)
|
||||
|
||||
base_environment['PYTHONPATH'] = (
|
||||
os.pathsep.join(
|
||||
[
|
||||
base_environment.setdefault('PYTHONPATH', ''),
|
||||
os.path.dirname(__file__)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
proc = subprocess.Popen(
|
||||
[
|
||||
base_environment["OTIO_RV_PYTHON_BIN"],
|
||||
'-m',
|
||||
'extern_rv',
|
||||
filepath
|
||||
],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
env=base_environment
|
||||
)
|
||||
proc.stdin.write(input_data)
|
||||
out, err = proc.communicate()
|
||||
|
||||
if out.strip():
|
||||
print("stdout: {}".format(out))
|
||||
if err.strip():
|
||||
print("stderr: {}".format(err))
|
||||
|
||||
if proc.returncode:
|
||||
raise RuntimeError(
|
||||
"ERROR: extern_rv (called through the rv session file adapter) "
|
||||
"failed. stderr output: " + err
|
||||
)
|
||||
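Likewise, the rv_session adapter above only shells out to extern_rv. A minimal sketch of driving it, with placeholder paths for a local RV install:

import os
import opentimelineio as otio

# Both variables are required by write_to_file() above; example paths only.
os.environ["OTIO_RV_PYTHON_BIN"] = "/opt/rv/bin/py-interp"
os.environ["OTIO_RV_PYTHON_LIB"] = "/opt/rv/src/python"

# The ".rv" suffix resolves to the rv_session entry in the manifest above.
timeline = otio.schema.Timeline(name="review")
otio.adapters.write_to_file(timeline, "/tmp/review_session.rv")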
819
pype/vendor/python/python_2/opentimelineio_contrib/adapters/xges.py
vendored
Normal file
|
|
@ -0,0 +1,819 @@
|
|||
#
|
||||
# Copyright (C) 2019 Igalia S.L
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "Apache License")
|
||||
# with the following modification; you may not use this file except in
|
||||
# compliance with the Apache License and the following modification to it:
|
||||
# Section 6. Trademarks. is deleted and replaced with:
|
||||
#
|
||||
# 6. Trademarks. This License does not grant permission to use the trade
|
||||
# names, trademarks, service marks, or product names of the Licensor
|
||||
# and its affiliates, except as required to comply with Section 4(c) of
|
||||
# the License and to reproduce the content of the NOTICE file.
|
||||
#
|
||||
# You may obtain a copy of the Apache License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the Apache License with the above modification is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the Apache License for the specific
|
||||
# language governing permissions and limitations under the Apache License.
|
||||
#
|
||||
|
||||
"""OpenTimelineIO GStreamer Editing Services XML Adapter. """
|
||||
import re
|
||||
import unittest
|
||||
|
||||
from decimal import Decimal
|
||||
from fractions import Fraction
|
||||
from xml.etree import cElementTree
|
||||
from xml.dom import minidom
|
||||
import opentimelineio as otio
|
||||
|
||||
META_NAMESPACE = "XGES"
|
||||
|
||||
|
||||
FRAMERATE_FRAMEDURATION = {23.98: "24000/1001",
|
||||
24: "600/25",
|
||||
25: "25/1",
|
||||
29.97: "30000/1001",
|
||||
30: "30/1",
|
||||
50: "50/1",
|
||||
59.94: "60000/1001",
|
||||
60: "60/1"}
|
||||
|
||||
|
||||
TRANSITION_MAP = {
|
||||
"crossfade": otio.schema.TransitionTypes.SMPTE_Dissolve
|
||||
}
|
||||
# Two way map
|
||||
TRANSITION_MAP.update(dict([(v, k) for k, v in TRANSITION_MAP.items()]))
|
||||
|
||||
|
||||
class GstParseError(otio.exceptions.OTIOError):
|
||||
pass
|
||||
|
||||
|
||||
class GstStructure(object):
|
||||
"""
|
||||
GstStructure parser with a "dictionary" like API.
|
||||
"""
|
||||
UNESCAPE = re.compile(r'(?<!\\)\\(.)')
|
||||
INT_TYPES = "".join(
|
||||
("int", "uint", "int8", "uint8", "int16",
|
||||
"uint16", "int32", "uint32", "int64", "uint64")
|
||||
)
|
||||
|
||||
def __init__(self, text):
|
||||
self.text = text
|
||||
self.modified = False
|
||||
self.name, self.types, self.values = GstStructure._parse(text + ";")
|
||||
|
||||
def __repr__(self):
|
||||
if not self.modified:
|
||||
return self.text
|
||||
|
||||
res = self.name
|
||||
for key, value in self.values.items():
|
||||
value_type = self.types[key]
|
||||
res += ', %s=(%s)"%s"' % (key, value_type, self.escape(value))
|
||||
res += ';'
|
||||
|
||||
return res
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self.values[key]
|
||||
|
||||
def set(self, key, value_type, value):
|
||||
if self.types.get(key) == value_type and self.values.get(key) == value:
|
||||
return
|
||||
|
||||
self.modified = True
|
||||
self.types[key] = value_type
|
||||
self.values[key] = value
|
||||
|
||||
def get(self, key, default=None):
|
||||
return self.values.get(key, default)
|
||||
|
||||
@staticmethod
|
||||
def _find_eos(s):
|
||||
# find next '"' without preceeding '\'
|
||||
line = 0
|
||||
while 1: # faster than regexp for '[^\\]\"'
|
||||
p = s.index('"')
|
||||
line += p + 1
|
||||
if s[p - 1] != '\\':
|
||||
return line
|
||||
s = s[(p + 1):]
|
||||
return -1
|
||||
|
||||
@staticmethod
|
||||
def escape(s):
|
||||
# XXX: The unicode type doesn't exist in Python 3 (all strings are unicode)
|
||||
# so we have to use type(u"") which works in both Python 2 and 3.
|
||||
if type(s) not in (str, type(u"")):
|
||||
return s
|
||||
return s.replace(" ", "\\ ")
|
||||
|
||||
@staticmethod
|
||||
def _parse(s):
|
||||
in_string = s
|
||||
types = {}
|
||||
values = {}
|
||||
scan = True
|
||||
# parse id
|
||||
p = s.find(',')
|
||||
if p == -1:
|
||||
try:
|
||||
p = s.index(';')
|
||||
except ValueError:
|
||||
p = len(s)
|
||||
scan = False
|
||||
name = s[:p]
|
||||
# parse fields
|
||||
while scan:
|
||||
comma_space_it = p
|
||||
# skip 'name, ' / 'value, '
|
||||
while s[comma_space_it] in [' ', ',']:
|
||||
comma_space_it += 1
|
||||
s = s[comma_space_it:]
|
||||
p = s.index('=')
|
||||
k = s[:p]
|
||||
if not s[p + 1] == '(':
|
||||
raise ValueError("In %s position: %d" % (in_string, p))
|
||||
s = s[(p + 2):] # skip 'key=('
|
||||
p = s.index(')')
|
||||
t = s[:p]
|
||||
s = s[(p + 1):] # skip 'type)'
|
||||
|
||||
if s[0] == '"':
|
||||
s = s[1:] # skip '"'
|
||||
p = GstStructure._find_eos(s)
|
||||
if p == -1:
|
||||
raise ValueError
|
||||
v = s[:(p - 1)]
|
||||
if s[p] == ';':
|
||||
scan = False
|
||||
# unescape \., but not \\. (using a backref)
|
||||
# need a reverse for re.escape()
|
||||
v = v.replace('\\\\', '\\')
|
||||
v = GstStructure.UNESCAPE.sub(r'\1', v)
|
||||
else:
|
||||
p = s.find(',')
|
||||
if p == -1:
|
||||
p = s.index(';')
|
||||
scan = False
|
||||
v = s[:p]
|
||||
|
||||
if t == 'structure':
|
||||
v = GstStructure(v)
|
||||
elif t == 'string' and len(v) and v[0] == '"':
|
||||
v = v[1:-1]
|
||||
elif t == 'boolean':
|
||||
v = (v == '1')
|
||||
elif t in GstStructure.INT_TYPES:
|
||||
v = int(v)
|
||||
types[k] = t
|
||||
values[k] = v
|
||||
|
||||
return (name, types, values)
|
||||
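# Illustrative only: a hedged example of how GstStructure above parses a
# serialized GStreamer structure; the field names and values are made up.
#
#     st = GstStructure('clip, name=(string)"shot_01", duration=(uint64)2000000000')
#     st.name             # -> "clip"
#     st["name"]          # -> "shot_01"   (quotes stripped from string fields)
#     st.get("duration")  # -> 2000000000  (integer types converted to int)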
|
||||
|
||||
class GESTrackType:
|
||||
UNKNOWN = 1 << 0
|
||||
AUDIO = 1 << 1
|
||||
VIDEO = 1 << 2
|
||||
TEXT = 1 << 3
|
||||
CUSTOM = 1 << 4
|
||||
|
||||
@staticmethod
|
||||
def to_otio_type(_type):
|
||||
if _type == GESTrackType.AUDIO:
|
||||
return otio.schema.TrackKind.Audio
|
||||
elif _type == GESTrackType.VIDEO:
|
||||
return otio.schema.TrackKind.Video
|
||||
|
||||
raise GstParseError("Can't translate track type %s" % _type)
|
||||
|
||||
|
||||
GST_CLOCK_TIME_NONE = 18446744073709551615
|
||||
GST_SECOND = 1000000000
|
||||
|
||||
|
||||
def to_gstclocktime(rational_time):
|
||||
"""
|
||||
This converts a RationalTime object to a GstClockTime
|
||||
|
||||
Args:
|
||||
rational_time (RationalTime): This is a RationalTime object
|
||||
|
||||
Returns:
|
||||
int: A time in nanosecond
|
||||
"""
|
||||
|
||||
return int(rational_time.value_rescaled_to(1) * GST_SECOND)
|
||||
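# Hedged worked example: 48 frames at 24 fps is 2 seconds, so
# to_gstclocktime(otio.opentime.RationalTime(48, 24)) returns
# 2 * GST_SECOND == 2000000000 nanoseconds.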
|
||||
|
||||
def get_from_structure(xmlelement, fieldname, default=None, attribute="properties"):
|
||||
structure = GstStructure(xmlelement.get(attribute, attribute))
|
||||
return structure.get(fieldname, default)
|
||||
|
||||
|
||||
class XGES:
|
||||
"""
|
||||
This object is responsible for knowing how to convert an xGES
|
||||
project into an otio timeline
|
||||
"""
|
||||
|
||||
def __init__(self, xml_string):
|
||||
self.xges_xml = cElementTree.fromstring(xml_string)
|
||||
self.rate = 25
|
||||
|
||||
def _set_rate_from_timeline(self, timeline):
|
||||
metas = GstStructure(timeline.attrib.get("metadatas", "metadatas"))
|
||||
framerate = metas.get("framerate")
|
||||
if framerate:
|
||||
rate = Fraction(framerate)
|
||||
else:
|
||||
video_track = timeline.find("./track[@track-type='4']")
|
||||
rate = None
|
||||
if video_track is not None:
|
||||
properties = GstStructure(
|
||||
video_track.get("properties", "properties;"))
|
||||
restriction_caps = GstStructure(properties.get(
|
||||
"restriction-caps", "restriction-caps"))
|
||||
rate = restriction_caps.get("framerate")
|
||||
|
||||
if rate is None:
|
||||
return
|
||||
|
||||
self.rate = float(Fraction(rate))
|
||||
if self.rate == int(self.rate):
|
||||
self.rate = int(self.rate)
|
||||
else:
|
||||
self.rate = float(round(Decimal(self.rate), 2))
|
||||
|
||||
def to_rational_time(self, ns_timestamp):
|
||||
"""
|
||||
This converts a GstClockTime value to an otio RationalTime object
|
||||
|
||||
Args:
|
||||
ns_timestamp (int): This is a GstClockTime value (nanosecond absolute value)
|
||||
|
||||
Returns:
|
||||
RationalTime: A RationalTime object
|
||||
"""
|
||||
return otio.opentime.RationalTime(round(int(ns_timestamp) /
|
||||
(GST_SECOND / self.rate)), self.rate)
|
||||
|
||||
def to_otio(self):
|
||||
"""
|
||||
Convert an xges to an otio
|
||||
|
||||
Returns:
|
||||
OpenTimeline: An OpenTimeline Timeline object
|
||||
"""
|
||||
|
||||
project = self.xges_xml.find("./project")
|
||||
metas = GstStructure(project.attrib.get("metadatas", "metadatas"))
|
||||
otio_project = otio.schema.SerializableCollection(
|
||||
name=metas.get('name'),
|
||||
metadata={
|
||||
META_NAMESPACE: {"metadatas": project.attrib.get(
|
||||
"metadatas", "metadatas")}
|
||||
}
|
||||
)
|
||||
timeline = project.find("./timeline")
|
||||
self._set_rate_from_timeline(timeline)
|
||||
|
||||
otio_timeline = otio.schema.Timeline(
|
||||
name=metas.get('name', "unnamed"),
|
||||
metadata={
|
||||
META_NAMESPACE: {
|
||||
"metadatas": timeline.attrib.get("metadatas", "metadatas"),
|
||||
"properties": timeline.attrib.get("properties", "properties")
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
all_names = set()
|
||||
self._add_layers(timeline, otio_timeline, all_names)
|
||||
otio_project.append(otio_timeline)
|
||||
|
||||
return otio_project
|
||||
|
||||

    def _add_layers(self, timeline, otio_timeline, all_names):
        for layer in timeline.findall("./layer"):
            tracks = self._build_tracks_from_layer_clips(layer, all_names)
            otio_timeline.tracks.extend(tracks)

    def _get_clips_for_type(self, clips, track_type):
        if not clips:
            return False

        clips_for_type = []
        for clip in clips:
            if int(clip.attrib['track-types']) & track_type:
                clips_for_type.append(clip)

        return clips_for_type

    def _build_tracks_from_layer_clips(self, layer, all_names):
        all_clips = layer.findall('./clip')

        tracks = []
        for track_type in [GESTrackType.VIDEO, GESTrackType.AUDIO]:
            clips = self._get_clips_for_type(all_clips, track_type)
            if not clips:
                continue

            track = otio.schema.Track()
            track.kind = GESTrackType.to_otio_type(track_type)
            self._add_clips_in_track(clips, track, all_names)

            tracks.append(track)

        return tracks

    def _add_clips_in_track(self, clips, track, all_names):
        for clip in clips:
            otio_clip = self._create_otio_clip(clip, all_names)
            if otio_clip is None:
                continue

            clip_offset = self.to_rational_time(int(clip.attrib['start']))
            if clip_offset > track.duration():
                track.append(
                    self._create_otio_gap(
                        0,
                        (clip_offset - track.duration())
                    )
                )

            track.append(otio_clip)

        return track

    def _get_clip_name(self, clip, all_names):
        i = 0
        tmpname = name = clip.get("name", GstStructure(
            clip.get("properties", "properties;")).get("name"))
        while True:
            if tmpname not in all_names:
                all_names.add(tmpname)
                return tmpname

            i += 1
            tmpname = name + '_%d' % i

    def _create_otio_transition(self, clip, all_names):
        start = self.to_rational_time(clip.attrib["start"])
        end = start + self.to_rational_time(clip.attrib["duration"])
        cut_point = otio.opentime.RationalTime((end.value - start.value) / 2,
                                               start.rate)

        return otio.schema.Transition(
            name=self._get_clip_name(clip, all_names),
            transition_type=TRANSITION_MAP.get(
                clip.attrib["asset-id"], otio.schema.TransitionTypes.Custom
            ),
            in_offset=cut_point,
            out_offset=cut_point,
        )

    def _create_otio_uri_clip(self, clip, all_names):
        source_range = otio.opentime.TimeRange(
            start_time=self.to_rational_time(clip.attrib["inpoint"]),
            duration=self.to_rational_time(clip.attrib["duration"]),
        )

        otio_clip = otio.schema.Clip(
            name=self._get_clip_name(clip, all_names),
            source_range=source_range,
            media_reference=self._reference_from_id(
                clip.get("asset-id"), clip.get("type-name")),
        )

        return otio_clip

    def _create_otio_clip(self, clip, all_names):
        otio_clip = None

        if clip.get("type-name") == "GESTransitionClip":
            otio_clip = self._create_otio_transition(clip, all_names)
        elif clip.get("type-name") == "GESUriClip":
            otio_clip = self._create_otio_uri_clip(clip, all_names)

        if otio_clip is None:
            print("Could not represent: %s" % clip.attrib)
            return None

        otio_clip.metadata[META_NAMESPACE] = {
            "properties": clip.get("properties", "properties;"),
            "metadatas": clip.get("metadatas", "metadatas;"),
        }

        return otio_clip

    def _create_otio_gap(self, start, duration):
        source_range = otio.opentime.TimeRange(
            start_time=otio.opentime.RationalTime(start),
            duration=duration
        )
        return otio.schema.Gap(source_range=source_range)

    def _reference_from_id(self, asset_id, asset_type="GESUriClip"):
        asset = self._asset_by_id(asset_id, asset_type)
        if asset is None:
            return None
        if not asset.get("id", ""):
            return otio.schema.MissingReference()

        duration = GST_CLOCK_TIME_NONE
        if asset_type == "GESUriClip":
            duration = get_from_structure(asset, "duration", duration)

        available_range = otio.opentime.TimeRange(
            start_time=self.to_rational_time(0),
            duration=self.to_rational_time(duration)
        )
        ref = otio.schema.ExternalReference(
            target_url=asset.get("id"),
            available_range=available_range
        )

        ref.metadata[META_NAMESPACE] = {
            "properties": asset.get("properties"),
            "metadatas": asset.get("metadatas"),
        }

        return ref

    # --------------------
    # search helpers
    # --------------------
    def _asset_by_id(self, asset_id, asset_type):
        return self.xges_xml.find(
            "./project/ressources/asset[@id='{}'][@extractable-type-name='{}']".format(
                asset_id, asset_type)
        )

    def _timeline_element_by_name(self, timeline, name):
        for clip in timeline.findall("./layer/clip"):
            if get_from_structure(clip, 'name') == name:
                return clip

        return None


class XGESOtio:

    def __init__(self, input_otio):
        self.container = input_otio
        self.rate = 25

    def _insert_new_sub_element(self, into_parent, tag, attrib=None, text=''):
        elem = cElementTree.SubElement(into_parent, tag, **attrib or {})
        elem.text = text
        return elem

    def _get_element_properties(self, element):
        return element.metadata.get(META_NAMESPACE, {}).get("properties", "properties;")

    def _get_element_metadatas(self, element):
        return element.metadata.get(META_NAMESPACE,
                                    {"GES": {}}).get("metadatas", "metadatas;")

    def _serialize_ressource(self, ressources, ressource, asset_type):
        if isinstance(ressource, otio.schema.MissingReference):
            return

        if ressources.find("./asset[@id='%s'][@extractable-type-name='%s']" % (
                ressource.target_url, asset_type)) is not None:
            return

        properties = GstStructure(self._get_element_properties(ressource))
        if properties.get('duration') is None:
            properties.set('duration', 'guin64',
                           to_gstclocktime(ressource.available_range.duration))

        self._insert_new_sub_element(
            ressources, 'asset',
            attrib={
                "id": ressource.target_url,
                "extractable-type-name": 'GESUriClip',
                "properties": str(properties),
                "metadatas": self._get_element_metadatas(ressource),
            }
        )

    def _get_transition_times(self, offset, otio_transition):
        rational_offset = otio.opentime.RationalTime(
            round(int(offset) / (GST_SECOND / self.rate)),
            self.rate
        )
        start = rational_offset - otio_transition.in_offset
        end = rational_offset + otio_transition.out_offset

        return 0, to_gstclocktime(start), to_gstclocktime(end - start)

    def _serialize_clip(
            self,
            otio_track,
            layer,
            layer_priority,
            ressources,
            otio_clip,
            clip_id,
            offset
    ):

        # FIXME - Figure out a proper way to determine clip type!
        asset_id = "GESTitleClip"
        asset_type = "GESTitleClip"

        if isinstance(otio_clip, otio.schema.Transition):
            asset_type = "GESTransitionClip"
            asset_id = TRANSITION_MAP.get(otio_clip.transition_type, "crossfade")
            inpoint, offset, duration = self._get_transition_times(offset, otio_clip)
        else:
            inpoint = to_gstclocktime(otio_clip.source_range.start_time)
            duration = to_gstclocktime(otio_clip.source_range.duration)

            if not isinstance(otio_clip.media_reference, otio.schema.MissingReference):
                asset_id = otio_clip.media_reference.target_url
                asset_type = "GESUriClip"

            self._serialize_ressource(ressources, otio_clip.media_reference,
                                      asset_type)

        if otio_track.kind == otio.schema.TrackKind.Audio:
            track_types = GESTrackType.AUDIO
        elif otio_track.kind == otio.schema.TrackKind.Video:
            track_types = GESTrackType.VIDEO
        else:
            raise ValueError("Unhandled track type: %s" % otio_track.kind)

        properties = otio_clip.metadata.get(
            META_NAMESPACE,
            {
                "properties": 'properties, name=(string)"%s"' % (
                    GstStructure.escape(otio_clip.name)
                )
            }).get("properties")
        return self._insert_new_sub_element(
            layer, 'clip',
            attrib={
                "id": str(clip_id),
                "properties": properties,
                "asset-id": str(asset_id),
                "type-name": str(asset_type),
                "track-types": str(track_types),
                "layer-priority": str(layer_priority),
                "start": str(offset),
                "rate": '0',
                "inpoint": str(inpoint),
                "duration": str(duration),
                "metadatas": self._get_element_metadatas(otio_clip),
            }
        )

    def _serialize_tracks(self, timeline, otio_timeline):
        audio_vals = (
            'properties',
            'restriction-caps=(string)audio/x-raw(ANY)',
            'framerate=(GstFraction)1',
            otio_timeline.duration().rate
        )

        properties = '%s, %s,%s/%s' % audio_vals
        self._insert_new_sub_element(
            timeline, 'track',
            attrib={
                "caps": "audio/x-raw(ANY)",
                "track-type": '2',
                'track-id': '0',
                'properties': properties
            }
        )

        video_vals = (
            'properties',
            'restriction-caps=(string)video/x-raw(ANY)',
            'framerate=(GstFraction)1',
            otio_timeline.duration().rate
        )

        properties = '%s, %s,%s/%s' % video_vals
        for otio_track in otio_timeline.tracks:
            if otio_track.kind == otio.schema.TrackKind.Video:
                self._insert_new_sub_element(
                    timeline, 'track',
                    attrib={
                        "caps": "video/x-raw(ANY)",
                        "track-type": '4',
                        'track-id': '1',
                        'properties': properties,
                    }
                )

                return

    def _serialize_layer(self, timeline, layers, layer_priority):
        if layer_priority not in layers:
            layers[layer_priority] = self._insert_new_sub_element(
                timeline, 'layer',
                attrib={
                    "priority": str(layer_priority),
                }
            )

    def _serialize_timeline_element(self, timeline, layers, layer_priority,
                                    offset, otio_track, otio_element,
                                    ressources, all_clips):
        self._serialize_layer(timeline, layers, layer_priority)
        layer = layers[layer_priority]
        if isinstance(otio_element, (otio.schema.Clip, otio.schema.Transition)):
            element = self._serialize_clip(otio_track, layer, layer_priority,
                                           ressources, otio_element,
                                           str(len(all_clips)), offset)
            all_clips.add(element)
            if isinstance(otio_element, otio.schema.Transition):
                # Make next clip overlap
                return int(element.get("start")) - offset
        elif not isinstance(otio_element, otio.schema.Gap):
            print("FIXME: Add support for %s" % type(otio_element))
            return 0

        return to_gstclocktime(otio_element.source_range.duration)

    def _make_element_names_unique(self, all_names, otio_element):
        if isinstance(otio_element, otio.schema.Gap):
            return

        if not isinstance(otio_element, otio.schema.Track):
            i = 0
            name = otio_element.name
            while True:
                if name not in all_names:
                    otio_element.name = name
                    break

                i += 1
                name = otio_element.name + '_%d' % i
            all_names.add(otio_element.name)

        if isinstance(otio_element, (otio.schema.Stack, otio.schema.Track)):
            for sub_element in otio_element:
                self._make_element_names_unique(all_names, sub_element)

    def _make_timeline_elements_names_unique(self, otio_timeline):
        element_names = set()
        for track in otio_timeline.tracks:
            for element in track:
                self._make_element_names_unique(element_names, element)

    def _serialize_timeline(self, project, ressources, otio_timeline):
        metadatas = GstStructure(self._get_element_metadatas(otio_timeline))
        metadatas.set(
            "framerate", "fraction", self._framerate_to_frame_duration(
                otio_timeline.duration().rate
            )
        )
        timeline = self._insert_new_sub_element(
            project, 'timeline',
            attrib={
                "properties": self._get_element_properties(otio_timeline),
                "metadatas": str(metadatas),
            }
        )
        self._serialize_tracks(timeline, otio_timeline)

        self._make_timeline_elements_names_unique(otio_timeline)

        all_clips = set()
        layers = {}
        for layer_priority, otio_track in enumerate(otio_timeline.tracks):
            self._serialize_layer(timeline, layers, layer_priority)
            offset = 0
            for otio_element in otio_track:
                offset += self._serialize_timeline_element(
                    timeline, layers, layer_priority, offset,
                    otio_track, otio_element, ressources, all_clips,
                )

        for layer in layers.values():
            layer[:] = sorted(layer, key=lambda child: int(child.get("start")))

    # --------------------
    # static methods
    # --------------------
    @staticmethod
    def _framerate_to_frame_duration(framerate):
        frame_duration = FRAMERATE_FRAMEDURATION.get(int(framerate), "")
        if not frame_duration:
            frame_duration = FRAMERATE_FRAMEDURATION.get(float(framerate), "")
        return frame_duration

    def to_xges(self):
        xges = cElementTree.Element('ges', version="0.4")

        metadatas = GstStructure(self._get_element_metadatas(self.container))
        if self.container.name is not None:
            metadatas.set("name", "string", self.container.name)
        if not isinstance(self.container, otio.schema.Timeline):
            project = self._insert_new_sub_element(
                xges, 'project',
                attrib={
                    "properties": self._get_element_properties(self.container),
                    "metadatas": str(metadatas),
                }
            )

            if len(self.container) > 1:
                print(
                    "WARNING: Only one timeline supported, using *only* the first one.")

            otio_timeline = self.container[0]

        else:
            project = self._insert_new_sub_element(
                xges, 'project',
                attrib={
                    "metadatas": str(metadatas),
                }
            )
            otio_timeline = self.container

        ressources = self._insert_new_sub_element(project, 'ressources')
        self.rate = otio_timeline.duration().rate
        self._serialize_timeline(project, ressources, otio_timeline)

        # Return the serialized XML as a string, with indentations.
        string = cElementTree.tostring(xges, encoding="UTF-8")
        dom = minidom.parseString(string)
        return dom.toprettyxml(indent=' ')


# --------------------
# adapter requirements
# --------------------
def read_from_string(input_str):
    """
    Necessary read method for otio adapter

    Args:
        input_str (str): A GStreamer Editing Services formatted project

    Returns:
        OpenTimeline: An OpenTimeline object
    """

    return XGES(input_str).to_otio()


def write_to_string(input_otio):
    """
    Necessary write method for otio adapter

    Args:
        input_otio (OpenTimeline): An OpenTimeline object

    Returns:
        str: The string contents of an xGES project
    """

    return XGESOtio(input_otio).to_xges()
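

# Editor's note: a minimal usage sketch for this adapter, assuming the module
# is registered with OpenTimelineIO under the adapter name "xges" (as in the
# contrib adapter manifest); the file name below is illustrative only.
#
#     import opentimelineio as otio
#
#     timeline = otio.adapters.read_from_file("project.xges", adapter_name="xges")
#     xges_text = otio.adapters.write_to_string(timeline, adapter_name="xges")
#
# read_from_string() and write_to_string() above are the entry points the
# otio.adapters machinery calls with the raw project text.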


# --------------------
# Some unit checks for internal types
# --------------------

class XGESTests(unittest.TestCase):

    def test_gst_structure_parsing(self):
        struct = GstStructure('properties, name=(string)"%s";' % (
            GstStructure.escape("sc01 sh010_anim.mov"))
        )
        self.assertEqual(struct["name"], "sc01 sh010_anim.mov")

    def test_gst_structure_editing(self):
        struct = GstStructure('properties, name=(string)"%s";' % (
            GstStructure.escape("sc01 sh010_anim.mov"))
        )
        self.assertEqual(struct["name"], "sc01 sh010_anim.mov")

        struct.set("name", "string", "test")
        self.assertEqual(struct["name"], "test")
        self.assertEqual(str(struct), 'properties, name=(string)"test";')

    def test_empty_string(self):
        struct = GstStructure('properties, name=(string)"";')
        self.assertEqual(struct["name"], "")


if __name__ == '__main__':
    unittest.main()