mirror of https://github.com/ynput/ayon-core.git
synced 2026-01-01 08:24:53 +01:00

Merge branch 'develop' into bugfix/PYPE-700-nk-refactory-loaders

Commit 935b5f4f38
12 changed files with 188 additions and 49 deletions
@@ -208,11 +208,8 @@ class UserAssigmentEvent(BaseEvent):
         work_dir = anatomy.format(data)['avalon']['work']

         # we also need publish but not whole
         filled_all = anatomy.format_all(data)
-        if "partial" not in filled_all:
-            publish = filled_all['avalon']['publish']
-        else:
-            # Backwards compatibility
-            publish = filled_all["partial"]['avalon']['publish']
+        publish = filled_all['avalon']['publish']

         # now find path to {asset}
         m = re.search("(^.+?{})".format(data['asset']),
                       publish)
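The regex in this hunk is easy to misread: "(^.+?{})".format(data['asset']) builds a non-greedy capture of everything up to the first occurrence of the asset name, i.e. the path prefix that ends at the asset folder. A minimal sketch with made-up values:

import re

# Hypothetical publish path and asset name; the pattern is the one above.
publish = "/mnt/projects/show/publish/shots/sh010/render"
data = {"asset": "sh010"}
m = re.search("(^.+?{})".format(data["asset"]), publish)
print(m.group(1))  # /mnt/projects/show/publish/shots/sh010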
@@ -1013,7 +1013,8 @@ class BuildWorkfile(WorkfileSettings):
     def process(self,
                 regex_filter=None,
                 version=None,
-                representations=["exr", "dpx", "lutJson", "mov", "preview"]):
+                representations=["exr", "dpx", "lutJson", "mov",
+                                 "preview", "png"]):
         """
         A short description.
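A side note on the signature above: a mutable default such as representations=[...] is created once at function definition and shared between calls. It stays safe only while the method never mutates the list; the usual None idiom avoids the trap entirely. A sketch, keeping the parameter names from the hunk:

# Sketch of the None-default idiom; parameter names follow the hunk above.
def process(self, regex_filter=None, version=None, representations=None):
    if representations is None:
        representations = ["exr", "dpx", "lutJson", "mov", "preview", "png"]
    ...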
@@ -1054,9 +1055,10 @@ class BuildWorkfile(WorkfileSettings):
         wn["render"].setValue(True)
         vn.setInput(0, wn)

-        bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
-                                   color='0xcc1102ff', layer=-1,
-                                   nodes=[wn])
+        # adding backdrop under write
+        self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
+                             color='0xcc1102ff', layer=-1,
+                             nodes=[wn])

         # move position
         self.position_up(4)
@@ -1070,10 +1072,12 @@ class BuildWorkfile(WorkfileSettings):
             version=version,
             representations=representations)

-        log.info("__ subsets: `{}`".format(subsets))
+        for name, subset in subsets.items():
+            log.debug("___________________")
+            log.debug(name)
+            log.debug(subset["version"])

         nodes_backdrop = list()

         for name, subset in subsets.items():
             if "lut" in name:
                 continue
@@ -1103,9 +1107,10 @@ class BuildWorkfile(WorkfileSettings):
         # move position
         self.position_right()

-        bdn = self.create_backdrop(label="Loaded Reads",
-                                   color='0x2d7702ff', layer=-1,
-                                   nodes=nodes_backdrop)
+        # adding backdrop under all read nodes
+        self.create_backdrop(label="Loaded Reads",
+                             color='0x2d7702ff', layer=-1,
+                             nodes=nodes_backdrop)

     def read_loader(self, representation):
         """
@@ -77,31 +77,38 @@ class ExtractBurnin(pype.api.Extractor):
             if "burnin" not in repre.get("tags", []):
                 continue

+            is_sequence = "sequence" in repre.get("tags", [])
+
             stagingdir = repre["stagingDir"]
             filename = "{0}".format(repre["files"])

+            if is_sequence:
+                filename = repre["sequence_file"]
+
             name = "_burnin"
             ext = os.path.splitext(filename)[1]
             movieFileBurnin = filename.replace(ext, "") + name + ext

+            if is_sequence:
+                fn_splt = filename.split(".")
+                movieFileBurnin = ".".join(
+                    ((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))
+
+            self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin))
+
             full_movie_path = os.path.join(
-                os.path.normpath(stagingdir), repre["files"]
-            )
+                os.path.normpath(stagingdir), filename)
             full_burnin_path = os.path.join(
-                os.path.normpath(stagingdir), movieFileBurnin
-            )
+                os.path.normpath(stagingdir), movieFileBurnin)
+
+            self.log.debug("__ full_movie_path: {}".format(full_movie_path))
             self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))

             # create copy of prep_data for anatomy formatting
             _prep_data = copy.deepcopy(prep_data)
             _prep_data["representation"] = repre["name"]
             filled_anatomy = anatomy.format_all(_prep_data)
-            if hasattr(filled_anatomy, "get_solved"):
-                _filled_anatomy = filled_anatomy.get_solved()
-            else:
-                # Backwards compatibility
-                _filled_anatomy = filled_anatomy.get("solved")
-            _prep_data["anatomy"] = _filled_anatomy or {}
+            _prep_data["anatomy"] = filled_anatomy.get_solved()

             burnin_data = {
                 "input": full_movie_path.replace("\\", "/"),
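The sequence branch above is easiest to follow with concrete values: splitting on dots keeps the printf-style padding token in place while the _burnin suffix attaches to the base name. A sketch with a made-up sequence_file:

# Hypothetical sequence_file value; the joining logic is the hunk's own.
filename = "shot010.%08d.png"
name = "_burnin"
fn_splt = filename.split(".")  # ['shot010', '%08d', 'png']
movieFileBurnin = ".".join(((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))
print(movieFileBurnin)  # shot010_burnin.%08d.png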
@@ -149,15 +156,35 @@ class ExtractBurnin(pype.api.Extractor):
             self.log.debug("Output: {}".format(output))

             repre_update = {
+                "anatomy_template": "render",
                 "files": movieFileBurnin,
                 "name": repre["name"],
                 "tags": [x for x in repre["tags"] if x != "delete"]
             }

+            if is_sequence:
+                burnin_seq_files = list()
+                for frame_index in range(_prep_data["duration"] + 1):
+                    if frame_index == 0:
+                        continue
+                    burnin_seq_files.append(movieFileBurnin % frame_index)
+                repre_update.update({
+                    "files": burnin_seq_files
+                })
+
             instance.data["representations"][i].update(repre_update)

             # removing the source mov file
-            os.remove(full_movie_path)
-            self.log.debug("Removed: `{}`".format(full_movie_path))
+            if is_sequence:
+                for frame_index in range(_prep_data["duration"] + 1):
+                    if frame_index == 0:
+                        continue
+                    rm_file = full_movie_path % frame_index
+                    os.remove(rm_file)
+                    self.log.debug("Removed: `{}`".format(rm_file))
+            else:
+                os.remove(full_movie_path)
+                self.log.debug("Removed: `{}`".format(full_movie_path))

         # Remove any representations tagged for deletion.
         for repre in instance.data["representations"]:
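Both new loops skip index zero by hand, so each is equivalent to iterating range(1, duration + 1). Expanding the padded pattern per frame, with hypothetical values:

# Hypothetical values; the % expansion mirrors the loops above.
movieFileBurnin = "shot010_burnin.%08d.png"
duration = 3
burnin_seq_files = [movieFileBurnin % i for i in range(1, duration + 1)]
# ['shot010_burnin.00000001.png', 'shot010_burnin.00000002.png',
#  'shot010_burnin.00000003.png']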
@@ -53,10 +53,21 @@ class ExtractReview(pyblish.api.InstancePlugin):

         if "review" in tags:
             staging_dir = repre["stagingDir"]

+            # iterating preset output profiles
             for name, profile in output_profiles.items():
+                repre_new = repre.copy()
+                ext = profile.get("ext", None)
+                p_tags = profile.get('tags', [])
+                self.log.info("p_tags: `{}`".format(p_tags))
+
+                # adding control for presets to be sequence
+                # or single file
+                is_sequence = ("sequence" in p_tags) and (ext in (
+                    "png", "jpg", "jpeg"))
+
                 self.log.debug("Profile name: {}".format(name))

-                ext = profile.get("ext", None)
                 if not ext:
                     ext = "mov"
                     self.log.warning(
@@ -88,18 +99,22 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 filename = repre["files"].split(".")[0]

                 repr_file = filename + "_{0}.{1}".format(name, ext)

                 full_output_path = os.path.join(
                     staging_dir, repr_file)

+                if is_sequence:
+                    filename_base = filename + "_{0}".format(name)
+                    repr_file = filename_base + ".%08d.{0}".format(
+                        ext)
+                    repre_new["sequence_file"] = repr_file
+                    full_output_path = os.path.join(
+                        staging_dir, filename_base, repr_file)
+
                 self.log.info("input {}".format(full_input_path))
                 self.log.info("output {}".format(full_output_path))

-                repre_new = repre.copy()
-
                 new_tags = [x for x in tags if x != "delete"]
-                p_tags = profile.get('tags', [])
-                self.log.info("p_tags: `{}`".format(p_tags))

                 # add families
                 [instance.data["families"].append(t)
                  for t in p_tags
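With hypothetical values for the profile name, base filename and staging dir, the sequence branch above yields a per-profile subdirectory holding a padded pattern; the same pattern is handed on through repre_new["sequence_file"] for ExtractBurnin to consume:

import os

# Hypothetical values; the naming scheme is the one in the hunk above.
filename = "shot010"          # repre["files"].split(".")[0]
name = "h264png"              # output profile name (made up)
ext = "png"
staging_dir = "/tmp/staging"

filename_base = filename + "_{0}".format(name)       # shot010_h264png
repr_file = filename_base + ".%08d.{0}".format(ext)  # shot010_h264png.%08d.png
full_output_path = os.path.join(staging_dir, filename_base, repr_file)
# /tmp/staging/shot010_h264png/shot010_h264png.%08d.png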
@@ -288,6 +303,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 self.log.debug(
                     "_ output_args: `{}`".format(output_args))

+                if is_sequence:
+                    stg_dir = os.path.dirname(full_output_path)
+
+                    if not os.path.exists(stg_dir):
+                        self.log.debug(
+                            "creating dir: {}".format(stg_dir))
+                        os.mkdir(stg_dir)
+
                 mov_args = [
                     os.path.join(
                         os.environ.get(
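One caveat on the directory creation above: os.mkdir creates only the leaf directory and raises if intermediates are missing. That holds here because the parent is the existing staging dir; if that assumption ever changes, the recursive form is the safer sketch:

import os

# Hypothetical path; creates missing parents as well (Python 3).
stg_dir = "/tmp/staging/shot010_h264png"
os.makedirs(stg_dir, exist_ok=True)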
@@ -315,6 +338,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
                     "resolutionHeight": resolution_height,
                     "resolutionWidth": resolution_width,
                 })
+                if is_sequence:
+                    repre_new.update({
+                        "stagingDir": stg_dir,
+                        "files": os.listdir(stg_dir)
+                    })

                 if repre_new.get('preview'):
                     repre_new.pop("preview")
                 if repre_new.get('thumbnail'):
@@ -24,7 +24,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):

     label = "Integrate Asset"
     order = pyblish.api.IntegratorOrder
-    families = ["assembly"]
+    families = []
     exclude_families = ["clip"]

     def process(self, instance):
@@ -76,6 +76,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 "source",
                 "matchmove",
                 "image",
+                "source",
+                "assembly"
                 ]
     exclude_families = ["clip"]
@@ -326,8 +328,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             index_frame_start = None

             if repre.get("frameStart"):
-                frame_start_padding = len(str(
-                    repre.get("frameEnd")))
+                frame_start_padding = anatomy.templates["render"]["padding"]
                 index_frame_start = int(repre.get("frameStart"))

             # exception for slate workflow
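The padding switch is the substantive fix in this hunk: deriving padding from the digit count of the last frame only matches the project convention by coincidence. A before/after with made-up frame ranges, assuming the project render template pads to 4 digits:

# Before: padding guessed from the last frame number.
len(str(1001))  # -> 4, happens to match "%04d"
len(str(99))    # -> 2, wrong when files are actually named "0099"

# After: padding read from the project configuration, independent of range:
# anatomy.templates["render"]["padding"]  -> 4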
@@ -339,10 +340,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             for i in src_collection.indexes:
                 src_padding = src_padding_exp % i

-                # for adding first frame into db
-                if not dst_start_frame:
-                    dst_start_frame = src_padding
-
                 src_file_name = "{0}{1}{2}".format(
                     src_head, src_padding, src_tail)
@@ -364,6 +361,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 self.log.debug("source: {}".format(src))
                 instance.data["transfers"].append([src, dst])

+                # for adding first frame into db
+                if not dst_start_frame:
+                    dst_start_frame = dst_padding
+
             dst = "{0}{1}{2}".format(
                 dst_head,
                 dst_start_frame,
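Together with the removal a hunk above, this moves the first-frame capture from the source side to the destination side of the transfer loop, so the value stored in the db follows the published file's padding rather than the render's. Hypothetical paddings make the difference visible:

# Hypothetical printf expressions for source and destination naming.
src_padding_exp = "%04d"
dst_padding_exp = "%08d"
i = 97

src_padding = src_padding_exp % i  # '0097'      (how the render named it)
dst_padding = dst_padding_exp % i  # '00000097'  (how the publish names it)
# The db entry should reference the published name, hence dst_padding.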
@@ -256,6 +256,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         """
         # Get a submission job
         data = instance.data.copy()
+        if hasattr(instance, "_log"):
+            data['_log'] = instance._log
         render_job = data.pop("deadlineSubmissionJob", None)
         submission_type = "deadline"
@@ -52,6 +52,11 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):

         newNodes = (list(set(nodes) - set(shapes)))

+        current_namespace = pm.namespaceInfo(currentNamespace=True)
+
+        if current_namespace != ":":
+            groupName = current_namespace + ":" + groupName
+
         groupNode = pm.PyNode(groupName)
         roots = set()
@@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor):

     def process(self, instance):

-        parent_dir = self.staging_dir(instance)
+        staging_dir = self.staging_dir(instance)
         hierarchy_filename = "{}.abc".format(instance.name)
-        hierarchy_path = os.path.join(parent_dir, hierarchy_filename)
+        hierarchy_path = os.path.join(staging_dir, hierarchy_filename)
         json_filename = "{}.json".format(instance.name)
-        json_path = os.path.join(parent_dir, json_filename)
+        json_path = os.path.join(staging_dir, json_filename)

         self.log.info("Dumping scene data for debugging ..")
         with open(json_path, "w") as filepath:
@@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor):
             "uvWrite": True,
             "selection": True})

-        instance.data["files"] = [json_filename, hierarchy_filename]
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        representation_abc = {
+            'name': 'abc',
+            'ext': 'abc',
+            'files': hierarchy_filename,
+            "stagingDir": staging_dir
+        }
+        instance.data["representations"].append(representation_abc)
+
+        representation_json = {
+            'name': 'json',
+            'ext': 'json',
+            'files': json_filename,
+            "stagingDir": staging_dir
+        }
+        instance.data["representations"].append(representation_json)
+
         # Remove data
         instance.data.pop("scenedata", None)
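This replaces the legacy flat instance.data["files"] list with the representation dicts the new integrator consumes; it pairs with the IntegrateAssetNew hunk above, whose families list now includes "assembly" (plus what looks like an accidental second "source"). Since the two dicts share a shape, a small helper would be a natural follow-up; a sketch using exactly the keys from this hunk:

# Sketch only; keys follow the dicts above.
def make_representation(name, ext, files, staging_dir):
    return {
        "name": name,
        "ext": ext,
        "files": files,  # a single file name here; a list for sequences
        "stagingDir": staging_dir
    }

# e.g. make_representation("abc", "abc", hierarchy_filename, staging_dir)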
@@ -1,8 +1,9 @@
+import re
+import nuke
 import contextlib

 from avalon import api, io
-import nuke
+from pype.nuke import presets

 from pype.api import Logger
 log = Logger().get_logger(__name__, "nuke")
@@ -137,6 +138,18 @@ class LoadMov(api.Loader):
         if colorspace:
             read_node["colorspace"].setValue(str(colorspace))

+        # load nuke presets for Read's colorspace
+        read_clrs_presets = presets.get_colorspace_preset().get(
+            "nuke", {}).get("read", {})
+
+        # check if any colorspace preset for read is matching
+        preset_clrsp = next((read_clrs_presets[k]
+                             for k in read_clrs_presets
+                             if bool(re.search(k, file))),
+                            None)
+        if preset_clrsp is not None:
+            read_node["colorspace"].setValue(str(preset_clrsp))
+
         # add additional metadata from the version to imprint Avalon knob
         add_keys = [
             "frameStart", "frameEnd", "handles", "source", "author",
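The lookup treats every key of the "read" preset mapping as a regular expression tested against the loaded file path and takes the first hit, so key order matters when patterns overlap. A self-contained illustration with an invented preset table:

import re

# Hypothetical table; in pype it comes from
# presets.get_colorspace_preset()["nuke"]["read"].
read_clrs_presets = {r"\.mov$": "sRGB", "_plate": "linear"}
file = "/shots/sh010/sh010_plate_v001.mov"

preset_clrsp = next((read_clrs_presets[k]
                     for k in read_clrs_presets
                     if re.search(k, file)),
                    None)
print(preset_clrsp)  # both patterns match; the first key wins -> "sRGB"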
@@ -1,10 +1,12 @@
+import re
+import nuke
 import contextlib

 from avalon import api, io
-import nuke
+from pype.nuke import presets

 from pype.api import Logger

 log = Logger().get_logger(__name__, "nuke")
@@ -131,6 +133,18 @@ class LoadSequence(api.Loader):
         if colorspace:
             r["colorspace"].setValue(str(colorspace))

+        # load nuke presets for Read's colorspace
+        read_clrs_presets = presets.get_colorspace_preset().get(
+            "nuke", {}).get("read", {})
+
+        # check if any colorspace preset for read is matching
+        preset_clrsp = next((read_clrs_presets[k]
+                             for k in read_clrs_presets
+                             if bool(re.search(k, file))),
+                            None)
+        if preset_clrsp is not None:
+            r["colorspace"].setValue(str(preset_clrsp))
+
         loader_shift(r, first, relative=True)
         r["origfirst"].setValue(int(first))
         r["first"].setValue(int(first))
@@ -5,6 +5,7 @@ import json
 import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
 from pypeapp.lib import config
 from pype import api as pype
+from subprocess import Popen, PIPE
 # FFmpeg in PATH is required
@@ -21,6 +22,7 @@ else:
 FFMPEG = (
     '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
 ).format(os.path.normpath(ffmpeg_path + "ffmpeg"))

 FFPROBE = (
     '{} -v quiet -print_format json -show_format -show_streams %(source)s'
 ).format(os.path.normpath(ffmpeg_path + "ffprobe"))
@@ -248,6 +250,33 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
             'filters': filters
         }).strip()

+    def render(self, output, args=None, overwrite=False, **kwargs):
+        """
+        Render the media to a specified destination.
+
+        :param str output: output file
+        :param str args: additional FFMPEG arguments
+        :param bool overwrite: overwrite the output if it exists
+        """
+        if not overwrite and os.path.exists(output):
+            raise RuntimeError("Destination '%s' exists, please "
+                               "use overwrite" % output)
+
+        is_sequence = "%" in output
+
+        command = self.command(output=output,
+                               args=args,
+                               overwrite=overwrite)
+        proc = Popen(command, shell=True)
+        proc.communicate()
+        if proc.returncode != 0:
+            raise RuntimeError("Failed to render '%s': %s'"
+                               % (output, command))
+        if is_sequence:
+            output = output % kwargs.get("duration")
+        if not os.path.exists(output):
+            raise RuntimeError("Failed to generate this fucking file '%s'" % output)
+

 def example(input_path, output_path):
     options_init = {
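The overridden render() takes **kwargs so callers can forward their burnin data wholesale; the only key it reads is "duration", used to substitute the printf-style pattern and verify the last frame exists. Note that if "duration" is missing, output % None raises TypeError, so sequence callers must supply it. The final check, with hypothetical values:

import os

# Hypothetical pattern and length, mirroring the is_sequence branch above.
output = "/tmp/review/shot010_burnin.%08d.png"
duration = 24

if "%" in output:               # printf-style pattern means image sequence
    output = output % duration  # /tmp/review/shot010_burnin.00000024.png
print(os.path.exists(output))   # True only if ffmpeg wrote the last frame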
@@ -349,7 +378,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):

     frame_start = data.get("frame_start")
     frame_start_tc = data.get('frame_start_tc', frame_start)

     stream = burnin._streams[0]
     if "resolution_width" not in data:
         data["resolution_width"] = stream.get("width", "Unknown")
@@ -436,7 +465,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
     if codec_data is not []:
         codec_args = " ".join(codec_data)

-    burnin.render(output_path, args=codec_args, overwrite=overwrite)
+    burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)


 if __name__ == '__main__':
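Forwarding **data is what feeds render() its "duration" for the sequence check above. As an aside, the unchanged guard "if codec_data is not []:" is always true: "is not" compares object identity and a fresh list literal is never the same object, so a plain truthiness test (if codec_data:) was presumably intended. The call shape, with hypothetical data:

# Hypothetical burnin data; "duration" is consumed by render()'s sequence
# check, the remaining keys by the burnin text fields.
data = {"duration": 24, "frame_start": 1001, "username": "jdoe"}
codec_args = "-codec:v libx264"
# burnin.render("/tmp/out.%08d.png", args=codec_args,
#               overwrite=True, **data)
# inside render(): kwargs.get("duration") -> 24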