Mirror of https://github.com/ynput/ayon-core.git, synced 2026-01-01 16:34:53 +01:00

Merge branch 'enhancement/maya_template' of https://github.com/tokejepsen/pype into enhancement/maya_template

Commit 43c4161109
19 changed files with 395 additions and 179 deletions

@@ -134,6 +134,27 @@ def append_user_scripts():
         traceback.print_exc()
 
 
+def set_app_templates_path():
+    # Blender requires the app templates to be in `BLENDER_USER_SCRIPTS`.
+    # After running Blender, we set that variable to our custom path, so
+    # that the user can use their custom app templates.
+
+    # We look among the scripts paths for one of the paths that contains
+    # the app templates. The path must contain the subfolder
+    # `startup/bl_app_templates_user`.
+    paths = os.environ.get("OPENPYPE_BLENDER_USER_SCRIPTS").split(os.pathsep)
+
+    app_templates_path = None
+    for path in paths:
+        if os.path.isdir(
+                os.path.join(path, "startup", "bl_app_templates_user")):
+            app_templates_path = path
+            break
+
+    if app_templates_path and os.path.isdir(app_templates_path):
+        os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path
+
+
 def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
     r"""Write `data` to `node` as userDefined attributes
 

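Note: a standalone sketch of the lookup added above (not part of the commit). One caveat worth knowing: `os.environ.get(...)` returns `None` when `OPENPYPE_BLENDER_USER_SCRIPTS` is unset, so the `.split()` in the committed code would raise; defaulting to an empty string is a defensive variant.

```python
import os

# Minimal sketch of the scan above, hardened against an unset variable:
# os.environ.get() returns None when the key is missing, and None.split()
# raises, so default to "" here.
paths = os.environ.get("OPENPYPE_BLENDER_USER_SCRIPTS", "").split(os.pathsep)

app_templates_path = None
for path in paths:
    # Blender only picks up app templates from
    # <scripts>/startup/bl_app_templates_user.
    if os.path.isdir(os.path.join(path, "startup", "bl_app_templates_user")):
        app_templates_path = path
        break

if app_templates_path:
    os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path
```
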
@@ -60,6 +60,7 @@ def install():
     register_creator_plugin_path(str(CREATE_PATH))
 
     lib.append_user_scripts()
+    lib.set_app_templates_path()
 
     register_event_callback("new", on_new)
     register_event_callback("open", on_open)

@@ -207,7 +207,9 @@ class MaxCreator(Creator, MaxCreatorBase):
 
         """
         for instance in instances:
-            if instance_node := rt.GetNodeByName(instance.data.get("instance_node")):  # noqa
+            instance_node = rt.GetNodeByName(
+                instance.data.get("instance_node"))
+            if instance_node:
                 count = rt.custAttributes.count(instance_node)
                 rt.custAttributes.delete(instance_node, count)
                 rt.Delete(instance_node)

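Note: several hunks in this commit expand assignment expressions (`:=`) into a plain assignment followed by an `if`. The two forms are equivalent; the walrus operator requires Python 3.8+, so the expanded form is safer when older interpreters may load these plugins. A minimal illustration (`get_node` is hypothetical):

```python
def get_node(name):
    # Hypothetical lookup returning None when nothing is found.
    return {"box": "Box001"}.get(name)

# Python 3.8+ assignment expression (the form removed by this commit):
if node := get_node("box"):
    print(node)

# Equivalent pre-3.8 form (the form added by this commit):
node = get_node("box")
if node:
    print(node)
```
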
@@ -24,7 +24,8 @@ class CreateRender(plugin.MaxCreator):
             instance_data,
             pre_create_data)
         container_name = instance.data.get("instance_node")
-        if sel_obj := self.selected_nodes:
+        sel_obj = self.selected_nodes
+        if sel_obj:
             # set viewport camera for rendering(mandatory for deadline)
             RenderSettings(self.project_settings).set_render_camera(sel_obj)
             # set output paths for rendering(mandatory for deadline)

@@ -18,7 +18,8 @@ class ValidateCameraContent(pyblish.api.InstancePlugin):
                 "$Physical_Camera", "$Target"]
 
     def process(self, instance):
-        if invalid := self.get_invalid(instance):  # noqa
+        invalid = self.get_invalid(instance)
+        if invalid:
             raise PublishValidationError(("Camera instance must only include"
                                           "camera (and camera target). "
                                           f"Invalid content {invalid}"))

@@ -18,7 +18,8 @@ class ValidateModelContent(pyblish.api.InstancePlugin):
     label = "Model Contents"
 
     def process(self, instance):
-        if invalid := self.get_invalid(instance):  # noqa
+        invalid = self.get_invalid(instance)
+        if invalid:
             raise PublishValidationError(("Model instance must only include"
                                           "Geometry and Editable Mesh. "
                                           f"Invalid types on: {invalid}"))

@@ -35,12 +35,16 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
 
         """
         report = []
-        if invalid := self.get_tyflow_object(instance):  # noqa
-            report.append(f"Non tyFlow object found: {invalid}")
+        invalid_object = self.get_tyflow_object(instance)
+        if invalid_object:
+            report.append(f"Non tyFlow object found: {invalid_object}")
 
-        if invalid := self.get_tyflow_operator(instance):  # noqa
-            report.append(
-                f"tyFlow ExportParticle operator not found: {invalid}")
+        invalid_operator = self.get_tyflow_operator(instance)
+        if invalid_operator:
+            report.append((
+                "tyFlow ExportParticle operator not "
+                f"found: {invalid_operator}"))
+
         if self.validate_export_mode(instance):
             report.append("The export mode is not at PRT")
 

@@ -49,9 +53,10 @@ class ValidatePointCloud(pyblish.api.InstancePlugin):
             report.append(("tyFlow Partition setting is "
                            "not at the default value"))
 
-        if invalid := self.validate_custom_attribute(instance):  # noqa
+        invalid_attribute = self.validate_custom_attribute(instance)
+        if invalid_attribute:
             report.append(("Custom Attribute not found "
-                           f":{invalid}"))
+                           f":{invalid_attribute}"))
 
         if report:
             raise PublishValidationError(f"{report}")

@@ -105,7 +105,8 @@ class ImportMayaLoader(load.LoaderPlugin):
         "camera",
         "rig",
         "camerarig",
-        "staticMesh"
+        "staticMesh",
+        "workfile"
     ]
 
     label = "Import"

@@ -6,23 +6,29 @@ import maya.cmds as cmds
 from openpype.settings import get_project_settings
 from openpype.pipeline import (
     load,
     legacy_io,
     get_representation_path
 )
 from openpype.hosts.maya.api.lib import (
-    unique_namespace, get_attribute_input, maintained_selection
+    unique_namespace,
+    get_attribute_input,
+    maintained_selection,
+    convert_to_maya_fps
 )
 from openpype.hosts.maya.api.pipeline import containerise
 
 
 def is_sequence(files):
     sequence = False
-    collections, remainder = clique.assemble(files)
+    collections, remainder = clique.assemble(files, minimum_items=1)
     if collections:
         sequence = True
 
     return sequence
 
 
+def get_current_session_fps():
+    session_fps = float(legacy_io.Session.get('AVALON_FPS', 25))
+    return convert_to_maya_fps(session_fps)
+
+
 class ArnoldStandinLoader(load.LoaderPlugin):
     """Load as Arnold standin"""
 

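Note: clique's `assemble()` defaults to `minimum_items=2`, so a single-frame render would land in `remainder` and `is_sequence()` would report `False`. Passing `minimum_items=1` lets one file still form a collection:

```python
import clique

# Default minimum_items=2: a lone frame does not form a collection.
collections, remainder = clique.assemble(["render.1001.exr"])
print(bool(collections))  # False

# minimum_items=1: a single-frame sequence is still detected.
collections, remainder = clique.assemble(
    ["render.1001.exr"], minimum_items=1)
print(bool(collections))  # True
```
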
|
@ -90,6 +96,9 @@ class ArnoldStandinLoader(load.LoaderPlugin):
|
|||
sequence = is_sequence(os.listdir(os.path.dirname(self.fname)))
|
||||
cmds.setAttr(standin_shape + ".useFrameExtension", sequence)
|
||||
|
||||
fps = float(version["data"].get("fps"))or get_current_session_fps()
|
||||
cmds.setAttr(standin_shape + ".abcFPS", fps)
|
||||
|
||||
nodes = [root, standin, standin_shape]
|
||||
if operator is not None:
|
||||
nodes.append(operator)
|
||||
|
|
|
|||
|
|
@@ -18,18 +18,14 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
         for objset in objsets:
             objset = str(objset)
             members = cmds.sets(objset, query=True)
+            members = cmds.ls(members, long=True)
             if members is None:
                 self.log.warning("Skipped empty instance: \"%s\" " % objset)
                 continue
             if objset.endswith("content_SET"):
-                members = cmds.ls(members, long=True)
-                children = get_all_children(members)
-                instance.data["contentMembers"] = children
-                self.log.debug("content members: {}".format(children))
-            elif objset.endswith("proxy_SET"):
-                set_members = get_all_children(cmds.ls(members, long=True))
-                instance.data["proxy"] = set_members
-                self.log.debug("proxy members: {}".format(set_members))
+                instance.data["contentMembers"] = self.get_hierarchy(members)
+            if objset.endswith("proxy_SET"):
+                instance.data["proxy"] = self.get_hierarchy(members)
 
         # Use camera in object set if present else default to render globals
         # camera.

@@ -48,3 +44,13 @@ class CollectArnoldSceneSource(pyblish.api.InstancePlugin):
             self.log.debug("No renderable cameras found.")
 
         self.log.debug("data: {}".format(instance.data))
+
+    def get_hierarchy(self, nodes):
+        """Return nodes with all their children"""
+        nodes = cmds.ls(nodes, long=True)
+        if not nodes:
+            return []
+        children = get_all_children(nodes)
+        # Make sure nodes merged with children only
+        # contains unique entries
+        return list(set(nodes + children))

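Note: the `list(set(...))` merge in `get_hierarchy()` deduplicates parents against their own children; `set()` does not preserve order, which is acceptable here since member order carries no meaning for these exports. A toy illustration with hypothetical Maya DAG paths:

```python
# Hypothetical DAG paths: the parent list and the child list overlap.
nodes = ["|grp", "|grp|mesh"]
children = ["|grp|mesh", "|grp|mesh|meshShape"]

# set() drops the duplicate "|grp|mesh" but loses ordering.
merged = list(set(nodes + children))
print(sorted(merged))
# ['|grp', '|grp|mesh', '|grp|mesh|meshShape']
```
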
@@ -109,6 +109,7 @@ class ExtractArnoldSceneSource(publish.Extractor):
             return
 
+        kwargs["filename"] = file_path.replace(".ass", "_proxy.ass")
         filenames, _ = self._extract(
             instance.data["proxy"], attribute_data, kwargs
         )
 

@@ -30,6 +30,7 @@ from openpype.lib import (
     env_value_to_bool,
     Logger,
     get_version_from_path,
+    StringTemplate,
 )
 
 from openpype.settings import (

@@ -1300,13 +1301,8 @@ def create_write_node(
 
     # build file path to workfiles
     fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/")
-    fpath = data["fpath_template"].format(
-        work=fdir,
-        version=data["version"],
-        subset=data["subset"],
-        frame=data["frame"],
-        ext=ext
-    )
+    data["work"] = fdir
+    fpath = StringTemplate(data["fpath_template"]).format_strict(data)
 
     # create directory
     if not os.path.isdir(os.path.dirname(fpath)):

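Note: the write-node path is now built by writing `work` into the data dict and formatting the whole template in one pass. A rough sketch of the new call, assuming `StringTemplate.format_strict()` behaves like `str.format` over the dict but fails loudly on missing keys (the values and template below are hypothetical):

```python
from openpype.lib import StringTemplate

data = {
    "work": "/proj/sh010/work",   # hypothetical values
    "subset": "renderMain",
    "version": 3,
    "frame": "####",
    "ext": "exr",
    "fpath_template": "{work}/renders/{subset}_v{version:0>3}.{frame}.{ext}",
}
# format_strict() raises if a key in the template is missing from `data`,
# instead of silently producing a broken path.
fpath = StringTemplate(data["fpath_template"]).format_strict(data)
```
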
@@ -2064,11 +2060,43 @@ class WorkfileSettings(object):
             # it will be dict in value
             if isinstance(value_, dict):
                 continue
+            # skip empty values
+            if not value_:
+                continue
             if self._root_node[knob].value() not in value_:
                 self._root_node[knob].setValue(str(value_))
                 log.debug("nuke.root()['{}'] changed to: {}".format(
                     knob, value_))
 
+        # set ocio config path
+        if config_data:
+            current_ocio_path = os.getenv("OCIO")
+            if current_ocio_path != config_data["path"]:
+                message = """
+It seems like there's a mismatch between the OCIO config path set in your Nuke
+settings and the actual path set in your OCIO environment.
+
+To resolve this, please follow these steps:
+1. Close Nuke if it's currently open.
+2. Reopen Nuke.
+
+Please note the paths for your reference:
+
+- The OCIO environment path currently set:
+  `{env_path}`
+
+- The path in your current Nuke settings:
+  `{settings_path}`
+
+Reopening Nuke should synchronize these paths and resolve any discrepancies.
+"""
+                nuke.message(
+                    message.format(
+                        env_path=current_ocio_path,
+                        settings_path=config_data["path"]
+                    )
+                )
+
     def set_writes_colorspace(self):
         ''' Adds correct colorspace to write node dict
 

@@ -2164,7 +2192,7 @@ class WorkfileSettings(object):
 
         log.debug(changes)
         if changes:
-            msg = "Read nodes are not set to correct colospace:\n\n"
+            msg = "Read nodes are not set to correct colorspace:\n\n"
             for nname, knobs in changes.items():
                 msg += (
                     " - node: '{0}' is now '{1}' but should be '{2}'\n"

@@ -293,7 +293,7 @@ class BatchPublishEndpoint(WebpublishApiEndpoint):
             log.debug("Adding to queue")
             self.resource.studio_task_queue.append(args)
         else:
-            subprocess.call(args)
+            subprocess.Popen(args)
 
         return Response(
             status=200,

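Note: `subprocess.call()` blocks until the child process exits, so the HTTP endpoint could not return until the whole publish finished; `subprocess.Popen()` starts the process and returns immediately:

```python
import subprocess
import sys

# call() blocks and returns the child's exit code.
code = subprocess.call([sys.executable, "-c", "print('blocking')"])

# Popen() returns a handle right away; the child keeps running while the
# endpoint builds its Response.
proc = subprocess.Popen([sys.executable, "-c", "print('non-blocking')"])
print(proc.pid)
```
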
@@ -21,6 +21,7 @@ from openpype.pipeline import (
 from openpype.tests.lib import is_in_tests
 from openpype.pipeline.farm.patterning import match_aov_pattern
 from openpype.lib import is_running_from_build
+from openpype.pipeline import publish
 
 
 def get_resources(project_name, version, extension=None):

@@ -79,7 +80,8 @@ def get_resource_files(resources, frame_range=None):
     return list(res_collection)
 
 
-class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
+class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin,
+                                publish.ColormanagedPyblishPluginMixin):
     """Process Job submitted on farm.
 
     These jobs are dependent on a deadline or muster job

@@ -598,7 +600,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         self.log.debug("instances:{}".format(instances))
         return instances
 
-    def _get_representations(self, instance, exp_files, do_not_add_review):
+    def _get_representations(self, instance_data, exp_files,
+                             do_not_add_review):
         """Create representations for file sequences.
 
         This will return representations of expected files if they are not

@@ -606,7 +609,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         most cases, but if not - we create representation from each of them.
 
         Arguments:
-            instance (dict): instance data for which we are
+            instance_data (dict): instance.data for which we are
                 setting representations
             exp_files (list): list of expected files
             do_not_add_review (bool): explicitly skip review

@@ -628,9 +631,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         # expected files contains more explicitly and from what
         # should be review made.
         # - "review" tag is never added when is set to 'False'
-        if instance["useSequenceForReview"]:
+        if instance_data["useSequenceForReview"]:
             # toggle preview on if multipart is on
-            if instance.get("multipartExr", False):
+            if instance_data.get("multipartExr", False):
                 self.log.debug(
                     "Adding preview tag because its multipartExr"
                 )

@@ -655,8 +658,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                 " This may cause issues on farm."
             ).format(staging))
 
-        frame_start = int(instance.get("frameStartHandle"))
-        if instance.get("slate"):
+        frame_start = int(instance_data.get("frameStartHandle"))
+        if instance_data.get("slate"):
             frame_start -= 1
 
         preview = preview and not do_not_add_review

@@ -665,10 +668,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "ext": ext,
             "files": [os.path.basename(f) for f in list(collection)],
             "frameStart": frame_start,
-            "frameEnd": int(instance.get("frameEndHandle")),
+            "frameEnd": int(instance_data.get("frameEndHandle")),
             # If expectedFile are absolute, we need only filenames
             "stagingDir": staging,
-            "fps": instance.get("fps"),
+            "fps": instance_data.get("fps"),
             "tags": ["review"] if preview else [],
         }
 

@@ -676,17 +679,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         if ext in self.skip_integration_repre_list:
             rep["tags"].append("delete")
 
-        if instance.get("multipartExr", False):
+        if instance_data.get("multipartExr", False):
             rep["tags"].append("multipartExr")
 
         # support conversion from tiled to scanline
-        if instance.get("convertToScanline"):
+        if instance_data.get("convertToScanline"):
             self.log.info("Adding scanline conversion.")
             rep["tags"].append("toScanline")
 
         representations.append(rep)
 
-        self._solve_families(instance, preview)
+        self._solve_families(instance_data, preview)
 
         # add remainders as representations
         for remainder in remainders:

@@ -717,13 +720,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             preview = preview and not do_not_add_review
             if preview:
                 rep.update({
-                    "fps": instance.get("fps"),
+                    "fps": instance_data.get("fps"),
                     "tags": ["review"]
                 })
-            self._solve_families(instance, preview)
+            self._solve_families(instance_data, preview)
 
             already_there = False
-            for repre in instance.get("representations", []):
+            for repre in instance_data.get("representations", []):
                 # might be added explicitly before by publish_on_farm
                 already_there = repre.get("files") == rep["files"]
                 if already_there:

@@ -733,6 +736,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             if not already_there:
                 representations.append(rep)
 
+        for rep in representations:
+            # inject colorspace data
+            self.set_representation_colorspace(
+                rep, self.context,
+                colorspace=instance_data["colorspace"]
+            )
+
         return representations
 
     def _solve_families(self, instance, preview=False):

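Note: the new loop tags every representation with colorspace data via the `ColormanagedPyblishPluginMixin` the plugin class now inherits. A rough sketch of the effect (the `colorspaceData` key and its shape are assumptions, not verified against the OpenPype API):

```python
# Hypothetical stand-in for the mixin's method: it annotates a
# representation dict so the integrator knows which colorspace it is in.
def set_representation_colorspace(representation, context, colorspace=None):
    representation["colorspaceData"] = {"colorspace": colorspace}

rep = {"name": "exr", "files": ["render.1001.exr"]}
set_representation_colorspace(rep, context=None, colorspace="ACES - ACEScg")
print(rep["colorspaceData"])  # {'colorspace': 'ACES - ACEScg'}
```
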
@@ -861,7 +871,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "jobBatchName": data.get("jobBatchName", ""),
             "useSequenceForReview": data.get("useSequenceForReview", True),
             # map inputVersions `ObjectId` -> `str` so json supports it
-            "inputVersions": list(map(str, data.get("inputVersions", [])))
+            "inputVersions": list(map(str, data.get("inputVersions", []))),
+            "colorspace": instance.data.get("colorspace")
         }
 
         # skip locking version if we are creating v01

@@ -890,7 +890,7 @@ class SyncEntitiesFactory:
         else:
             parent_dict = self.entities_dict.get(parent_id, {})
 
-        for child_id in parent_dict.get("children", []):
+        for child_id in list(parent_dict.get("children", [])):
             # keep original `remove` value for all children
             _remove = (remove is True)
             if not _remove:

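Note: wrapping the children in `list(...)` iterates over a snapshot, which matters if the loop body mutates the same `children` collection; removing items from a list while iterating it silently skips elements:

```python
items = [2, 2, 3]
for item in list(items):  # iterate a snapshot, mutate the original
    if item % 2 == 0:
        items.remove(item)
print(items)  # [3]

# Without list(), removing the first 2 shifts the second 2 into its slot
# and the iterator skips it, leaving [2, 3].
```
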
@@ -375,7 +375,11 @@ def get_imageio_config(
     # This is for backward compatibility.
     # TODO: in future rewrite this to be more explicit
     activate_host_color_management = imageio_host.get(
-        "activate_host_color_management", True)
+        "activate_host_color_management")
+
+    # TODO: remove this in future - backward compatibility
+    if activate_host_color_management is None:
+        activate_host_color_management = host_ocio_config.get("enabled", False)
 
     if not activate_host_color_management:
         # if host settings are disabled return False because

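Note: dropping the `True` default is what makes the legacy fallback reachable. `dict.get(key)` returns `None` only when the setting is absent, which can then be distinguished from an explicit `False`. A sketch with hypothetical settings dicts:

```python
imageio_host = {}                      # hypothetical: new key not set
host_ocio_config = {"enabled": True}   # hypothetical legacy settings

# None means "not configured", so fall back to the old "enabled" flag;
# an explicit False would be respected as-is.
activate = imageio_host.get("activate_host_color_management")
if activate is None:
    activate = host_ocio_config.get("enabled", False)
print(activate)  # True
```
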
@@ -1574,7 +1574,7 @@ class PlaceholderLoadMixin(object):
                 "population."
             )
             return
-        if not placeholder.data["keep_placeholder"]:
+        if not placeholder.data.get("keep_placeholder", True):
             self.delete_placeholder(placeholder)
 
     def load_failed(self, placeholder, representation):

@@ -266,6 +266,16 @@ class ExtractBurnin(publish.Extractor):
         first_output = True
 
         files_to_delete = []
+
+        repre_burnin_options = copy.deepcopy(burnin_options)
+        # Use fps from representation for output in options
+        fps = repre.get("fps")
+        if fps is not None:
+            repre_burnin_options["fps"] = fps
+        # TODO Should we use fps from source representation to fill
+        #   it in review?
+        # burnin_data["fps"] = fps
+
         for filename_suffix, burnin_def in repre_burnin_defs.items():
             new_repre = copy.deepcopy(repre)
             new_repre["stagingDir"] = src_repre_staging_dir

@@ -308,7 +318,7 @@ class ExtractBurnin(publish.Extractor):
                 "input": temp_data["full_input_path"],
                 "output": temp_data["full_output_path"],
                 "burnin_data": burnin_data,
-                "options": copy.deepcopy(burnin_options),
+                "options": repre_burnin_options,
                 "values": burnin_values,
                 "full_input_path": temp_data["full_input_paths"][0],
                 "first_frame": temp_data["first_frame"],

@@ -463,15 +473,11 @@ class ExtractBurnin(publish.Extractor):
 
         handle_start = instance.data.get("handleStart")
         if handle_start is None:
-            handle_start = context.data.get("handleStart")
-            if handle_start is None:
-                handle_start = handles
+            handle_start = context.data.get("handleStart") or 0
 
         handle_end = instance.data.get("handleEnd")
         if handle_end is None:
-            handle_end = context.data.get("handleEnd")
-            if handle_end is None:
-                handle_end = handles
+            handle_end = context.data.get("handleEnd") or 0
 
         frame_start_handle = frame_start - handle_start
         frame_end_handle = frame_end + handle_end

@@ -1,6 +1,5 @@
 import os
-import sys
 import re
 import subprocess
 import platform
 import json

@@ -13,6 +12,7 @@ from openpype.lib import (
     get_ffmpeg_codec_args,
     get_ffmpeg_format_args,
     convert_ffprobe_fps_value,
+    convert_ffprobe_fps_to_float,
 )
 
 

@@ -41,45 +41,6 @@ TIMECODE_KEY = "{timecode}"
 SOURCE_TIMECODE_KEY = "{source_timecode}"
 
 
-def convert_list_to_command(list_to_convert, fps, label=""):
-    """Convert a list of values to a drawtext command file for ffmpeg `sendcmd`
-
-    The list of values is expected to have a value per frame. If the video
-    file ends up being longer than the amount of samples per frame than the
-    last value will be held.
-
-    Args:
-        list_to_convert (list): List of values per frame.
-        fps (float or int): The expected frame per seconds of the output file.
-        label (str): Label for the drawtext, if specific drawtext filter is
-            required
-
-    Returns:
-        str: Filepath to the temporary drawtext command file.
-
-    """
-
-    with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
-        for i, value in enumerate(list_to_convert):
-            seconds = i / fps
-
-            # Escape special character
-            value = str(value).replace(":", "\\:")
-
-            filter = "drawtext"
-            if label:
-                filter += "@" + label
-
-            line = (
-                "{start} {filter} reinit text='{value}';"
-                "\n".format(start=seconds, filter=filter, value=value)
-            )
-
-            f.write(line)
-        f.flush()
-    return f.name
-
-
 def _get_ffprobe_data(source):
     """Reimplemented from otio burnins to be able use full path to ffprobe
     :param str source: source media file

@@ -178,6 +139,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         self.ffprobe_data = ffprobe_data
         self.first_frame = first_frame
         self.input_args = []
+        self.cleanup_paths = []
 
         super().__init__(source, source_streams)
 

@@ -191,7 +153,6 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         frame_start=None,
         frame_end=None,
         options=None,
-        cmd=""
     ):
         """
         Adding static text to a filter.

@@ -212,13 +173,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if frame_end is not None:
             options["frame_end"] = frame_end
 
-        draw_text = DRAWTEXT
-        if cmd:
-            draw_text = "{}, {}".format(cmd, DRAWTEXT)
-
         options["label"] = align
 
-        self._add_burnin(text, align, options, draw_text)
+        self._add_burnin(text, align, options, DRAWTEXT)
 
     def add_timecode(
         self, align, frame_start=None, frame_end=None, frame_start_tc=None,

@@ -263,6 +220,139 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
 
         self._add_burnin(text, align, options, TIMECODE)
 
+    def add_per_frame_text(
+        self,
+        text,
+        align,
+        frame_start,
+        frame_end,
+        listed_keys,
+        options=None
+    ):
+        """Add text that changes per frame.
+
+        Args:
+            text (str): Template string with unfilled keys that are changed
+                per frame.
+            align (str): Alignment of text.
+            frame_start (int): Starting frame for burnins current frame.
+            frame_end (int): Ending frame for burnins current frame.
+            listed_keys (list): List of keys that are changed per frame.
+            options (Optional[dict]): Options to affect style of burnin.
+        """
+
+        if not options:
+            options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
+
+        options = options.copy()
+        if frame_start is None:
+            frame_start = options["frame_offset"]
+
+        # `frame_end` is only for measurements of text position
+        if frame_end is None:
+            frame_end = options["frame_end"]
+
+        fps = options.get("fps")
+        if not fps:
+            fps = self.frame_rate
+
+        text_for_size = text
+        if CURRENT_FRAME_SPLITTER in text:
+            expr = self._get_current_frame_expression(frame_start, frame_end)
+            if expr is None:
+                expr = MISSING_KEY_VALUE
+            text_for_size = text_for_size.replace(
+                CURRENT_FRAME_SPLITTER, MISSING_KEY_VALUE)
+            text = text.replace(CURRENT_FRAME_SPLITTER, expr)
+
+        # Find longest list with values
+        longest_list_len = max(
+            len(item["values"]) for item in listed_keys.values()
+        )
+        # Where to store formatted values per frame by key
+        new_listed_keys = [{} for _ in range(longest_list_len)]
+        # Find the longest value per fill key.
+        # The longest value is used to determine size of burnin box.
+        longest_value_by_key = {}
+        for key, item in listed_keys.items():
+            values = item["values"]
+            # Fill the missing values from the longest list with the last
+            # value to make sure all values have same "frame count"
+            last_value = values[-1] if values else ""
+            for _ in range(longest_list_len - len(values)):
+                values.append(last_value)
+
+            # Prepare dictionary structure for nested values
+            # - last key is overridden on each frame loop
+            item_keys = list(item["keys"])
+            fill_data = {}
+            sub_value = fill_data
+            last_item_key = item_keys.pop(-1)
+            for item_key in item_keys:
+                sub_value[item_key] = {}
+                sub_value = sub_value[item_key]
+
+            # Fill value per frame
+            key_max_len = 0
+            key_max_value = ""
+            for value, new_values in zip(values, new_listed_keys):
+                sub_value[last_item_key] = value
+                try:
+                    value = key.format(**sub_value)
+                except (TypeError, KeyError, ValueError):
+                    value = MISSING_KEY_VALUE
+                new_values[key] = value
+
+                value_len = len(value)
+                if value_len > key_max_len:
+                    key_max_value = value
+                    key_max_len = value_len
+
+            # Store the longest value
+            longest_value_by_key[key] = key_max_value
+
+        # Make sure the longest value of each key is replaced for text size
+        # calculation
+        for key, value in longest_value_by_key.items():
+            text_for_size = text_for_size.replace(key, value)
+
+        # Create temp file with instructions for each frame of text
+        lines = []
+        for frame, value in enumerate(new_listed_keys):
+            seconds = float(frame) / fps
+            # Escape special character
+            new_text = text
+            for _key, _value in value.items():
+                _value = str(_value)
+                new_text = new_text.replace(_key, str(_value))
+
+            new_text = (
+                str(new_text)
+                .replace("\\", "\\\\")
+                .replace(",", "\\,")
+                .replace(":", "\\:")
+            )
+            lines.append(
+                f"{seconds} drawtext@{align} reinit text='{new_text}';")
+
+        with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp:
+            path = temp.name
+            temp.write("\n".join(lines))
+
+        self.cleanup_paths.append(path)
+        self.filters["drawtext"].append("sendcmd=f='{}'".format(
+            path.replace("\\", "/").replace(":", "\\:")
+        ))
+        self.add_text(text_for_size, align, frame_start, frame_end, options)
+
+    def _get_current_frame_expression(self, frame_start, frame_end):
+        if frame_start is None:
+            return None
+        return (
+            "%{eif:n+" + str(frame_start)
+            + ":d:" + str(len(str(frame_end))) + "}"
+        )
+
     def _add_burnin(self, text, align, options, draw):
         """
         Generic method for building the filter flags.

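Note: `add_per_frame_text()` drives ffmpeg's `sendcmd` filter. The temp file holds one command per frame, each re-initializing the `drawtext@<align>` filter's text at the timestamp (frame / fps seconds) where that frame starts, while `%{eif:n+<start>:d:<width>}` lets drawtext itself print a zero-padded current frame number. A sketch of the generated command file (values are hypothetical):

```python
# Hypothetical per-frame values; the real method formats them from
# listed_keys.
fps = 25.0
values = ["take_01", "take_02", "take_03"]

lines = [
    f"{frame / fps} drawtext@TOP_LEFT reinit text='{value}';"
    for frame, value in enumerate(values)
]
print("\n".join(lines))
# 0.0 drawtext@TOP_LEFT reinit text='take_01';
# 0.04 drawtext@TOP_LEFT reinit text='take_02';
# 0.08 drawtext@TOP_LEFT reinit text='take_03';
```
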
@@ -276,18 +366,19 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if CURRENT_FRAME_SPLITTER in text:
             frame_start = options["frame_offset"]
             frame_end = options.get("frame_end", frame_start)
-            if frame_start is None:
-                replacement_final = replacement_size = str(MISSING_KEY_VALUE)
+            expr = self._get_current_frame_expression(frame_start, frame_end)
+            if expr is not None:
+                max_length = len(str(frame_end))
+                # Use number '8' length times for replacement
+                size_replacement = max_length * "8"
             else:
-                replacement_final = "%{eif:n+" + str(frame_start) + ":d:" + \
-                    str(len(str(frame_end))) + "}"
-                replacement_size = str(frame_end)
+                expr = size_replacement = MISSING_KEY_VALUE
 
             final_text = final_text.replace(
-                CURRENT_FRAME_SPLITTER, replacement_final
+                CURRENT_FRAME_SPLITTER, expr
             )
             text_for_size = text_for_size.replace(
-                CURRENT_FRAME_SPLITTER, replacement_size
+                CURRENT_FRAME_SPLITTER, size_replacement
             )
 
         resolution = self.resolution

@@ -314,13 +405,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
             ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
         )
 
-        arg_font_path = font_path
-        if platform.system().lower() == "windows":
-            arg_font_path = (
-                arg_font_path
-                .replace(os.sep, r'\\' + os.sep)
-                .replace(':', r'\:')
-            )
+        arg_font_path = (
+            font_path
+            .replace("\\", "\\\\")
+            .replace(':', r'\:')
+        )
         data["font"] = arg_font_path
 
         self.filters['drawtext'].append(draw % data)

@@ -347,9 +436,15 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if overwrite:
             output = '-y {}'.format(output)
 
-        filters = ''
-        if self.filter_string:
-            filters = '-vf "{}"'.format(self.filter_string)
+        filters = ""
+        filter_string = self.filter_string
+        if filter_string:
+            with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp:
+                temp.write(filter_string)
+                filters_path = temp.name
+            filters = '-filter_script "{}"'.format(filters_path)
+            print("Filters:", filter_string)
+            self.cleanup_paths.append(filters_path)
 
         if self.first_frame is not None:
             start_number_arg = "-start_number {}".format(self.first_frame)

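Note: writing the filtergraph to a file and passing it with `-filter_script` (instead of inlining it via `-vf`) avoids shell quoting issues and OS command-line length limits, which the per-frame `sendcmd`/`drawtext` chains above can easily exceed. A minimal sketch (`input.mov`/`output.mov` are placeholders):

```python
import tempfile

filtergraph = "drawtext=text='sh010':x=10:y=10"  # hypothetical filter chain
with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp:
    temp.write(filtergraph)
    filters_path = temp.name

cmd = 'ffmpeg -y -i input.mov -filter_script "{}" output.mov'.format(
    filters_path)
print(cmd)  # run via subprocess afterwards, then delete filters_path
```
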
@@ -420,6 +515,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
                 "Failed to generate this f*cking file '%s'" % output
             )
 
+        for path in self.cleanup_paths:
+            if os.path.exists(path):
+                os.remove(path)
+
 
 def example(input_path, output_path):
     options_init = {

@@ -440,6 +539,51 @@ def example(input_path, output_path):
     burnin.render(output_path, overwrite=True)
 
 
+def prepare_fill_values(burnin_template, data):
+    """Prepare values that will be filled instead of burnin template.
+
+    Args:
+        burnin_template (str): Burnin template string.
+        data (dict[str, Any]): Data that will be used to fill template.
+
+    Returns:
+        tuple[dict[str, dict[str, Any]], dict[str, Any], set[str]]: Filled
+            values that can be used as are, listed values that have different
+            value per frame and missing keys that are not present in data.
+    """
+
+    fill_values = {}
+    listed_keys = {}
+    missing_keys = set()
+    for item in Formatter().parse(burnin_template):
+        _, field_name, format_spec, conversion = item
+        if not field_name:
+            continue
+        # Calculate nested keys '{project[name]}' -> ['project', 'name']
+        keys = [key.rstrip("]") for key in field_name.split("[")]
+        # Calculate original full key for replacement
+        conversion = "!{}".format(conversion) if conversion else ""
+        format_spec = ":{}".format(format_spec) if format_spec else ""
+        orig_key = "{{{}{}{}}}".format(
+            field_name, conversion, format_spec)
+
+        key_value = data
+        try:
+            for key in keys:
+                key_value = key_value[key]
+
+            if isinstance(key_value, list):
+                listed_keys[orig_key] = {
+                    "values": key_value,
+                    "keys": keys}
+            else:
+                fill_values[orig_key] = orig_key.format(**data)
+        except (KeyError, TypeError):
+            missing_keys.add(orig_key)
+            continue
+    return fill_values, listed_keys, missing_keys
+
+
 def burnins_from_data(
     input_path, output_path, data,
     codec_data=None, options=None, burnin_values=None, overwrite=True,

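Note: `prepare_fill_values()` leans on `string.Formatter.parse()`, which splits a template into `(literal_text, field_name, format_spec, conversion)` tuples, so each placeholder can be rebuilt verbatim and classified as plain, per-frame (list-valued), or missing:

```python
from string import Formatter

template = "{asset} v{version:0>3} {comment!r}"
for literal, field, spec, conv in Formatter().parse(template):
    print((literal, field, spec, conv))
# ('', 'asset', '', None)
# (' v', 'version', '0>3', None)
# (' ', 'comment', '', 'r')
```
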
@@ -512,17 +656,26 @@ def burnins_from_data(
     frame_end = data.get("frame_end")
     frame_start_tc = data.get('frame_start_tc', frame_start)
 
-    stream = burnin._streams[0]
+    video_stream = None
+    for stream in burnin._streams:
+        if stream.get("codec_type") == "video":
+            video_stream = stream
+            break
+
+    if video_stream is None:
+        raise ValueError("Source didn't have video stream.")
+
     if "resolution_width" not in data:
-        data["resolution_width"] = stream.get("width", MISSING_KEY_VALUE)
+        data["resolution_width"] = video_stream.get(
+            "width", MISSING_KEY_VALUE)
 
     if "resolution_height" not in data:
-        data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE)
+        data["resolution_height"] = video_stream.get(
+            "height", MISSING_KEY_VALUE)
 
+    r_frame_rate = video_stream.get("r_frame_rate", "0/0")
     if "fps" not in data:
-        data["fps"] = convert_ffprobe_fps_value(
-            stream.get("r_frame_rate", "0/0")
-        )
+        data["fps"] = convert_ffprobe_fps_value(r_frame_rate)
 
     # Check frame start and add expression if is available
     if frame_start is not None:

@@ -531,9 +684,9 @@ def burnins_from_data(
     if frame_start_tc is not None:
         data[TIMECODE_KEY[1:-1]] = TIMECODE_KEY
 
-    source_timecode = stream.get("timecode")
+    source_timecode = video_stream.get("timecode")
     if source_timecode is None:
-        source_timecode = stream.get("tags", {}).get("timecode")
+        source_timecode = video_stream.get("tags", {}).get("timecode")
 
     # Use "format" key from ffprobe data
     # - this is used e.g. in mxf extension

@@ -589,59 +742,24 @@ def burnins_from_data(
             print("Source does not have set timecode value.")
             value = value.replace(SOURCE_TIMECODE_KEY, MISSING_KEY_VALUE)
 
-        # Convert lists.
-        cmd = ""
-        text = None
-        keys = [i[1] for i in Formatter().parse(value) if i[1] is not None]
-        list_to_convert = []
-
-        # Warn about nested dictionary support for lists. Ei. we dont support
-        # it.
-        if "[" in "".join(keys):
-            print(
-                "We dont support converting nested dictionaries to lists,"
-                " so skipping {}".format(value)
-            )
-        else:
-            for key in keys:
-                data_value = data[key]
-
-                # Multiple lists are not supported.
-                if isinstance(data_value, list) and list_to_convert:
-                    raise ValueError(
-                        "Found multiple lists to convert, which is not "
-                        "supported: {}".format(value)
-                    )
-
-                if isinstance(data_value, list):
-                    print("Found list to convert: {}".format(data_value))
-                    for v in data_value:
-                        data[key] = v
-                        list_to_convert.append(value.format(**data))
-
-        if list_to_convert:
-            value = list_to_convert[0]
-            path = convert_list_to_command(
-                list_to_convert, data["fps"], label=align
-            )
-            cmd = "sendcmd=f='{}'".format(path)
-            cmd = cmd.replace("\\", "/")
-            cmd = cmd.replace(":", "\\:")
-            clean_up_paths.append(path)
-
-        # Failsafe for missing keys.
-        key_pattern = re.compile(r"(\{.*?[^{0]*\})")
-        missing_keys = []
-        for group in key_pattern.findall(value):
-            try:
-                group.format(**data)
-            except (TypeError, KeyError):
-                missing_keys.append(group)
-
+        fill_values, listed_keys, missing_keys = prepare_fill_values(
+            value, data
+        )
 
         missing_keys = list(set(missing_keys))
         for key in missing_keys:
             value = value.replace(key, MISSING_KEY_VALUE)
 
+        if listed_keys:
+            for key, key_value in fill_values.items():
+                if key == CURRENT_FRAME_KEY:
+                    key_value = CURRENT_FRAME_SPLITTER
+                value = value.replace(key, str(key_value))
+            burnin.add_per_frame_text(
+                value, align, frame_start, frame_end, listed_keys
+            )
+            continue
+
         # Handle timecode differently
         if has_source_timecode:
             args = [align, frame_start, frame_end, source_timecode]

@@ -665,7 +783,7 @@ def burnins_from_data(
 
         text = value.format(**data)
 
-        burnin.add_text(text, align, frame_start, frame_end, cmd=cmd)
+        burnin.add_text(text, align, frame_start, frame_end)
 
     ffmpeg_args = []
     if codec_data: