Merge branch 'develop' into feature/harmony_publish_template

This commit is contained in:
Toke Stuart Jepsen 2020-05-25 11:51:22 +01:00
commit 2941a6c293
9 changed files with 2105 additions and 135 deletions

View file

@@ -1251,10 +1251,10 @@ class SyncToAvalonEvent(BaseEvent):
return output
def process_renamed(self):
if not self.ftrack_renamed:
ent_infos = self.ftrack_renamed
if not ent_infos:
return
ent_infos = self.ftrack_renamed
renamed_tasks = {}
not_found = {}
changeable_queue = queue.Queue()
@@ -1276,9 +1276,9 @@ class SyncToAvalonEvent(BaseEvent):
if not avalon_ent:
# TODO logging
self.log.debug((
"Can't change the name (Entity is not is avalon) <{}>"
"Entity is not is avalon. Moving to \"add\" process. <{}>"
).format(ent_path))
not_found[ftrack_id] = ent_info
self.ftrack_added[ftrack_id] = ent_info
continue
if new_name == avalon_ent["name"]:
@@ -1456,7 +1456,6 @@ class SyncToAvalonEvent(BaseEvent):
# - happens when it was created by any sync event/action
pop_out_ents = []
new_tasks_by_parent = collections.defaultdict(list)
_new_ent_infos = {}
for ftrack_id, ent_info in ent_infos.items():
if self.avalon_ents_by_ftrack_id.get(ftrack_id):
pop_out_ents.append(ftrack_id)
@@ -1560,36 +1559,20 @@ class SyncToAvalonEvent(BaseEvent):
pop_out_ents.append(ftrack_id)
continue
configuration_id = entity_type_conf_ids.get(entity_type)
if not configuration_id:
for attr in cust_attrs:
key = attr["key"]
if key != CustAttrIdKey:
continue
if attr["entity_type"] != ent_info["entityType"]:
continue
if (
ent_info["entityType"] == "task" and
attr["object_type_id"] != ent_info["objectTypeId"]
):
continue
configuration_id = attr["id"]
entity_type_conf_ids[entity_type] = configuration_id
break
if not configuration_id:
self.log.warning(
"BUG REPORT: Missing configuration for `{} < {} >`".format(
entity_type, ent_info["entityType"]
)
)
mongo_id_configuration_id = self._mongo_id_configuration(
ent_info,
cust_attrs,
hier_attrs,
entity_type_conf_ids
)
if not mongo_id_configuration_id:
self.log.warning((
"BUG REPORT: Missing MongoID configuration for `{} < {} >`"
).format(entity_type, ent_info["entityType"]))
continue
_entity_key = collections.OrderedDict({
"configuration_id": configuration_id,
"configuration_id": mongo_id_configuration_id,
"entity_id": ftrack_id
})
@@ -1692,6 +1675,53 @@ class SyncToAvalonEvent(BaseEvent):
if new_name not in self.task_changes_by_avalon_id[mongo_id]:
self.task_changes_by_avalon_id[mongo_id].append(new_name)
def _mongo_id_configuration(
self,
ent_info,
cust_attrs,
hier_attrs,
temp_dict
):
# Use hierarchical mongo id attribute if possible.
if "_hierarchical" not in temp_dict:
hier_mongo_id_configuration_id = None
for attr in hier_attrs:
if attr["key"] == CustAttrIdKey:
hier_mongo_id_configuration_id = attr["id"]
break
temp_dict["_hierarchical"] = hier_mongo_id_configuration_id
hier_mongo_id_configuration_id = temp_dict.get("_hierarchical")
if hier_mongo_id_configuration_id is not None:
return hier_mongo_id_configuration_id
# Legacy part for cases when the MongoID attribute is set per entity type.
entity_type = ent_info["entity_type"]
mongo_id_configuration_id = temp_dict.get(entity_type)
if mongo_id_configuration_id is not None:
return mongo_id_configuration_id
for attr in cust_attrs:
key = attr["key"]
if key != CustAttrIdKey:
continue
if attr["entity_type"] != ent_info["entityType"]:
continue
if (
ent_info["entityType"] == "task" and
attr["object_type_id"] != ent_info["objectTypeId"]
):
continue
mongo_id_configuration_id = attr["id"]
break
temp_dict[entity_type] = mongo_id_configuration_id
return mongo_id_configuration_id
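For orientation, a standalone sketch of the lazy cache used above; the attribute id and the key constant are invented stand-ins for the real `CustAttrIdKey` data:

# Hypothetical data; "avalon_mongo_id" stands in for CustAttrIdKey.
CUST_ATTR_ID_KEY = "avalon_mongo_id"
hier_attrs = [{"key": CUST_ATTR_ID_KEY, "id": "hier-attr-id"}]
temp_dict = {}

def hierarchical_conf_id(hier_attrs, temp_dict):
    # Resolve once, then reuse the reserved "_hierarchical" slot.
    if "_hierarchical" not in temp_dict:
        temp_dict["_hierarchical"] = next(
            (attr["id"] for attr in hier_attrs
                if attr["key"] == CUST_ATTR_ID_KEY),
            None
        )
    return temp_dict["_hierarchical"]

assert hierarchical_conf_id(hier_attrs, temp_dict) == "hier-attr-id"
assert temp_dict == {"_hierarchical": "hier-attr-id"}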
def process_moved(self):
if not self.ftrack_moved:
return
@@ -1871,11 +1901,8 @@ class SyncToAvalonEvent(BaseEvent):
obj_type_id = ent_info["objectTypeId"]
ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id)
# Ftrack's entity_type does not have defined custom attributes
if ent_cust_attrs is None:
self.log.warning((
"BUG REPORT: Entity has ent type without"
" custom attributes <{}> \"{}\""
).format(entType, ent_info))
continue
for key, values in ent_info["changes"].items():

View file

@@ -1327,3 +1327,25 @@ class BuildWorkfile:
)
return output
def ffprobe_streams(path_to_file):
"""Load streams from entered filepath via ffprobe."""
log.info(
"Getting information about input \"{}\".".format(path_to_file)
)
args = [
get_ffmpeg_tool_path("ffprobe"),
"-v quiet",
"-print_format json",
"-show_format",
"-show_streams",
"\"{}\"".format(path_to_file)
]
command = " ".join(args)
log.debug("FFprobe command: \"{}\"".format(command))
popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
popen_output = popen.communicate()[0]
log.debug("FFprobe output: {}".format(popen_output))
return json.loads(popen_output)["streams"]

View file

@@ -1669,6 +1669,7 @@ class ExporterReviewMov(ExporterReview):
if any(colorspaces):
# OCIOColorSpace with controlled output
dag_node = nuke.createNode("OCIOColorSpace")
self._temp_nodes.append(dag_node)
for c in colorspaces:
test = dag_node["out_colorspace"].setValue(str(c))
if test:

View file

@@ -63,7 +63,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"name": "thumbnail" # Default component name is "main".
}
comp['thumbnail'] = True
elif comp.get('preview') or ("preview" in comp.get('tags', [])):
elif comp.get('ftrackreview') or ("ftrackreview" in comp.get('tags', [])):
'''
Ftrack bug requirement:
- Start frame must be 0

View file

@@ -1,4 +1,5 @@
import os
import re
import json
import copy
@@ -21,10 +22,771 @@ class ExtractBurnin(pype.api.Extractor):
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
optional = True
positions = [
"top_left", "top_centered", "top_right",
"bottom_right", "bottom_centered", "bottom_left"
]
# Default burnin options for cases when they are not set in presets.
default_options = {
"opacity": 1,
"x_offset": 5,
"y_offset": 5,
"bg_padding": 5,
"bg_opacity": 0.5,
"font_size": 42
}
# Preset attributes
profiles = None
options = None
fields = None
def process(self, instance):
# ffmpeg doesn't support multipart exrs
if instance.data.get("multipartExr") is True:
instance_label = (
getattr(instance, "label", None)
or instance.data.get("label")
or instance.data.get("name")
)
self.log.info((
"Instance \"{}\" contain \"multipartExr\". Skipped."
).format(instance_label))
return
# QUESTION what is this for and should we raise an exception?
if "representations" not in instance.data:
raise RuntimeError("Burnin needs already created mov to work on.")
if self.profiles is None:
return self.legacy_process(instance)
self.main_process(instance)
# Remove any representations tagged for deletion.
# QUESTION Is it possible to have a representation with "delete" tag?
for repre in tuple(instance.data["representations"]):
if "delete" in repre.get("tags", []):
self.log.debug("Removing representation: {}".format(repre))
instance.data["representations"].remove(repre)
self.log.debug(instance.data["representations"])
def main_process(self, instance):
# TODO get these data from context
host_name = os.environ["AVALON_APP"]
task_name = os.environ["AVALON_TASK"]
family = self.main_family_from_instance(instance)
# Find profile most matching current host, task and instance family
profile = self.find_matching_profile(host_name, task_name, family)
if not profile:
self.log.info((
"Skipped instance. None of profiles in presets are for"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\""
).format(host_name, family, task_name))
return
# Pre-filter burnin definitions by instance families
burnin_defs = self.filter_burnins_by_families(profile, instance)
if not burnin_defs:
self.log.info((
"Skipped instance. Burnin definitions are not set for profile"
" Host: \"{}\" | Family: \"{}\" | Task \"{}\" | Profile \"{}\""
).format(host_name, family, task_name, profile))
return
# Prepare burnin options
profile_options = copy.deepcopy(self.default_options)
for key, value in (self.options or {}).items():
if value is not None:
profile_options[key] = value
# Prepare global burnin values from presets
profile_burnins = {}
for key, value in (self.fields or {}).items():
key_low = key.lower()
if key_low in self.positions:
if value is not None:
profile_burnins[key_low] = value
# Prepare basic data for processing
_burnin_data, _temp_data = self.prepare_basic_data(instance)
anatomy = instance.context.data["anatomy"]
scriptpath = self.burnin_script_path()
executable = self.python_executable_path()
for idx, repre in enumerate(tuple(instance.data["representations"])):
self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
if not self.repres_is_valid(repre):
continue
# Filter output definition by representation tags (optional)
repre_burnin_defs = self.filter_burnins_by_tags(
burnin_defs, repre["tags"]
)
if not repre_burnin_defs:
self.log.info((
"Skipped representation. All burnin definitions from"
" selected profile does not match to representation's"
" tags. \"{}\""
).format(str(repre["tags"])))
continue
# Create copy of `_burnin_data` and `_temp_data` for repre.
burnin_data = copy.deepcopy(_burnin_data)
temp_data = copy.deepcopy(_temp_data)
# Prepare representation based data.
self.prepare_repre_data(instance, repre, burnin_data, temp_data)
# Add anatomy keys to burnin_data.
filled_anatomy = anatomy.format_all(burnin_data)
burnin_data["anatomy"] = filled_anatomy.get_solved()
first_output = True
files_to_delete = []
for filename_suffix, burnin_def in repre_burnin_defs.items():
new_repre = copy.deepcopy(repre)
# Keep "ftrackreview" tag only on first output
if first_output:
first_output = False
elif "ftrackreview" in new_repre["tags"]:
new_repre["tags"].remove("ftrackreview")
burnin_options = copy.deepcopy(profile_options)
burnin_values = copy.deepcopy(profile_burnins)
# Options overrides
for key, value in (burnin_def.get("options") or {}).items():
# Set or override value if it is valid
if value is not None:
burnin_options[key] = value
# Burnin values overrides
for key, value in burnin_def.items():
key_low = key.lower()
if key_low in self.positions:
if value is not None:
# Set or override value if it is valid
burnin_values[key_low] = value
elif key_low in burnin_values:
# Pop key if value is set to None (null in json)
burnin_values.pop(key_low)
# Remove "delete" tag from new representation
if "delete" in new_repre["tags"]:
new_repre["tags"].remove("delete")
# Update name and outputName to be able to have multiple outputs
# Join previous "outputName" with filename suffix
new_name = "_".join([new_repre["outputName"], filename_suffix])
new_repre["name"] = new_name
new_repre["outputName"] = new_name
# Prepare paths and files for process.
self.input_output_paths(new_repre, temp_data, filename_suffix)
# Data for burnin script
script_data = {
"input": temp_data["full_input_path"],
"output": temp_data["full_output_path"],
"burnin_data": burnin_data,
"options": burnin_options,
"values": burnin_values
}
self.log.debug(
"script_data: {}".format(json.dumps(script_data, indent=4))
)
# Dump data to string
dumped_script_data = json.dumps(script_data)
# Prepare subprocess arguments
args = [executable, scriptpath, dumped_script_data]
self.log.debug("Executing: {}".format(args))
# Run burnin script
output = pype.api.subprocess(args)
self.log.debug("Output: {}".format(output))
for filepath in temp_data["full_input_paths"]:
filepath = filepath.replace("\\", "/")
if filepath not in files_to_delete:
files_to_delete.append(filepath)
# Add new representation to instance
instance.data["representations"].append(new_repre)
# Remove source representation
# NOTE we could maybe keep the source representation if necessary
instance.data["representations"].remove(repre)
# Delete input files
for filepath in files_to_delete:
if os.path.exists(filepath):
os.remove(filepath)
self.log.debug("Removed: \"{}\"".format(filepath))
def prepare_basic_data(self, instance):
"""Pick data from instance for processing and for burnin strings.
Args:
instance (Instance): Currently processed instance.
Returns:
tuple: `(burnin_data, temp_data)` - `burnin_data` contains data for
filling burnin strings. `temp_data` is for repre pre-process
preparation.
"""
self.log.debug("Prepring basic data for burnins")
context = instance.context
version = instance.data.get("version")
if version is None:
version = context.data.get("version")
frame_start = instance.data.get("frameStart")
if frame_start is None:
self.log.warning(
"Key \"frameStart\" is not set. Setting to \"0\"."
)
frame_start = 0
frame_start = int(frame_start)
frame_end = instance.data.get("frameEnd")
if frame_end is None:
self.log.warning(
"Key \"frameEnd\" is not set. Setting to \"1\"."
)
frame_end = 1
frame_end = int(frame_end)
handles = instance.data.get("handles")
if handles is None:
handles = context.data.get("handles")
if handles is None:
handles = 0
handle_start = instance.data.get("handleStart")
if handle_start is None:
handle_start = context.data.get("handleStart")
if handle_start is None:
handle_start = handles
handle_end = instance.data.get("handleEnd")
if handle_end is None:
handle_end = context.data.get("handleEnd")
if handle_end is None:
handle_end = handles
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
burnin_data = copy.deepcopy(instance.data["anatomyData"])
if "slate.farm" in instance.data["families"]:
frame_start_handle += 1
burnin_data.update({
"version": int(version),
"comment": context.data.get("comment") or ""
})
intent_label = context.data.get("intent")
if intent_label and isinstance(intent_label, dict):
intent_label = intent_label.get("label")
if intent_label:
burnin_data["intent"] = intent_label
temp_data = {
"frame_start": frame_start,
"frame_end": frame_end,
"frame_start_handle": frame_start_handle,
"frame_end_handle": frame_end_handle
}
self.log.debug(
"Basic burnin_data: {}".format(json.dumps(burnin_data, indent=4))
)
return burnin_data, temp_data
def repres_is_valid(self, repre):
"""Validation if representaion should be processed.
Args:
repre (dict): Representation which should be checked.
Returns:
bool: False if can't be processed else True.
"""
if "burnin" not in (repre.get("tags") or []):
self.log.info((
"Representation \"{}\" don't have \"burnin\" tag. Skipped."
).format(repre["name"]))
return False
return True
def filter_burnins_by_tags(self, burnin_defs, tags):
"""Filter burnin definitions by entered representation tags.
Burnin definitions without tags filter are marked as valid.
Args:
burnin_defs (dict): Burnin definitions from presets.
tags (list): Tags of processed representation.
Returns:
dict: Burnin definitions matching entered tags.
"""
filtered_burnins = {}
repre_tags_low = [tag.lower() for tag in tags]
for filename_suffix, burnin_def in burnin_defs.items():
valid = True
output_filters = burnin_def.get("filter")
if output_filters:
# Check tag filters
tag_filters = output_filters.get("tags")
if tag_filters:
tag_filters_low = [tag.lower() for tag in tag_filters]
valid = False
for tag in repre_tags_low:
if tag in tag_filters_low:
valid = True
break
if not valid:
continue
if valid:
filtered_burnins[filename_suffix] = burnin_def
return filtered_burnins
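A short worked example with invented definitions: entries without a tags filter always pass, the rest need at least one case-insensitive match with the representation tags.

burnin_defs = {
    "burnin": {"filter": {"tags": ["Review"]}},
    "raw": {}
}
# Representation tags ["review", "mov"] keep both definitions,
# tags ["mov"] keep only "raw".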
def input_output_paths(self, new_repre, temp_data, filename_suffix):
"""Prepare input and output paths for representation.
Store data to `temp_data` under these keys: "full_input_path" - full
path to source files, optionally with sequence formatting;
"full_output_path" - full path to output, optionally with sequence
formatting; "full_input_paths" - list of all source files which will
be deleted when the burnin script ends; "repre_files" - list of
output filenames.
Args:
new_repre (dict): Currently processed new representation.
temp_data (dict): Temp data of representation process.
filename_suffix (str): Filename suffix added to input filename.
Returns:
None: This is processing method.
"""
# TODO we should find a better way to know if input is a sequence
is_sequence = (
"sequence" in new_repre["tags"]
and isinstance(new_repre["files"], (tuple, list))
)
if is_sequence:
input_filename = new_repre["sequence_file"]
else:
input_filename = new_repre["files"]
filepart_start, ext = os.path.splitext(input_filename)
dir_path, basename = os.path.split(filepart_start)
if is_sequence:
# NOTE modified to keep the name when it contains multiple dots
basename_parts = basename.split(".")
frame_part = basename_parts.pop(-1)
basename_start = ".".join(basename_parts) + filename_suffix
new_basename = ".".join((basename_start, frame_part))
output_filename = new_basename + ext
else:
output_filename = basename + filename_suffix + ext
if dir_path:
output_filename = os.path.join(dir_path, output_filename)
stagingdir = new_repre["stagingDir"]
full_input_path = os.path.join(
os.path.normpath(stagingdir), input_filename
).replace("\\", "/")
full_output_path = os.path.join(
os.path.normpath(stagingdir), output_filename
).replace("\\", "/")
temp_data["full_input_path"] = full_input_path
temp_data["full_output_path"] = full_output_path
self.log.debug("full_input_path: {}".format(full_input_path))
self.log.debug("full_output_path: {}".format(full_output_path))
# Prepare full paths to input files and filenames for representation
full_input_paths = []
if is_sequence:
repre_files = []
for frame_index in range(1, temp_data["duration"] + 1):
repre_files.append(output_filename % frame_index)
full_input_paths.append(full_input_path % frame_index)
else:
full_input_paths.append(full_input_path)
repre_files = output_filename
temp_data["full_input_paths"] = full_input_paths
new_repre["files"] = repre_files
def prepare_repre_data(self, instance, repre, burnin_data, temp_data):
"""Prepare data for representation.
Args:
instance (Instance): Currently processed Instance.
repre (dict): Currently processed representation.
burnin_data (dict): Copy of basic burnin data based on instance
data.
temp_data (dict): Copy of basic temp data
"""
# Add representation name to burnin data
burnin_data["representation"] = repre["name"]
# no handles switch from profile tags
if "no-handles" in repre["tags"]:
burnin_frame_start = temp_data["frame_start"]
burnin_frame_end = temp_data["frame_end"]
else:
burnin_frame_start = temp_data["frame_start_handle"]
burnin_frame_end = temp_data["frame_end_handle"]
burnin_duration = burnin_frame_end - burnin_frame_start + 1
burnin_data.update({
"frame_start": burnin_frame_start,
"frame_end": burnin_frame_end,
"duration": burnin_duration,
})
temp_data["duration"] = burnin_duration
# Add values for slate frames
burnin_slate_frame_start = burnin_frame_start
# Move frame start by 1 frame when slate is used.
if (
"slate" in instance.data["families"]
and "slate-frame" in repre["tags"]
):
burnin_slate_frame_start -= 1
self.log.debug("burnin_slate_frame_start: {}".format(
burnin_slate_frame_start
))
burnin_data.update({
"slate_frame_start": burnin_slate_frame_start,
"slate_frame_end": burnin_frame_end,
"slate_duration": (
burnin_frame_end - burnin_slate_frame_start + 1
)
})
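Worked numbers for the frame math above (values invented):

frame_start, frame_end, handle = 1001, 1100, 10
burnin_frame_start = frame_start - handle  # 991
burnin_frame_end = frame_end + handle  # 1110
burnin_duration = burnin_frame_end - burnin_frame_start + 1  # 120
# With "slate" family and "slate-frame" tag the slate range starts at 990.
slate_frame_start = burnin_frame_start - 1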
def find_matching_profile(self, host_name, task_name, family):
""" Filter profiles by Host name, Task name and main Family.
Filtering keys are "hosts" (list), "tasks" (list), "families" (list).
If key is not found or is empty then it is expected to match.
Args:
host_name (str): Current running host name.
task_name (str): Current context task name.
family (str): Main family of current Instance.
Returns:
dict/None: Most matching profile or None when none of the profiles
match the criteria.
"""
matching_profiles = None
highest_points = -1
for profile in self.profiles or tuple():
profile_points = 0
profile_value = []
# Host filtering
host_names = profile.get("hosts")
match = self.validate_value_by_regexes(host_name, host_names)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
# Task filtering
task_names = profile.get("tasks")
match = self.validate_value_by_regexes(task_name, task_names)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
# Family filtering
families = profile.get("families")
match = self.validate_value_by_regexes(family, families)
if match == -1:
continue
profile_points += match
profile_value.append(bool(match))
if profile_points > highest_points:
matching_profiles = []
highest_points = profile_points
if profile_points == highest_points:
profile["__value__"] = profile_value
matching_profiles.append(profile)
if not matching_profiles:
return
if len(matching_profiles) == 1:
return matching_profiles[0]
return self.profile_exclusion(matching_profiles)
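To illustrate the scoring with invented profiles: each of the three filters adds one point on an explicit match, a missing filter adds zero, and a mismatch (-1) discards the profile.

# host_name="nuke", task_name="compositing", family="render"
profile_a = {"hosts": ["nuke"], "families": ["render"]}  # 2 points
profile_b = {"hosts": ["nuke"]}                          # 1 point
profile_c = {"hosts": ["maya"], "families": ["render"]}  # discarded
# profile_a wins; equal scores would go to profile_exclusion().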
def profile_exclusion(self, matching_profiles):
"""Find out most matching profile by host, task and family match.
Profiles are selectively filtered. Each profile should have
"__value__" key with list of booleans. Each boolean represents
existence of filter for specific key (host, task, family).
Profiles are looped over in passes. In each pass they are split into
true_list and false_list; the next pass uses profiles from true_list
if there are any, otherwise false_list is used.
Filtering ends when only one profile is left in true_list, or when
all existence booleans have been processed, in which case the first
of the remaining profiles is returned.
Args:
matching_profiles (list): Profiles with same values.
Returns:
dict: Most matching profile.
"""
self.log.info(
"Search for first most matching profile in match order:"
" Host name -> Task name -> Family."
)
# Filter all profiles with highest points value. First filter profiles
# with matching host if there are any, then filter profiles by task
# name if there are any, and lastly filter by family. Otherwise use
# the first profile in the list.
idx = 0
final_profile = None
while True:
profiles_true = []
profiles_false = []
for profile in matching_profiles:
value = profile["__value__"]
# Just use first profile when idx exceeds the number of values.
if not idx < len(value):
final_profile = profile
break
if value[idx]:
profiles_true.append(profile)
else:
profiles_false.append(profile)
if final_profile is not None:
break
if profiles_true:
matching_profiles = profiles_true
else:
matching_profiles = profiles_false
if len(matching_profiles) == 1:
final_profile = matching_profiles[0]
break
idx += 1
final_profile.pop("__value__")
return final_profile
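A worked pass of the exclusion with invented "__value__" lists (host, task, family order):

# p1["__value__"] == [True, False, True]  # host and family matched
# p2["__value__"] == [True, True, False]  # host and task matched
# idx 0: both True, both stay.
# idx 1: only p2 is True, so p2 is the final profile;
# a host match outweighs task, a task match outweighs family.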
def filter_burnins_by_families(self, profile, instance):
"""Filter outputs that are not supported for instance families.
Output definitions without families filter are marked as valid.
Args:
profile (dict): Profile from presets matching current context.
instance (Instance): Currently processed instance.
Returns:
dict: Burnin definitions matching the instance families.
"""
filtered_burnin_defs = {}
burnin_defs = profile.get("burnins")
if not burnin_defs:
return filtered_burnin_defs
# Prepare families
families = self.families_from_instance(instance)
families = [family.lower() for family in families]
for filename_suffix, burnin_def in burnin_defs.items():
burnin_filter = burnin_def.get("filter")
# When filter is not set, skip the filtering process
if burnin_filter:
families_filters = burnin_filter.get("families")
if not self.families_filter_validation(
families, families_filters
):
continue
filtered_burnin_defs[filename_suffix] = burnin_def
return filtered_burnin_defs
def families_filter_validation(self, families, output_families_filter):
"""Determines if entered families intersect with families filters.
All family values are lowercased to avoid unexpected results.
"""
if not output_families_filter:
return True
for family_filter in output_families_filter:
if not family_filter:
continue
if not isinstance(family_filter, (list, tuple)):
if family_filter.lower() not in families:
continue
return True
valid = True
for family in family_filter:
if family.lower() not in families:
valid = False
break
if valid:
return True
return False
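The nesting gives OR/AND semantics; a sketch with invented families:

families = ["render", "review"]
# Top-level entries are alternatives (OR), nested lists require all (AND):
# families_filter_validation(families, ["preview"])               -> False
# families_filter_validation(families, ["preview", "render"])     -> True
# families_filter_validation(families, [["render", "review"]])    -> True
# families_filter_validation(families, [["render", "modeling"]])  -> False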
def compile_list_of_regexes(self, in_list):
"""Convert strings in entered list to compiled regex objects."""
regexes = []
if not in_list:
return regexes
for item in in_list:
if not item:
continue
try:
regexes.append(re.compile(item))
except TypeError:
self.log.warning((
"Invalid type \"{}\" value \"{}\"."
" Expected string based object. Skipping."
).format(str(type(item)), str(item)))
return regexes
def validate_value_by_regexes(self, value, in_list):
"""Validates in any regexe from list match entered value.
Args:
in_list (list): List with regexes.
value (str): String where regexes is checked.
Returns:
int: Returns `0` when list is not set or is empty. Returns `1` when
any regex match value and returns `-1` when none of regexes
match value entered.
"""
if not in_list:
return 0
output = -1
regexes = self.compile_list_of_regexes(in_list)
for regex in regexes:
if re.match(regex, value):
output = 1
break
return output
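Return values at a glance (patterns invented):

# validate_value_by_regexes("nuke", None)      -> 0   (no filter, neutral)
# validate_value_by_regexes("nuke", ["nu.*"])  -> 1   (a regex matched)
# validate_value_by_regexes("maya", ["nu.*"])  -> -1  (filter set, no match)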
def main_family_from_instance(self, instance):
"""Returns main family of entered instance."""
family = instance.data.get("family")
if not family:
family = instance.data["families"][0]
return family
def families_from_instance(self, instance):
"""Returns all families of entered instance."""
families = []
family = instance.data.get("family")
if family:
families.append(family)
for family in (instance.data.get("families") or tuple()):
if family not in families:
families.append(family)
return families
def burnin_script_path(self):
"""Returns path to python script for burnin processing."""
# TODO maybe convert to Plugin's attribute
# Get script path.
module_path = os.environ["PYPE_MODULE_ROOT"]
# There can be multiple paths in PYPE_MODULE_ROOT, in which case
# we just take first one.
if os.pathsep in module_path:
module_path = module_path.split(os.pathsep)[0]
scriptpath = os.path.normpath(
os.path.join(
module_path,
"pype",
"scripts",
"otio_burnin.py"
)
)
self.log.debug("scriptpath: {}".format(scriptpath))
return scriptpath
def python_executable_path(self):
"""Returns path to Python 3 executable."""
# TODO maybe convert to Plugin's attribute
# Get executable.
executable = os.getenv("PYPE_PYTHON_EXE")
# There can be multiple paths in PYPE_PYTHON_EXE, in which case
# we just take first one.
if os.pathsep in executable:
executable = executable.split(os.pathsep)[0]
self.log.debug("executable: {}".format(executable))
return executable
def legacy_process(self, instance):
self.log.warning("Legacy burnin presets are used.")
context_data = instance.context.data
version = instance.data.get(

File diff suppressed because it is too large

View file

@@ -26,47 +26,60 @@ class ExtractReviewSlate(pype.api.Extractor):
slate_path = inst_data.get("slateFrame")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# values are set in ExtractReview
to_width = inst_data["reviewToWidth"]
to_height = inst_data["reviewToHeight"]
slate_stream = pype.lib.ffprobe_streams(slate_path)[0]
slate_width = slate_stream["width"]
slate_height = slate_stream["height"]
if "reviewToWidth" in inst_data:
use_legacy_code = True
else:
use_legacy_code = False
resolution_width = inst_data.get("resolutionWidth", to_width)
resolution_height = inst_data.get("resolutionHeight", to_height)
pixel_aspect = inst_data.get("pixelAspect", 1)
fps = inst_data.get("fps")
# defining image ratios
resolution_ratio = ((float(resolution_width) * pixel_aspect) /
resolution_height)
delivery_ratio = float(to_width) / float(to_height)
self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio))
self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio))
# get scale factor
scale_factor = float(to_height) / (
resolution_height * pixel_aspect)
# shorten two decimals long float number for testing conditions
resolution_ratio_test = float(
"{:0.2f}".format(resolution_ratio))
delivery_ratio_test = float(
"{:0.2f}".format(delivery_ratio))
if resolution_ratio_test < delivery_ratio_test:
scale_factor = float(to_width) / (
resolution_width * pixel_aspect)
self.log.debug("__ scale_factor: `{}`".format(scale_factor))
for i, repre in enumerate(inst_data["representations"]):
_remove_at_end = []
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
for idx, repre in enumerate(inst_data["representations"]):
self.log.debug("repre ({}): `{}`".format(idx + 1, repre))
p_tags = repre.get("tags", [])
if "slate-frame" not in p_tags:
continue
# values are set in ExtractReview
if use_legacy_code:
to_width = inst_data["reviewToWidth"]
to_height = inst_data["reviewToHeight"]
else:
to_width = repre["resolutionWidth"]
to_height = repre["resolutionHeight"]
# defining image ratios
resolution_ratio = (
(float(slate_width) * pixel_aspect) / slate_height
)
delivery_ratio = float(to_width) / float(to_height)
self.log.debug("resolution_ratio: `{}`".format(resolution_ratio))
self.log.debug("delivery_ratio: `{}`".format(delivery_ratio))
# get scale factor
scale_factor_by_height = float(to_height) / slate_height
scale_factor_by_width = float(to_width) / (
slate_width * pixel_aspect
)
# shorten float numbers to two decimals for testing conditions
resolution_ratio_test = float("{:0.2f}".format(resolution_ratio))
delivery_ratio_test = float("{:0.2f}".format(delivery_ratio))
self.log.debug("__ scale_factor_by_width: `{}`".format(
scale_factor_by_width
))
self.log.debug("__ scale_factor_by_height: `{}`".format(
scale_factor_by_height
))
_remove_at_end = []
stagingdir = repre["stagingDir"]
input_file = "{0}".format(repre["files"])
@@ -84,21 +97,27 @@ class ExtractReviewSlate(pype.api.Extractor):
input_args = []
output_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(repre["_profile"].get('input', []))
if use_legacy_code:
input_args.extend(repre["_profile"].get('input', []))
else:
input_args.extend(repre["outputDef"].get('input', []))
input_args.append("-loop 1 -i {}".format(slate_path))
input_args.extend([
"-r {}".format(fps),
"-t 0.04"]
)
# output args
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
if use_legacy_code:
codec_args = repre["_profile"].get('codec', [])
output_args.extend(codec_args)
# preset's output data
output_args.extend(repre["_profile"].get('output', []))
else:
# Codecs are copied from source for whole input
codec_args = self.codec_args(repre)
output_args.extend(codec_args)
# make sure colors are correct
output_args.extend([
@@ -109,34 +128,37 @@ class ExtractReviewSlate(pype.api.Extractor):
])
# scaling non-square pixels and 1920 width
if "reformat" in p_tags:
if (
# Always scale slate if not legacy
not use_legacy_code or
# Legacy code required reformat tag
(use_legacy_code and "reformat" in p_tags)
):
if resolution_ratio_test < delivery_ratio_test:
self.log.debug("lower then delivery")
width_scale = int(to_width * scale_factor)
width_half_pad = int((
to_width - width_scale) / 2)
width_scale = int(slate_width * scale_factor_by_height)
width_half_pad = int((to_width - width_scale) / 2)
height_scale = to_height
height_half_pad = 0
else:
self.log.debug("heigher then delivery")
width_scale = to_width
width_half_pad = 0
scale_factor = float(to_width) / (float(
resolution_width) * pixel_aspect)
self.log.debug(scale_factor)
height_scale = int(
resolution_height * scale_factor)
height_half_pad = int(
(to_height - height_scale) / 2)
height_scale = int(slate_height * scale_factor_by_width)
height_half_pad = int((to_height - height_scale) / 2)
self.log.debug(
"__ width_scale: `{}`".format(width_scale))
"__ width_scale: `{}`".format(width_scale)
)
self.log.debug(
"__ width_half_pad: `{}`".format(width_half_pad))
"__ width_half_pad: `{}`".format(width_half_pad)
)
self.log.debug(
"__ height_scale: `{}`".format(height_scale))
"__ height_scale: `{}`".format(height_scale)
)
self.log.debug(
"__ height_half_pad: `{}`".format(height_half_pad))
"__ height_half_pad: `{}`".format(height_half_pad)
)
scaling_arg = ("scale={0}x{1}:flags=lanczos,"
"pad={2}:{3}:{4}:{5}:black,setsar=1").format(
@@ -144,10 +166,12 @@ class ExtractReviewSlate(pype.api.Extractor):
width_half_pad, height_half_pad
)
vf_back = self.add_video_filter_args(
output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
vf_back = self.add_video_filter_args(output_args, scaling_arg)
# add it to output_args
output_args.insert(0, vf_back)
# overrides output file
output_args.append("-y")
slate_v_path = slate_path.replace(".png", ext)
output_args.append(slate_v_path)
@@ -206,10 +230,10 @@ class ExtractReviewSlate(pype.api.Extractor):
"name": repre["name"],
"tags": [x for x in repre["tags"] if x != "delete"]
}
inst_data["representations"][i].update(repre_update)
inst_data["representations"][idx].update(repre_update)
self.log.debug(
"_ representation {}: `{}`".format(
i, inst_data["representations"][i]))
idx, inst_data["representations"][idx]))
# removing temp files
for f in _remove_at_end:
@@ -260,3 +284,39 @@ class ExtractReviewSlate(pype.api.Extractor):
vf_back = "-vf " + ",".join(vf_fixed)
return vf_back
def codec_args(self, repre):
"""Detect possible codec arguments from representation."""
codec_args = []
# Get one filename of representation files
filename = repre["files"]
# If files is list then pick first filename in list
if isinstance(filename, (tuple, list)):
filename = filename[0]
# Get full path to the file
full_input_path = os.path.join(repre["stagingDir"], filename)
try:
# Get information about input file via ffprobe tool
streams = pype.lib.ffprobe_streams(full_input_path)
except Exception:
self.log.warning(
"Could not get codec data from input.",
exc_info=True
)
return codec_args
codec_name = streams[0].get("codec_name")
if codec_name:
codec_args.append("-codec:v {}".format(codec_name))
profile_name = streams[0].get("profile")
if profile_name:
profile_name = profile_name.replace(" ", "_").lower()
codec_args.append("-profile:v {}".format(profile_name))
pix_fmt = streams[0].get("pix_fmt")
if pix_fmt:
codec_args.append("-pix_fmt {}".format(pix_fmt))
return codec_args
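For example, a typical H.264 stream reported by ffprobe (values illustrative) would produce:

# streams[0] == {"codec_name": "h264", "profile": "High", "pix_fmt": "yuv420p"}
# -> ["-codec:v h264", "-profile:v high", "-pix_fmt yuv420p"]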

View file

@@ -750,7 +750,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
)
elif len(matching_profiles) > 1:
template_name = matching_profiles.keys()[0]
template_name = tuple(matching_profiles.keys())[0]
self.log.warning((
"More than one template profiles matched"
" Family \"{}\" and Task: \"{}\"."

View file

@@ -20,7 +20,7 @@ FFMPEG = (
).format(ffmpeg_path)
FFPROBE = (
'{} -v quiet -print_format json -show_format -show_streams %(source)s'
'{} -v quiet -print_format json -show_format -show_streams "%(source)s"'
).format(ffprobe_path)
DRAWTEXT = (
@@ -55,7 +55,7 @@ def _streams(source):
def get_fps(str_value):
if str_value == "0/0":
print("Source has \"r_frame_rate\" value set to \"0/0\".")
log.warning("Source has \"r_frame_rate\" value set to \"0/0\".")
return "Unknown"
items = str_value.split("/")
@@ -266,7 +266,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
:returns: completed command
:rtype: str
"""
output = output or ''
output = '"{}"'.format(output or '')
if overwrite:
output = '-y {}'.format(output)
@@ -300,10 +300,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
args=args,
overwrite=overwrite
)
# print(command)
log.info("Launching command: {}".format(command))
proc = subprocess.Popen(command, shell=True)
proc.communicate()
log.info(proc.communicate()[0])
if proc.returncode != 0:
raise RuntimeError("Failed to render '%s': %s'"
% (output, command))
@@ -335,22 +335,23 @@ def example(input_path, output_path):
def burnins_from_data(
input_path, output_path, data, codec_data=None, overwrite=True
input_path, output_path, data,
codec_data=None, options=None, burnin_values=None, overwrite=True
):
'''
This method adds burnins to video/image file based on presets setting.
"""This method adds burnins to video/image file based on presets setting.
Extension of output MUST be the same as input. (mov -> mov, avi -> avi,...)
:param input_path: full path to input file where burnins should be add
:type input_path: str
:param codec_data: all codec related arguments in list
:param codec_data: list
:param output_path: full path to output file where output will be rendered
:type output_path: str
:param data: data required for burnin settings (more info below)
:type data: dict
:param overwrite: output will be overriden if already exists, defaults to True
:type overwrite: bool
Args:
input_path (str): Full path to input file where burnins should be add.
output_path (str): Full path to output file where output will be
rendered.
data (dict): Data required for burnin settings (more info below).
codec_data (list): All codec related arguments in list.
options (dict): Options for burnins.
burnin_values (dict): Contain positioned values.
overwrite (bool): Output will be overwritten if it already exists,
True by default.
Presets must be set separately. Should be dict with 2 keys:
- "options" - sets look of burnins - colors, opacity,...(more info: ModifiedBurnins doc)
@@ -391,11 +392,15 @@ def burnins_from_data(
"frame_start_tc": 1,
"shot": "sh0010"
}
'''
presets = config.get_presets().get('tools', {}).get('burnins', {})
options_init = presets.get('options')
"""
burnin = ModifiedBurnins(input_path, options_init=options_init)
# Use legacy processing when options are not set
if options is None or burnin_values is None:
presets = config.get_presets().get("tools", {}).get("burnins", {})
options = presets.get("options")
burnin_values = presets.get("burnins") or {}
burnin = ModifiedBurnins(input_path, options_init=options)
frame_start = data.get("frame_start")
frame_end = data.get("frame_end")
@@ -425,7 +430,7 @@
if source_timecode is not None:
data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY
for align_text, value in presets.get('burnins', {}).items():
for align_text, value in burnin_values.items():
if not value:
continue
@@ -504,18 +509,39 @@ def burnins_from_data(
text = value.format(**data)
burnin.add_text(text, align, frame_start, frame_end)
codec_args = ""
ffmpeg_args = []
if codec_data:
codec_args = " ".join(codec_data)
# Use codec definition from method arguments
ffmpeg_args = codec_data
burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)
else:
codec_name = burnin._streams[0].get("codec_name")
if codec_name:
ffmpeg_args.append("-codec:v {}".format(codec_name))
profile_name = burnin._streams[0].get("profile")
if profile_name:
# lowercase profile name and replace spaces with underscores
profile_name = profile_name.replace(" ", "_").lower()
ffmpeg_args.append("-profile:v {}".format(profile_name))
pix_fmt = burnin._streams[0].get("pix_fmt")
if pix_fmt:
ffmpeg_args.append("-pix_fmt {}".format(pix_fmt))
ffmpeg_args_str = " ".join(ffmpeg_args)
burnin.render(
output_path, args=ffmpeg_args_str, overwrite=overwrite, **data
)
if __name__ == '__main__':
if __name__ == "__main__":
in_data = json.loads(sys.argv[-1])
burnins_from_data(
in_data['input'],
in_data['output'],
in_data['burnin_data'],
in_data['codec']
in_data["input"],
in_data["output"],
in_data["burnin_data"],
codec_data=in_data.get("codec"),
options=in_data.get("options"),
burnin_values=in_data.get("values")
)
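The script thus expects a single JSON argument shaped like the `script_data` dict built in ExtractBurnin; a hypothetical invocation (paths and values invented):

# python otio_burnin.py '{
#     "input": "/tmp/shot010_baking.mov",
#     "output": "/tmp/shot010_burnin.mov",
#     "burnin_data": {"frame_start": 991, "frame_end": 1110},
#     "options": {"opacity": 1, "font_size": 42},
#     "values": {"top_left": "{frame_start}"}
# }'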