Merge branch 'release/2.14.0' of https://github.com/pypeclub/pype into bugfix/AE_issues

Petr Kalis 2020-11-20 17:49:58 +01:00
commit 846a663023
12 changed files with 963 additions and 145 deletions

View file

@@ -27,6 +27,9 @@ class NextTaskUpdate(BaseEvent):
first_filtered_entities.append(entity_info)
if not first_filtered_entities:
return first_filtered_entities
status_ids = [
entity_info["changes"]["statusid"]["new"]
for entity_info in first_filtered_entities
@@ -34,10 +37,16 @@ class NextTaskUpdate(BaseEvent):
statuses_by_id = self.get_statuses_by_id(
session, status_ids=status_ids
)
# Make sure `entity_type` is "Task"
task_object_type = session.query(
"select id, name from ObjectType where name is \"Task\""
).one()
# Care only about tasks having status with state `Done`
filtered_entities = []
for entity_info in first_filtered_entities:
if entity_info["objectTypeId"] != task_object_type["id"]:
continue
status_id = entity_info["changes"]["statusid"]["new"]
status_entity = statuses_by_id[status_id]
if status_entity["state"]["name"].lower() == "done":

View file

@@ -0,0 +1,399 @@
import collections
from pype.modules.ftrack import BaseEvent
class TaskStatusToParent(BaseEvent):
# Parent types where we care about changing of status
parent_types = ["shot", "asset build"]
# All of the parent's tasks must have a status name listed in `task_statuses`
# for the status name in `new_status` to be applied
parent_status_match_all_task_statuses = [
{
"new_status": "approved",
"task_statuses": [
"approved", "omitted"
]
}
]
# If a task's status was changed to one listed in `task_statuses`, apply
# `new_status` to its parent
# - this is done only if `parent_status_match_all_task_statuses` filtering
# didn't find a matching status
parent_status_by_task_status = [
{
"new_status": "in progress",
"task_statuses": [
"in progress"
]
}
]
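# Worked example (using the defaults above): a shot whose tasks end up with
# statuses {"Approved", "Omitted"} resolves to "approved" through
# `parent_status_match_all_task_statuses`; if that yields nothing and a task
# changed to "In Progress", the shot resolves to "in progress" through
# `parent_status_by_task_status`.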
def register(self, *args, **kwargs):
result = super(TaskStatusToParent, self).register(*args, **kwargs)
# Normalize presetable attributes to lowercase
_new_all_match = []
if self.parent_status_match_all_task_statuses:
for item in self.parent_status_match_all_task_statuses:
_new_all_match.append({
"new_status": item["new_status"].lower(),
"task_statuses": [
status_name.lower()
for status_name in item["task_statuses"]
]
})
self.parent_status_match_all_task_statuses = _new_all_match
_new_single_match = []
if self.parent_status_by_task_status:
for item in self.parent_status_by_task_status:
_new_single_match.append({
"new_status": item["new_status"].lower(),
"task_statuses": [
status_name.lower()
for status_name in item["task_statuses"]
]
})
self.parent_status_by_task_status = _new_single_match
self.parent_types = [
parent_type.lower()
for parent_type in self.parent_types
]
return result
def filter_entities_info(self, session, event):
# Filter the event down to relevant entity changes
entities_info = event["data"].get("entities")
if not entities_info:
return
filtered_entities = []
for entity_info in entities_info:
# Care only about tasks
if entity_info.get("entityType") != "task":
continue
# Care only about changes of status
changes = entity_info.get("changes") or {}
statusid_changes = changes.get("statusid") or {}
if (
statusid_changes.get("new") is None
or statusid_changes.get("old") is None
):
continue
filtered_entities.append(entity_info)
if not filtered_entities:
return
status_ids = [
entity_info["changes"]["statusid"]["new"]
for entity_info in filtered_entities
]
statuses_by_id = self.get_statuses_by_id(
session, status_ids=status_ids
)
# Attach the status entity to each entity info
output = []
for entity_info in filtered_entities:
status_id = entity_info["changes"]["statusid"]["new"]
entity_info["status_entity"] = statuses_by_id[status_id]
output.append(entity_info)
return output
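# Sketch of an entity_info that passes this filter (id values are
# illustrative):
# {
#     "entityType": "task",
#     "entityId": "task-id",
#     "parentId": "shot-id",
#     "objectTypeId": "task-object-type-id",
#     "changes": {"statusid": {"new": "new-status-id", "old": "old-status-id"}}
# }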
def get_parents_by_id(self, session, entities_info, object_types):
task_type_id = None
valid_object_type_ids = []
for object_type in object_types:
object_name_low = object_type["name"].lower()
if object_name_low == "task":
task_type_id = object_type["id"]
if object_name_low in self.parent_types:
valid_object_type_ids.append(object_type["id"])
parent_ids = [
"\"{}\"".format(entity_info["parentId"])
for entity_info in entities_info
if entity_info["objectTypeId"] == task_type_id
]
if not parent_ids:
return {}
parent_entities = session.query((
"TypedContext where id in ({}) and object_type_id in ({})"
).format(
", ".join(parent_ids), ", ".join(valid_object_type_ids))
).all()
return {
entity["id"]: entity
for entity in parent_entities
}
def get_tasks_by_id(self, session, parent_ids):
joined_parent_ids = ",".join([
"\"{}\"".format(parent_id)
for parent_id in parent_ids
])
task_entities = session.query(
"Task where parent_id in ({})".format(joined_parent_ids)
).all()
return {
entity["id"]: entity
for entity in task_entities
}
def get_statuses_by_id(self, session, task_entities=None, status_ids=None):
if task_entities is None and status_ids is None:
return {}
if status_ids is None:
status_ids = []
for task_entity in task_entities:
status_ids.append(task_entity["status_id"])
if not status_ids:
return {}
status_entities = session.query(
"Status where id in ({})".format(", ".join(status_ids))
).all()
return {
entity["id"]: entity
for entity in status_entities
}
def launch(self, session, event):
'''Propagates task status changes to the task's parent.'''
entities_info = self.filter_entities_info(session, event)
if not entities_info:
return
object_types = session.query("select id, name from ObjectType").all()
parents_by_id = self.get_parents_by_id(
session, entities_info, object_types
)
if not parents_by_id:
return
tasks_by_id = self.get_tasks_by_id(
session, tuple(parents_by_id.keys())
)
# Just collect them in one variable
entities_by_id = {}
for entity_id, entity in parents_by_id.items():
entities_by_id[entity_id] = entity
for entity_id, entity in tasks_by_id.items():
entities_by_id[entity_id] = entity
# Map task entities by their parents
tasks_by_parent_id = collections.defaultdict(list)
for task_entity in tasks_by_id.values():
tasks_by_parent_id[task_entity["parent_id"]].append(task_entity)
# Find status entities for all queried entities
statuses_by_id = self.get_statuses_by_id(
session,
entities_by_id.values()
)
# New status determination logic
new_statuses_by_parent_id = self.new_status_by_all_task_statuses(
parents_by_id.keys(), tasks_by_parent_id, statuses_by_id
)
# Check if any parents remain without a determined new status
remainder_tasks_by_parent_id = collections.defaultdict(list)
for entity_info in entities_info:
parent_id = entity_info["parentId"]
if (
# Skip if already has determined new status
parent_id in new_statuses_by_parent_id
# Skip if parent is not in parent mapping
# - if was not found or parent type is not interesting
or parent_id not in parents_by_id
):
continue
remainder_tasks_by_parent_id[parent_id].append(
entities_by_id[entity_info["entityId"]]
)
# Try to find a new status for the remaining parents
new_statuses_by_parent_id.update(
self.new_status_by_remainders(
remainder_tasks_by_parent_id,
statuses_by_id
)
)
# Remove parents without a valid new status value
for parent_id in tuple(new_statuses_by_parent_id.keys()):
new_status_name = new_statuses_by_parent_id[parent_id]
if not new_status_name:
new_statuses_by_parent_id.pop(parent_id)
# If there are no new statuses then just skip
if not new_statuses_by_parent_id:
return
# Get project schema from any available entity
_entity = None
for _ent in entities_by_id.values():
_entity = _ent
break
project_entity = self.get_project_from_entity(_entity)
project_schema = project_entity["project_schema"]
# Map type names by lowered type names
types_mapping = {
_type.lower(): _type
for _type in session.types
}
# Map object type id by lowered and modified object type name
object_type_mapping = {}
for object_type in object_types:
mapping_name = object_type["name"].lower().replace(" ", "")
object_type_mapping[object_type["id"]] = mapping_name
statuses_by_obj_id = {}
for parent_id, new_status_name in new_statuses_by_parent_id.items():
if not new_status_name:
continue
parent_entity = entities_by_id[parent_id]
obj_id = parent_entity["object_type_id"]
# Find statuses for entity type by object type name
# in project's schema and cache them
if obj_id not in statuses_by_obj_id:
mapping_name = object_type_mapping[obj_id]
mapped_name = types_mapping.get(mapping_name)
statuses = project_schema.get_statuses(mapped_name)
statuses_by_obj_id[obj_id] = {
status["name"].lower(): status
for status in statuses
}
statuses_by_name = statuses_by_obj_id[obj_id]
new_status = statuses_by_name.get(new_status_name)
ent_path = "/".join(
[ent["name"] for ent in parent_entity["link"]]
)
if not new_status:
self.log.warning((
"\"{}\" Couldn't change status to \"{}\"."
" Status is not available for entity type \"{}\"."
).format(
ent_path, new_status_name, parent_entity.entity_type
))
continue
current_status_name = parent_entity["status"]["name"]
# Do nothing if status is already set
if new_status["name"] == current_status_name:
self.log.debug(
"\"{}\" Status \"{}\" already set.".format(
ent_path, current_status_name
)
)
continue
try:
parent_entity["status"] = new_status
session.commit()
self.log.info(
"\"{}\" changed status to \"{}\"".format(
ent_path, new_status["name"]
)
)
except Exception:
session.rollback()
self.log.warning(
"\"{}\" status couldn't be set to \"{}\"".format(
ent_path, new_status["name"]
),
exc_info=True
)
def new_status_by_all_task_statuses(
self, parent_ids, tasks_by_parent_id, statuses_by_id
):
"""All task statuses under a parent must match specific status names.
Only when every task status matches the condition is the parent's new status
determined.
"""
output = {}
for parent_id in parent_ids:
task_statuses_lowered = set()
for task_entity in tasks_by_parent_id[parent_id]:
task_status = statuses_by_id[task_entity["status_id"]]
low_status_name = task_status["name"].lower()
task_statuses_lowered.add(low_status_name)
new_status = None
for item in self.parent_status_match_all_task_statuses:
valid_item = True
for status_name_low in task_statuses_lowered:
if status_name_low not in item["task_statuses"]:
valid_item = False
break
if valid_item:
new_status = item["new_status"]
break
if new_status is not None:
output[parent_id] = new_status
return output
def new_status_by_remainders(
self, remainder_tasks_by_parent_id, statuses_by_id
):
"""The parent's new status can be determined from a task's new status."""
output = {}
if not remainder_tasks_by_parent_id:
return output
for parent_id, task_entities in remainder_tasks_by_parent_id.items():
if not task_entities:
continue
# For cases where multiple tasks are in the changes
# - the task status matching the earliest item in the
# `parent_status_by_task_status` list is preferred
best_order = len(self.parent_status_by_task_status)
best_order_status = None
for task_entity in task_entities:
task_status = statuses_by_id[task_entity["status_id"]]
low_status_name = task_status["name"].lower()
for order, item in enumerate(
self.parent_status_by_task_status
):
if order >= best_order:
break
if low_status_name in item["task_statuses"]:
best_order = order
best_order_status = item["new_status"]
break
if best_order_status:
output[parent_id] = best_order_status
return output
def register(session, plugins_presets):
TaskStatusToParent(session, plugins_presets).register()
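For orientation, the two matching stages above can be condensed into a standalone sketch; this is a simplified rendition using plain sets instead of ftrack entities (preset values mirror the class defaults):

def resolve_parent_status(task_status_names):
    all_match = [
        {"new_status": "approved", "task_statuses": {"approved", "omitted"}}
    ]
    by_single_status = [
        {"new_status": "in progress", "task_statuses": {"in progress"}}
    ]
    lowered = {name.lower() for name in task_status_names}
    # stage 1: every task status under the parent must be covered by one item
    for item in all_match:
        if lowered <= item["task_statuses"]:
            return item["new_status"]
    # stage 2: the first item (by list order) hit by any task status wins
    for item in by_single_status:
        if lowered & item["task_statuses"]:
            return item["new_status"]
    return None

print(resolve_parent_status(["Approved", "Omitted"]))      # approved
print(resolve_parent_status(["In Progress", "Approved"]))  # in progress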

View file

@@ -0,0 +1,21 @@
"""
Optional:
instance.data["remove"] -> marker for removal
"""
import pyblish.api
class CollectClearInstances(pyblish.api.InstancePlugin):
"""Clear all marked instances"""
order = pyblish.api.CollectorOrder + 0.4999
label = "Clear Instances"
hosts = ["standalonepublisher"]
def process(self, instance):
self.log.debug(
f"Instance: `{instance}` | "
f"families: `{instance.data['families']}`")
if instance.data.get("remove"):
self.log.info(f"Removing: {instance}")
instance.context.remove(instance)
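Any collector ordered before this one can set the marker; a hedged sketch of such a plugin (its class name and the `publish` criterion are illustrative assumptions, not part of this repository):

import pyblish.api

class CollectMarkDisabledForRemoval(pyblish.api.InstancePlugin):
    """Flag instances the user disabled so CollectClearInstances drops them."""

    # anything below +0.4999 runs before the clearing plugin above
    order = pyblish.api.CollectorOrder + 0.4
    hosts = ["standalonepublisher"]

    def process(self, instance):
        # hypothetical criterion: the instance was toggled off by the user
        if not instance.data.get("publish", True):
            instance.data["remove"] = True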

View file

@@ -1,3 +1,19 @@
"""
Optional:
presets -> extensions (
example of use:
[".mov", ".mp4"]
)
presets -> source_dir (
example of use:
"C:/pathToFolder"
"{root}/{project[name]}/inputs"
"{root[work]}/{project[name]}/inputs"
"./input"
"../input"
)
"""
import os
import opentimelineio as otio
import pyblish.api
@@ -33,8 +49,10 @@ class CollectEditorial(pyblish.api.InstancePlugin):
# presets
extensions = [".mov", ".mp4"]
source_dir = None
def process(self, instance):
root_dir = None
# remove context test attribute
if instance.context.data.get("subsetNamesCheck"):
instance.context.data.pop("subsetNamesCheck")
@@ -53,19 +71,42 @@ class CollectEditorial(pyblish.api.InstancePlugin):
# get video file path
video_path = None
basename = os.path.splitext(os.path.basename(file_path))[0]
for f in os.listdir(staging_dir):
self.log.debug(f"__ test file: `{f}`")
# filter out by not sharing the same name
if os.path.splitext(f)[0] not in basename:
continue
# filter out by respected extensions
if os.path.splitext(f)[1] not in self.extensions:
continue
video_path = os.path.join(
staging_dir, f
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.data["editorialVideoPath"] = video_path
if self.source_dir:
source_dir = self.source_dir.replace("\\", "/")
if ("./" in source_dir) or ("../" in source_dir):
# get current working dir
cwd = os.getcwd()
# set cwd to staging dir for absolute path solving
os.chdir(staging_dir)
root_dir = os.path.abspath(source_dir)
# set back original cwd
os.chdir(cwd)
elif "{" in source_dir:
root_dir = source_dir
else:
root_dir = os.path.normpath(source_dir)
if root_dir:
# searching for source data will still need to be done
instance.data["editorialSourceRoot"] = root_dir
instance.data["editorialSourcePath"] = None
else:
# source data are already found
for f in os.listdir(staging_dir):
# filter out by not sharing the same name
if os.path.splitext(f)[0] not in basename:
continue
# filter out by respected extensions
if os.path.splitext(f)[1] not in self.extensions:
continue
video_path = os.path.join(
staging_dir, f
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.data["editorialSourceRoot"] = staging_dir
instance.data["editorialSourcePath"] = video_path
instance.data["stagingDir"] = staging_dir
# get editorial sequence file into otio timeline object
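A minimal sketch of what the relative-path branch above computes, assuming illustrative paths; os.path.join plus normpath gives the same result as the chdir()/abspath() round-trip without touching the process-wide cwd:

import os

staging_dir = "/proj/editorial/staging"  # hypothetical staging dir
source_dir = "../input"                  # preset value

# same result as the chdir()/abspath() round-trip, without changing cwd
root_dir = os.path.normpath(os.path.join(staging_dir, source_dir))
print(root_dir)  # /proj/editorial/input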

View file

@@ -2,7 +2,7 @@ import pyblish.api
import re
import os
from avalon import io
from copy import deepcopy
class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"""Collecting hierarchy context from `parents` and `hierarchy` data
@@ -60,7 +60,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
def create_hierarchy(self, instance):
parents = list()
hierarchy = ""
hierarchy = list()
visual_hierarchy = [instance.context.data["assetEntity"]]
while True:
visual_parent = io.find_one(
@@ -81,27 +81,74 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
})
if self.shot_add_hierarchy:
parent_template_patern = re.compile(r"\{([a-z]*?)\}")
# fill the parents parts from presets
shot_add_hierarchy = self.shot_add_hierarchy.copy()
hierarchy_parents = shot_add_hierarchy["parents"].copy()
for parent in hierarchy_parents:
hierarchy_parents[parent] = hierarchy_parents[parent].format(
**instance.data["anatomyData"])
# fill parent keys data template from anatomy data
for parent_key in hierarchy_parents:
hierarchy_parents[parent_key] = hierarchy_parents[
parent_key].format(**instance.data["anatomyData"])
for _index, _parent in enumerate(
shot_add_hierarchy["parents_path"].split("/")):
parent_filled = _parent.format(**hierarchy_parents)
parent_key = parent_template_patern.findall(_parent).pop()
# in case SP context is set to the same folder
if (_index == 0) and ("folder" in parent_key) \
and (parents[-1]["entityName"] == parent_filled):
self.log.debug(f" skipping : {parent_filled}")
continue
# in case first parent is project then start parents from start
if (_index == 0) and ("project" in parent_key):
self.log.debug("rebuilding parents from scratch")
project_parent = parents[0]
parents = [project_parent]
self.log.debug(f"project_parent: {project_parent}")
self.log.debug(f"parents: {parents}")
continue
prnt = self.convert_to_entity(
parent, hierarchy_parents[parent])
parent_key, parent_filled)
parents.append(prnt)
hierarchy.append(parent_filled)
hierarchy = shot_add_hierarchy[
"parents_path"].format(**hierarchy_parents)
# convert hierarchy to string
hierarchy = "/".join(hierarchy)
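# e.g. parents_path "{folder}/{sequence}" with hierarchy_parents
# {"folder": "shots", "sequence": "sq01"} yields hierarchy "shots/sq01"
# (key names are illustrative)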
# assign to instance data
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
# print
self.log.debug(f"Hierarchy: {hierarchy}")
self.log.debug(f"parents: {parents}")
if self.shot_add_tasks:
instance.data["tasks"] = self.shot_add_tasks
tasks_to_add = dict()
project_tasks = io.find_one({"type": "project"})["config"]["tasks"]
for task_name, task_data in self.shot_add_tasks.items():
try:
if task_data["type"] in project_tasks.keys():
tasks_to_add.update({task_name: task_data})
else:
raise KeyError(
"Wrong FtrackTaskType `{}` for `{}` does not"
" exist in `{}`".format(
task_data["type"],
task_name,
list(project_tasks.keys())))
except KeyError as error:
raise KeyError(
"Wrong presets: `{0}`".format(error)
)
instance.data["tasks"] = tasks_to_add
else:
instance.data["tasks"] = list()
instance.data["tasks"] = dict()
# updating hierarchy data
instance.data["anatomyData"].update({
@@ -117,7 +164,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
def processing_instance(self, instance):
self.log.info(f"_ instance: {instance}")
# adding anatomyData for burnins
instance.data["anatomyData"] = instance.context.data["anatomyData"]
instance.data["anatomyData"] = deepcopy(
instance.context.data["anatomyData"])
asset = instance.data["asset"]
assets_shared = instance.context.data.get("assetsShared")
@@ -133,9 +181,6 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
shot_name = instance.data["asset"]
self.log.debug(f"Shot Name: {shot_name}")
if instance.data["hierarchy"] not in shot_name:
self.log.warning("wrong parent")
label = f"{shot_name} ({frame_start}-{frame_end})"
instance.data["label"] = label
@@ -150,7 +195,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": instance.data["asset"],
"hierarchy": instance.data["hierarchy"],
"parents": instance.data["parents"],
"tasks": instance.data["tasks"]
"tasks": instance.data["tasks"],
"anatomyData": instance.data["anatomyData"]
})
@@ -194,6 +240,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["anatomyData"] = s_asset_data["anatomyData"]
# generate hierarchy data only on shot instances
if 'shot' not in instance.data.get('family', ''):
@@ -224,7 +271,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
in_info['tasks'] = instance.data['tasks']
from pprint import pformat
parents = instance.data.get('parents', [])
self.log.debug(f"parents: {pformat(parents)}")
actual = {name: in_info}
@@ -240,4 +289,5 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# adding hierarchy context to instance
context.data["hierarchyContext"] = final_context
self.log.debug(f"hierarchyContext: {pformat(final_context)}")
self.log.info("Hierarchy instance collected")

View file

@@ -23,6 +23,7 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
def process(self, instance):
fps = instance.context.data["fps"]
instance.data.update({
"fps": fps
})

View file

@@ -0,0 +1,266 @@
import os
import re
import tempfile
import pyblish.api
from copy import deepcopy
import clique
class CollectInstanceResources(pyblish.api.InstancePlugin):
"""Collect instance's resources"""
# must be after `CollectInstances`
order = pyblish.api.CollectorOrder + 0.011
label = "Collect Instance Resources"
hosts = ["standalonepublisher"]
families = ["clip"]
def process(self, instance):
self.context = instance.context
self.log.info(f"Processing instance: {instance}")
self.new_instances = []
subset_files = dict()
subset_dirs = list()
anatomy = self.context.data["anatomy"]
anatomy_data = deepcopy(self.context.data["anatomyData"])
anatomy_data.update({"root": anatomy.roots})
subset = instance.data["subset"]
clip_name = instance.data["clipName"]
editorial_source_root = instance.data["editorialSourceRoot"]
editorial_source_path = instance.data["editorialSourcePath"]
# if `editorial_source_path` is already defined, no searching is needed
if editorial_source_path:
# add `trimming` family so a mov or mp4 source longer than the clip
# gets cut by the `ExtractTrimVideoAudio` plugin
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data["stagingDir"] = staging_dir
instance.data["families"] += ["trimming"]
return
# if template pattern is in path then fill it with `anatomy_data`
if "{" in editorial_source_root:
editorial_source_root = editorial_source_root.format(
**anatomy_data)
self.log.debug(f"root: {editorial_source_root}")
# walk `editorial_source_root` and find the clip name in folders
# and look for any subset name alternatives
for root, dirs, _files in os.walk(editorial_source_root):
# search only for directories related to clip name
correct_clip_dir = None
for _d_search in dirs:
# avoid all non clip dirs
if _d_search not in clip_name:
continue
# found correct dir for clip
correct_clip_dir = _d_search
# continue if clip dir was not found
if not correct_clip_dir:
continue
clip_dir_path = os.path.join(root, correct_clip_dir)
subset_files_items = list()
# list content of clip dir and search for subset items
for subset_item in os.listdir(clip_dir_path):
# avoid all items which are not defined as subsets by name
if subset not in subset_item:
continue
subset_item_path = os.path.join(
clip_dir_path, subset_item)
# if it is dir store it to `subset_dirs` list
if os.path.isdir(subset_item_path):
subset_dirs.append(subset_item_path)
# if it is file then store it to `subset_files` list
if os.path.isfile(subset_item_path):
subset_files_items.append(subset_item_path)
if subset_files_items:
subset_files.update({clip_dir_path: subset_files_items})
# break the loop if correct_clip_dir was captured
# no need to carry on once the correct folder was found
if correct_clip_dir:
break
if subset_dirs:
# look through all dirs and check for subset name alternatives
for _dir in subset_dirs:
instance_data = deepcopy(
{k: v for k, v in instance.data.items()})
sub_dir = os.path.basename(_dir)
# if the subset name is only an alternative then create a new instance
if sub_dir != subset:
instance_data = self.duplicate_instance(
instance_data, subset, sub_dir)
# create all representations
self.create_representations(
os.listdir(_dir), instance_data, _dir)
if sub_dir == subset:
self.new_instances.append(instance_data)
# instance.data.update(instance_data)
if subset_files:
unique_subset_names = list()
root_dir = list(subset_files.keys()).pop()
files_list = subset_files[root_dir]
search_patern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
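# e.g. for subset "referenceMain" the pattern captures "referenceMainAlt01"
# from "referenceMainAlt01.0001.exr" (file name is illustrative)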
for _file in files_list:
patern = re.compile(search_patern)
match = patern.findall(_file)
if not match:
continue
match_subset = match.pop()
if match_subset in unique_subset_names:
continue
unique_subset_names.append(match_subset)
self.log.debug(f"unique_subset_names: {unique_subset_names}")
for _un_subs in unique_subset_names:
instance_data = self.duplicate_instance(
instance.data, subset, _un_subs)
# create all representations
self.create_representations(
[os.path.basename(f) for f in files_list
if _un_subs in f],
instance_data, root_dir)
# remove the original instance as it was used only
# as a template and has been duplicated
self.context.remove(instance)
# create all instances in self.new_instances into context
for new_instance in self.new_instances:
_new_instance = self.context.create_instance(
new_instance["name"])
_new_instance.data.update(new_instance)
def duplicate_instance(self, instance_data, subset, new_subset):
new_instance_data = dict()
for _key, _value in instance_data.items():
new_instance_data[_key] = _value
if not isinstance(_value, str):
continue
if subset in _value:
new_instance_data[_key] = _value.replace(
subset, new_subset)
self.log.info(f"Creating new instance: {new_instance_data['name']}")
self.new_instances.append(new_instance_data)
return new_instance_data
def create_representations(
self, files_list, instance_data, staging_dir):
""" Create representations from Collection object
"""
# collecting frames for later frame start/end reset
frames = list()
# break down the file list into collections and remainders
collections, remainder = clique.assemble(files_list)
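# e.g. ["sh010.0001.exr", "sh010.0002.exr", "audio.wav"] assembles into one
# exr Collection (indexes 1-2) and leaves "audio.wav" in the remainder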
# add staging_dir to instance_data
instance_data["stagingDir"] = staging_dir
# add representations to instance_data
instance_data["representations"] = list()
collection_head_name = None
# loop through collections and create representations
for _collection in collections:
ext = _collection.tail
collection_head_name = _collection.head
frame_start = list(_collection.indexes)[0]
frame_end = list(_collection.indexes)[-1]
repre_data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"name": ext[1:],
"ext": ext[1:],
"files": [item for item in _collection],
"stagingDir": staging_dir
}
if "review" in instance_data["families"]:
repre_data.update({
"thumbnail": True,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": self.context.data.get("fps"),
"name": "review",
"tags": ["review", "ftrackreview", "delete"],
})
instance_data["representations"].append(repre_data)
# add to frames for frame range reset
frames.append(frame_start)
frames.append(frame_end)
# loop through remainders and create representations
for _reminding_file in remainder:
ext = os.path.splitext(_reminding_file)[-1]
if ext not in instance_data["extensions"]:
continue
if collection_head_name and (
(collection_head_name + ext[1:]) not in _reminding_file
) and (ext in [".mp4", ".mov"]):
self.log.info(f"Skipping file: {_reminding_file}")
continue
frame_start = 1
frame_end = 1
repre_data = {
"name": ext[1:],
"ext": ext[1:],
"files": _reminding_file,
"stagingDir": staging_dir
}
# exception for thumbnail
if "thumb" in _reminding_file:
repre_data.update({
'name': "thumbnail",
'thumbnail': True
})
# exception for mp4 preview
if ".mp4" in _reminding_file:
frame_start = 0
frame_end = (
(instance_data["frameEnd"] - instance_data["frameStart"])
+ 1)
# add review ftrack family into families
for _family in ["review", "ftrack"]:
if _family not in instance_data["families"]:
instance_data["families"].append(_family)
repre_data.update({
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": self.context.data.get("fps"),
"name": "review",
"tags": ["review", "ftrackreview", "delete"],
})
# add to frames for frame range reset only if no collection
if not collections:
frames.append(frame_start)
frames.append(frame_end)
instance_data["representations"].append(repre_data)
# reset frame start / end
instance_data["frameStart"] = min(frames)
instance_data["frameEnd"] = max(frames)
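For clarity, the renaming inside duplicate_instance reduces to a string substitution over all string values of the instance data; a minimal sketch with illustrative subset names:

instance_data = {
    "name": "referenceMain_sh010",
    "subset": "referenceMain",
    "label": "referenceMain sh010 (0-100)",
    "families": ["clip", "ftrack"],
}
new_data = {
    key: value.replace("referenceMain", "referenceMainAlt01")
    if isinstance(value, str) else value
    for key, value in instance_data.items()
}
print(new_data["name"])  # referenceMainAlt01_sh010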

View file

@@ -1,15 +1,14 @@
import os
import opentimelineio as otio
import tempfile
import pyblish.api
from pype import lib as plib
class CollectClipInstances(pyblish.api.InstancePlugin):
"""Collect Clips instances from editorial's OTIO sequence"""
class CollectInstances(pyblish.api.InstancePlugin):
"""Collect instances from editorial's OTIO sequence"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Clips"
label = "Collect Instances"
hosts = ["standalonepublisher"]
families = ["editorial"]
@@ -18,31 +17,31 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
"referenceMain": {
"family": "review",
"families": ["clip", "ftrack"],
# "ftrackFamily": "review",
"extension": ".mp4"
"extensions": [".mp4"]
},
"audioMain": {
"family": "audio",
"families": ["clip", "ftrack"],
# "ftrackFamily": "audio",
"extension": ".wav",
# "version": 1
"extensions": [".wav"],
},
"shotMain": {
"family": "shot",
"families": []
}
}
timeline_frame_offset = None # if 900000 for edl default then -900000
timeline_frame_start = 900000 # standard EDL default (10:00:00:00)
timeline_frame_offset = None
custom_start_frame = None
def process(self, instance):
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
# get context
context = instance.context
instance_data_filter = [
"editorialSourceRoot",
"editorialSourcePath"
]
# attribute for checking duplicates during creation
if not context.data.get("assetNameCheck"):
context.data["assetNameCheck"] = list()
@@ -68,15 +67,19 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
handle_start = int(asset_data["handleStart"])
handle_end = int(asset_data["handleEnd"])
instances = []
for track in tracks:
self.log.debug(f"track.name: {track.name}")
try:
track_start_frame = (
abs(track.source_range.start_time.value)
)
self.log.debug(f"track_start_frame: {track_start_frame}")
track_start_frame -= self.timeline_frame_start
except AttributeError:
track_start_frame = 0
self.log.debug(f"track_start_frame: {track_start_frame}")
for clip in track.each_child():
if clip.name is None:
continue
@@ -103,7 +106,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
# frame ranges data
clip_in = clip.range_in_parent().start_time.value
clip_in += track_start_frame
clip_out = clip.range_in_parent().end_time_inclusive().value
clip_out += track_start_frame
self.log.info(f"clip_in: {clip_in} | clip_out: {clip_out}")
# add offset in case there is any
if self.timeline_frame_offset:
@@ -131,14 +137,11 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
# create shared new instance data
instance_data = {
"stagingDir": staging_dir,
# shared attributes
"asset": name,
"assetShareName": name,
"editorialVideoPath": instance.data[
"editorialVideoPath"],
"item": clip,
"clipName": clip_name,
# parent time properities
"trackStartFrame": track_start_frame,
@@ -167,6 +170,10 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
"frameEndH": frame_end + handle_end
}
for data_key in instance_data_filter:
instance_data.update({
data_key: instance.data.get(data_key)})
# adding subsets to context as instances
for subset, properities in self.subsets.items():
# adding Review-able instance
@@ -174,14 +181,20 @@ class CollectClipInstances(pyblish.api.InstancePlugin):
subset_instance_data.update(properities)
subset_instance_data.update({
# unique attributes
"name": f"{subset}_{name}",
"label": f"{subset} {name} ({clip_in}-{clip_out})",
"name": f"{name}_{subset}",
"label": f"{name} {subset} ({clip_in}-{clip_out})",
"subset": subset
})
instances.append(instance.context.create_instance(
**subset_instance_data))
# create new instance
_instance = instance.context.create_instance(
**subset_instance_data)
self.log.debug(
f"Instance: `{_instance}` | "
f"families: `{subset_instance_data['families']}`")
context.data["assetsShared"][name] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
self.log.debug("Instance: `{}` | families: `{}`")

View file

@@ -1,92 +0,0 @@
import os
import clique
import pype.api
from pprint import pformat
class ExtractShotData(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot Data"
hosts = ["standalonepublisher"]
families = ["clip"]
# presets
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialVideoPath"]
ext = instance.data.get("extension", ".mov")
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
#
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext in ".wav":
start += 0.5
args = [
"\"{}\"".format(ffmpeg_path),
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext in ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args, shell=True)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext[1:] in ["mov", "mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@@ -46,6 +46,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
files_len = 1
file = files
staging_dir = None
is_jpeg = False
if file.endswith(".jpeg") or file.endswith(".jpg"):
is_jpeg = True
@@ -106,7 +107,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
thumbnail_repre.pop("thumbnail")
filename = os.path.basename(full_thumbnail_path)
staging_dir = os.path.dirname(full_thumbnail_path)
staging_dir = staging_dir or os.path.dirname(full_thumbnail_path)
# create new thumbnail representation
representation = {
@@ -121,4 +122,5 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
if not is_jpeg:
representation["tags"].append("delete")
self.log.info(f"New representation {representation}")
instance.data["representations"].append(representation)

View file

@@ -0,0 +1,105 @@
import os
import pyblish.api
import pype.api
from pprint import pformat
class ExtractTrimVideoAudio(pype.api.Extractor):
"""Trim with ffmpeg "mov" and "wav" files."""
# must be before `ExtractThumbnailSP`
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract Trim Video/Audio"
hosts = ["standalonepublisher"]
families = ["clip", "trimming"]
# make sure it is enabled only if both families are present
match = pyblish.api.Subset
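# e.g. an instance with families ["clip", "trimming"] matches this plugin;
# an instance with just ["clip"] does not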
# presets
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialSourcePath"]
extensions = instance.data.get("extensions", [".mov"])
for ext in extensions:
self.log.info("Processing ext: `{}`".format(ext))
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext == ".wav":
# offset the start time to work around an ffmpeg bug
start += 0.5
# remove "trimming" from families
instance.data["families"] = [
fml for fml in instance.data["families"]
if "trimming" not in fml
]
args = [
ffmpeg_path,
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext == ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
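# illustrative joined command for a ".mov" (paths are hypothetical):
# ffmpeg -ss 41.66 -i "/editorial/source.mov" -t 4.16 \
#     -crf 18 -pix_fmt yuv420p "/tmp/pyblish_tmp_x/sh010_0.mov"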
output = pype.api.subprocess(ffmpeg_args, shell=True)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext in [".mov", ".mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
self.log.debug(f"Instance data: {pformat(instance.data)}")

View file

@@ -7,7 +7,10 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
label = "Validate Editorial Resources"
hosts = ["standalonepublisher"]
families = ["clip"]
families = ["clip", "trimming"]
# make sure it is enabled only if both families are present
match = pyblish.api.Subset
order = pype.api.ValidateContentsOrder
@@ -15,6 +18,6 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
self.log.debug(
f"Instance: {instance}, Families: "
f"{[instance.data['family']] + instance.data['families']}")
check_file = instance.data["editorialVideoPath"]
check_file = instance.data["editorialSourcePath"]
msg = f"Missing \"{check_file}\"."
assert check_file, msg