Merge branch 'hotfix/invalid-scope' into 2.x/develop

Milan Kolar 2020-08-12 14:18:07 +02:00
commit 1923d60600
20 changed files with 791 additions and 376 deletions


@@ -228,12 +228,7 @@ class Delivery(BaseAction):
if location_path:
location_path = os.path.normpath(location_path)
if not os.path.exists(location_path):
return {
"success": False,
"message": (
"Entered location path does not exists. \"{}\""
).format(location_path)
}
os.makedirs(location_path)
self.db_con.install()
self.db_con.Session["AVALON_PROJECT"] = project_name
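Review note: the hunk above replaces the early-error return with `os.makedirs(location_path)`, so a missing delivery location is now created instead of aborting the action. A minimal sketch of the same idea; the `exist_ok=True` guard is an assumption added here (the commit calls plain `os.makedirs`), useful if another process may create the path first:

```python
import os

def ensure_location(location_path):
    # normalize, then create the whole directory tree;
    # exist_ok=True tolerates the path already existing
    location_path = os.path.normpath(location_path)
    os.makedirs(location_path, exist_ok=True)
    return location_path
```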


@@ -5,14 +5,14 @@ import tempfile
import random
import string
from avalon import io, api
from avalon.tools import publish as av_publish
from avalon import io
import pype
from pype.api import execute
from pype.api import execute, Logger
import pyblish.api
from . import PUBLISH_PATHS
log = Logger().get_logger("standalonepublisher")
def set_context(project, asset, task, app):
@@ -61,105 +61,71 @@ def set_context(project, asset, task, app):
def publish(data, gui=True):
# CLI pyblish seems like the better solution
return cli_publish(data, gui)
# # this uses avalon pyblish launch tool
# avalon_api_publish(data, gui)
def avalon_api_publish(data, gui=True):
''' Launches Pyblish (GUI by default)
:param data: Should include data for pyblish and standalone collector
:type data: dict
:param gui: Pyblish will be launched in GUI mode if set to True
:type gui: bool
'''
io.install()
# Create hash name folder in temp
chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
staging_dir = tempfile.mkdtemp(chars)
# create also json and fill with data
json_data_path = staging_dir + os.path.basename(staging_dir) + '.json'
with open(json_data_path, 'w') as outfile:
json.dump(data, outfile)
args = [
"-pp", os.pathsep.join(pyblish.api.registered_paths())
]
envcopy = os.environ.copy()
envcopy["PYBLISH_HOSTS"] = "standalonepublisher"
envcopy["SAPUBLISH_INPATH"] = json_data_path
if gui:
av_publish.show()
else:
returncode = execute([
sys.executable, "-u", "-m", "pyblish"
] + args, env=envcopy)
io.uninstall()
def cli_publish(data, gui=True):
from . import PUBLISH_PATHS
PUBLISH_SCRIPT_PATH = os.path.join(os.path.dirname(__file__), "publish.py")
io.install()
pyblish.api.deregister_all_plugins()
# Registers Global pyblish plugins
pype.install()
# Registers Standalone pyblish plugins
for path in PUBLISH_PATHS:
pyblish.api.register_plugin_path(path)
project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS")
project_name = os.environ["AVALON_PROJECT"]
if project_plugins_paths and project_name:
for path in project_plugins_paths.split(os.pathsep):
if not path:
continue
plugin_path = os.path.join(path, project_name, "plugins")
if os.path.exists(plugin_path):
pyblish.api.register_plugin_path(plugin_path)
api.register_plugin_path(api.Loader, plugin_path)
api.register_plugin_path(api.Creator, plugin_path)
# Create hash name folder in temp
chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
staging_dir = tempfile.mkdtemp(chars)
# create json for return data
return_data_path = (
staging_dir + os.path.basename(staging_dir) + 'return.json'
)
# create also json and fill with data
json_data_path = staging_dir + os.path.basename(staging_dir) + '.json'
with open(json_data_path, 'w') as outfile:
json.dump(data, outfile)
args = [
"-pp", os.pathsep.join(pyblish.api.registered_paths())
]
if gui:
args += ["gui"]
envcopy = os.environ.copy()
envcopy["PYBLISH_HOSTS"] = "standalonepublisher"
envcopy["SAPUBLISH_INPATH"] = json_data_path
envcopy["SAPUBLISH_OUTPATH"] = return_data_path
envcopy["PYBLISH_GUI"] = "pyblish_pype"
envcopy["PYBLISHGUI"] = "pyblish_pype"
envcopy["PUBLISH_PATHS"] = os.pathsep.join(PUBLISH_PATHS)
returncode = execute([
sys.executable, "-u", "-m", "pyblish"
] + args, env=envcopy)
result = execute(
[sys.executable, PUBLISH_SCRIPT_PATH],
env=envcopy
)
result = {}
if os.path.exists(json_data_path):
with open(json_data_path, "r") as f:
result = json.load(f)
log.info(f"Publish result: {result}")
io.uninstall()
# TODO: check if pyblish was successful
# if successful return True
print('Check result here')
return False
def main(env):
from avalon.tools import publish
# Registers pype's Global pyblish plugins
pype.install()
# Register additional paths
addition_paths_str = env.get("PUBLISH_PATHS") or ""
addition_paths = addition_paths_str.split(os.pathsep)
for path in addition_paths:
path = os.path.normpath(path)
if not os.path.exists(path):
continue
pyblish.api.register_plugin_path(path)
# Register project specific plugins
project_name = os.environ["AVALON_PROJECT"]
project_plugins_paths = env.get("PYPE_PROJECT_PLUGINS") or ""
for path in project_plugins_paths.split(os.pathsep):
plugin_path = os.path.join(path, project_name, "plugins")
if os.path.exists(plugin_path):
pyblish.api.register_plugin_path(plugin_path)
return publish.show()
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))
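Review note: the reworked `cli_publish` hands its payload to `publish.py` through a temp JSON file plus environment variables, then reads the same file back as the result. A self-contained sketch of that hand-off pattern under the assumption (suggested by the read-back above) that the child process rewrites the input file; the helper name `run_publish_script` is illustrative, not part of the commit:

```python
import json
import os
import subprocess
import sys
import tempfile

def run_publish_script(data, script_path):
    # write the input payload where the child process expects it
    staging_dir = tempfile.mkdtemp(prefix="sapublish_")
    json_path = os.path.join(staging_dir, "input.json")
    with open(json_path, "w") as f:
        json.dump(data, f)

    env = os.environ.copy()
    env["SAPUBLISH_INPATH"] = json_path
    env["PYBLISH_GUI"] = "pyblish_pype"

    # run the publish script in a fresh interpreter
    subprocess.check_call([sys.executable, script_path], env=env)

    # the child rewrites the same file with the publish result
    with open(json_path) as f:
        return json.load(f)
```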


@@ -357,7 +357,7 @@ class DropDataFrame(QtWidgets.QFrame):
if data['name'] == item.in_data['name']:
found = True
break
paths = data['files']
paths = list(data['files'])
paths.extend(item.in_data['files'])
c, r = clique.assemble(paths)
if len(c) == 0:
@@ -392,7 +392,7 @@ class DropDataFrame(QtWidgets.QFrame):
else:
if data['name'] != item.in_data['name']:
continue
if data['files'] == item.in_data['files']:
if data['files'] == list(item.in_data['files']):
found = True
break
a_name = 'merge'
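Review note: both changes above wrap `data['files']` in `list(...)`. Without the copy, the `paths.extend(...)` call mutates the file list stored on the drop item, and the equality check below can compare a list against a non-list iterable. A small sketch of the fixed merge, assuming `clique` is available:

```python
import clique

def merge_file_lists(new_files, existing_files):
    # copy first so the caller's list is not mutated by extend()
    paths = list(new_files)
    paths.extend(existing_files)
    # assemble into frame-sequence collections plus a remainder
    collections, remainder = clique.assemble(paths)
    return collections, remainder
```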


@@ -35,7 +35,7 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder - 0.04
label = 'Integrate Hierarchy To Ftrack'
families = ["clip", "shot"]
families = ["shot"]
optional = False
def process(self, context):


@@ -19,7 +19,14 @@ class ExtractBurnin(pype.api.Extractor):
label = "Extract burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere"]
hosts = [
"nuke",
"maya",
"shell",
"nukestudio",
"premiere",
"standalonepublisher"
]
optional = True
positions = [


@@ -7,7 +7,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
order = pyblish.api.ExtractorOrder - 0.01
label = "Extract Hierarchy To Avalon"
families = ["clip", "shot", "editorial"]
families = ["clip", "shot"]
def process(self, context):
if "hierarchyContext" not in context.data:


@@ -22,7 +22,15 @@ class ExtractReview(pyblish.api.InstancePlugin):
label = "Extract Review"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
hosts = ["nuke", "maya", "shell", "nukestudio", "premiere", "harmony"]
hosts = [
"nuke",
"maya",
"shell",
"nukestudio",
"premiere",
"harmony",
"standalonepublisher"
]
# Supported extensions
image_exts = ["exr", "jpg", "jpeg", "png", "dpx"]


@@ -40,13 +40,11 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
expected_settings["frameEnd"] = frame_end - frame_start + 1
expected_settings["frameStart"] = 1
self.log.info(instance.context.data['anatomyData']['asset'])
if any(string in instance.context.data['anatomyData']['asset']
for string in frame_check_filter):
expected_settings.pop("frameEnd")
for string in self.frame_check_filter):
expected_settings.pop("frameEnd")
func = """function func()
{


@@ -210,6 +210,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
self.log.debug(
"assets_shared: {assets_shared}".format(**locals()))
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collecting Hierarchy from instances and building
context hierarchy tree


@@ -0,0 +1,187 @@
import os
import opentimelineio as otio
import tempfile
import pyblish.api
from pype import lib as plib
class CollectClipInstances(pyblish.api.InstancePlugin):
"""Collect Clips instances from editorial's OTIO sequence"""
order = pyblish.api.CollectorOrder + 0.01
label = "Collect Clips"
hosts = ["standalonepublisher"]
families = ["editorial"]
# presets
subsets = {
"referenceMain": {
"family": "review",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"extension": ".mp4"
},
"audioMain": {
"family": "audio",
"families": ["ftrack"],
"ftrackFamily": "audio",
"extension": ".wav",
"version": 1
},
"shotMain": {
"family": "shot",
"families": []
}
}
timeline_frame_offset = None  # e.g. if the EDL default start is 900000, set -900000
custom_start_frame = None
def process(self, instance):
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
# get context
context = instance.context
# attribute for checking duplicates during creation
if not context.data.get("assetNameCheck"):
context.data["assetNameCheck"] = list()
# create asset_names conversion table
if not context.data.get("assetsShared"):
context.data["assetsShared"] = dict()
# get timeline otio data
timeline = instance.data["otio_timeline"]
fps = plib.get_asset()["data"]["fps"]
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
# get data from avalon
asset_entity = instance.context.data["assetEntity"]
asset_data = asset_entity["data"]
asset_name = asset_entity["name"]
# Timeline data.
handle_start = int(asset_data["handleStart"])
handle_end = int(asset_data["handleEnd"])
instances = []
for track in tracks:
try:
track_start_frame = (
abs(track.source_range.start_time.value)
)
except AttributeError:
track_start_frame = 0
for clip in track.each_child():
if clip.name is None:
continue
# skip all generators, e.g. black or empty clips
if isinstance(
clip.media_reference,
otio.schema.GeneratorReference):
continue
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(clip, otio.schema.transition.Transition):
continue
# basic unique asset name
clip_name = os.path.splitext(clip.name)[0].lower()
name = f"{asset_name.split('_')[0]}_{clip_name}"
if name not in context.data["assetNameCheck"]:
context.data["assetNameCheck"].append(name)
else:
self.log.warning(f"duplicate shot name: {name}")
# frame ranges data
clip_in = clip.range_in_parent().start_time.value
clip_out = clip.range_in_parent().end_time_inclusive().value
# add offset in case there is any
if self.timeline_frame_offset:
clip_in += self.timeline_frame_offset
clip_out += self.timeline_frame_offset
clip_duration = clip.duration().value
self.log.info(f"clip duration: {clip_duration}")
source_in = clip.trimmed_range().start_time.value
source_out = source_in + clip_duration
source_in_h = source_in - handle_start
source_out_h = source_out + handle_end
clip_in_h = clip_in - handle_start
clip_out_h = clip_out + handle_end
# define starting frame for future shot
if self.custom_start_frame is not None:
frame_start = self.custom_start_frame
else:
frame_start = clip_in
frame_end = frame_start + (clip_duration - 1)
# create shared new instance data
instance_data = {
"stagingDir": staging_dir,
# shared attributes
"asset": name,
"assetShareName": name,
"editorialVideoPath": instance.data[
"editorialVideoPath"],
"item": clip,
# parent time properties
"trackStartFrame": track_start_frame,
"handleStart": handle_start,
"handleEnd": handle_end,
"fps": fps,
# media source
"sourceIn": source_in,
"sourceOut": source_out,
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
# timeline
"clipIn": clip_in,
"clipOut": clip_out,
"clipDuration": clip_duration,
"clipInH": clip_in_h,
"clipOutH": clip_out_h,
"clipDurationH": clip_duration + handle_start + handle_end,
# task
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartH": frame_start - handle_start,
"frameEndH": frame_end + handle_end
}
# adding subsets to context as instances
for subset, properities in self.subsets.items():
# adding Review-able instance
subset_instance_data = instance_data.copy()
subset_instance_data.update(properities)
subset_instance_data.update({
# unique attributes
"name": f"{subset}_{name}",
"label": f"{subset} {name} ({clip_in}-{clip_out})",
"subset": subset
})
instances.append(instance.context.create_instance(
**subset_instance_data))
context.data["assetsShared"][name] = {
"_clipIn": clip_in,
"_clipOut": clip_out
}
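Review note: the handle arithmetic in the instance data above is easiest to verify with concrete numbers. A worked example (all values illustrative, not from the commit), assuming no `custom_start_frame` and no `timeline_frame_offset`:

```python
clip_in, clip_out = 1001, 1050       # clip range in the timeline
handle_start, handle_end = 10, 10    # handles from the asset entity

clip_duration = clip_out - clip_in + 1                       # 50
clip_in_h = clip_in - handle_start                           # 991
clip_out_h = clip_out + handle_end                           # 1060
clip_duration_h = clip_duration + handle_start + handle_end  # 70

frame_start = clip_in                                        # 1001
frame_end = frame_start + (clip_duration - 1)                # 1050
```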


@@ -36,18 +36,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
def process(self, context):
# get json paths from os and load them
io.install()
input_json_path = os.environ.get("SAPUBLISH_INPATH")
output_json_path = os.environ.get("SAPUBLISH_OUTPATH")
# context.data["stagingDir"] = os.path.dirname(input_json_path)
context.data["returnJsonPath"] = output_json_path
with open(input_json_path, "r") as f:
in_data = json.load(f)
asset_name = in_data["asset"]
family = in_data["family"]
subset = in_data["subset"]
# Load presets
presets = context.data.get("presets")
@@ -57,19 +45,92 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
presets = config.get_presets()
project = io.find_one({"type": "project"})
asset = io.find_one({"type": "asset", "name": asset_name})
context.data["project"] = project
# get json file context
input_json_path = os.environ.get("SAPUBLISH_INPATH")
with open(input_json_path, "r") as f:
in_data = json.load(f)
self.log.debug(f"_ in_data: {in_data}")
self.asset_name = in_data["asset"]
self.family = in_data["family"]
asset = io.find_one({"type": "asset", "name": self.asset_name})
context.data["asset"] = asset
# exception for editorial
if "editorial" in self.family:
# avoid subset name duplication
if not context.data.get("subsetNamesCheck"):
context.data["subsetNamesCheck"] = list()
in_data_list = list()
representations = in_data.pop("representations")
for repr in representations:
in_data_copy = in_data.copy()
ext = repr["ext"][1:]
subset = in_data_copy["subset"]
# filter out non editorial files
if ext not in ["edl", "xml"]:
in_data_copy["representations"] = [repr]
in_data_copy["subset"] = f"{ext}{subset}"
in_data_list.append(in_data_copy)
files = repr.pop("files")
# delete unneeded keys
delete_repr_keys = ["frameStart", "frameEnd"]
for k in delete_repr_keys:
if repr.get(k):
repr.pop(k)
# convert files to a list if it isn't one
if not isinstance(files, list):
files = [files]
self.log.debug(f"_ files: {files}")
for index, f in enumerate(files):
index += 1
# copy dictionaries
in_data_copy = in_data_copy.copy()
repr_new = repr.copy()
repr_new["files"] = f
repr_new["name"] = ext
in_data_copy["representations"] = [repr_new]
# create subset name
new_subset = f"{ext}{index}{subset}"
while new_subset in context.data["subsetNamesCheck"]:
index += 1
new_subset = f"{ext}{index}{subset}"
context.data["subsetNamesCheck"].append(new_subset)
in_data_copy["subset"] = new_subset
in_data_list.append(in_data_copy)
self.log.info(f"Creating subset: {ext}{index}{subset}")
else:
in_data_list = [in_data]
self.log.debug(f"_ in_data_list: {in_data_list}")
for in_data in in_data_list:
# create instance
self.create_instance(context, in_data)
def create_instance(self, context, in_data):
subset = in_data["subset"]
instance = context.create_instance(subset)
instance.data.update(
{
"subset": subset,
"asset": asset_name,
"asset": self.asset_name,
"label": subset,
"name": subset,
"family": family,
"family": self.family,
"version": in_data.get("version", 1),
"frameStart": in_data.get("representations", [None])[0].get(
"frameStart", None
@@ -77,7 +138,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
"frameEnd": in_data.get("representations", [None])[0].get(
"frameEnd", None
),
"families": [family, "ftrack"],
"families": [self.family, "ftrack"],
}
)
self.log.info("collected instance: {}".format(instance.data))
@@ -105,5 +166,3 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
self.log.debug("Adding review family")
instance.data["representations"].append(component)
self.log.info(in_data)
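Review note: the editorial branch above creates one instance per representation file and bumps a numeric index until the generated subset name is unused. The de-duplication loop, isolated as a sketch — `seen` stands in for `context.data["subsetNamesCheck"]` and the helper name is illustrative:

```python
def unique_subset_name(ext, index, subset, seen):
    # bump the index until the generated name is free
    name = f"{ext}{index}{subset}"
    while name in seen:
        index += 1
        name = f"{ext}{index}{subset}"
    seen.append(name)
    return name

seen = []
unique_subset_name("wav", 1, "Main", seen)  # 'wav1Main'
unique_subset_name("wav", 1, "Main", seen)  # 'wav2Main'
```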


@@ -0,0 +1,82 @@
import os
import opentimelineio as otio
import pyblish.api
from pype import lib as plib
class OTIO_View(pyblish.api.Action):
"""Currently disabled because OTIO requires PySide2. Issue on Qt.py:
https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289
"""
label = "OTIO View"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
instance = context[0]
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
plib._subprocess(["otioview", file_path])
class CollectEditorial(pyblish.api.InstancePlugin):
"""Collect Editorial OTIO timeline"""
order = pyblish.api.CollectorOrder
label = "Collect Editorial"
hosts = ["standalonepublisher"]
families = ["editorial"]
actions = []
# presets
extensions = [".mov"]
def process(self, instance):
# remove context test attribute
if instance.context.data.get("subsetNamesCheck"):
instance.context.data.pop("subsetNamesCheck")
self.log.debug(f"__ instance: `{instance}`")
# get representation with editorial file
for representation in instance.data["representations"]:
self.log.debug(f"__ representation: `{representation}`")
# make editorial sequence file path
staging_dir = representation["stagingDir"]
file_path = os.path.join(
staging_dir, str(representation["files"])
)
instance.context.data["currentFile"] = file_path
# get video file path
video_path = None
basename = os.path.splitext(os.path.basename(file_path))[0]
for f in os.listdir(staging_dir):
self.log.debug(f"__ test file: `{f}`")
# skip files that do not share the same base name
if os.path.splitext(f)[0] not in basename:
continue
# skip files without a supported extension
if os.path.splitext(f)[1] not in self.extensions:
continue
video_path = os.path.join(
staging_dir, f
)
self.log.debug(f"__ video_path: `{video_path}`")
instance.data["editorialVideoPath"] = video_path
instance.data["stagingDir"] = staging_dir
# get editorial sequence file into otio timeline object
extension = os.path.splitext(file_path)[1]
kwargs = {}
if extension == ".edl":
# EDL has no frame rate embedded, so it needs an explicit
# frame rate, else 24 is assumed.
kwargs["rate"] = plib.get_asset()["data"]["fps"]
instance.data["otio_timeline"] = otio.adapters.read_from_file(
file_path, **kwargs)
self.log.info(f"Added OTIO timeline from: `{file_path}`")


@@ -0,0 +1,242 @@
import pyblish.api
import re
import os
from avalon import io
class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"""Collecting hierarchy context from `parents` and `hierarchy` data
present in `clip` family instances coming from the request json data file
It will add `hierarchical_context` into each instance for integrate
plugins to be able to create needed parents for the context if they
don't exist yet
"""
label = "Collect Hierarchy Clip"
order = pyblish.api.CollectorOrder + 0.101
hosts = ["standalonepublisher"]
families = ["shot"]
# presets
shot_rename_template = None
shot_rename_search_patterns = None
shot_add_hierarchy = None
shot_add_tasks = None
def convert_to_entity(self, key, value):
# ftrack compatible entity types
types = {"shot": "Shot",
"folder": "Folder",
"episode": "Episode",
"sequence": "Sequence",
"track": "Sequence",
}
# convert to entity type
entity_type = types.get(key, None)
# return if any
if entity_type:
return {"entityType": entity_type, "entityName": value}
def rename_with_hierarchy(self, instance):
search_text = ""
parent_name = instance.context.data["assetEntity"]["name"]
clip = instance.data["item"]
clip_name = os.path.splitext(clip.name)[0].lower()
if self.shot_rename_search_patterns:
search_text += parent_name + clip_name
instance.data["anatomyData"].update({"clip_name": clip_name})
for type, pattern in self.shot_rename_search_patterns.items():
p = re.compile(pattern)
match = p.findall(search_text)
if not match:
continue
instance.data["anatomyData"][type] = match[-1]
# format to new shot name
instance.data["asset"] = self.shot_rename_template.format(
**instance.data["anatomyData"])
def create_hierarchy(self, instance):
parents = list()
hierarchy = ""
visual_hierarchy = [instance.context.data["assetEntity"]]
while True:
visual_parent = io.find_one(
{"_id": visual_hierarchy[-1]["data"]["visualParent"]}
)
if visual_parent:
visual_hierarchy.append(visual_parent)
else:
visual_hierarchy.append(
instance.context.data["projectEntity"])
break
# add current selection context hierarchy from standalonepublisher
for entity in reversed(visual_hierarchy):
parents.append({
"entityType": entity["data"]["entityType"],
"entityName": entity["name"]
})
if self.shot_add_hierarchy:
# fill the parents parts from presets
shot_add_hierarchy = self.shot_add_hierarchy.copy()
hierarchy_parents = shot_add_hierarchy["parents"].copy()
for parent in hierarchy_parents:
hierarchy_parents[parent] = hierarchy_parents[parent].format(
**instance.data["anatomyData"])
prnt = self.convert_to_entity(
parent, hierarchy_parents[parent])
parents.append(prnt)
hierarchy = shot_add_hierarchy[
"parents_path"].format(**hierarchy_parents)
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
self.log.debug(f"Hierarchy: {hierarchy}")
if self.shot_add_tasks:
instance.data["tasks"] = self.shot_add_tasks
else:
instance.data["tasks"] = list()
# updating hierarchy data
instance.data["anatomyData"].update({
"asset": instance.data["asset"],
"task": "conform"
})
def process(self, context):
for instance in context:
if instance.data["family"] in self.families:
self.processing_instance(instance)
def processing_instance(self, instance):
self.log.info(f"_ instance: {instance}")
# adding anatomyData for burnins
instance.data["anatomyData"] = instance.context.data["anatomyData"]
asset = instance.data["asset"]
assets_shared = instance.context.data.get("assetsShared")
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
if self.shot_rename_template:
self.rename_with_hierarchy(instance)
self.create_hierarchy(instance)
shot_name = instance.data["asset"]
self.log.debug(f"Shot Name: {shot_name}")
if instance.data["hierarchy"] not in shot_name:
self.log.warning("wrong parent")
label = f"{shot_name} ({frame_start}-{frame_end})"
instance.data["label"] = label
# dealing with shared attributes across instances
# with the same asset name
if assets_shared.get(asset):
asset_shared = assets_shared.get(asset)
else:
asset_shared = assets_shared[asset]
asset_shared.update({
"asset": instance.data["asset"],
"hierarchy": instance.data["hierarchy"],
"parents": instance.data["parents"],
"tasks": instance.data["tasks"]
})
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collecting Hierarchy from instances and building
context hierarchy tree
'''
label = "Collect Hierarchy Context"
order = pyblish.api.CollectorOrder + 0.102
hosts = ["standalonepublisher"]
def update_dict(self, ex_dict, new_dict):
for key in ex_dict:
if key in new_dict and isinstance(ex_dict[key], dict):
new_dict[key] = self.update_dict(ex_dict[key], new_dict[key])
else:
if ex_dict.get(key) and new_dict.get(key):
continue
else:
new_dict[key] = ex_dict[key]
return new_dict
def process(self, context):
instances = context
# create hierarchyContext attr if context has none
assets_shared = context.data.get("assetsShared")
final_context = {}
for instance in instances:
if 'editorial' in instance.data.get('family', ''):
continue
# inject assetsShared to other instances with
# the same `assetShareName` attribute in data
asset_shared_name = instance.data.get("assetShareName")
s_asset_data = assets_shared.get(asset_shared_name)
if s_asset_data:
instance.data["asset"] = s_asset_data["asset"]
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
# generate hierarchy data only on shot instances
if 'shot' not in instance.data.get('family', ''):
continue
name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
in_info = {}
# suppose that all instances are Shots
in_info['entity_type'] = 'Shot'
# get custom attributes of the shot
in_info['custom_attributes'] = {
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.data["fps"]
}
in_info['tasks'] = instance.data['tasks']
parents = instance.data.get('parents', [])
actual = {name: in_info}
for parent in reversed(parents):
next_dict = {}
parent_name = parent["entityName"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent["entityType"]
next_dict[parent_name]["childs"] = actual
actual = next_dict
final_context = self.update_dict(final_context, actual)
# adding hierarchy context to instance
context.data["hierarchyContext"] = final_context
self.log.info("Hierarchy instance collected")


@@ -1,147 +0,0 @@
import os
import opentimelineio as otio
from bson import json_util
import pyblish.api
from pype import lib
from avalon import io
class OTIO_View(pyblish.api.Action):
"""Currently disabled because OTIO requires PySide2. Issue on Qt.py:
https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/289
"""
label = "OTIO View"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
instance = context[0]
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
lib._subprocess(["otioview", file_path])
class CollectShots(pyblish.api.InstancePlugin):
"""Collect Anatomy object into Context"""
order = pyblish.api.CollectorOrder
label = "Collect Shots"
hosts = ["standalonepublisher"]
families = ["editorial"]
actions = []
def process(self, instance):
representation = instance.data["representations"][0]
file_path = os.path.join(
representation["stagingDir"], representation["files"]
)
instance.context.data["editorialPath"] = file_path
extension = os.path.splitext(file_path)[1][1:]
kwargs = {}
if extension == "edl":
# EDL has no frame rate embedded so needs explicit frame rate else
# 24 is assumed.
kwargs["rate"] = lib.get_asset()["data"]["fps"]
timeline = otio.adapters.read_from_file(file_path, **kwargs)
tracks = timeline.each_child(
descended_from_type=otio.schema.track.Track
)
asset_entity = instance.context.data["assetEntity"]
asset_name = asset_entity["name"]
# Ask user for sequence start. Usually 10:00:00:00.
sequence_start_frame = 900000
# Project specific prefix naming. This needs to be replaced with some
# options to be more flexible.
asset_name = asset_name.split("_")[0]
instances = []
for track in tracks:
track_start_frame = (
abs(track.source_range.start_time.value) - sequence_start_frame
)
for child in track.each_child():
# Transitions are ignored, because Clips have the full frame
# range.
if isinstance(child, otio.schema.transition.Transition):
continue
if child.name is None:
continue
# Hardcoded to expect a shot name of "[name].[extension]"
child_name = os.path.splitext(child.name)[0].lower()
name = f"{asset_name}_{child_name}"
frame_start = track_start_frame
frame_start += child.range_in_parent().start_time.value
frame_end = track_start_frame
frame_end += child.range_in_parent().end_time_inclusive().value
label = f"{name} (framerange: {frame_start}-{frame_end})"
instances.append(
instance.context.create_instance(**{
"name": name,
"label": label,
"frameStart": frame_start,
"frameEnd": frame_end,
"family": "shot",
"families": ["review", "ftrack"],
"ftrackFamily": "review",
"asset": name,
"subset": "shotMain",
"representations": [],
"source": file_path
})
)
visual_hierarchy = [asset_entity]
while True:
visual_parent = io.find_one(
{"_id": visual_hierarchy[-1]["data"]["visualParent"]}
)
if visual_parent:
visual_hierarchy.append(visual_parent)
else:
visual_hierarchy.append(instance.context.data["projectEntity"])
break
context_hierarchy = None
for entity in visual_hierarchy:
childs = {}
if context_hierarchy:
name = context_hierarchy.pop("name")
childs = {name: context_hierarchy}
else:
for instance in instances:
childs[instance.data["name"]] = {
"childs": {},
"entity_type": "Shot",
"custom_attributes": {
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"]
}
}
context_hierarchy = {
"entity_type": entity["data"]["entityType"],
"childs": childs,
"name": entity["name"]
}
name = context_hierarchy.pop("name")
context_hierarchy = {name: context_hierarchy}
instance.context.data["hierarchyContext"] = context_hierarchy
self.log.info(
"Hierarchy:\n" +
json_util.dumps(context_hierarchy, sort_keys=True, indent=4)
)


@@ -1,96 +0,0 @@
import os
import clique
import pype.api
import pype.lib
class ExtractShot(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot"
hosts = ["standalonepublisher"]
families = ["shot"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting shot to {}".format(staging_dir))
editorial_path = instance.context.data["editorialPath"]
basename = os.path.splitext(os.path.basename(editorial_path))[0]
# Generate mov file.
fps = pype.lib.get_asset()["data"]["fps"]
input_path = os.path.join(
os.path.dirname(editorial_path), basename + ".mov"
)
shot_mov = os.path.join(staging_dir, instance.data["name"] + ".mov")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
args = [
ffmpeg_path,
"-ss", str(instance.data["frameStart"] / fps),
"-i", input_path,
"-t", str(
(instance.data["frameEnd"] - instance.data["frameStart"] + 1) /
fps
),
"-crf", "18",
"-pix_fmt", "yuv420p",
shot_mov
]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
instance.data["representations"].append({
"name": "mov",
"ext": "mov",
"files": os.path.basename(shot_mov),
"stagingDir": staging_dir,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"fps": fps,
"thumbnail": True,
"tags": ["review", "ftrackreview"]
})
# Generate jpegs.
shot_jpegs = os.path.join(
staging_dir, instance.data["name"] + ".%04d.jpeg"
)
args = [ffmpeg_path, "-i", shot_mov, shot_jpegs]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
collection = clique.Collection(
head=instance.data["name"] + ".", tail='.jpeg', padding=4
)
for f in os.listdir(staging_dir):
if collection.match(f):
collection.add(f)
instance.data["representations"].append({
"name": "jpeg",
"ext": "jpeg",
"files": list(collection),
"stagingDir": staging_dir
})
# Generate wav file.
shot_wav = os.path.join(staging_dir, instance.data["name"] + ".wav")
args = [ffmpeg_path, "-i", shot_mov, shot_wav]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
instance.data["representations"].append({
"name": "wav",
"ext": "wav",
"files": os.path.basename(shot_wav),
"stagingDir": staging_dir
})
# Required for extract_review plugin (L222 onwards).
instance.data["fps"] = fps


@@ -0,0 +1,123 @@
import os
import clique
import pype.api
from pprint import pformat
class ExtractShotData(pype.api.Extractor):
"""Extract shot "mov" and "wav" files."""
label = "Extract Shot Data"
hosts = ["standalonepublisher"]
families = ["review", "audio"]
# presets
add_representation = None # ".jpeg"
def process(self, instance):
representation = instance.data.get("representations")
self.log.debug(f"_ representation: {representation}")
if not representation:
instance.data["representations"] = list()
# get ffmpeg path
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
# get staging dir
staging_dir = self.staging_dir(instance)
self.log.info("Staging dir set to: `{}`".format(staging_dir))
# Generate mov file.
fps = instance.data["fps"]
video_file_path = instance.data["editorialVideoPath"]
ext = instance.data.get("extension", ".mov")
clip_trimed_path = os.path.join(
staging_dir, instance.data["name"] + ext)
#
# # check video file metadata
# input_data = plib.ffprobe_streams(video_file_path)[0]
# self.log.debug(f"__ input_data: `{input_data}`")
start = float(instance.data["clipInH"])
dur = float(instance.data["clipDurationH"])
if ext in ".wav":
start += 0.5
args = [
ffmpeg_path,
"-ss", str(start / fps),
"-i", f"\"{video_file_path}\"",
"-t", str(dur / fps)
]
if ext in [".mov", ".mp4"]:
args.extend([
"-crf", "18",
"-pix_fmt", "yuv420p"])
elif ext in ".wav":
args.extend([
"-vn -acodec pcm_s16le",
"-ar 48000 -ac 2"
])
# add output path
args.append(f"\"{clip_trimed_path}\"")
self.log.info(f"Processing: {args}")
ffmpeg_args = " ".join(args)
output = pype.api.subprocess(ffmpeg_args)
self.log.info(output)
repr = {
"name": ext[1:],
"ext": ext[1:],
"files": os.path.basename(clip_trimed_path),
"stagingDir": staging_dir,
"frameStart": int(instance.data["frameStart"]),
"frameEnd": int(instance.data["frameEnd"]),
"frameStartFtrack": int(instance.data["frameStartH"]),
"frameEndFtrack": int(instance.data["frameEndH"]),
"fps": fps,
}
if ext[1:] in ["mov", "mp4"]:
repr.update({
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"]})
instance.data["representations"].append(repr)
if self.add_representation:
# Generate jpegs.
clip_img_sequence = os.path.join(
staging_dir, instance.data["name"] + ".%04d.jpeg"
)
args = [
ffmpeg_path, "-i",
f"\"{clip_trimed_path}\"",
f"\"{clip_img_sequence}\""
]
self.log.info(f"Processing: {args}")
output = pype.lib._subprocess(args)
self.log.info(output)
# collect the jpeg sequence if the editorial data to publish
# is an image sequence
collection = clique.Collection(
head=instance.data["name"] + ".", tail='.jpeg', padding=4
)
for f in os.listdir(staging_dir):
if collection.match(f):
collection.add(f)
instance.data["representations"].append({
"name": "jpeg",
"ext": "jpeg",
"files": list(collection),
"stagingDir": staging_dir
})
self.log.debug(f"Instance data: {pformat(instance.data)}")


@@ -9,20 +9,10 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin):
label = "Validate Editorial Resources"
hosts = ["standalonepublisher"]
families = ["editorial"]
families = ["audio", "review"]
order = pype.api.ValidateContentsOrder
def process(self, instance):
representation = instance.data["representations"][0]
staging_dir = representation["stagingDir"]
basename = os.path.splitext(
os.path.basename(representation["files"])
)[0]
files = [x for x in os.listdir(staging_dir)]
# Check for "mov" file.
filename = basename + ".mov"
filepath = os.path.join(staging_dir, filename)
msg = f"Missing \"{filepath}\"."
assert filename in files, msg
check_file = instance.data["editorialVideoPath"]
msg = f"Missing \"{check_file}\"."
assert check_file, msg
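Review note: as rewritten, the validator only asserts that `editorialVideoPath` is truthy; it no longer checks the disk. If an on-disk check is wanted, a sketch would be (an assumption, not the commit's behavior):

```python
import os

def validate_video_path(check_file):
    # fail when the path is unset or the file is absent on disk
    assert check_file and os.path.isfile(check_file), (
        f"Missing \"{check_file}\"."
    )
```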


@@ -2,10 +2,10 @@ import pyblish.api
import pype.api
class ValidateShots(pyblish.api.ContextPlugin):
"""Validate there is a "mov" next to the editorial file."""
class ValidateShotDuplicates(pyblish.api.ContextPlugin):
"""Validating no duplicate names are in context."""
label = "Validate Shots"
label = "Validate Shot Duplicates"
hosts = ["standalonepublisher"]
order = pype.api.ValidateContentsOrder


@@ -1 +1 @@
__version__ = "2.11.3"
__version__ = "2.11.4"