mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-25 05:14:40 +01:00

commit 80231011df
Merge remote-tracking branch 'origin/develop' into feature/PYPE-570-maya-renderlayer-creator

21 changed files with 411 additions and 145 deletions
@@ -99,6 +99,7 @@ class DeleteAssetSubset(BaseAction):
         # Filter event even more (skip task entities)
         # - task entities are not relevant for avalon
+        entity_mapping = {}
         for entity in entities:
             ftrack_id = entity["id"]
             if ftrack_id not in ftrack_ids:
@@ -107,6 +108,8 @@ class DeleteAssetSubset(BaseAction):
             if entity.entity_type.lower() == "task":
                 ftrack_ids.remove(ftrack_id)

+            entity_mapping[ftrack_id] = entity
+
         if not ftrack_ids:
             # It is bug if this happens!
             return {
@@ -122,11 +125,41 @@ class DeleteAssetSubset(BaseAction):
         project_name = project["full_name"]
         self.dbcon.Session["AVALON_PROJECT"] = project_name

-        selected_av_entities = self.dbcon.find({
+        selected_av_entities = list(self.dbcon.find({
             "type": "asset",
             "data.ftrackId": {"$in": ftrack_ids}
-        })
-        selected_av_entities = [ent for ent in selected_av_entities]
+        }))
+        found_without_ftrack_id = {}
+        if len(selected_av_entities) != len(ftrack_ids):
+            found_ftrack_ids = [
+                ent["data"]["ftrackId"] for ent in selected_av_entities
+            ]
+            for ftrack_id, entity in entity_mapping.items():
+                if ftrack_id in found_ftrack_ids:
+                    continue
+
+                av_ents_by_name = list(self.dbcon.find({
+                    "type": "asset",
+                    "name": entity["name"]
+                }))
+                if not av_ents_by_name:
+                    continue
+
+                ent_path_items = [ent["name"] for ent in entity["link"]]
+                parents = ent_path_items[1:len(ent_path_items)-1:]
+                # TODO we should say to user that
+                # few of them are missing in avalon
+                for av_ent in av_ents_by_name:
+                    if av_ent["data"]["parents"] != parents:
+                        continue
+
+                    # TODO we should say to user that found entity
+                    # with same name does not match same ftrack id?
+                    if "ftrackId" not in av_ent["data"]:
+                        selected_av_entities.append(av_ent)
+                        found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id
+                        break

         if not selected_av_entities:
             return {
                 "success": False,
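Note (illustration, not part of the diff): the fallback above matches Avalon assets by name plus parent path when no `ftrackId` is stored. The `parents` slice drops the project root and the entity itself from the ftrack `link` chain; with hypothetical values:

    # entity["link"] holds the full path: project, intermediate parents, entity itself
    ent_path_items = ["MyProject", "shots", "sq01", "sh010"]
    parents = ent_path_items[1:len(ent_path_items) - 1:]
    assert parents == ["shots", "sq01"]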
@@ -155,7 +188,8 @@ class DeleteAssetSubset(BaseAction):
             "created_at": datetime.now(),
             "project_name": project_name,
             "subset_ids_by_name": {},
-            "subset_ids_by_parent": {}
+            "subset_ids_by_parent": {},
+            "without_ftrack_id": found_without_ftrack_id
         }

         id_item = {
@@ -413,14 +447,21 @@ class DeleteAssetSubset(BaseAction):
         asset_ids_to_archive = []
         ftrack_ids_to_delete = []
         if len(assets_to_delete) > 0:
+            map_av_ftrack_id = spec_data["without_ftrack_id"]
             # Prepare data when deleting whole avalon asset
             avalon_assets = self.dbcon.find({"type": "asset"})
             avalon_assets_by_parent = collections.defaultdict(list)
             for asset in avalon_assets:
+                asset_id = asset["_id"]
                 parent_id = asset["data"]["visualParent"]
                 avalon_assets_by_parent[parent_id].append(asset)
-                if asset["_id"] in assets_to_delete:
-                    ftrack_id = asset["data"]["ftrackId"]
+                if asset_id in assets_to_delete:
+                    ftrack_id = map_av_ftrack_id.get(str(asset_id))
+                    if not ftrack_id:
+                        ftrack_id = asset["data"].get("ftrackId")
+
+                    if not ftrack_id:
+                        continue
                     ftrack_ids_to_delete.append(ftrack_id)

         children_queue = Queue()
@@ -1445,7 +1445,7 @@ class SyncToAvalonEvent(BaseEvent):
             .get("name", {})
             .get("new")
         )
-        avalon_ent_by_name = self.avalon_ents_by_name.get(name)
+        avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {}
         avalon_ent_by_name_ftrack_id = (
             avalon_ent_by_name
             .get("data", {})
@@ -208,11 +208,8 @@ class UserAssigmentEvent(BaseEvent):
         work_dir = anatomy.format(data)['avalon']['work']
         # we also need publish but not whole
         filled_all = anatomy.format_all(data)
-        if "partial" not in filled_all:
-            publish = filled_all['avalon']['publish']
-        else:
-            # Backwards compatibility
-            publish = filled_all["partial"]['avalon']['publish']
+        publish = filled_all['avalon']['publish']

         # now find path to {asset}
         m = re.search("(^.+?{})".format(data['asset']),
                       publish)
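Note (illustration, not part of the diff): the non-greedy `(^.+?{})` pattern in the context lines above captures the shortest prefix of the publish path that ends at the asset name; with a hypothetical path:

    import re

    data = {"asset": "sh010"}
    publish = "/projects/proj/publish/shots/sh010/render/v001"
    m = re.search("(^.+?{})".format(data["asset"]), publish)
    assert m.group(1) == "/projects/proj/publish/shots/sh010"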
@@ -374,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None):
        now_node.setInput(0, prev_node)

    # imprinting group node
-    GN = avalon.nuke.imprint(GN, data["avalon"])
+    avalon.nuke.imprint(GN, data["avalon"], tab="Pype")

    divider = nuke.Text_Knob('')
    GN.addKnob(divider)
@@ -1013,7 +1013,8 @@ class BuildWorkfile(WorkfileSettings):
     def process(self,
                 regex_filter=None,
                 version=None,
-                representations=["exr", "dpx", "lutJson", "mov", "preview"]):
+                representations=["exr", "dpx", "lutJson", "mov",
+                                 "preview", "png"]):
         """
         A short description.

@@ -1054,9 +1055,10 @@ class BuildWorkfile(WorkfileSettings):
         wn["render"].setValue(True)
         vn.setInput(0, wn)

-        bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
-                                   color='0xcc1102ff', layer=-1,
-                                   nodes=[wn])
+        # adding backdrop under write
+        self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
+                             color='0xcc1102ff', layer=-1,
+                             nodes=[wn])

         # move position
         self.position_up(4)
@@ -1070,10 +1072,12 @@ class BuildWorkfile(WorkfileSettings):
             version=version,
             representations=representations)

-        log.info("__ subsets: `{}`".format(subsets))
+        for name, subset in subsets.items():
+            log.debug("___________________")
+            log.debug(name)
+            log.debug(subset["version"])

         nodes_backdrop = list()

         for name, subset in subsets.items():
             if "lut" in name:
                 continue
@@ -1103,9 +1107,10 @@ class BuildWorkfile(WorkfileSettings):
         # move position
         self.position_right()

-        bdn = self.create_backdrop(label="Loaded Reads",
-                                   color='0x2d7702ff', layer=-1,
-                                   nodes=nodes_backdrop)
+        # adding backdrop under all read nodes
+        self.create_backdrop(label="Loaded Reads",
+                             color='0x2d7702ff', layer=-1,
+                             nodes=nodes_backdrop)

     def read_loader(self, representation):
         """
@@ -1269,7 +1274,7 @@ class ExporterReview:
             'ext': self.ext,
             'files': self.file,
             "stagingDir": self.staging_dir,
-            "anatomy_template": "publish",
+            "anatomy_template": "render",
             "tags": [self.name.replace("_", "-")] + add_tags
         }

@@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None):
     pyblish.register_gui(os.getenv("PYBLISH_GUI", None))


-def install(config):
+def install():
     """
     Installing Nukestudio integration for avalon

pype/plugins/global/publish/collect_datetime_data.py  (new file, +18)
@@ -0,0 +1,18 @@
+"""These data *must* be collected only once during publishing process.
+
+Provides:
+    context -> datetimeData
+"""
+
+import pyblish.api
+from pypeapp import config
+
+
+class CollectDateTimeData(pyblish.api.ContextPlugin):
+    order = pyblish.api.CollectorOrder
+    label = "Collect DateTime data"
+
+    def process(self, context):
+        key = "datetimeData"
+        if key not in context.data:
+            context.data[key] = config.get_datetime_data()
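Note (illustration, not part of the diff): guarding on the key makes the collector idempotent, so every plugin in the publish sees the same timestamp no matter how often collection runs. A minimal sketch with a stand-in for `config.get_datetime_data()`:

    from datetime import datetime

    context_data = {}

    def collect(context_data):
        # only the first call stores a value; later calls keep it
        if "datetimeData" not in context_data:
            context_data["datetimeData"] = {"dd": datetime.now().strftime("%d")}

    collect(context_data)
    first = context_data["datetimeData"]
    collect(context_data)
    assert context_data["datetimeData"] is first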
@@ -78,6 +78,8 @@ class CollectTemplates(pyblish.api.InstancePlugin):
         if hierarchy:
-            hierarchy = os.path.sep.join(hierarchy)
+            # hierarchy = os.path.sep.join(hierarchy)
+            hierarchy = os.path.join(*hierarchy)
         else:
             hierarchy = ""

         template_data = {"root": api.Session["AVALON_PROJECTS"],
                          "project": {"name": project_name,
@@ -88,7 +90,11 @@ class CollectTemplates(pyblish.api.InstancePlugin):
                          "subset": subset_name,
                          "version": version_number,
                          "hierarchy": hierarchy.replace("\\", "/"),
-                         "representation": "TEMP")}
+                         "representation": "TEMP"}
+
+        # Add datetime data to template data
+        datetime_data = instance.context.data.get("datetimeData") or {}
+        template_data.update(datetime_data)

         resolution_width = instance.data.get("resolutionWidth")
         resolution_height = instance.data.get("resolutionHeight")
@@ -16,7 +16,7 @@ class ExtractBurnin(pype.api.Extractor):
         `tags` including `burnin`
     """

-    label = "Quicktime with burnins"
+    label = "Extract burnins"
     order = pyblish.api.ExtractorOrder + 0.03
     families = ["review", "burnin"]
     hosts = ["nuke", "maya", "shell"]
@@ -45,7 +45,8 @@ class ExtractBurnin(pype.api.Extractor):
         }

         # Add datetime data to preparation data
-        prep_data.update(config.get_datetime_data())
+        datetime_data = instance.context.data.get("datetimeData") or {}
+        prep_data.update(datetime_data)

         slate_frame_start = frame_start
         slate_frame_end = frame_end
@@ -77,31 +78,38 @@ class ExtractBurnin(pype.api.Extractor):
             if "burnin" not in repre.get("tags", []):
                 continue

+            is_sequence = "sequence" in repre.get("tags", [])
+
             stagingdir = repre["stagingDir"]
             filename = "{0}".format(repre["files"])

+            if is_sequence:
+                filename = repre["sequence_file"]
+
             name = "_burnin"
             ext = os.path.splitext(filename)[1]
             movieFileBurnin = filename.replace(ext, "") + name + ext

+            if is_sequence:
+                fn_splt = filename.split(".")
+                movieFileBurnin = ".".join(
+                    ((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))
+
+            self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin))
+
             full_movie_path = os.path.join(
-                os.path.normpath(stagingdir), repre["files"]
-            )
+                os.path.normpath(stagingdir), filename)
             full_burnin_path = os.path.join(
-                os.path.normpath(stagingdir), movieFileBurnin
-            )
+                os.path.normpath(stagingdir), movieFileBurnin)
+
+            self.log.debug("__ full_movie_path: {}".format(full_movie_path))
+            self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))

             # create copy of prep_data for anatomy formatting
             _prep_data = copy.deepcopy(prep_data)
             _prep_data["representation"] = repre["name"]
             filled_anatomy = anatomy.format_all(_prep_data)
-            if hasattr(filled_anatomy, "get_solved"):
-                _filled_anatomy = filled_anatomy.get_solved()
-            else:
-                # Backwards compatibility
-                _filled_anatomy = filled_anatomy.get("solved")
-            _prep_data["anatomy"] = _filled_anatomy or {}
+            _prep_data["anatomy"] = filled_anatomy.get_solved()

             burnin_data = {
                 "input": full_movie_path.replace("\\", "/"),
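Note (illustration, not part of the diff): for sequences the burnin name is rebuilt from the dot-separated parts so the `%08d` frame token survives; a worked example with a hypothetical file name:

    filename = "shot010_render.%08d.png"      # repre["sequence_file"]
    name = "_burnin"
    fn_splt = filename.split(".")             # ["shot010_render", "%08d", "png"]
    movieFileBurnin = ".".join(((fn_splt[0] + name), fn_splt[-2], fn_splt[-1]))
    assert movieFileBurnin == "shot010_render_burnin.%08d.png"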
@@ -149,15 +157,35 @@ class ExtractBurnin(pype.api.Extractor):
             self.log.debug("Output: {}".format(output))

             repre_update = {
                 "anatomy_template": "render",
                 "files": movieFileBurnin,
                 "name": repre["name"],
                 "tags": [x for x in repre["tags"] if x != "delete"]
             }
+
+            if is_sequence:
+                burnin_seq_files = list()
+                for frame_index in range(_prep_data["duration"] + 1):
+                    if frame_index == 0:
+                        continue
+                    burnin_seq_files.append(movieFileBurnin % frame_index)
+                repre_update.update({
+                    "files": burnin_seq_files
+                })
+
             instance.data["representations"][i].update(repre_update)

             # removing the source mov file
-            os.remove(full_movie_path)
-            self.log.debug("Removed: `{}`".format(full_movie_path))
+            if is_sequence:
+                for frame_index in range(_prep_data["duration"] + 1):
+                    if frame_index == 0:
+                        continue
+                    rm_file = full_movie_path % frame_index
+                    os.remove(rm_file)
+                    self.log.debug("Removed: `{}`".format(rm_file))
+            else:
+                os.remove(full_movie_path)
+                self.log.debug("Removed: `{}`".format(full_movie_path))

             # Remove any representations tagged for deletion.
             for repre in instance.data["representations"]:
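Note (illustration, not part of the diff): `range(duration + 1)` with the `frame_index == 0` guard walks frames 1..duration, and `%` substitutes each index into the `%08d` pattern:

    movieFileBurnin = "shot010_render_burnin.%08d.png"   # hypothetical
    duration = 3
    burnin_seq_files = [
        movieFileBurnin % i for i in range(duration + 1) if i != 0
    ]
    # ['...00000001.png', '...00000002.png', '...00000003.png']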
@@ -53,10 +53,21 @@ class ExtractReview(pyblish.api.InstancePlugin):

             if "review" in tags:
                 staging_dir = repre["stagingDir"]

                 # iterating preset output profiles
                 for name, profile in output_profiles.items():
+                    repre_new = repre.copy()
+                    ext = profile.get("ext", None)
+                    p_tags = profile.get('tags', [])
+                    self.log.info("p_tags: `{}`".format(p_tags))
+
+                    # adding control for presets to be sequence
+                    # or single file
+                    is_sequence = ("sequence" in p_tags) and (ext in (
+                        "png", "jpg", "jpeg"))
+
                     self.log.debug("Profile name: {}".format(name))

-                    ext = profile.get("ext", None)
                     if not ext:
                         ext = "mov"
                         self.log.warning(
@@ -88,18 +99,22 @@ class ExtractReview(pyblish.api.InstancePlugin):
                     filename = repre["files"].split(".")[0]

                     repr_file = filename + "_{0}.{1}".format(name, ext)

                     full_output_path = os.path.join(
                         staging_dir, repr_file)

+                    if is_sequence:
+                        filename_base = filename + "_{0}".format(name)
+                        repr_file = filename_base + ".%08d.{0}".format(
+                            ext)
+                        repre_new["sequence_file"] = repr_file
+                        full_output_path = os.path.join(
+                            staging_dir, filename_base, repr_file)
+
                     self.log.info("input {}".format(full_input_path))
                     self.log.info("output {}".format(full_output_path))

-                    repre_new = repre.copy()
-
                     new_tags = [x for x in tags if x != "delete"]
-                    p_tags = profile.get('tags', [])
-                    self.log.info("p_tags: `{}`".format(p_tags))

                     # add families
                     [instance.data["families"].append(t)
                      for t in p_tags
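Note (illustration, not part of the diff): the `.%08d.` token in `repr_file` is the printf-style pattern ffmpeg expands when writing an image sequence, so the sequence profile gets its own subfolder; with hypothetical names:

    import os

    staging_dir = "/tmp/staging"
    filename_base = "shot010_h264"
    repr_file = filename_base + ".%08d.{0}".format("png")
    full_output_path = os.path.join(staging_dir, filename_base, repr_file)
    # /tmp/staging/shot010_h264/shot010_h264.%08d.png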
@@ -288,6 +303,14 @@ class ExtractReview(pyblish.api.InstancePlugin):
                     self.log.debug(
                         "_ output_args: `{}`".format(output_args))

+                    if is_sequence:
+                        stg_dir = os.path.dirname(full_output_path)
+
+                        if not os.path.exists(stg_dir):
+                            self.log.debug(
+                                "creating dir: {}".format(stg_dir))
+                            os.mkdir(stg_dir)
+
                     mov_args = [
                         os.path.join(
                             os.environ.get(
@@ -315,6 +338,12 @@ class ExtractReview(pyblish.api.InstancePlugin):
                         "resolutionHeight": resolution_height,
                         "resolutionWidth": resolution_width,
                     })
+                    if is_sequence:
+                        repre_new.update({
+                            "stagingDir": stg_dir,
+                            "files": os.listdir(stg_dir)
+                        })

                     if repre_new.get('preview'):
                         repre_new.pop("preview")
                     if repre_new.get('thumbnail'):
@@ -24,7 +24,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):

     label = "Integrate Asset"
     order = pyblish.api.IntegratorOrder
-    families = ["assembly"]
+    families = []
     exclude_families = ["clip"]

     def process(self, instance):
@@ -76,8 +76,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         "source",
         "matchmove",
-        "image"
+        "image",
+        "source",
+        "assembly"
     ]
     exclude_families = ["clip"]
+    db_representation_context_keys = [
+        "project", "asset", "task", "subset", "version", "representation",
+        "family", "hierarchy", "task", "username"
+    ]

     def process(self, instance):
@@ -266,6 +272,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             "version": int(version["name"]),
             "hierarchy": hierarchy}

+        # Add datetime data to template data
+        datetime_data = context.data.get("datetimeData") or {}
+        template_data.update(datetime_data)
+
         resolution_width = repre.get("resolutionWidth")
         resolution_height = repre.get("resolutionHeight")
         fps = instance.data.get("fps")
@@ -286,7 +296,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             anatomy.templates[template_name]["path"])

         sequence_repre = isinstance(files, list)
-
+        repre_context = None
         if sequence_repre:
             src_collections, remainder = clique.assemble(files)
             self.log.debug(
@@ -309,10 +319,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 template_data["representation"] = repre['ext']
                 template_data["frame"] = src_padding_exp % i
                 anatomy_filled = anatomy.format(template_data)
+                template_filled = anatomy_filled[template_name]["path"]
+                if repre_context is None:
+                    repre_context = template_filled.used_values

                 test_dest_files.append(
-                    os.path.normpath(
-                        anatomy_filled[template_name]["path"])
+                    os.path.normpath(template_filled)
                 )

             self.log.debug(
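Note (conceptual sketch, not the pypeapp API): `used_values` lets the integrator recover which template keys a formatted path actually consumed, instead of rebuilding the context dict by hand. The idea, with plain `str.format`-style data:

    template = "{project}/{hierarchy}/{asset}/v{version:0>3}"
    data = {"project": "proj", "hierarchy": "shots", "asset": "sh010",
            "version": 1, "resolution_width": 1920}
    path = template.format(**data)
    # a template result that tracked usage would report only the consumed keys:
    used_values = {k: data[k] for k in ("project", "hierarchy", "asset", "version")}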
@@ -326,8 +338,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             index_frame_start = None

             if repre.get("frameStart"):
-                frame_start_padding = len(str(
-                    repre.get("frameEnd")))
+                frame_start_padding = anatomy.templates["render"]["padding"]
                 index_frame_start = int(repre.get("frameStart"))

             # exception for slate workflow
@@ -339,10 +350,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             for i in src_collection.indexes:
                 src_padding = src_padding_exp % i

-                # for adding first frame into db
-                if not dst_start_frame:
-                    dst_start_frame = src_padding
-
                 src_file_name = "{0}{1}{2}".format(
                     src_head, src_padding, src_tail)

@@ -364,6 +371,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
                 self.log.debug("source: {}".format(src))
                 instance.data["transfers"].append([src, dst])

+                # for adding first frame into db
+                if not dst_start_frame:
+                    dst_start_frame = dst_padding
+
             dst = "{0}{1}{2}".format(
                 dst_head,
                 dst_start_frame,
@@ -392,14 +404,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

             src = os.path.join(stagingdir, fname)
             anatomy_filled = anatomy.format(template_data)
-            dst = os.path.normpath(
-                anatomy_filled[template_name]["path"]).replace("..", ".")
+            template_filled = anatomy_filled[template_name]["path"]
+            repre_context = template_filled.used_values
+            dst = os.path.normpath(template_filled).replace("..", ".")

             instance.data["transfers"].append([src, dst])

         repre['published_path'] = self.unc_convert(dst)
         self.log.debug("__ dst: {}".format(dst))

+        for key in self.db_representation_context_keys:
+            value = template_data.get(key)
+            if not value:
+                continue
+            repre_context[key] = template_data[key]
+
         representation = {
             "_id": io.ObjectId(),
             "schema": "pype:representation-2.0",
@@ -411,19 +430,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

             # Imprint shortcut to context
             # for performance reasons.
-            "context": {
-                "root": root,
-                "project": {"name": PROJECT,
-                            "code": project['data']['code']},
-                'task': TASK,
-                "silo": asset.get('silo'),
-                "asset": ASSET,
-                "family": instance.data['family'],
-                "subset": subset["name"],
-                "version": version["name"],
-                "hierarchy": hierarchy,
-                "representation": repre['ext']
-            }
+            "context": repre_context
         }

         if repre.get("outputName"):
@@ -294,6 +294,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         data = instance.data.copy()
         context = instance.context

+        if hasattr(instance, "_log"):
+            data['_log'] = instance._log
         render_job = data.pop("deadlineSubmissionJob", None)
         submission_type = "deadline"
         if not render_job:
@@ -47,7 +47,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin):

         host = pyblish.api.current_host()
         to_check = context.data["presets"].get(
-            host, {}).get("ftrack_attributes")
+            host, {}).get("ftrack_custom_attributes")
         if not to_check:
             self.log.warning("ftrack_attributes preset not found")
             return
@@ -52,6 +52,11 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):

         newNodes = (list(set(nodes) - set(shapes)))

+        current_namespace = pm.namespaceInfo(currentNamespace=True)
+
+        if current_namespace != ":":
+            groupName = current_namespace + ":" + groupName
+
         groupNode = pm.PyNode(groupName)
         roots = set()

@@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor):

     def process(self, instance):

-        parent_dir = self.staging_dir(instance)
+        staging_dir = self.staging_dir(instance)
         hierarchy_filename = "{}.abc".format(instance.name)
-        hierarchy_path = os.path.join(parent_dir, hierarchy_filename)
+        hierarchy_path = os.path.join(staging_dir, hierarchy_filename)
         json_filename = "{}.json".format(instance.name)
-        json_path = os.path.join(parent_dir, json_filename)
+        json_path = os.path.join(staging_dir, json_filename)

         self.log.info("Dumping scene data for debugging ..")
         with open(json_path, "w") as filepath:
@@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor):
             "uvWrite": True,
             "selection": True})

-        instance.data["files"] = [json_filename, hierarchy_filename]
+        if "representations" not in instance.data:
+            instance.data["representations"] = []
+
+        representation_abc = {
+            'name': 'abc',
+            'ext': 'abc',
+            'files': hierarchy_filename,
+            "stagingDir": staging_dir
+        }
+        instance.data["representations"].append(representation_abc)
+
+        representation_json = {
+            'name': 'json',
+            'ext': 'json',
+            'files': json_filename,
+            "stagingDir": staging_dir
+        }
+        instance.data["representations"].append(representation_json)
         # Remove data
         instance.data.pop("scenedata", None)

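Note (illustration, not part of the diff): the two-line guard before the first append is the usual `setdefault` idiom:

    instance_data = {}
    # equivalent to: if "representations" not in instance_data: instance_data["representations"] = []
    instance_data.setdefault("representations", []).append({"name": "abc"})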
@@ -1,8 +1,9 @@
-import nuke
+import re
 import contextlib

 from avalon import api, io

+import nuke
+from pype.nuke import presets

 from pype.api import Logger
 log = Logger().get_logger(__name__, "nuke")
@@ -24,7 +25,7 @@ def preserve_trim(node):
     offset_frame = None
     if node['frame_mode'].value() == "start at":
         start_at_frame = node['frame'].value()
-    if node['frame_mode'].value() is "offset":
+    if node['frame_mode'].value() == "offset":
         offset_frame = node['frame'].value()

     try:
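Note (illustration, not part of the diff): the `is` to `==` change fixes a latent bug; `is` compares object identity, so a knob value built at runtime can compare unequal even when the text matches:

    value = "".join(["off", "set"])   # builds "offset" at runtime
    print(value == "offset")          # True  - compares contents
    print(value is "offset")          # False in CPython - different objects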
@@ -85,47 +86,48 @@ class LoadMov(api.Loader):
             containerise,
             viewer_update_and_undo_stop
         )

         version = context['version']
         version_data = version.get("data", {})

-        orig_first = version_data.get("frameStart", None)
-        orig_last = version_data.get("frameEnd", None)
+        orig_first = version_data.get("frameStart")
+        orig_last = version_data.get("frameEnd")
         diff = orig_first - 1
         # set first to 1

         first = orig_first - diff
         last = orig_last - diff
-        handles = version_data.get("handles", None)
-        handle_start = version_data.get("handleStart", None)
-        handle_end = version_data.get("handleEnd", None)
-        repr_cont = context["representation"]["context"]
-
-        # fix handle start and end if none are available
-        if not handle_start and not handle_end:
-            handle_start = handles
-            handle_end = handles
+        handle_start = version_data.get("handleStart")
+        handle_end = version_data.get("handleEnd")
+
+        colorspace = version_data.get("colorspace")
+        repr_cont = context["representation"]["context"]

         # create handles offset (only to last, because of mov)
         last += handle_start + handle_end
         # offset should be with handles so it match orig frame range
-        offset_frame = orig_first + handle_start
+        offset_frame = orig_first - handle_start

         # Fallback to asset name when namespace is None
         if namespace is None:
             namespace = context['asset']['name']

-        file = self.fname.replace("\\", "/")
-        log.info("file: {}\n".format(self.fname))
+        file = self.fname
+
+        if not file:
+            repr_id = context["representation"]["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return
+
+        file = file.replace("\\", "/")

         read_name = "Read_{0}_{1}_{2}".format(
             repr_cont["asset"],
             repr_cont["subset"],
             repr_cont["representation"])

         # Create the Loader with the filename path set
         with viewer_update_and_undo_stop():
             # TODO: it might be universal read to img/geo/camera
             read_node = nuke.createNode(
                 "Read",
                 "name {}".format(read_name)
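Note (illustration, not part of the diff): the sign flip on `offset_frame` is what lines the mov back up with the cut once the handles are added to `last`; with hypothetical numbers:

    orig_first = 1001
    handle_start = 10
    offset_frame = orig_first - handle_start   # 991
    # the old code computed 1001 + 10 = 1011, shifting the read past the cut-in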
@@ -139,7 +141,23 @@ class LoadMov(api.Loader):
             read_node["last"].setValue(last)
             read_node["frame_mode"].setValue("start at")
             read_node["frame"].setValue(str(offset_frame))
-        # add additional metadata from the version to imprint to Avalon knob
+
+            if colorspace:
+                read_node["colorspace"].setValue(str(colorspace))
+
+            # load nuke presets for Read's colorspace
+            read_clrs_presets = presets.get_colorspace_preset().get(
+                "nuke", {}).get("read", {})
+
+            # check if any colorspace presets for read is mathing
+            preset_clrsp = next((read_clrs_presets[k]
+                                 for k in read_clrs_presets
+                                 if bool(re.search(k, file))),
+                                None)
+            if preset_clrsp is not None:
+                read_node["colorspace"].setValue(str(preset_clrsp))
+
+        # add additional metadata from the version to imprint Avalon knob
         add_keys = [
             "frameStart", "frameEnd", "handles", "source", "author",
             "fps", "version", "handleStart", "handleEnd"
@@ -147,7 +165,7 @@ class LoadMov(api.Loader):

         data_imprint = {}
         for key in add_keys:
-            if key is 'version':
+            if key == 'version':
                 data_imprint.update({
                     key: context["version"]['name']
                 })
@@ -186,10 +204,18 @@ class LoadMov(api.Loader):
         )

         node = nuke.toNode(container['objectName'])
+        # TODO: prepare also for other Read img/geo/camera
         assert node.Class() == "Read", "Must be Read"

-        file = api.get_representation_path(representation)
+        file = self.fname
+
+        if not file:
+            repr_id = representation["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return
+
+        file = file.replace("\\", "/")

         # Get start frame from version data
         version = io.find_one({
@@ -207,15 +233,17 @@ class LoadMov(api.Loader):

         version_data = version.get("data", {})

-        orig_first = version_data.get("frameStart", None)
-        orig_last = version_data.get("frameEnd", None)
+        orig_first = version_data.get("frameStart")
+        orig_last = version_data.get("frameEnd")
         diff = orig_first - 1

         # set first to 1
         first = orig_first - diff
         last = orig_last - diff
         handles = version_data.get("handles", 0)
         handle_start = version_data.get("handleStart", 0)
         handle_end = version_data.get("handleEnd", 0)
+        colorspace = version_data.get("colorspace")

         if first is None:
             log.warning("Missing start frame for updated version"
@@ -231,11 +259,11 @@ class LoadMov(api.Loader):
         # create handles offset (only to last, because of mov)
         last += handle_start + handle_end
         # offset should be with handles so it match orig frame range
-        offset_frame = orig_first + handle_start
+        offset_frame = orig_first - handle_start

         # Update the loader's path whilst preserving some values
         with preserve_trim(node):
-            node["file"].setValue(file["path"])
+            node["file"].setValue(file)
             log.info("__ node['file']: {}".format(node["file"].value()))

         # Set the global in to the start frame of the sequence
@@ -247,19 +275,35 @@ class LoadMov(api.Loader):
         node["frame_mode"].setValue("start at")
         node["frame"].setValue(str(offset_frame))

+        if colorspace:
+            node["colorspace"].setValue(str(colorspace))
+
+        # load nuke presets for Read's colorspace
+        read_clrs_presets = presets.get_colorspace_preset().get(
+            "nuke", {}).get("read", {})
+
+        # check if any colorspace presets for read is mathing
+        preset_clrsp = next((read_clrs_presets[k]
+                             for k in read_clrs_presets
+                             if bool(re.search(k, file))),
+                            None)
+        if preset_clrsp is not None:
+            node["colorspace"].setValue(str(preset_clrsp))
+
         updated_dict = {}
         updated_dict.update({
             "representation": str(representation["_id"]),
-            "frameStart": version_data.get("frameStart"),
-            "frameEnd": version_data.get("frameEnd"),
-            "version": version.get("name"),
+            "frameStart": str(first),
+            "frameEnd": str(last),
+            "version": str(version.get("name")),
             "colorspace": version_data.get("colorspace"),
             "source": version_data.get("source"),
             "handles": version_data.get("handles"),
-            "handleStart": version_data.get("handleStart"),
-            "handleEnd": version_data.get("handleEnd"),
-            "fps": version_data.get("fps"),
+            "handleStart": str(handle_start),
+            "handleEnd": str(handle_end),
+            "fps": str(version_data.get("fps")),
             "author": version_data.get("author"),
-            "outputDir": version_data.get("outputDir"),
+            "outputDir": version_data.get("outputDir")
         })

         # change color of node
@@ -1,10 +1,12 @@
-import nuke
+import re
 import contextlib

 from avalon import api, io

+import nuke
+from pype.nuke import presets

 from pype.api import Logger

 log = Logger().get_logger(__name__, "nuke")

@@ -24,7 +26,7 @@ def preserve_trim(node):
     offset_frame = None
     if node['frame_mode'].value() == "start at":
         start_at_frame = node['frame'].value()
-    if node['frame_mode'].value() is "offset":
+    if node['frame_mode'].value() == "offset":
         offset_frame = node['frame'].value()

     try:
@@ -93,7 +95,6 @@ class LoadSequence(api.Loader):

         self.first_frame = int(nuke.root()["first_frame"].getValue())
         self.handle_start = version_data.get("handleStart", 0)
-        self.handle_start = version_data.get("handleStart", 0)
         self.handle_end = version_data.get("handleEnd", 0)

         first = version_data.get("frameStart", None)
@@ -106,21 +107,27 @@ class LoadSequence(api.Loader):
         first -= self.handle_start
         last += self.handle_end

-        file = self.fname.replace("\\", "/")
+        file = self.fname

-        log.info("file: {}\n".format(self.fname))
+        if not file:
+            repr_id = context["representation"]["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return
+
+        file = file.replace("\\", "/")

         repr_cont = context["representation"]["context"]
-        read_name = "Read_{0}_{1}_{2}".format(
-            repr_cont["asset"],
-            repr_cont["subset"],
-            repr_cont["representation"])
+
+        if "#" not in file:
+            frame = repr_cont.get("frame")
+            padding = len(frame)
+            file = file.replace(frame, "#"*padding)
+
+        read_name = "Read_{0}_{1}_{2}".format(
+            repr_cont["asset"],
+            repr_cont["subset"],
+            repr_cont["representation"])

         # Create the Loader with the filename path set
         with viewer_update_and_undo_stop():
             # TODO: it might be universal read to img/geo/camera
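Note (illustration, not part of the diff): replacing the literal frame token with `#` padding turns a single resolved path into a Nuke-style sequence pattern:

    file = "renders/shot010.0001.exr"
    frame = "0001"                     # repr_cont.get("frame")
    padding = len(frame)
    file = file.replace(frame, "#" * padding)
    assert file == "renders/shot010.####.exr"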
@@ -130,24 +137,36 @@ class LoadSequence(api.Loader):
             r["file"].setValue(file)

             # Set colorspace defined in version data
-            colorspace = context["version"]["data"].get("colorspace", None)
-            if colorspace is not None:
+            colorspace = context["version"]["data"].get("colorspace")
+            if colorspace:
                 r["colorspace"].setValue(str(colorspace))

+            # load nuke presets for Read's colorspace
+            read_clrs_presets = presets.get_colorspace_preset().get(
+                "nuke", {}).get("read", {})
+
+            # check if any colorspace presets for read is mathing
+            preset_clrsp = next((read_clrs_presets[k]
+                                 for k in read_clrs_presets
+                                 if bool(re.search(k, file))),
+                                None)
+            if preset_clrsp is not None:
+                r["colorspace"].setValue(str(preset_clrsp))
+
             loader_shift(r, first, relative=True)
             r["origfirst"].setValue(int(first))
             r["first"].setValue(int(first))
             r["origlast"].setValue(int(last))
             r["last"].setValue(int(last))

-            # add additional metadata from the version to imprint to Avalon knob
+            # add additional metadata from the version to imprint Avalon knob
             add_keys = ["frameStart", "frameEnd",
                         "source", "colorspace", "author", "fps", "version",
                         "handleStart", "handleEnd"]

             data_imprint = {}
             for k in add_keys:
-                if k is 'version':
+                if k == 'version':
                     data_imprint.update({k: context["version"]['name']})
                 else:
                     data_imprint.update(
@@ -179,7 +198,7 @@ class LoadSequence(api.Loader):
                 rtn["after"].setValue("continue")
                 rtn["input.first_lock"].setValue(True)
                 rtn["input.first"].setValue(
-                        self.handle_start + self.first_frame
+                    self.handle_start + self.first_frame
                 )

                 if time_warp_nodes != []:
@@ -210,16 +229,29 @@ class LoadSequence(api.Loader):
         """

         from avalon.nuke import (
-            ls_img_sequence,
             update_container
         )

         node = nuke.toNode(container['objectName'])
+        # TODO: prepare also for other Read img/geo/camera
         assert node.Class() == "Read", "Must be Read"

-        path = api.get_representation_path(representation)
-        file = ls_img_sequence(path)
+        repr_cont = representation["context"]
+
+        file = self.fname
+
+        if not file:
+            repr_id = representation["_id"]
+            log.warning(
+                "Representation id `{}` is failing to load".format(repr_id))
+            return
+
+        file = file.replace("\\", "/")
+
+        if "#" not in file:
+            frame = repr_cont.get("frame")
+            padding = len(frame)
+            file = file.replace(frame, "#"*padding)

         # Get start frame from version data
         version = io.find_one({
@@ -241,8 +273,8 @@ class LoadSequence(api.Loader):
         self.handle_start = version_data.get("handleStart", 0)
         self.handle_end = version_data.get("handleEnd", 0)

-        first = version_data.get("frameStart", None)
-        last = version_data.get("frameEnd", None)
+        first = version_data.get("frameStart")
+        last = version_data.get("frameEnd")

         if first is None:
             log.warning("Missing start frame for updated version"
@@ -255,7 +287,7 @@ class LoadSequence(api.Loader):

         # Update the loader's path whilst preserving some values
         with preserve_trim(node):
-            node["file"].setValue(file["path"])
+            node["file"].setValue(file)
             log.info("__ node['file']: {}".format(node["file"].value()))

         # Set the global in to the start frame of the sequence
@@ -268,14 +300,14 @@ class LoadSequence(api.Loader):
         updated_dict = {}
         updated_dict.update({
             "representation": str(representation["_id"]),
-            "frameStart": version_data.get("frameStart"),
-            "frameEnd": version_data.get("frameEnd"),
-            "version": version.get("name"),
+            "frameStart": str(first),
+            "frameEnd": str(last),
+            "version": str(version.get("name")),
             "colorspace": version_data.get("colorspace"),
             "source": version_data.get("source"),
-            "handleStart": version_data.get("handleStart"),
-            "handleEnd": version_data.get("handleEnd"),
-            "fps": version_data.get("fps"),
+            "handleStart": str(self.handle_start),
+            "handleEnd": str(self.handle_end),
+            "fps": str(version_data.get("fps")),
             "author": version_data.get("author"),
             "outputDir": version_data.get("outputDir"),
         })

@@ -28,12 +28,15 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
         self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
         for node in nuke.allNodes():

+            if node.Class() in ["Viewer", "Dot"]:
+                continue
+
             try:
                 if node["disable"].value():
                     continue
             except Exception as E:
                 self.log.warning(E)

             # get data from avalon knob
             self.log.debug("node[name]: {}".format(node['name'].value()))
@@ -1,12 +1,12 @@
 import pyblish.api
-import nuke


 class CollectReview(pyblish.api.InstancePlugin):
     """Collect review instance from rendered frames
     """

     order = pyblish.api.CollectorOrder + 0.3
+    family = "review"
     label = "Collect Review"
     hosts = ["nuke"]
     families = ["render", "render.local", "render.farm"]
@@ -25,4 +25,6 @@ class CollectReview(pyblish.api.InstancePlugin):

         instance.data["families"].append("review")
+        instance.data['families'].append('ftrack')

         self.log.info("Review collected: `{}`".format(instance))
+        self.log.debug("__ instance.data: `{}`".format(instance.data))
@@ -14,6 +14,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
     families = ["write"]

     def process(self, instance):
+        # adding 2d focused rendering
+        instance.data["families"].append("render2d")

         node = None
         for x in instance:
@@ -97,7 +99,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
             "frameEnd": last_frame - handle_end,
             "version": int(instance.data['version']),
             "colorspace": node["colorspace"].value(),
-            "families": [instance.data["family"]],
+            "families": ["render"],
             "subset": instance.data["subset"],
             "fps": instance.context.data["fps"]
         }
@@ -5,6 +5,7 @@ import json
 import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
 from pypeapp.lib import config
 from pype import api as pype
+from subprocess import Popen, PIPE
 # FFmpeg in PATH is required

@@ -21,6 +22,7 @@ else:
 FFMPEG = (
+    '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
 ).format(os.path.normpath(ffmpeg_path + "ffmpeg"))

 FFPROBE = (
     '{} -v quiet -print_format json -show_format -show_streams %(source)s'
 ).format(os.path.normpath(ffmpeg_path + "ffprobe"))
@@ -248,6 +250,33 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
             'filters': filters
         }).strip()

+    def render(self, output, args=None, overwrite=False, **kwargs):
+        """
+        Render the media to a specified destination.
+
+        :param str output: output file
+        :param str args: additional FFMPEG arguments
+        :param bool overwrite: overwrite the output if it exists
+        """
+        if not overwrite and os.path.exists(output):
+            raise RuntimeError("Destination '%s' exists, please "
+                               "use overwrite" % output)
+
+        is_sequence = "%" in output
+
+        command = self.command(output=output,
+                               args=args,
+                               overwrite=overwrite)
+        proc = Popen(command, shell=True)
+        proc.communicate()
+        if proc.returncode != 0:
+            raise RuntimeError("Failed to render '%s': %s'"
+                               % (output, command))
+        if is_sequence:
+            output = output % kwargs.get("duration")
+        if not os.path.exists(output):
+            raise RuntimeError("Failed to generate this fucking file '%s'" % output)
+

 def example(input_path, output_path):
     options_init = {
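Note (illustration, not part of the diff): when the output contains a `%` pattern, the final existence check substitutes the duration so only the last expected frame is verified:

    output = "/tmp/burnin.%08d.png"    # hypothetical
    duration = 100
    last_frame = output % duration     # /tmp/burnin.00000100.png
    # os.path.exists(last_frame) then confirms the render reached the end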
@@ -349,7 +378,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):

     frame_start = data.get("frame_start")
-
+    frame_start_tc = data.get('frame_start_tc', frame_start)

     stream = burnin._streams[0]
     if "resolution_width" not in data:
         data["resolution_width"] = stream.get("width", "Unknown")
@@ -436,7 +465,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
     if codec_data is not []:
         codec_args = " ".join(codec_data)

-    burnin.render(output_path, args=codec_args, overwrite=overwrite)
+    burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)


 if __name__ == '__main__':