Mirror of https://github.com/ynput/ayon-core.git, synced 2026-01-01 08:24:53 +01:00
Merge branch 'feature/OP-1562_Flame--pre-collecting-timeline-segments-as-subsets' into feature/OP-1537_Flame-Submitting-jobs-to-Burner-farm
Commit: dabcf629b7
3 changed files with 50 additions and 46 deletions
File 1 of 3:

@@ -601,12 +601,12 @@ def get_clips_in_reels(project):
     return output_clips
 
 
-def get_reformated_path(path, padded=True):
+def get_reformated_path(fname, padded=True):
     """
     Return fixed python expression path
 
     Args:
-        path (str): path url or simple file name
+        fname (str): file name
 
     Returns:
         type: string with reformated path
@@ -615,27 +615,27 @@ def get_reformated_path(path, padded=True):
         get_reformated_path("plate.1001.exr") > plate.%04d.exr
 
     """
-    padding = get_padding_from_path(path)
-    found = get_frame_from_path(path)
+    padding = get_padding_from_path(fname)
+    found = get_frame_from_path(fname)
 
     if not found:
-        log.info("Path is not sequence: {}".format(path))
-        return path
+        log.info("File name is not sequence: {}".format(fname))
+        return fname
 
     if padded:
-        path = path.replace(found, "%0{}d".format(padding))
+        fname = fname.replace(found, "%0{}d".format(padding))
     else:
-        path = path.replace(found, "%d")
+        fname = fname.replace(found, "%d")
 
-    return path
+    return fname
 
 
-def get_padding_from_path(path):
+def get_padding_from_path(fname):
     """
     Return padding number from Flame path style
 
     Args:
-        path (str): path url or simple file name
+        fname (str): file name
 
     Returns:
         int: padding number
@@ -644,20 +644,17 @@ def get_padding_from_path(path):
         get_padding_from_path("plate.0001.exr") > 4
 
     """
-    found = get_frame_from_path(path)
+    found = get_frame_from_path(fname)
 
-    if found:
-        return len(found)
-    else:
-        return None
+    return len(found) if found else None
 
 
-def get_frame_from_path(path):
+def get_frame_from_path(fname):
     """
     Return sequence number from Flame path style
 
     Args:
-        path (str): path url or simple file name
+        fname (str): file name
 
     Returns:
         int: sequence frame number
@@ -669,12 +666,9 @@ def get_frame_from_path(path):
     """
    frame_pattern = re.compile(r"[._](\d+)[.]")
 
-    found = re.findall(frame_pattern, path)
+    found = re.findall(frame_pattern, fname)
 
-    if found:
-        return found.pop()
-    else:
-        return None
+    return found.pop() if found else None
 
 
 @contextlib.contextmanager
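Review note: the hunks above only rename the `path` parameter to `fname` and collapse the `if/else` returns into conditional expressions; the frame-matching behaviour is unchanged. A minimal standalone sketch of how the three helpers behave after the change (stub logger instead of the module's own `log`):

import re
import logging

log = logging.getLogger(__name__)


def get_frame_from_path(fname):
    # last ".<digits>." group, e.g. "1001" for "plate.1001.exr"
    frame_pattern = re.compile(r"[._](\d+)[.]")
    found = re.findall(frame_pattern, fname)
    return found.pop() if found else None


def get_padding_from_path(fname):
    # padding is the length of the matched frame string ("0001" -> 4)
    found = get_frame_from_path(fname)
    return len(found) if found else None


def get_reformated_path(fname, padded=True):
    # replace the frame number with a printf-style token
    padding = get_padding_from_path(fname)
    found = get_frame_from_path(fname)
    if not found:
        log.info("File name is not sequence: {}".format(fname))
        return fname
    return fname.replace(found, "%0{}d".format(padding) if padded else "%d")


print(get_reformated_path("plate.1001.exr"))                # plate.%04d.exr
print(get_reformated_path("plate.1001.exr", padded=False))  # plate.%d.exr
print(get_reformated_path("plate.exr"))                     # plate.exr (not a sequence)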
File 2 of 3:

@@ -10,7 +10,7 @@ from pprint import pformat
 class PrecollectInstances(pyblish.api.ContextPlugin):
     """Collect all Track items selection."""
 
-    order = pyblish.api.CollectorOrder - 0.49
+    order = pyblish.api.CollectorOrder - 0.47
     label = "Precollect Instances"
     hosts = ["flame"]
 
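Review note: pyblish runs collectors in ascending `order`, so this offset change pairs with the timeline plugin's change later in this diff (-0.5 to -0.48): the OTIO timeline is still pre-collected before the instances. A quick illustration of that ordering:

import pyblish.api

# CollectorOrder is the base offset for collectors; lower values run first.
plugin_orders = {
    "Precollect Timeline OTIO": pyblish.api.CollectorOrder - 0.48,
    "Precollect Instances": pyblish.api.CollectorOrder - 0.47,
}
for label, order in sorted(plugin_orders.items(), key=lambda item: item[1]):
    print("{:>24} -> order {}".format(label, order))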
@@ -57,16 +57,10 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         marker_data["handleEnd"] = min(
             marker_data["handleEnd"], tail)
 
-        # add audio to families
-        with_audio = False
-        if marker_data.pop("audio"):
-            with_audio = True
+        with_audio = bool(marker_data.pop("audio"))
 
-        # add tag data to instance data
-        data = {
-            k: v for k, v in marker_data.items()
-            if k not in ("id", "applieswhole", "label")
-        }
+        # add marker data to instance data
+        inst_data = dict(marker_data.items())
 
         asset = marker_data["asset"]
         subset = marker_data["subset"]
@@ -83,7 +77,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         label += " {}".format(subset)
         label += " {}".format("[" + ", ".join(families) + "]")
 
-        data.update({
+        inst_data.update({
             "name": "{}_{}".format(asset, subset),
             "label": label,
             "asset": asset,
@@ -96,17 +90,19 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
             "path": file_path
         })
 
-        # otio clip data
+        # get otio clip data
         otio_data = self._get_otio_clip_instance_data(clip_data) or {}
         self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
-        data.update(otio_data)
-        self.log.debug("__ data: {}".format(pformat(data)))
+
+        # add to instance data
+        inst_data.update(otio_data)
+        self.log.debug("__ inst_data: {}".format(pformat(inst_data)))
 
         # add resolution
-        self._get_resolution_to_data(data, context)
+        self._get_resolution_to_data(inst_data, context)
 
         # create instance
-        instance = context.create_instance(**data)
+        instance = context.create_instance(**inst_data)
 
         # add colorspace data
         instance.data.update({
@@ -116,7 +112,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
         })
 
         # create shot instance for shot attributes create/update
-        self._create_shot_instance(context, clip_name, **data)
+        self._create_shot_instance(context, clip_name, **inst_data)
 
         self.log.info("Creating instance: {}".format(instance))
         self.log.info(
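Review note: besides renaming `data` to `inst_data`, the instance data now starts as a plain copy of the marker data; the old filter that dropped the "id", "applieswhole" and "label" keys is gone, so those keys now travel into the created instance as well. A small standalone illustration with a hypothetical marker payload:

# Hypothetical marker_data payload, for illustration only.
marker_data = {
    "audio": 1,
    "asset": "sh010",
    "subset": "plateMain",
    "handleEnd": 10,
    "id": "pyblish.avalon.instance",
}

# the new one-liner is equivalent to the removed False / if / True block
with_audio = bool(marker_data.pop("audio"))
assert with_audio is True

# instance data is now a straight copy of all remaining marker keys
inst_data = dict(marker_data.items())
assert "id" in inst_data  # no longer filtered out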
File 3 of 3:

@@ -1,5 +1,6 @@
 import pyblish.api
 import avalon.api as avalon
+import openpype.lib as oplib
 import openpype.hosts.flame.api as opfapi
 from openpype.hosts.flame.otio import flame_export
 
@@ -8,22 +9,35 @@ class PrecollecTimelineOCIO(pyblish.api.ContextPlugin):
     """Inject the current working context into publish context"""
 
     label = "Precollect Timeline OTIO"
-    order = pyblish.api.CollectorOrder - 0.5
+    order = pyblish.api.CollectorOrder - 0.48
 
     def process(self, context):
-        asset = avalon.Session["AVALON_ASSET"]
-        subset = "otioTimeline"
+        # plugin defined
+        family = "workfile"
+        variant = "otioTimeline"
+
+        # main
+        asset_doc = context.data["assetEntity"]
+        task_name = avalon.Session["AVALON_TASK"]
         project = opfapi.get_current_project()
         sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
 
+        # create subset name
+        subset_name = oplib.get_subset_name_with_asset_doc(
+            family,
+            variant,
+            task_name,
+            asset_doc,
+        )
+
         # adding otio timeline to context
         with opfapi.maintained_segment_selection(sequence):
             otio_timeline = flame_export.create_otio_timeline(sequence)
 
         instance_data = {
-            "name": "{}_{}".format(asset, subset),
-            "asset": asset,
-            "subset": "{}{}".format(asset, subset.capitalize()),
+            "name": subset_name,
+            "asset": asset_doc["name"],
+            "subset": subset_name,
             "family": "workfile"
         }
 
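Review note: the subset name is no longer hard-coded from the session asset; it is now built by openpype.lib from family, variant, task and the asset document, so the project's subset-name templates apply. A rough sketch of the naming difference (the new-style value below assumes a plain "{family}{Variant}" template; the real result comes from oplib.get_subset_name_with_asset_doc and project settings):

# Old, removed naming (asset previously taken from avalon.Session["AVALON_ASSET"]):
asset = "sh010"            # hypothetical asset name
subset = "otioTimeline"
old_name = "{}_{}".format(asset, subset)                # "sh010_otioTimeline"
old_subset = "{}{}".format(asset, subset.capitalize())  # "sh010Otiotimeline"

# New naming is delegated to openpype.lib; assuming a "{family}{Variant}"
# template it would resolve to something like:
family, variant = "workfile", "otioTimeline"
new_subset = "{}{}".format(family, variant[0].upper() + variant[1:])  # "workfileOtioTimeline"

print(old_name, old_subset, new_subset)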