mirror of
https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00
Merge branch 'develop' into feature/OP-3098_flame-extract-preset-distribution
This commit is contained in:
commit
dbf212f299
53 changed files with 2164 additions and 769 deletions
0
.gitmodules
vendored
0
.gitmodules
vendored
|
|
@ -10,16 +10,6 @@ log = Logger.get_logger(__name__)
|
|||
|
||||
def tag_data():
|
||||
return {
|
||||
# "Retiming": {
|
||||
# "editable": "1",
|
||||
# "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
|
||||
# "icon": "retiming.png",
|
||||
# "metadata": {
|
||||
# "family": "retiming",
|
||||
# "marginIn": 1,
|
||||
# "marginOut": 1
|
||||
# }
|
||||
# },
|
||||
"[Lenses]": {
|
||||
"Set lense here": {
|
||||
"editable": "1",
|
||||
|
|
@ -48,6 +38,16 @@ def tag_data():
|
|||
"family": "comment",
|
||||
"subset": "main"
|
||||
}
|
||||
},
|
||||
"FrameMain": {
|
||||
"editable": "1",
|
||||
"note": "Publishing a frame subset.",
|
||||
"icon": "z_layer_main.png",
|
||||
"metadata": {
|
||||
"family": "frame",
|
||||
"subset": "main",
|
||||
"format": "png"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,142 @@
|
|||
from pprint import pformat
|
||||
import re
|
||||
import ast
|
||||
import json
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
    """Collect single-frame publish instances from sequence tags.

    Scans every tag on the active timeline and, for each tag whose
    metadata declares ``family: frame``, groups the tagged frames by
    subset and creates one pyblish instance per subset.

    Tag is expected to have metadata:
        {
            "family": "frame"
            "subset": "main"
        }
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Frames"
    hosts = ["hiero"]

    def process(self, context):
        self._context = context

        # collect all sequence tags grouped per subset
        subset_data = self._create_frame_subset_data_sequence(context)

        self.log.debug("__ subset_data: {}".format(
            pformat(subset_data)
        ))

        # create instances
        self._create_instances(subset_data)

    def _get_tag_data(self, tag):
        """Return a tag's metadata as a plain dict with native value types.

        Hiero stores all tag metadata as strings under ``tag.``-prefixed
        keys; this strips the prefix and converts values that look like
        ints / booleans / None / python literals back to native types.

        Args:
            tag: hiero tag object exposing ``metadata()``.

        Returns:
            dict: cleaned key -> typed value mapping.
        """
        data = {}

        # get tag metadata attribute
        tag_data = tag.metadata()

        # convert tag metadata to normal keys names and values to correct types
        for k, v in dict(tag_data).items():
            key = k.replace("tag.", "")

            try:
                # capture exceptions which are related to strings only
                if re.match(r"^[\d]+$", v):
                    value = int(v)
                elif re.match(r"^True$", v):
                    value = True
                elif re.match(r"^False$", v):
                    value = False
                elif re.match(r"^None$", v):
                    value = None
                elif re.match(r"^[\w\d_]+$", v):
                    # plain identifier-like string, keep as-is
                    value = v
                else:
                    # anything else: try to parse as a python literal
                    value = ast.literal_eval(v)
            except (ValueError, SyntaxError):
                value = v

            data[key] = value

        return data

    def _create_frame_subset_data_sequence(self, context):
        """Collect frame-family tags and group their frames per subset.

        Returns:
            dict: ``{subset_name: {"frames": [...], "format": ...,
            "asset": ...}}``
        """
        sequence_tags = []
        sequence = context.data["activeTimeline"]

        # get all publishable sequence frames
        publish_frames = range(int(sequence.duration() + 1))

        self.log.debug("__ publish_frames: {}".format(
            pformat(publish_frames)
        ))

        # get all sequence tags carrying frame-family metadata
        for tag in sequence.tags():
            tag_data = self._get_tag_data(tag)
            self.log.debug("__ tag_data: {}".format(
                pformat(tag_data)
            ))
            # single guard replaces three sequential checks: empty dict,
            # missing "family" key and non-"frame" family all yield a
            # value != "frame" here.
            if tag_data.get("family") != "frame":
                continue

            sequence_tags.append(tag_data)

        self.log.debug("__ sequence_tags: {}".format(
            pformat(sequence_tags)
        ))

        # first collect all available subset tag frames
        subset_data = {}
        for tag_data in sequence_tags:
            # NOTE(review): assumes frame tags always carry "start",
            # "subset" and "format" metadata -- confirm against tag presets
            frame = int(tag_data["start"])

            # ignore tags placed outside the sequence range
            if frame not in publish_frames:
                continue

            subset = tag_data["subset"]

            if subset in subset_data:
                # update existing subset key
                subset_data[subset]["frames"].append(frame)
            else:
                # create new subset key
                subset_data[subset] = {
                    "frames": [frame],
                    "format": tag_data["format"],
                    "asset": context.data["assetEntity"]["name"]
                }
        return subset_data

    def _create_instances(self, subset_data):
        """Create one pyblish instance per collected subset."""
        # renamed loop variable: the original reused "subset_data" for
        # each value, shadowing the dict being iterated
        for subset_name, frames_data in subset_data.items():
            name = "frame" + subset_name.title()
            data = {
                "name": name,
                "label": "{} {}".format(name, frames_data["frames"]),
                "family": "image",
                "families": ["frame"],
                "asset": frames_data["asset"],
                "subset": name,
                "format": frames_data["format"],
                "frames": frames_data["frames"]
            }
            self._context.create_instance(**data)

            self.log.info(
                "Created instance: {}".format(
                    json.dumps(data, sort_keys=True, indent=4)
                )
            )
|
||||
82
openpype/hosts/hiero/plugins/publish/extract_frames.py
Normal file
82
openpype/hosts/hiero/plugins/publish/extract_frames.py
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
import openpype
|
||||
|
||||
|
||||
class ExtractFrames(openpype.api.Extractor):
    """Extract single frames from the active timeline with oiiotool.

    For every frame number collected on the instance, resolves the track
    item under that frame, converts the timeline frame to the source
    clip's frame and renders it to the staging directory, then registers
    the resulting file(s) as one representation.

    Raises:
        ValueError: when oiiotool reports it produced no output.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Frames"
    hosts = ["hiero"]
    families = ["frame"]
    movie_extensions = ["mov", "mp4"]

    def process(self, instance):
        oiio_tool_path = openpype.lib.get_oiio_tools_path()
        staging_dir = self.staging_dir(instance)
        output_template = os.path.join(staging_dir, instance.data["name"])
        sequence = instance.context.data["activeTimeline"]
        frames = instance.data["frames"]

        # Output extension is per-instance, not per-frame. Hoisted out of
        # the loop; the original assigned it inside the loop only, so an
        # empty "frames" list raised NameError in the representation
        # block below.
        output_ext = instance.data["format"]

        files = []
        for index, frame in enumerate(frames, start=1):
            track_item = sequence.trackItemAt(frame)
            media_source = track_item.source().mediaSource()
            input_path = media_source.fileinfos()[0].filename()
            # map the timeline frame into the source clip's frame range
            input_frame = (
                track_item.mapTimelineToSource(frame) +
                media_source.startTime()
            )
            output_path = "{}.{:04d}.{}".format(
                output_template, int(frame), output_ext)

            args = [oiio_tool_path]

            ext = os.path.splitext(input_path)[1][1:]
            if ext in self.movie_extensions:
                # movie containers expose frames as subimages
                args.extend(["--subimage", str(int(input_frame))])
            else:
                args.extend(["--frames", str(int(input_frame))])

            if ext == "exr":
                # approximate linear -> display transform for exr sources
                args.extend(["--powc", "0.45,0.45,0.45,1.0"])

            args.extend([input_path, "-o", output_path])
            output = openpype.api.run_subprocess(args)

            failed_output = "oiiotool produced no output."
            if failed_output in output:
                raise ValueError(
                    "oiiotool processing failed. Args: {}".format(args)
                )

            files.append(output_path)

            # Feedback to user because "oiiotool" can make the publishing
            # appear unresponsive. enumerate() replaces the original
            # list.index(frame) lookup, which was O(n) per iteration and
            # reported wrong progress for duplicate frame numbers.
            self.log.info(
                "Processed {} of {} frames".format(index, len(frames))
            )

        # Single file is stored as a plain basename string, multiple
        # files as a list -- same contract as before, without the two
        # near-identical representation dicts.
        if len(files) == 1:
            representation_files = os.path.basename(files[0])
        else:
            representation_files = [os.path.basename(x) for x in files]

        instance.data["representations"] = [
            {
                "name": output_ext,
                "ext": output_ext,
                "files": representation_files,
                "stagingDir": staging_dir
            }
        ]
|
||||
|
|
@ -68,6 +68,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
|
|||
"subset": "{}{}".format(asset, subset.capitalize()),
|
||||
"item": project,
|
||||
"family": "workfile",
|
||||
"families": [],
|
||||
"representations": [workfile_representation, thumb_representation]
|
||||
}
|
||||
|
||||
|
|
@ -77,6 +78,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
|
|||
# update context with main project attributes
|
||||
context_data = {
|
||||
"activeProject": project,
|
||||
"activeTimeline": active_timeline,
|
||||
"otioTimeline": otio_timeline,
|
||||
"currentFile": curent_file,
|
||||
"colorspace": self.get_colorspace(project),
|
||||
|
|
|
|||
|
|
@ -1,38 +0,0 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectClipResolution(pyblish.api.InstancePlugin):
    """Collect clip geometry resolution.

    Stores resolutionWidth / resolutionHeight / pixelAspect on the
    instance, taken from the active sequence format, or from the clip's
    own media when the instance requests source resolution.
    """

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect Clip Resolution"
    hosts = ["hiero"]
    families = ["clip"]

    def process(self, instance):
        sequence = instance.context.data["activeSequence"]
        item = instance.data["item"]
        source_resolution = instance.data.get("sourceResolution", None)

        # defaults come from the sequence format
        seq_format = sequence.format()
        width = int(seq_format.width())
        height = int(seq_format.height())
        aspect = seq_format.pixelAspect()

        # source exception: prefer the clip's own media resolution
        if source_resolution:
            media = item.source().mediaSource()
            width = int(media.width())
            height = int(media.height())
            aspect = media.pixelAspect()

        resolution_data = {
            "resolutionWidth": width,
            "resolutionHeight": height,
            "pixelAspect": aspect
        }
        # add to instance data
        instance.data.update(resolution_data)

        self.log.info("Resolution of instance '{}' is: {}".format(
            instance, resolution_data))
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectHostVersion(pyblish.api.ContextPlugin):
    """Inject the hosts version into context."""

    label = "Collect Host and HostVersion"
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        # imported locally so the plugin module can be discovered
        # outside of a running Nuke session
        import nuke
        import pyblish.api

        host_name = pyblish.api.current_host()
        context.set_data("host", host_name)
        context.set_data("hostVersion", value=nuke.NUKE_VERSION_STRING)
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
from pyblish import api
|
||||
|
||||
|
||||
class CollectTagRetime(api.InstancePlugin):
    """Collect Retiming from Tags of selected track items.

    Looks for a "retiming"-family tag on the instance and copies its
    margin values onto the instance, appending the "retime" family.
    """

    order = api.CollectorOrder + 0.014
    label = "Collect Retiming Tag"
    hosts = ["hiero"]
    families = ["clip"]

    def process(self, instance):
        # inspect every tag attached to the track item
        for tag in instance.data["tags"]:
            metadata = dict(tag["metadata"])
            tag_family = metadata.get("tag.family", "")

            # only retiming-family tags carry margin information
            if "retiming" not in tag_family:
                continue

            margin_in = metadata.get("tag.marginIn", "")
            margin_out = metadata.get("tag.marginOut", "")

            instance.data["retimeMarginIn"] = int(margin_in)
            instance.data["retimeMarginOut"] = int(margin_out)
            instance.data["retime"] = True

            self.log.info("retimeMarginIn: `{}`".format(margin_in))
            self.log.info("retimeMarginOut: `{}`".format(margin_out))

            instance.data["families"] += ["retime"]
|
||||
|
|
@ -1,223 +0,0 @@
|
|||
from compiler.ast import flatten
|
||||
from pyblish import api
|
||||
from openpype.hosts.hiero import api as phiero
|
||||
import hiero
|
||||
# from openpype.hosts.hiero.api import lib
|
||||
# reload(lib)
|
||||
# reload(phiero)
|
||||
|
||||
|
||||
class PreCollectInstances(api.ContextPlugin):
    """Collect all Track items selection.

    Builds one pyblish instance per tagged + enabled track item on the
    active sequence (preferring the current selection), plus an optional
    companion audio instance, and stores per-track effect subtrack items
    on the context as "tracksEffectItems".

    NOTE(review): module-level ``flatten`` comes from ``compiler.ast``,
    which only exists on Python 2 -- confirm the interpreter this runs on.
    """

    order = api.CollectorOrder - 0.509
    label = "Pre-collect Instances"
    hosts = ["hiero"]

    def process(self, context):
        # prefer selected track items; only enabled + tagged ones
        track_items = phiero.get_track_items(
            selected=True, check_tagged=True, check_enabled=True)
        # only return enabled track items
        if not track_items:
            # nothing selected: fall back to all enabled + tagged items
            track_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)
        # get sequence and video tracks
        sequence = context.data["activeSequence"]
        tracks = sequence.videoTracks()

        # add collection to context
        tracks_effect_items = self.collect_sub_track_items(tracks)

        context.data["tracksEffectItems"] = tracks_effect_items

        self.log.info(
            "Processing enabled track items: {}".format(len(track_items)))

        for _ti in track_items:
            data = {}
            clip = _ti.source()

            # get clips subtracks and annotations
            annotations = self.clip_annotations(clip)
            subtracks = self.clip_subtrack(_ti)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))

            # get pype tag data
            tag_parsed_data = phiero.get_track_item_pype_data(_ti)
            # self.log.debug(pformat(tag_parsed_data))

            # skip items without pype tag data
            if not tag_parsed_data:
                continue

            # skip tags that are not publish instances
            if tag_parsed_data.get("id") != "pyblish.avalon.instance":
                continue
            # add tag data to instance data (minus bookkeeping keys)
            data.update({
                k: v for k, v in tag_parsed_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            asset = tag_parsed_data["asset"]
            subset = tag_parsed_data["subset"]
            review_track = tag_parsed_data.get("reviewTrack")
            hiero_track = tag_parsed_data.get("heroTrack")
            audio = tag_parsed_data.get("audio")

            # remove audio attribute from data
            # NOTE(review): pop() without a default raises KeyError when
            # the tag has no "audio" key -- confirm tags always carry it
            data.pop("audio")

            # insert family into families
            family = tag_parsed_data["family"]
            families = [str(f) for f in tag_parsed_data["families"]]
            families.insert(0, str(family))

            track = _ti.parent()
            media_source = _ti.source().mediaSource()
            source_path = media_source.firstpath()
            file_head = media_source.filenameHead()
            file_info = media_source.fileinfos().pop()
            source_first_frame = int(file_info.startFrame())

            # apply only for review and master track instance
            if review_track and hiero_track:
                families += ["review", "ftrack"]

            data.update({
                "name": "{} {} {}".format(asset, subset, families),
                "asset": asset,
                "item": _ti,
                "families": families,

                # tags
                "tags": _ti.tags(),

                # track item attributes
                "track": track.name(),
                "trackItem": track,
                "reviewTrack": review_track,

                # version data
                "versionData": {
                    "colorspace": _ti.sourceMediaColourTransform()
                },

                # source attribute
                "source": source_path,
                "sourceMedia": media_source,
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "sourceFirst": source_first_frame,

                # clip's effect
                "clipEffectItems": subtracks
            })

            instance = context.create_instance(**data)

            self.log.info("Creating instance.data: {}".format(instance.data))

            # optionally create a companion audio instance for this item
            if audio:
                a_data = dict()

                # add tag data to instance data
                a_data.update({
                    k: v for k, v in tag_parsed_data.items()
                    if k not in ("id", "applieswhole", "label")
                })

                # create main attributes
                subset = "audioMain"
                family = "audio"
                families = ["clip", "ftrack"]
                families.insert(0, str(family))

                name = "{} {} {}".format(asset, subset, families)

                a_data.update({
                    "name": name,
                    "subset": subset,
                    "asset": asset,
                    "family": family,
                    "families": families,
                    "item": _ti,

                    # tags
                    "tags": _ti.tags(),
                })

                a_instance = context.create_instance(**a_data)
                self.log.info("Creating audio instance: {}".format(a_instance))

    @staticmethod
    def clip_annotations(clip):
        """
        Returns list of Clip's hiero.core.Annotation
        """
        annotations = []
        subTrackItems = flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations

    @staticmethod
    def clip_subtrack(clip):
        """
        Returns list of Clip's hiero.core.SubTrackItem
        """
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # avoid all annotations
            if isinstance(item, hiero.core.Annotation):
                continue
            # avoid all items that are not enabled
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks

    @staticmethod
    def collect_sub_track_items(tracks):
        """
        Returns dictionary with track index as key and list of subtracks
        """
        # collect all subtrack items
        sub_track_items = dict()
        for track in tracks:
            items = track.items()

            # skip if no clips on track > need track with effect only
            if items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = flatten(track.subTrackItems())

            # continue only if any subtrack items are collected
            if len(_sub_track_items) < 1:
                continue

            enabled_sti = list()
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if len(enabled_sti) < 1:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items
|
||||
|
|
@ -1,74 +0,0 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
from openpype.hosts.hiero import api as phiero
|
||||
from openpype.pipeline import legacy_io
|
||||
|
||||
|
||||
class PreCollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context.

    Publishes the open .hrox project as a "workfile" instance and stores
    the active project/sequence/tracks and colorspace settings on the
    context for later collectors.
    """

    label = "Pre-collect Workfile"
    order = pyblish.api.CollectorOrder - 0.51

    def process(self, context):
        asset = legacy_io.Session["AVALON_ASSET"]
        subset = "workfile"

        project = phiero.get_current_project()
        active_sequence = phiero.get_current_sequence()
        current_file = project.path()
        staging_dir = os.path.dirname(current_file)
        base_name = os.path.basename(current_file)

        # get workfile's colorspace properties: each name is a no-arg
        # getter on the project object
        colorspace_getters = (
            "useOCIOEnvironmentOverride",
            "lutSetting16Bit",
            "lutSetting8Bit",
            "lutSettingFloat",
            "lutSettingLog",
            "lutSettingViewer",
            "lutSettingWorkingSpace",
            "lutUseOCIOForExport",
            "ocioConfigName",
            "ocioConfigPath",
        )
        _clrs = {
            name: getattr(project, name)()
            for name in colorspace_getters
        }

        # set main project attributes to context
        context.data["activeProject"] = project
        context.data["activeSequence"] = active_sequence
        context.data["videoTracks"] = active_sequence.videoTracks()
        context.data["audioTracks"] = active_sequence.audioTracks()
        context.data["currentFile"] = current_file
        context.data["colorspace"] = _clrs

        self.log.info("currentFile: {}".format(current_file))

        # workfile representation is the .hrox project file itself
        representation = {
            "name": "hrox",
            "ext": "hrox",
            "files": base_name,
            "stagingDir": staging_dir,
        }

        instance_data = {
            "name": "{}_{}".format(asset, subset),
            "asset": asset,
            "subset": "{}{}".format(asset, subset.capitalize()),
            "item": project,
            "family": "workfile",

            # version data
            "versionData": {
                "colorspace": _clrs
            },

            # source attribute
            "sourcePath": current_file,
            "representations": [representation]
        }

        instance = context.create_instance(**instance_data)
        self.log.info("Creating instance: {}".format(instance))
|
||||
|
|
@ -79,6 +79,7 @@ IMAGE_PREFIXES = {
|
|||
"redshift": "defaultRenderGlobals.imageFilePrefix",
|
||||
}
|
||||
|
||||
RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
|
||||
|
||||
@attr.s
|
||||
class LayerMetadata(object):
|
||||
|
|
@ -1054,6 +1055,8 @@ class RenderProductsRenderman(ARenderProducts):
|
|||
:func:`ARenderProducts.get_render_products()`
|
||||
|
||||
"""
|
||||
from rfm2.api.displays import get_displays # noqa
|
||||
|
||||
cameras = [
|
||||
self.sanitize_camera_name(c)
|
||||
for c in self.get_renderable_cameras()
|
||||
|
|
@ -1066,42 +1069,56 @@ class RenderProductsRenderman(ARenderProducts):
|
|||
]
|
||||
products = []
|
||||
|
||||
default_ext = "exr"
|
||||
displays = cmds.listConnections("rmanGlobals.displays")
|
||||
for aov in displays:
|
||||
enabled = self._get_attr(aov, "enabled")
|
||||
# NOTE: This is guessing extensions from renderman display types.
|
||||
# Some of them are just framebuffers, d_texture format can be
|
||||
# set in display setting. We set those now to None, but it
|
||||
# should be handled more gracefully.
|
||||
display_types = {
|
||||
"d_deepexr": "exr",
|
||||
"d_it": None,
|
||||
"d_null": None,
|
||||
"d_openexr": "exr",
|
||||
"d_png": "png",
|
||||
"d_pointcloud": "ptc",
|
||||
"d_targa": "tga",
|
||||
"d_texture": None,
|
||||
"d_tiff": "tif"
|
||||
}
|
||||
|
||||
displays = get_displays()["displays"]
|
||||
for name, display in displays.items():
|
||||
enabled = display["params"]["enable"]["value"]
|
||||
if not enabled:
|
||||
continue
|
||||
|
||||
aov_name = str(aov)
|
||||
aov_name = name
|
||||
if aov_name == "rmanDefaultDisplay":
|
||||
aov_name = "beauty"
|
||||
|
||||
extensions = display_types.get(
|
||||
display["driverNode"]["type"], "exr")
|
||||
|
||||
for camera in cameras:
|
||||
product = RenderProduct(productName=aov_name,
|
||||
ext=default_ext,
|
||||
ext=extensions,
|
||||
camera=camera)
|
||||
products.append(product)
|
||||
|
||||
return products
|
||||
|
||||
def get_files(self, product, camera):
|
||||
def get_files(self, product):
|
||||
"""Get expected files.
|
||||
|
||||
In renderman we hack it with prepending path. This path would
|
||||
normally be translated from `rmanGlobals.imageOutputDir`. We skip
|
||||
this and hardcode prepend path we expect. There is no place for user
|
||||
to mess around with this settings anyway and it is enforced in
|
||||
render settings validator.
|
||||
"""
|
||||
files = super(RenderProductsRenderman, self).get_files(product, camera)
|
||||
files = super(RenderProductsRenderman, self).get_files(product)
|
||||
|
||||
layer_data = self.layer_data
|
||||
new_files = []
|
||||
|
||||
resolved_image_dir = re.sub("<scene>", layer_data.sceneName, RENDERMAN_IMAGE_DIR, flags=re.IGNORECASE) # noqa: E501
|
||||
resolved_image_dir = re.sub("<layer>", layer_data.layerName, resolved_image_dir, flags=re.IGNORECASE) # noqa: E501
|
||||
for file in files:
|
||||
new_file = "{}/{}/{}".format(
|
||||
layer_data["sceneName"], layer_data["layerName"], file
|
||||
)
|
||||
new_file = "{}/{}".format(resolved_image_dir, file)
|
||||
new_files.append(new_file)
|
||||
|
||||
return new_files
|
||||
|
|
|
|||
|
|
@ -76,7 +76,7 @@ class CreateRender(plugin.Creator):
|
|||
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
|
||||
'vray': 'vraySettings.fileNamePrefix',
|
||||
'arnold': 'defaultRenderGlobals.imageFilePrefix',
|
||||
'renderman': 'defaultRenderGlobals.imageFilePrefix',
|
||||
'renderman': 'rmanGlobals.imageFileFormat',
|
||||
'redshift': 'defaultRenderGlobals.imageFilePrefix'
|
||||
}
|
||||
|
||||
|
|
@ -84,7 +84,9 @@ class CreateRender(plugin.Creator):
|
|||
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa
|
||||
'vray': 'maya/<scene>/<Layer>/<Layer>',
|
||||
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa
|
||||
'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
|
||||
# this needs `imageOutputDir`
|
||||
# (<ws>/renders/maya/<scene>) set separately
|
||||
'renderman': '<layer>_<aov>.<f4>.<ext>',
|
||||
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>' # noqa
|
||||
}
|
||||
|
||||
|
|
@ -440,6 +442,10 @@ class CreateRender(plugin.Creator):
|
|||
|
||||
self._set_global_output_settings()
|
||||
|
||||
if renderer == "renderman":
|
||||
cmds.setAttr("rmanGlobals.imageOutputDir",
|
||||
"maya/<scene>/<layer>", type="string")
|
||||
|
||||
def _set_vray_settings(self, asset):
|
||||
# type: (dict) -> None
|
||||
"""Sets important settings for Vray."""
|
||||
|
|
|
|||
|
|
@ -69,14 +69,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
|
||||
redshift_AOV_prefix = "<BeautyPath>/<BeautyFile>{aov_separator}<RenderPass>" # noqa: E501
|
||||
|
||||
# WARNING: There is bug? in renderman, translating <scene> token
|
||||
# to something left behind mayas default image prefix. So instead
|
||||
# `SceneName_v01` it translates to:
|
||||
# `SceneName_v01/<RenderLayer>/<RenderLayers_<RenderPass>` that means
|
||||
# for example:
|
||||
# `SceneName_v01/Main/Main_<RenderPass>`. Possible solution is to define
|
||||
# custom token like <scene_name> to point to determined scene name.
|
||||
RendermanDirPrefix = "<ws>/renders/maya/<scene>/<layer>"
|
||||
renderman_dir_prefix = "maya/<scene>/<layer>"
|
||||
|
||||
R_AOV_TOKEN = re.compile(
|
||||
r'%a|<aov>|<renderpass>', re.IGNORECASE)
|
||||
|
|
@ -116,15 +109,22 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
|
||||
prefix = prefix.replace(
|
||||
"{aov_separator}", instance.data.get("aovSeparator", "_"))
|
||||
|
||||
required_prefix = "maya/<scene>"
|
||||
|
||||
if not anim_override:
|
||||
invalid = True
|
||||
cls.log.error("Animation needs to be enabled. Use the same "
|
||||
"frame for start and end to render single frame")
|
||||
|
||||
if not prefix.lower().startswith("maya/<scene>"):
|
||||
if renderer != "renderman" and not prefix.lower().startswith(
|
||||
required_prefix):
|
||||
invalid = True
|
||||
cls.log.error("Wrong image prefix [ {} ] - "
|
||||
"doesn't start with: 'maya/<scene>'".format(prefix))
|
||||
cls.log.error(
|
||||
("Wrong image prefix [ {} ] "
|
||||
" - doesn't start with: '{}'").format(
|
||||
prefix, required_prefix)
|
||||
)
|
||||
|
||||
if not re.search(cls.R_LAYER_TOKEN, prefix):
|
||||
invalid = True
|
||||
|
|
@ -198,7 +198,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
invalid = True
|
||||
cls.log.error("Wrong image prefix [ {} ]".format(file_prefix))
|
||||
|
||||
if dir_prefix.lower() != cls.RendermanDirPrefix.lower():
|
||||
if dir_prefix.lower() != cls.renderman_dir_prefix.lower():
|
||||
invalid = True
|
||||
cls.log.error("Wrong directory prefix [ {} ]".format(
|
||||
dir_prefix))
|
||||
|
|
@ -304,7 +304,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
default_prefix,
|
||||
type="string")
|
||||
cmds.setAttr("rmanGlobals.imageOutputDir",
|
||||
cls.RendermanDirPrefix,
|
||||
cls.renderman_dir_prefix,
|
||||
type="string")
|
||||
|
||||
if renderer == "vray":
|
||||
|
|
|
|||
|
|
@ -400,7 +400,7 @@ def add_write_node(name, **kwarg):
|
|||
return w
|
||||
|
||||
|
||||
def read(node):
|
||||
def read_avalon_data(node):
|
||||
"""Return user-defined knobs from given `node`
|
||||
|
||||
Args:
|
||||
|
|
@ -415,8 +415,6 @@ def read(node):
|
|||
return knob_name[len("avalon:"):]
|
||||
elif knob_name.startswith("ak:"):
|
||||
return knob_name[len("ak:"):]
|
||||
else:
|
||||
return knob_name
|
||||
|
||||
data = dict()
|
||||
|
||||
|
|
@ -445,7 +443,8 @@ def read(node):
|
|||
(knob_type == 26 and value)
|
||||
):
|
||||
key = compat_prefixed(knob_name)
|
||||
data[key] = value
|
||||
if key is not None:
|
||||
data[key] = value
|
||||
|
||||
if knob_name == first_user_knob:
|
||||
break
|
||||
|
|
@ -507,20 +506,74 @@ def get_created_node_imageio_setting(**kwarg):
|
|||
log.debug(kwarg)
|
||||
nodeclass = kwarg.get("nodeclass", None)
|
||||
creator = kwarg.get("creator", None)
|
||||
subset = kwarg.get("subset", None)
|
||||
|
||||
assert any([creator, nodeclass]), nuke.message(
|
||||
"`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
|
||||
|
||||
imageio_nodes = get_nuke_imageio_settings()["nodes"]["requiredNodes"]
|
||||
imageio_nodes = get_nuke_imageio_settings()["nodes"]
|
||||
required_nodes = imageio_nodes["requiredNodes"]
|
||||
override_nodes = imageio_nodes["overrideNodes"]
|
||||
|
||||
imageio_node = None
|
||||
for node in imageio_nodes:
|
||||
for node in required_nodes:
|
||||
log.info(node)
|
||||
if (nodeclass in node["nukeNodeClass"]) and (
|
||||
creator in node["plugins"]):
|
||||
if (
|
||||
nodeclass in node["nukeNodeClass"]
|
||||
and creator in node["plugins"]
|
||||
):
|
||||
imageio_node = node
|
||||
break
|
||||
|
||||
log.debug("__ imageio_node: {}".format(imageio_node))
|
||||
|
||||
# find matching override node
|
||||
override_imageio_node = None
|
||||
for onode in override_nodes:
|
||||
log.info(onode)
|
||||
if nodeclass not in node["nukeNodeClass"]:
|
||||
continue
|
||||
|
||||
if creator not in node["plugins"]:
|
||||
continue
|
||||
|
||||
if (
|
||||
onode["subsets"]
|
||||
and not any(re.search(s, subset) for s in onode["subsets"])
|
||||
):
|
||||
continue
|
||||
|
||||
override_imageio_node = onode
|
||||
break
|
||||
|
||||
log.debug("__ override_imageio_node: {}".format(override_imageio_node))
|
||||
# add overrides to imageio_node
|
||||
if override_imageio_node:
|
||||
# get all knob names in imageio_node
|
||||
knob_names = [k["name"] for k in imageio_node["knobs"]]
|
||||
|
||||
for oknob in override_imageio_node["knobs"]:
|
||||
for knob in imageio_node["knobs"]:
|
||||
# override matching knob name
|
||||
if oknob["name"] == knob["name"]:
|
||||
log.debug(
|
||||
"_ overriding knob: `{}` > `{}`".format(
|
||||
knob, oknob
|
||||
))
|
||||
if not oknob["value"]:
|
||||
# remove original knob if no value found in oknob
|
||||
imageio_node["knobs"].remove(knob)
|
||||
else:
|
||||
# override knob value with oknob's
|
||||
knob["value"] = oknob["value"]
|
||||
|
||||
# add missing knobs into imageio_node
|
||||
if oknob["name"] not in knob_names:
|
||||
log.debug(
|
||||
"_ adding knob: `{}`".format(oknob))
|
||||
imageio_node["knobs"].append(oknob)
|
||||
knob_names.append(oknob["name"])
|
||||
|
||||
log.info("ImageIO node: {}".format(imageio_node))
|
||||
return imageio_node
|
||||
|
||||
|
|
@ -542,7 +595,7 @@ def get_imageio_input_colorspace(filename):
|
|||
def on_script_load():
|
||||
''' Callback for ffmpeg support
|
||||
'''
|
||||
if nuke.env['LINUX']:
|
||||
if nuke.env["LINUX"]:
|
||||
nuke.tcl('load ffmpegReader')
|
||||
nuke.tcl('load ffmpegWriter')
|
||||
else:
|
||||
|
|
@ -567,7 +620,7 @@ def check_inventory_versions():
|
|||
|
||||
if container:
|
||||
node = nuke.toNode(container["objectName"])
|
||||
avalon_knob_data = read(node)
|
||||
avalon_knob_data = read_avalon_data(node)
|
||||
|
||||
# get representation from io
|
||||
representation = legacy_io.find_one({
|
||||
|
|
@ -593,7 +646,7 @@ def check_inventory_versions():
|
|||
versions = legacy_io.find({
|
||||
"type": "version",
|
||||
"parent": version["parent"]
|
||||
}).distinct('name')
|
||||
}).distinct("name")
|
||||
|
||||
max_version = max(versions)
|
||||
|
||||
|
|
@ -623,20 +676,20 @@ def writes_version_sync():
|
|||
if _NODE_TAB_NAME not in each.knobs():
|
||||
continue
|
||||
|
||||
avalon_knob_data = read(each)
|
||||
avalon_knob_data = read_avalon_data(each)
|
||||
|
||||
try:
|
||||
if avalon_knob_data['families'] not in ["render"]:
|
||||
log.debug(avalon_knob_data['families'])
|
||||
if avalon_knob_data["families"] not in ["render"]:
|
||||
log.debug(avalon_knob_data["families"])
|
||||
continue
|
||||
|
||||
node_file = each['file'].value()
|
||||
node_file = each["file"].value()
|
||||
|
||||
node_version = "v" + get_version_from_path(node_file)
|
||||
log.debug("node_version: {}".format(node_version))
|
||||
|
||||
node_new_file = node_file.replace(node_version, new_version)
|
||||
each['file'].setValue(node_new_file)
|
||||
each["file"].setValue(node_new_file)
|
||||
if not os.path.isdir(os.path.dirname(node_new_file)):
|
||||
log.warning("Path does not exist! I am creating it.")
|
||||
os.makedirs(os.path.dirname(node_new_file))
|
||||
|
|
@ -665,18 +718,19 @@ def check_subsetname_exists(nodes, subset_name):
|
|||
bool: True of False
|
||||
"""
|
||||
return next((True for n in nodes
|
||||
if subset_name in read(n).get("subset", "")),
|
||||
if subset_name in read_avalon_data(n).get("subset", "")),
|
||||
False)
|
||||
|
||||
|
||||
def get_render_path(node):
|
||||
''' Generate Render path from presets regarding avalon knob data
|
||||
'''
|
||||
data = {'avalon': read(node)}
|
||||
data = {'avalon': read_avalon_data(node)}
|
||||
data_preset = {
|
||||
"nodeclass": data['avalon']['family'],
|
||||
"families": [data['avalon']['families']],
|
||||
"creator": data['avalon']['creator']
|
||||
"nodeclass": data["avalon"]["family"],
|
||||
"families": [data["avalon"]["families"]],
|
||||
"creator": data["avalon"]["creator"],
|
||||
"subset": data["avalon"]["subset"]
|
||||
}
|
||||
|
||||
nuke_imageio_writes = get_created_node_imageio_setting(**data_preset)
|
||||
|
|
@ -749,7 +803,7 @@ def format_anatomy(data):
|
|||
def script_name():
|
||||
''' Returns nuke script path
|
||||
'''
|
||||
return nuke.root().knob('name').value()
|
||||
return nuke.root().knob("name").value()
|
||||
|
||||
|
||||
def add_button_write_to_read(node):
|
||||
|
|
@ -844,7 +898,7 @@ def create_write_node(name, data, input=None, prenodes=None,
|
|||
# adding dataflow template
|
||||
log.debug("imageio_writes: `{}`".format(imageio_writes))
|
||||
for knob in imageio_writes["knobs"]:
|
||||
_data.update({knob["name"]: knob["value"]})
|
||||
_data[knob["name"]] = knob["value"]
|
||||
|
||||
_data = fix_data_for_node_create(_data)
|
||||
|
||||
|
|
@ -1193,15 +1247,19 @@ class WorkfileSettings(object):
|
|||
|
||||
erased_viewers = []
|
||||
for v in nuke.allNodes(filter="Viewer"):
|
||||
v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
|
||||
# set viewProcess to preset from settings
|
||||
v["viewerProcess"].setValue(
|
||||
str(viewer_dict["viewerProcess"])
|
||||
)
|
||||
|
||||
if str(viewer_dict["viewerProcess"]) \
|
||||
not in v['viewerProcess'].value():
|
||||
not in v["viewerProcess"].value():
|
||||
copy_inputs = v.dependencies()
|
||||
copy_knobs = {k: v[k].value() for k in v.knobs()
|
||||
if k not in filter_knobs}
|
||||
|
||||
# delete viewer with wrong settings
|
||||
erased_viewers.append(v['name'].value())
|
||||
erased_viewers.append(v["name"].value())
|
||||
nuke.delete(v)
|
||||
|
||||
# create new viewer
|
||||
|
|
@ -1217,7 +1275,7 @@ class WorkfileSettings(object):
|
|||
nv[k].setValue(v)
|
||||
|
||||
# set viewerProcess
|
||||
nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
|
||||
nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"]))
|
||||
|
||||
if erased_viewers:
|
||||
log.warning(
|
||||
|
|
@ -1293,12 +1351,12 @@ class WorkfileSettings(object):
|
|||
for node in nuke.allNodes(filter="Group"):
|
||||
|
||||
# get data from avalon knob
|
||||
avalon_knob_data = read(node)
|
||||
avalon_knob_data = read_avalon_data(node)
|
||||
|
||||
if not avalon_knob_data:
|
||||
if avalon_knob_data.get("id") != "pyblish.avalon.instance":
|
||||
continue
|
||||
|
||||
if avalon_knob_data["id"] != "pyblish.avalon.instance":
|
||||
if "creator" not in avalon_knob_data:
|
||||
continue
|
||||
|
||||
# establish families
|
||||
|
|
@ -1309,7 +1367,8 @@ class WorkfileSettings(object):
|
|||
data_preset = {
|
||||
"nodeclass": avalon_knob_data["family"],
|
||||
"families": families,
|
||||
"creator": avalon_knob_data['creator']
|
||||
"creator": avalon_knob_data["creator"],
|
||||
"subset": avalon_knob_data["subset"]
|
||||
}
|
||||
|
||||
nuke_imageio_writes = get_created_node_imageio_setting(
|
||||
|
|
@ -1342,7 +1401,6 @@ class WorkfileSettings(object):
|
|||
|
||||
write_node[knob["name"]].setValue(value)
|
||||
|
||||
|
||||
def set_reads_colorspace(self, read_clrs_inputs):
|
||||
""" Setting colorspace to Read nodes
|
||||
|
||||
|
|
@ -1368,17 +1426,16 @@ class WorkfileSettings(object):
|
|||
current = n["colorspace"].value()
|
||||
future = str(preset_clrsp)
|
||||
if current != future:
|
||||
changes.update({
|
||||
n.name(): {
|
||||
"from": current,
|
||||
"to": future
|
||||
}
|
||||
})
|
||||
changes[n.name()] = {
|
||||
"from": current,
|
||||
"to": future
|
||||
}
|
||||
|
||||
log.debug(changes)
|
||||
if changes:
|
||||
msg = "Read nodes are not set to correct colospace:\n\n"
|
||||
for nname, knobs in changes.items():
|
||||
msg += str(
|
||||
msg += (
|
||||
" - node: '{0}' is now '{1}' but should be '{2}'\n"
|
||||
).format(nname, knobs["from"], knobs["to"])
|
||||
|
||||
|
|
@ -1610,17 +1667,17 @@ def get_hierarchical_attr(entity, attr, default=None):
|
|||
if not value:
|
||||
break
|
||||
|
||||
if value or entity['type'].lower() == 'project':
|
||||
if value or entity["type"].lower() == "project":
|
||||
return value
|
||||
|
||||
parent_id = entity['parent']
|
||||
parent_id = entity["parent"]
|
||||
if (
|
||||
entity['type'].lower() == 'asset'
|
||||
and entity.get('data', {}).get('visualParent')
|
||||
entity["type"].lower() == "asset"
|
||||
and entity.get("data", {}).get("visualParent")
|
||||
):
|
||||
parent_id = entity['data']['visualParent']
|
||||
parent_id = entity["data"]["visualParent"]
|
||||
|
||||
parent = legacy_io.find_one({'_id': parent_id})
|
||||
parent = legacy_io.find_one({"_id": parent_id})
|
||||
|
||||
return get_hierarchical_attr(parent, attr)
|
||||
|
||||
|
|
@ -1630,12 +1687,13 @@ def get_write_node_template_attr(node):
|
|||
|
||||
'''
|
||||
# get avalon data from node
|
||||
data = dict()
|
||||
data['avalon'] = read(node)
|
||||
data = {"avalon": read_avalon_data(node)}
|
||||
|
||||
data_preset = {
|
||||
"nodeclass": data['avalon']['family'],
|
||||
"families": [data['avalon']['families']],
|
||||
"creator": data['avalon']['creator']
|
||||
"nodeclass": data["avalon"]["family"],
|
||||
"families": [data["avalon"]["families"]],
|
||||
"creator": data["avalon"]["creator"],
|
||||
"subset": data["avalon"]["subset"]
|
||||
}
|
||||
|
||||
# get template data
|
||||
|
|
@ -1646,10 +1704,11 @@ def get_write_node_template_attr(node):
|
|||
"file": get_render_path(node)
|
||||
})
|
||||
|
||||
# adding imageio template
|
||||
{correct_data.update({k: v})
|
||||
for k, v in nuke_imageio_writes.items()
|
||||
if k not in ["_id", "_previous"]}
|
||||
# adding imageio knob presets
|
||||
for k, v in nuke_imageio_writes.items():
|
||||
if k in ["_id", "_previous"]:
|
||||
continue
|
||||
correct_data[k] = v
|
||||
|
||||
# fix badly encoded data
|
||||
return fix_data_for_node_create(correct_data)
|
||||
|
|
@ -1765,8 +1824,8 @@ def maintained_selection():
|
|||
|
||||
Example:
|
||||
>>> with maintained_selection():
|
||||
... node['selected'].setValue(True)
|
||||
>>> print(node['selected'].value())
|
||||
... node["selected"].setValue(True)
|
||||
>>> print(node["selected"].value())
|
||||
False
|
||||
"""
|
||||
previous_selection = nuke.selectedNodes()
|
||||
|
|
@ -1774,11 +1833,11 @@ def maintained_selection():
|
|||
yield
|
||||
finally:
|
||||
# unselect all selection in case there is some
|
||||
current_seletion = nuke.selectedNodes()
|
||||
[n['selected'].setValue(False) for n in current_seletion]
|
||||
reset_selection()
|
||||
|
||||
# and select all previously selected nodes
|
||||
if previous_selection:
|
||||
[n['selected'].setValue(True) for n in previous_selection]
|
||||
select_nodes(previous_selection)
|
||||
|
||||
|
||||
def reset_selection():
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ from .lib import (
|
|||
launch_workfiles_app,
|
||||
check_inventory_versions,
|
||||
set_avalon_knob_data,
|
||||
read,
|
||||
read_avalon_data,
|
||||
Context
|
||||
)
|
||||
|
||||
|
|
@ -359,7 +359,7 @@ def parse_container(node):
|
|||
dict: The container schema data for this container node.
|
||||
|
||||
"""
|
||||
data = read(node)
|
||||
data = read_avalon_data(node)
|
||||
|
||||
# (TODO) Remove key validation when `ls` has re-implemented.
|
||||
#
|
||||
|
|
|
|||
|
|
@ -260,8 +260,6 @@ class ExporterReview(object):
|
|||
return nuke_imageio["viewer"]["viewerProcess"]
|
||||
|
||||
|
||||
|
||||
|
||||
class ExporterReviewLut(ExporterReview):
|
||||
"""
|
||||
Generator object for review lut from Nuke
|
||||
|
|
@ -673,7 +671,8 @@ class AbstractWriteRender(OpenPypeCreator):
|
|||
write_data = {
|
||||
"nodeclass": self.n_class,
|
||||
"families": [self.family],
|
||||
"avalon": self.data
|
||||
"avalon": self.data,
|
||||
"subset": self.data["subset"]
|
||||
}
|
||||
|
||||
# add creator data
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ class ExtractReviewDataMov(openpype.api.Extractor):
|
|||
for o_name, o_data in self.outputs.items():
|
||||
f_families = o_data["filter"]["families"]
|
||||
f_task_types = o_data["filter"]["task_types"]
|
||||
f_subsets = o_data["filter"]["sebsets"]
|
||||
f_subsets = o_data["filter"]["subsets"]
|
||||
|
||||
self.log.debug(
|
||||
"f_families `{}` > families: {}".format(
|
||||
|
|
|
|||
|
|
@ -47,6 +47,7 @@ def install():
|
|||
print("installing OpenPype for Unreal ...")
|
||||
print("-=" * 40)
|
||||
logger.info("installing OpenPype for Unreal")
|
||||
pyblish.api.register_host("unreal")
|
||||
pyblish.api.register_plugin_path(str(PUBLISH_PATH))
|
||||
register_loader_plugin_path(str(LOAD_PATH))
|
||||
register_creator_plugin_path(str(CREATE_PATH))
|
||||
|
|
@ -392,3 +393,24 @@ def cast_map_to_str_dict(umap) -> dict:
|
|||
|
||||
"""
|
||||
return {str(key): str(value) for (key, value) in umap.items()}
|
||||
|
||||
|
||||
def get_subsequences(sequence: unreal.LevelSequence):
|
||||
"""Get list of subsequences from sequence.
|
||||
|
||||
Args:
|
||||
sequence (unreal.LevelSequence): Sequence
|
||||
|
||||
Returns:
|
||||
list(unreal.LevelSequence): List of subsequences
|
||||
|
||||
"""
|
||||
tracks = sequence.get_master_tracks()
|
||||
subscene_track = None
|
||||
for t in tracks:
|
||||
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
|
||||
subscene_track = t
|
||||
break
|
||||
if subscene_track is not None and subscene_track.get_sections():
|
||||
return subscene_track.get_sections()
|
||||
return []
|
||||
|
|
|
|||
125
openpype/hosts/unreal/api/rendering.py
Normal file
125
openpype/hosts/unreal/api/rendering.py
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
import unreal
|
||||
|
||||
from openpype.hosts.unreal.api import pipeline
|
||||
|
||||
|
||||
queue = None
|
||||
executor = None
|
||||
|
||||
|
||||
def _queue_finish_callback(exec, success):
|
||||
unreal.log("Render completed. Success: " + str(success))
|
||||
|
||||
# Delete our reference so we don't keep it alive.
|
||||
global executor
|
||||
global queue
|
||||
del executor
|
||||
del queue
|
||||
|
||||
|
||||
def _job_finish_callback(job, success):
|
||||
# You can make any edits you want to the editor world here, and the world
|
||||
# will be duplicated when the next render happens. Make sure you undo your
|
||||
# edits in OnQueueFinishedCallback if you don't want to leak state changes
|
||||
# into the editor world.
|
||||
unreal.log("Individual job completed.")
|
||||
|
||||
|
||||
def start_rendering():
|
||||
"""
|
||||
Start the rendering process.
|
||||
"""
|
||||
print("Starting rendering...")
|
||||
|
||||
# Get selected sequences
|
||||
assets = unreal.EditorUtilityLibrary.get_selected_assets()
|
||||
|
||||
# instances = pipeline.ls_inst()
|
||||
instances = [
|
||||
a for a in assets
|
||||
if a.get_class().get_name() == "OpenPypePublishInstance"]
|
||||
|
||||
inst_data = []
|
||||
|
||||
for i in instances:
|
||||
data = pipeline.parse_container(i.get_path_name())
|
||||
if data["family"] == "render":
|
||||
inst_data.append(data)
|
||||
|
||||
# subsystem = unreal.get_editor_subsystem(
|
||||
# unreal.MoviePipelineQueueSubsystem)
|
||||
# queue = subsystem.get_queue()
|
||||
global queue
|
||||
queue = unreal.MoviePipelineQueue()
|
||||
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
for i in inst_data:
|
||||
sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset()
|
||||
|
||||
sequences = [{
|
||||
"sequence": sequence,
|
||||
"output": f"{i['output']}",
|
||||
"frame_range": (
|
||||
int(float(i["frameStart"])),
|
||||
int(float(i["frameEnd"])) + 1)
|
||||
}]
|
||||
render_list = []
|
||||
|
||||
# Get all the sequences to render. If there are subsequences,
|
||||
# add them and their frame ranges to the render list. We also
|
||||
# use the names for the output paths.
|
||||
for s in sequences:
|
||||
subscenes = pipeline.get_subsequences(s.get('sequence'))
|
||||
|
||||
if subscenes:
|
||||
for ss in subscenes:
|
||||
sequences.append({
|
||||
"sequence": ss.get_sequence(),
|
||||
"output": (f"{s.get('output')}/"
|
||||
f"{ss.get_sequence().get_name()}"),
|
||||
"frame_range": (
|
||||
ss.get_start_frame(), ss.get_end_frame())
|
||||
})
|
||||
else:
|
||||
# Avoid rendering camera sequences
|
||||
if "_camera" not in s.get('sequence').get_name():
|
||||
render_list.append(s)
|
||||
|
||||
# Create the rendering jobs and add them to the queue.
|
||||
for r in render_list:
|
||||
job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
|
||||
job.sequence = unreal.SoftObjectPath(i["master_sequence"])
|
||||
job.map = unreal.SoftObjectPath(i["master_level"])
|
||||
job.author = "OpenPype"
|
||||
|
||||
# User data could be used to pass data to the job, that can be
|
||||
# read in the job's OnJobFinished callback. We could,
|
||||
# for instance, pass the AvalonPublishInstance's path to the job.
|
||||
# job.user_data = ""
|
||||
|
||||
settings = job.get_configuration().find_or_add_setting_by_class(
|
||||
unreal.MoviePipelineOutputSetting)
|
||||
settings.output_resolution = unreal.IntPoint(1920, 1080)
|
||||
settings.custom_start_frame = r.get("frame_range")[0]
|
||||
settings.custom_end_frame = r.get("frame_range")[1]
|
||||
settings.use_custom_playback_range = True
|
||||
settings.file_name_format = "{sequence_name}.{frame_number}"
|
||||
settings.output_directory.path += r.get('output')
|
||||
|
||||
renderPass = job.get_configuration().find_or_add_setting_by_class(
|
||||
unreal.MoviePipelineDeferredPassBase)
|
||||
renderPass.disable_multisample_effects = True
|
||||
|
||||
job.get_configuration().find_or_add_setting_by_class(
|
||||
unreal.MoviePipelineImageSequenceOutput_PNG)
|
||||
|
||||
# If there are jobs in the queue, start the rendering process.
|
||||
if queue.get_jobs():
|
||||
global executor
|
||||
executor = unreal.MoviePipelinePIEExecutor()
|
||||
executor.on_executor_finished_delegate.add_callable_unique(
|
||||
_queue_finish_callback)
|
||||
executor.on_individual_job_finished_delegate.add_callable_unique(
|
||||
_job_finish_callback) # Only available on PIE Executor
|
||||
executor.execute(queue)
|
||||
|
|
@ -7,6 +7,7 @@ from openpype import (
|
|||
)
|
||||
from openpype.tools.utils import host_tools
|
||||
from openpype.tools.utils.lib import qt_app_context
|
||||
from openpype.hosts.unreal.api import rendering
|
||||
|
||||
|
||||
class ToolsBtnsWidget(QtWidgets.QWidget):
|
||||
|
|
@ -20,6 +21,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
|
|||
load_btn = QtWidgets.QPushButton("Load...", self)
|
||||
publish_btn = QtWidgets.QPushButton("Publish...", self)
|
||||
manage_btn = QtWidgets.QPushButton("Manage...", self)
|
||||
render_btn = QtWidgets.QPushButton("Render...", self)
|
||||
experimental_tools_btn = QtWidgets.QPushButton(
|
||||
"Experimental tools...", self
|
||||
)
|
||||
|
|
@ -30,6 +32,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
|
|||
layout.addWidget(load_btn, 0)
|
||||
layout.addWidget(publish_btn, 0)
|
||||
layout.addWidget(manage_btn, 0)
|
||||
layout.addWidget(render_btn, 0)
|
||||
layout.addWidget(experimental_tools_btn, 0)
|
||||
layout.addStretch(1)
|
||||
|
||||
|
|
@ -37,6 +40,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
|
|||
load_btn.clicked.connect(self._on_load)
|
||||
publish_btn.clicked.connect(self._on_publish)
|
||||
manage_btn.clicked.connect(self._on_manage)
|
||||
render_btn.clicked.connect(self._on_render)
|
||||
experimental_tools_btn.clicked.connect(self._on_experimental)
|
||||
|
||||
def _on_create(self):
|
||||
|
|
@ -51,6 +55,9 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
|
|||
def _on_manage(self):
|
||||
self.tool_required.emit("sceneinventory")
|
||||
|
||||
def _on_render(self):
|
||||
rendering.start_rendering()
|
||||
|
||||
def _on_experimental(self):
|
||||
self.tool_required.emit("experimental_tools")
|
||||
|
||||
|
|
|
|||
|
|
@ -254,6 +254,7 @@ def create_unreal_project(project_name: str,
|
|||
{"Name": "PythonScriptPlugin", "Enabled": True},
|
||||
{"Name": "EditorScriptingUtilities", "Enabled": True},
|
||||
{"Name": "SequencerScripting", "Enabled": True},
|
||||
{"Name": "MovieRenderPipeline", "Enabled": True},
|
||||
{"Name": "OpenPype", "Enabled": True}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
from unreal import EditorLevelLibrary as ell
|
||||
from unreal import EditorLevelLibrary
|
||||
|
||||
from openpype.hosts.unreal.api import plugin
|
||||
from openpype.hosts.unreal.api.pipeline import instantiate
|
||||
|
||||
|
|
@ -28,13 +29,13 @@ class CreateLayout(plugin.Creator):
|
|||
# sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
|
||||
# selection = [a.get_path_name() for a in sel_objects]
|
||||
|
||||
data["level"] = ell.get_editor_world().get_path_name()
|
||||
data["level"] = EditorLevelLibrary.get_editor_world().get_path_name()
|
||||
|
||||
data["members"] = []
|
||||
|
||||
if (self.options or {}).get("useSelection"):
|
||||
# Set as members the selected actors
|
||||
for actor in ell.get_selected_level_actors():
|
||||
for actor in EditorLevelLibrary.get_selected_level_actors():
|
||||
data["members"].append("{}.{}".format(
|
||||
actor.get_outer().get_name(), actor.get_name()))
|
||||
|
||||
|
|
|
|||
111
openpype/hosts/unreal/plugins/create/create_render.py
Normal file
111
openpype/hosts/unreal/plugins/create/create_render.py
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
import unreal
|
||||
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.hosts.unreal.api import pipeline
|
||||
from openpype.hosts.unreal.api.plugin import Creator
|
||||
|
||||
|
||||
class CreateRender(Creator):
|
||||
"""Create instance for sequence for rendering"""
|
||||
|
||||
name = "unrealRender"
|
||||
label = "Unreal - Render"
|
||||
family = "render"
|
||||
icon = "cube"
|
||||
asset_types = ["LevelSequence"]
|
||||
|
||||
root = "/Game/OpenPype/PublishInstances"
|
||||
suffix = "_INS"
|
||||
|
||||
def process(self):
|
||||
subset = self.data["subset"]
|
||||
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
# Get the master sequence and the master level.
|
||||
# There should be only one sequence and one level in the directory.
|
||||
filter = unreal.ARFilter(
|
||||
class_names=["LevelSequence"],
|
||||
package_paths=[f"/Game/OpenPype/{self.data['asset']}"],
|
||||
recursive_paths=False)
|
||||
sequences = ar.get_assets(filter)
|
||||
ms = sequences[0].get_editor_property('object_path')
|
||||
filter = unreal.ARFilter(
|
||||
class_names=["World"],
|
||||
package_paths=[f"/Game/OpenPype/{self.data['asset']}"],
|
||||
recursive_paths=False)
|
||||
levels = ar.get_assets(filter)
|
||||
ml = levels[0].get_editor_property('object_path')
|
||||
|
||||
selection = []
|
||||
if (self.options or {}).get("useSelection"):
|
||||
sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
|
||||
selection = [
|
||||
a.get_path_name() for a in sel_objects
|
||||
if a.get_class().get_name() in self.asset_types]
|
||||
else:
|
||||
selection.append(self.data['sequence'])
|
||||
|
||||
unreal.log(f"selection: {selection}")
|
||||
|
||||
path = f"{self.root}"
|
||||
unreal.EditorAssetLibrary.make_directory(path)
|
||||
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
for a in selection:
|
||||
ms_obj = ar.get_asset_by_object_path(ms).get_asset()
|
||||
|
||||
seq_data = None
|
||||
|
||||
if a == ms:
|
||||
seq_data = {
|
||||
"sequence": ms_obj,
|
||||
"output": f"{ms_obj.get_name()}",
|
||||
"frame_range": (
|
||||
ms_obj.get_playback_start(), ms_obj.get_playback_end())
|
||||
}
|
||||
else:
|
||||
seq_data_list = [{
|
||||
"sequence": ms_obj,
|
||||
"output": f"{ms_obj.get_name()}",
|
||||
"frame_range": (
|
||||
ms_obj.get_playback_start(), ms_obj.get_playback_end())
|
||||
}]
|
||||
|
||||
for s in seq_data_list:
|
||||
subscenes = pipeline.get_subsequences(s.get('sequence'))
|
||||
|
||||
for ss in subscenes:
|
||||
curr_data = {
|
||||
"sequence": ss.get_sequence(),
|
||||
"output": (f"{s.get('output')}/"
|
||||
f"{ss.get_sequence().get_name()}"),
|
||||
"frame_range": (
|
||||
ss.get_start_frame(), ss.get_end_frame() - 1)
|
||||
}
|
||||
|
||||
if ss.get_sequence().get_path_name() == a:
|
||||
seq_data = curr_data
|
||||
break
|
||||
seq_data_list.append(curr_data)
|
||||
|
||||
if seq_data is not None:
|
||||
break
|
||||
|
||||
if not seq_data:
|
||||
continue
|
||||
|
||||
d = self.data.copy()
|
||||
d["members"] = [a]
|
||||
d["sequence"] = a
|
||||
d["master_sequence"] = ms
|
||||
d["master_level"] = ml
|
||||
d["output"] = seq_data.get('output')
|
||||
d["frameStart"] = seq_data.get('frame_range')[0]
|
||||
d["frameEnd"] = seq_data.get('frame_range')[1]
|
||||
|
||||
container_name = f"{subset}{self.suffix}"
|
||||
pipeline.create_publish_instance(
|
||||
instance=container_name, path=path)
|
||||
pipeline.imprint(f"{path}/{container_name}", d)
|
||||
|
|
@ -3,13 +3,17 @@
|
|||
import os
|
||||
import json
|
||||
|
||||
import unreal
|
||||
from unreal import EditorAssetLibrary
|
||||
from unreal import MovieSceneSkeletalAnimationTrack
|
||||
from unreal import MovieSceneSkeletalAnimationSection
|
||||
|
||||
from openpype.pipeline import (
|
||||
get_representation_path,
|
||||
AVALON_CONTAINER_ID
|
||||
)
|
||||
from openpype.hosts.unreal.api import plugin
|
||||
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
|
||||
import unreal # noqa
|
||||
|
||||
|
||||
class AnimationFBXLoader(plugin.Loader):
|
||||
|
|
@ -21,59 +25,13 @@ class AnimationFBXLoader(plugin.Loader):
|
|||
icon = "cube"
|
||||
color = "orange"
|
||||
|
||||
def load(self, context, name, namespace, options=None):
|
||||
"""
|
||||
Load and containerise representation into Content Browser.
|
||||
|
||||
This is two step process. First, import FBX to temporary path and
|
||||
then call `containerise()` on it - this moves all content to new
|
||||
directory and then it will create AssetContainer there and imprint it
|
||||
with metadata. This will mark this path as container.
|
||||
|
||||
Args:
|
||||
context (dict): application context
|
||||
name (str): subset name
|
||||
namespace (str): in Unreal this is basically path to container.
|
||||
This is not passed here, so namespace is set
|
||||
by `containerise()` because only then we know
|
||||
real path.
|
||||
data (dict): Those would be data to be imprinted. This is not used
|
||||
now, data are imprinted by `containerise()`.
|
||||
|
||||
Returns:
|
||||
list(str): list of container content
|
||||
|
||||
"""
|
||||
# Create directory for asset and OpenPype container
|
||||
root = "/Game/OpenPype/Assets"
|
||||
asset = context.get('asset').get('name')
|
||||
suffix = "_CON"
|
||||
if asset:
|
||||
asset_name = "{}_{}".format(asset, name)
|
||||
else:
|
||||
asset_name = "{}".format(name)
|
||||
|
||||
tools = unreal.AssetToolsHelpers().get_asset_tools()
|
||||
asset_dir, container_name = tools.create_unique_asset_name(
|
||||
"{}/{}/{}".format(root, asset, name), suffix="")
|
||||
|
||||
container_name += suffix
|
||||
|
||||
unreal.EditorAssetLibrary.make_directory(asset_dir)
|
||||
|
||||
def _process(self, asset_dir, asset_name, instance_name):
|
||||
automated = False
|
||||
actor = None
|
||||
|
||||
task = unreal.AssetImportTask()
|
||||
task.options = unreal.FbxImportUI()
|
||||
|
||||
lib_path = self.fname.replace("fbx", "json")
|
||||
|
||||
with open(lib_path, "r") as fp:
|
||||
data = json.load(fp)
|
||||
|
||||
instance_name = data.get("instance_name")
|
||||
|
||||
if instance_name:
|
||||
automated = True
|
||||
# Old method to get the actor
|
||||
|
|
@ -131,6 +89,116 @@ class AnimationFBXLoader(plugin.Loader):
|
|||
|
||||
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
|
||||
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
asset_dir, recursive=True, include_folder=True
|
||||
)
|
||||
|
||||
animation = None
|
||||
|
||||
for a in asset_content:
|
||||
imported_asset_data = EditorAssetLibrary.find_asset_data(a)
|
||||
imported_asset = unreal.AssetRegistryHelpers.get_asset(
|
||||
imported_asset_data)
|
||||
if imported_asset.__class__ == unreal.AnimSequence:
|
||||
animation = imported_asset
|
||||
break
|
||||
|
||||
if animation:
|
||||
animation.set_editor_property('enable_root_motion', True)
|
||||
actor.skeletal_mesh_component.set_editor_property(
|
||||
'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE)
|
||||
actor.skeletal_mesh_component.animation_data.set_editor_property(
|
||||
'anim_to_play', animation)
|
||||
|
||||
return animation
|
||||
|
||||
def load(self, context, name, namespace, options=None):
|
||||
"""
|
||||
Load and containerise representation into Content Browser.
|
||||
|
||||
This is two step process. First, import FBX to temporary path and
|
||||
then call `containerise()` on it - this moves all content to new
|
||||
directory and then it will create AssetContainer there and imprint it
|
||||
with metadata. This will mark this path as container.
|
||||
|
||||
Args:
|
||||
context (dict): application context
|
||||
name (str): subset name
|
||||
namespace (str): in Unreal this is basically path to container.
|
||||
This is not passed here, so namespace is set
|
||||
by `containerise()` because only then we know
|
||||
real path.
|
||||
data (dict): Those would be data to be imprinted. This is not used
|
||||
now, data are imprinted by `containerise()`.
|
||||
|
||||
Returns:
|
||||
list(str): list of container content
|
||||
"""
|
||||
|
||||
# Create directory for asset and avalon container
|
||||
hierarchy = context.get('asset').get('data').get('parents')
|
||||
root = "/Game/OpenPype"
|
||||
asset = context.get('asset').get('name')
|
||||
suffix = "_CON"
|
||||
if asset:
|
||||
asset_name = "{}_{}".format(asset, name)
|
||||
else:
|
||||
asset_name = "{}".format(name)
|
||||
|
||||
tools = unreal.AssetToolsHelpers().get_asset_tools()
|
||||
asset_dir, container_name = tools.create_unique_asset_name(
|
||||
f"{root}/Animations/{asset}/{name}", suffix="")
|
||||
|
||||
hierarchy_dir = root
|
||||
for h in hierarchy:
|
||||
hierarchy_dir = f"{hierarchy_dir}/{h}"
|
||||
hierarchy_dir = f"{hierarchy_dir}/{asset}"
|
||||
|
||||
container_name += suffix
|
||||
|
||||
EditorAssetLibrary.make_directory(asset_dir)
|
||||
|
||||
libpath = self.fname.replace("fbx", "json")
|
||||
|
||||
with open(libpath, "r") as fp:
|
||||
data = json.load(fp)
|
||||
|
||||
instance_name = data.get("instance_name")
|
||||
|
||||
animation = self._process(asset_dir, container_name, instance_name)
|
||||
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
hierarchy_dir, recursive=True, include_folder=False)
|
||||
|
||||
# Get the sequence for the layout, excluding the camera one.
|
||||
sequences = [a for a in asset_content
|
||||
if (EditorAssetLibrary.find_asset_data(a).get_class() ==
|
||||
unreal.LevelSequence.static_class() and
|
||||
"_camera" not in a.split("/")[-1])]
|
||||
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
for s in sequences:
|
||||
sequence = ar.get_asset_by_object_path(s).get_asset()
|
||||
possessables = [
|
||||
p for p in sequence.get_possessables()
|
||||
if p.get_display_name() == instance_name]
|
||||
|
||||
for p in possessables:
|
||||
tracks = [
|
||||
t for t in p.get_tracks()
|
||||
if (t.get_class() ==
|
||||
MovieSceneSkeletalAnimationTrack.static_class())]
|
||||
|
||||
for t in tracks:
|
||||
sections = [
|
||||
s for s in t.get_sections()
|
||||
if (s.get_class() ==
|
||||
MovieSceneSkeletalAnimationSection.static_class())]
|
||||
|
||||
for s in sections:
|
||||
s.params.set_editor_property('animation', animation)
|
||||
|
||||
# Create Asset Container
|
||||
unreal_pipeline.create_container(
|
||||
container=container_name, path=asset_dir)
|
||||
|
|
@ -150,29 +218,11 @@ class AnimationFBXLoader(plugin.Loader):
|
|||
unreal_pipeline.imprint(
|
||||
"{}/{}".format(asset_dir, container_name), data)
|
||||
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
asset_dir, recursive=True, include_folder=True
|
||||
)
|
||||
imported_content = EditorAssetLibrary.list_assets(
|
||||
asset_dir, recursive=True, include_folder=False)
|
||||
|
||||
animation = None
|
||||
|
||||
for a in asset_content:
|
||||
unreal.EditorAssetLibrary.save_asset(a)
|
||||
imported_asset_data = unreal.EditorAssetLibrary.find_asset_data(a)
|
||||
imported_asset = unreal.AssetRegistryHelpers.get_asset(
|
||||
imported_asset_data)
|
||||
if imported_asset.__class__ == unreal.AnimSequence:
|
||||
animation = imported_asset
|
||||
break
|
||||
|
||||
if animation:
|
||||
animation.set_editor_property('enable_root_motion', True)
|
||||
actor.skeletal_mesh_component.set_editor_property(
|
||||
'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE)
|
||||
actor.skeletal_mesh_component.animation_data.set_editor_property(
|
||||
'anim_to_play', animation)
|
||||
|
||||
return asset_content
|
||||
for a in imported_content:
|
||||
EditorAssetLibrary.save_asset(a)
|
||||
|
||||
def update(self, container, representation):
|
||||
name = container["asset_name"]
|
||||
|
|
@ -218,7 +268,7 @@ class AnimationFBXLoader(plugin.Loader):
|
|||
task.options.anim_sequence_import_data.set_editor_property(
|
||||
'convert_scene', True)
|
||||
|
||||
skeletal_mesh = unreal.EditorAssetLibrary.load_asset(
|
||||
skeletal_mesh = EditorAssetLibrary.load_asset(
|
||||
container.get('namespace') + "/" + container.get('asset_name'))
|
||||
skeleton = skeletal_mesh.get_editor_property('skeleton')
|
||||
task.options.set_editor_property('skeleton', skeleton)
|
||||
|
|
@ -235,22 +285,22 @@ class AnimationFBXLoader(plugin.Loader):
|
|||
"parent": str(representation["parent"])
|
||||
})
|
||||
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
destination_path, recursive=True, include_folder=True
|
||||
)
|
||||
|
||||
for a in asset_content:
|
||||
unreal.EditorAssetLibrary.save_asset(a)
|
||||
EditorAssetLibrary.save_asset(a)
|
||||
|
||||
def remove(self, container):
|
||||
path = container["namespace"]
|
||||
parent_path = os.path.dirname(path)
|
||||
|
||||
unreal.EditorAssetLibrary.delete_directory(path)
|
||||
EditorAssetLibrary.delete_directory(path)
|
||||
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
parent_path, recursive=False, include_folder=True
|
||||
)
|
||||
|
||||
if len(asset_content) == 0:
|
||||
unreal.EditorAssetLibrary.delete_directory(parent_path)
|
||||
EditorAssetLibrary.delete_directory(parent_path)
|
||||
|
|
|
|||
|
|
@ -2,13 +2,16 @@
|
|||
"""Load camera from FBX."""
|
||||
import os
|
||||
|
||||
import unreal
|
||||
from unreal import EditorAssetLibrary
|
||||
from unreal import EditorLevelLibrary
|
||||
|
||||
from openpype.pipeline import (
|
||||
AVALON_CONTAINER_ID,
|
||||
legacy_io,
|
||||
)
|
||||
from openpype.hosts.unreal.api import plugin
|
||||
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
|
||||
import unreal # noqa
|
||||
|
||||
|
||||
class CameraLoader(plugin.Loader):
|
||||
|
|
@ -20,6 +23,40 @@ class CameraLoader(plugin.Loader):
|
|||
icon = "cube"
|
||||
color = "orange"
|
||||
|
||||
def _get_data(self, asset_name):
|
||||
asset_doc = legacy_io.find_one({
|
||||
"type": "asset",
|
||||
"name": asset_name
|
||||
})
|
||||
|
||||
return asset_doc.get("data")
|
||||
|
||||
def _set_sequence_hierarchy(
|
||||
self, seq_i, seq_j, min_frame_j, max_frame_j
|
||||
):
|
||||
tracks = seq_i.get_master_tracks()
|
||||
track = None
|
||||
for t in tracks:
|
||||
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
|
||||
track = t
|
||||
break
|
||||
if not track:
|
||||
track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
|
||||
|
||||
subscenes = track.get_sections()
|
||||
subscene = None
|
||||
for s in subscenes:
|
||||
if s.get_editor_property('sub_sequence') == seq_j:
|
||||
subscene = s
|
||||
break
|
||||
if not subscene:
|
||||
subscene = track.add_section()
|
||||
subscene.set_row_index(len(track.get_sections()))
|
||||
subscene.set_editor_property('sub_sequence', seq_j)
|
||||
subscene.set_range(
|
||||
min_frame_j,
|
||||
max_frame_j + 1)
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
"""
|
||||
Load and containerise representation into Content Browser.
|
||||
|
|
@ -43,8 +80,14 @@ class CameraLoader(plugin.Loader):
|
|||
list(str): list of container content
|
||||
"""
|
||||
|
||||
# Create directory for asset and OpenPype container
|
||||
root = "/Game/OpenPype/Assets"
|
||||
# Create directory for asset and avalon container
|
||||
hierarchy = context.get('asset').get('data').get('parents')
|
||||
root = "/Game/OpenPype"
|
||||
hierarchy_dir = root
|
||||
hierarchy_list = []
|
||||
for h in hierarchy:
|
||||
hierarchy_dir = f"{hierarchy_dir}/{h}"
|
||||
hierarchy_list.append(hierarchy_dir)
|
||||
asset = context.get('asset').get('name')
|
||||
suffix = "_CON"
|
||||
if asset:
|
||||
|
|
@ -54,10 +97,10 @@ class CameraLoader(plugin.Loader):
|
|||
|
||||
tools = unreal.AssetToolsHelpers().get_asset_tools()
|
||||
|
||||
# Create a unique name for the camera directory
|
||||
unique_number = 1
|
||||
|
||||
if unreal.EditorAssetLibrary.does_directory_exist(f"{root}/{asset}"):
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"):
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
f"{root}/{asset}", recursive=False, include_folder=True
|
||||
)
|
||||
|
||||
|
|
@ -76,42 +119,122 @@ class CameraLoader(plugin.Loader):
|
|||
unique_number = f_numbers[-1] + 1
|
||||
|
||||
asset_dir, container_name = tools.create_unique_asset_name(
|
||||
f"{root}/{asset}/{name}_{unique_number:02d}", suffix="")
|
||||
f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="")
|
||||
|
||||
container_name += suffix
|
||||
|
||||
unreal.EditorAssetLibrary.make_directory(asset_dir)
|
||||
current_level = EditorLevelLibrary.get_editor_world().get_full_name()
|
||||
EditorLevelLibrary.save_all_dirty_levels()
|
||||
|
||||
sequence = tools.create_asset(
|
||||
asset_name=asset_name,
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
filter = unreal.ARFilter(
|
||||
class_names=["World"],
|
||||
package_paths=[f"{hierarchy_dir}/{asset}/"],
|
||||
recursive_paths=True)
|
||||
maps = ar.get_assets(filter)
|
||||
|
||||
# There should be only one map in the list
|
||||
EditorLevelLibrary.load_level(maps[0].get_full_name())
|
||||
|
||||
# Get all the sequences in the hierarchy. It will create them, if
|
||||
# they don't exist.
|
||||
sequences = []
|
||||
frame_ranges = []
|
||||
i = 0
|
||||
for h in hierarchy_list:
|
||||
root_content = EditorAssetLibrary.list_assets(
|
||||
h, recursive=False, include_folder=False)
|
||||
|
||||
existing_sequences = [
|
||||
EditorAssetLibrary.find_asset_data(asset)
|
||||
for asset in root_content
|
||||
if EditorAssetLibrary.find_asset_data(
|
||||
asset).get_class().get_name() == 'LevelSequence'
|
||||
]
|
||||
|
||||
if not existing_sequences:
|
||||
scene = tools.create_asset(
|
||||
asset_name=hierarchy[i],
|
||||
package_path=h,
|
||||
asset_class=unreal.LevelSequence,
|
||||
factory=unreal.LevelSequenceFactoryNew()
|
||||
)
|
||||
|
||||
asset_data = legacy_io.find_one({
|
||||
"type": "asset",
|
||||
"name": h.split('/')[-1]
|
||||
})
|
||||
|
||||
id = asset_data.get('_id')
|
||||
|
||||
start_frames = []
|
||||
end_frames = []
|
||||
|
||||
elements = list(
|
||||
legacy_io.find({"type": "asset", "data.visualParent": id}))
|
||||
for e in elements:
|
||||
start_frames.append(e.get('data').get('clipIn'))
|
||||
end_frames.append(e.get('data').get('clipOut'))
|
||||
|
||||
elements.extend(legacy_io.find({
|
||||
"type": "asset",
|
||||
"data.visualParent": e.get('_id')
|
||||
}))
|
||||
|
||||
min_frame = min(start_frames)
|
||||
max_frame = max(end_frames)
|
||||
|
||||
scene.set_display_rate(
|
||||
unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
|
||||
scene.set_playback_start(min_frame)
|
||||
scene.set_playback_end(max_frame)
|
||||
|
||||
sequences.append(scene)
|
||||
frame_ranges.append((min_frame, max_frame))
|
||||
else:
|
||||
for e in existing_sequences:
|
||||
sequences.append(e.get_asset())
|
||||
frame_ranges.append((
|
||||
e.get_asset().get_playback_start(),
|
||||
e.get_asset().get_playback_end()))
|
||||
|
||||
i += 1
|
||||
|
||||
EditorAssetLibrary.make_directory(asset_dir)
|
||||
|
||||
cam_seq = tools.create_asset(
|
||||
asset_name=f"{asset}_camera",
|
||||
package_path=asset_dir,
|
||||
asset_class=unreal.LevelSequence,
|
||||
factory=unreal.LevelSequenceFactoryNew()
|
||||
)
|
||||
|
||||
io_asset = legacy_io.Session["AVALON_ASSET"]
|
||||
asset_doc = legacy_io.find_one({
|
||||
"type": "asset",
|
||||
"name": io_asset
|
||||
})
|
||||
# Add sequences data to hierarchy
|
||||
for i in range(0, len(sequences) - 1):
|
||||
self._set_sequence_hierarchy(
|
||||
sequences[i], sequences[i + 1],
|
||||
frame_ranges[i + 1][0], frame_ranges[i + 1][1])
|
||||
|
||||
data = asset_doc.get("data")
|
||||
|
||||
if data:
|
||||
sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0))
|
||||
sequence.set_playback_start(data.get("frameStart"))
|
||||
sequence.set_playback_end(data.get("frameEnd"))
|
||||
data = self._get_data(asset)
|
||||
cam_seq.set_display_rate(
|
||||
unreal.FrameRate(data.get("fps"), 1.0))
|
||||
cam_seq.set_playback_start(0)
|
||||
cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
|
||||
self._set_sequence_hierarchy(
|
||||
sequences[-1], cam_seq,
|
||||
data.get('clipIn'), data.get('clipOut'))
|
||||
|
||||
settings = unreal.MovieSceneUserImportFBXSettings()
|
||||
settings.set_editor_property('reduce_keys', False)
|
||||
|
||||
unreal.SequencerTools.import_fbx(
|
||||
unreal.EditorLevelLibrary.get_editor_world(),
|
||||
sequence,
|
||||
sequence.get_bindings(),
|
||||
settings,
|
||||
self.fname
|
||||
)
|
||||
if cam_seq:
|
||||
unreal.SequencerTools.import_fbx(
|
||||
EditorLevelLibrary.get_editor_world(),
|
||||
cam_seq,
|
||||
cam_seq.get_bindings(),
|
||||
settings,
|
||||
self.fname
|
||||
)
|
||||
|
||||
# Create Asset Container
|
||||
unreal_pipeline.create_container(
|
||||
|
|
@ -132,12 +255,15 @@ class CameraLoader(plugin.Loader):
|
|||
unreal_pipeline.imprint(
|
||||
"{}/{}".format(asset_dir, container_name), data)
|
||||
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
EditorLevelLibrary.save_all_dirty_levels()
|
||||
EditorLevelLibrary.load_level(current_level)
|
||||
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
asset_dir, recursive=True, include_folder=True
|
||||
)
|
||||
|
||||
for a in asset_content:
|
||||
unreal.EditorAssetLibrary.save_asset(a)
|
||||
EditorAssetLibrary.save_asset(a)
|
||||
|
||||
return asset_content
|
||||
|
||||
|
|
@ -147,25 +273,25 @@ class CameraLoader(plugin.Loader):
|
|||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
tools = unreal.AssetToolsHelpers().get_asset_tools()
|
||||
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
path, recursive=False, include_folder=False
|
||||
)
|
||||
asset_name = ""
|
||||
for a in asset_content:
|
||||
asset = ar.get_asset_by_object_path(a)
|
||||
if a.endswith("_CON"):
|
||||
loaded_asset = unreal.EditorAssetLibrary.load_asset(a)
|
||||
unreal.EditorAssetLibrary.set_metadata_tag(
|
||||
loaded_asset = EditorAssetLibrary.load_asset(a)
|
||||
EditorAssetLibrary.set_metadata_tag(
|
||||
loaded_asset, "representation", str(representation["_id"])
|
||||
)
|
||||
unreal.EditorAssetLibrary.set_metadata_tag(
|
||||
EditorAssetLibrary.set_metadata_tag(
|
||||
loaded_asset, "parent", str(representation["parent"])
|
||||
)
|
||||
asset_name = unreal.EditorAssetLibrary.get_metadata_tag(
|
||||
asset_name = EditorAssetLibrary.get_metadata_tag(
|
||||
loaded_asset, "asset_name"
|
||||
)
|
||||
elif asset.asset_class == "LevelSequence":
|
||||
unreal.EditorAssetLibrary.delete_asset(a)
|
||||
EditorAssetLibrary.delete_asset(a)
|
||||
|
||||
sequence = tools.create_asset(
|
||||
asset_name=asset_name,
|
||||
|
|
@ -191,7 +317,7 @@ class CameraLoader(plugin.Loader):
|
|||
settings.set_editor_property('reduce_keys', False)
|
||||
|
||||
unreal.SequencerTools.import_fbx(
|
||||
unreal.EditorLevelLibrary.get_editor_world(),
|
||||
EditorLevelLibrary.get_editor_world(),
|
||||
sequence,
|
||||
sequence.get_bindings(),
|
||||
settings,
|
||||
|
|
@ -202,11 +328,11 @@ class CameraLoader(plugin.Loader):
|
|||
path = container["namespace"]
|
||||
parent_path = os.path.dirname(path)
|
||||
|
||||
unreal.EditorAssetLibrary.delete_directory(path)
|
||||
EditorAssetLibrary.delete_directory(path)
|
||||
|
||||
asset_content = unreal.EditorAssetLibrary.list_assets(
|
||||
asset_content = EditorAssetLibrary.list_assets(
|
||||
parent_path, recursive=False, include_folder=True
|
||||
)
|
||||
|
||||
if len(asset_content) == 0:
|
||||
unreal.EditorAssetLibrary.delete_directory(parent_path)
|
||||
EditorAssetLibrary.delete_directory(parent_path)
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ from pathlib import Path
|
|||
import unreal
|
||||
from unreal import EditorAssetLibrary
|
||||
from unreal import EditorLevelLibrary
|
||||
from unreal import EditorLevelUtils
|
||||
from unreal import AssetToolsHelpers
|
||||
from unreal import FBXImportType
|
||||
from unreal import MathLibrary as umath
|
||||
|
|
@ -17,6 +18,7 @@ from openpype.pipeline import (
|
|||
load_container,
|
||||
get_representation_path,
|
||||
AVALON_CONTAINER_ID,
|
||||
legacy_io,
|
||||
)
|
||||
from openpype.hosts.unreal.api import plugin
|
||||
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
|
||||
|
|
@ -31,7 +33,7 @@ class LayoutLoader(plugin.Loader):
|
|||
label = "Load Layout"
|
||||
icon = "code-fork"
|
||||
color = "orange"
|
||||
ASSET_ROOT = "/Game/OpenPype/Assets"
|
||||
ASSET_ROOT = "/Game/OpenPype"
|
||||
|
||||
def _get_asset_containers(self, path):
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
|
@ -85,11 +87,91 @@ class LayoutLoader(plugin.Loader):
|
|||
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _process_family(assets, class_name, transform, inst_name=None):
|
||||
def _get_data(self, asset_name):
|
||||
asset_doc = legacy_io.find_one({
|
||||
"type": "asset",
|
||||
"name": asset_name
|
||||
})
|
||||
|
||||
return asset_doc.get("data")
|
||||
|
||||
def _set_sequence_hierarchy(
|
||||
self, seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths
|
||||
):
|
||||
# Get existing sequencer tracks or create them if they don't exist
|
||||
tracks = seq_i.get_master_tracks()
|
||||
subscene_track = None
|
||||
visibility_track = None
|
||||
for t in tracks:
|
||||
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
|
||||
subscene_track = t
|
||||
if (t.get_class() ==
|
||||
unreal.MovieSceneLevelVisibilityTrack.static_class()):
|
||||
visibility_track = t
|
||||
if not subscene_track:
|
||||
subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
|
||||
if not visibility_track:
|
||||
visibility_track = seq_i.add_master_track(
|
||||
unreal.MovieSceneLevelVisibilityTrack)
|
||||
|
||||
# Create the sub-scene section
|
||||
subscenes = subscene_track.get_sections()
|
||||
subscene = None
|
||||
for s in subscenes:
|
||||
if s.get_editor_property('sub_sequence') == seq_j:
|
||||
subscene = s
|
||||
break
|
||||
if not subscene:
|
||||
subscene = subscene_track.add_section()
|
||||
subscene.set_row_index(len(subscene_track.get_sections()))
|
||||
subscene.set_editor_property('sub_sequence', seq_j)
|
||||
subscene.set_range(
|
||||
min_frame_j,
|
||||
max_frame_j + 1)
|
||||
|
||||
# Create the visibility section
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
maps = []
|
||||
for m in map_paths:
|
||||
# Unreal requires to load the level to get the map name
|
||||
EditorLevelLibrary.save_all_dirty_levels()
|
||||
EditorLevelLibrary.load_level(m)
|
||||
maps.append(str(ar.get_asset_by_object_path(m).asset_name))
|
||||
|
||||
vis_section = visibility_track.add_section()
|
||||
index = len(visibility_track.get_sections())
|
||||
|
||||
vis_section.set_range(
|
||||
min_frame_j,
|
||||
max_frame_j + 1)
|
||||
vis_section.set_visibility(unreal.LevelVisibility.VISIBLE)
|
||||
vis_section.set_row_index(index)
|
||||
vis_section.set_level_names(maps)
|
||||
|
||||
if min_frame_j > 1:
|
||||
hid_section = visibility_track.add_section()
|
||||
hid_section.set_range(
|
||||
1,
|
||||
min_frame_j)
|
||||
hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
|
||||
hid_section.set_row_index(index)
|
||||
hid_section.set_level_names(maps)
|
||||
if max_frame_j < max_frame_i:
|
||||
hid_section = visibility_track.add_section()
|
||||
hid_section.set_range(
|
||||
max_frame_j + 1,
|
||||
max_frame_i + 1)
|
||||
hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
|
||||
hid_section.set_row_index(index)
|
||||
hid_section.set_level_names(maps)
|
||||
|
||||
def _process_family(
|
||||
self, assets, class_name, transform, sequence, inst_name=None
|
||||
):
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
actors = []
|
||||
bindings = []
|
||||
|
||||
for asset in assets:
|
||||
obj = ar.get_asset_by_object_path(asset).get_asset()
|
||||
|
|
@ -119,14 +201,23 @@ class LayoutLoader(plugin.Loader):
|
|||
), False)
|
||||
actor.set_actor_scale3d(transform.get('scale'))
|
||||
|
||||
if class_name == 'SkeletalMesh':
|
||||
skm_comp = actor.get_editor_property(
|
||||
'skeletal_mesh_component')
|
||||
skm_comp.set_bounds_scale(10.0)
|
||||
|
||||
actors.append(actor)
|
||||
|
||||
return actors
|
||||
binding = sequence.add_possessable(actor)
|
||||
|
||||
bindings.append(binding)
|
||||
|
||||
return actors, bindings
|
||||
|
||||
@staticmethod
|
||||
def _import_animation(
|
||||
asset_dir, path, instance_name, skeleton, actors_dict,
|
||||
animation_file):
|
||||
self, asset_dir, path, instance_name, skeleton, actors_dict,
|
||||
animation_file, bindings_dict, sequence
|
||||
):
|
||||
anim_file = Path(animation_file)
|
||||
anim_file_name = anim_file.with_suffix('')
|
||||
|
||||
|
|
@ -205,7 +296,20 @@ class LayoutLoader(plugin.Loader):
|
|||
actor.skeletal_mesh_component.animation_data.set_editor_property(
|
||||
'anim_to_play', animation)
|
||||
|
||||
def _process(self, lib_path, asset_dir, loaded=None):
|
||||
# Add animation to the sequencer
|
||||
bindings = bindings_dict.get(instance_name)
|
||||
|
||||
for binding in bindings:
|
||||
binding.add_track(unreal.MovieSceneSkeletalAnimationTrack)
|
||||
for track in binding.get_tracks():
|
||||
section = track.add_section()
|
||||
section.set_range(
|
||||
sequence.get_playback_start(),
|
||||
sequence.get_playback_end())
|
||||
sec_params = section.get_editor_property('params')
|
||||
sec_params.set_editor_property('animation', animation)
|
||||
|
||||
def _process(self, lib_path, asset_dir, sequence, loaded=None):
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
with open(lib_path, "r") as fp:
|
||||
|
|
@ -220,6 +324,7 @@ class LayoutLoader(plugin.Loader):
|
|||
|
||||
skeleton_dict = {}
|
||||
actors_dict = {}
|
||||
bindings_dict = {}
|
||||
|
||||
for element in data:
|
||||
reference = None
|
||||
|
|
@ -277,12 +382,13 @@ class LayoutLoader(plugin.Loader):
|
|||
actors = []
|
||||
|
||||
if family == 'model':
|
||||
actors = self._process_family(
|
||||
assets, 'StaticMesh', transform, inst)
|
||||
actors, _ = self._process_family(
|
||||
assets, 'StaticMesh', transform, sequence, inst)
|
||||
elif family == 'rig':
|
||||
actors = self._process_family(
|
||||
assets, 'SkeletalMesh', transform, inst)
|
||||
actors, bindings = self._process_family(
|
||||
assets, 'SkeletalMesh', transform, sequence, inst)
|
||||
actors_dict[inst] = actors
|
||||
bindings_dict[inst] = bindings
|
||||
|
||||
if family == 'rig':
|
||||
# Finds skeleton among the imported assets
|
||||
|
|
@ -302,8 +408,8 @@ class LayoutLoader(plugin.Loader):
|
|||
|
||||
if animation_file and skeleton:
|
||||
self._import_animation(
|
||||
asset_dir, path, instance_name, skeleton,
|
||||
actors_dict, animation_file)
|
||||
asset_dir, path, instance_name, skeleton, actors_dict,
|
||||
animation_file, bindings_dict, sequence)
|
||||
|
||||
@staticmethod
|
||||
def _remove_family(assets, components, class_name, prop_name):
|
||||
|
|
@ -369,7 +475,13 @@ class LayoutLoader(plugin.Loader):
|
|||
list(str): list of container content
|
||||
"""
|
||||
# Create directory for asset and avalon container
|
||||
hierarchy = context.get('asset').get('data').get('parents')
|
||||
root = self.ASSET_ROOT
|
||||
hierarchy_dir = root
|
||||
hierarchy_list = []
|
||||
for h in hierarchy:
|
||||
hierarchy_dir = f"{hierarchy_dir}/{h}"
|
||||
hierarchy_list.append(hierarchy_dir)
|
||||
asset = context.get('asset').get('name')
|
||||
suffix = "_CON"
|
||||
if asset:
|
||||
|
|
@ -379,13 +491,156 @@ class LayoutLoader(plugin.Loader):
|
|||
|
||||
tools = unreal.AssetToolsHelpers().get_asset_tools()
|
||||
asset_dir, container_name = tools.create_unique_asset_name(
|
||||
"{}/{}/{}".format(root, asset, name), suffix="")
|
||||
"{}/{}/{}".format(hierarchy_dir, asset, name), suffix="")
|
||||
|
||||
container_name += suffix
|
||||
|
||||
EditorAssetLibrary.make_directory(asset_dir)
|
||||
|
||||
self._process(self.fname, asset_dir)
|
||||
# Create map for the shot, and create hierarchy of map. If the maps
|
||||
# already exist, we will use them.
|
||||
maps = []
|
||||
for h in hierarchy_list:
|
||||
a = h.split('/')[-1]
|
||||
map = f"{h}/{a}_map.{a}_map"
|
||||
new = False
|
||||
|
||||
if not EditorAssetLibrary.does_asset_exist(map):
|
||||
EditorLevelLibrary.new_level(f"{h}/{a}_map")
|
||||
new = True
|
||||
|
||||
maps.append({"map": map, "new": new})
|
||||
|
||||
EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map")
|
||||
maps.append(
|
||||
{"map": f"{asset_dir}/{asset}_map.{asset}_map", "new": True})
|
||||
|
||||
for i in range(0, len(maps) - 1):
|
||||
for j in range(i + 1, len(maps)):
|
||||
if maps[j].get('new'):
|
||||
EditorLevelLibrary.load_level(maps[i].get('map'))
|
||||
EditorLevelUtils.add_level_to_world(
|
||||
EditorLevelLibrary.get_editor_world(),
|
||||
maps[j].get('map'),
|
||||
unreal.LevelStreamingDynamic
|
||||
)
|
||||
EditorLevelLibrary.save_all_dirty_levels()
|
||||
|
||||
EditorLevelLibrary.load_level(maps[-1].get('map'))
|
||||
|
||||
# Get all the sequences in the hierarchy. It will create them, if
|
||||
# they don't exist.
|
||||
sequences = []
|
||||
frame_ranges = []
|
||||
i = 0
|
||||
for h in hierarchy_list:
|
||||
root_content = EditorAssetLibrary.list_assets(
|
||||
h, recursive=False, include_folder=False)
|
||||
|
||||
existing_sequences = [
|
||||
EditorAssetLibrary.find_asset_data(asset)
|
||||
for asset in root_content
|
||||
if EditorAssetLibrary.find_asset_data(
|
||||
asset).get_class().get_name() == 'LevelSequence'
|
||||
]
|
||||
|
||||
if not existing_sequences:
|
||||
sequence = tools.create_asset(
|
||||
asset_name=hierarchy[i],
|
||||
package_path=h,
|
||||
asset_class=unreal.LevelSequence,
|
||||
factory=unreal.LevelSequenceFactoryNew()
|
||||
)
|
||||
|
||||
asset_data = legacy_io.find_one({
|
||||
"type": "asset",
|
||||
"name": h.split('/')[-1]
|
||||
})
|
||||
|
||||
id = asset_data.get('_id')
|
||||
|
||||
start_frames = []
|
||||
end_frames = []
|
||||
|
||||
elements = list(
|
||||
legacy_io.find({"type": "asset", "data.visualParent": id}))
|
||||
for e in elements:
|
||||
start_frames.append(e.get('data').get('clipIn'))
|
||||
end_frames.append(e.get('data').get('clipOut'))
|
||||
|
||||
elements.extend(legacy_io.find({
|
||||
"type": "asset",
|
||||
"data.visualParent": e.get('_id')
|
||||
}))
|
||||
|
||||
min_frame = min(start_frames)
|
||||
max_frame = max(end_frames)
|
||||
|
||||
sequence.set_display_rate(
|
||||
unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
|
||||
sequence.set_playback_start(min_frame)
|
||||
sequence.set_playback_end(max_frame)
|
||||
|
||||
sequences.append(sequence)
|
||||
frame_ranges.append((min_frame, max_frame))
|
||||
|
||||
tracks = sequence.get_master_tracks()
|
||||
track = None
|
||||
for t in tracks:
|
||||
if (t.get_class() ==
|
||||
unreal.MovieSceneCameraCutTrack.static_class()):
|
||||
track = t
|
||||
break
|
||||
if not track:
|
||||
track = sequence.add_master_track(
|
||||
unreal.MovieSceneCameraCutTrack)
|
||||
else:
|
||||
for e in existing_sequences:
|
||||
sequences.append(e.get_asset())
|
||||
frame_ranges.append((
|
||||
e.get_asset().get_playback_start(),
|
||||
e.get_asset().get_playback_end()))
|
||||
|
||||
i += 1
|
||||
|
||||
shot = tools.create_asset(
|
||||
asset_name=asset,
|
||||
package_path=asset_dir,
|
||||
asset_class=unreal.LevelSequence,
|
||||
factory=unreal.LevelSequenceFactoryNew()
|
||||
)
|
||||
|
||||
# sequences and frame_ranges have the same length
|
||||
for i in range(0, len(sequences) - 1):
|
||||
maps_to_add = []
|
||||
for j in range(i + 1, len(maps)):
|
||||
maps_to_add.append(maps[j].get('map'))
|
||||
|
||||
self._set_sequence_hierarchy(
|
||||
sequences[i], sequences[i + 1],
|
||||
frame_ranges[i][1],
|
||||
frame_ranges[i + 1][0], frame_ranges[i + 1][1],
|
||||
maps_to_add)
|
||||
|
||||
data = self._get_data(asset)
|
||||
shot.set_display_rate(
|
||||
unreal.FrameRate(data.get("fps"), 1.0))
|
||||
shot.set_playback_start(0)
|
||||
shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
|
||||
self._set_sequence_hierarchy(
|
||||
sequences[-1], shot,
|
||||
frame_ranges[-1][1],
|
||||
data.get('clipIn'), data.get('clipOut'),
|
||||
[maps[-1].get('map')])
|
||||
|
||||
EditorLevelLibrary.load_level(maps[-1].get('map'))
|
||||
|
||||
self._process(self.fname, asset_dir, shot)
|
||||
|
||||
for s in sequences:
|
||||
EditorAssetLibrary.save_asset(s.get_full_name())
|
||||
|
||||
EditorLevelLibrary.save_current_level()
|
||||
|
||||
# Create Asset Container
|
||||
unreal_pipeline.create_container(
|
||||
|
|
@ -412,6 +667,8 @@ class LayoutLoader(plugin.Loader):
|
|||
for a in asset_content:
|
||||
EditorAssetLibrary.save_asset(a)
|
||||
|
||||
EditorLevelLibrary.load_level(maps[0].get('map'))
|
||||
|
||||
return asset_content
|
||||
|
||||
def update(self, container, representation):
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
|
|||
"""
|
||||
|
||||
label = "Collect Instances"
|
||||
order = pyblish.api.CollectorOrder
|
||||
order = pyblish.api.CollectorOrder - 0.1
|
||||
hosts = ["unreal"]
|
||||
|
||||
def process(self, context):
|
||||
|
|
|
|||
|
|
@ -0,0 +1,24 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectRemoveMarked(pyblish.api.ContextPlugin):
|
||||
"""Remove marked data
|
||||
|
||||
Remove instances that have 'remove' in their instance.data
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.499
|
||||
label = 'Remove Marked Instances'
|
||||
|
||||
def process(self, context):
|
||||
|
||||
self.log.debug(context)
|
||||
# make ftrack publishable
|
||||
instances_to_remove = []
|
||||
for instance in context:
|
||||
if instance.data.get('remove'):
|
||||
instances_to_remove.append(instance)
|
||||
|
||||
for instance in instances_to_remove:
|
||||
context.remove(instance)
|
||||
|
|
@ -0,0 +1,103 @@
|
|||
from pathlib import Path
|
||||
import unreal
|
||||
|
||||
import pyblish.api
|
||||
from openpype.hosts.unreal.api import pipeline
|
||||
|
||||
|
||||
class CollectRenderInstances(pyblish.api.InstancePlugin):
|
||||
""" This collector will try to find all the rendered frames.
|
||||
|
||||
"""
|
||||
order = pyblish.api.CollectorOrder
|
||||
hosts = ["unreal"]
|
||||
families = ["render"]
|
||||
label = "Collect Render Instances"
|
||||
|
||||
def process(self, instance):
|
||||
self.log.debug("Preparing Rendering Instances")
|
||||
|
||||
context = instance.context
|
||||
|
||||
data = instance.data
|
||||
data['remove'] = True
|
||||
|
||||
ar = unreal.AssetRegistryHelpers.get_asset_registry()
|
||||
|
||||
sequence = ar.get_asset_by_object_path(
|
||||
data.get('sequence')).get_asset()
|
||||
|
||||
sequences = [{
|
||||
"sequence": sequence,
|
||||
"output": data.get('output'),
|
||||
"frame_range": (
|
||||
data.get('frameStart'), data.get('frameEnd'))
|
||||
}]
|
||||
|
||||
for s in sequences:
|
||||
self.log.debug(f"Processing: {s.get('sequence').get_name()}")
|
||||
subscenes = pipeline.get_subsequences(s.get('sequence'))
|
||||
|
||||
if subscenes:
|
||||
for ss in subscenes:
|
||||
sequences.append({
|
||||
"sequence": ss.get_sequence(),
|
||||
"output": (f"{s.get('output')}/"
|
||||
f"{ss.get_sequence().get_name()}"),
|
||||
"frame_range": (
|
||||
ss.get_start_frame(), ss.get_end_frame() - 1)
|
||||
})
|
||||
else:
|
||||
# Avoid creating instances for camera sequences
|
||||
if "_camera" not in s.get('sequence').get_name():
|
||||
seq = s.get('sequence')
|
||||
seq_name = seq.get_name()
|
||||
|
||||
new_instance = context.create_instance(
|
||||
f"{data.get('subset')}_"
|
||||
f"{seq_name}")
|
||||
new_instance[:] = seq_name
|
||||
|
||||
new_data = new_instance.data
|
||||
|
||||
new_data["asset"] = seq_name
|
||||
new_data["setMembers"] = seq_name
|
||||
new_data["family"] = "render"
|
||||
new_data["families"] = ["render", "review"]
|
||||
new_data["parent"] = data.get("parent")
|
||||
new_data["subset"] = f"{data.get('subset')}_{seq_name}"
|
||||
new_data["level"] = data.get("level")
|
||||
new_data["output"] = s.get('output')
|
||||
new_data["fps"] = seq.get_display_rate().numerator
|
||||
new_data["frameStart"] = s.get('frame_range')[0]
|
||||
new_data["frameEnd"] = s.get('frame_range')[1]
|
||||
new_data["sequence"] = seq.get_path_name()
|
||||
new_data["master_sequence"] = data["master_sequence"]
|
||||
new_data["master_level"] = data["master_level"]
|
||||
|
||||
self.log.debug(f"new instance data: {new_data}")
|
||||
|
||||
project_dir = unreal.Paths.project_dir()
|
||||
render_dir = (f"{project_dir}/Saved/MovieRenders/"
|
||||
f"{s.get('output')}")
|
||||
render_path = Path(render_dir)
|
||||
|
||||
frames = []
|
||||
|
||||
for x in render_path.iterdir():
|
||||
if x.is_file() and x.suffix == '.png':
|
||||
frames.append(str(x.name))
|
||||
|
||||
if "representations" not in new_instance.data:
|
||||
new_instance.data["representations"] = []
|
||||
|
||||
repr = {
|
||||
'frameStart': s.get('frame_range')[0],
|
||||
'frameEnd': s.get('frame_range')[1],
|
||||
'name': 'png',
|
||||
'ext': 'png',
|
||||
'files': frames,
|
||||
'stagingDir': render_dir,
|
||||
'tags': ['review']
|
||||
}
|
||||
new_instance.data["representations"].append(repr)
|
||||
48
openpype/hosts/unreal/plugins/publish/extract_render.py
Normal file
48
openpype/hosts/unreal/plugins/publish/extract_render.py
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
from pathlib import Path
|
||||
|
||||
import unreal
|
||||
|
||||
import openpype.api
|
||||
|
||||
|
||||
class ExtractRender(openpype.api.Extractor):
|
||||
"""Extract render."""
|
||||
|
||||
label = "Extract Render"
|
||||
hosts = ["unreal"]
|
||||
families = ["render"]
|
||||
optional = True
|
||||
|
||||
def process(self, instance):
|
||||
# Define extract output file path
|
||||
stagingdir = self.staging_dir(instance)
|
||||
|
||||
# Perform extraction
|
||||
self.log.info("Performing extraction..")
|
||||
|
||||
# Get the render output directory
|
||||
project_dir = unreal.Paths.project_dir()
|
||||
render_dir = (f"{project_dir}/Saved/MovieRenders/"
|
||||
f"{instance.data['subset']}")
|
||||
|
||||
assert unreal.Paths.directory_exists(render_dir), \
|
||||
"Render directory does not exist"
|
||||
|
||||
render_path = Path(render_dir)
|
||||
|
||||
frames = []
|
||||
|
||||
for x in render_path.iterdir():
|
||||
if x.is_file() and x.suffix == '.png':
|
||||
frames.append(str(x))
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
render_representation = {
|
||||
'name': 'png',
|
||||
'ext': 'png',
|
||||
'files': frames,
|
||||
"stagingDir": stagingdir,
|
||||
}
|
||||
instance.data["representations"].append(render_representation)
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
import clique
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class ValidateSequenceFrames(pyblish.api.InstancePlugin):
|
||||
"""Ensure the sequence of frames is complete
|
||||
|
||||
The files found in the folder are checked against the frameStart and
|
||||
frameEnd of the instance. If the first or last file is not
|
||||
corresponding with the first or last frame it is flagged as invalid.
|
||||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Sequence Frames"
|
||||
families = ["render"]
|
||||
hosts = ["unreal"]
|
||||
optional = True
|
||||
|
||||
def process(self, instance):
|
||||
representations = instance.data.get("representations")
|
||||
for repr in representations:
|
||||
patterns = [clique.PATTERNS["frames"]]
|
||||
collections, remainder = clique.assemble(
|
||||
repr["files"], minimum_items=1, patterns=patterns)
|
||||
|
||||
assert not remainder, "Must not have remainder"
|
||||
assert len(collections) == 1, "Must detect single collection"
|
||||
collection = collections[0]
|
||||
frames = list(collection.indexes)
|
||||
|
||||
current_range = (frames[0], frames[-1])
|
||||
required_range = (instance.data["frameStart"],
|
||||
instance.data["frameEnd"])
|
||||
|
||||
if current_range != required_range:
|
||||
raise ValueError(f"Invalid frame range: {current_range} - "
|
||||
f"expected: {required_range}")
|
||||
|
||||
missing = collection.holes().indexes
|
||||
assert not missing, "Missing frames: %s" % (missing,)
|
||||
|
|
@ -1532,13 +1532,13 @@ class BuildWorkfile:
|
|||
|
||||
subsets = list(legacy_io.find({
|
||||
"type": "subset",
|
||||
"parent": {"$in": asset_entity_by_ids.keys()}
|
||||
"parent": {"$in": list(asset_entity_by_ids.keys())}
|
||||
}))
|
||||
subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
|
||||
|
||||
sorted_versions = list(legacy_io.find({
|
||||
"type": "version",
|
||||
"parent": {"$in": subset_entity_by_ids.keys()}
|
||||
"parent": {"$in": list(subset_entity_by_ids.keys())}
|
||||
}).sort("name", -1))
|
||||
|
||||
subset_id_with_latest_version = []
|
||||
|
|
@ -1552,7 +1552,7 @@ class BuildWorkfile:
|
|||
|
||||
repres = legacy_io.find({
|
||||
"type": "representation",
|
||||
"parent": {"$in": last_versions_by_id.keys()}
|
||||
"parent": {"$in": list(last_versions_by_id.keys())}
|
||||
})
|
||||
|
||||
output = {}
|
||||
|
|
|
|||
|
|
@ -365,6 +365,7 @@ class TemplateResult(str):
|
|||
when value of key in data is dictionary but template expect string
|
||||
of number.
|
||||
"""
|
||||
|
||||
used_values = None
|
||||
solved = None
|
||||
template = None
|
||||
|
|
@ -383,6 +384,12 @@ class TemplateResult(str):
|
|||
new_obj.invalid_types = invalid_types
|
||||
return new_obj
|
||||
|
||||
def __copy__(self, *args, **kwargs):
|
||||
return self.copy()
|
||||
|
||||
def __deepcopy__(self, *args, **kwargs):
|
||||
return self.copy()
|
||||
|
||||
def validate(self):
|
||||
if not self.solved:
|
||||
raise TemplateUnsolved(
|
||||
|
|
@ -391,6 +398,17 @@ class TemplateResult(str):
|
|||
self.invalid_types
|
||||
)
|
||||
|
||||
def copy(self):
|
||||
cls = self.__class__
|
||||
return cls(
|
||||
str(self),
|
||||
self.template,
|
||||
self.solved,
|
||||
self.used_values,
|
||||
self.missing_keys,
|
||||
self.invalid_types
|
||||
)
|
||||
|
||||
|
||||
class TemplatesResultDict(dict):
|
||||
"""Holds and wrap TemplateResults for easy bug report."""
|
||||
|
|
|
|||
|
|
@ -727,9 +727,9 @@ def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None):
|
|||
def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd):
|
||||
input_format = ffprobe_data["format"]
|
||||
format_tags = input_format.get("tags") or {}
|
||||
product_name = format_tags.get("product_name") or ""
|
||||
operational_pattern_ul = format_tags.get("operational_pattern_ul") or ""
|
||||
output = []
|
||||
if "opatom" in product_name.lower():
|
||||
if operational_pattern_ul == "060e2b34.04010102.0d010201.10030000":
|
||||
output.extend(["-f", "mxf_opatom"])
|
||||
return output
|
||||
|
||||
|
|
|
|||
|
|
@ -188,6 +188,10 @@ def get_renderer_variables(renderlayer, root):
|
|||
filename_0 = re.sub('_<RenderPass>', '_beauty',
|
||||
filename_0, flags=re.IGNORECASE)
|
||||
prefix_attr = "defaultRenderGlobals.imageFilePrefix"
|
||||
|
||||
scene = cmds.file(query=True, sceneName=True)
|
||||
scene, _ = os.path.splitext(os.path.basename(scene))
|
||||
|
||||
if renderer == "vray":
|
||||
renderlayer = renderlayer.split("_")[-1]
|
||||
# Maya's renderSettings function does not return V-Ray file extension
|
||||
|
|
@ -207,8 +211,7 @@ def get_renderer_variables(renderlayer, root):
|
|||
filename_prefix = cmds.getAttr(prefix_attr)
|
||||
# we need to determine path for vray as maya `renderSettings` query
|
||||
# does not work for vray.
|
||||
scene = cmds.file(query=True, sceneName=True)
|
||||
scene, _ = os.path.splitext(os.path.basename(scene))
|
||||
|
||||
filename_0 = re.sub('<Scene>', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501
|
||||
filename_0 = re.sub('<Layer>', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501
|
||||
filename_0 = "{}.{}.{}".format(
|
||||
|
|
@ -216,6 +219,39 @@ def get_renderer_variables(renderlayer, root):
|
|||
filename_0 = os.path.normpath(os.path.join(root, filename_0))
|
||||
elif renderer == "renderman":
|
||||
prefix_attr = "rmanGlobals.imageFileFormat"
|
||||
# NOTE: This is guessing extensions from renderman display types.
|
||||
# Some of them are just framebuffers, d_texture format can be
|
||||
# set in display setting. We set those now to None, but it
|
||||
# should be handled more gracefully.
|
||||
display_types = {
|
||||
"d_deepexr": "exr",
|
||||
"d_it": None,
|
||||
"d_null": None,
|
||||
"d_openexr": "exr",
|
||||
"d_png": "png",
|
||||
"d_pointcloud": "ptc",
|
||||
"d_targa": "tga",
|
||||
"d_texture": None,
|
||||
"d_tiff": "tif"
|
||||
}
|
||||
|
||||
extension = display_types.get(
|
||||
cmds.listConnections("rmanDefaultDisplay.displayType")[0],
|
||||
"exr"
|
||||
) or "exr"
|
||||
|
||||
filename_prefix = "{}/{}".format(
|
||||
cmds.getAttr("rmanGlobals.imageOutputDir"),
|
||||
cmds.getAttr("rmanGlobals.imageFileFormat")
|
||||
)
|
||||
|
||||
renderlayer = renderlayer.split("_")[-1]
|
||||
|
||||
filename_0 = re.sub('<scene>', scene, filename_prefix, flags=re.IGNORECASE) # noqa: E501
|
||||
filename_0 = re.sub('<layer>', renderlayer, filename_0, flags=re.IGNORECASE) # noqa: E501
|
||||
filename_0 = re.sub('<f[\\d+]>', "#" * int(padding), filename_0, flags=re.IGNORECASE) # noqa: E501
|
||||
filename_0 = re.sub('<ext>', extension, filename_0, flags=re.IGNORECASE) # noqa: E501
|
||||
filename_0 = os.path.normpath(os.path.join(root, filename_0))
|
||||
elif renderer == "redshift":
|
||||
# mapping redshift extension dropdown values to strings
|
||||
ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
|
||||
|
|
@ -404,6 +440,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
|
||||
output_filename_0 = filename_0
|
||||
|
||||
dirname = os.path.dirname(output_filename_0)
|
||||
|
||||
# Create render folder ----------------------------------------------
|
||||
try:
|
||||
# Ensure render folder exists
|
||||
|
|
@ -799,6 +837,23 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
"AssetDependency0": data["filepath"],
|
||||
}
|
||||
|
||||
renderer = self._instance.data["renderer"]
|
||||
|
||||
# This hack is here because of how Deadline handles Renderman version.
|
||||
# it considers everything with `renderman` set as version older than
|
||||
# Renderman 22, and so if we are using renderman > 21 we need to set
|
||||
# renderer string on the job to `renderman22`. We will have to change
|
||||
# this when Deadline releases new version handling this.
|
||||
if self._instance.data["renderer"] == "renderman":
|
||||
try:
|
||||
from rfm2.config import cfg # noqa
|
||||
except ImportError:
|
||||
raise Exception("Cannot determine renderman version")
|
||||
|
||||
rman_version = cfg().build_info.version() # type: str
|
||||
if int(rman_version.split(".")[0]) > 22:
|
||||
renderer = "renderman22"
|
||||
|
||||
plugin_info = {
|
||||
"SceneFile": data["filepath"],
|
||||
# Output directory and filename
|
||||
|
|
@ -812,7 +867,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
"RenderLayer": data["renderlayer"],
|
||||
|
||||
# Determine which renderer to use from the file itself
|
||||
"Renderer": self._instance.data["renderer"],
|
||||
"Renderer": renderer,
|
||||
|
||||
# Resolve relative references
|
||||
"ProjectPath": data["workspace"],
|
||||
|
|
|
|||
|
|
@ -24,48 +24,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
label = "Integrate Ftrack Api"
|
||||
families = ["ftrack"]
|
||||
|
||||
def query(self, entitytype, data):
|
||||
""" Generate a query expression from data supplied.
|
||||
|
||||
If a value is not a string, we'll add the id of the entity to the
|
||||
query.
|
||||
|
||||
Args:
|
||||
entitytype (str): The type of entity to query.
|
||||
data (dict): The data to identify the entity.
|
||||
exclusions (list): All keys to exclude from the query.
|
||||
|
||||
Returns:
|
||||
str: String query to use with "session.query"
|
||||
"""
|
||||
queries = []
|
||||
if sys.version_info[0] < 3:
|
||||
for key, value in data.iteritems():
|
||||
if not isinstance(value, (basestring, int)):
|
||||
self.log.info("value: {}".format(value))
|
||||
if "id" in value.keys():
|
||||
queries.append(
|
||||
"{0}.id is \"{1}\"".format(key, value["id"])
|
||||
)
|
||||
else:
|
||||
queries.append("{0} is \"{1}\"".format(key, value))
|
||||
else:
|
||||
for key, value in data.items():
|
||||
if not isinstance(value, (str, int)):
|
||||
self.log.info("value: {}".format(value))
|
||||
if "id" in value.keys():
|
||||
queries.append(
|
||||
"{0}.id is \"{1}\"".format(key, value["id"])
|
||||
)
|
||||
else:
|
||||
queries.append("{0} is \"{1}\"".format(key, value))
|
||||
|
||||
query = (
|
||||
"select id from " + entitytype + " where " + " and ".join(queries)
|
||||
)
|
||||
self.log.debug(query)
|
||||
return query
|
||||
|
||||
def process(self, instance):
|
||||
session = instance.context.data["ftrackSession"]
|
||||
context = instance.context
|
||||
|
|
@ -108,7 +66,19 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
default_asset_name = parent_entity["name"]
|
||||
|
||||
# Change status on task
|
||||
self._set_task_status(instance, task_entity, session)
|
||||
asset_version_status_ids_by_name = {}
|
||||
project_entity = instance.context.data.get("ftrackProject")
|
||||
if project_entity:
|
||||
project_schema = project_entity["project_schema"]
|
||||
asset_version_statuses = (
|
||||
project_schema.get_statuses("AssetVersion")
|
||||
)
|
||||
asset_version_status_ids_by_name = {
|
||||
status["name"].lower(): status["id"]
|
||||
for status in asset_version_statuses
|
||||
}
|
||||
|
||||
self._set_task_status(instance, project_entity, task_entity, session)
|
||||
|
||||
# Prepare AssetTypes
|
||||
asset_types_by_short = self._ensure_asset_types_exists(
|
||||
|
|
@ -139,7 +109,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
# Asset Version
|
||||
asset_version_data = data.get("assetversion_data") or {}
|
||||
asset_version_entity = self._ensure_asset_version_exists(
|
||||
session, asset_version_data, asset_entity["id"], task_entity
|
||||
session,
|
||||
asset_version_data,
|
||||
asset_entity["id"],
|
||||
task_entity,
|
||||
asset_version_status_ids_by_name
|
||||
)
|
||||
|
||||
# Component
|
||||
|
|
@ -174,8 +148,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
if asset_version not in instance.data[asset_versions_key]:
|
||||
instance.data[asset_versions_key].append(asset_version)
|
||||
|
||||
def _set_task_status(self, instance, task_entity, session):
|
||||
project_entity = instance.context.data.get("ftrackProject")
|
||||
def _set_task_status(self, instance, project_entity, task_entity, session):
|
||||
if not project_entity:
|
||||
self.log.info("Task status won't be set, project is not known.")
|
||||
return
|
||||
|
|
@ -319,12 +292,19 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
).first()
|
||||
|
||||
def _ensure_asset_version_exists(
|
||||
self, session, asset_version_data, asset_id, task_entity
|
||||
self,
|
||||
session,
|
||||
asset_version_data,
|
||||
asset_id,
|
||||
task_entity,
|
||||
status_ids_by_name
|
||||
):
|
||||
task_id = None
|
||||
if task_entity:
|
||||
task_id = task_entity["id"]
|
||||
|
||||
status_name = asset_version_data.pop("status_name", None)
|
||||
|
||||
# Try query asset version by criteria (asset id and version)
|
||||
version = asset_version_data.get("version") or 0
|
||||
asset_version_entity = self._query_asset_version(
|
||||
|
|
@ -366,6 +346,18 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
|
|||
session, version, asset_id
|
||||
)
|
||||
|
||||
if status_name:
|
||||
status_id = status_ids_by_name.get(status_name.lower())
|
||||
if not status_id:
|
||||
self.log.info((
|
||||
"Ftrack status with name \"{}\""
|
||||
" for AssetVersion was not found."
|
||||
).format(status_name))
|
||||
|
||||
elif asset_version_entity["status_id"] != status_id:
|
||||
asset_version_entity["status_id"] = status_id
|
||||
session.commit()
|
||||
|
||||
# Set custom attributes if there were any set
|
||||
custom_attrs = asset_version_data.get("custom_attributes") or {}
|
||||
for attr_key, attr_value in custom_attrs.items():
|
||||
|
|
|
|||
|
|
@ -3,6 +3,8 @@ import json
|
|||
import copy
|
||||
import pyblish.api
|
||||
|
||||
from openpype.lib.profiles_filtering import filter_profiles
|
||||
|
||||
|
||||
class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
|
||||
"""Collect ftrack component data (not integrate yet).
|
||||
|
|
@ -36,6 +38,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
|
|||
"reference": "reference"
|
||||
}
|
||||
keep_first_subset_name_for_review = True
|
||||
asset_versions_status_profiles = {}
|
||||
|
||||
def process(self, instance):
|
||||
self.log.debug("instance {}".format(instance))
|
||||
|
|
@ -80,6 +83,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
|
|||
if instance_fps is None:
|
||||
instance_fps = instance.context.data["fps"]
|
||||
|
||||
status_name = self._get_asset_version_status_name(instance)
|
||||
|
||||
# Base of component item data
|
||||
# - create a copy of this object when want to use it
|
||||
base_component_item = {
|
||||
|
|
@ -91,7 +96,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
|
|||
},
|
||||
"assetversion_data": {
|
||||
"version": version_number,
|
||||
"comment": instance.context.data.get("comment") or ""
|
||||
"comment": instance.context.data.get("comment") or "",
|
||||
"status_name": status_name
|
||||
},
|
||||
"component_overwrite": False,
|
||||
# This can be change optionally
|
||||
|
|
@ -317,3 +323,24 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
|
|||
)
|
||||
))
|
||||
instance.data["ftrackComponentsList"] = component_list
|
||||
|
||||
def _get_asset_version_status_name(self, instance):
|
||||
if not self.asset_versions_status_profiles:
|
||||
return None
|
||||
|
||||
# Prepare filtering data for new asset version status
|
||||
anatomy_data = instance.data["anatomyData"]
|
||||
task_type = anatomy_data.get("task", {}).get("type")
|
||||
filtering_criteria = {
|
||||
"families": instance.data["family"],
|
||||
"hosts": instance.context.data["hostName"],
|
||||
"task_types": task_type
|
||||
}
|
||||
matching_profile = filter_profiles(
|
||||
self.asset_versions_status_profiles,
|
||||
filtering_criteria
|
||||
)
|
||||
if not matching_profile:
|
||||
return None
|
||||
|
||||
return matching_profile["status"] or None
|
||||
|
|
|
|||
|
|
@ -30,14 +30,15 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
|
|||
|
||||
# shot data dict
|
||||
shot_data = {}
|
||||
family = instance.data.get("family")
|
||||
family = instance.data["family"]
|
||||
families = instance.data["families"]
|
||||
|
||||
# filter out all unepropriate instances
|
||||
if not instance.data["publish"]:
|
||||
continue
|
||||
|
||||
# exclude other families then self.families with intersection
|
||||
if not set(self.families).intersection([family]):
|
||||
if not set(self.families).intersection(set(families + [family])):
|
||||
continue
|
||||
|
||||
# exclude if not masterLayer True
|
||||
|
|
|
|||
|
|
@ -41,21 +41,33 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin):
|
|||
loaded_versions = []
|
||||
_containers = list(host.ls())
|
||||
_repr_ids = [ObjectId(c["representation"]) for c in _containers]
|
||||
repre_docs = legacy_io.find(
|
||||
{"_id": {"$in": _repr_ids}},
|
||||
projection={"_id": 1, "parent": 1}
|
||||
)
|
||||
version_by_repr = {
|
||||
str(doc["_id"]): doc["parent"] for doc in
|
||||
legacy_io.find(
|
||||
{"_id": {"$in": _repr_ids}},
|
||||
projection={"parent": 1}
|
||||
)
|
||||
str(doc["_id"]): doc["parent"]
|
||||
for doc in repre_docs
|
||||
}
|
||||
|
||||
# QUESTION should we add same representation id when loaded multiple
|
||||
# times?
|
||||
for con in _containers:
|
||||
repre_id = con["representation"]
|
||||
version_id = version_by_repr.get(repre_id)
|
||||
if version_id is None:
|
||||
self.log.warning((
|
||||
"Skipping container,"
|
||||
" did not find representation document. {}"
|
||||
).format(str(con)))
|
||||
continue
|
||||
|
||||
# NOTE:
|
||||
# may have more then one representation that are same version
|
||||
version = {
|
||||
"subsetName": con["name"],
|
||||
"representation": ObjectId(con["representation"]),
|
||||
"version": version_by_repr[con["representation"]], # _id
|
||||
"representation": ObjectId(repre_id),
|
||||
"version": version_id,
|
||||
}
|
||||
loaded_versions.append(version)
|
||||
|
||||
|
|
|
|||
|
|
@ -51,7 +51,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
"resolve",
|
||||
"webpublisher",
|
||||
"aftereffects",
|
||||
"flame"
|
||||
"flame",
|
||||
"unreal"
|
||||
]
|
||||
|
||||
# Supported extensions
|
||||
|
|
|
|||
|
|
@ -165,7 +165,7 @@
|
|||
]
|
||||
}
|
||||
],
|
||||
"customNodes": []
|
||||
"overrideNodes": []
|
||||
},
|
||||
"regexInputs": {
|
||||
"inputs": [
|
||||
|
|
|
|||
|
|
@ -418,7 +418,8 @@
|
|||
"redshiftproxy": "cache",
|
||||
"usd": "usd"
|
||||
},
|
||||
"keep_first_subset_name_for_review": true
|
||||
"keep_first_subset_name_for_review": true,
|
||||
"asset_versions_status_profiles": []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -120,7 +120,7 @@
|
|||
"filter": {
|
||||
"task_types": [],
|
||||
"families": [],
|
||||
"sebsets": []
|
||||
"subsets": []
|
||||
},
|
||||
"read_raw": false,
|
||||
"viewer_process_override": "",
|
||||
|
|
|
|||
|
|
@ -52,10 +52,39 @@
|
|||
"environment": {},
|
||||
"variants": {}
|
||||
},
|
||||
"renderman": {
|
||||
"environment": {},
|
||||
"variants": {
|
||||
"24-3-maya": {
|
||||
"host_names": [
|
||||
"maya"
|
||||
],
|
||||
"app_variants": [
|
||||
"maya/2022"
|
||||
],
|
||||
"environment": {
|
||||
"RFMTREE": {
|
||||
"windows": "C:\\Program Files\\Pixar\\RenderManForMaya-24.3",
|
||||
"darwin": "/Applications/Pixar/RenderManForMaya-24.3",
|
||||
"linux": "/opt/pixar/RenderManForMaya-24.3"
|
||||
},
|
||||
"RMANTREE": {
|
||||
"windows": "C:\\Program Files\\Pixar\\RenderManProServer-24.3",
|
||||
"darwin": "/Applications/Pixar/RenderManProServer-24.3",
|
||||
"linux": "/opt/pixar/RenderManProServer-24.3"
|
||||
}
|
||||
}
|
||||
},
|
||||
"__dynamic_keys_labels__": {
|
||||
"24-3-maya": "24.3 RFM"
|
||||
}
|
||||
}
|
||||
},
|
||||
"__dynamic_keys_labels__": {
|
||||
"mtoa": "Autodesk Arnold",
|
||||
"vray": "Chaos Group Vray",
|
||||
"yeti": "Pergrine Labs Yeti"
|
||||
"yeti": "Peregrine Labs Yeti",
|
||||
"renderman": "Pixar Renderman"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -858,6 +858,43 @@
|
|||
"key": "keep_first_subset_name_for_review",
|
||||
"label": "Make subset name as first asset name",
|
||||
"default": true
|
||||
},
|
||||
{
|
||||
"type": "list",
|
||||
"collapsible": true,
|
||||
"key": "asset_versions_status_profiles",
|
||||
"label": "AssetVersion status on publish",
|
||||
"use_label_wrap": true,
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
{
|
||||
"key": "hosts",
|
||||
"label": "Host names",
|
||||
"type": "hosts-enum",
|
||||
"multiselection": true
|
||||
},
|
||||
{
|
||||
"key": "task_types",
|
||||
"label": "Task types",
|
||||
"type": "task-types-enum"
|
||||
},
|
||||
{
|
||||
"key": "family",
|
||||
"label": "Family",
|
||||
"type": "list",
|
||||
"object_type": "text"
|
||||
},
|
||||
{
|
||||
"type": "separator"
|
||||
},
|
||||
{
|
||||
"key": "status",
|
||||
"label": "Status name",
|
||||
"type": "text"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -253,7 +253,7 @@
|
|||
{
|
||||
"key": "requiredNodes",
|
||||
"type": "list",
|
||||
"label": "Required Nodes",
|
||||
"label": "Plugin required",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
|
|
@ -272,35 +272,43 @@
|
|||
"label": "Nuke Node Class"
|
||||
},
|
||||
{
|
||||
"type": "splitter"
|
||||
},
|
||||
{
|
||||
"key": "knobs",
|
||||
"type": "collapsible-wrap",
|
||||
"label": "Knobs",
|
||||
"type": "list",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
{
|
||||
"type": "text",
|
||||
"key": "name",
|
||||
"label": "Name"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "value",
|
||||
"label": "Value"
|
||||
"collapsible": true,
|
||||
"collapsed": true,
|
||||
"children": [
|
||||
{
|
||||
"key": "knobs",
|
||||
"type": "list",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
{
|
||||
"type": "text",
|
||||
"key": "name",
|
||||
"label": "Name"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "value",
|
||||
"label": "Value"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "splitter"
|
||||
},
|
||||
{
|
||||
"type": "list",
|
||||
"key": "customNodes",
|
||||
"label": "Custom Nodes",
|
||||
"key": "overrideNodes",
|
||||
"label": "Plugin's node overrides",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
|
|
@ -319,27 +327,37 @@
|
|||
"label": "Nuke Node Class"
|
||||
},
|
||||
{
|
||||
"type": "splitter"
|
||||
"key": "subsets",
|
||||
"label": "Subsets",
|
||||
"type": "list",
|
||||
"object_type": "text"
|
||||
},
|
||||
{
|
||||
"key": "knobs",
|
||||
"label": "Knobs",
|
||||
"type": "list",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
{
|
||||
"type": "text",
|
||||
"key": "name",
|
||||
"label": "Name"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "value",
|
||||
"label": "Value"
|
||||
"type": "collapsible-wrap",
|
||||
"label": "Knobs overrides",
|
||||
"collapsible": true,
|
||||
"collapsed": true,
|
||||
"children": [
|
||||
{
|
||||
"key": "knobs",
|
||||
"type": "list",
|
||||
"object_type": {
|
||||
"type": "dict",
|
||||
"children": [
|
||||
{
|
||||
"type": "text",
|
||||
"key": "name",
|
||||
"label": "Name"
|
||||
},
|
||||
{
|
||||
"type": "text",
|
||||
"key": "value",
|
||||
"label": "Value"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -212,7 +212,7 @@
|
|||
"object_type": "text"
|
||||
},
|
||||
{
|
||||
"key": "sebsets",
|
||||
"key": "subsets",
|
||||
"label": "Subsets",
|
||||
"type": "list",
|
||||
"object_type": "text"
|
||||
|
|
|
|||
|
|
@ -61,7 +61,11 @@
|
|||
"icon-entity-default": "#bfccd6",
|
||||
"icon-entity-disabled": "#808080",
|
||||
"font-entity-deprecated": "#666666",
|
||||
|
||||
"overlay-messages": {
|
||||
"close-btn": "#D3D8DE",
|
||||
"bg-success": "#458056",
|
||||
"bg-success-hover": "#55a066"
|
||||
},
|
||||
"tab-widget": {
|
||||
"bg": "#21252B",
|
||||
"bg-selected": "#434a56",
|
||||
|
|
|
|||
|
|
@ -687,6 +687,26 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
|
|||
background: none;
|
||||
}
|
||||
|
||||
/* Messages overlay */
|
||||
#OverlayMessageWidget {
|
||||
border-radius: 0.2em;
|
||||
background: {color:bg-buttons};
|
||||
}
|
||||
|
||||
#OverlayMessageWidget:hover {
|
||||
background: {color:bg-button-hover};
|
||||
}
|
||||
#OverlayMessageWidget {
|
||||
background: {color:overlay-messages:bg-success};
|
||||
}
|
||||
#OverlayMessageWidget:hover {
|
||||
background: {color:overlay-messages:bg-success-hover};
|
||||
}
|
||||
|
||||
#OverlayMessageWidget QWidget {
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
/* Password dialog*/
|
||||
#PasswordBtn {
|
||||
border: none;
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ from openpype.settings.lib import (
|
|||
save_local_settings
|
||||
)
|
||||
from openpype.tools.settings import CHILD_OFFSET
|
||||
from openpype.tools.utils import MessageOverlayObject
|
||||
from openpype.api import (
|
||||
Logger,
|
||||
SystemSettings,
|
||||
|
|
@ -221,6 +222,8 @@ class LocalSettingsWindow(QtWidgets.QWidget):
|
|||
|
||||
self.setWindowTitle("OpenPype Local settings")
|
||||
|
||||
overlay_object = MessageOverlayObject(self)
|
||||
|
||||
stylesheet = style.load_stylesheet()
|
||||
self.setStyleSheet(stylesheet)
|
||||
self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
|
||||
|
|
@ -247,6 +250,7 @@ class LocalSettingsWindow(QtWidgets.QWidget):
|
|||
save_btn.clicked.connect(self._on_save_clicked)
|
||||
reset_btn.clicked.connect(self._on_reset_clicked)
|
||||
|
||||
self._overlay_object = overlay_object
|
||||
# Do not create local settings widget in init phase as it's using
|
||||
# settings objects that must be OK to be able create this widget
|
||||
# - we want to show dialog if anything goes wrong
|
||||
|
|
@ -312,8 +316,10 @@ class LocalSettingsWindow(QtWidgets.QWidget):
|
|||
|
||||
def _on_reset_clicked(self):
|
||||
self.reset()
|
||||
self._overlay_object.add_message("Refreshed...")
|
||||
|
||||
def _on_save_clicked(self):
|
||||
value = self._settings_widget.settings_value()
|
||||
save_local_settings(value)
|
||||
self._overlay_object.add_message("Saved...", message_type="success")
|
||||
self.reset()
|
||||
|
|
|
|||
|
|
@ -22,6 +22,10 @@ from .lib import (
|
|||
from .models import (
|
||||
RecursiveSortFilterProxyModel,
|
||||
)
|
||||
from .overlay_messages import (
|
||||
MessageOverlayObject,
|
||||
)
|
||||
|
||||
|
||||
__all__ = (
|
||||
"PlaceholderLineEdit",
|
||||
|
|
@ -45,4 +49,6 @@ __all__ = (
|
|||
"get_asset_icon",
|
||||
|
||||
"RecursiveSortFilterProxyModel",
|
||||
|
||||
"MessageOverlayObject",
|
||||
)
|
||||
|
|
|
|||
324
openpype/tools/utils/overlay_messages.py
Normal file
324
openpype/tools/utils/overlay_messages.py
Normal file
|
|
@ -0,0 +1,324 @@
|
|||
import uuid
|
||||
|
||||
from Qt import QtWidgets, QtCore, QtGui
|
||||
|
||||
from openpype.style import get_objected_colors
|
||||
|
||||
from .lib import set_style_property
|
||||
|
||||
|
||||
class CloseButton(QtWidgets.QFrame):
|
||||
"""Close button drawed manually."""
|
||||
|
||||
clicked = QtCore.Signal()
|
||||
|
||||
def __init__(self, parent):
|
||||
super(CloseButton, self).__init__(parent)
|
||||
colors = get_objected_colors()
|
||||
close_btn_color = colors["overlay-messages"]["close-btn"]
|
||||
self._color = close_btn_color.get_qcolor()
|
||||
self._mouse_pressed = False
|
||||
policy = QtWidgets.QSizePolicy(
|
||||
QtWidgets.QSizePolicy.Fixed,
|
||||
QtWidgets.QSizePolicy.Fixed
|
||||
)
|
||||
self.setSizePolicy(policy)
|
||||
|
||||
def sizeHint(self):
|
||||
size = self.fontMetrics().height()
|
||||
return QtCore.QSize(size, size)
|
||||
|
||||
def mousePressEvent(self, event):
|
||||
if event.button() == QtCore.Qt.LeftButton:
|
||||
self._mouse_pressed = True
|
||||
super(CloseButton, self).mousePressEvent(event)
|
||||
|
||||
def mouseReleaseEvent(self, event):
|
||||
if self._mouse_pressed:
|
||||
self._mouse_pressed = False
|
||||
if self.rect().contains(event.pos()):
|
||||
self.clicked.emit()
|
||||
|
||||
super(CloseButton, self).mouseReleaseEvent(event)
|
||||
|
||||
def paintEvent(self, event):
|
||||
rect = self.rect()
|
||||
painter = QtGui.QPainter(self)
|
||||
painter.setClipRect(event.rect())
|
||||
pen = QtGui.QPen()
|
||||
pen.setWidth(2)
|
||||
pen.setColor(self._color)
|
||||
pen.setStyle(QtCore.Qt.SolidLine)
|
||||
pen.setCapStyle(QtCore.Qt.RoundCap)
|
||||
painter.setPen(pen)
|
||||
offset = int(rect.height() / 4)
|
||||
top = rect.top() + offset
|
||||
left = rect.left() + offset
|
||||
right = rect.right() - offset
|
||||
bottom = rect.bottom() - offset
|
||||
painter.drawLine(
|
||||
left, top,
|
||||
right, bottom
|
||||
)
|
||||
painter.drawLine(
|
||||
left, bottom,
|
||||
right, top
|
||||
)
|
||||
|
||||
|
||||
class OverlayMessageWidget(QtWidgets.QFrame):
|
||||
"""Message widget showed as overlay.
|
||||
|
||||
Message is hidden after timeout but can be overriden by mouse hover.
|
||||
Mouse hover can add additional 2 seconds of widget's visibility.
|
||||
|
||||
Args:
|
||||
message_id (str): Unique identifier of message widget for
|
||||
'MessageOverlayObject'.
|
||||
message (str): Text shown in message.
|
||||
parent (QWidget): Parent widget where message is visible.
|
||||
timeout (int): Timeout of message's visibility (default 5000).
|
||||
message_type (str): Property which can be used in styles for specific
|
||||
kid of message.
|
||||
"""
|
||||
|
||||
close_requested = QtCore.Signal(str)
|
||||
_default_timeout = 5000
|
||||
|
||||
def __init__(
|
||||
self, message_id, message, parent, message_type=None, timeout=None
|
||||
):
|
||||
super(OverlayMessageWidget, self).__init__(parent)
|
||||
self.setObjectName("OverlayMessageWidget")
|
||||
|
||||
if message_type:
|
||||
set_style_property(self, "type", message_type)
|
||||
|
||||
if not timeout:
|
||||
timeout = self._default_timeout
|
||||
timeout_timer = QtCore.QTimer()
|
||||
timeout_timer.setInterval(timeout)
|
||||
timeout_timer.setSingleShot(True)
|
||||
|
||||
hover_timer = QtCore.QTimer()
|
||||
hover_timer.setInterval(2000)
|
||||
hover_timer.setSingleShot(True)
|
||||
|
||||
label_widget = QtWidgets.QLabel(message, self)
|
||||
label_widget.setAlignment(QtCore.Qt.AlignCenter)
|
||||
label_widget.setWordWrap(True)
|
||||
close_btn = CloseButton(self)
|
||||
|
||||
layout = QtWidgets.QHBoxLayout(self)
|
||||
layout.setContentsMargins(5, 5, 0, 5)
|
||||
layout.addWidget(label_widget, 1)
|
||||
layout.addWidget(close_btn, 0)
|
||||
|
||||
close_btn.clicked.connect(self._on_close_clicked)
|
||||
timeout_timer.timeout.connect(self._on_timer_timeout)
|
||||
hover_timer.timeout.connect(self._on_hover_timeout)
|
||||
|
||||
self._label_widget = label_widget
|
||||
self._message_id = message_id
|
||||
self._timeout_timer = timeout_timer
|
||||
self._hover_timer = hover_timer
|
||||
|
||||
def size_hint_without_word_wrap(self):
|
||||
"""Size hint in cases that word wrap of label is disabled."""
|
||||
self._label_widget.setWordWrap(False)
|
||||
size_hint = self.sizeHint()
|
||||
self._label_widget.setWordWrap(True)
|
||||
return size_hint
|
||||
|
||||
def showEvent(self, event):
|
||||
"""Start timeout on show."""
|
||||
super(OverlayMessageWidget, self).showEvent(event)
|
||||
self._timeout_timer.start()
|
||||
|
||||
def _on_timer_timeout(self):
|
||||
"""On message timeout."""
|
||||
# Skip closing if hover timer is active
|
||||
if not self._hover_timer.isActive():
|
||||
self._close_message()
|
||||
|
||||
def _on_hover_timeout(self):
|
||||
"""Hover timer timed out."""
|
||||
# Check if is still under widget
|
||||
if self.underMouse():
|
||||
self._hover_timer.start()
|
||||
else:
|
||||
self._close_message()
|
||||
|
||||
def _on_close_clicked(self):
|
||||
self._close_message()
|
||||
|
||||
def _close_message(self):
|
||||
"""Emmit close request to 'MessageOverlayObject'."""
|
||||
self.close_requested.emit(self._message_id)
|
||||
|
||||
def enterEvent(self, event):
|
||||
"""Start hover timer on hover."""
|
||||
super(OverlayMessageWidget, self).enterEvent(event)
|
||||
self._hover_timer.start()
|
||||
|
||||
def leaveEvent(self, event):
|
||||
"""Start hover timer on hover leave."""
|
||||
super(OverlayMessageWidget, self).leaveEvent(event)
|
||||
self._hover_timer.start()
|
||||
|
||||
|
||||
class MessageOverlayObject(QtCore.QObject):
    """Object that can be used to add overlay messages over a widget.

    Messages are shown centered at the top of the widget and are
    animated in/out by periodically recalculating their positions.

    Args:
        widget (QWidget): Widget over which overlay messages are shown.
        default_timeout (Optional[int]): Default message timeout used
            when 'add_message' is called without explicit timeout.
    """

    def __init__(self, widget, default_timeout=None):
        super(MessageOverlayObject, self).__init__()

        widget.installEventFilter(self)

        # Timer which triggers recalculation of message positions
        recalculate_timer = QtCore.QTimer()
        recalculate_timer.setInterval(10)

        recalculate_timer.timeout.connect(self._recalculate_positions)

        self._widget = widget
        self._recalculate_timer = recalculate_timer

        # Message ids in the order they were added (oldest first)
        self._messages_order = []
        # Ids of messages that are animating out before removal
        self._closing_messages = set()
        # Mapping of message id -> message widget
        self._messages = {}
        # Vertical gap between message widgets (and from widget top)
        self._spacing = 5
        # Pixels a message moves per recalculation tick
        self._move_size = 4
        # Pixels a closing message moves up per recalculation tick
        self._move_size_remove = 8
        self._default_timeout = default_timeout

    def add_message(self, message, message_type=None, timeout=None):
        """Add single message into overlay.

        Args:
            message (str): Message that will be shown.
            message_type (Optional[str]): Message type can be used as
                property in stylesheets.
            timeout (Optional[int]): Message timeout. Default timeout is
                used when not passed.
        """
        # Skip empty messages
        if not message:
            return

        if timeout is None:
            timeout = self._default_timeout

        # Create unique id of message
        label_id = str(uuid.uuid4())
        # Create message widget
        widget = OverlayMessageWidget(
            label_id, message, self._widget, message_type, timeout
        )
        widget.close_requested.connect(self._on_message_close_request)
        widget.show()

        # Move widget outside of window so it can slide in
        pos = widget.pos()
        pos.setY(pos.y() - widget.height())
        widget.move(pos)
        # Store message
        self._messages[label_id] = widget
        self._messages_order.append(label_id)
        # Trigger recalculation timer
        self._recalculate_timer.start()

    def _on_message_close_request(self, label_id):
        """Message widget requested removement.

        Args:
            label_id (str): Unique id of the message widget.
        """
        widget = self._messages.get(label_id)
        if widget is not None:
            # Add message to closing messages and start recalculation
            self._closing_messages.add(label_id)
            self._recalculate_timer.start()

    def _recalculate_positions(self):
        """Recalculate positions of message widgets.

        Called periodically by the recalculation timer. Moves widgets
        toward their destination positions, animates closing messages
        upwards and removes them once they are out of view. Stops the
        timer when all widgets are in place.
        """
        # Skip if there are no messages to process
        if not self._messages_order:
            self._recalculate_timer.stop()
            return

        # All message widgets are in expected positions
        all_at_place = True
        # Starting y position
        pos_y = self._spacing
        # Current widget width
        widget_width = self._widget.width()
        max_width = widget_width - (2 * self._spacing)
        widget_half_width = widget_width / 2

        # Store message ids that should be removed
        message_ids_to_remove = set()
        for message_id in reversed(self._messages_order):
            widget = self._messages[message_id]
            pos = widget.pos()
            # Messages to remove are moved upwards
            if message_id in self._closing_messages:
                bottom = pos.y() + widget.height()
                # Add message to remove if is not visible
                if bottom < 0 or self._move_size_remove < 1:
                    message_ids_to_remove.add(message_id)
                    continue

                # Calculate new y position of message
                dst_pos_y = pos.y() - self._move_size_remove

            else:
                # Calculate y position of message
                # - use y position of previous message widget and add
                #   move size if is not in final destination yet
                if widget.underMouse():
                    dst_pos_y = pos.y()
                elif pos.y() == pos_y or self._move_size < 1:
                    dst_pos_y = pos_y
                elif pos.y() < pos_y:
                    dst_pos_y = min(pos_y, pos.y() + self._move_size)
                else:
                    dst_pos_y = max(pos_y, pos.y() - self._move_size)

            # Store if widget is in place where should be
            if all_at_place and dst_pos_y != pos_y:
                all_at_place = False

            # Calculate ideal width and height of message widget
            height = widget.heightForWidth(max_width)
            w_size_hint = widget.size_hint_without_word_wrap()
            widget.resize(min(max_width, w_size_hint.width()), height)

            # Center message widget
            size = widget.size()
            pos_x = widget_half_width - (size.width() / 2)
            # Move widget to destination position
            # NOTE: coerce to int - 'widget_half_width' and half of the
            #   widget size are floats (true division) and
            #   'QWidget.move' requires integer coordinates.
            widget.move(int(pos_x), int(dst_pos_y))

            # Add message widget height and spacing for next message widget
            pos_y += size.height() + self._spacing

        # Remove widgets to remove
        for message_id in message_ids_to_remove:
            self._messages_order.remove(message_id)
            self._closing_messages.remove(message_id)
            widget = self._messages.pop(message_id)
            widget.hide()
            widget.deleteLater()

        # Stop recalculation timer if all widgets are where should be
        if all_at_place:
            self._recalculate_timer.stop()

    def eventFilter(self, source, event):
        # Trigger recalculation of timer on resize of widget
        if source is self._widget and event.type() == QtCore.QEvent.Resize:
            self._recalculate_timer.start()

        return super(MessageOverlayObject, self).eventFilter(source, event)
|
||||
69
poetry.lock
generated
69
poetry.lock
generated
|
|
@ -820,7 +820,7 @@ six = "*"
|
|||
|
||||
[[package]]
|
||||
name = "pillow"
|
||||
version = "9.0.0"
|
||||
version = "9.0.1"
|
||||
description = "Python Imaging Library (Fork)"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
|
@ -2310,38 +2310,41 @@ pathlib2 = [
|
|||
{file = "pathlib2-2.3.6.tar.gz", hash = "sha256:7d8bcb5555003cdf4a8d2872c538faa3a0f5d20630cb360e518ca3b981795e5f"},
|
||||
]
|
||||
pillow = [
|
||||
{file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"},
|
||||
{file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"},
|
||||
{file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"},
|
||||
{file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"},
|
||||
{file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"},
|
||||
{file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"},
|
||||
{file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"},
|
||||
{file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"},
|
||||
{file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"},
|
||||
{file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"},
|
||||
{file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"},
|
||||
{file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"},
|
||||
{file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"},
|
||||
{file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"},
|
||||
{file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"},
|
||||
{file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"},
|
||||
{file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"},
|
||||
{file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"},
|
||||
{file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"},
|
||||
{file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"},
|
||||
{file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"},
|
||||
{file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"},
|
||||
{file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"},
|
||||
{file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"},
|
||||
{file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"},
|
||||
{file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"},
|
||||
{file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"},
|
||||
{file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"},
|
||||
{file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"},
|
||||
{file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"},
|
||||
{file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"},
|
||||
{file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"},
|
||||
{file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"},
|
||||
{file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"},
|
||||
{file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"},
|
||||
{file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"},
|
||||
{file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
|
||||
]
|
||||
platformdirs = [
|
||||
{file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"},
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue