[Automated] Merged develop into main

This commit is contained in:
pypebot 2022-04-30 05:36:41 +02:00 committed by GitHub
commit 5c16917c78
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
93 changed files with 3212 additions and 1836 deletions

0
.gitmodules vendored
View file

View file

@ -3,7 +3,6 @@ from .settings import (
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
get_environments,
SystemSettings,
ProjectSettings
@ -23,7 +22,6 @@ from .lib import (
get_app_environments_for_context,
source_hash,
get_latest_version,
get_global_environments,
get_local_site_id,
change_openpype_mongo_url,
create_project_folders,
@ -69,10 +67,10 @@ __all__ = [
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_environments",
"get_project_basic_paths",
"SystemSettings",
"ProjectSettings",
"PypeLogger",
"Logger",
@ -102,8 +100,9 @@ __all__ = [
# get contextual data
"version_up",
"get_hierarchy",
"get_asset",
"get_hierarchy",
"get_workdir_data",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
@ -111,7 +110,6 @@ __all__ = [
"run_subprocess",
"get_latest_version",
"get_global_environments",
"get_local_site_id",
"change_openpype_mongo_url",

View file

@ -873,6 +873,5 @@ class OpenClipSolver(flib.MediaInfoFile):
if feed_clr_obj is not None:
feed_clr_obj = ET.Element(
"colourSpace", {"type": "string"})
feed_clr_obj.text = profile_name
feed_storage_obj.append(feed_clr_obj)
feed_clr_obj.text = profile_name

View file

@ -26,12 +26,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
add_tasks = []
def process(self, context):
project = context.data["flameProject"]
selected_segments = context.data["flameSelectedSegments"]
self.log.debug("__ selected_segments: {}".format(selected_segments))
self.otio_timeline = context.data["otioTimeline"]
self.clips_in_reels = opfapi.get_clips_in_reels(project)
self.fps = context.data["fps"]
# process all sellected
@ -63,9 +61,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
# get file path
file_path = clip_data["fpath"]
# get source clip
source_clip = self._get_reel_clip(file_path)
first_frame = opfapi.get_frame_from_filename(file_path) or 0
head, tail = self._get_head_tail(clip_data, first_frame)
@ -103,7 +98,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
"families": families,
"publish": marker_data["publish"],
"fps": self.fps,
"flameSourceClip": source_clip,
"sourceFirstFrame": int(first_frame),
"path": file_path,
"flameAddTasks": self.add_tasks,
@ -258,14 +252,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
)
return head, tail
def _get_reel_clip(self, path):
match_reel_clip = [
clip for clip in self.clips_in_reels
if clip["fpath"] == path
]
if match_reel_clip:
return match_reel_clip.pop()
def _get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"

View file

@ -1,4 +1,5 @@
import os
import re
from pprint import pformat
from copy import deepcopy
@ -6,6 +7,8 @@ import pyblish.api
import openpype.api
from openpype.hosts.flame import api as opfapi
import flame
class ExtractSubsetResources(openpype.api.Extractor):
"""
@ -20,27 +23,31 @@ class ExtractSubsetResources(openpype.api.Extractor):
# plugin defaults
default_presets = {
"thumbnail": {
"active": True,
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
"export_type": "File Sequence",
"ignore_comment_attrs": True,
"parsed_comment_attrs": False,
"colorspace_out": "Output - sRGB",
"representation_add_range": False,
"representation_tags": ["thumbnail"]
"representation_tags": ["thumbnail"],
"path_regex": ".*"
},
"ftrackpreview": {
"active": True,
"ext": "mov",
"xml_preset_file": "Apple iPad (1920x1080).xml",
"xml_preset_dir": "",
"export_type": "Movie",
"ignore_comment_attrs": True,
"parsed_comment_attrs": False,
"colorspace_out": "Output - Rec.709",
"representation_add_range": True,
"representation_tags": [
"review",
"delete"
]
],
"path_regex": ".*"
}
}
keep_original_representation = False
@ -61,13 +68,10 @@ class ExtractSubsetResources(openpype.api.Extractor):
# flame objects
segment = instance.data["item"]
asset_name = instance.data["asset"]
segment_name = segment.name.get_value()
clip_path = instance.data["path"]
sequence_clip = instance.context.data["flameSequence"]
clip_data = instance.data["flameSourceClip"]
reel_clip = None
if clip_data:
reel_clip = clip_data["PyClip"]
# segment's parent track name
s_track_name = segment.parent.name.get_value()
@ -104,14 +108,44 @@ class ExtractSubsetResources(openpype.api.Extractor):
for unique_name, preset_config in export_presets.items():
modify_xml_data = {}
# get activating attributes
activated_preset = preset_config["active"]
filter_path_regex = preset_config.get("filter_path_regex")
self.log.info(
"Preset `{}` is active `{}` with filter `{}`".format(
unique_name, activated_preset, filter_path_regex
)
)
self.log.debug(
"__ clip_path: `{}`".format(clip_path))
# skip if not activated presete
if not activated_preset:
continue
# exclude by regex filter if any
if (
filter_path_regex
and not re.search(filter_path_regex, clip_path)
):
continue
# get all presets attributes
extension = preset_config["ext"]
preset_file = preset_config["xml_preset_file"]
preset_dir = preset_config["xml_preset_dir"]
export_type = preset_config["export_type"]
repre_tags = preset_config["representation_tags"]
ignore_comment_attrs = preset_config["ignore_comment_attrs"]
parsed_comment_attrs = preset_config["parsed_comment_attrs"]
color_out = preset_config["colorspace_out"]
self.log.info(
"Processing `{}` as `{}` to `{}` type...".format(
preset_file, export_type, extension
)
)
# get attribures related loading in integrate_batch_group
load_to_batch_group = preset_config.get(
"load_to_batch_group")
@ -131,161 +165,157 @@ class ExtractSubsetResources(openpype.api.Extractor):
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
# make test for type of preset and available reel_clip
if (
not reel_clip
and export_type != "Sequence Publish"
):
self.log.warning((
"Skipping preset {}. Not available "
"reel clip for {}").format(
preset_file, segment_name
))
continue
# by default export source clips
exporting_clip = reel_clip
exporting_clip = None
name_patern_xml = "<name>_{}.".format(
unique_name)
if export_type == "Sequence Publish":
# change export clip to sequence
exporting_clip = sequence_clip
exporting_clip = flame.duplicate(sequence_clip)
# change in/out marks to timeline in/out
in_mark = clip_in
out_mark = clip_out
# only keep visible layer where instance segment is child
self.hide_others(
exporting_clip, segment_name, s_track_name)
# add xml tags modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start
})
# change name patern
name_patern_xml = (
"<segment name>_<shot name>_{}.").format(
unique_name)
else:
exporting_clip = self.import_clip(clip_path)
exporting_clip.name.set_value("{}_{}".format(
asset_name, segment_name))
if not ignore_comment_attrs:
# add any xml overrides collected form segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
# change in/out marks to timeline in/out
in_mark = clip_in
out_mark = clip_out
# add xml tags modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start,
"namePattern": name_patern_xml
})
if parsed_comment_attrs:
# add any xml overrides collected form segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
self.log.debug("__ modify_xml_data: {}".format(pformat(
modify_xml_data
)))
# with maintained duplication loop all presets
with opfapi.maintained_object_duplication(
exporting_clip) as duplclip:
kwargs = {}
export_kwargs = {}
# validate xml preset file is filled
if preset_file == "":
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
)
if export_type == "Sequence Publish":
# only keep visible layer where instance segment is child
self.hide_others(duplclip, segment_name, s_track_name)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
# validate xml preset file is filled
if preset_file == "":
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
# define kwargs based on preset type
if "thumbnail" in unique_name:
export_kwargs["thumb_frame_number"] = int(in_mark + (
source_duration_handles / 2))
else:
export_kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# define kwargs based on preset type
if "thumbnail" in unique_name:
kwargs["thumb_frame_number"] = in_mark + (
source_duration_handles / 2)
else:
kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
# export
opfapi.export_clip(
export_dir_path, exporting_clip, preset_path, **export_kwargs)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# export
opfapi.export_clip(
export_dir_path, duplclip, preset_path, **kwargs)
# collect all available content of export dir
files = os.listdir(export_dir_path)
extension = preset_config["ext"]
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
# collect all available content of export dir
files = os.listdir(export_dir_path)
# add files to represetation but add
# imagesequence as list
if (
# first check if path in files is not mov extension
[
f for f in files
if os.path.splitext(f)[-1] == ".mov"
]
# then try if thumbnail is not in unique name
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
instance.data["representations"].append(representation_data)
# add files to represetation but add
# imagesequence as list
if (
# first check if path in files is not mov extension
[
f for f in files
if os.path.splitext(f)[-1] == ".mov"
]
# then try if thumbnail is not in unique name
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
self.log.info("Added representation: {}".format(
representation_data))
instance.data["representations"].append(representation_data)
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
self.log.info("Added representation: {}".format(
representation_data))
if export_type == "Sequence Publish":
# at the end remove the duplicated clip
flame.delete(exporting_clip)
self.log.debug("All representations: {}".format(
pformat(instance.data["representations"])))
@ -373,3 +403,18 @@ class ExtractSubsetResources(openpype.api.Extractor):
for segment in track.segments:
if segment.name.get_value() != segment_name:
segment.hidden = True
def import_clip(self, path):
    """Import a clip from the given file path.

    Logs what was imported and returns the first clip found, or ``None``
    when the path yielded no clips. A warning is emitted when more than
    one clip is imported.
    """
    imported = flame.import_clips(path)
    self.log.info("Clips [{}] imported from `{}`".format(imported, path))

    if not imported:
        self.log.warning("Path `{}` is not having any clips".format(path))
        return None

    if len(imported) > 1:
        self.log.warning(
            "Path `{}` is containing more that one clip".format(path)
        )
    return imported[0]

View file

@ -1,26 +0,0 @@
import pyblish
@pyblish.api.log
class ValidateSourceClip(pyblish.api.InstancePlugin):
    """Validate instance is not having empty `flameSourceClip`"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Source Clip"
    hosts = ["flame"]
    families = ["clip"]
    optional = True
    active = False

    def process(self, instance):
        # the collector stores the matched reel clip under this key
        source_clip = instance.data["flameSourceClip"]
        self.log.debug("_ flame_source_clip: {}".format(source_clip))

        # guard clause: anything non-None passes validation
        if source_clip is not None:
            return

        raise AttributeError((
            "Timeline segment `{}` is not having "
            "relative clip in reels. Please make sure "
            "you push `Save Sources` button in Conform Tab").format(
                instance.data["asset"]
            ))

View file

@ -10,16 +10,6 @@ log = Logger.get_logger(__name__)
def tag_data():
return {
# "Retiming": {
# "editable": "1",
# "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
# "icon": "retiming.png",
# "metadata": {
# "family": "retiming",
# "marginIn": 1,
# "marginOut": 1
# }
# },
"[Lenses]": {
"Set lense here": {
"editable": "1",
@ -48,6 +38,16 @@ def tag_data():
"family": "comment",
"subset": "main"
}
},
"FrameMain": {
"editable": "1",
"note": "Publishing a frame subset.",
"icon": "z_layer_main.png",
"metadata": {
"family": "frame",
"subset": "main",
"format": "png"
}
}
}

View file

@ -0,0 +1,142 @@
from pprint import pformat
import re
import ast
import json
import pyblish.api
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
    """Collect frames from tags.

    Tag is expected to have metadata:
        {
            "family": "frame"
            "subset": "main"
        }
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Frames"
    hosts = ["hiero"]

    def process(self, context):
        # keep the context so _create_instances can create instances on it
        self._context = context

        # collect all sequence tags
        subset_data = self._create_frame_subset_data_sequence(context)

        self.log.debug("__ subset_data: {}".format(
            pformat(subset_data)
        ))

        # create instances
        self._create_instances(subset_data)

    def _get_tag_data(self, tag):
        """Return tag metadata as a plain dict with coerced values.

        Keys get their "tag." prefix stripped. String values are coerced
        to int/bool/None where they match, plain identifiers are kept as
        strings, anything else goes through ``ast.literal_eval`` and falls
        back to the raw string on parse failure.
        """
        data = {}

        # get tag metadata attribute
        tag_data = tag.metadata()

        # convert tag metadata to normal keys names and values to correct types
        for k, v in dict(tag_data).items():
            key = k.replace("tag.", "")

            try:
                # capture exceptions which are related to strings only
                if re.match(r"^[\d]+$", v):
                    value = int(v)
                elif re.match(r"^True$", v):
                    value = True
                elif re.match(r"^False$", v):
                    value = False
                elif re.match(r"^None$", v):
                    value = None
                elif re.match(r"^[\w\d_]+$", v):
                    # plain word: keep as string (literal_eval would raise)
                    value = v
                else:
                    value = ast.literal_eval(v)
            except (ValueError, SyntaxError):
                value = v

            data[key] = value

        return data

    def _create_frame_subset_data_sequence(self, context):
        """Group "frame" family tags of the active timeline by subset.

        Returns:
            dict: ``{subset: {"frames": [...], "format": ..., "asset": ...}}``
        """
        sequence_tags = []
        sequence = context.data["activeTimeline"]

        # get all publishable sequence frames
        publish_frames = range(int(sequence.duration() + 1))

        self.log.debug("__ publish_frames: {}".format(
            pformat(publish_frames)
        ))

        # get all sequence tags
        for tag in sequence.tags():
            tag_data = self._get_tag_data(tag)

            self.log.debug("__ tag_data: {}".format(
                pformat(tag_data)
            ))

            if not tag_data:
                continue

            if "family" not in tag_data:
                continue

            # only "frame" family tags are collected by this plugin
            if tag_data["family"] != "frame":
                continue

            sequence_tags.append(tag_data)

        self.log.debug("__ sequence_tags: {}".format(
            pformat(sequence_tags)
        ))

        # first collect all available subset tag frames
        subset_data = {}

        for tag_data in sequence_tags:
            # assumes the tag metadata carries a "start" frame — the
            # FrameMain tag template does not show one; TODO confirm
            frame = int(tag_data["start"])

            # ignore frames outside of the sequence duration
            if frame not in publish_frames:
                continue

            subset = tag_data["subset"]

            if subset in subset_data:
                # update existing subset key
                subset_data[subset]["frames"].append(frame)
            else:
                # create new subset key
                subset_data[subset] = {
                    "frames": [frame],
                    "format": tag_data["format"],
                    "asset": context.data["assetEntity"]["name"]
                }

        return subset_data

    def _create_instances(self, subset_data):
        """Create one pyblish instance per collected subset."""
        # create instance per subset
        # NOTE: the loop variable deliberately shadows the `subset_data`
        # argument; only the per-subset dict is used inside the loop body
        for subset_name, subset_data in subset_data.items():
            name = "frame" + subset_name.title()

            data = {
                "name": name,
                "label": "{} {}".format(name, subset_data["frames"]),
                "family": "image",
                "families": ["frame"],
                "asset": subset_data["asset"],
                "subset": name,
                "format": subset_data["format"],
                "frames": subset_data["frames"]
            }

            self._context.create_instance(**data)

            self.log.info(
                "Created instance: {}".format(
                    json.dumps(data, sort_keys=True, indent=4)
                )
            )

View file

@ -0,0 +1,82 @@
import os
import pyblish.api
import openpype
class ExtractFrames(openpype.api.Extractor):
    """Extract single frames from the active timeline via oiiotool.

    For every frame number in ``instance.data["frames"]`` the track item
    under that frame is resolved to its source media and one image is
    rendered into the staging directory. The rendered files are attached
    to the instance as a single representation.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Frames"
    hosts = ["hiero"]
    families = ["frame"]
    # containers addressed by subimage index instead of a frame range
    movie_extensions = ["mov", "mp4"]

    def process(self, instance):
        oiio_tool_path = openpype.lib.get_oiio_tools_path()
        staging_dir = self.staging_dir(instance)
        output_template = os.path.join(staging_dir, instance.data["name"])
        sequence = instance.context.data["activeTimeline"]
        frames = instance.data["frames"]
        # constant per instance — hoisted so it is defined even when
        # `frames` is empty (it is needed for the representation below)
        output_ext = instance.data["format"]
        files = []

        for index, frame in enumerate(frames, start=1):
            track_item = sequence.trackItemAt(frame)
            media_source = track_item.source().mediaSource()
            input_path = media_source.fileinfos()[0].filename()
            # map the timeline frame onto the source media frame counter
            input_frame = (
                track_item.mapTimelineToSource(frame) +
                track_item.source().mediaSource().startTime()
            )
            output_path = output_template
            output_path += ".{:04d}.{}".format(int(frame), output_ext)

            args = [oiio_tool_path]
            ext = os.path.splitext(input_path)[1][1:]
            if ext in self.movie_extensions:
                args.extend(["--subimage", str(int(input_frame))])
            else:
                args.extend(["--frames", str(int(input_frame))])
            if ext == "exr":
                # gamma 0.45 applied to EXR input — presumably a
                # linear-to-display bake; confirm against color policy
                args.extend(["--powc", "0.45,0.45,0.45,1.0"])

            args.extend([input_path, "-o", output_path])
            output = openpype.api.run_subprocess(args)

            # oiiotool exits 0 even on some failures, so scan its output
            failed_output = "oiiotool produced no output."
            if failed_output in output:
                raise ValueError(
                    "oiiotool processing failed. Args: {}".format(args)
                )
            files.append(output_path)

            # Feedback to user because "oiiotool" can make the publishing
            # appear unresponsive. enumerate() keeps the count correct
            # even when the same frame number appears twice.
            self.log.info(
                "Processed {} of {} frames".format(index, len(frames))
            )

        # a single frame publishes as a plain file, several as a sequence
        if len(files) == 1:
            repre_files = os.path.basename(files[0])
        else:
            repre_files = [os.path.basename(x) for x in files]

        instance.data["representations"] = [
            {
                "name": output_ext,
                "ext": output_ext,
                "files": repre_files,
                "stagingDir": staging_dir
            }
        ]

View file

@ -68,6 +68,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
"families": [],
"representations": [workfile_representation, thumb_representation]
}
@ -77,6 +78,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
# update context with main project attributes
context_data = {
"activeProject": project,
"activeTimeline": active_timeline,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"colorspace": self.get_colorspace(project),

View file

@ -1,38 +0,0 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Clip Resolution"
hosts = ["hiero"]
families = ["clip"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
item = instance.data["item"]
source_resolution = instance.data.get("sourceResolution", None)
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
# source exception
if source_resolution:
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = item.source().mediaSource().pixelAspect()
resolution_data = {
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
}
# add to instacne data
instance.data.update(resolution_data)
self.log.info("Resolution of instance '{}' is: {}".format(
instance,
resolution_data
))

View file

@ -1,15 +0,0 @@
import pyblish.api
class CollectHostVersion(pyblish.api.ContextPlugin):
    """Inject the hosts version into context"""

    label = "Collect Host and HostVersion"
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        # imported locally: this plugin must only load inside Nuke
        import nuke
        import pyblish.api

        host_name = pyblish.api.current_host()
        context.set_data("host", host_name)
        context.set_data("hostVersion", value=nuke.NUKE_VERSION_STRING)

View file

@ -1,32 +0,0 @@
from pyblish import api
class CollectTagRetime(api.InstancePlugin):
    """Collect Retiming from Tags of selected track items."""

    order = api.CollectorOrder + 0.014
    label = "Collect Retiming Tag"
    hosts = ["hiero"]
    families = ['clip']

    def process(self, instance):
        # walk the item's tags; only retiming-family tags are of interest
        for tag in instance.data["tags"]:
            metadata = dict(tag["metadata"])
            tag_family = metadata.get("tag.family", "")

            if "retiming" not in tag_family:
                continue

            # collect margins and mark the instance as retimed
            margin_in = metadata.get("tag.marginIn", "")
            margin_out = metadata.get("tag.marginOut", "")
            instance.data["retimeMarginIn"] = int(margin_in)
            instance.data["retimeMarginOut"] = int(margin_out)
            instance.data["retime"] = True
            self.log.info("retimeMarginIn: `{}`".format(margin_in))
            self.log.info("retimeMarginOut: `{}`".format(margin_out))

            instance.data["families"] += ["retime"]

View file

@ -1,223 +0,0 @@
from compiler.ast import flatten
from pyblish import api
from openpype.hosts.hiero import api as phiero
import hiero
# from openpype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)
class PreCollectInstances(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder - 0.509
    label = "Pre-collect Instances"
    hosts = ["hiero"]

    def process(self, context):
        # prefer the user's selection; fall back to every tagged item
        track_items = phiero.get_track_items(
            selected=True, check_tagged=True, check_enabled=True)
        # only return enabled track items
        if not track_items:
            track_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)

        # get sequence and video tracks
        sequence = context.data["activeSequence"]
        tracks = sequence.videoTracks()

        # add collection to context
        tracks_effect_items = self.collect_sub_track_items(tracks)
        context.data["tracksEffectItems"] = tracks_effect_items

        self.log.info(
            "Processing enabled track items: {}".format(len(track_items)))

        for _ti in track_items:
            data = {}
            clip = _ti.source()

            # get clips subtracks and anotations
            annotations = self.clip_annotations(clip)
            subtracks = self.clip_subtrack(_ti)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))

            # get pype tag data
            tag_parsed_data = phiero.get_track_item_pype_data(_ti)
            # self.log.debug(pformat(tag_parsed_data))
            if not tag_parsed_data:
                continue

            # only items marked as publishable instances are collected
            if tag_parsed_data.get("id") != "pyblish.avalon.instance":
                continue

            # add tag data to instance data
            data.update({
                k: v for k, v in tag_parsed_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            asset = tag_parsed_data["asset"]
            subset = tag_parsed_data["subset"]
            review_track = tag_parsed_data.get("reviewTrack")
            hiero_track = tag_parsed_data.get("heroTrack")
            audio = tag_parsed_data.get("audio")

            # remove audio attribute from data
            # NOTE(review): pop("audio") raises KeyError when the tag data
            # has no "audio" key — presumably it is always written by the
            # creator; confirm
            data.pop("audio")

            # insert family into families
            family = tag_parsed_data["family"]
            families = [str(f) for f in tag_parsed_data["families"]]
            families.insert(0, str(family))

            track = _ti.parent()
            media_source = _ti.source().mediaSource()
            source_path = media_source.firstpath()
            file_head = media_source.filenameHead()
            file_info = media_source.fileinfos().pop()
            source_first_frame = int(file_info.startFrame())

            # apply only for review and master track instance
            if review_track and hiero_track:
                families += ["review", "ftrack"]

            data.update({
                "name": "{} {} {}".format(asset, subset, families),
                "asset": asset,
                "item": _ti,
                "families": families,

                # tags
                "tags": _ti.tags(),

                # track item attributes
                "track": track.name(),
                "trackItem": track,
                "reviewTrack": review_track,

                # version data
                "versionData": {
                    "colorspace": _ti.sourceMediaColourTransform()
                },

                # source attribute
                "source": source_path,
                "sourceMedia": media_source,
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "sourceFirst": source_first_frame,

                # clip's effect
                "clipEffectItems": subtracks
            })

            instance = context.create_instance(**data)
            self.log.info("Creating instance.data: {}".format(instance.data))

            # a paired audio instance is created for the same track item
            if audio:
                a_data = dict()

                # add tag data to instance data
                a_data.update({
                    k: v for k, v in tag_parsed_data.items()
                    if k not in ("id", "applieswhole", "label")
                })

                # create main attributes
                subset = "audioMain"
                family = "audio"
                families = ["clip", "ftrack"]
                families.insert(0, str(family))
                name = "{} {} {}".format(asset, subset, families)

                a_data.update({
                    "name": name,
                    "subset": subset,
                    "asset": asset,
                    "family": family,
                    "families": families,
                    "item": _ti,

                    # tags
                    "tags": _ti.tags(),
                })

                a_instance = context.create_instance(**a_data)
                self.log.info("Creating audio instance: {}".format(a_instance))

    @staticmethod
    def clip_annotations(clip):
        """
        Returns list of Clip's hiero.core.Annotation
        """
        annotations = []
        # NOTE(review): `flatten` is imported from `compiler.ast`, which
        # exists only on Python 2 — this module cannot import on Python 3
        subTrackItems = flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations

    @staticmethod
    def clip_subtrack(clip):
        """
        Returns list of Clip's hiero.core.SubTrackItem
        """
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # avoid all anotation
            if isinstance(item, hiero.core.Annotation):
                continue
            # # avoid all not anaibled
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks

    @staticmethod
    def collect_sub_track_items(tracks):
        """
        Returns dictionary with track index as key and list of subtracks
        """
        # collect all subtrack items
        sub_track_items = dict()
        for track in tracks:
            items = track.items()

            # skip if no clips on track > need track with effect only
            if items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = flatten(track.subTrackItems())

            # continue only if any subtrack items are collected
            if len(_sub_track_items) < 1:
                continue

            enabled_sti = list()
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if len(enabled_sti) < 1:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items

View file

@ -1,74 +0,0 @@
import os
import pyblish.api
from openpype.hosts.hiero import api as phiero
from openpype.pipeline import legacy_io
class PreCollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    label = "Pre-collect Workfile"
    order = pyblish.api.CollectorOrder - 0.51

    def process(self, context):
        asset = legacy_io.Session["AVALON_ASSET"]
        subset = "workfile"

        project = phiero.get_current_project()
        active_sequence = phiero.get_current_sequence()
        current_file = project.path()

        # workfile's colorspace properties, gathered in one mapping
        _clrs = {
            "useOCIOEnvironmentOverride": (
                project.useOCIOEnvironmentOverride()),
            "lutSetting16Bit": project.lutSetting16Bit(),
            "lutSetting8Bit": project.lutSetting8Bit(),
            "lutSettingFloat": project.lutSettingFloat(),
            "lutSettingLog": project.lutSettingLog(),
            "lutSettingViewer": project.lutSettingViewer(),
            "lutSettingWorkingSpace": project.lutSettingWorkingSpace(),
            "lutUseOCIOForExport": project.lutUseOCIOForExport(),
            "ocioConfigName": project.ocioConfigName(),
            "ocioConfigPath": project.ocioConfigPath(),
        }

        # set main project attributes to context
        context.data["activeProject"] = project
        context.data["activeSequence"] = active_sequence
        context.data["videoTracks"] = active_sequence.videoTracks()
        context.data["audioTracks"] = active_sequence.audioTracks()
        context.data["currentFile"] = current_file
        context.data["colorspace"] = _clrs

        self.log.info("currentFile: {}".format(current_file))

        # creating workfile representation
        representation = {
            'name': 'hrox',
            'ext': 'hrox',
            'files': os.path.basename(current_file),
            "stagingDir": os.path.dirname(current_file),
        }

        instance_data = {
            "name": "{}_{}".format(asset, subset),
            "asset": asset,
            "subset": "{}{}".format(asset, subset.capitalize()),
            "item": project,
            "family": "workfile",

            # version data
            "versionData": {
                "colorspace": _clrs
            },

            # source attribute
            "sourcePath": current_file,
            "representations": [representation]
        }

        instance = context.create_instance(**instance_data)
        self.log.info("Creating instance: {}".format(instance))

View file

@ -400,7 +400,7 @@ def add_write_node(name, **kwarg):
return w
def read(node):
def read_avalon_data(node):
"""Return user-defined knobs from given `node`
Args:
@ -415,8 +415,6 @@ def read(node):
return knob_name[len("avalon:"):]
elif knob_name.startswith("ak:"):
return knob_name[len("ak:"):]
else:
return knob_name
data = dict()
@ -445,7 +443,8 @@ def read(node):
(knob_type == 26 and value)
):
key = compat_prefixed(knob_name)
data[key] = value
if key is not None:
data[key] = value
if knob_name == first_user_knob:
break
@ -507,20 +506,74 @@ def get_created_node_imageio_setting(**kwarg):
log.debug(kwarg)
nodeclass = kwarg.get("nodeclass", None)
creator = kwarg.get("creator", None)
subset = kwarg.get("subset", None)
assert any([creator, nodeclass]), nuke.message(
"`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
imageio_nodes = get_nuke_imageio_settings()["nodes"]["requiredNodes"]
imageio_nodes = get_nuke_imageio_settings()["nodes"]
required_nodes = imageio_nodes["requiredNodes"]
override_nodes = imageio_nodes["overrideNodes"]
imageio_node = None
for node in imageio_nodes:
for node in required_nodes:
log.info(node)
if (nodeclass in node["nukeNodeClass"]) and (
creator in node["plugins"]):
if (
nodeclass in node["nukeNodeClass"]
and creator in node["plugins"]
):
imageio_node = node
break
log.debug("__ imageio_node: {}".format(imageio_node))
# find matching override node
override_imageio_node = None
for onode in override_nodes:
    log.info(onode)
    # NOTE(review): the next two filters read `node` — the loop variable
    # left over from the requiredNodes loop above — instead of the current
    # `onode`. This looks like a bug; confirm whether
    # `onode["nukeNodeClass"]` / `onode["plugins"]` were intended.
    if nodeclass not in node["nukeNodeClass"]:
        continue
    if creator not in node["plugins"]:
        continue
    # skip overrides whose subset regexes do not match this subset
    if (
        onode["subsets"]
        and not any(re.search(s, subset) for s in onode["subsets"])
    ):
        continue

    override_imageio_node = onode
    break
log.debug("__ override_imageio_node: {}".format(override_imageio_node))
# add overrides to imageio_node
if override_imageio_node:
# get all knob names in imageio_node
knob_names = [k["name"] for k in imageio_node["knobs"]]
for oknob in override_imageio_node["knobs"]:
for knob in imageio_node["knobs"]:
# override matching knob name
if oknob["name"] == knob["name"]:
log.debug(
"_ overriding knob: `{}` > `{}`".format(
knob, oknob
))
if not oknob["value"]:
# remove original knob if no value found in oknob
imageio_node["knobs"].remove(knob)
else:
# override knob value with oknob's
knob["value"] = oknob["value"]
# add missing knobs into imageio_node
if oknob["name"] not in knob_names:
log.debug(
"_ adding knob: `{}`".format(oknob))
imageio_node["knobs"].append(oknob)
knob_names.append(oknob["name"])
log.info("ImageIO node: {}".format(imageio_node))
return imageio_node
@ -542,7 +595,7 @@ def get_imageio_input_colorspace(filename):
def on_script_load():
''' Callback for ffmpeg support
'''
if nuke.env['LINUX']:
if nuke.env["LINUX"]:
nuke.tcl('load ffmpegReader')
nuke.tcl('load ffmpegWriter')
else:
@ -567,7 +620,7 @@ def check_inventory_versions():
if container:
node = nuke.toNode(container["objectName"])
avalon_knob_data = read(node)
avalon_knob_data = read_avalon_data(node)
# get representation from io
representation = legacy_io.find_one({
@ -593,7 +646,7 @@ def check_inventory_versions():
versions = legacy_io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
}).distinct("name")
max_version = max(versions)
@ -623,20 +676,20 @@ def writes_version_sync():
if _NODE_TAB_NAME not in each.knobs():
continue
avalon_knob_data = read(each)
avalon_knob_data = read_avalon_data(each)
try:
if avalon_knob_data['families'] not in ["render"]:
log.debug(avalon_knob_data['families'])
if avalon_knob_data["families"] not in ["render"]:
log.debug(avalon_knob_data["families"])
continue
node_file = each['file'].value()
node_file = each["file"].value()
node_version = "v" + get_version_from_path(node_file)
log.debug("node_version: {}".format(node_version))
node_new_file = node_file.replace(node_version, new_version)
each['file'].setValue(node_new_file)
each["file"].setValue(node_new_file)
if not os.path.isdir(os.path.dirname(node_new_file)):
log.warning("Path does not exist! I am creating it.")
os.makedirs(os.path.dirname(node_new_file))
@ -665,18 +718,19 @@ def check_subsetname_exists(nodes, subset_name):
bool: True of False
"""
return next((True for n in nodes
if subset_name in read(n).get("subset", "")),
if subset_name in read_avalon_data(n).get("subset", "")),
False)
def get_render_path(node):
''' Generate Render path from presets regarding avalon knob data
'''
data = {'avalon': read(node)}
data = {'avalon': read_avalon_data(node)}
data_preset = {
"nodeclass": data['avalon']['family'],
"families": [data['avalon']['families']],
"creator": data['avalon']['creator']
"nodeclass": data["avalon"]["family"],
"families": [data["avalon"]["families"]],
"creator": data["avalon"]["creator"],
"subset": data["avalon"]["subset"]
}
nuke_imageio_writes = get_created_node_imageio_setting(**data_preset)
@ -749,7 +803,7 @@ def format_anatomy(data):
def script_name():
''' Returns nuke script path
'''
return nuke.root().knob('name').value()
return nuke.root().knob("name").value()
def add_button_write_to_read(node):
@ -844,7 +898,7 @@ def create_write_node(name, data, input=None, prenodes=None,
# adding dataflow template
log.debug("imageio_writes: `{}`".format(imageio_writes))
for knob in imageio_writes["knobs"]:
_data.update({knob["name"]: knob["value"]})
_data[knob["name"]] = knob["value"]
_data = fix_data_for_node_create(_data)
@ -1193,15 +1247,19 @@ class WorkfileSettings(object):
erased_viewers = []
for v in nuke.allNodes(filter="Viewer"):
v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
# set viewProcess to preset from settings
v["viewerProcess"].setValue(
str(viewer_dict["viewerProcess"])
)
if str(viewer_dict["viewerProcess"]) \
not in v['viewerProcess'].value():
not in v["viewerProcess"].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
erased_viewers.append(v["name"].value())
nuke.delete(v)
# create new viewer
@ -1217,7 +1275,7 @@ class WorkfileSettings(object):
nv[k].setValue(v)
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"]))
if erased_viewers:
log.warning(
@ -1293,12 +1351,12 @@ class WorkfileSettings(object):
for node in nuke.allNodes(filter="Group"):
# get data from avalon knob
avalon_knob_data = read(node)
avalon_knob_data = read_avalon_data(node)
if not avalon_knob_data:
if avalon_knob_data.get("id") != "pyblish.avalon.instance":
continue
if avalon_knob_data["id"] != "pyblish.avalon.instance":
if "creator" not in avalon_knob_data:
continue
# establish families
@ -1309,7 +1367,8 @@ class WorkfileSettings(object):
data_preset = {
"nodeclass": avalon_knob_data["family"],
"families": families,
"creator": avalon_knob_data['creator']
"creator": avalon_knob_data["creator"],
"subset": avalon_knob_data["subset"]
}
nuke_imageio_writes = get_created_node_imageio_setting(
@ -1342,7 +1401,6 @@ class WorkfileSettings(object):
write_node[knob["name"]].setValue(value)
def set_reads_colorspace(self, read_clrs_inputs):
""" Setting colorspace to Read nodes
@ -1368,17 +1426,16 @@ class WorkfileSettings(object):
current = n["colorspace"].value()
future = str(preset_clrsp)
if current != future:
changes.update({
n.name(): {
"from": current,
"to": future
}
})
changes[n.name()] = {
"from": current,
"to": future
}
log.debug(changes)
if changes:
msg = "Read nodes are not set to correct colospace:\n\n"
for nname, knobs in changes.items():
msg += str(
msg += (
" - node: '{0}' is now '{1}' but should be '{2}'\n"
).format(nname, knobs["from"], knobs["to"])
@ -1610,17 +1667,17 @@ def get_hierarchical_attr(entity, attr, default=None):
if not value:
break
if value or entity['type'].lower() == 'project':
if value or entity["type"].lower() == "project":
return value
parent_id = entity['parent']
parent_id = entity["parent"]
if (
entity['type'].lower() == 'asset'
and entity.get('data', {}).get('visualParent')
entity["type"].lower() == "asset"
and entity.get("data", {}).get("visualParent")
):
parent_id = entity['data']['visualParent']
parent_id = entity["data"]["visualParent"]
parent = legacy_io.find_one({'_id': parent_id})
parent = legacy_io.find_one({"_id": parent_id})
return get_hierarchical_attr(parent, attr)
@ -1630,12 +1687,13 @@ def get_write_node_template_attr(node):
'''
# get avalon data from node
data = dict()
data['avalon'] = read(node)
data = {"avalon": read_avalon_data(node)}
data_preset = {
"nodeclass": data['avalon']['family'],
"families": [data['avalon']['families']],
"creator": data['avalon']['creator']
"nodeclass": data["avalon"]["family"],
"families": [data["avalon"]["families"]],
"creator": data["avalon"]["creator"],
"subset": data["avalon"]["subset"]
}
# get template data
@ -1646,10 +1704,11 @@ def get_write_node_template_attr(node):
"file": get_render_path(node)
})
# adding imageio template
{correct_data.update({k: v})
for k, v in nuke_imageio_writes.items()
if k not in ["_id", "_previous"]}
# adding imageio knob presets
for k, v in nuke_imageio_writes.items():
if k in ["_id", "_previous"]:
continue
correct_data[k] = v
# fix badly encoded data
return fix_data_for_node_create(correct_data)
@ -1765,8 +1824,8 @@ def maintained_selection():
Example:
>>> with maintained_selection():
... node['selected'].setValue(True)
>>> print(node['selected'].value())
... node["selected"].setValue(True)
>>> print(node["selected"].value())
False
"""
previous_selection = nuke.selectedNodes()
@ -1774,11 +1833,11 @@ def maintained_selection():
yield
finally:
# unselect all selection in case there is some
current_seletion = nuke.selectedNodes()
[n['selected'].setValue(False) for n in current_seletion]
reset_selection()
# and select all previously selected nodes
if previous_selection:
[n['selected'].setValue(True) for n in previous_selection]
select_nodes(previous_selection)
def reset_selection():

View file

@ -32,7 +32,7 @@ from .lib import (
launch_workfiles_app,
check_inventory_versions,
set_avalon_knob_data,
read,
read_avalon_data,
Context
)
@ -359,7 +359,7 @@ def parse_container(node):
dict: The container schema data for this container node.
"""
data = read(node)
data = read_avalon_data(node)
# (TODO) Remove key validation when `ls` has re-implemented.
#

View file

@ -260,8 +260,6 @@ class ExporterReview(object):
return nuke_imageio["viewer"]["viewerProcess"]
class ExporterReviewLut(ExporterReview):
"""
Generator object for review lut from Nuke
@ -673,7 +671,8 @@ class AbstractWriteRender(OpenPypeCreator):
write_data = {
"nodeclass": self.n_class,
"families": [self.family],
"avalon": self.data
"avalon": self.data,
"subset": self.data["subset"]
}
# add creator data

View file

@ -52,7 +52,7 @@ class ExtractReviewDataMov(openpype.api.Extractor):
for o_name, o_data in self.outputs.items():
f_families = o_data["filter"]["families"]
f_task_types = o_data["filter"]["task_types"]
f_subsets = o_data["filter"]["sebsets"]
f_subsets = o_data["filter"]["subsets"]
self.log.debug(
"f_families `{}` > families: {}".format(

View file

@ -0,0 +1,105 @@
from openpype.pipeline import (
Creator,
CreatedInstance
)
from openpype.lib import (
FileDef,
BoolDef,
)
from .pipeline import (
list_instances,
update_instances,
remove_instances,
HostContext,
)
class TrayPublishCreator(Creator):
    """Base creator for Tray Publisher storing instances via host context.

    Instances are persisted through the host's metadata functions
    (``list_instances``/``update_instances``/``remove_instances``) and are
    matched back to this creator by their ``creator_identifier``.
    """

    # Creator allows changing asset/task context of created instances
    create_allow_context_change = True

    def collect_instances(self):
        """Recreate previously stored instances owned by this creator."""
        for instance_data in list_instances():
            creator_id = instance_data.get("creator_identifier")
            # Only pick up instances created by this exact creator
            if creator_id == self.identifier:
                instance = CreatedInstance.from_existing(
                    instance_data, self
                )
                self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        """Propagate instance changes to the host metadata storage."""
        update_instances(update_list)

    def remove_instances(self, instances):
        """Remove instances from host storage and from current context."""
        remove_instances(instances)
        for instance in instances:
            self._remove_instance_from_context(instance)

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        return self.get_instance_attr_defs()
class SettingsCreator(TrayPublishCreator):
    """Creator class built dynamically from project settings.

    Concrete subclasses are produced at runtime by :meth:`from_settings`
    for each "simple creator" item configured in project settings; the
    settings item fills family, identifier, label, extensions etc. as
    class attributes on the generated subclass.
    """

    create_allow_context_change = True

    # Defaults overridden per-subclass by 'from_settings'
    enable_review = False
    extensions = []
    # Class-level default so attribute access is safe even when a subclass
    # was created without 'allow_sequences' in its settings item
    allow_sequences = False

    # NOTE: 'collect_instances' was removed here — the inherited
    # TrayPublishCreator implementation is identical.

    def create(self, subset_name, data, pre_create_data):
        """Create a new instance and store it through the host context.

        Args:
            subset_name (str): Final subset name of the instance.
            data (dict): Base instance data.
            pre_create_data (dict): Values collected from pre-create
                attribute definitions.
        """
        # Pass precreate data to creator attributes
        data["creator_attributes"] = pre_create_data
        # Mark the instance so collectors can recognize its origin
        data["settings_creator"] = True
        # Create new instance
        new_instance = CreatedInstance(self.family, subset_name, data, self)

        # Host implementation of storing metadata about instance
        HostContext.add_instance(new_instance.data_to_store())
        # Add instance to current context
        self._add_instance_to_context(new_instance)

    def get_instance_attr_defs(self):
        """Return attribute definitions shown on created instances."""
        output = [
            FileDef(
                "filepath",
                folders=False,
                extensions=self.extensions,
                allow_sequences=self.allow_sequences,
                label="Filepath",
            )
        ]
        # Optional review toggle driven by settings
        if self.enable_review:
            output.append(BoolDef("review", label="Review"))
        return output

    @classmethod
    def from_settings(cls, item_data):
        """Create a concrete creator subclass from one settings item.

        Args:
            item_data (dict): One "simple creator" settings item.

        Returns:
            type: New subclass of this class with attributes filled from
                the settings item.
        """
        identifier = item_data["identifier"]
        family = item_data["family"]
        # Fall back to a deterministic identifier based on family
        if not identifier:
            identifier = "settings_{}".format(family)
        return type(
            "{}{}".format(cls.__name__, identifier),
            (cls, ),
            {
                "family": family,
                "identifier": identifier,
                "label": item_data["label"].strip(),
                "icon": item_data["icon"],
                "description": item_data["description"],
                "detailed_description": item_data["detailed_description"],
                "enable_review": item_data["enable_review"],
                "extensions": item_data["extensions"],
                "allow_sequences": item_data["allow_sequences"],
                "default_variants": item_data["default_variants"]
            }
        )

View file

@ -0,0 +1,20 @@
import os
from openpype.api import get_project_settings
def initialize():
    """Generate creator classes from project "simple creators" settings.

    Each settings item is turned into a dynamically created subclass of
    ``SettingsCreator`` and injected into this module's globals so that
    plugin discovery picks it up like a regular creator class.
    """
    # Imported locally — presumably to avoid a circular import at module
    # load time; TODO confirm against package layout
    from openpype.hosts.traypublisher.api.plugin import SettingsCreator

    project_name = os.environ["AVALON_PROJECT"]
    project_settings = get_project_settings(project_name)

    simple_creators = project_settings["traypublisher"]["simple_creators"]

    global_variables = globals()
    for item in simple_creators:
        dynamic_plugin = SettingsCreator.from_settings(item)
        # Expose the generated class at module level for discovery
        global_variables[dynamic_plugin.__name__] = dynamic_plugin


# Run at import time so the generated plugins are available immediately
initialize()

View file

@ -1,97 +0,0 @@
from openpype.hosts.traypublisher.api import pipeline
from openpype.lib import FileDef
from openpype.pipeline import (
Creator,
CreatedInstance
)
class WorkfileCreator(Creator):
    """Creator publishing a backup of a workfile chosen from disk."""

    identifier = "workfile"
    label = "Workfile"
    family = "workfile"
    description = "Publish backup of workfile"

    # Creator allows changing asset/task context of created instances
    create_allow_context_change = True

    # Workfile extensions of the supported host applications
    extensions = [
        # Maya
        ".ma", ".mb",
        # Nuke
        ".nk",
        # Hiero
        ".hrox",
        # Houdini
        ".hip", ".hiplc", ".hipnc",
        # Blender
        ".blend",
        # Celaction
        ".scn",
        # TVPaint
        ".tvpp",
        # Fusion
        ".comp",
        # Harmony
        ".zip",
        # Premiere
        ".prproj",
        # Resolve
        ".drp",
        # Photoshop
        ".psd", ".psb",
        # Aftereffects
        ".aep"
    ]

    def get_icon(self):
        """Return icon name used for this creator in UI."""
        return "fa.file"

    def collect_instances(self):
        """Recreate previously stored instances owned by this creator."""
        for instance_data in pipeline.list_instances():
            creator_id = instance_data.get("creator_identifier")
            if creator_id == self.identifier:
                instance = CreatedInstance.from_existing(
                    instance_data, self
                )
                self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        """Propagate instance changes to the host metadata storage."""
        pipeline.update_instances(update_list)

    def remove_instances(self, instances):
        """Remove instances from host storage and from current context."""
        pipeline.remove_instances(instances)
        for instance in instances:
            self._remove_instance_from_context(instance)

    def create(self, subset_name, data, pre_create_data):
        """Create a new workfile instance and store it in host context."""
        # Pass precreate data to creator attributes
        data["creator_attributes"] = pre_create_data
        # Create new instance
        new_instance = CreatedInstance(self.family, subset_name, data, self)

        # Host implementation of storing metadata about instance
        pipeline.HostContext.add_instance(new_instance.data_to_store())
        # Add instance to current context
        self._add_instance_to_context(new_instance)

    def get_default_variants(self):
        """Return default variant names offered in UI."""
        return [
            "Main"
        ]

    def get_instance_attr_defs(self):
        """Return attribute definitions shown on created instances."""
        output = [
            FileDef(
                "filepath",
                folders=False,
                extensions=self.extensions,
                label="Filepath"
            )
        ]
        return output

    def get_pre_create_attr_defs(self):
        # Use same attributes as for instance attributes
        return self.get_instance_attr_defs()

    def get_detail_description(self):
        """Return markdown description shown in publisher UI."""
        return """# Publish workfile backup"""

View file

@ -0,0 +1,48 @@
import os
import pyblish.api
class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
    """Collect data for instances created by settings creators.

    Fills families, representations and source filepaths based on the
    creator attributes stored on instances marked with the
    ``settings_creator`` key.
    """

    label = "Collect Settings Simple Instances"
    order = pyblish.api.CollectorOrder - 0.49

    hosts = ["traypublisher"]

    def process(self, instance):
        # Only process instances created by a 'SettingsCreator'
        if not instance.data.get("settings_creator"):
            return

        if "families" not in instance.data:
            instance.data["families"] = []

        if "representations" not in instance.data:
            instance.data["representations"] = []
        repres = instance.data["representations"]

        creator_attributes = instance.data["creator_attributes"]
        # Optional review family driven by creator attribute toggle
        if creator_attributes.get("review"):
            instance.data["families"].append("review")

        filepath_item = creator_attributes["filepath"]
        # Dump of the collected file item (directory + filenames);
        # demoted from 'info' to 'debug' — raw dict is debug noise
        self.log.debug(filepath_item)
        filepaths = [
            os.path.join(filepath_item["directory"], filename)
            for filename in filepath_item["filenames"]
        ]
        # Stored for validation plugins (existence checks)
        instance.data["sourceFilepaths"] = filepaths

        filenames = filepath_item["filenames"]
        # Extension of the first file defines the representation name
        ext = os.path.splitext(filenames[0])[-1]
        # Single file is stored as plain string, sequences as a list
        if len(filenames) == 1:
            filenames = filenames[0]

        repres.append({
            "ext": ext,
            "name": ext,
            "stagingDir": filepath_item["directory"],
            "files": filenames
        })

View file

@ -1,31 +0,0 @@
import os
import pyblish.api
class CollectWorkfile(pyblish.api.InstancePlugin):
    """Collect representation of workfile instances."""

    label = "Collect Workfile"
    order = pyblish.api.CollectorOrder - 0.49
    families = ["workfile"]
    hosts = ["traypublisher"]

    def process(self, instance):
        """Build a single representation from the 'filepath' attribute."""
        if "representations" not in instance.data:
            instance.data["representations"] = []
        repres = instance.data["representations"]

        creator_attributes = instance.data["creator_attributes"]
        filepath = creator_attributes["filepath"]
        # Stored for validation plugins (existence checks)
        instance.data["sourceFilepath"] = filepath
        staging_dir = os.path.dirname(filepath)
        filename = os.path.basename(filepath)
        # Extension (with dot) doubles as the representation name
        ext = os.path.splitext(filename)[-1]

        repres.append({
            "ext": ext,
            "name": ext,
            "stagingDir": staging_dir,
            "files": filename
        })

View file

@ -0,0 +1,47 @@
import os
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateWorkfilePath(pyblish.api.InstancePlugin):
    """Validate that collected source filepaths exist on disk.

    Skips instances that do not carry 'sourceFilepaths' (collected by
    a preceding collector plugin).
    """

    label = "Validate Workfile"
    order = pyblish.api.ValidatorOrder - 0.49

    hosts = ["traypublisher"]

    def process(self, instance):
        if "sourceFilepaths" not in instance.data:
            self.log.info((
                "Can't validate source filepaths existence."
                " Instance does not have collected 'sourceFilepaths'"
            ))
            return

        # Key presence was checked above so direct access is safe
        filepaths = instance.data["sourceFilepaths"]
        not_found_files = [
            filepath
            for filepath in filepaths
            if not os.path.exists(filepath)
        ]
        if not_found_files:
            # One bullet line per missing file for the error report
            joined_paths = "\n".join([
                "- {}".format(filepath)
                for filepath in not_found_files
            ])
            raise PublishValidationError(
                (
                    "Filepath of '{}' instance \"{}\" does not exist:\n{}"
                ).format(
                    instance.data["family"],
                    instance.data["name"],
                    joined_paths
                ),
                "File not found",
                (
                    "## Files were not found\nFiles\n{}"
                    "\n\nCheck if the path is still available."
                ).format(joined_paths)
            )

View file

@ -1,35 +0,0 @@
import os
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateWorkfilePath(pyblish.api.InstancePlugin):
    """Validate that the workfile instance filepath is set and exists."""

    label = "Validate Workfile"
    order = pyblish.api.ValidatorOrder - 0.49
    families = ["workfile"]
    hosts = ["traypublisher"]

    def process(self, instance):
        filepath = instance.data["sourceFilepath"]
        # Empty/None path means the artist never filled the attribute
        if not filepath:
            raise PublishValidationError(
                (
                    "Filepath of 'workfile' instance \"{}\" is not set"
                ).format(instance.data["name"]),
                "File not filled",
                "## Missing file\nYou are supposed to fill the path."
            )

        if not os.path.exists(filepath):
            raise PublishValidationError(
                (
                    "Filepath of 'workfile' instance \"{}\" does not exist: {}"
                ).format(instance.data["name"], filepath),
                "File not found",
                (
                    "## File was not found\nFile \"{}\" was not found."
                    " Check if the path is still available."
                ).format(filepath)
            )

View file

@ -24,7 +24,9 @@ class CreateRenderlayer(plugin.Creator):
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
)
dynamic_subset_keys = ["render_pass", "render_layer", "group"]
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer", "group"
]
@classmethod
def get_dynamic_data(
@ -34,12 +36,17 @@ class CreateRenderlayer(plugin.Creator):
variant, task_name, asset_id, project_name, host_name
)
# Use render pass name from creator's plugin
dynamic_data["render_pass"] = cls.render_pass
dynamic_data["renderpass"] = cls.render_pass
# Add variant to render layer
dynamic_data["render_layer"] = variant
dynamic_data["renderlayer"] = variant
# Change family for subset name fill
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
return dynamic_data
@classmethod

View file

@ -20,7 +20,9 @@ class CreateRenderPass(plugin.Creator):
icon = "cube"
defaults = ["Main"]
dynamic_subset_keys = ["render_pass", "render_layer"]
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer"
]
@classmethod
def get_dynamic_data(
@ -29,9 +31,13 @@ class CreateRenderPass(plugin.Creator):
dynamic_data = super(CreateRenderPass, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["render_pass"] = variant
dynamic_data["renderpass"] = variant
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
return dynamic_data
@classmethod
@ -115,6 +121,7 @@ class CreateRenderPass(plugin.Creator):
else:
render_layer = beauty_instance["variant"]
subset_name_fill_data["renderlayer"] = render_layer
subset_name_fill_data["render_layer"] = render_layer
# Format dynamic keys in subset name
@ -129,7 +136,7 @@ class CreateRenderPass(plugin.Creator):
self.data["group_id"] = group_id
self.data["pass"] = variant
self.data["render_layer"] = render_layer
self.data["renderlayer"] = render_layer
# Collect selected layer ids to be stored into instance
layer_names = [layer["name"] for layer in selected_layers]

View file

@ -45,6 +45,21 @@ class CollectInstances(pyblish.api.ContextPlugin):
for instance_data in filtered_instance_data:
instance_data["fps"] = context.data["sceneFps"]
# Conversion from older instances
# - change 'render_layer' to 'renderlayer'
render_layer = instance_data.get("instance_data")
if not render_layer:
# Render Layer has only variant
if instance_data["family"] == "renderLayer":
render_layer = instance_data.get("variant")
# Backwards compatibility for renderPasses
elif "render_layer" in instance_data:
render_layer = instance_data["render_layer"]
if render_layer:
instance_data["renderlayer"] = render_layer
# Store workfile instance data to instance data
instance_data["originData"] = copy.deepcopy(instance_data)
# Global instance data modifications
@ -191,7 +206,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"Creating render pass instance. \"{}\"".format(pass_name)
)
# Change label
render_layer = instance_data["render_layer"]
render_layer = instance_data["renderlayer"]
# Backwards compatibility
# - subset names were not stored as final subset names during creation

View file

@ -69,9 +69,13 @@ class CollectRenderScene(pyblish.api.ContextPlugin):
# Variant is using render pass name
variant = self.render_layer
dynamic_data = {
"render_layer": self.render_layer,
"render_pass": self.render_pass
"renderlayer": self.render_layer,
"renderpass": self.render_pass,
}
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
dynamic_data["render_pass"] = dynamic_data["renderpass"]
task_name = workfile_context["task"]
subset_name = get_subset_name_with_asset_doc(
@ -100,7 +104,9 @@ class CollectRenderScene(pyblish.api.ContextPlugin):
"representations": [],
"layers": copy.deepcopy(context.data["layersData"]),
"asset": asset_name,
"task": task_name
"task": task_name,
# Add render layer to instance data
"renderlayer": self.render_layer
}
instance = context.create_instance(**instance_data)

View file

@ -47,6 +47,7 @@ def install():
print("installing OpenPype for Unreal ...")
print("-=" * 40)
logger.info("installing OpenPype for Unreal")
pyblish.api.register_host("unreal")
pyblish.api.register_plugin_path(str(PUBLISH_PATH))
register_loader_plugin_path(str(LOAD_PATH))
register_creator_plugin_path(str(CREATE_PATH))
@ -392,3 +393,24 @@ def cast_map_to_str_dict(umap) -> dict:
"""
return {str(key): str(value) for (key, value) in umap.items()}
def get_subsequences(sequence: unreal.LevelSequence):
"""Get list of subsequences from sequence.
Args:
sequence (unreal.LevelSequence): Sequence
Returns:
list(unreal.LevelSequence): List of subsequences
"""
tracks = sequence.get_master_tracks()
subscene_track = None
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
subscene_track = t
break
if subscene_track is not None and subscene_track.get_sections():
return subscene_track.get_sections()
return []

View file

@ -0,0 +1,125 @@
import unreal
from openpype.hosts.unreal.api import pipeline
# Module-level references kept alive while a render runs; cleared again
# in the queue-finished callback so the objects can be garbage collected.
queue = None
executor = None
def _queue_finish_callback(exec, success):
    """Log render result and drop module references to queue/executor."""
    unreal.log("Render completed. Success: " + str(success))

    # Delete our reference so we don't keep it alive.
    global executor
    global queue
    del executor
    del queue
def _job_finish_callback(job, success):
    """Called after each individual job in the queue finishes."""
    # You can make any edits you want to the editor world here, and the world
    # will be duplicated when the next render happens. Make sure you undo your
    # edits in OnQueueFinishedCallback if you don't want to leak state changes
    # into the editor world.
    unreal.log("Individual job completed.")
def start_rendering():
    """
    Start the rendering process.

    Collects selected 'OpenPypePublishInstance' assets with a 'render'
    family, expands their sequences (including subsequences), builds
    Movie Render Queue jobs and kicks off a PIE render.
    """
    print("Starting rendering...")

    # Get selected sequences
    assets = unreal.EditorUtilityLibrary.get_selected_assets()

    # instances = pipeline.ls_inst()
    instances = [
        a for a in assets
        if a.get_class().get_name() == "OpenPypePublishInstance"]

    # Parsed container data for render-family instances only
    inst_data = []

    for i in instances:
        data = pipeline.parse_container(i.get_path_name())
        if data["family"] == "render":
            inst_data.append(data)

    # subsystem = unreal.get_editor_subsystem(
    #     unreal.MoviePipelineQueueSubsystem)
    # queue = subsystem.get_queue()
    # Stored in module global so the callback can release it later
    global queue
    queue = unreal.MoviePipelineQueue()

    ar = unreal.AssetRegistryHelpers.get_asset_registry()

    # NOTE(review): job creation below happens per instance; indentation
    # reconstructed — confirm the 'for r in render_list' loop nests here
    for i in inst_data:
        sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset()

        # Seed the worklist with the instance's own sequence; frame end is
        # exclusive, hence the +1
        sequences = [{
            "sequence": sequence,
            "output": f"{i['output']}",
            "frame_range": (
                int(float(i["frameStart"])),
                int(float(i["frameEnd"])) + 1)
        }]
        render_list = []

        # Get all the sequences to render. If there are subsequences,
        # add them and their frame ranges to the render list. We also
        # use the names for the output paths.
        for s in sequences:
            subscenes = pipeline.get_subsequences(s.get('sequence'))

            if subscenes:
                # Appending while iterating extends the traversal to
                # nested subsequences
                for ss in subscenes:
                    sequences.append({
                        "sequence": ss.get_sequence(),
                        "output": (f"{s.get('output')}/"
                                   f"{ss.get_sequence().get_name()}"),
                        "frame_range": (
                            ss.get_start_frame(), ss.get_end_frame())
                    })
            else:
                # Avoid rendering camera sequences
                if "_camera" not in s.get('sequence').get_name():
                    render_list.append(s)

        # Create the rendering jobs and add them to the queue.
        for r in render_list:
            job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob)
            job.sequence = unreal.SoftObjectPath(i["master_sequence"])
            job.map = unreal.SoftObjectPath(i["master_level"])
            job.author = "OpenPype"

            # User data could be used to pass data to the job, that can be
            # read in the job's OnJobFinished callback. We could,
            # for instance, pass the AvalonPublishInstance's path to the job.
            # job.user_data = ""

            settings = job.get_configuration().find_or_add_setting_by_class(
                unreal.MoviePipelineOutputSetting)
            settings.output_resolution = unreal.IntPoint(1920, 1080)
            settings.custom_start_frame = r.get("frame_range")[0]
            settings.custom_end_frame = r.get("frame_range")[1]
            settings.use_custom_playback_range = True
            settings.file_name_format = "{sequence_name}.{frame_number}"
            settings.output_directory.path += r.get('output')

            renderPass = job.get_configuration().find_or_add_setting_by_class(
                unreal.MoviePipelineDeferredPassBase)
            renderPass.disable_multisample_effects = True

            job.get_configuration().find_or_add_setting_by_class(
                unreal.MoviePipelineImageSequenceOutput_PNG)

    # If there are jobs in the queue, start the rendering process.
    if queue.get_jobs():
        global executor
        executor = unreal.MoviePipelinePIEExecutor()
        executor.on_executor_finished_delegate.add_callable_unique(
            _queue_finish_callback)
        executor.on_individual_job_finished_delegate.add_callable_unique(
            _job_finish_callback)  # Only available on PIE Executor
        executor.execute(queue)

View file

@ -7,6 +7,7 @@ from openpype import (
)
from openpype.tools.utils import host_tools
from openpype.tools.utils.lib import qt_app_context
from openpype.hosts.unreal.api import rendering
class ToolsBtnsWidget(QtWidgets.QWidget):
@ -20,6 +21,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
load_btn = QtWidgets.QPushButton("Load...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
manage_btn = QtWidgets.QPushButton("Manage...", self)
render_btn = QtWidgets.QPushButton("Render...", self)
experimental_tools_btn = QtWidgets.QPushButton(
"Experimental tools...", self
)
@ -30,6 +32,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
layout.addWidget(load_btn, 0)
layout.addWidget(publish_btn, 0)
layout.addWidget(manage_btn, 0)
layout.addWidget(render_btn, 0)
layout.addWidget(experimental_tools_btn, 0)
layout.addStretch(1)
@ -37,6 +40,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
load_btn.clicked.connect(self._on_load)
publish_btn.clicked.connect(self._on_publish)
manage_btn.clicked.connect(self._on_manage)
render_btn.clicked.connect(self._on_render)
experimental_tools_btn.clicked.connect(self._on_experimental)
def _on_create(self):
@ -51,6 +55,9 @@ class ToolsBtnsWidget(QtWidgets.QWidget):
def _on_manage(self):
self.tool_required.emit("sceneinventory")
def _on_render(self):
rendering.start_rendering()
def _on_experimental(self):
self.tool_required.emit("experimental_tools")

View file

@ -254,6 +254,7 @@ def create_unreal_project(project_name: str,
{"Name": "PythonScriptPlugin", "Enabled": True},
{"Name": "EditorScriptingUtilities", "Enabled": True},
{"Name": "SequencerScripting", "Enabled": True},
{"Name": "MovieRenderPipeline", "Enabled": True},
{"Name": "OpenPype", "Enabled": True}
]
}

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from unreal import EditorLevelLibrary as ell
from unreal import EditorLevelLibrary
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api.pipeline import instantiate
@ -28,13 +29,13 @@ class CreateLayout(plugin.Creator):
# sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
# selection = [a.get_path_name() for a in sel_objects]
data["level"] = ell.get_editor_world().get_path_name()
data["level"] = EditorLevelLibrary.get_editor_world().get_path_name()
data["members"] = []
if (self.options or {}).get("useSelection"):
# Set as members the selected actors
for actor in ell.get_selected_level_actors():
for actor in EditorLevelLibrary.get_selected_level_actors():
data["members"].append("{}.{}".format(
actor.get_outer().get_name(), actor.get_name()))

View file

@ -0,0 +1,111 @@
import unreal
from openpype.pipeline import legacy_io
from openpype.hosts.unreal.api import pipeline
from openpype.hosts.unreal.api.plugin import Creator
class CreateRender(Creator):
    """Create instance for sequence for rendering"""

    name = "unrealRender"
    label = "Unreal - Render"
    family = "render"
    icon = "cube"
    # Only level sequences can be picked from selection
    asset_types = ["LevelSequence"]

    # Root path where publish instance containers are created
    root = "/Game/OpenPype/PublishInstances"
    suffix = "_INS"

    def process(self):
        """Create render publish instances for selected/derived sequences.

        Resolves the master sequence and master level of the current
        asset, matches each selected sequence (or the instance's own
        sequence) against the master/subsequence hierarchy and imprints
        a publish instance container with the resolved frame range data.
        """
        subset = self.data["subset"]

        ar = unreal.AssetRegistryHelpers.get_asset_registry()

        # Get the master sequence and the master level.
        # There should be only one sequence and one level in the directory.
        filter = unreal.ARFilter(
            class_names=["LevelSequence"],
            package_paths=[f"/Game/OpenPype/{self.data['asset']}"],
            recursive_paths=False)
        sequences = ar.get_assets(filter)
        ms = sequences[0].get_editor_property('object_path')
        filter = unreal.ARFilter(
            class_names=["World"],
            package_paths=[f"/Game/OpenPype/{self.data['asset']}"],
            recursive_paths=False)
        levels = ar.get_assets(filter)
        ml = levels[0].get_editor_property('object_path')

        selection = []
        if (self.options or {}).get("useSelection"):
            sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
            selection = [
                a.get_path_name() for a in sel_objects
                if a.get_class().get_name() in self.asset_types]
        else:
            selection.append(self.data['sequence'])

        unreal.log(f"selection: {selection}")

        path = f"{self.root}"
        unreal.EditorAssetLibrary.make_directory(path)

        ar = unreal.AssetRegistryHelpers.get_asset_registry()

        # NOTE(review): indentation of the nested search loops below is
        # reconstructed from syntax — confirm against original file
        for a in selection:
            ms_obj = ar.get_asset_by_object_path(ms).get_asset()

            seq_data = None

            if a == ms:
                # Selected sequence is the master sequence itself
                seq_data = {
                    "sequence": ms_obj,
                    "output": f"{ms_obj.get_name()}",
                    "frame_range": (
                        ms_obj.get_playback_start(), ms_obj.get_playback_end())
                }
            else:
                # Breadth-first search through subsequences for a match
                seq_data_list = [{
                    "sequence": ms_obj,
                    "output": f"{ms_obj.get_name()}",
                    "frame_range": (
                        ms_obj.get_playback_start(), ms_obj.get_playback_end())
                }]

                for s in seq_data_list:
                    subscenes = pipeline.get_subsequences(s.get('sequence'))

                    for ss in subscenes:
                        curr_data = {
                            "sequence": ss.get_sequence(),
                            "output": (f"{s.get('output')}/"
                                       f"{ss.get_sequence().get_name()}"),
                            "frame_range": (
                                ss.get_start_frame(), ss.get_end_frame() - 1)
                        }

                        if ss.get_sequence().get_path_name() == a:
                            seq_data = curr_data
                            break

                        seq_data_list.append(curr_data)
                    if seq_data is not None:
                        break

            # Skip selections that are not part of the master hierarchy
            if not seq_data:
                continue

            d = self.data.copy()
            d["members"] = [a]
            d["sequence"] = a
            d["master_sequence"] = ms
            d["master_level"] = ml
            d["output"] = seq_data.get('output')
            d["frameStart"] = seq_data.get('frame_range')[0]
            d["frameEnd"] = seq_data.get('frame_range')[1]

            container_name = f"{subset}{self.suffix}"
            pipeline.create_publish_instance(
                instance=container_name, path=path)
            pipeline.imprint(f"{path}/{container_name}", d)

View file

@ -3,13 +3,17 @@
import os
import json
import unreal
from unreal import EditorAssetLibrary
from unreal import MovieSceneSkeletalAnimationTrack
from unreal import MovieSceneSkeletalAnimationSection
from openpype.pipeline import (
get_representation_path,
AVALON_CONTAINER_ID
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
import unreal # noqa
class AnimationFBXLoader(plugin.Loader):
@ -21,59 +25,13 @@ class AnimationFBXLoader(plugin.Loader):
icon = "cube"
color = "orange"
def load(self, context, name, namespace, options=None):
"""
Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and OpenPype container
root = "/Game/OpenPype/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}/{}".format(root, asset, name), suffix="")
container_name += suffix
unreal.EditorAssetLibrary.make_directory(asset_dir)
def _process(self, asset_dir, asset_name, instance_name):
automated = False
actor = None
task = unreal.AssetImportTask()
task.options = unreal.FbxImportUI()
lib_path = self.fname.replace("fbx", "json")
with open(lib_path, "r") as fp:
data = json.load(fp)
instance_name = data.get("instance_name")
if instance_name:
automated = True
# Old method to get the actor
@ -131,6 +89,116 @@ class AnimationFBXLoader(plugin.Loader):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
animation = None
for a in asset_content:
imported_asset_data = EditorAssetLibrary.find_asset_data(a)
imported_asset = unreal.AssetRegistryHelpers.get_asset(
imported_asset_data)
if imported_asset.__class__ == unreal.AnimSequence:
animation = imported_asset
break
if animation:
animation.set_editor_property('enable_root_motion', True)
actor.skeletal_mesh_component.set_editor_property(
'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE)
actor.skeletal_mesh_component.animation_data.set_editor_property(
'anim_to_play', animation)
return animation
def load(self, context, name, namespace, options=None):
"""
Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and avalon container
hierarchy = context.get('asset').get('data').get('parents')
root = "/Game/OpenPype"
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/Animations/{asset}/{name}", suffix="")
hierarchy_dir = root
for h in hierarchy:
hierarchy_dir = f"{hierarchy_dir}/{h}"
hierarchy_dir = f"{hierarchy_dir}/{asset}"
container_name += suffix
EditorAssetLibrary.make_directory(asset_dir)
libpath = self.fname.replace("fbx", "json")
with open(libpath, "r") as fp:
data = json.load(fp)
instance_name = data.get("instance_name")
animation = self._process(asset_dir, container_name, instance_name)
asset_content = EditorAssetLibrary.list_assets(
hierarchy_dir, recursive=True, include_folder=False)
# Get the sequence for the layout, excluding the camera one.
sequences = [a for a in asset_content
if (EditorAssetLibrary.find_asset_data(a).get_class() ==
unreal.LevelSequence.static_class() and
"_camera" not in a.split("/")[-1])]
ar = unreal.AssetRegistryHelpers.get_asset_registry()
for s in sequences:
sequence = ar.get_asset_by_object_path(s).get_asset()
possessables = [
p for p in sequence.get_possessables()
if p.get_display_name() == instance_name]
for p in possessables:
tracks = [
t for t in p.get_tracks()
if (t.get_class() ==
MovieSceneSkeletalAnimationTrack.static_class())]
for t in tracks:
sections = [
s for s in t.get_sections()
if (s.get_class() ==
MovieSceneSkeletalAnimationSection.static_class())]
for s in sections:
s.params.set_editor_property('animation', animation)
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
@ -150,29 +218,11 @@ class AnimationFBXLoader(plugin.Loader):
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
imported_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=False)
animation = None
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
imported_asset_data = unreal.EditorAssetLibrary.find_asset_data(a)
imported_asset = unreal.AssetRegistryHelpers.get_asset(
imported_asset_data)
if imported_asset.__class__ == unreal.AnimSequence:
animation = imported_asset
break
if animation:
animation.set_editor_property('enable_root_motion', True)
actor.skeletal_mesh_component.set_editor_property(
'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE)
actor.skeletal_mesh_component.animation_data.set_editor_property(
'anim_to_play', animation)
return asset_content
for a in imported_content:
EditorAssetLibrary.save_asset(a)
def update(self, container, representation):
name = container["asset_name"]
@ -218,7 +268,7 @@ class AnimationFBXLoader(plugin.Loader):
task.options.anim_sequence_import_data.set_editor_property(
'convert_scene', True)
skeletal_mesh = unreal.EditorAssetLibrary.load_asset(
skeletal_mesh = EditorAssetLibrary.load_asset(
container.get('namespace') + "/" + container.get('asset_name'))
skeleton = skeletal_mesh.get_editor_property('skeleton')
task.options.set_editor_property('skeleton', skeleton)
@ -235,22 +285,22 @@ class AnimationFBXLoader(plugin.Loader):
"parent": str(representation["parent"])
})
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_content = EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
EditorAssetLibrary.save_asset(a)
def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)
unreal.EditorAssetLibrary.delete_directory(path)
EditorAssetLibrary.delete_directory(path)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_content = EditorAssetLibrary.list_assets(
parent_path, recursive=False, include_folder=True
)
if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)
EditorAssetLibrary.delete_directory(parent_path)

View file

@ -2,13 +2,16 @@
"""Load camera from FBX."""
import os
import unreal
from unreal import EditorAssetLibrary
from unreal import EditorLevelLibrary
from openpype.pipeline import (
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
import unreal # noqa
class CameraLoader(plugin.Loader):
@ -20,6 +23,40 @@ class CameraLoader(plugin.Loader):
icon = "cube"
color = "orange"
def _get_data(self, asset_name):
    """Return the ``data`` sub-document of the asset named *asset_name*.

    Queries the active project database through ``legacy_io``; raises
    ``AttributeError`` if no matching asset document exists.
    """
    query = {"type": "asset", "name": asset_name}
    asset_doc = legacy_io.find_one(query)
    return asset_doc.get("data")
def _set_sequence_hierarchy(
    self, seq_i, seq_j, min_frame_j, max_frame_j
):
    """Attach level sequence *seq_j* as a sub-sequence of *seq_i*.

    Reuses an existing sub-track/sub-section when one already points at
    *seq_j*; otherwise a new section covering the
    ``min_frame_j``..``max_frame_j`` range (end exclusive) is created.
    """
    sub_track_class = unreal.MovieSceneSubTrack.static_class()
    track = next(
        (t for t in seq_i.get_master_tracks()
         if t.get_class() == sub_track_class),
        None)
    if not track:
        track = seq_i.add_master_track(unreal.MovieSceneSubTrack)

    # Reuse an existing section already bound to seq_j, if any.
    section = next(
        (s for s in track.get_sections()
         if s.get_editor_property('sub_sequence') == seq_j),
        None)
    if not section:
        section = track.add_section()
        # Row index is taken after adding, so it counts the new section.
        section.set_row_index(len(track.get_sections()))
        section.set_editor_property('sub_sequence', seq_j)
        section.set_range(
            min_frame_j,
            max_frame_j + 1)
def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.
@ -43,8 +80,14 @@ class CameraLoader(plugin.Loader):
list(str): list of container content
"""
# Create directory for asset and OpenPype container
root = "/Game/OpenPype/Assets"
# Create directory for asset and avalon container
hierarchy = context.get('asset').get('data').get('parents')
root = "/Game/OpenPype"
hierarchy_dir = root
hierarchy_list = []
for h in hierarchy:
hierarchy_dir = f"{hierarchy_dir}/{h}"
hierarchy_list.append(hierarchy_dir)
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@ -54,10 +97,10 @@ class CameraLoader(plugin.Loader):
tools = unreal.AssetToolsHelpers().get_asset_tools()
# Create a unique name for the camera directory
unique_number = 1
if unreal.EditorAssetLibrary.does_directory_exist(f"{root}/{asset}"):
asset_content = unreal.EditorAssetLibrary.list_assets(
if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"):
asset_content = EditorAssetLibrary.list_assets(
f"{root}/{asset}", recursive=False, include_folder=True
)
@ -76,42 +119,122 @@ class CameraLoader(plugin.Loader):
unique_number = f_numbers[-1] + 1
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name}_{unique_number:02d}", suffix="")
f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="")
container_name += suffix
unreal.EditorAssetLibrary.make_directory(asset_dir)
current_level = EditorLevelLibrary.get_editor_world().get_full_name()
EditorLevelLibrary.save_all_dirty_levels()
sequence = tools.create_asset(
asset_name=asset_name,
ar = unreal.AssetRegistryHelpers.get_asset_registry()
filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"{hierarchy_dir}/{asset}/"],
recursive_paths=True)
maps = ar.get_assets(filter)
# There should be only one map in the list
EditorLevelLibrary.load_level(maps[0].get_full_name())
# Get all the sequences in the hierarchy. It will create them, if
# they don't exist.
sequences = []
frame_ranges = []
i = 0
for h in hierarchy_list:
root_content = EditorAssetLibrary.list_assets(
h, recursive=False, include_folder=False)
existing_sequences = [
EditorAssetLibrary.find_asset_data(asset)
for asset in root_content
if EditorAssetLibrary.find_asset_data(
asset).get_class().get_name() == 'LevelSequence'
]
if not existing_sequences:
scene = tools.create_asset(
asset_name=hierarchy[i],
package_path=h,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
asset_data = legacy_io.find_one({
"type": "asset",
"name": h.split('/')[-1]
})
id = asset_data.get('_id')
start_frames = []
end_frames = []
elements = list(
legacy_io.find({"type": "asset", "data.visualParent": id}))
for e in elements:
start_frames.append(e.get('data').get('clipIn'))
end_frames.append(e.get('data').get('clipOut'))
elements.extend(legacy_io.find({
"type": "asset",
"data.visualParent": e.get('_id')
}))
min_frame = min(start_frames)
max_frame = max(end_frames)
scene.set_display_rate(
unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
scene.set_playback_start(min_frame)
scene.set_playback_end(max_frame)
sequences.append(scene)
frame_ranges.append((min_frame, max_frame))
else:
for e in existing_sequences:
sequences.append(e.get_asset())
frame_ranges.append((
e.get_asset().get_playback_start(),
e.get_asset().get_playback_end()))
i += 1
EditorAssetLibrary.make_directory(asset_dir)
cam_seq = tools.create_asset(
asset_name=f"{asset}_camera",
package_path=asset_dir,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
io_asset = legacy_io.Session["AVALON_ASSET"]
asset_doc = legacy_io.find_one({
"type": "asset",
"name": io_asset
})
# Add sequences data to hierarchy
for i in range(0, len(sequences) - 1):
self._set_sequence_hierarchy(
sequences[i], sequences[i + 1],
frame_ranges[i + 1][0], frame_ranges[i + 1][1])
data = asset_doc.get("data")
if data:
sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0))
sequence.set_playback_start(data.get("frameStart"))
sequence.set_playback_end(data.get("frameEnd"))
data = self._get_data(asset)
cam_seq.set_display_rate(
unreal.FrameRate(data.get("fps"), 1.0))
cam_seq.set_playback_start(0)
cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
self._set_sequence_hierarchy(
sequences[-1], cam_seq,
data.get('clipIn'), data.get('clipOut'))
settings = unreal.MovieSceneUserImportFBXSettings()
settings.set_editor_property('reduce_keys', False)
unreal.SequencerTools.import_fbx(
unreal.EditorLevelLibrary.get_editor_world(),
sequence,
sequence.get_bindings(),
settings,
self.fname
)
if cam_seq:
unreal.SequencerTools.import_fbx(
EditorLevelLibrary.get_editor_world(),
cam_seq,
cam_seq.get_bindings(),
settings,
self.fname
)
# Create Asset Container
unreal_pipeline.create_container(
@ -132,12 +255,15 @@ class CameraLoader(plugin.Loader):
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
asset_content = unreal.EditorAssetLibrary.list_assets(
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(current_level)
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
EditorAssetLibrary.save_asset(a)
return asset_content
@ -147,25 +273,25 @@ class CameraLoader(plugin.Loader):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_content = EditorAssetLibrary.list_assets(
path, recursive=False, include_folder=False
)
asset_name = ""
for a in asset_content:
asset = ar.get_asset_by_object_path(a)
if a.endswith("_CON"):
loaded_asset = unreal.EditorAssetLibrary.load_asset(a)
unreal.EditorAssetLibrary.set_metadata_tag(
loaded_asset = EditorAssetLibrary.load_asset(a)
EditorAssetLibrary.set_metadata_tag(
loaded_asset, "representation", str(representation["_id"])
)
unreal.EditorAssetLibrary.set_metadata_tag(
EditorAssetLibrary.set_metadata_tag(
loaded_asset, "parent", str(representation["parent"])
)
asset_name = unreal.EditorAssetLibrary.get_metadata_tag(
asset_name = EditorAssetLibrary.get_metadata_tag(
loaded_asset, "asset_name"
)
elif asset.asset_class == "LevelSequence":
unreal.EditorAssetLibrary.delete_asset(a)
EditorAssetLibrary.delete_asset(a)
sequence = tools.create_asset(
asset_name=asset_name,
@ -191,7 +317,7 @@ class CameraLoader(plugin.Loader):
settings.set_editor_property('reduce_keys', False)
unreal.SequencerTools.import_fbx(
unreal.EditorLevelLibrary.get_editor_world(),
EditorLevelLibrary.get_editor_world(),
sequence,
sequence.get_bindings(),
settings,
@ -202,11 +328,11 @@ class CameraLoader(plugin.Loader):
path = container["namespace"]
parent_path = os.path.dirname(path)
unreal.EditorAssetLibrary.delete_directory(path)
EditorAssetLibrary.delete_directory(path)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_content = EditorAssetLibrary.list_assets(
parent_path, recursive=False, include_folder=True
)
if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path)
EditorAssetLibrary.delete_directory(parent_path)

View file

@ -7,6 +7,7 @@ from pathlib import Path
import unreal
from unreal import EditorAssetLibrary
from unreal import EditorLevelLibrary
from unreal import EditorLevelUtils
from unreal import AssetToolsHelpers
from unreal import FBXImportType
from unreal import MathLibrary as umath
@ -17,6 +18,7 @@ from openpype.pipeline import (
load_container,
get_representation_path,
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
@ -31,7 +33,7 @@ class LayoutLoader(plugin.Loader):
label = "Load Layout"
icon = "code-fork"
color = "orange"
ASSET_ROOT = "/Game/OpenPype/Assets"
ASSET_ROOT = "/Game/OpenPype"
def _get_asset_containers(self, path):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
@ -85,11 +87,91 @@ class LayoutLoader(plugin.Loader):
return None
@staticmethod
def _process_family(assets, class_name, transform, inst_name=None):
def _get_data(self, asset_name):
    """Fetch the "data" field of the asset document called *asset_name*.

    Raises ``AttributeError`` when the asset is not found in the
    project database.
    """
    return legacy_io.find_one(
        {"type": "asset", "name": asset_name}
    ).get("data")
def _set_sequence_hierarchy(
    self, seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths
):
    """Nest sequence ``seq_j`` under ``seq_i`` and drive level visibility.

    Adds (or reuses) a sub-scene track on ``seq_i`` holding ``seq_j`` over
    ``min_frame_j``..``max_frame_j`` (end exclusive), then adds visibility
    sections so the levels in ``map_paths`` are VISIBLE during that range
    and HIDDEN outside it, up to ``max_frame_i``.

    NOTE(review): loads each map in ``map_paths`` as a side effect to
    resolve its asset name — the currently loaded level changes.
    """
    # Get existing sequencer tracks or create them if they don't exist
    tracks = seq_i.get_master_tracks()
    subscene_track = None
    visibility_track = None
    for t in tracks:
        if t.get_class() == unreal.MovieSceneSubTrack.static_class():
            subscene_track = t
        if (t.get_class() ==
                unreal.MovieSceneLevelVisibilityTrack.static_class()):
            visibility_track = t
    if not subscene_track:
        subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
    if not visibility_track:
        visibility_track = seq_i.add_master_track(
            unreal.MovieSceneLevelVisibilityTrack)

    # Create the sub-scene section (reuse one already bound to seq_j)
    subscenes = subscene_track.get_sections()
    subscene = None
    for s in subscenes:
        if s.get_editor_property('sub_sequence') == seq_j:
            subscene = s
            break
    if not subscene:
        subscene = subscene_track.add_section()
        # Row index counts the just-added section as well.
        subscene.set_row_index(len(subscene_track.get_sections()))
        subscene.set_editor_property('sub_sequence', seq_j)
        subscene.set_range(
            min_frame_j,
            max_frame_j + 1)

    # Create the visibility section
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    maps = []
    for m in map_paths:
        # Unreal requires to load the level to get the map name
        EditorLevelLibrary.save_all_dirty_levels()
        EditorLevelLibrary.load_level(m)
        maps.append(str(ar.get_asset_by_object_path(m).asset_name))

    vis_section = visibility_track.add_section()
    # Taken after the add, so all three sections share this row index.
    index = len(visibility_track.get_sections())

    vis_section.set_range(
        min_frame_j,
        max_frame_j + 1)
    vis_section.set_visibility(unreal.LevelVisibility.VISIBLE)
    vis_section.set_row_index(index)
    vis_section.set_level_names(maps)

    # Hide the levels before the clip starts...
    if min_frame_j > 1:
        hid_section = visibility_track.add_section()
        hid_section.set_range(
            1,
            min_frame_j)
        hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
        hid_section.set_row_index(index)
        hid_section.set_level_names(maps)
    # ...and after it ends, up to the parent sequence's last frame.
    if max_frame_j < max_frame_i:
        hid_section = visibility_track.add_section()
        hid_section.set_range(
            max_frame_j + 1,
            max_frame_i + 1)
        hid_section.set_visibility(unreal.LevelVisibility.HIDDEN)
        hid_section.set_row_index(index)
        hid_section.set_level_names(maps)
def _process_family(
self, assets, class_name, transform, sequence, inst_name=None
):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
actors = []
bindings = []
for asset in assets:
obj = ar.get_asset_by_object_path(asset).get_asset()
@ -119,14 +201,23 @@ class LayoutLoader(plugin.Loader):
), False)
actor.set_actor_scale3d(transform.get('scale'))
if class_name == 'SkeletalMesh':
skm_comp = actor.get_editor_property(
'skeletal_mesh_component')
skm_comp.set_bounds_scale(10.0)
actors.append(actor)
return actors
binding = sequence.add_possessable(actor)
bindings.append(binding)
return actors, bindings
@staticmethod
def _import_animation(
asset_dir, path, instance_name, skeleton, actors_dict,
animation_file):
self, asset_dir, path, instance_name, skeleton, actors_dict,
animation_file, bindings_dict, sequence
):
anim_file = Path(animation_file)
anim_file_name = anim_file.with_suffix('')
@ -205,7 +296,20 @@ class LayoutLoader(plugin.Loader):
actor.skeletal_mesh_component.animation_data.set_editor_property(
'anim_to_play', animation)
def _process(self, lib_path, asset_dir, loaded=None):
# Add animation to the sequencer
bindings = bindings_dict.get(instance_name)
for binding in bindings:
binding.add_track(unreal.MovieSceneSkeletalAnimationTrack)
for track in binding.get_tracks():
section = track.add_section()
section.set_range(
sequence.get_playback_start(),
sequence.get_playback_end())
sec_params = section.get_editor_property('params')
sec_params.set_editor_property('animation', animation)
def _process(self, lib_path, asset_dir, sequence, loaded=None):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
with open(lib_path, "r") as fp:
@ -220,6 +324,7 @@ class LayoutLoader(plugin.Loader):
skeleton_dict = {}
actors_dict = {}
bindings_dict = {}
for element in data:
reference = None
@ -277,12 +382,13 @@ class LayoutLoader(plugin.Loader):
actors = []
if family == 'model':
actors = self._process_family(
assets, 'StaticMesh', transform, inst)
actors, _ = self._process_family(
assets, 'StaticMesh', transform, sequence, inst)
elif family == 'rig':
actors = self._process_family(
assets, 'SkeletalMesh', transform, inst)
actors, bindings = self._process_family(
assets, 'SkeletalMesh', transform, sequence, inst)
actors_dict[inst] = actors
bindings_dict[inst] = bindings
if family == 'rig':
# Finds skeleton among the imported assets
@ -302,8 +408,8 @@ class LayoutLoader(plugin.Loader):
if animation_file and skeleton:
self._import_animation(
asset_dir, path, instance_name, skeleton,
actors_dict, animation_file)
asset_dir, path, instance_name, skeleton, actors_dict,
animation_file, bindings_dict, sequence)
@staticmethod
def _remove_family(assets, components, class_name, prop_name):
@ -369,7 +475,13 @@ class LayoutLoader(plugin.Loader):
list(str): list of container content
"""
# Create directory for asset and avalon container
hierarchy = context.get('asset').get('data').get('parents')
root = self.ASSET_ROOT
hierarchy_dir = root
hierarchy_list = []
for h in hierarchy:
hierarchy_dir = f"{hierarchy_dir}/{h}"
hierarchy_list.append(hierarchy_dir)
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
@ -379,13 +491,156 @@ class LayoutLoader(plugin.Loader):
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
"{}/{}/{}".format(root, asset, name), suffix="")
"{}/{}/{}".format(hierarchy_dir, asset, name), suffix="")
container_name += suffix
EditorAssetLibrary.make_directory(asset_dir)
self._process(self.fname, asset_dir)
# Create map for the shot, and create hierarchy of map. If the maps
# already exist, we will use them.
maps = []
for h in hierarchy_list:
a = h.split('/')[-1]
map = f"{h}/{a}_map.{a}_map"
new = False
if not EditorAssetLibrary.does_asset_exist(map):
EditorLevelLibrary.new_level(f"{h}/{a}_map")
new = True
maps.append({"map": map, "new": new})
EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map")
maps.append(
{"map": f"{asset_dir}/{asset}_map.{asset}_map", "new": True})
for i in range(0, len(maps) - 1):
for j in range(i + 1, len(maps)):
if maps[j].get('new'):
EditorLevelLibrary.load_level(maps[i].get('map'))
EditorLevelUtils.add_level_to_world(
EditorLevelLibrary.get_editor_world(),
maps[j].get('map'),
unreal.LevelStreamingDynamic
)
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(maps[-1].get('map'))
# Get all the sequences in the hierarchy. It will create them, if
# they don't exist.
sequences = []
frame_ranges = []
i = 0
for h in hierarchy_list:
root_content = EditorAssetLibrary.list_assets(
h, recursive=False, include_folder=False)
existing_sequences = [
EditorAssetLibrary.find_asset_data(asset)
for asset in root_content
if EditorAssetLibrary.find_asset_data(
asset).get_class().get_name() == 'LevelSequence'
]
if not existing_sequences:
sequence = tools.create_asset(
asset_name=hierarchy[i],
package_path=h,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
asset_data = legacy_io.find_one({
"type": "asset",
"name": h.split('/')[-1]
})
id = asset_data.get('_id')
start_frames = []
end_frames = []
elements = list(
legacy_io.find({"type": "asset", "data.visualParent": id}))
for e in elements:
start_frames.append(e.get('data').get('clipIn'))
end_frames.append(e.get('data').get('clipOut'))
elements.extend(legacy_io.find({
"type": "asset",
"data.visualParent": e.get('_id')
}))
min_frame = min(start_frames)
max_frame = max(end_frames)
sequence.set_display_rate(
unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
sequence.set_playback_start(min_frame)
sequence.set_playback_end(max_frame)
sequences.append(sequence)
frame_ranges.append((min_frame, max_frame))
tracks = sequence.get_master_tracks()
track = None
for t in tracks:
if (t.get_class() ==
unreal.MovieSceneCameraCutTrack.static_class()):
track = t
break
if not track:
track = sequence.add_master_track(
unreal.MovieSceneCameraCutTrack)
else:
for e in existing_sequences:
sequences.append(e.get_asset())
frame_ranges.append((
e.get_asset().get_playback_start(),
e.get_asset().get_playback_end()))
i += 1
shot = tools.create_asset(
asset_name=asset,
package_path=asset_dir,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
# sequences and frame_ranges have the same length
for i in range(0, len(sequences) - 1):
maps_to_add = []
for j in range(i + 1, len(maps)):
maps_to_add.append(maps[j].get('map'))
self._set_sequence_hierarchy(
sequences[i], sequences[i + 1],
frame_ranges[i][1],
frame_ranges[i + 1][0], frame_ranges[i + 1][1],
maps_to_add)
data = self._get_data(asset)
shot.set_display_rate(
unreal.FrameRate(data.get("fps"), 1.0))
shot.set_playback_start(0)
shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
self._set_sequence_hierarchy(
sequences[-1], shot,
frame_ranges[-1][1],
data.get('clipIn'), data.get('clipOut'),
[maps[-1].get('map')])
EditorLevelLibrary.load_level(maps[-1].get('map'))
self._process(self.fname, asset_dir, shot)
for s in sequences:
EditorAssetLibrary.save_asset(s.get_full_name())
EditorLevelLibrary.save_current_level()
# Create Asset Container
unreal_pipeline.create_container(
@ -412,6 +667,8 @@ class LayoutLoader(plugin.Loader):
for a in asset_content:
EditorAssetLibrary.save_asset(a)
EditorLevelLibrary.load_level(maps[0].get('map'))
return asset_content
def update(self, container, representation):

View file

@ -17,7 +17,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""
label = "Collect Instances"
order = pyblish.api.CollectorOrder
order = pyblish.api.CollectorOrder - 0.1
hosts = ["unreal"]
def process(self, context):

View file

@ -0,0 +1,24 @@
import pyblish.api
class CollectRemoveMarked(pyblish.api.ContextPlugin):
    """Drop instances that were flagged for removal.

    Any instance whose data carries a truthy ``remove`` key is taken out
    of the publishing context before later plugins run.
    """

    order = pyblish.api.CollectorOrder + 0.499
    label = 'Remove Marked Instances'

    def process(self, context):
        self.log.debug(context)
        # Collect first, then remove, so the context is not mutated
        # while being iterated.
        marked = [
            instance for instance in context
            if instance.data.get('remove')
        ]
        for instance in marked:
            context.remove(instance)

View file

@ -0,0 +1,103 @@
from pathlib import Path
import unreal
import pyblish.api
from openpype.hosts.unreal.api import pipeline
class CollectRenderInstances(pyblish.api.InstancePlugin):
    """Expand a render instance into one instance per rendered sequence.

    Walks the instance's level sequence and all of its sub-sequences;
    for every leaf sequence (except camera sequences) a new publishable
    instance is created, pointing at the PNG frames rendered for it
    under the project's ``Saved/MovieRenders`` folder.
    """
    order = pyblish.api.CollectorOrder
    hosts = ["unreal"]
    families = ["render"]
    label = "Collect Render Instances"

    def process(self, instance):
        self.log.debug("Preparing Rendering Instances")

        context = instance.context

        data = instance.data
        # Mark the original instance for removal — presumably consumed
        # by the "Remove Marked Instances" collector; verify ordering.
        data['remove'] = True

        ar = unreal.AssetRegistryHelpers.get_asset_registry()

        sequence = ar.get_asset_by_object_path(
            data.get('sequence')).get_asset()

        # Work list seeded with the top sequence; entries are appended
        # while iterating, giving a breadth-first traversal of the
        # sub-sequence hierarchy.
        sequences = [{
            "sequence": sequence,
            "output": data.get('output'),
            "frame_range": (
                data.get('frameStart'), data.get('frameEnd'))
        }]

        for s in sequences:
            self.log.debug(f"Processing: {s.get('sequence').get_name()}")
            subscenes = pipeline.get_subsequences(s.get('sequence'))

            if subscenes:
                # Not a leaf: queue children instead of creating an
                # instance for this node.
                for ss in subscenes:
                    sequences.append({
                        "sequence": ss.get_sequence(),
                        "output": (f"{s.get('output')}/"
                                   f"{ss.get_sequence().get_name()}"),
                        "frame_range": (
                            ss.get_start_frame(), ss.get_end_frame() - 1)
                    })
            else:
                # Avoid creating instances for camera sequences
                if "_camera" not in s.get('sequence').get_name():
                    seq = s.get('sequence')
                    seq_name = seq.get_name()

                    new_instance = context.create_instance(
                        f"{data.get('subset')}_"
                        f"{seq_name}")
                    new_instance[:] = seq_name

                    new_data = new_instance.data

                    new_data["asset"] = seq_name
                    new_data["setMembers"] = seq_name
                    new_data["family"] = "render"
                    new_data["families"] = ["render", "review"]
                    new_data["parent"] = data.get("parent")
                    new_data["subset"] = f"{data.get('subset')}_{seq_name}"
                    new_data["level"] = data.get("level")
                    new_data["output"] = s.get('output')
                    new_data["fps"] = seq.get_display_rate().numerator
                    new_data["frameStart"] = s.get('frame_range')[0]
                    new_data["frameEnd"] = s.get('frame_range')[1]
                    new_data["sequence"] = seq.get_path_name()
                    new_data["master_sequence"] = data["master_sequence"]
                    new_data["master_level"] = data["master_level"]

                    self.log.debug(f"new instance data: {new_data}")

                    # Gather the already-rendered PNG frames for this
                    # sequence's output folder.
                    project_dir = unreal.Paths.project_dir()
                    render_dir = (f"{project_dir}/Saved/MovieRenders/"
                                  f"{s.get('output')}")
                    render_path = Path(render_dir)

                    frames = []

                    for x in render_path.iterdir():
                        if x.is_file() and x.suffix == '.png':
                            frames.append(str(x.name))

                    if "representations" not in new_instance.data:
                        new_instance.data["representations"] = []

                    # NOTE(review): `repr` shadows the builtin of the
                    # same name within this scope.
                    repr = {
                        'frameStart': s.get('frame_range')[0],
                        'frameEnd': s.get('frame_range')[1],
                        'name': 'png',
                        'ext': 'png',
                        'files': frames,
                        'stagingDir': render_dir,
                        'tags': ['review']
                    }
                    new_instance.data["representations"].append(repr)

View file

@ -0,0 +1,48 @@
from pathlib import Path
import unreal
import openpype.api
class ExtractRender(openpype.api.Extractor):
    """Collect rendered PNG frames into a publishable representation."""

    label = "Extract Render"
    hosts = ["unreal"]
    families = ["render"]
    optional = True

    def process(self, instance):
        staging_dir = self.staging_dir(instance)

        self.log.info("Performing extraction..")

        # Rendered frames live under the project's MovieRenders folder,
        # in a sub-folder named after the subset.
        render_dir = (f"{unreal.Paths.project_dir()}/Saved/MovieRenders/"
                      f"{instance.data['subset']}")
        assert unreal.Paths.directory_exists(render_dir), \
            "Render directory does not exist"

        frames = [
            str(entry)
            for entry in Path(render_dir).iterdir()
            if entry.is_file() and entry.suffix == '.png'
        ]

        # NOTE(review): `files` holds absolute paths while `stagingDir`
        # points at the extractor staging dir rather than the render
        # dir — this differs from CollectRenderInstances (which stores
        # bare file names with stagingDir == render dir); confirm which
        # convention the integrator expects.
        instance.data.setdefault("representations", []).append({
            'name': 'png',
            'ext': 'png',
            'files': frames,
            "stagingDir": staging_dir,
        })

View file

@ -0,0 +1,41 @@
import clique
import pyblish.api
class ValidateSequenceFrames(pyblish.api.InstancePlugin):
    """Ensure the sequence of frames is complete.

    Each representation's files are assembled into a single clique
    collection and compared against the instance's ``frameStart`` /
    ``frameEnd``. A boundary mismatch raises ``ValueError``; holes
    inside the range fail an assertion.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Sequence Frames"
    families = ["render"]
    hosts = ["unreal"]
    optional = True

    def process(self, instance):
        # The frame pattern is constant, so build it once for all
        # representations.
        patterns = [clique.PATTERNS["frames"]]

        for representation in instance.data.get("representations"):
            collections, remainder = clique.assemble(
                representation["files"], minimum_items=1,
                patterns=patterns)

            assert not remainder, "Must not have remainder"
            assert len(collections) == 1, "Must detect single collection"

            collection = collections[0]
            frames = list(collection.indexes)
            current_range = (frames[0], frames[-1])
            required_range = (instance.data["frameStart"],
                              instance.data["frameEnd"])

            if current_range != required_range:
                raise ValueError(f"Invalid frame range: {current_range} - "
                                 f"expected: {required_range}")

            missing = collection.holes().indexes
            assert not missing, "Missing frames: %s" % (missing,)

View file

@ -42,12 +42,12 @@ from .attribute_definitions import (
EnumDef,
BoolDef,
FileDef,
FileDefItem,
)
from .env_tools import (
env_value_to_bool,
get_paths_from_environ,
get_global_environments
)
from .terminal import Terminal
@ -248,7 +248,6 @@ __all__ = [
"env_value_to_bool",
"get_paths_from_environ",
"get_global_environments",
"get_vendor_bin_path",
"get_oiio_tools_path",
@ -267,6 +266,7 @@ __all__ = [
"EnumDef",
"BoolDef",
"FileDef",
"FileDefItem",
"import_filepath",
"modules_from_path",

View file

@ -1,8 +1,12 @@
import os
import re
import collections
import uuid
import json
from abc import ABCMeta, abstractmethod
import six
import clique
class AbstractAttrDefMeta(ABCMeta):
@ -302,12 +306,218 @@ class BoolDef(AbtractAttrDef):
return self.default
class FileDefItem(object):
    """Item describing one file or one file sequence on disk.

    Holds a directory, the filenames inside it and, for sequences, their
    frame numbers and a formatting template (e.g. "name.%04d.exr").
    """

    def __init__(
        self, directory, filenames, frames=None, template=None
    ):
        # Directory the filenames live in.
        self.directory = directory

        # Filled by 'set_filenames' below.
        self.filenames = []
        self.is_sequence = False
        self.template = None
        self.frames = []

        self.set_filenames(filenames, frames, template)

    def __str__(self):
        # JSON form of 'to_dict' — round-trippable via 'from_dict'.
        return json.dumps(self.to_dict())

    def __repr__(self):
        # Sequences are represented by their template, single files by
        # their only filename.
        if self.is_sequence:
            filename = self.template
        else:
            filename = self.filenames[0]

        return "<{}: \"{}\">".format(
            self.__class__.__name__,
            os.path.join(self.directory, filename)
        )

    @property
    def label(self):
        """str: Human readable label, e.g. 'name.%04d.exr [1001-1020]'.

        For sequences with gaps the bracket part lists comma separated
        subranges, e.g. '[1-3,5,7-9]'.
        """
        if not self.is_sequence:
            return self.filenames[0]

        frame_start = self.frames[0]
        filename_template = os.path.basename(self.template)
        if len(self.frames) == 1:
            return "{} [{}]".format(filename_template, frame_start)

        frame_end = self.frames[-1]
        expected_len = (frame_end - frame_start) + 1
        # Continuous sequence -> simple '[start-end]' label.
        if expected_len == len(self.frames):
            return "{} [{}-{}]".format(
                filename_template, frame_start, frame_end
            )

        # Sequence with holes: walk the full range and collect the
        # contiguous subranges that are actually present.
        ranges = []
        _frame_start = None
        _frame_end = None
        for frame in range(frame_start, frame_end + 1):
            if frame not in self.frames:
                # Hole found — close the currently open subrange (if any).
                add_to_ranges = _frame_start is not None
            elif _frame_start is None:
                # Open a new subrange; close immediately if at the end.
                _frame_start = _frame_end = frame
                add_to_ranges = frame == frame_end
            else:
                # Extend the open subrange; close if at the end.
                _frame_end = frame
                add_to_ranges = frame == frame_end

            if add_to_ranges:
                if _frame_start != _frame_end:
                    _range = "{}-{}".format(_frame_start, _frame_end)
                else:
                    _range = str(_frame_start)
                ranges.append(_range)
                _frame_start = _frame_end = None
        return "{} [{}]".format(
            filename_template, ",".join(ranges)
        )

    def split_sequence(self):
        """Split a sequence item into single-file items.

        Returns:
            list: New FileDefItem objects, one per file.

        Raises:
            ValueError: When this item is not a sequence.
        """
        if not self.is_sequence:
            raise ValueError("Cannot split single file item")

        paths = [
            os.path.join(self.directory, filename)
            for filename in self.filenames
        ]
        return self.from_paths(paths, False)

    @property
    def ext(self):
        """str or None: Extension of the first filename (with dot)."""
        _, ext = os.path.splitext(self.filenames[0])
        if ext:
            return ext
        return None

    @property
    def is_dir(self):
        """bool: Whether the item points to a directory (no extension)."""
        # QUESTION is there a better way to detect a folder
        #   (maybe an init argument?)
        if self.ext:
            return False
        return True

    def set_directory(self, directory):
        """Change the directory the filenames are relative to."""
        self.directory = directory

    def set_filenames(self, filenames, frames=None, template=None):
        """Set filenames and derive sequence state.

        Args:
            filenames (list): Filenames inside 'directory'.
            frames (list, optional): Frame numbers; non-empty frames mark
                the item as a sequence.
            template (str, optional): Sequence filename template. Required
                when frames are passed.

        Raises:
            ValueError: When frames are passed without a template.
        """
        if frames is None:
            frames = []
        is_sequence = False
        if frames:
            is_sequence = True

        if is_sequence and not template:
            raise ValueError("Missing template for sequence")

        self.filenames = filenames
        self.template = template
        self.frames = frames
        self.is_sequence = is_sequence

    @classmethod
    def create_empty_item(cls):
        """Create a placeholder item with empty directory and filenames."""
        return cls("", "")

    @classmethod
    def from_value(cls, value, allow_sequences):
        """Convert passed value to FileDefItem objects.

        Args:
            value: Single item or iterable of items. Each item may be a
                FileDefItem, its dict form or a string path.
            allow_sequences (bool): When False, sequence items are split
                into single-file items.

        Returns:
            list: Created FileDefItem objects.

        Raises:
            TypeError: When an item is of an unsupported type.
        """

        # Convert single item to iterable
        if not isinstance(value, (list, tuple, set)):
            value = [value]

        output = []
        str_filepaths = []
        for item in value:
            if isinstance(item, dict):
                item = cls.from_dict(item)

            if isinstance(item, FileDefItem):
                if not allow_sequences and item.is_sequence:
                    output.extend(item.split_sequence())
                else:
                    output.append(item)

            elif isinstance(item, six.string_types):
                str_filepaths.append(item)
            else:
                raise TypeError(
                    "Unknown type \"{}\". Can't convert to {}".format(
                        str(type(item)), cls.__name__
                    )
                )

        if str_filepaths:
            output.extend(cls.from_paths(str_filepaths, allow_sequences))

        return output

    @classmethod
    def from_dict(cls, data):
        """Create item from its dict form (see 'to_dict')."""
        return cls(
            data["directory"],
            data["filenames"],
            data.get("frames"),
            data.get("template")
        )

    @classmethod
    def from_paths(cls, paths, allow_sequences):
        """Create items from filesystem paths.

        Paths are grouped by directory. When 'allow_sequences' is True,
        filenames in each directory are assembled into sequences with
        'clique'; leftovers become single-file items.

        Args:
            paths (list): File paths (may mix directories).
            allow_sequences (bool): Assemble frame sequences when True.

        Returns:
            list: Created FileDefItem objects.
        """
        filenames_by_dir = collections.defaultdict(list)
        for path in paths:
            normalized = os.path.normpath(path)
            directory, filename = os.path.split(normalized)
            filenames_by_dir[directory].append(filename)

        output = []
        for directory, filenames in filenames_by_dir.items():
            if allow_sequences:
                cols, remainders = clique.assemble(filenames)
            else:
                cols = []
                remainders = filenames

            # Files that did not fit any collection stay single items.
            for remainder in remainders:
                output.append(cls(directory, [remainder]))

            for col in cols:
                frames = list(col.indexes)
                paths = [filename for filename in col]
                template = col.format("{head}{padding}{tail}")

                output.append(cls(
                    directory, paths, frames, template
                ))

        return output

    def to_dict(self):
        """Serialize item to a plain dict (inverse of 'from_dict').

        Returns:
            dict: Always contains "is_sequence", "directory" and
                "filenames"; sequences also get "template" and sorted
                "frames".
        """
        output = {
            "is_sequence": self.is_sequence,
            "directory": self.directory,
            "filenames": list(self.filenames),
        }
        if self.is_sequence:
            output.update({
                "template": self.template,
                "frames": list(sorted(self.frames)),
            })

        return output
class FileDef(AbtractAttrDef):
"""File definition.
It is possible to define filters of allowed file extensions and if supports
folders.
Args:
multipath(bool): Allow multiple path.
single_item(bool): Allow only single path item.
folders(bool): Allow folder paths.
extensions(list<str>): Allow files with extensions. Empty list will
allow all extensions and None will disable files completely.
@ -315,44 +525,51 @@ class FileDef(AbtractAttrDef):
"""
def __init__(
self, key, multipath=False, folders=None, extensions=None,
default=None, **kwargs
self, key, single_item=True, folders=None, extensions=None,
allow_sequences=True, default=None, **kwargs
):
if folders is None and extensions is None:
folders = True
extensions = []
if default is None:
if multipath:
default = []
if single_item:
default = FileDefItem.create_empty_item().to_dict()
else:
default = ""
default = []
else:
if multipath:
if single_item:
if isinstance(default, dict):
FileDefItem.from_dict(default)
elif isinstance(default, six.string_types):
default = FileDefItem.from_paths([default.strip()])[0]
else:
raise TypeError((
"'default' argument must be 'str' or 'dict' not '{}'"
).format(type(default)))
else:
if not isinstance(default, (tuple, list, set)):
raise TypeError((
"'default' argument must be 'list', 'tuple' or 'set'"
", not '{}'"
).format(type(default)))
else:
if not isinstance(default, six.string_types):
raise TypeError((
"'default' argument must be 'str' not '{}'"
).format(type(default)))
default = default.strip()
# Change horizontal label
is_label_horizontal = kwargs.get("is_label_horizontal")
if is_label_horizontal is None:
is_label_horizontal = True
if multipath:
if single_item:
is_label_horizontal = True
else:
is_label_horizontal = False
kwargs["is_label_horizontal"] = is_label_horizontal
self.multipath = multipath
self.single_item = single_item
self.folders = folders
self.extensions = extensions
self.extensions = set(extensions)
self.allow_sequences = allow_sequences
super(FileDef, self).__init__(key, default=default, **kwargs)
def __eq__(self, other):
@ -360,30 +577,43 @@ class FileDef(AbtractAttrDef):
return False
return (
self.multipath == other.multipath
self.single_item == other.single_item
and self.folders == other.folders
and self.extensions == other.extensions
and self.allow_sequences == other.allow_sequences
)
def convert_value(self, value):
if isinstance(value, six.string_types):
if self.multipath:
value = [value.strip()]
else:
value = value.strip()
return value
if isinstance(value, six.string_types) or isinstance(value, dict):
value = [value]
if isinstance(value, (tuple, list, set)):
_value = []
string_paths = []
dict_items = []
for item in value:
if isinstance(item, six.string_types):
_value.append(item.strip())
string_paths.append(item.strip())
elif isinstance(item, dict):
try:
FileDefItem.from_dict(item)
dict_items.append(item)
except (ValueError, KeyError):
pass
if self.multipath:
return _value
if string_paths:
file_items = FileDefItem.from_paths(string_paths)
dict_items.extend([
file_item.to_dict()
for file_item in file_items
])
if not _value:
if not self.single_item:
return dict_items
if not dict_items:
return self.default
return _value[0].strip()
return dict_items[0]
return str(value).strip()
if self.single_item:
return FileDefItem.create_empty_item().to_dict()
return []

View file

@ -1532,13 +1532,13 @@ class BuildWorkfile:
subsets = list(legacy_io.find({
"type": "subset",
"parent": {"$in": asset_entity_by_ids.keys()}
"parent": {"$in": list(asset_entity_by_ids.keys())}
}))
subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
sorted_versions = list(legacy_io.find({
"type": "version",
"parent": {"$in": subset_entity_by_ids.keys()}
"parent": {"$in": list(subset_entity_by_ids.keys())}
}).sort("name", -1))
subset_id_with_latest_version = []
@ -1552,7 +1552,7 @@ class BuildWorkfile:
repres = legacy_io.find({
"type": "representation",
"parent": {"$in": last_versions_by_id.keys()}
"parent": {"$in": list(last_versions_by_id.keys())}
})
output = {}

View file

@ -69,57 +69,3 @@ def get_paths_from_environ(env_key=None, env_value=None, return_first=False):
return None
# Return all existing paths from environment variable
return existing_paths
def get_global_environments(env=None):
    """Load global environments from Pype.

    Return prepared and parsed global environments by pype's settings. Use
    combination of "global" environments set in pype's settings and enabled
    modules.

    Args:
        env (dict, optional): Initial environments. Empty dictionary is used
            when not entered.

    Returns:
        dict of str: Loaded and processed environments.
    """
    # Imported locally — presumably to avoid import cycles with the
    # openpype package; confirm before moving to module level.
    import acre

    from openpype.modules import ModulesManager
    from openpype.settings import get_environments

    if env is None:
        env = {}

    # Get global environments from settings
    all_settings_env = get_environments()
    parsed_global_env = acre.parse(all_settings_env["global"])

    # Merge with entered environments
    merged_env = acre.append(env, parsed_global_env)

    # Get environments from Pype modules
    modules_manager = ModulesManager()

    module_envs = modules_manager.collect_global_environments()
    publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"]

    # Set pyblish plugins paths if any module want to register them
    if publish_plugin_dirs:
        publish_paths_str = os.environ.get("PYBLISHPLUGINPATH") or ""
        publish_paths = publish_paths_str.split(os.pathsep)
        # Normalize and deduplicate existing paths before adding new ones.
        _publish_paths = {
            os.path.normpath(path) for path in publish_paths if path
        }
        for path in publish_plugin_dirs:
            _publish_paths.add(os.path.normpath(path))
        module_envs["PYBLISHPLUGINPATH"] = os.pathsep.join(_publish_paths)

    # Merge environments with current environments and update values
    if module_envs:
        parsed_envs = acre.parse(module_envs)
        merged_env = acre.merge(parsed_envs, merged_env)

    return acre.compute(merged_env, cleanup=True)

View file

@ -365,6 +365,7 @@ class TemplateResult(str):
when value of key in data is dictionary but template expect string
of number.
"""
used_values = None
solved = None
template = None
@ -383,6 +384,12 @@ class TemplateResult(str):
new_obj.invalid_types = invalid_types
return new_obj
def __copy__(self, *args, **kwargs):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
def validate(self):
if not self.solved:
raise TemplateUnsolved(
@ -391,6 +398,17 @@ class TemplateResult(str):
self.invalid_types
)
def copy(self):
cls = self.__class__
return cls(
str(self),
self.template,
self.solved,
self.used_values,
self.missing_keys,
self.invalid_types
)
class TemplatesResultDict(dict):
"""Holds and wrap TemplateResults for easy bug report."""

View file

@ -727,9 +727,9 @@ def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None):
def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd):
input_format = ffprobe_data["format"]
format_tags = input_format.get("tags") or {}
product_name = format_tags.get("product_name") or ""
operational_pattern_ul = format_tags.get("operational_pattern_ul") or ""
output = []
if "opatom" in product_name.lower():
if operational_pattern_ul == "060e2b34.04010102.0d010201.10030000":
output.extend(["-f", "mxf_opatom"])
return output

View file

@ -24,48 +24,6 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
label = "Integrate Ftrack Api"
families = ["ftrack"]
def query(self, entitytype, data):
    """Generate a query expression from data supplied.

    If a value is not a plain string or integer, we'll add the id of the
    entity to the query.

    Args:
        entitytype (str): The type of entity to query.
        data (dict): The data to identify the entity.

    Returns:
        str: String query to use with "session.query"
    """
    # Plain values are compared directly; anything else (entity-like
    # mappings) is matched by its "id" key. On Python 2 unicode values
    # must also count as plain strings, hence 'basestring'.
    if sys.version_info[0] < 3:
        plain_types = (basestring, int)  # noqa: F821
    else:
        plain_types = (str, int)

    queries = []
    # 'dict.items()' exists on both Python 2 and 3, so one loop replaces
    # the previously duplicated py2 ('iteritems') / py3 branches.
    for key, value in data.items():
        if not isinstance(value, plain_types):
            self.log.info("value: {}".format(value))
            if "id" in value.keys():
                queries.append(
                    "{0}.id is \"{1}\"".format(key, value["id"])
                )
        else:
            queries.append("{0} is \"{1}\"".format(key, value))

    query = (
        "select id from " + entitytype + " where " + " and ".join(queries)
    )
    self.log.debug(query)
    return query
def process(self, instance):
session = instance.context.data["ftrackSession"]
context = instance.context
@ -108,7 +66,19 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
default_asset_name = parent_entity["name"]
# Change status on task
self._set_task_status(instance, task_entity, session)
asset_version_status_ids_by_name = {}
project_entity = instance.context.data.get("ftrackProject")
if project_entity:
project_schema = project_entity["project_schema"]
asset_version_statuses = (
project_schema.get_statuses("AssetVersion")
)
asset_version_status_ids_by_name = {
status["name"].lower(): status["id"]
for status in asset_version_statuses
}
self._set_task_status(instance, project_entity, task_entity, session)
# Prepare AssetTypes
asset_types_by_short = self._ensure_asset_types_exists(
@ -139,7 +109,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
# Asset Version
asset_version_data = data.get("assetversion_data") or {}
asset_version_entity = self._ensure_asset_version_exists(
session, asset_version_data, asset_entity["id"], task_entity
session,
asset_version_data,
asset_entity["id"],
task_entity,
asset_version_status_ids_by_name
)
# Component
@ -174,8 +148,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
if asset_version not in instance.data[asset_versions_key]:
instance.data[asset_versions_key].append(asset_version)
def _set_task_status(self, instance, task_entity, session):
project_entity = instance.context.data.get("ftrackProject")
def _set_task_status(self, instance, project_entity, task_entity, session):
if not project_entity:
self.log.info("Task status won't be set, project is not known.")
return
@ -319,12 +292,19 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
).first()
def _ensure_asset_version_exists(
self, session, asset_version_data, asset_id, task_entity
self,
session,
asset_version_data,
asset_id,
task_entity,
status_ids_by_name
):
task_id = None
if task_entity:
task_id = task_entity["id"]
status_name = asset_version_data.pop("status_name", None)
# Try query asset version by criteria (asset id and version)
version = asset_version_data.get("version") or 0
asset_version_entity = self._query_asset_version(
@ -366,6 +346,18 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
session, version, asset_id
)
if status_name:
status_id = status_ids_by_name.get(status_name.lower())
if not status_id:
self.log.info((
"Ftrack status with name \"{}\""
" for AssetVersion was not found."
).format(status_name))
elif asset_version_entity["status_id"] != status_id:
asset_version_entity["status_id"] = status_id
session.commit()
# Set custom attributes if there were any set
custom_attrs = asset_version_data.get("custom_attributes") or {}
for attr_key, attr_value in custom_attrs.items():

View file

@ -3,6 +3,8 @@ import json
import copy
import pyblish.api
from openpype.lib.profiles_filtering import filter_profiles
class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"""Collect ftrack component data (not integrate yet).
@ -36,6 +38,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"reference": "reference"
}
keep_first_subset_name_for_review = True
asset_versions_status_profiles = {}
def process(self, instance):
self.log.debug("instance {}".format(instance))
@ -80,6 +83,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
if instance_fps is None:
instance_fps = instance.context.data["fps"]
status_name = self._get_asset_version_status_name(instance)
# Base of component item data
# - create a copy of this object when want to use it
base_component_item = {
@ -91,7 +96,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
},
"assetversion_data": {
"version": version_number,
"comment": instance.context.data.get("comment") or ""
"comment": instance.context.data.get("comment") or "",
"status_name": status_name
},
"component_overwrite": False,
# This can be change optionally
@ -317,3 +323,24 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
))
instance.data["ftrackComponentsList"] = component_list
def _get_asset_version_status_name(self, instance):
    """Resolve AssetVersion status name from configured profiles.

    Matches the instance's family, host and task type against
    'asset_versions_status_profiles' and returns the status name of the
    best matching profile, or None when nothing applies.
    """
    profiles = self.asset_versions_status_profiles
    if not profiles:
        return None

    # Build filtering criteria for profile matching
    task_info = instance.data["anatomyData"].get("task", {})
    criteria = {
        "families": instance.data["family"],
        "hosts": instance.context.data["hostName"],
        "task_types": task_info.get("type")
    }

    profile = filter_profiles(profiles, criteria)
    if profile is None:
        return None
    return profile["status"] or None

View file

@ -59,7 +59,7 @@ class DiscoverResult:
self.ignored_plugins
)))
for cls in self.ignored_plugins:
lines.append("- {}".format(cls.__class__.__name__))
lines.append("- {}".format(cls.__name__))
# Abstract classes
if self.abstract_plugins or full_report:
@ -67,7 +67,7 @@ class DiscoverResult:
self.abstract_plugins
)))
for cls in self.abstract_plugins:
lines.append("- {}".format(cls.__class__.__name__))
lines.append("- {}".format(cls.__name__))
# Abstract classes
if self.duplicated_plugins or full_report:
@ -75,7 +75,7 @@ class DiscoverResult:
self.duplicated_plugins
)))
for cls in self.duplicated_plugins:
lines.append("- {}".format(cls.__class__.__name__))
lines.append("- {}".format(cls.__name__))
if self.crashed_file_paths or full_report:
lines.append("*** Failed to load {} files".format(len(

View file

@ -30,14 +30,15 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
# shot data dict
shot_data = {}
family = instance.data.get("family")
family = instance.data["family"]
families = instance.data["families"]
# filter out all inappropriate instances
if not instance.data["publish"]:
continue
# exclude other families then self.families with intersection
if not set(self.families).intersection([family]):
if not set(self.families).intersection(set(families + [family])):
continue
# exclude if not masterLayer True

View file

@ -41,21 +41,33 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin):
loaded_versions = []
_containers = list(host.ls())
_repr_ids = [ObjectId(c["representation"]) for c in _containers]
repre_docs = legacy_io.find(
{"_id": {"$in": _repr_ids}},
projection={"_id": 1, "parent": 1}
)
version_by_repr = {
str(doc["_id"]): doc["parent"] for doc in
legacy_io.find(
{"_id": {"$in": _repr_ids}},
projection={"parent": 1}
)
str(doc["_id"]): doc["parent"]
for doc in repre_docs
}
# QUESTION should we add same representation id when loaded multiple
# times?
for con in _containers:
repre_id = con["representation"]
version_id = version_by_repr.get(repre_id)
if version_id is None:
self.log.warning((
"Skipping container,"
" did not find representation document. {}"
).format(str(con)))
continue
# NOTE:
# may have more then one representation that are same version
version = {
"subsetName": con["name"],
"representation": ObjectId(con["representation"]),
"version": version_by_repr[con["representation"]], # _id
"representation": ObjectId(repre_id),
"version": version_id,
}
loaded_versions.append(version)

View file

@ -41,6 +41,7 @@ class ExtractBurnin(openpype.api.Extractor):
"shell",
"hiero",
"premiere",
"traypublisher",
"standalonepublisher",
"harmony",
"fusion",

View file

@ -45,13 +45,15 @@ class ExtractReview(pyblish.api.InstancePlugin):
"hiero",
"premiere",
"harmony",
"traypublisher",
"standalonepublisher",
"fusion",
"tvpaint",
"resolve",
"webpublisher",
"aftereffects",
"flame"
"flame",
"unreal"
]
# Supported extensions

View file

@ -22,7 +22,6 @@ from .lib import (
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
get_environments,
get_local_settings
)
from .entities import (
@ -54,7 +53,6 @@ __all__ = (
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_environments",
"get_local_settings",
"SystemSettings",

View file

@ -3,14 +3,11 @@ import re
# Metadata keys for work with studio and project overrides
M_OVERRIDDEN_KEY = "__overriden_keys__"
# Metadata key for storing information about environments
M_ENVIRONMENT_KEY = "__environment_keys__"
# Metadata key for storing dynamic created labels
M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__"
METADATA_KEYS = frozenset([
M_OVERRIDDEN_KEY,
M_ENVIRONMENT_KEY,
M_DYNAMIC_KEY_LABEL
])
@ -35,7 +32,6 @@ KEY_REGEX = re.compile(r"^[{}]+$".format(KEY_ALLOWED_SYMBOLS))
__all__ = (
"M_OVERRIDDEN_KEY",
"M_ENVIRONMENT_KEY",
"M_DYNAMIC_KEY_LABEL",
"METADATA_KEYS",

View file

@ -165,7 +165,7 @@
]
}
],
"customNodes": []
"overrideNodes": []
},
"regexInputs": {
"inputs": [

View file

@ -55,18 +55,23 @@
"keep_original_representation": false,
"export_presets_mapping": {
"exr16fpdwaa": {
"active": true,
"export_type": "File Sequence",
"ext": "exr",
"xml_preset_file": "OpenEXR (16-bit fp DWAA).xml",
"xml_preset_dir": "",
"export_type": "File Sequence",
"ignore_comment_attrs": false,
"colorspace_out": "ACES - ACEScg",
"xml_preset_dir": "",
"parsed_comment_attrs": true,
"representation_add_range": true,
"representation_tags": [],
"load_to_batch_group": true,
"batch_group_loader_name": "LoadClip"
"batch_group_loader_name": "LoadClipBatch",
"filter_path_regex": ".*"
}
}
},
"IntegrateBatchGroup": {
"enabled": false
}
},
"load": {
@ -87,7 +92,8 @@
"png",
"h264",
"mov",
"mp4"
"mp4",
"exr16fpdwaa"
],
"reel_group_name": "OpenPype_Reels",
"reel_name": "Loaded",
@ -110,7 +116,8 @@
"png",
"h264",
"mov",
"mp4"
"mp4",
"exr16fpdwaa"
],
"reel_name": "OP_LoadedReel",
"clip_name_template": "{asset}_{subset}_{output}"

View file

@ -418,7 +418,8 @@
"redshiftproxy": "cache",
"usd": "usd"
},
"keep_first_subset_name_for_review": true
"keep_first_subset_name_for_review": true,
"asset_versions_status_profiles": []
}
}
}

View file

@ -307,7 +307,7 @@
],
"task_types": [],
"tasks": [],
"template": "{family}{Task}_{Render_layer}_{Render_pass}"
"template": "{family}{Task}_{Renderlayer}_{Renderpass}"
},
{
"families": [

View file

@ -120,7 +120,7 @@
"filter": {
"task_types": [],
"families": [],
"sebsets": []
"subsets": []
},
"read_raw": false,
"viewer_process_override": "",

View file

@ -0,0 +1,36 @@
{
"simple_creators": [
{
"family": "workfile",
"identifier": "",
"label": "Workfile",
"icon": "fa.file",
"default_variants": [
"Main"
],
"enable_review": false,
"description": "Publish workfile backup",
"detailed_description": "",
"allow_sequences": true,
"extensions": [
".ma",
".mb",
".nk",
".hrox",
".hip",
".hiplc",
".hipnc",
".blend",
".scn",
".tvpp",
".comp",
".zip",
".prproj",
".drp",
".psd",
".psb",
".aep"
]
}
]
}

View file

@ -127,12 +127,6 @@ class BaseItemEntity(BaseEntity):
# Entity is in hierarchy of dynamically created entity
self.is_in_dynamic_item = False
# Entity will save metadata about environments
# - this is current possible only for RawJsonEnity
self.is_env_group = False
# Key of environment group key must be unique across system settings
self.env_group_key = None
# Roles of an entity
self.roles = None
@ -286,16 +280,6 @@ class BaseItemEntity(BaseEntity):
).format(self.group_item.path)
raise EntitySchemaError(self, reason)
# Validate that env group entities will be stored into file.
# - env group entities must store metadata which is not possible if
# metadata would be outside of file
if self.file_item is None and self.is_env_group:
reason = (
"Environment item is not inside file"
" item so can't store metadata for defaults."
)
raise EntitySchemaError(self, reason)
# Dynamic items must not have defined labels. (UI specific)
if self.label and self.is_dynamic_item:
raise EntitySchemaError(
@ -862,11 +846,6 @@ class ItemEntity(BaseItemEntity):
if self.is_dynamic_item:
self.require_key = False
# If value should be stored to environments and under which group key
# - the key may be dynamically changed by it's parent on save
self.env_group_key = self.schema_data.get("env_group_key")
self.is_env_group = bool(self.env_group_key is not None)
# Root item reference
self.root_item = self.parent.root_item

View file

@ -15,7 +15,6 @@ from .exceptions import (
from openpype.settings.constants import (
METADATA_KEYS,
M_DYNAMIC_KEY_LABEL,
M_ENVIRONMENT_KEY,
KEY_REGEX,
KEY_ALLOWED_SYMBOLS
)
@ -148,11 +147,7 @@ class DictMutableKeysEntity(EndpointEntity):
):
raise InvalidKeySymbols(self.path, key)
if self.value_is_env_group:
item_schema = copy.deepcopy(self.item_schema)
item_schema["env_group_key"] = key
else:
item_schema = self.item_schema
item_schema = self.item_schema
new_child = self.create_schema_object(item_schema, self, True)
self.children_by_key[key] = new_child
@ -216,9 +211,7 @@ class DictMutableKeysEntity(EndpointEntity):
self.children_label_by_id = {}
self.store_as_list = self.schema_data.get("store_as_list") or False
self.value_is_env_group = (
self.schema_data.get("value_is_env_group") or False
)
self.required_keys = self.schema_data.get("required_keys") or []
self.collapsible_key = self.schema_data.get("collapsible_key") or False
# GUI attributes
@ -241,9 +234,6 @@ class DictMutableKeysEntity(EndpointEntity):
object_type.update(input_modifiers)
self.item_schema = object_type
if self.value_is_env_group:
self.item_schema["env_group_key"] = ""
if self.group_item is None:
self.is_group = True
@ -259,10 +249,6 @@ class DictMutableKeysEntity(EndpointEntity):
if used_temp_label:
self.label = None
if self.value_is_env_group and self.store_as_list:
reason = "Item can't store environments metadata to list output."
raise EntitySchemaError(self, reason)
if not self.schema_data.get("object_type"):
reason = (
"Modifiable dictionary must have specified `object_type`."
@ -579,18 +565,10 @@ class DictMutableKeysEntity(EndpointEntity):
output.append([key, child_value])
return output
output = {}
for key, child_entity in self.children_by_key.items():
child_value = child_entity.settings_value()
# TODO child should have setter of env group key se child can
# know what env group represents.
if self.value_is_env_group:
if key not in child_value[M_ENVIRONMENT_KEY]:
_metadata = child_value[M_ENVIRONMENT_KEY]
_m_keykey = tuple(_metadata.keys())[0]
env_keys = child_value[M_ENVIRONMENT_KEY].pop(_m_keykey)
child_value[M_ENVIRONMENT_KEY][key] = env_keys
output[key] = child_value
output = {
key: child_entity.settings_value()
for key, child_entity in self.children_by_key.items()
}
output.update(self.metadata)
return output

View file

@ -15,10 +15,7 @@ from .exceptions import (
EntitySchemaError
)
from openpype.settings.constants import (
METADATA_KEYS,
M_ENVIRONMENT_KEY
)
from openpype.settings.constants import METADATA_KEYS
class EndpointEntity(ItemEntity):
@ -534,13 +531,7 @@ class RawJsonEntity(InputEntity):
@property
def metadata(self):
output = {}
if isinstance(self._current_value, dict) and self.is_env_group:
output[M_ENVIRONMENT_KEY] = {
self.env_group_key: list(self._current_value.keys())
}
return output
return {}
@property
def has_unsaved_changes(self):
@ -549,15 +540,6 @@ class RawJsonEntity(InputEntity):
result = self.metadata != self._metadata_for_current_state()
return result
def schema_validations(self):
if self.store_as_string and self.is_env_group:
reason = (
"RawJson entity can't store environment group metadata"
" as string."
)
raise EntitySchemaError(self, reason)
super(RawJsonEntity, self).schema_validations()
def _convert_to_valid_type(self, value):
if isinstance(value, STRING_TYPE):
try:
@ -583,9 +565,6 @@ class RawJsonEntity(InputEntity):
def _settings_value(self):
value = super(RawJsonEntity, self)._settings_value()
if self.is_env_group and isinstance(value, dict):
value.update(self.metadata)
if self.store_as_string:
return json.dumps(value)
return value

View file

@ -52,7 +52,6 @@ from openpype.settings.lib import (
get_available_studio_project_settings_overrides_versions,
get_available_studio_project_anatomy_overrides_versions,
find_environments,
apply_overrides
)
@ -422,11 +421,6 @@ class RootEntity(BaseItemEntity):
"""
pass
@abstractmethod
def _validate_defaults_to_save(self, value):
"""Validate default values before save."""
pass
def _save_default_values(self):
"""Save default values.
@ -435,7 +429,6 @@ class RootEntity(BaseItemEntity):
DEFAULTS.
"""
settings_value = self.settings_value()
self._validate_defaults_to_save(settings_value)
defaults_dir = self.defaults_dir()
for file_path, value in settings_value.items():
@ -604,8 +597,6 @@ class SystemSettings(RootEntity):
def _save_studio_values(self):
settings_value = self.settings_value()
self._validate_duplicated_env_group(settings_value)
self.log.debug("Saving system settings: {}".format(
json.dumps(settings_value, indent=4)
))
@ -613,29 +604,6 @@ class SystemSettings(RootEntity):
# Reset source version after restart
self._source_version = None
def _validate_defaults_to_save(self, value):
"""Validations of default values before save."""
self._validate_duplicated_env_group(value)
def _validate_duplicated_env_group(self, value, override_state=None):
""" Validate duplicated environment groups.
Raises:
DuplicatedEnvGroups: When value contain duplicated env groups.
"""
value = copy.deepcopy(value)
if override_state is None:
override_state = self._override_state
if override_state is OverrideState.STUDIO:
default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
final_value = apply_overrides(default_values, value)
else:
final_value = value
# Check if final_value contain duplicated environment groups
find_environments(final_value)
def _save_project_values(self):
"""System settings can't have project overrides.
@ -911,10 +879,6 @@ class ProjectSettings(RootEntity):
if warnings:
raise SaveWarningExc(warnings)
def _validate_defaults_to_save(self, value):
"""Validations of default values before save."""
pass
def _validate_values_to_save(self, value):
pass

View file

@ -46,8 +46,7 @@
}, {
"type": "raw-json",
"label": "{host_label} Environments",
"key": "{host_name}_environments",
"env_group_key": "{host_name}"
"key": "{host_name}_environments"
}, {
"type": "path",
"key": "{host_name}_executables",

View file

@ -126,6 +126,10 @@
"type": "schema",
"name": "schema_project_standalonepublisher"
},
{
"type": "schema",
"name": "schema_project_traypublisher"
},
{
"type": "schema",
"name": "schema_project_webpublisher"

View file

@ -238,25 +238,19 @@
"type": "dict",
"children": [
{
"key": "ext",
"label": "Output extension",
"type": "text"
"type": "boolean",
"key": "active",
"label": "Is active",
"default": true
},
{
"key": "xml_preset_file",
"label": "XML preset file (with ext)",
"type": "text"
},
{
"key": "xml_preset_dir",
"label": "XML preset folder (optional)",
"type": "text"
"type": "separator"
},
{
"key": "export_type",
"label": "Export clip type",
"type": "enum",
"default": "File Sequence",
"default": "Sequence Publish",
"enum_items": [
{
"Movie": "Movie"
@ -268,59 +262,125 @@
"Sequence Publish": "Sequence Publish"
}
]
},
{
"type": "separator"
"key": "ext",
"label": "Output extension",
"type": "text",
"default": "exr"
},
{
"type": "boolean",
"key": "ignore_comment_attrs",
"label": "Ignore attributes parsed from a segment comments"
},
{
"type": "separator"
"key": "xml_preset_file",
"label": "XML preset file (with ext)",
"type": "text"
},
{
"key": "colorspace_out",
"label": "Output color (imageio)",
"type": "text"
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "representation_add_range",
"label": "Add frame range to representation"
},
{
"type": "list",
"key": "representation_tags",
"label": "Add representation tags",
"object_type": {
"type": "text",
"multiline": false
}
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "load_to_batch_group",
"label": "Load to batch group reel",
"default": false
},
{
"type": "text",
"key": "batch_group_loader_name",
"label": "Use loader name"
"default": "linear"
},
{
"type": "collapsible-wrap",
"label": "Other parameters",
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "xml_preset_dir",
"label": "XML preset folder (optional)",
"type": "text"
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "parsed_comment_attrs",
"label": "Include parsed attributes from comments",
"default": false
},
{
"type": "separator"
},
{
"type": "collapsible-wrap",
"label": "Representation",
"collapsible": true,
"collapsed": true,
"children": [
{
"type": "boolean",
"key": "representation_add_range",
"label": "Add frame range to representation"
},
{
"type": "list",
"key": "representation_tags",
"label": "Add representation tags",
"object_type": {
"type": "text",
"multiline": false
}
}
]
},
{
"type": "collapsible-wrap",
"label": "Loading during publish",
"collapsible": true,
"collapsed": true,
"children": [
{
"type": "boolean",
"key": "load_to_batch_group",
"label": "Load to batch group reel",
"default": false
},
{
"type": "text",
"key": "batch_group_loader_name",
"label": "Use loader name"
}
]
}
]
},
{
"type": "collapsible-wrap",
"label": "Filtering",
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "filter_path_regex",
"label": "Regex in clip path",
"type": "text",
"default": ".*"
}
]
}
]
}
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "IntegrateBatchGroup",
"label": "IntegrateBatchGroup",
"is_group": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}
]
}
]
},

View file

@ -858,6 +858,43 @@
"key": "keep_first_subset_name_for_review",
"label": "Make subset name as first asset name",
"default": true
},
{
"type": "list",
"collapsible": true,
"key": "asset_versions_status_profiles",
"label": "AssetVersion status on publish",
"use_label_wrap": true,
"object_type": {
"type": "dict",
"children": [
{
"key": "hosts",
"label": "Host names",
"type": "hosts-enum",
"multiselection": true
},
{
"key": "task_types",
"label": "Task types",
"type": "task-types-enum"
},
{
"key": "family",
"label": "Family",
"type": "list",
"object_type": "text"
},
{
"type": "separator"
},
{
"key": "status",
"label": "Status name",
"type": "text"
}
]
}
}
]
}

View file

@ -0,0 +1,89 @@
{
"type": "dict",
"collapsible": true,
"key": "traypublisher",
"label": "Tray Publisher",
"is_file": true,
"children": [
{
"type": "list",
"collapsible": true,
"key": "simple_creators",
"label": "Creator plugins",
"use_label_wrap": true,
"collapsible_key": true,
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "family",
"label": "Family"
},
{
"type": "text",
"key": "identifier",
"label": "Identifier",
"placeholder": "< Use 'Family' >",
"tooltip": "All creators must have unique identifier.\nBy default is used 'family' but if you need to have more creators with same families\nyou have to set identifier too."
},
{
"type": "text",
"key": "label",
"label": "Label"
},
{
"type": "text",
"key": "icon",
"label": "Icon"
},
{
"type": "list",
"key": "default_variants",
"label": "Default variants",
"object_type": {
"type": "text"
}
},
{
"type": "boolean",
"key": "enable_review",
"label": "Enable review",
"tooltip": "Allow to create review from source file/s.\nFiles must be supported to be able create review."
},
{
"type": "separator"
},
{
"type": "text",
"key": "description",
"label": "Description"
},
{
"type": "text",
"key": "detailed_description",
"label": "Detailed Description",
"multiline": true
},
{
"type": "separator"
},
{
"key": "allow_sequences",
"label": "Allow sequences",
"type": "boolean"
},
{
"type": "list",
"key": "extensions",
"label": "Extensions",
"use_label_wrap": true,
"collapsible_key": true,
"collapsed": false,
"object_type": "text"
}
]
}
}
]
}

View file

@ -253,7 +253,7 @@
{
"key": "requiredNodes",
"type": "list",
"label": "Required Nodes",
"label": "Plugin required",
"object_type": {
"type": "dict",
"children": [
@ -272,35 +272,43 @@
"label": "Nuke Node Class"
},
{
"type": "splitter"
},
{
"key": "knobs",
"type": "collapsible-wrap",
"label": "Knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
}
]
}
]
}
}
]
}
]
}
},
{
"type": "splitter"
},
{
"type": "list",
"key": "customNodes",
"label": "Custom Nodes",
"key": "overrideNodes",
"label": "Plugin's node overrides",
"object_type": {
"type": "dict",
"children": [
@ -319,27 +327,37 @@
"label": "Nuke Node Class"
},
{
"type": "splitter"
"key": "subsets",
"label": "Subsets",
"type": "list",
"object_type": "text"
},
{
"key": "knobs",
"label": "Knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
"type": "collapsible-wrap",
"label": "Knobs overrides",
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
}
]
}
]
}
}
]
}
]
}
@ -446,7 +464,7 @@
{
"key": "flame",
"type": "dict",
"label": "Flame/Flair",
"label": "Flame & Flare",
"children": [
{
"key": "project",

View file

@ -212,7 +212,7 @@
"object_type": "text"
},
{
"key": "sebsets",
"key": "subsets",
"label": "Subsets",
"type": "list",
"object_type": "text"

View file

@ -117,19 +117,6 @@
}
]
},
{
"key": "env_group_test",
"label": "EnvGroup Test",
"type": "dict",
"children": [
{
"key": "key_to_store_in_system_settings",
"label": "Testing environment group",
"type": "raw-json",
"env_group_key": "test_group"
}
]
},
{
"key": "dict_wrapper",
"type": "dict",

View file

@ -7,8 +7,7 @@
{
"type": "raw-json",
"label": "{host_label} Environments",
"key": "{host_name}_environments",
"env_group_key": "{host_name}"
"key": "{host_name}_environments"
},
{
"type": "path",

View file

@ -34,7 +34,6 @@
"key": "environment",
"label": "Environment",
"type": "raw-json",
"env_group_key": "global",
"require_restart": true
},
{

View file

@ -9,7 +9,6 @@ from .exceptions import (
)
from .constants import (
M_OVERRIDDEN_KEY,
M_ENVIRONMENT_KEY,
METADATA_KEYS,
@ -457,24 +456,6 @@ def get_local_settings():
return _LOCAL_SETTINGS_HANDLER.get_local_settings()
class DuplicatedEnvGroups(Exception):
    """Raised when environment group keys are defined more than once.

    Attributes:
        origin_duplicated (dict): Raw duplication data as received
            (group key -> list of items with "parents" lists).
        duplicated (dict): Group key -> list of "/"-joined parent paths,
            telling where each duplicate was found.
    """

    def __init__(self, duplicated):
        self.origin_duplicated = duplicated
        self.duplicated = {
            key: ["/".join(item["parents"]) for item in items]
            for key, items in duplicated.items()
        }

        joined_keys = ", ".join(
            "\"{}\"".format(env_key) for env_key in self.duplicated
        )
        msg = "Duplicated environment group keys. {}".format(joined_keys)
        super(DuplicatedEnvGroups, self).__init__(msg)
def load_openpype_default_settings():
"""Load openpype default settings."""
return load_jsons_from_dir(DEFAULTS_DIR)
@ -624,69 +605,6 @@ def load_jsons_from_dir(path, *args, **kwargs):
return output
def find_environments(data, with_items=False, parents=None):
    """Find environment values in system settings by their metadata.

    Recursively walks `data` and collects values referenced by the
    environment metadata key.

    Args:
        data (dict): System settings data or dictionary which may contain
            environments metadata.
        with_items (bool): Return the raw grouped items (with parent paths)
            instead of the flattened environment mapping.
        parents (list): Keys leading to `data`; used internally during
            recursion.

    Returns:
        dict: Key is environment group key, value is ready for the `acre`
            module (or grouped items when `with_items` is True).

    Raises:
        DuplicatedEnvGroups: When a group key is defined more than once
            (only checked when `with_items` is False).
    """
    if not data or not isinstance(data, dict):
        return {}

    if parents is None:
        parents = []

    output = {}
    if M_ENVIRONMENT_KEY in data:
        metadata = data.get(M_ENVIRONMENT_KEY)
        for env_group_key, env_keys in metadata.items():
            env_values = {env_key: data[env_key] for env_key in env_keys}
            output.setdefault(env_group_key, []).append({
                "env": env_values,
                # Direct parent is excluded on purpose.
                "parents": parents[:-1]
            })

    for key, value in data.items():
        child_parents = copy.deepcopy(parents)
        child_parents.append(key)
        sub_result = find_environments(value, True, child_parents)
        for env_group_key, env_items in sub_result.items():
            output.setdefault(env_group_key, []).extend(env_items)

    if with_items:
        return output

    duplicated_env_groups = {}
    final_output = {}
    for env_group_key, items in output.items():
        if len(items) > 1:
            duplicated_env_groups[env_group_key] = items
        else:
            final_output[env_group_key] = items[0]["env"]

    if duplicated_env_groups:
        raise DuplicatedEnvGroups(duplicated_env_groups)
    return final_output
def subkey_merge(_dict, value, keys):
key = keys.pop(0)
if not keys:
@ -1082,19 +1000,6 @@ def get_current_project_settings():
return get_project_settings(project_name)
def get_environments():
    """Compute environments from defaults merged with system settings.

    Environment values defined in system settings fully override any
    matching default environment.

    Returns:
        dict: Environments prepared for the `acre` module.
    """
    system_settings = get_system_settings(False)
    return find_environments(system_settings)
def get_general_environments():
"""Get general environments.

View file

@ -872,7 +872,16 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
#PublishLogConsole {
font-family: "Noto Sans Mono";
}
VariantInputsWidget QLineEdit {
border-bottom-right-radius: 0px;
border-top-right-radius: 0px;
}
VariantInputsWidget QToolButton {
border-bottom-left-radius: 0px;
border-top-left-radius: 0px;
padding-top: 0.5em;
padding-bottom: 0.5em;
}
#VariantInput[state="new"], #VariantInput[state="new"]:focus, #VariantInput[state="new"]:hover {
border-color: {color:publisher:success};
}
@ -1351,3 +1360,11 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
#LikeDisabledInput:focus {
border-color: {color:border};
}
/* Attribute Definition widgets */
InViewButton, InViewButton:disabled {
background: transparent;
}
InViewButton:hover {
background: rgba(255, 255, 255, 37);
}

View file

@ -13,8 +13,10 @@ from openpype.pipeline.create import (
CreatorError,
SUBSET_NAME_ALLOWED_SYMBOLS
)
from openpype.tools.utils import ErrorMessageBox
from openpype.tools.utils import (
ErrorMessageBox,
MessageOverlayObject
)
from .widgets import IconValuePixmapLabel
from .assets_widget import CreateDialogAssetsWidget
@ -29,6 +31,14 @@ from ..constants import (
SEPARATORS = ("---separator---", "---")
class VariantInputsWidget(QtWidgets.QWidget):
    """Container for variant input controls that notifies about resizes."""

    # Emitted on every resize of the widget.
    resized = QtCore.Signal()

    def resizeEvent(self, event):
        super(VariantInputsWidget, self).resizeEvent(event)
        # Let listeners sync dependent geometry (e.g. hint button height).
        self.resized.emit()
class CreateErrorMessageBox(ErrorMessageBox):
def __init__(
self,
@ -231,6 +241,8 @@ class CreateDialog(QtWidgets.QDialog):
self._name_pattern = name_pattern
self._compiled_name_pattern = re.compile(name_pattern)
overlay_object = MessageOverlayObject(self)
context_widget = QtWidgets.QWidget(self)
assets_widget = CreateDialogAssetsWidget(controller, context_widget)
@ -247,22 +259,25 @@ class CreateDialog(QtWidgets.QDialog):
creators_model = QtGui.QStandardItemModel()
creators_view.setModel(creators_model)
variant_input = QtWidgets.QLineEdit(self)
variant_widget = VariantInputsWidget(self)
variant_input = QtWidgets.QLineEdit(variant_widget)
variant_input.setObjectName("VariantInput")
variant_input.setToolTip(VARIANT_TOOLTIP)
variant_hints_btn = QtWidgets.QPushButton(self)
variant_hints_btn.setFixedWidth(18)
variant_hints_btn = QtWidgets.QToolButton(variant_widget)
variant_hints_btn.setArrowType(QtCore.Qt.DownArrow)
variant_hints_btn.setIconSize(QtCore.QSize(12, 12))
variant_hints_menu = QtWidgets.QMenu(variant_hints_btn)
variant_hints_menu = QtWidgets.QMenu(variant_widget)
variant_hints_group = QtWidgets.QActionGroup(variant_hints_menu)
variant_hints_btn.setMenu(variant_hints_menu)
# variant_hints_btn.setMenu(variant_hints_menu)
variant_layout = QtWidgets.QHBoxLayout()
variant_layout = QtWidgets.QHBoxLayout(variant_widget)
variant_layout.setContentsMargins(0, 0, 0, 0)
variant_layout.setSpacing(0)
variant_layout.addWidget(variant_input, 1)
variant_layout.addWidget(variant_hints_btn, 0)
variant_layout.addWidget(variant_hints_btn, 0, QtCore.Qt.AlignVCenter)
subset_name_input = QtWidgets.QLineEdit(self)
subset_name_input.setEnabled(False)
@ -271,7 +286,7 @@ class CreateDialog(QtWidgets.QDialog):
create_btn.setEnabled(False)
form_layout = QtWidgets.QFormLayout()
form_layout.addRow("Variant:", variant_layout)
form_layout.addRow("Variant:", variant_widget)
form_layout.addRow("Subset:", subset_name_input)
mid_widget = QtWidgets.QWidget(self)
@ -341,11 +356,13 @@ class CreateDialog(QtWidgets.QDialog):
help_btn.resized.connect(self._on_help_btn_resize)
create_btn.clicked.connect(self._on_create)
variant_widget.resized.connect(self._on_variant_widget_resize)
variant_input.returnPressed.connect(self._on_create)
variant_input.textChanged.connect(self._on_variant_change)
creators_view.selectionModel().currentChanged.connect(
self._on_creator_item_change
)
variant_hints_btn.clicked.connect(self._on_variant_btn_click)
variant_hints_menu.triggered.connect(self._on_variant_action)
assets_widget.selection_changed.connect(self._on_asset_change)
assets_widget.current_context_required.connect(
@ -355,6 +372,8 @@ class CreateDialog(QtWidgets.QDialog):
controller.add_plugins_refresh_callback(self._on_plugins_refresh)
self._overlay_object = overlay_object
self._splitter_widget = splitter_widget
self._context_widget = context_widget
@ -380,6 +399,9 @@ class CreateDialog(QtWidgets.QDialog):
self._prereq_timer = prereq_timer
self._first_show = True
def _emit_message(self, message):
    """Show a short message via the dialog's overlay object."""
    self._overlay_object.add_message(message)
def _context_change_is_enabled(self):
return self._context_widget.isEnabled()
@ -445,12 +467,15 @@ class CreateDialog(QtWidgets.QDialog):
def _on_prereq_timer(self):
prereq_available = True
creator_btn_tooltips = []
if self.creators_model.rowCount() < 1:
prereq_available = False
creator_btn_tooltips.append("Creator is not selected")
if self._asset_doc is None:
# QUESTION how to handle invalid asset?
prereq_available = False
creator_btn_tooltips.append("Context is not selected")
if prereq_available != self._prereq_available:
self._prereq_available = prereq_available
@ -459,6 +484,12 @@ class CreateDialog(QtWidgets.QDialog):
self.creators_view.setEnabled(prereq_available)
self.variant_input.setEnabled(prereq_available)
self.variant_hints_btn.setEnabled(prereq_available)
tooltip = ""
if creator_btn_tooltips:
tooltip = "\n".join(creator_btn_tooltips)
self.create_btn.setToolTip(tooltip)
self._on_variant_change()
def _refresh_asset(self):
@ -540,7 +571,7 @@ class CreateDialog(QtWidgets.QDialog):
identifier = index.data(CREATOR_IDENTIFIER_ROLE)
self._set_creator(identifier)
self._set_creator_by_identifier(identifier)
def _on_plugins_refresh(self):
# Trigger refresh only if is visible
@ -568,7 +599,7 @@ class CreateDialog(QtWidgets.QDialog):
identifier = None
if new_index.isValid():
identifier = new_index.data(CREATOR_IDENTIFIER_ROLE)
self._set_creator(identifier)
self._set_creator_by_identifier(identifier)
def _update_help_btn(self):
pos_x = self.width() - self._help_btn.width()
@ -620,9 +651,11 @@ class CreateDialog(QtWidgets.QDialog):
else:
self._detail_description_widget.setMarkdown(detailed_description)
def _set_creator(self, identifier):
def _set_creator_by_identifier(self, identifier):
creator = self.controller.manual_creators.get(identifier)
self._set_creator(creator)
def _set_creator(self, creator):
self._creator_short_desc_widget.set_plugin(creator)
self._set_creator_detailed_text(creator)
self._pre_create_widget.set_plugin(creator)
@ -660,6 +693,14 @@ class CreateDialog(QtWidgets.QDialog):
self.variant_input.setText(default_variant or "Main")
def _on_variant_widget_resize(self):
self.variant_hints_btn.setFixedHeight(self.variant_input.height())
def _on_variant_btn_click(self):
    """Pop up the variant hints menu under the hints button.

    The menu is shown manually (the button's `setMenu` is not used) at the
    button's bottom-left corner, mapped to global coordinates.
    """
    pos = self.variant_hints_btn.rect().bottomLeft()
    point = self.variant_hints_btn.mapToGlobal(pos)
    self.variant_hints_menu.popup(point)
def _on_variant_action(self, action):
value = action.text()
if self.variant_input.text() != value:
@ -840,7 +881,10 @@ class CreateDialog(QtWidgets.QDialog):
))
error_msg = str(exc_value)
if error_msg is not None:
if error_msg is None:
self._set_creator(self._selected_creator)
self._emit_message("Creation finished...")
else:
box = CreateErrorMessageBox(
creator_label,
subset_name,

View file

@ -142,7 +142,7 @@ class ValidationErrorTitleWidget(QtWidgets.QWidget):
self._help_text_by_instance_id = help_text_by_instance_id
def sizeHint(self):
result = super().sizeHint()
result = super(ValidationErrorTitleWidget, self).sizeHint()
expected_width = 0
for idx in range(self._view_layout.count()):
expected_width += self._view_layout.itemAt(idx).sizeHint().width()

View file

@ -83,8 +83,10 @@ class PublisherWindow(QtWidgets.QDialog):
line_widget.setMinimumHeight(2)
# Content
content_stacked_widget = QtWidgets.QWidget(self)
# Subset widget
subset_frame = QtWidgets.QFrame(self)
subset_frame = QtWidgets.QFrame(content_stacked_widget)
subset_views_widget = BorderedLabelWidget(
"Subsets to publish", subset_frame
@ -171,9 +173,12 @@ class PublisherWindow(QtWidgets.QDialog):
subset_layout.addLayout(footer_layout, 0)
# Create publish frame
publish_frame = PublishFrame(controller, self)
publish_frame = PublishFrame(controller, content_stacked_widget)
content_stacked_layout = QtWidgets.QStackedLayout()
content_stacked_layout = QtWidgets.QStackedLayout(
content_stacked_widget
)
content_stacked_layout.setContentsMargins(0, 0, 0, 0)
content_stacked_layout.setStackingMode(
QtWidgets.QStackedLayout.StackAll
)
@ -186,7 +191,7 @@ class PublisherWindow(QtWidgets.QDialog):
main_layout.setSpacing(0)
main_layout.addWidget(header_widget, 0)
main_layout.addWidget(line_widget, 0)
main_layout.addLayout(content_stacked_layout, 1)
main_layout.addWidget(content_stacked_widget, 1)
creator_window = CreateDialog(controller, parent=self)
@ -228,6 +233,7 @@ class PublisherWindow(QtWidgets.QDialog):
# Store header for TrayPublisher
self._header_layout = header_layout
self._content_stacked_widget = content_stacked_widget
self.content_stacked_layout = content_stacked_layout
self.publish_frame = publish_frame
self.subset_frame = subset_frame
@ -340,9 +346,23 @@ class PublisherWindow(QtWidgets.QDialog):
def _set_publish_visibility(self, visible):
if visible:
widget = self.publish_frame
publish_frame_visible = True
else:
widget = self.subset_frame
publish_frame_visible = False
self.content_stacked_layout.setCurrentWidget(widget)
self._set_publish_frame_visible(publish_frame_visible)
def _set_publish_frame_visible(self, publish_frame_visible):
"""Publish frame visibility has changed.
Also used in TrayPublisher to be able to handle start/end of the
publish widget overlay.
"""
# Hide creator dialog if visible
if publish_frame_visible and self.creator_window.isVisible():
self.creator_window.close()
def _on_reset_clicked(self):
self.controller.reset()

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.3 KiB

View file

@ -44,8 +44,7 @@
}, {
"type": "raw-json",
"label": "{host_label} Environments",
"key": "{host_name}_environments",
"env_group_key": "{host_name}"
"key": "{host_name}_environments"
}, {
"type": "path-widget",
"key": "{host_name}_executables",

View file

@ -465,10 +465,6 @@ class ModifiableDictItem(QtWidgets.QWidget):
self.entity_widget.change_key(key, self)
self.update_style()
@property
def value_is_env_group(self):
return self.entity_widget.value_is_env_group
def update_key_label(self):
if not self.collapsible_key:
return

View file

@ -54,8 +54,11 @@ class StandaloneOverlayWidget(QtWidgets.QFrame):
)
confirm_btn = QtWidgets.QPushButton("Confirm", content_widget)
cancel_btn = QtWidgets.QPushButton("Cancel", content_widget)
cancel_btn.setVisible(False)
btns_layout = QtWidgets.QHBoxLayout()
btns_layout.addStretch(1)
btns_layout.addWidget(cancel_btn, 0)
btns_layout.addWidget(confirm_btn, 0)
content_layout = QtWidgets.QVBoxLayout(content_widget)
@ -77,15 +80,19 @@ class StandaloneOverlayWidget(QtWidgets.QFrame):
projects_view.doubleClicked.connect(self._on_double_click)
confirm_btn.clicked.connect(self._on_confirm_click)
cancel_btn.clicked.connect(self._on_cancel_click)
self._projects_view = projects_view
self._projects_model = projects_model
self._cancel_btn = cancel_btn
self._confirm_btn = confirm_btn
self._publisher_window = publisher_window
self._project_name = None
def showEvent(self, event):
self._projects_model.refresh()
self._cancel_btn.setVisible(self._project_name is not None)
super(StandaloneOverlayWidget, self).showEvent(event)
def _on_double_click(self):
@ -94,13 +101,18 @@ class StandaloneOverlayWidget(QtWidgets.QFrame):
def _on_confirm_click(self):
self.set_selected_project()
def _on_cancel_click(self):
self._set_project(self._project_name)
def set_selected_project(self):
index = self._projects_view.currentIndex()
project_name = index.data(PROJECT_NAME_ROLE)
if not project_name:
return
if project_name:
self._set_project(project_name)
def _set_project(self, project_name):
self._project_name = project_name
traypublisher.set_project_name(project_name)
self.setVisible(False)
self.project_selected.emit(project_name)
@ -110,6 +122,13 @@ class TrayPublishWindow(PublisherWindow):
def __init__(self, *args, **kwargs):
super(TrayPublishWindow, self).__init__(reset_on_show=False)
flags = self.windowFlags()
# Disable always on top hint
if flags & QtCore.Qt.WindowStaysOnTopHint:
flags ^= QtCore.Qt.WindowStaysOnTopHint
self.setWindowFlags(flags)
overlay_widget = StandaloneOverlayWidget(self)
btns_widget = QtWidgets.QWidget(self)
@ -136,6 +155,12 @@ class TrayPublishWindow(PublisherWindow):
self._back_to_overlay_btn = back_to_overlay_btn
self._overlay_widget = overlay_widget
def _set_publish_frame_visible(self, publish_frame_visible):
super(TrayPublishWindow, self)._set_publish_frame_visible(
publish_frame_visible
)
self._back_to_overlay_btn.setVisible(not publish_frame_visible)
def _on_back_to_overlay(self):
self._overlay_widget.setVisible(True)
self._resize_overlay()

View file

@ -1,6 +1,10 @@
from .widgets import create_widget_for_attr_def
from .widgets import (
create_widget_for_attr_def,
AttributeDefinitionsWidget,
)
__all__ = (
"create_widget_for_attr_def",
"AttributeDefinitionsWidget",
)

View file

@ -1,15 +1,16 @@
import os
import collections
import uuid
import clique
from Qt import QtWidgets, QtCore, QtGui
from openpype.tools.utils import paint_image_with_color
# TODO change imports
from openpype.tools.resources import (
get_pixmap,
get_image,
from openpype.lib import FileDefItem
from openpype.tools.utils import (
paint_image_with_color,
ClickableLabel,
)
# TODO change imports
from openpype.tools.resources import get_image
from openpype.tools.utils import (
IconButton,
PixmapLabel
@ -21,7 +22,8 @@ ITEM_ICON_ROLE = QtCore.Qt.UserRole + 3
FILENAMES_ROLE = QtCore.Qt.UserRole + 4
DIRPATH_ROLE = QtCore.Qt.UserRole + 5
IS_DIR_ROLE = QtCore.Qt.UserRole + 6
EXT_ROLE = QtCore.Qt.UserRole + 7
IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7
EXT_ROLE = QtCore.Qt.UserRole + 8
class DropEmpty(QtWidgets.QWidget):
@ -73,175 +75,91 @@ class DropEmpty(QtWidgets.QWidget):
class FilesModel(QtGui.QStandardItemModel):
sequence_exts = [
".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal",
".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits",
".flic", ".flif", ".fpx", ".gif", ".hdri", ".hevc", ".icer",
".icns", ".ico", ".cur", ".ics", ".ilbm", ".jbig", ".jbig2",
".jng", ".jpeg", ".jpeg-ls", ".2000", ".jpg", ".xr",
".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd",
".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf",
".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras",
".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep",
".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf",
".xpm", ".xwd"
]
def __init__(self):
def __init__(self, single_item, allow_sequences):
super(FilesModel, self).__init__()
self._single_item = single_item
self._multivalue = False
self._allow_sequences = allow_sequences
self._items_by_id = {}
self._file_items_by_id = {}
self._filenames_by_dirpath = collections.defaultdict(set)
self._items_by_dirpath = collections.defaultdict(list)
def add_filepaths(self, filepaths):
if not filepaths:
def set_multivalue(self, multivalue):
"""Disable filtering."""
if self._multivalue == multivalue:
return
self._multivalue = multivalue
def add_filepaths(self, items):
if not items:
return
new_dirpaths = set()
for filepath in filepaths:
filename = os.path.basename(filepath)
dirpath = os.path.dirname(filepath)
filenames = self._filenames_by_dirpath[dirpath]
if filename not in filenames:
new_dirpaths.add(dirpath)
filenames.add(filename)
self._refresh_items(new_dirpaths)
file_items = FileDefItem.from_value(items, self._allow_sequences)
if not file_items:
return
if not self._multivalue and self._single_item:
file_items = [file_items[0]]
current_ids = list(self._file_items_by_id.keys())
if current_ids:
self.remove_item_by_ids(current_ids)
new_model_items = []
for file_item in file_items:
item_id, model_item = self._create_item(file_item)
new_model_items.append(model_item)
self._file_items_by_id[item_id] = file_item
self._items_by_id[item_id] = model_item
if new_model_items:
roow_item = self.invisibleRootItem()
roow_item.appendRows(new_model_items)
def remove_item_by_ids(self, item_ids):
if not item_ids:
return
remaining_ids = set(item_ids)
result = collections.defaultdict(list)
for dirpath, items in self._items_by_dirpath.items():
if not remaining_ids:
break
items = []
for item_id in set(item_ids):
if item_id not in self._items_by_id:
continue
item = self._items_by_id.pop(item_id)
self._file_items_by_id.pop(item_id)
items.append(item)
if items:
for item in items:
if not remaining_ids:
break
item_id = item.data(ITEM_ID_ROLE)
if item_id in remaining_ids:
remaining_ids.remove(item_id)
result[dirpath].append(item)
if not result:
return
dirpaths = set(result.keys())
for dirpath, items in result.items():
filenames_cache = self._filenames_by_dirpath[dirpath]
for item in items:
filenames = item.data(FILENAMES_ROLE)
self._items_by_dirpath[dirpath].remove(item)
self.removeRows(item.row(), 1)
for filename in filenames:
if filename in filenames_cache:
filenames_cache.remove(filename)
self._refresh_items(dirpaths)
def get_file_item_by_id(self, item_id):
return self._file_items_by_id.get(item_id)
def _refresh_items(self, dirpaths=None):
if dirpaths is None:
dirpaths = set(self._items_by_dirpath.keys())
new_items = []
for dirpath in dirpaths:
items_to_remove = list(self._items_by_dirpath[dirpath])
cols, remainders = clique.assemble(
self._filenames_by_dirpath[dirpath]
def _create_item(self, file_item):
if file_item.is_dir:
icon_pixmap = paint_image_with_color(
get_image(filename="folder.png"), QtCore.Qt.white
)
else:
icon_pixmap = paint_image_with_color(
get_image(filename="file.png"), QtCore.Qt.white
)
filtered_cols = []
for collection in cols:
filenames = set(collection)
valid_col = True
for filename in filenames:
ext = os.path.splitext(filename)[-1]
valid_col = ext in self.sequence_exts
break
if valid_col:
filtered_cols.append(collection)
else:
for filename in filenames:
remainders.append(filename)
for filename in remainders:
found = False
for item in items_to_remove:
item_filenames = item.data(FILENAMES_ROLE)
if filename in item_filenames and len(item_filenames) == 1:
found = True
items_to_remove.remove(item)
break
if found:
continue
fullpath = os.path.join(dirpath, filename)
if os.path.isdir(fullpath):
icon_pixmap = get_pixmap(filename="folder.png")
else:
icon_pixmap = get_pixmap(filename="file.png")
label = filename
filenames = [filename]
item = self._create_item(
label, filenames, dirpath, icon_pixmap
)
new_items.append(item)
self._items_by_dirpath[dirpath].append(item)
for collection in filtered_cols:
filenames = set(collection)
found = False
for item in items_to_remove:
item_filenames = item.data(FILENAMES_ROLE)
if item_filenames == filenames:
found = True
items_to_remove.remove(item)
break
if found:
continue
col_range = collection.format("{ranges}")
label = "{}<{}>{}".format(
collection.head, col_range, collection.tail
)
icon_pixmap = get_pixmap(filename="files.png")
item = self._create_item(
label, filenames, dirpath, icon_pixmap
)
new_items.append(item)
self._items_by_dirpath[dirpath].append(item)
for item in items_to_remove:
self._items_by_dirpath[dirpath].remove(item)
self.removeRows(item.row(), 1)
if new_items:
self.invisibleRootItem().appendRows(new_items)
def _create_item(self, label, filenames, dirpath, icon_pixmap=None):
first_filename = None
for filename in filenames:
first_filename = filename
break
ext = os.path.splitext(first_filename)[-1]
is_dir = False
if len(filenames) == 1:
filepath = os.path.join(dirpath, first_filename)
is_dir = os.path.isdir(filepath)
item = QtGui.QStandardItem()
item.setData(str(uuid.uuid4()), ITEM_ID_ROLE)
item.setData(label, ITEM_LABEL_ROLE)
item.setData(filenames, FILENAMES_ROLE)
item.setData(dirpath, DIRPATH_ROLE)
item_id = str(uuid.uuid4())
item.setData(item_id, ITEM_ID_ROLE)
item.setData(file_item.label, ITEM_LABEL_ROLE)
item.setData(file_item.filenames, FILENAMES_ROLE)
item.setData(file_item.directory, DIRPATH_ROLE)
item.setData(icon_pixmap, ITEM_ICON_ROLE)
item.setData(ext, EXT_ROLE)
item.setData(is_dir, IS_DIR_ROLE)
item.setData(file_item.ext, EXT_ROLE)
item.setData(file_item.is_dir, IS_DIR_ROLE)
item.setData(file_item.is_sequence, IS_SEQUENCE_ROLE)
return item
return item_id, item
class FilesProxyModel(QtCore.QSortFilterProxyModel):
@ -249,6 +167,15 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel):
super(FilesProxyModel, self).__init__(*args, **kwargs)
self._allow_folders = False
self._allowed_extensions = None
self._multivalue = False
def set_multivalue(self, multivalue):
"""Disable filtering."""
if self._multivalue == multivalue:
return
self._multivalue = multivalue
self.invalidateFilter()
def set_allow_folders(self, allow=None):
if allow is None:
@ -267,7 +194,34 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel):
self._allowed_extensions = extensions
self.invalidateFilter()
def are_valid_files(self, filepaths):
    """Return True if at least one path would pass the current filter.

    NOTE(review): despite the plural name this is an "any" check — it
    returns True as soon as a single acceptable path is found (an existing
    file with an allowed extension, or any non-file when folders are
    allowed).
    """
    for filepath in filepaths:
        if os.path.isfile(filepath):
            _, ext = os.path.splitext(filepath)
            if ext in self._allowed_extensions:
                return True
        elif self._allow_folders:
            return True
    return False
def filter_valid_files(self, filepaths):
    """Return only paths accepted by current extension/folder settings.

    Existing files must have an allowed extension; non-file paths are
    kept only when folders are allowed.
    """
    def _accepts(path):
        if os.path.isfile(path):
            ext = os.path.splitext(path)[-1]
            return ext in self._allowed_extensions
        return self._allow_folders

    return [path for path in filepaths if _accepts(path)]
def filterAcceptsRow(self, row, parent_index):
# Skip filtering if multivalue is set
if self._multivalue:
return True
model = self.sourceModel()
index = model.index(row, self.filterKeyColumn(), parent_index)
# First check if item is folder and if folders are enabled
@ -297,9 +251,11 @@ class FilesProxyModel(QtCore.QSortFilterProxyModel):
class ItemWidget(QtWidgets.QWidget):
remove_requested = QtCore.Signal(str)
split_requested = QtCore.Signal(str)
def __init__(self, item_id, label, pixmap_icon, parent=None):
def __init__(
self, item_id, label, pixmap_icon, is_sequence, multivalue, parent=None
):
self._item_id = item_id
super(ItemWidget, self).__init__(parent)
@ -308,30 +264,82 @@ class ItemWidget(QtWidgets.QWidget):
icon_widget = PixmapLabel(pixmap_icon, self)
label_widget = QtWidgets.QLabel(label, self)
pixmap = paint_image_with_color(
get_image(filename="delete.png"), QtCore.Qt.white
label_size_hint = label_widget.sizeHint()
height = label_size_hint.height()
actions_menu_pix = paint_image_with_color(
get_image(filename="menu.png"), QtCore.Qt.white
)
remove_btn = IconButton(self)
remove_btn.setIcon(QtGui.QIcon(pixmap))
split_btn = ClickableLabel(self)
split_btn.setFixedSize(height, height)
split_btn.setPixmap(actions_menu_pix)
if multivalue:
split_btn.setVisible(False)
else:
split_btn.setVisible(is_sequence)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setContentsMargins(5, 5, 5, 5)
layout.addWidget(icon_widget, 0)
layout.addWidget(label_widget, 1)
layout.addWidget(remove_btn, 0)
layout.addWidget(split_btn, 0)
remove_btn.clicked.connect(self._on_remove_clicked)
split_btn.clicked.connect(self._on_actions_clicked)
self._icon_widget = icon_widget
self._label_widget = label_widget
self._remove_btn = remove_btn
self._split_btn = split_btn
self._actions_menu_pix = actions_menu_pix
self._last_scaled_pix_height = None
def _on_remove_clicked(self):
self.remove_requested.emit(self._item_id)
def _update_btn_size(self):
label_size_hint = self._label_widget.sizeHint()
height = label_size_hint.height()
if height == self._last_scaled_pix_height:
return
self._last_scaled_pix_height = height
self._split_btn.setFixedSize(height, height)
pix = self._actions_menu_pix.scaled(
height, height,
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation
)
self._split_btn.setPixmap(pix)
def showEvent(self, event):
super(ItemWidget, self).showEvent(event)
self._update_btn_size()
def resizeEvent(self, event):
super(ItemWidget, self).resizeEvent(event)
self._update_btn_size()
def _on_actions_clicked(self):
menu = QtWidgets.QMenu(self._split_btn)
action = QtWidgets.QAction("Split sequence", menu)
action.triggered.connect(self._on_split_sequence)
menu.addAction(action)
pos = self._split_btn.rect().bottomLeft()
point = self._split_btn.mapToGlobal(pos)
menu.popup(point)
def _on_split_sequence(self):
self.split_requested.emit(self._item_id)
class InViewButton(IconButton):
pass
class FilesView(QtWidgets.QListView):
"""View showing instances and their groups."""
remove_requested = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(FilesView, self).__init__(*args, **kwargs)
@ -340,8 +348,48 @@ class FilesView(QtWidgets.QListView):
QtWidgets.QAbstractItemView.ExtendedSelection
)
remove_btn = InViewButton(self)
pix_enabled = paint_image_with_color(
get_image(filename="delete.png"), QtCore.Qt.white
)
pix_disabled = paint_image_with_color(
get_image(filename="delete.png"), QtCore.Qt.gray
)
icon = QtGui.QIcon(pix_enabled)
icon.addPixmap(pix_disabled, icon.Disabled, icon.Off)
remove_btn.setIcon(icon)
remove_btn.setEnabled(False)
remove_btn.clicked.connect(self._on_remove_clicked)
self._remove_btn = remove_btn
def setSelectionModel(self, *args, **kwargs):
"""Catch selection model set to register signal callback.
Selection model is not available during initialization.
"""
super(FilesView, self).setSelectionModel(*args, **kwargs)
selection_model = self.selectionModel()
selection_model.selectionChanged.connect(self._on_selection_change)
def set_multivalue(self, multivalue):
"""Disable remove button on multivalue."""
self._remove_btn.setVisible(not multivalue)
def has_selected_item_ids(self):
"""Is any index selected."""
for index in self.selectionModel().selectedIndexes():
instance_id = index.data(ITEM_ID_ROLE)
if instance_id is not None:
return True
return False
def get_selected_item_ids(self):
"""Ids of selected instances."""
selected_item_ids = set()
for index in self.selectionModel().selectedIndexes():
instance_id = index.data(ITEM_ID_ROLE)
@ -350,34 +398,50 @@ class FilesView(QtWidgets.QListView):
return selected_item_ids
def event(self, event):
if not event.type() == QtCore.QEvent.KeyPress:
pass
elif event.key() == QtCore.Qt.Key_Space:
self.toggle_requested.emit(-1)
return True
elif event.key() == QtCore.Qt.Key_Backspace:
self.toggle_requested.emit(0)
return True
elif event.key() == QtCore.Qt.Key_Return:
self.toggle_requested.emit(1)
return True
if event.type() == QtCore.QEvent.KeyPress:
if (
event.key() == QtCore.Qt.Key_Delete
and self.has_selected_item_ids()
):
self.remove_requested.emit()
return True
return super(FilesView, self).event(event)
def _on_selection_change(self):
self._remove_btn.setEnabled(self.has_selected_item_ids())
class MultiFilesWidget(QtWidgets.QFrame):
def _on_remove_clicked(self):
self.remove_requested.emit()
def _update_remove_btn(self):
"""Position remove button to bottom right."""
viewport = self.viewport()
height = viewport.height()
pos_x = viewport.width() - self._remove_btn.width() - 5
pos_y = height - self._remove_btn.height() - 5
self._remove_btn.move(max(0, pos_x), max(0, pos_y))
def resizeEvent(self, event):
super(FilesView, self).resizeEvent(event)
self._update_remove_btn()
def showEvent(self, event):
super(FilesView, self).showEvent(event)
self._update_remove_btn()
class FilesWidget(QtWidgets.QFrame):
value_changed = QtCore.Signal()
def __init__(self, parent):
super(MultiFilesWidget, self).__init__(parent)
def __init__(self, single_item, allow_sequences, parent):
super(FilesWidget, self).__init__(parent)
self.setAcceptDrops(True)
empty_widget = DropEmpty(self)
files_model = FilesModel()
files_model = FilesModel(single_item, allow_sequences)
files_proxy_model = FilesProxyModel()
files_proxy_model.setSourceModel(files_model)
files_view = FilesView(self)
@ -391,8 +455,10 @@ class MultiFilesWidget(QtWidgets.QFrame):
files_proxy_model.rowsInserted.connect(self._on_rows_inserted)
files_proxy_model.rowsRemoved.connect(self._on_rows_removed)
files_view.remove_requested.connect(self._on_remove_requested)
self._in_set_value = False
self._single_item = single_item
self._multivalue = False
self._empty_widget = empty_widget
self._files_model = files_model
@ -401,39 +467,44 @@ class MultiFilesWidget(QtWidgets.QFrame):
self._widgets_by_id = {}
def _set_multivalue(self, multivalue):
if self._multivalue == multivalue:
return
self._multivalue = multivalue
self._files_view.set_multivalue(multivalue)
self._files_model.set_multivalue(multivalue)
self._files_proxy_model.set_multivalue(multivalue)
def set_value(self, value, multivalue):
self._in_set_value = True
widget_ids = set(self._widgets_by_id.keys())
self._remove_item_by_ids(widget_ids)
# TODO how to display multivalue?
all_same = True
if multivalue:
new_value = set()
item_row = None
for _value in value:
_value_set = set(_value)
new_value |= _value_set
if item_row is None:
item_row = _value_set
elif item_row != _value_set:
all_same = False
value = new_value
self._set_multivalue(multivalue)
self._add_filepaths(value)
if value:
self._add_filepaths(value)
self._in_set_value = False
def current_value(self):
model = self._files_proxy_model
filepaths = set()
item_ids = set()
for row in range(model.rowCount()):
index = model.index(row, 0)
dirpath = index.data(DIRPATH_ROLE)
filenames = index.data(FILENAMES_ROLE)
for filename in filenames:
filepaths.add(os.path.join(dirpath, filename))
return list(filepaths)
item_ids.add(index.data(ITEM_ID_ROLE))
file_items = []
for item_id in item_ids:
file_item = self._files_model.get_file_item_by_id(item_id)
if file_item is not None:
file_items.append(file_item.to_dict())
if not self._single_item:
return file_items
if file_items:
return file_items[0]
return FileDefItem.create_empty_item()
def set_filters(self, folders_allowed, exts_filter):
self._files_proxy_model.set_allow_folders(folders_allowed)
@ -447,13 +518,20 @@ class MultiFilesWidget(QtWidgets.QFrame):
continue
label = index.data(ITEM_LABEL_ROLE)
pixmap_icon = index.data(ITEM_ICON_ROLE)
is_sequence = index.data(IS_SEQUENCE_ROLE)
widget = ItemWidget(item_id, label, pixmap_icon)
widget = ItemWidget(
item_id,
label,
pixmap_icon,
is_sequence,
self._multivalue
)
widget.split_requested.connect(self._on_split_request)
self._files_view.setIndexWidget(index, widget)
self._files_proxy_model.setData(
index, widget.sizeHint(), QtCore.Qt.SizeHintRole
)
widget.remove_requested.connect(self._on_remove_request)
self._widgets_by_id[item_id] = widget
self._files_proxy_model.sort(0)
@ -481,27 +559,29 @@ class MultiFilesWidget(QtWidgets.QFrame):
if not self._in_set_value:
self.value_changed.emit()
def _on_remove_request(self, item_id):
found_index = None
for row in range(self._files_model.rowCount()):
index = self._files_model.index(row, 0)
_item_id = index.data(ITEM_ID_ROLE)
if item_id == _item_id:
found_index = index
break
def _on_split_request(self, item_id):
if self._multivalue:
return
if found_index is None:
file_item = self._files_model.get_file_item_by_id(item_id)
if not file_item:
return
new_items = file_item.split_sequence()
self._remove_item_by_ids([item_id])
self._add_filepaths(new_items)
def _on_remove_requested(self):
if self._multivalue:
return
items_to_delete = self._files_view.get_selected_item_ids()
if item_id not in items_to_delete:
items_to_delete = [item_id]
self._remove_item_by_ids(items_to_delete)
if items_to_delete:
self._remove_item_by_ids(items_to_delete)
def sizeHint(self):
# Get size hints of widget and visible widgets
result = super(MultiFilesWidget, self).sizeHint()
result = super(FilesWidget, self).sizeHint()
if not self._files_view.isVisible():
not_visible_hint = self._files_view.sizeHint()
else:
@ -523,15 +603,9 @@ class MultiFilesWidget(QtWidgets.QFrame):
return result
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
if self._multivalue:
return
def dragLeaveEvent(self, event):
event.accept()
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
filepaths = []
@ -539,6 +613,25 @@ class MultiFilesWidget(QtWidgets.QFrame):
filepath = url.toLocalFile()
if os.path.exists(filepath):
filepaths.append(filepath)
if self._files_proxy_model.are_valid_files(filepaths):
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
def dragLeaveEvent(self, event):
event.accept()
def dropEvent(self, event):
mime_data = event.mimeData()
if not self._multivalue and mime_data.hasUrls():
filepaths = []
for url in mime_data.urls():
filepath = url.toLocalFile()
if os.path.exists(filepath):
filepaths.append(filepath)
# Filter filepaths before passing it to model
filepaths = self._files_proxy_model.filter_valid_files(filepaths)
if filepaths:
self._add_filepaths(filepaths)
event.accept()
@ -555,92 +648,3 @@ class MultiFilesWidget(QtWidgets.QFrame):
files_exists = self._files_proxy_model.rowCount() > 0
self._files_view.setVisible(files_exists)
self._empty_widget.setVisible(not files_exists)
class SingleFileWidget(QtWidgets.QWidget):
value_changed = QtCore.Signal()
def __init__(self, parent):
super(SingleFileWidget, self).__init__(parent)
self.setAcceptDrops(True)
filepath_input = QtWidgets.QLineEdit(self)
browse_btn = QtWidgets.QPushButton("Browse", self)
browse_btn.setVisible(False)
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(filepath_input, 1)
layout.addWidget(browse_btn, 0)
browse_btn.clicked.connect(self._on_browse_clicked)
filepath_input.textChanged.connect(self._on_text_change)
self._in_set_value = False
self._filepath_input = filepath_input
self._folders_allowed = False
self._exts_filter = []
def set_value(self, value, multivalue):
self._in_set_value = True
if multivalue:
set_value = set(value)
if len(set_value) == 1:
value = tuple(set_value)[0]
else:
value = "< Multiselection >"
self._filepath_input.setText(value)
self._in_set_value = False
def current_value(self):
return self._filepath_input.text()
def set_filters(self, folders_allowed, exts_filter):
self._folders_allowed = folders_allowed
self._exts_filter = exts_filter
def _on_text_change(self, text):
if not self._in_set_value:
self.value_changed.emit()
def _on_browse_clicked(self):
# TODO implement file dialog logic in '_on_browse_clicked'
print("_on_browse_clicked")
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if not mime_data.hasUrls():
return
filepaths = []
for url in mime_data.urls():
filepath = url.toLocalFile()
if os.path.exists(filepath):
filepaths.append(filepath)
# TODO add folder, extensions check
if len(filepaths) == 1:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
def dragLeaveEvent(self, event):
event.accept()
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
filepaths = []
for url in mime_data.urls():
filepath = url.toLocalFile()
if os.path.exists(filepath):
filepaths.append(filepath)
# TODO filter check
if len(filepaths) == 1:
self._filepath_input.setText(filepaths[0])
event.accept()

View file

@ -1,4 +1,5 @@
import uuid
import copy
from Qt import QtWidgets, QtCore
@ -10,11 +11,14 @@ from openpype.lib.attribute_definitions import (
EnumDef,
BoolDef,
FileDef,
UIDef,
UISeparatorDef,
UILabelDef
)
from openpype.widgets.nice_checkbox import NiceCheckbox
from .files_widget import FilesWidget
def create_widget_for_attr_def(attr_def, parent=None):
if not isinstance(attr_def, AbtractAttrDef):
@ -51,6 +55,106 @@ def create_widget_for_attr_def(attr_def, parent=None):
))
class AttributeDefinitionsWidget(QtWidgets.QWidget):
"""Create widgets for attribute definitions in grid layout.
Widget creates input widgets for passed attribute definitions.
Widget can't handle multiselection values.
"""
def __init__(self, attr_defs=None, parent=None):
super(AttributeDefinitionsWidget, self).__init__(parent)
self._widgets = []
self._current_keys = set()
self.set_attr_defs(attr_defs)
def clear_attr_defs(self):
"""Remove all existing widgets and reset layout if needed."""
self._widgets = []
self._current_keys = set()
layout = self.layout()
if layout is not None:
if layout.count() == 0:
return
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget:
widget.setVisible(False)
widget.deleteLater()
layout.deleteLater()
new_layout = QtWidgets.QGridLayout()
self.setLayout(new_layout)
def set_attr_defs(self, attr_defs):
"""Replace current attribute definitions with passed."""
self.clear_attr_defs()
if attr_defs:
self.add_attr_defs(attr_defs)
def add_attr_defs(self, attr_defs):
"""Add attribute definitions to current."""
layout = self.layout()
row = 0
for attr_def in attr_defs:
if attr_def.key in self._current_keys:
raise KeyError("Duplicated key \"{}\"".format(attr_def.key))
self._current_keys.add(attr_def.key)
widget = create_widget_for_attr_def(attr_def, self)
expand_cols = 2
if attr_def.is_value_def and attr_def.is_label_horizontal:
expand_cols = 1
col_num = 2 - expand_cols
if attr_def.label:
label_widget = QtWidgets.QLabel(attr_def.label, self)
layout.addWidget(
label_widget, row, 0, 1, expand_cols
)
if not attr_def.is_label_horizontal:
row += 1
layout.addWidget(
widget, row, col_num, 1, expand_cols
)
self._widgets.append(widget)
row += 1
def set_value(self, value):
new_value = copy.deepcopy(value)
unused_keys = set(new_value.keys())
for widget in self._widgets:
attr_def = widget.attr_def
if attr_def.key not in new_value:
continue
unused_keys.remove(attr_def.key)
widget_value = new_value[attr_def.key]
if widget_value is None:
widget_value = copy.deepcopy(attr_def.default)
widget.set_value(widget_value)
def current_value(self):
output = {}
for widget in self._widgets:
attr_def = widget.attr_def
if not isinstance(attr_def, UIDef):
output[attr_def.key] = widget.current_value()
return output
class _BaseAttrDefWidget(QtWidgets.QWidget):
# Type 'object' may not work with older PySide versions
value_changed = QtCore.Signal(object, uuid.UUID)
@ -336,16 +440,9 @@ class UnknownAttrWidget(_BaseAttrDefWidget):
class FileAttrWidget(_BaseAttrDefWidget):
def _ui_init(self):
self.multipath = self.attr_def.multipath
if self.multipath:
from .files_widget import MultiFilesWidget
input_widget = MultiFilesWidget(self)
else:
from .files_widget import SingleFileWidget
input_widget = SingleFileWidget(self)
input_widget = FilesWidget(
self.attr_def.single_item, self.attr_def.allow_sequences, self
)
if self.attr_def.tooltip:
input_widget.setToolTip(self.attr_def.tooltip)

69
poetry.lock generated
View file

@ -820,7 +820,7 @@ six = "*"
[[package]]
name = "pillow"
version = "9.0.0"
version = "9.0.1"
description = "Python Imaging Library (Fork)"
category = "main"
optional = false
@ -2310,38 +2310,41 @@ pathlib2 = [
{file = "pathlib2-2.3.6.tar.gz", hash = "sha256:7d8bcb5555003cdf4a8d2872c538faa3a0f5d20630cb360e518ca3b981795e5f"},
]
pillow = [
{file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"},
{file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"},
{file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"},
{file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"},
{file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"},
{file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"},
{file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"},
{file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"},
{file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"},
{file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"},
{file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"},
{file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"},
{file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"},
{file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"},
{file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"},
{file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"},
{file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"},
{file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"},
{file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"},
{file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"},
{file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"},
{file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"},
{file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"},
{file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"},
{file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"},
{file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"},
{file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"},
{file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"},
{file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"},
{file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"},
{file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"},
{file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"},
{file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"},
{file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"},
{file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"},
{file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"},
{file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"},
{file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"},
{file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"},
{file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"},
{file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"},
{file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"},
{file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"},
{file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"},
{file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"},
{file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"},
{file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"},
{file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"},
{file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"},
{file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"},
{file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"},
{file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"},
{file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"},
{file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"},
{file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"},
{file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"},
{file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"},
{file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"},
{file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"},
{file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"},
{file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"},
{file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"},
{file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"},
{file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"},
{file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"},
{file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"},
{file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
]
platformdirs = [
{file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"},

View file

@ -266,18 +266,9 @@ def set_openpype_global_environments() -> None:
"""Set global OpenPype's environments."""
import acre
try:
from openpype.settings import get_general_environments
from openpype.settings import get_general_environments
general_env = get_general_environments()
except Exception:
# Backwards compatibility for OpenPype versions where
# `get_general_environments` does not exists yet
from openpype.settings import get_environments
all_env = get_environments()
general_env = all_env["global"]
general_env = get_general_environments()
merged_env = acre.merge(
acre.parse(general_env),