Merge branch 'develop' into enhancement/OP-3149_Publisher-UI-modifications

This commit is contained in:
Jakub Trllo 2022-05-03 17:33:29 +02:00
commit 0161e3ddbb
48 changed files with 800 additions and 1061 deletions

View file

@ -3,7 +3,6 @@ from .settings import (
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
get_environments,
SystemSettings,
ProjectSettings
@ -23,7 +22,6 @@ from .lib import (
get_app_environments_for_context,
source_hash,
get_latest_version,
get_global_environments,
get_local_site_id,
change_openpype_mongo_url,
create_project_folders,
@ -69,10 +67,10 @@ __all__ = [
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_environments",
"get_project_basic_paths",
"SystemSettings",
"ProjectSettings",
"PypeLogger",
"Logger",
@ -102,8 +100,9 @@ __all__ = [
# get contextual data
"version_up",
"get_hierarchy",
"get_asset",
"get_hierarchy",
"get_workdir_data",
"get_version_from_path",
"get_last_version_from_path",
"get_app_environments_for_context",
@ -111,7 +110,6 @@ __all__ = [
"run_subprocess",
"get_latest_version",
"get_global_environments",
"get_local_site_id",
"change_openpype_mongo_url",

View file

@ -873,6 +873,5 @@ class OpenClipSolver(flib.MediaInfoFile):
if feed_clr_obj is not None:
feed_clr_obj = ET.Element(
"colourSpace", {"type": "string"})
feed_clr_obj.text = profile_name
feed_storage_obj.append(feed_clr_obj)
feed_clr_obj.text = profile_name

View file

@ -26,12 +26,10 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
add_tasks = []
def process(self, context):
project = context.data["flameProject"]
selected_segments = context.data["flameSelectedSegments"]
self.log.debug("__ selected_segments: {}".format(selected_segments))
self.otio_timeline = context.data["otioTimeline"]
self.clips_in_reels = opfapi.get_clips_in_reels(project)
self.fps = context.data["fps"]
# process all selected
@ -63,9 +61,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
# get file path
file_path = clip_data["fpath"]
# get source clip
source_clip = self._get_reel_clip(file_path)
first_frame = opfapi.get_frame_from_filename(file_path) or 0
head, tail = self._get_head_tail(clip_data, first_frame)
@ -103,7 +98,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
"families": families,
"publish": marker_data["publish"],
"fps": self.fps,
"flameSourceClip": source_clip,
"sourceFirstFrame": int(first_frame),
"path": file_path,
"flameAddTasks": self.add_tasks,
@ -258,14 +252,6 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin):
)
return head, tail
def _get_reel_clip(self, path):
match_reel_clip = [
clip for clip in self.clips_in_reels
if clip["fpath"] == path
]
if match_reel_clip:
return match_reel_clip.pop()
def _get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"

View file

@ -1,4 +1,5 @@
import os
import re
from pprint import pformat
from copy import deepcopy
@ -6,6 +7,8 @@ import pyblish.api
import openpype.api
from openpype.hosts.flame import api as opfapi
import flame
class ExtractSubsetResources(openpype.api.Extractor):
"""
@ -20,27 +23,31 @@ class ExtractSubsetResources(openpype.api.Extractor):
# plugin defaults
default_presets = {
"thumbnail": {
"active": True,
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
"export_type": "File Sequence",
"ignore_comment_attrs": True,
"parsed_comment_attrs": False,
"colorspace_out": "Output - sRGB",
"representation_add_range": False,
"representation_tags": ["thumbnail"]
"representation_tags": ["thumbnail"],
"path_regex": ".*"
},
"ftrackpreview": {
"active": True,
"ext": "mov",
"xml_preset_file": "Apple iPad (1920x1080).xml",
"xml_preset_dir": "",
"export_type": "Movie",
"ignore_comment_attrs": True,
"parsed_comment_attrs": False,
"colorspace_out": "Output - Rec.709",
"representation_add_range": True,
"representation_tags": [
"review",
"delete"
]
],
"path_regex": ".*"
}
}
keep_original_representation = False
@ -61,13 +68,10 @@ class ExtractSubsetResources(openpype.api.Extractor):
# flame objects
segment = instance.data["item"]
asset_name = instance.data["asset"]
segment_name = segment.name.get_value()
clip_path = instance.data["path"]
sequence_clip = instance.context.data["flameSequence"]
clip_data = instance.data["flameSourceClip"]
reel_clip = None
if clip_data:
reel_clip = clip_data["PyClip"]
# segment's parent track name
s_track_name = segment.parent.name.get_value()
@ -104,14 +108,44 @@ class ExtractSubsetResources(openpype.api.Extractor):
for unique_name, preset_config in export_presets.items():
modify_xml_data = {}
# get activating attributes
activated_preset = preset_config["active"]
filter_path_regex = preset_config.get("filter_path_regex")
self.log.info(
"Preset `{}` is active `{}` with filter `{}`".format(
unique_name, activated_preset, filter_path_regex
)
)
self.log.debug(
"__ clip_path: `{}`".format(clip_path))
# skip preset if it is not activated
if not activated_preset:
continue
# exclude by regex filter if any
if (
filter_path_regex
and not re.search(filter_path_regex, clip_path)
):
continue
# get all preset attributes
extension = preset_config["ext"]
preset_file = preset_config["xml_preset_file"]
preset_dir = preset_config["xml_preset_dir"]
export_type = preset_config["export_type"]
repre_tags = preset_config["representation_tags"]
ignore_comment_attrs = preset_config["ignore_comment_attrs"]
parsed_comment_attrs = preset_config["parsed_comment_attrs"]
color_out = preset_config["colorspace_out"]
self.log.info(
"Processing `{}` as `{}` to `{}` type...".format(
preset_file, export_type, extension
)
)
# get attributes related to loading in integrate_batch_group
load_to_batch_group = preset_config.get(
"load_to_batch_group")
@ -131,161 +165,157 @@ class ExtractSubsetResources(openpype.api.Extractor):
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
# test for the preset type and an available reel clip
if (
not reel_clip
and export_type != "Sequence Publish"
):
self.log.warning((
"Skipping preset {}. No reel clip "
"available for {}").format(
preset_file, segment_name
))
continue
# by default export source clips
exporting_clip = reel_clip
exporting_clip = None
name_patern_xml = "<name>_{}.".format(
unique_name)
if export_type == "Sequence Publish":
# change export clip to sequence
exporting_clip = sequence_clip
exporting_clip = flame.duplicate(sequence_clip)
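# work on a duplicate so the original sequence stays intact;
# the duplicate is deleted again after the export loop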
# change in/out marks to timeline in/out
in_mark = clip_in
out_mark = clip_out
# keep only the layer holding the instance segment visible
self.hide_others(
exporting_clip, segment_name, s_track_name)
# add xml tags modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start
})
# change name pattern
name_patern_xml = (
"<segment name>_<shot name>_{}.").format(
unique_name)
else:
exporting_clip = self.import_clip(clip_path)
exporting_clip.name.set_value("{}_{}".format(
asset_name, segment_name))
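# source clips are imported from their media path instead of
# being resolved from the reels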
if not ignore_comment_attrs:
# add any xml overrides collected from segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
# change in/out marks to timeline in/out
in_mark = clip_in
out_mark = clip_out
# add xml tags modifications
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles,
"startFrame": frame_start,
"namePattern": name_patern_xml
})
if parsed_comment_attrs:
# add any xml overrides collected from segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
self.log.debug("__ modify_xml_data: {}".format(pformat(
modify_xml_data
)))
# loop all presets with the duplication maintained
with opfapi.maintained_object_duplication(
exporting_clip) as duplclip:
kwargs = {}
export_kwargs = {}
# validate xml preset file is filled
if preset_file == "":
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
)
if export_type == "Sequence Publish":
# keep only the layer holding the instance segment visible
self.hide_others(duplclip, segment_name, s_track_name)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
# validate xml preset file is filled
if preset_file == "":
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
# define kwargs based on preset type
if "thumbnail" in unique_name:
export_kwargs["thumb_frame_number"] = int(in_mark + (
source_duration_handles / 2))
else:
export_kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# define kwargs based on preset type
if "thumbnail" in unique_name:
kwargs["thumb_frame_number"] = in_mark + (
source_duration_handles / 2)
else:
kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
# export
opfapi.export_clip(
export_dir_path, exporting_clip, preset_path, **export_kwargs)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# export
opfapi.export_clip(
export_dir_path, duplclip, preset_path, **kwargs)
# collect all available content of export dir
files = os.listdir(export_dir_path)
extension = preset_config["ext"]
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# create representation data
representation_data = {
"name": unique_name,
"outputName": unique_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": load_to_batch_group,
"batch_group_loader_name": batch_group_loader_name
}
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
# collect all available content of export dir
files = os.listdir(export_dir_path)
# add files to representation but add
# image sequence as a list
if (
# first check if any file has a .mov extension
[
f for f in files
if os.path.splitext(f)[-1] == ".mov"
]
# then check if the unique name is "thumbnail"
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
instance.data["representations"].append(representation_data)
# add files to representation but add
# image sequence as a list
if (
# first check if any file has a .mov extension
[
f for f in files
if os.path.splitext(f)[-1] == ".mov"
]
# then check if the unique name is "thumbnail"
or unique_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": frame_start_handle,
"frameEnd": (
frame_start_handle + source_duration_handles),
"fps": instance.data["fps"]
})
self.log.info("Added representation: {}".format(
representation_data))
instance.data["representations"].append(representation_data)
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
self.log.info("Added representation: {}".format(
representation_data))
if export_type == "Sequence Publish":
# at the end remove the duplicated clip
flame.delete(exporting_clip)
self.log.debug("All representations: {}".format(
pformat(instance.data["representations"])))
@ -373,3 +403,18 @@ class ExtractSubsetResources(openpype.api.Extractor):
for segment in track.segments:
if segment.name.get_value() != segment_name:
segment.hidden = True
def import_clip(self, path):
"""
Import clip from path
"""
clips = flame.import_clips(path)
self.log.info("Clips [{}] imported from `{}`".format(clips, path))
if not clips:
self.log.warning("Path `{}` is not having any clips".format(path))
return None
elif len(clips) > 1:
self.log.warning(
"Path `{}` is containing more that one clip".format(path)
)
return clips[0]

View file

@ -1,26 +0,0 @@
import pyblish
@pyblish.api.log
class ValidateSourceClip(pyblish.api.InstancePlugin):
"""Validate instance is not having empty `flameSourceClip`"""
order = pyblish.api.ValidatorOrder
label = "Validate Source Clip"
hosts = ["flame"]
families = ["clip"]
optional = True
active = False
def process(self, instance):
flame_source_clip = instance.data["flameSourceClip"]
self.log.debug("_ flame_source_clip: {}".format(flame_source_clip))
if flame_source_clip is None:
raise AttributeError((
"Timeline segment `{}` is not having "
"relative clip in reels. Please make sure "
"you push `Save Sources` button in Conform Tab").format(
instance.data["asset"]
))

View file

@ -10,16 +10,6 @@ log = Logger.get_logger(__name__)
def tag_data():
return {
# "Retiming": {
# "editable": "1",
# "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
# "icon": "retiming.png",
# "metadata": {
# "family": "retiming",
# "marginIn": 1,
# "marginOut": 1
# }
# },
"[Lenses]": {
"Set lense here": {
"editable": "1",
@ -48,6 +38,16 @@ def tag_data():
"family": "comment",
"subset": "main"
}
},
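# preset consumed by the new hiero frame publishing plugins
# (CollectFrameTagInstances / ExtractFrames in this change)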
"FrameMain": {
"editable": "1",
"note": "Publishing a frame subset.",
"icon": "z_layer_main.png",
"metadata": {
"family": "frame",
"subset": "main",
"format": "png"
}
}
}

View file

@ -0,0 +1,142 @@
from pprint import pformat
import re
import ast
import json
import pyblish.api
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
"""Collect frames from tags.
Tag is expected to have metadata:
{
"family": "frame"
"subset": "main"
}
"""
order = pyblish.api.CollectorOrder
label = "Collect Frames"
hosts = ["hiero"]
def process(self, context):
self._context = context
# collect all sequence tags
subset_data = self._create_frame_subset_data_sequence(context)
self.log.debug("__ subset_data: {}".format(
pformat(subset_data)
))
# create instances
self._create_instances(subset_data)
def _get_tag_data(self, tag):
data = {}
# get tag metadata attribute
tag_data = tag.metadata()
# convert tag metadata to plain key names and values of correct types
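# (Hiero prefixes metadata keys with "tag." and stores every
# value as a string, hence the parsing below)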
for k, v in dict(tag_data).items():
key = k.replace("tag.", "")
try:
# capture exceptions which are related to strings only
if re.match(r"^[\d]+$", v):
value = int(v)
elif re.match(r"^True$", v):
value = True
elif re.match(r"^False$", v):
value = False
elif re.match(r"^None$", v):
value = None
elif re.match(r"^[\w\d_]+$", v):
value = v
else:
value = ast.literal_eval(v)
except (ValueError, SyntaxError):
value = v
data[key] = value
return data
def _create_frame_subset_data_sequence(self, context):
sequence_tags = []
sequence = context.data["activeTimeline"]
# get all publishable sequence frames
publish_frames = range(int(sequence.duration() + 1))
self.log.debug("__ publish_frames: {}".format(
pformat(publish_frames)
))
# get all sequence tags
for tag in sequence.tags():
tag_data = self._get_tag_data(tag)
self.log.debug("__ tag_data: {}".format(
pformat(tag_data)
))
if not tag_data:
continue
if "family" not in tag_data:
continue
if tag_data["family"] != "frame":
continue
sequence_tags.append(tag_data)
self.log.debug("__ sequence_tags: {}".format(
pformat(sequence_tags)
))
# first collect all available subset tag frames
subset_data = {}
for tag_data in sequence_tags:
frame = int(tag_data["start"])
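# ignore tags sitting outside of the sequence duration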
if frame not in publish_frames:
continue
subset = tag_data["subset"]
if subset in subset_data:
# update existing subset key
subset_data[subset]["frames"].append(frame)
else:
# create new subset key
subset_data[subset] = {
"frames": [frame],
"format": tag_data["format"],
"asset": context.data["assetEntity"]["name"]
}
return subset_data
def _create_instances(self, subset_data):
# create instance per subset
for subset_name, subset_data in subset_data.items():
name = "frame" + subset_name.title()
data = {
"name": name,
"label": "{} {}".format(name, subset_data["frames"]),
"family": "image",
"families": ["frame"],
"asset": subset_data["asset"],
"subset": name,
"format": subset_data["format"],
"frames": subset_data["frames"]
}
self._context.create_instance(**data)
self.log.info(
"Created instance: {}".format(
json.dumps(data, sort_keys=True, indent=4)
)
)

View file

@ -0,0 +1,82 @@
import os
import pyblish.api
import openpype
class ExtractFrames(openpype.api.Extractor):
"""Extracts frames"""
order = pyblish.api.ExtractorOrder
label = "Extract Frames"
hosts = ["hiero"]
families = ["frame"]
movie_extensions = ["mov", "mp4"]
def process(self, instance):
oiio_tool_path = openpype.lib.get_oiio_tools_path()
staging_dir = self.staging_dir(instance)
output_template = os.path.join(staging_dir, instance.data["name"])
sequence = instance.context.data["activeTimeline"]
files = []
for frame in instance.data["frames"]:
track_item = sequence.trackItemAt(frame)
media_source = track_item.source().mediaSource()
input_path = media_source.fileinfos()[0].filename()
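# convert the timeline frame to the source media's own frame numbering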
input_frame = (
track_item.mapTimelineToSource(frame) +
track_item.source().mediaSource().startTime()
)
output_ext = instance.data["format"]
output_path = output_template
output_path += ".{:04d}.{}".format(int(frame), output_ext)
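# e.g. <staging_dir>/frameMain.0012.png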
args = [oiio_tool_path]
ext = os.path.splitext(input_path)[1][1:]
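# movie containers expose frames as subimages to oiiotool;
# image sequences are selected by frame number instead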
if ext in self.movie_extensions:
args.extend(["--subimage", str(int(input_frame))])
else:
args.extend(["--frames", str(int(input_frame))])
if ext == "exr":
args.extend(["--powc", "0.45,0.45,0.45,1.0"])
args.extend([input_path, "-o", output_path])
output = openpype.api.run_subprocess(args)
failed_output = "oiiotool produced no output."
if failed_output in output:
raise ValueError(
"oiiotool processing failed. Args: {}".format(args)
)
files.append(output_path)
# Feedback to user because "oiiotool" can make the publishing
# appear unresponsive.
self.log.info(
"Processed {} of {} frames".format(
instance.data["frames"].index(frame) + 1,
len(instance.data["frames"])
)
)
if len(files) == 1:
instance.data["representations"] = [
{
"name": output_ext,
"ext": output_ext,
"files": os.path.basename(files[0]),
"stagingDir": staging_dir
}
]
else:
instance.data["representations"] = [
{
"name": output_ext,
"ext": output_ext,
"files": [os.path.basename(x) for x in files],
"stagingDir": staging_dir
}
]

View file

@ -68,6 +68,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
"families": [],
"representations": [workfile_representation, thumb_representation]
}
@ -77,6 +78,7 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin):
# update context with main project attributes
context_data = {
"activeProject": project,
"activeTimeline": active_timeline,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"colorspace": self.get_colorspace(project),

View file

@ -1,38 +0,0 @@
import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Clip Resolution"
hosts = ["hiero"]
families = ["clip"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
item = instance.data["item"]
source_resolution = instance.data.get("sourceResolution", None)
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
# source exception
if source_resolution:
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = item.source().mediaSource().pixelAspect()
resolution_data = {
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
}
# add to instance data
instance.data.update(resolution_data)
self.log.info("Resolution of instance '{}' is: {}".format(
instance,
resolution_data
))

View file

@ -1,15 +0,0 @@
import pyblish.api
class CollectHostVersion(pyblish.api.ContextPlugin):
"""Inject the hosts version into context"""
label = "Collect Host and HostVersion"
order = pyblish.api.CollectorOrder - 0.5
def process(self, context):
import nuke
import pyblish.api
context.set_data("host", pyblish.api.current_host())
context.set_data('hostVersion', value=nuke.NUKE_VERSION_STRING)

View file

@ -1,32 +0,0 @@
from pyblish import api
class CollectTagRetime(api.InstancePlugin):
"""Collect Retiming from Tags of selected track items."""
order = api.CollectorOrder + 0.014
label = "Collect Retiming Tag"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
# gets tags
tags = instance.data["tags"]
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")
# collect only retiming family tags and their values
if "retiming" in t_family:
margin_in = t_metadata.get("tag.marginIn", "")
margin_out = t_metadata.get("tag.marginOut", "")
instance.data["retimeMarginIn"] = int(margin_in)
instance.data["retimeMarginOut"] = int(margin_out)
instance.data["retime"] = True
self.log.info("retimeMarginIn: `{}`".format(margin_in))
self.log.info("retimeMarginOut: `{}`".format(margin_out))
instance.data["families"] += ["retime"]

View file

@ -1,223 +0,0 @@
from compiler.ast import flatten
from pyblish import api
from openpype.hosts.hiero import api as phiero
import hiero
# from openpype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)
class PreCollectInstances(api.ContextPlugin):
"""Collect all Track items selection."""
order = api.CollectorOrder - 0.509
label = "Pre-collect Instances"
hosts = ["hiero"]
def process(self, context):
track_items = phiero.get_track_items(
selected=True, check_tagged=True, check_enabled=True)
# only return enabled track items
if not track_items:
track_items = phiero.get_track_items(
check_enabled=True, check_tagged=True)
# get sequence and video tracks
sequence = context.data["activeSequence"]
tracks = sequence.videoTracks()
# add collection to context
tracks_effect_items = self.collect_sub_track_items(tracks)
context.data["tracksEffectItems"] = tracks_effect_items
self.log.info(
"Processing enabled track items: {}".format(len(track_items)))
for _ti in track_items:
data = {}
clip = _ti.source()
# get clip's subtracks and annotations
annotations = self.clip_annotations(clip)
subtracks = self.clip_subtrack(_ti)
self.log.debug("Annotations: {}".format(annotations))
self.log.debug(">> Subtracks: {}".format(subtracks))
# get pype tag data
tag_parsed_data = phiero.get_track_item_pype_data(_ti)
# self.log.debug(pformat(tag_parsed_data))
if not tag_parsed_data:
continue
if tag_parsed_data.get("id") != "pyblish.avalon.instance":
continue
# add tag data to instance data
data.update({
k: v for k, v in tag_parsed_data.items()
if k not in ("id", "applieswhole", "label")
})
asset = tag_parsed_data["asset"]
subset = tag_parsed_data["subset"]
review_track = tag_parsed_data.get("reviewTrack")
hiero_track = tag_parsed_data.get("heroTrack")
audio = tag_parsed_data.get("audio")
# remove audio attribute from data
data.pop("audio")
# insert family into families
family = tag_parsed_data["family"]
families = [str(f) for f in tag_parsed_data["families"]]
families.insert(0, str(family))
track = _ti.parent()
media_source = _ti.source().mediaSource()
source_path = media_source.firstpath()
file_head = media_source.filenameHead()
file_info = media_source.fileinfos().pop()
source_first_frame = int(file_info.startFrame())
# apply only for review and master track instance
if review_track and hiero_track:
families += ["review", "ftrack"]
data.update({
"name": "{} {} {}".format(asset, subset, families),
"asset": asset,
"item": _ti,
"families": families,
# tags
"tags": _ti.tags(),
# track item attributes
"track": track.name(),
"trackItem": track,
"reviewTrack": review_track,
# version data
"versionData": {
"colorspace": _ti.sourceMediaColourTransform()
},
# source attribute
"source": source_path,
"sourceMedia": media_source,
"sourcePath": source_path,
"sourceFileHead": file_head,
"sourceFirst": source_first_frame,
# clip's effect
"clipEffectItems": subtracks
})
instance = context.create_instance(**data)
self.log.info("Creating instance.data: {}".format(instance.data))
if audio:
a_data = dict()
# add tag data to instance data
a_data.update({
k: v for k, v in tag_parsed_data.items()
if k not in ("id", "applieswhole", "label")
})
# create main attributes
subset = "audioMain"
family = "audio"
families = ["clip", "ftrack"]
families.insert(0, str(family))
name = "{} {} {}".format(asset, subset, families)
a_data.update({
"name": name,
"subset": subset,
"asset": asset,
"family": family,
"families": families,
"item": _ti,
# tags
"tags": _ti.tags(),
})
a_instance = context.create_instance(**a_data)
self.log.info("Creating audio instance: {}".format(a_instance))
@staticmethod
def clip_annotations(clip):
"""
Returns list of Clip's hiero.core.Annotation
"""
annotations = []
subTrackItems = flatten(clip.subTrackItems())
annotations += [item for item in subTrackItems if isinstance(
item, hiero.core.Annotation)]
return annotations
@staticmethod
def clip_subtrack(clip):
"""
Returns list of Clip's hiero.core.SubTrackItem
"""
subtracks = []
subTrackItems = flatten(clip.parent().subTrackItems())
for item in subTrackItems:
# avoid all annotations
if isinstance(item, hiero.core.Annotation):
continue
# avoid all disabled items
if not item.isEnabled():
continue
subtracks.append(item)
return subtracks
@staticmethod
def collect_sub_track_items(tracks):
"""
Returns dictionary with track index as key and list of subtracks
"""
# collect all subtrack items
sub_track_items = dict()
for track in tracks:
items = track.items()
# skip tracks with clips > only tracks holding effects are needed
if items:
continue
# skip all disabled tracks
if not track.isEnabled():
continue
track_index = track.trackIndex()
_sub_track_items = flatten(track.subTrackItems())
# continue only if any subtrack items are collected
if len(_sub_track_items) < 1:
continue
enabled_sti = list()
# loop all found subtrack items and check if they are enabled
for _sti in _sub_track_items:
# checking if not enabled
if not _sti.isEnabled():
continue
if isinstance(_sti, hiero.core.Annotation):
continue
# collect the subtrack item
enabled_sti.append(_sti)
# continue only if any subtrack items are collected
if len(enabled_sti) < 1:
continue
# add collection of subtrackitems to dict
sub_track_items[track_index] = enabled_sti
return sub_track_items

View file

@ -1,74 +0,0 @@
import os
import pyblish.api
from openpype.hosts.hiero import api as phiero
from openpype.pipeline import legacy_io
class PreCollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
label = "Pre-collect Workfile"
order = pyblish.api.CollectorOrder - 0.51
def process(self, context):
asset = legacy_io.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_sequence = phiero.get_current_sequence()
video_tracks = active_sequence.videoTracks()
audio_tracks = active_sequence.audioTracks()
current_file = project.path()
staging_dir = os.path.dirname(current_file)
base_name = os.path.basename(current_file)
# get workfile's colorspace properties
_clrs = {}
_clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() # noqa
_clrs["lutSetting16Bit"] = project.lutSetting16Bit()
_clrs["lutSetting8Bit"] = project.lutSetting8Bit()
_clrs["lutSettingFloat"] = project.lutSettingFloat()
_clrs["lutSettingLog"] = project.lutSettingLog()
_clrs["lutSettingViewer"] = project.lutSettingViewer()
_clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
_clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
_clrs["ocioConfigName"] = project.ocioConfigName()
_clrs["ocioConfigPath"] = project.ocioConfigPath()
# set main project attributes to context
context.data["activeProject"] = project
context.data["activeSequence"] = active_sequence
context.data["videoTracks"] = video_tracks
context.data["audioTracks"] = audio_tracks
context.data["currentFile"] = current_file
context.data["colorspace"] = _clrs
self.log.info("currentFile: {}".format(current_file))
# creating workfile representation
representation = {
'name': 'hrox',
'ext': 'hrox',
'files': base_name,
"stagingDir": staging_dir,
}
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
# version data
"versionData": {
"colorspace": _clrs
},
# source attribute
"sourcePath": current_file,
"representations": [representation]
}
instance = context.create_instance(**instance_data)
self.log.info("Creating instance: {}".format(instance))

View file

@ -400,7 +400,7 @@ def add_write_node(name, **kwarg):
return w
def read(node):
def read_avalon_data(node):
"""Return user-defined knobs from given `node`
Args:
@ -415,8 +415,6 @@ def read(node):
return knob_name[len("avalon:"):]
elif knob_name.startswith("ak:"):
return knob_name[len("ak:"):]
else:
return knob_name
data = dict()
@ -445,7 +443,8 @@ def read(node):
(knob_type == 26 and value)
):
key = compat_prefixed(knob_name)
data[key] = value
if key is not None:
data[key] = value
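# knobs without an avalon/ak prefix now return None and are skipped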
if knob_name == first_user_knob:
break
@ -507,20 +506,74 @@ def get_created_node_imageio_setting(**kwarg):
log.debug(kwarg)
nodeclass = kwarg.get("nodeclass", None)
creator = kwarg.get("creator", None)
subset = kwarg.get("subset", None)
assert any([creator, nodeclass]), nuke.message(
"`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
imageio_nodes = get_nuke_imageio_settings()["nodes"]["requiredNodes"]
imageio_nodes = get_nuke_imageio_settings()["nodes"]
required_nodes = imageio_nodes["requiredNodes"]
override_nodes = imageio_nodes["overrideNodes"]
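# requiredNodes hold the base knob presets, overrideNodes can
# patch them per plugin and subset (matched below)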
imageio_node = None
for node in imageio_nodes:
for node in required_nodes:
log.info(node)
if (nodeclass in node["nukeNodeClass"]) and (
creator in node["plugins"]):
if (
nodeclass in node["nukeNodeClass"]
and creator in node["plugins"]
):
imageio_node = node
break
log.debug("__ imageio_node: {}".format(imageio_node))
# find matching override node
override_imageio_node = None
for onode in override_nodes:
log.info(onode)
if nodeclass not in onode["nukeNodeClass"]:
continue
if creator not in onode["plugins"]:
continue
if (
onode["subsets"]
and not any(re.search(s, subset) for s in onode["subsets"])
):
continue
override_imageio_node = onode
break
log.debug("__ override_imageio_node: {}".format(override_imageio_node))
# add overrides to imageio_node
if override_imageio_node:
# get all knob names in imageio_node
knob_names = [k["name"] for k in imageio_node["knobs"]]
for oknob in override_imageio_node["knobs"]:
for knob in imageio_node["knobs"]:
# override matching knob name
if oknob["name"] == knob["name"]:
log.debug(
"_ overriding knob: `{}` > `{}`".format(
knob, oknob
))
if not oknob["value"]:
# remove original knob if no value found in oknob
imageio_node["knobs"].remove(knob)
else:
# override knob value with oknob's
knob["value"] = oknob["value"]
# add missing knobs into imageio_node
if oknob["name"] not in knob_names:
log.debug(
"_ adding knob: `{}`".format(oknob))
imageio_node["knobs"].append(oknob)
knob_names.append(oknob["name"])
log.info("ImageIO node: {}".format(imageio_node))
return imageio_node
@ -542,7 +595,7 @@ def get_imageio_input_colorspace(filename):
def on_script_load():
''' Callback for ffmpeg support
'''
if nuke.env['LINUX']:
if nuke.env["LINUX"]:
nuke.tcl('load ffmpegReader')
nuke.tcl('load ffmpegWriter')
else:
@ -567,7 +620,7 @@ def check_inventory_versions():
if container:
node = nuke.toNode(container["objectName"])
avalon_knob_data = read(node)
avalon_knob_data = read_avalon_data(node)
# get representation from io
representation = legacy_io.find_one({
@ -593,7 +646,7 @@ def check_inventory_versions():
versions = legacy_io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
}).distinct("name")
max_version = max(versions)
@ -623,20 +676,20 @@ def writes_version_sync():
if _NODE_TAB_NAME not in each.knobs():
continue
avalon_knob_data = read(each)
avalon_knob_data = read_avalon_data(each)
try:
if avalon_knob_data['families'] not in ["render"]:
log.debug(avalon_knob_data['families'])
if avalon_knob_data["families"] not in ["render"]:
log.debug(avalon_knob_data["families"])
continue
node_file = each['file'].value()
node_file = each["file"].value()
node_version = "v" + get_version_from_path(node_file)
log.debug("node_version: {}".format(node_version))
node_new_file = node_file.replace(node_version, new_version)
each['file'].setValue(node_new_file)
each["file"].setValue(node_new_file)
if not os.path.isdir(os.path.dirname(node_new_file)):
log.warning("Path does not exist! I am creating it.")
os.makedirs(os.path.dirname(node_new_file))
@ -665,18 +718,19 @@ def check_subsetname_exists(nodes, subset_name):
bool: True or False
"""
return next((True for n in nodes
if subset_name in read(n).get("subset", "")),
if subset_name in read_avalon_data(n).get("subset", "")),
False)
def get_render_path(node):
''' Generate render path from presets based on avalon knob data
'''
data = {'avalon': read(node)}
data = {'avalon': read_avalon_data(node)}
data_preset = {
"nodeclass": data['avalon']['family'],
"families": [data['avalon']['families']],
"creator": data['avalon']['creator']
"nodeclass": data["avalon"]["family"],
"families": [data["avalon"]["families"]],
"creator": data["avalon"]["creator"],
"subset": data["avalon"]["subset"]
}
nuke_imageio_writes = get_created_node_imageio_setting(**data_preset)
@ -749,7 +803,7 @@ def format_anatomy(data):
def script_name():
''' Returns nuke script path
'''
return nuke.root().knob('name').value()
return nuke.root().knob("name").value()
def add_button_write_to_read(node):
@ -844,7 +898,7 @@ def create_write_node(name, data, input=None, prenodes=None,
# adding dataflow template
log.debug("imageio_writes: `{}`".format(imageio_writes))
for knob in imageio_writes["knobs"]:
_data.update({knob["name"]: knob["value"]})
_data[knob["name"]] = knob["value"]
_data = fix_data_for_node_create(_data)
@ -1193,15 +1247,19 @@ class WorkfileSettings(object):
erased_viewers = []
for v in nuke.allNodes(filter="Viewer"):
v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
# set viewProcess to preset from settings
v["viewerProcess"].setValue(
str(viewer_dict["viewerProcess"])
)
if str(viewer_dict["viewerProcess"]) \
not in v['viewerProcess'].value():
not in v["viewerProcess"].value():
copy_inputs = v.dependencies()
copy_knobs = {k: v[k].value() for k in v.knobs()
if k not in filter_knobs}
# delete viewer with wrong settings
erased_viewers.append(v['name'].value())
erased_viewers.append(v["name"].value())
nuke.delete(v)
# create new viewer
@ -1217,7 +1275,7 @@ class WorkfileSettings(object):
nv[k].setValue(v)
# set viewerProcess
nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
nv["viewerProcess"].setValue(str(viewer_dict["viewerProcess"]))
if erased_viewers:
log.warning(
@ -1293,12 +1351,12 @@ class WorkfileSettings(object):
for node in nuke.allNodes(filter="Group"):
# get data from avalon knob
avalon_knob_data = read(node)
avalon_knob_data = read_avalon_data(node)
if not avalon_knob_data:
if avalon_knob_data.get("id") != "pyblish.avalon.instance":
continue
if avalon_knob_data["id"] != "pyblish.avalon.instance":
if "creator" not in avalon_knob_data:
continue
# establish families
@ -1309,7 +1367,8 @@ class WorkfileSettings(object):
data_preset = {
"nodeclass": avalon_knob_data["family"],
"families": families,
"creator": avalon_knob_data['creator']
"creator": avalon_knob_data["creator"],
"subset": avalon_knob_data["subset"]
}
nuke_imageio_writes = get_created_node_imageio_setting(
@ -1342,7 +1401,6 @@ class WorkfileSettings(object):
write_node[knob["name"]].setValue(value)
def set_reads_colorspace(self, read_clrs_inputs):
""" Setting colorspace to Read nodes
@ -1368,17 +1426,16 @@ class WorkfileSettings(object):
current = n["colorspace"].value()
future = str(preset_clrsp)
if current != future:
changes.update({
n.name(): {
"from": current,
"to": future
}
})
changes[n.name()] = {
"from": current,
"to": future
}
log.debug(changes)
if changes:
msg = "Read nodes are not set to correct colospace:\n\n"
for nname, knobs in changes.items():
msg += str(
msg += (
" - node: '{0}' is now '{1}' but should be '{2}'\n"
).format(nname, knobs["from"], knobs["to"])
@ -1610,17 +1667,17 @@ def get_hierarchical_attr(entity, attr, default=None):
if not value:
break
if value or entity['type'].lower() == 'project':
if value or entity["type"].lower() == "project":
return value
parent_id = entity['parent']
parent_id = entity["parent"]
if (
entity['type'].lower() == 'asset'
and entity.get('data', {}).get('visualParent')
entity["type"].lower() == "asset"
and entity.get("data", {}).get("visualParent")
):
parent_id = entity['data']['visualParent']
parent_id = entity["data"]["visualParent"]
parent = legacy_io.find_one({'_id': parent_id})
parent = legacy_io.find_one({"_id": parent_id})
return get_hierarchical_attr(parent, attr)
@ -1630,12 +1687,13 @@ def get_write_node_template_attr(node):
'''
# get avalon data from node
data = dict()
data['avalon'] = read(node)
data = {"avalon": read_avalon_data(node)}
data_preset = {
"nodeclass": data['avalon']['family'],
"families": [data['avalon']['families']],
"creator": data['avalon']['creator']
"nodeclass": data["avalon"]["family"],
"families": [data["avalon"]["families"]],
"creator": data["avalon"]["creator"],
"subset": data["avalon"]["subset"]
}
# get template data
@ -1646,10 +1704,11 @@ def get_write_node_template_attr(node):
"file": get_render_path(node)
})
# adding imageio template
{correct_data.update({k: v})
for k, v in nuke_imageio_writes.items()
if k not in ["_id", "_previous"]}
# adding imageio knob presets
for k, v in nuke_imageio_writes.items():
if k in ["_id", "_previous"]:
continue
correct_data[k] = v
# fix badly encoded data
return fix_data_for_node_create(correct_data)
@ -1765,8 +1824,8 @@ def maintained_selection():
Example:
>>> with maintained_selection():
... node['selected'].setValue(True)
>>> print(node['selected'].value())
... node["selected"].setValue(True)
>>> print(node["selected"].value())
False
"""
previous_selection = nuke.selectedNodes()
@ -1774,11 +1833,11 @@ def maintained_selection():
yield
finally:
# deselect everything first, in case something is selected
current_seletion = nuke.selectedNodes()
[n['selected'].setValue(False) for n in current_seletion]
reset_selection()
# and select all previously selected nodes
if previous_selection:
[n['selected'].setValue(True) for n in previous_selection]
select_nodes(previous_selection)
def reset_selection():

View file

@ -32,7 +32,7 @@ from .lib import (
launch_workfiles_app,
check_inventory_versions,
set_avalon_knob_data,
read,
read_avalon_data,
Context
)
@ -359,7 +359,7 @@ def parse_container(node):
dict: The container schema data for this container node.
"""
data = read(node)
data = read_avalon_data(node)
# (TODO) Remove key validation when `ls` has re-implemented.
#

View file

@ -260,8 +260,6 @@ class ExporterReview(object):
return nuke_imageio["viewer"]["viewerProcess"]
class ExporterReviewLut(ExporterReview):
"""
Generator object for review lut from Nuke
@ -673,7 +671,8 @@ class AbstractWriteRender(OpenPypeCreator):
write_data = {
"nodeclass": self.n_class,
"families": [self.family],
"avalon": self.data
"avalon": self.data,
"subset": self.data["subset"]
}
# add creator data

View file

@ -52,7 +52,7 @@ class ExtractReviewDataMov(openpype.api.Extractor):
for o_name, o_data in self.outputs.items():
f_families = o_data["filter"]["families"]
f_task_types = o_data["filter"]["task_types"]
f_subsets = o_data["filter"]["sebsets"]
f_subsets = o_data["filter"]["subsets"]
self.log.debug(
"f_families `{}` > families: {}".format(

View file

@ -573,7 +573,7 @@ def composite_rendered_layers(
layer_ids_by_position[layer_position] = layer["layer_id"]
# Sort layer positions
sorted_positions = tuple(sorted(layer_ids_by_position.keys()))
sorted_positions = tuple(reversed(sorted(layer_ids_by_position.keys())))
# Prepare variable for filepaths without any rendered content
# - transparent images will be created for them
transparent_filepaths = set()

View file

@ -24,7 +24,9 @@ class CreateRenderlayer(plugin.Creator):
" {clip_id} {group_id} {r} {g} {b} \"{name}\""
)
dynamic_subset_keys = ["render_pass", "render_layer", "group"]
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer", "group"
]
@classmethod
def get_dynamic_data(
@ -34,12 +36,17 @@ class CreateRenderlayer(plugin.Creator):
variant, task_name, asset_id, project_name, host_name
)
# Use render pass name from creator's plugin
dynamic_data["render_pass"] = cls.render_pass
dynamic_data["renderpass"] = cls.render_pass
# Add variant to render layer
dynamic_data["render_layer"] = variant
dynamic_data["renderlayer"] = variant
# Change family for subset name fill
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
return dynamic_data
@classmethod

View file

@ -20,7 +20,9 @@ class CreateRenderPass(plugin.Creator):
icon = "cube"
defaults = ["Main"]
dynamic_subset_keys = ["render_pass", "render_layer"]
dynamic_subset_keys = [
"renderpass", "renderlayer", "render_pass", "render_layer"
]
@classmethod
def get_dynamic_data(
@ -29,9 +31,13 @@ class CreateRenderPass(plugin.Creator):
dynamic_data = super(CreateRenderPass, cls).get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
dynamic_data["render_pass"] = variant
dynamic_data["renderpass"] = variant
dynamic_data["family"] = "render"
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_pass"] = dynamic_data["renderpass"]
return dynamic_data
@classmethod
@ -115,6 +121,7 @@ class CreateRenderPass(plugin.Creator):
else:
render_layer = beauty_instance["variant"]
subset_name_fill_data["renderlayer"] = render_layer
subset_name_fill_data["render_layer"] = render_layer
# Format dynamic keys in subset name
@ -129,7 +136,7 @@ class CreateRenderPass(plugin.Creator):
self.data["group_id"] = group_id
self.data["pass"] = variant
self.data["render_layer"] = render_layer
self.data["renderlayer"] = render_layer
# Collect selected layer ids to be stored into instance
layer_names = [layer["name"] for layer in selected_layers]

View file

@ -45,6 +45,21 @@ class CollectInstances(pyblish.api.ContextPlugin):
for instance_data in filtered_instance_data:
instance_data["fps"] = context.data["sceneFps"]
# Conversion from older instances
# - change 'render_layer' to 'renderlayer'
render_layer = instance_data.get("instance_data")
if not render_layer:
# Render Layer has only variant
if instance_data["family"] == "renderLayer":
render_layer = instance_data.get("variant")
# Backwards compatibility for renderPasses
elif "render_layer" in instance_data:
render_layer = instance_data["render_layer"]
if render_layer:
instance_data["renderlayer"] = render_layer
# Store workfile instance data to instance data
instance_data["originData"] = copy.deepcopy(instance_data)
# Global instance data modifications
@ -191,7 +206,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"Creating render pass instance. \"{}\"".format(pass_name)
)
# Change label
render_layer = instance_data["render_layer"]
render_layer = instance_data["renderlayer"]
# Backwards compatibility
# - subset names were not stored as final subset names during creation

View file

@ -69,9 +69,13 @@ class CollectRenderScene(pyblish.api.ContextPlugin):
# Variant is using render pass name
variant = self.render_layer
dynamic_data = {
"render_layer": self.render_layer,
"render_pass": self.render_pass
"renderlayer": self.render_layer,
"renderpass": self.render_pass,
}
# TODO remove - Backwards compatibility for old subset name templates
# - added 2022/04/28
dynamic_data["render_layer"] = dynamic_data["renderlayer"]
dynamic_data["render_pass"] = dynamic_data["renderpass"]
task_name = workfile_context["task"]
subset_name = get_subset_name_with_asset_doc(
@ -100,7 +104,9 @@ class CollectRenderScene(pyblish.api.ContextPlugin):
"representations": [],
"layers": copy.deepcopy(context.data["layersData"]),
"asset": asset_name,
"task": task_name
"task": task_name,
# Add render layer to instance data
"renderlayer": self.render_layer
}
instance = context.create_instance(**instance_data)

View file

@ -48,7 +48,6 @@ from .attribute_definitions import (
from .env_tools import (
env_value_to_bool,
get_paths_from_environ,
get_global_environments
)
from .terminal import Terminal
@ -249,7 +248,6 @@ __all__ = [
"env_value_to_bool",
"get_paths_from_environ",
"get_global_environments",
"get_vendor_bin_path",
"get_oiio_tools_path",

View file

@ -69,57 +69,3 @@ def get_paths_from_environ(env_key=None, env_value=None, return_first=False):
return None
# Return all existing paths from environment variable
return existing_paths
def get_global_environments(env=None):
"""Load global environments from Pype.
Return global environments prepared and parsed from Pype's settings. Uses
a combination of the "global" environments set in Pype's settings and the
enabled modules.
Args:
env (dict, optional): Initial environments. Empty dictionary is used
when not entered.
Returns:
dict of str: Loaded and processed environments.
"""
import acre
from openpype.modules import ModulesManager
from openpype.settings import get_environments
if env is None:
env = {}
# Get global environments from settings
all_settings_env = get_environments()
parsed_global_env = acre.parse(all_settings_env["global"])
# Merge with entered environments
merged_env = acre.append(env, parsed_global_env)
# Get environments from Pype modules
modules_manager = ModulesManager()
module_envs = modules_manager.collect_global_environments()
publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"]
# Set pyblish plugin paths if any module wants to register them
if publish_plugin_dirs:
publish_paths_str = os.environ.get("PYBLISHPLUGINPATH") or ""
publish_paths = publish_paths_str.split(os.pathsep)
_publish_paths = {
os.path.normpath(path) for path in publish_paths if path
}
for path in publish_plugin_dirs:
_publish_paths.add(os.path.normpath(path))
module_envs["PYBLISHPLUGINPATH"] = os.pathsep.join(_publish_paths)
# Merge environments with current environments and update values
if module_envs:
parsed_envs = acre.parse(module_envs)
merged_env = acre.merge(parsed_envs, merged_env)
return acre.compute(merged_env, cleanup=True)

View file

@ -306,11 +306,11 @@ def _load_modules():
basename, ext = os.path.splitext(filename)
if os.path.isdir(fullpath):
# Check existence of init fil
# Check existence of init file
init_path = os.path.join(fullpath, "__init__.py")
if not os.path.exists(init_path):
log.debug((
"Module directory does not contan __init__.py file {}"
"Module directory does not contain __init__.py file {}"
).format(fullpath))
continue
@ -353,11 +353,11 @@ def _load_modules():
basename, ext = os.path.splitext(filename)
if os.path.isdir(fullpath):
# Check existence of init fil
# Check existence of init file
init_path = os.path.join(fullpath, "__init__.py")
if not os.path.exists(init_path):
log.debug((
"Module directory does not contan __init__.py file {}"
"Module directory does not contain __init__.py file {}"
).format(fullpath))
continue

View file

@ -30,14 +30,15 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
# shot data dict
shot_data = {}
family = instance.data.get("family")
family = instance.data["family"]
families = instance.data["families"]
# filter out all inappropriate instances
if not instance.data["publish"]:
continue
# exclude families other than self.families using intersection
if not set(self.families).intersection([family]):
if not set(self.families).intersection(set(families + [family])):
continue
# exclude if not masterLayer True

View file

@ -22,7 +22,6 @@ from .lib import (
get_project_settings,
get_current_project_settings,
get_anatomy_settings,
get_environments,
get_local_settings
)
from .entities import (
@ -54,7 +53,6 @@ __all__ = (
"get_project_settings",
"get_current_project_settings",
"get_anatomy_settings",
"get_environments",
"get_local_settings",
"SystemSettings",

View file

@ -3,14 +3,11 @@ import re
# Metadata keys for work with studio and project overrides
M_OVERRIDDEN_KEY = "__overriden_keys__"
# Metadata key for storing information about environments
M_ENVIRONMENT_KEY = "__environment_keys__"
# Metadata key for storing dynamic created labels
M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__"
METADATA_KEYS = frozenset([
M_OVERRIDDEN_KEY,
M_ENVIRONMENT_KEY,
M_DYNAMIC_KEY_LABEL
])
@ -35,7 +32,6 @@ KEY_REGEX = re.compile(r"^[{}]+$".format(KEY_ALLOWED_SYMBOLS))
__all__ = (
"M_OVERRIDDEN_KEY",
"M_ENVIRONMENT_KEY",
"M_DYNAMIC_KEY_LABEL",
"METADATA_KEYS",

View file

@ -165,7 +165,7 @@
]
}
],
"customNodes": []
"overrideNodes": []
},
"regexInputs": {
"inputs": [

View file

@ -55,18 +55,23 @@
"keep_original_representation": false,
"export_presets_mapping": {
"exr16fpdwaa": {
"active": true,
"export_type": "File Sequence",
"ext": "exr",
"xml_preset_file": "OpenEXR (16-bit fp DWAA).xml",
"xml_preset_dir": "",
"export_type": "File Sequence",
"ignore_comment_attrs": false,
"colorspace_out": "ACES - ACEScg",
"xml_preset_dir": "",
"parsed_comment_attrs": true,
"representation_add_range": true,
"representation_tags": [],
"load_to_batch_group": true,
"batch_group_loader_name": "LoadClip"
"batch_group_loader_name": "LoadClipBatch",
"filter_path_regex": ".*"
}
}
},
"IntegrateBatchGroup": {
"enabled": false
}
},
"load": {
@ -87,7 +92,8 @@
"png",
"h264",
"mov",
"mp4"
"mp4",
"exr16fpdwaa"
],
"reel_group_name": "OpenPype_Reels",
"reel_name": "Loaded",
@ -110,7 +116,8 @@
"png",
"h264",
"mov",
"mp4"
"mp4",
"exr16fpdwaa"
],
"reel_name": "OP_LoadedReel",
"clip_name_template": "{asset}_{subset}_{output}"

View file

@ -307,7 +307,7 @@
],
"task_types": [],
"tasks": [],
"template": "{family}{Task}_{Render_layer}_{Render_pass}"
"template": "{family}{Task}_{Renderlayer}_{Renderpass}"
},
{
"families": [

View file

@ -120,7 +120,7 @@
"filter": {
"task_types": [],
"families": [],
"sebsets": []
"subsets": []
},
"read_raw": false,
"viewer_process_override": "",
@ -220,11 +220,12 @@
"repre_names": [
"exr",
"dpx",
"mov"
"mov",
"mp4",
"h264"
],
"loaders": [
"LoadSequence",
"LoadMov"
"LoadClip"
]
}
],

View file

@ -127,12 +127,6 @@ class BaseItemEntity(BaseEntity):
# Entity is in hierarchy of dynamically created entity
self.is_in_dynamic_item = False
# Entity will save metadata about environments
# - this is current possible only for RawJsonEnity
self.is_env_group = False
# Key of environment group key must be unique across system settings
self.env_group_key = None
# Roles of an entity
self.roles = None
@ -286,16 +280,6 @@ class BaseItemEntity(BaseEntity):
).format(self.group_item.path)
raise EntitySchemaError(self, reason)
# Validate that env group entities will be stored into file.
# - env group entities must store metadata which is not possible if
# metadata would be outside of file
if self.file_item is None and self.is_env_group:
reason = (
"Environment item is not inside file"
" item so can't store metadata for defaults."
)
raise EntitySchemaError(self, reason)
# Dynamic items must not have defined labels. (UI specific)
if self.label and self.is_dynamic_item:
raise EntitySchemaError(
@ -862,11 +846,6 @@ class ItemEntity(BaseItemEntity):
if self.is_dynamic_item:
self.require_key = False
# If value should be stored to environments and under which group key
# - the key may be dynamically changed by its parent on save
self.env_group_key = self.schema_data.get("env_group_key")
self.is_env_group = bool(self.env_group_key is not None)
# Root item reference
self.root_item = self.parent.root_item

View file

@ -15,7 +15,6 @@ from .exceptions import (
from openpype.settings.constants import (
METADATA_KEYS,
M_DYNAMIC_KEY_LABEL,
M_ENVIRONMENT_KEY,
KEY_REGEX,
KEY_ALLOWED_SYMBOLS
)
@ -148,11 +147,7 @@ class DictMutableKeysEntity(EndpointEntity):
):
raise InvalidKeySymbols(self.path, key)
if self.value_is_env_group:
item_schema = copy.deepcopy(self.item_schema)
item_schema["env_group_key"] = key
else:
item_schema = self.item_schema
item_schema = self.item_schema
new_child = self.create_schema_object(item_schema, self, True)
self.children_by_key[key] = new_child
@ -216,9 +211,7 @@ class DictMutableKeysEntity(EndpointEntity):
self.children_label_by_id = {}
self.store_as_list = self.schema_data.get("store_as_list") or False
self.value_is_env_group = (
self.schema_data.get("value_is_env_group") or False
)
self.required_keys = self.schema_data.get("required_keys") or []
self.collapsible_key = self.schema_data.get("collapsible_key") or False
# GUI attributes
@ -241,9 +234,6 @@ class DictMutableKeysEntity(EndpointEntity):
object_type.update(input_modifiers)
self.item_schema = object_type
if self.value_is_env_group:
self.item_schema["env_group_key"] = ""
if self.group_item is None:
self.is_group = True
@ -259,10 +249,6 @@ class DictMutableKeysEntity(EndpointEntity):
if used_temp_label:
self.label = None
if self.value_is_env_group and self.store_as_list:
reason = "Item can't store environments metadata to list output."
raise EntitySchemaError(self, reason)
if not self.schema_data.get("object_type"):
reason = (
"Modifiable dictionary must have specified `object_type`."
@ -579,18 +565,10 @@ class DictMutableKeysEntity(EndpointEntity):
output.append([key, child_value])
return output
output = {}
for key, child_entity in self.children_by_key.items():
child_value = child_entity.settings_value()
# TODO child should have setter of env group key so child can
# know what env group represents.
if self.value_is_env_group:
if key not in child_value[M_ENVIRONMENT_KEY]:
_metadata = child_value[M_ENVIRONMENT_KEY]
_m_keykey = tuple(_metadata.keys())[0]
env_keys = child_value[M_ENVIRONMENT_KEY].pop(_m_keykey)
child_value[M_ENVIRONMENT_KEY][key] = env_keys
output[key] = child_value
output = {
key: child_entity.settings_value()
for key, child_entity in self.children_by_key.items()
}
output.update(self.metadata)
return output

View file

@ -15,10 +15,7 @@ from .exceptions import (
EntitySchemaError
)
from openpype.settings.constants import (
METADATA_KEYS,
M_ENVIRONMENT_KEY
)
from openpype.settings.constants import METADATA_KEYS
class EndpointEntity(ItemEntity):
@ -534,13 +531,7 @@ class RawJsonEntity(InputEntity):
@property
def metadata(self):
output = {}
if isinstance(self._current_value, dict) and self.is_env_group:
output[M_ENVIRONMENT_KEY] = {
self.env_group_key: list(self._current_value.keys())
}
return output
return {}
@property
def has_unsaved_changes(self):
@ -549,15 +540,6 @@ class RawJsonEntity(InputEntity):
result = self.metadata != self._metadata_for_current_state()
return result
def schema_validations(self):
if self.store_as_string and self.is_env_group:
reason = (
"RawJson entity can't store environment group metadata"
" as string."
)
raise EntitySchemaError(self, reason)
super(RawJsonEntity, self).schema_validations()
def _convert_to_valid_type(self, value):
if isinstance(value, STRING_TYPE):
try:
@ -583,9 +565,6 @@ class RawJsonEntity(InputEntity):
def _settings_value(self):
value = super(RawJsonEntity, self)._settings_value()
if self.is_env_group and isinstance(value, dict):
value.update(self.metadata)
if self.store_as_string:
return json.dumps(value)
return value
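
Illustrative before/after for a raw-json environments value, assuming
M_ENVIRONMENT_KEY is the metadata constant whose import is removed above
(key names are examples only):

    import json

    # Before this change an env-group raw-json appended metadata to its
    # settings value, e.g.:
    #     {"PATH": "/opt/app/bin", M_ENVIRONMENT_KEY: {"maya": ["PATH"]}}
    # After it, the value is just the plain dict, optionally stored as string:
    value = {"PATH": "/opt/app/bin"}
    stored = json.dumps(value)  # only when store_as_string is enabled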

View file

@ -52,7 +52,6 @@ from openpype.settings.lib import (
get_available_studio_project_settings_overrides_versions,
get_available_studio_project_anatomy_overrides_versions,
find_environments,
apply_overrides
)
@ -422,11 +421,6 @@ class RootEntity(BaseItemEntity):
"""
pass
@abstractmethod
def _validate_defaults_to_save(self, value):
"""Validate default values before save."""
pass
def _save_default_values(self):
"""Save default values.
@ -435,7 +429,6 @@ class RootEntity(BaseItemEntity):
DEFAULTS.
"""
settings_value = self.settings_value()
self._validate_defaults_to_save(settings_value)
defaults_dir = self.defaults_dir()
for file_path, value in settings_value.items():
@ -604,8 +597,6 @@ class SystemSettings(RootEntity):
def _save_studio_values(self):
settings_value = self.settings_value()
self._validate_duplicated_env_group(settings_value)
self.log.debug("Saving system settings: {}".format(
json.dumps(settings_value, indent=4)
))
@ -613,29 +604,6 @@ class SystemSettings(RootEntity):
# Reset source version after restart
self._source_version = None
def _validate_defaults_to_save(self, value):
"""Valiations of default values before save."""
self._validate_duplicated_env_group(value)
def _validate_duplicated_env_group(self, value, override_state=None):
""" Validate duplicated environment groups.
Raises:
DuplicatedEnvGroups: When value contains duplicated env groups.
"""
value = copy.deepcopy(value)
if override_state is None:
override_state = self._override_state
if override_state is OverrideState.STUDIO:
default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
final_value = apply_overrides(default_values, value)
else:
final_value = value
# Check if final_value contain duplicated environment groups
find_environments(final_value)
def _save_project_values(self):
"""System settings can't have project overrides.
@ -911,10 +879,6 @@ class ProjectSettings(RootEntity):
if warnings:
raise SaveWarningExc(warnings)
def _validate_defaults_to_save(self, value):
"""Valiations of default values before save."""
pass
def _validate_values_to_save(self, value):
pass

View file

@ -46,8 +46,7 @@
}, {
"type": "raw-json",
"label": "{host_label} Environments",
"key": "{host_name}_environments",
"env_group_key": "{host_name}"
"key": "{host_name}_environments"
}, {
"type": "path",
"key": "{host_name}_executables",

View file

@ -238,25 +238,19 @@
"type": "dict",
"children": [
{
"key": "ext",
"label": "Output extension",
"type": "text"
"type": "boolean",
"key": "active",
"label": "Is active",
"default": true
},
{
"key": "xml_preset_file",
"label": "XML preset file (with ext)",
"type": "text"
},
{
"key": "xml_preset_dir",
"label": "XML preset folder (optional)",
"type": "text"
"type": "separator"
},
{
"key": "export_type",
"label": "Eport clip type",
"type": "enum",
"default": "File Sequence",
"default": "Sequence Publish",
"enum_items": [
{
"Movie": "Movie"
@ -268,59 +262,125 @@
"Sequence Publish": "Sequence Publish"
}
]
},
{
"type": "separator"
"key": "ext",
"label": "Output extension",
"type": "text",
"default": "exr"
},
{
"type": "boolean",
"key": "ignore_comment_attrs",
"label": "Ignore attributes parsed from a segment comments"
},
{
"type": "separator"
"key": "xml_preset_file",
"label": "XML preset file (with ext)",
"type": "text"
},
{
"key": "colorspace_out",
"label": "Output color (imageio)",
"type": "text"
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "representation_add_range",
"label": "Add frame range to representation"
},
{
"type": "list",
"key": "representation_tags",
"label": "Add representation tags",
"object_type": {
"type": "text",
"multiline": false
}
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "load_to_batch_group",
"label": "Load to batch group reel",
"default": false
},
{
"type": "text",
"key": "batch_group_loader_name",
"label": "Use loader name"
"default": "linear"
},
{
"type": "collapsible-wrap",
"label": "Other parameters",
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "xml_preset_dir",
"label": "XML preset folder (optional)",
"type": "text"
},
{
"type": "separator"
},
{
"type": "boolean",
"key": "parsed_comment_attrs",
"label": "Include parsed attributes from comments",
"default": false
},
{
"type": "separator"
},
{
"type": "collapsible-wrap",
"label": "Representation",
"collapsible": true,
"collapsed": true,
"children": [
{
"type": "boolean",
"key": "representation_add_range",
"label": "Add frame range to representation"
},
{
"type": "list",
"key": "representation_tags",
"label": "Add representation tags",
"object_type": {
"type": "text",
"multiline": false
}
}
]
},
{
"type": "collapsible-wrap",
"label": "Loading during publish",
"collapsible": true,
"collapsed": true,
"children": [
{
"type": "boolean",
"key": "load_to_batch_group",
"label": "Load to batch group reel",
"default": false
},
{
"type": "text",
"key": "batch_group_loader_name",
"label": "Use loader name"
}
]
}
]
},
{
"type": "collapsible-wrap",
"label": "Filtering",
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "filter_path_regex",
"label": "Regex in clip path",
"type": "text",
"default": ".*"
}
]
}
]
}
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "IntegrateBatchGroup",
"label": "IntegrateBatchGroup",
"is_group": true,
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
}
]
}
]
},
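
The "collapsible-wrap" items above carry no "key", so they group fields only
visually; the stored preset stays flat, matching the flat keys the extractor
plugin's defaults read. A sketch of one preset's resulting value (defaults
taken from the schema where declared, other values illustrative):

    preset = {
        "active": True,
        "export_type": "Sequence Publish",
        "ext": "exr",
        "xml_preset_file": "Jpeg (8-bit).xml",
        "colorspace_out": "linear",
        "xml_preset_dir": "",
        "parsed_comment_attrs": False,
        "representation_add_range": False,
        "representation_tags": [],
        "load_to_batch_group": False,
        "batch_group_loader_name": "",
        "filter_path_regex": ".*",
    }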

View file

@ -253,7 +253,7 @@
{
"key": "requiredNodes",
"type": "list",
"label": "Required Nodes",
"label": "Plugin required",
"object_type": {
"type": "dict",
"children": [
@ -272,35 +272,43 @@
"label": "Nuke Node Class"
},
{
"type": "splitter"
},
{
"key": "knobs",
"type": "collapsible-wrap",
"label": "Knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
}
]
}
]
}
}
]
}
]
}
},
{
"type": "splitter"
},
{
"type": "list",
"key": "customNodes",
"label": "Custom Nodes",
"key": "overrideNodes",
"label": "Plugin's node overrides",
"object_type": {
"type": "dict",
"children": [
@ -319,27 +327,37 @@
"label": "Nuke Node Class"
},
{
"type": "splitter"
"key": "subsets",
"label": "Subsets",
"type": "list",
"object_type": "text"
},
{
"key": "knobs",
"label": "Knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
"type": "collapsible-wrap",
"label": "Knobs overrides",
"collapsible": true,
"collapsed": true,
"children": [
{
"key": "knobs",
"type": "list",
"object_type": {
"type": "dict",
"children": [
{
"type": "text",
"key": "name",
"label": "Name"
},
{
"type": "text",
"key": "value",
"label": "Value"
}
]
}
]
}
}
]
}
]
}
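
A hypothetical consumer sketch (not the actual plugin code) showing how the
reworked "overrideNodes" entries could be applied; each item's "subsets"
list filters which instances receive its knob overrides:

    def knob_overrides_for(subset_name, override_nodes):
        """Collect knob overrides whose subset filter matches.

        Hypothetical helper; `override_nodes` is the list stored under
        "overrideNodes" above. An empty "subsets" list is treated here
        as matching every subset.
        """
        overrides = []
        for item in override_nodes:
            subsets = item.get("subsets") or []
            if subsets and subset_name not in subsets:
                continue
            overrides.extend(item.get("knobs") or [])
        return overrides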
@ -446,7 +464,7 @@
{
"key": "flame",
"type": "dict",
"label": "Flame/Flair",
"label": "Flame & Flare",
"children": [
{
"key": "project",

View file

@ -212,7 +212,7 @@
"object_type": "text"
},
{
"key": "sebsets",
"key": "subsets",
"label": "Subsets",
"type": "list",
"object_type": "text"

View file

@ -117,19 +117,6 @@
}
]
},
{
"key": "env_group_test",
"label": "EnvGroup Test",
"type": "dict",
"children": [
{
"key": "key_to_store_in_system_settings",
"label": "Testing environment group",
"type": "raw-json",
"env_group_key": "test_group"
}
]
},
{
"key": "dict_wrapper",
"type": "dict",

View file

@ -7,8 +7,7 @@
{
"type": "raw-json",
"label": "{host_label} Environments",
"key": "{host_name}_environments",
"env_group_key": "{host_name}"
"key": "{host_name}_environments"
},
{
"type": "path",

View file

@ -34,7 +34,6 @@
"key": "environment",
"label": "Environment",
"type": "raw-json",
"env_group_key": "global",
"require_restart": true
},
{

View file

@ -9,7 +9,6 @@ from .exceptions import (
)
from .constants import (
M_OVERRIDDEN_KEY,
M_ENVIRONMENT_KEY,
METADATA_KEYS,
@ -457,24 +456,6 @@ def get_local_settings():
return _LOCAL_SETTINGS_HANDLER.get_local_settings()
class DuplicatedEnvGroups(Exception):
def __init__(self, duplicated):
self.origin_duplicated = duplicated
self.duplicated = {}
for key, items in duplicated.items():
self.duplicated[key] = []
for item in items:
self.duplicated[key].append("/".join(item["parents"]))
msg = "Duplicated environment group keys. {}".format(
", ".join([
"\"{}\"".format(env_key) for env_key in self.duplicated.keys()
])
)
super(DuplicatedEnvGroups, self).__init__(msg)
def load_openpype_default_settings():
"""Load openpype default settings."""
return load_jsons_from_dir(DEFAULTS_DIR)
@ -624,69 +605,6 @@ def load_jsons_from_dir(path, *args, **kwargs):
return output
def find_environments(data, with_items=False, parents=None):
""" Find environemnt values from system settings by it's metadata.
Args:
data(dict): System settings data or dictionary which may contain
environments metadata.
Returns:
dict: Environment group keys mapped to values ready for the `acre` module.
"""
if not data or not isinstance(data, dict):
return {}
output = {}
if parents is None:
parents = []
if M_ENVIRONMENT_KEY in data:
metadata = data.get(M_ENVIRONMENT_KEY)
for env_group_key, env_keys in metadata.items():
if env_group_key not in output:
output[env_group_key] = []
_env_values = {}
for key in env_keys:
_env_values[key] = data[key]
item = {
"env": _env_values,
"parents": parents[:-1]
}
output[env_group_key].append(item)
for key, value in data.items():
_parents = copy.deepcopy(parents)
_parents.append(key)
result = find_environments(value, True, _parents)
if not result:
continue
for env_group_key, env_values in result.items():
if env_group_key not in output:
output[env_group_key] = []
for env_values_item in env_values:
output[env_group_key].append(env_values_item)
if with_items:
return output
duplicated_env_groups = {}
final_output = {}
for key, value_in_list in output.items():
if len(value_in_list) > 1:
duplicated_env_groups[key] = value_in_list
else:
final_output[key] = value_in_list[0]["env"]
if duplicated_env_groups:
raise DuplicatedEnvGroups(duplicated_env_groups)
return final_output
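
A minimal sketch of the removed helper's input and output, inferred from the
code above (the M_ENVIRONMENT_KEY literal is an assumption here; in the
codebase it comes from openpype.settings.constants):

    M_ENVIRONMENT_KEY = "__environment_keys__"  # assumed literal value
    data = {
        "maya_environments": {
            "PATH": "/path/to/maya/bin",
            M_ENVIRONMENT_KEY: {"maya": ["PATH"]},
        }
    }
    # find_environments(data) -> {"maya": {"PATH": "/path/to/maya/bin"}}
    # DuplicatedEnvGroups was raised when two groups reused the same key.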
def subkey_merge(_dict, value, keys):
key = keys.pop(0)
if not keys:
@ -1082,19 +1000,6 @@ def get_current_project_settings():
return get_project_settings(project_name)
def get_environments():
"""Calculated environment based on defaults and system settings.
Any default environment also found in the system settings will be fully
overridden by the one from the system settings.
Returns:
dict: Output should be ready for `acre` module.
"""
return find_environments(get_system_settings(False))
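
Migration sketch, matching the start-up change further below in this commit:
callers that need only the general group now ask for it directly.

    # Old (removed):
    #     all_env = get_environments()
    #     general_env = all_env["global"]
    # New:
    from openpype.settings import get_general_environments
    general_env = get_general_environments()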
def get_general_environments():
"""Get general environments.

View file

@ -44,8 +44,7 @@
}, {
"type": "raw-json",
"label": "{host_label} Environments",
"key": "{host_name}_environments",
"env_group_key": "{host_name}"
"key": "{host_name}_environments"
}, {
"type": "path-widget",
"key": "{host_name}_executables",

View file

@ -465,10 +465,6 @@ class ModifiableDictItem(QtWidgets.QWidget):
self.entity_widget.change_key(key, self)
self.update_style()
@property
def value_is_env_group(self):
return self.entity_widget.value_is_env_group
def update_key_label(self):
if not self.collapsible_key:
return

View file

@ -266,18 +266,9 @@ def set_openpype_global_environments() -> None:
"""Set global OpenPype's environments."""
import acre
try:
from openpype.settings import get_general_environments
from openpype.settings import get_general_environments
general_env = get_general_environments()
except Exception:
# Backwards compatibility for OpenPype versions where
# `get_general_environments` does not exist yet
from openpype.settings import get_environments
all_env = get_environments()
general_env = all_env["global"]
general_env = get_general_environments()
merged_env = acre.merge(
acre.parse(general_env),