Merge branch 'develop' into feaure/change_integratenew_template_profiles_setting

Milan Kolar 2021-05-07 18:31:50 +02:00
commit 073f9a1bd1
32 changed files with 191 additions and 721 deletions

View file

@@ -164,24 +164,26 @@ def create_media_pool_item(fpath: str,
# try to search in bin if the clip does not exist
existing_mpi = get_media_pool_item(fpath, root_bin)
print(">>>>> existing_mpi: {}".format(existing_mpi))
if not existing_mpi:
print("___ fpath: {}".format(fpath))
dirname, file = os.path.split(fpath)
_name, ext = os.path.splitext(file)
print(dirname)
media_pool_items = media_storage.AddItemListToMediaPool(os.path.normpath(dirname))
print(media_pool_items)
# pop the returned dict on first item as resolve data object is such
if media_pool_items:
media_pool_item = [mpi for mpi in media_pool_items
if ext in mpi.GetClipProperty("File Path")]
return media_pool_item.pop()
else:
return False
else:
if existing_mpi:
return existing_mpi
dirname, file = os.path.split(fpath)
_name, ext = os.path.splitext(file)
# add all data in folder to mediapool
media_pool_items = media_storage.AddItemListToMediaPool(
os.path.normpath(dirname))
if not media_pool_items:
return False
# if any are added then look into them for the right extension
media_pool_item = [mpi for mpi in media_pool_items
if ext in mpi.GetClipProperty("File Path")]
# return only first found
return media_pool_item.pop()
def get_media_pool_item(fpath, root: object = None) -> object:
"""
@@ -199,7 +201,6 @@ def get_media_pool_item(fpath, root: object = None) -> object:
fname = os.path.basename(fpath)
for _mpi in root.GetClipList():
print(">>> _mpi: {}".format(_mpi.GetClipProperty("File Name")))
_mpi_name = _mpi.GetClipProperty("File Name")
_mpi_name = get_reformated_path(_mpi_name, first=True)
if fname in _mpi_name:
@@ -312,7 +313,7 @@ def get_current_timeline_items(
selecting_color = selecting_color or "Chocolate"
project = get_current_project()
timeline = get_current_timeline()
selected_clips = list()
selected_clips = []
# get all tracks count filtered by track type
selected_track_count = timeline.GetTrackCount(track_type)
@@ -708,7 +709,7 @@ def get_clip_attributes(clip):
"""
mp_item = clip.GetMediaPoolItem()
data = {
return {
"clipIn": clip.GetStart(),
"clipOut": clip.GetEnd(),
"clipLeftOffset": clip.GetLeftOffset(),
@@ -718,7 +719,6 @@ def get_clip_attributes(clip):
"sourceId": mp_item.GetMediaId(),
"sourceProperties": mp_item.GetClipProperty()
}
return data
def set_project_manager_to_folder_name(folder_name):
@@ -850,12 +850,12 @@ def get_reformated_path(path, padded=False, first=False):
get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr
"""
num_pattern = r"(\[\d+\-\d+\])"
padding_pattern = r"(\d+)(?=-)"
first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]")
if "[" in path:
padding_pattern = r"(\d+)(?=-)"
padding = len(re.findall(padding_pattern, path).pop())
num_pattern = r"(\[\d+\-\d+\])"
if padded:
path = re.sub(num_pattern, f"%0{padding}d", path)
elif first:
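For reference, a minimal standalone sketch of what the two branches produce for the docstring example above (reimplemented here for illustration, using the same regexes as the diff):

import re

path = "plate.[0001-1008].exr"
# padding is the digit count of the first frame number
padding = len(re.findall(r"(\d+)(?=-)", path).pop())             # 4
padded = re.sub(r"(\[\d+\-\d+\])", "%0{}d".format(padding), path)
first_frame = re.findall(r"\[(\d+)\-\d+\]", path).pop()
first = re.sub(r"(\[\d+\-\d+\])", first_frame, path)
print(padded)  # plate.%04d.exr
print(first)   # plate.0001.exr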

View file

@@ -422,7 +422,6 @@ class ClipLoader:
media_pool_item = lib.create_media_pool_item(
self.data["path"], self.active_bin)
_clip_property = media_pool_item.GetClipProperty
clip_name = _clip_property("File Name")
# get handles
handle_start = self.data["versionData"].get("handleStart")
@@ -784,6 +783,8 @@ class PublishClip:
# add review track only to hero track
if hero_track and self.review_layer:
self.tag_data.update({"reviewTrack": self.review_layer})
else:
self.tag_data.update({"reviewTrack": None})
def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
@@ -820,7 +821,7 @@ class PublishClip:
def _create_parents(self):
""" Create parents and return it in list. """
self.parents = list()
self.parents = []
patern = re.compile(self.parents_search_patern)
par_split = [patern.findall(t).pop()

View file

@@ -37,8 +37,16 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
continue
media_pool_item = timeline_item.GetMediaPoolItem()
clip_property = media_pool_item.GetClipProperty()
self.log.debug(f"clip_property: {clip_property}")
source_duration = int(media_pool_item.GetClipProperty("Frames"))
# solve handles length
handle_start = min(
tag_data["handleStart"], int(timeline_item.GetLeftOffset()))
handle_end = min(
tag_data["handleEnd"], int(
source_duration - timeline_item.GetRightOffset()))
self.log.debug("Handles: <{}, {}>".format(handle_start, handle_end))
# add tag data to instance data
data.update({
@@ -60,7 +68,9 @@ class PrecollectInstances(pyblish.api.ContextPlugin):
"item": timeline_item,
"families": families,
"publish": resolve.get_publish_attribute(timeline_item),
"fps": context.data["fps"]
"fps": context.data["fps"],
"handleStart": handle_start,
"handleEnd": handle_end
})
# otio clip data
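The min() clamps ensure a tag can never request more handle frames than the source media actually has around the cut. A worked sketch with hypothetical values (the Resolve getters are assumed to behave as the names suggest):

# Hypothetical values, for illustration only.
source_duration = 120   # int(media_pool_item.GetClipProperty("Frames"))
left_offset = 8         # int(timeline_item.GetLeftOffset())
right_offset = 110      # int(timeline_item.GetRightOffset())
tag_data = {"handleStart": 10, "handleEnd": 10}

handle_start = min(tag_data["handleStart"], left_offset)                 # 8
handle_end = min(tag_data["handleEnd"], source_duration - right_offset)  # 10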

View file

@@ -26,7 +26,8 @@ from .terminal import Terminal
from .execute import (
get_pype_execute_args,
execute,
run_subprocess
run_subprocess,
CREATE_NO_WINDOW
)
from .log import PypeLogger, timeit
from .mongo import (

View file

@@ -6,6 +6,9 @@ from .log import PypeLogger as Logger
log = logging.getLogger(__name__)
# MSDN process creation flag (Windows only)
CREATE_NO_WINDOW = 0x08000000
def execute(args,
silent=False,

View file

@@ -15,12 +15,17 @@ def default_custom_attributes_definition():
def app_definitions_from_app_manager(app_manager):
app_definitions = []
_app_definitions = []
for app_name, app in app_manager.applications.items():
if app.enabled and app.is_host:
app_definitions.append({
app_name: app.full_label
})
_app_definitions.append(
(app_name, app.full_label)
)
# Sort items by label
app_definitions = []
for key, label in sorted(_app_definitions, key=lambda item: item[1]):
app_definitions.append({key: label})
if not app_definitions:
app_definitions.append({"empty": "< Empty >"})
@@ -28,11 +33,16 @@ def app_definitions_from_app_manager(app_manager):
def tool_definitions_from_app_manager(app_manager):
tools_data = []
_tools_data = []
for tool_name, tool in app_manager.tools.items():
tools_data.append({
tool_name: tool.label
})
_tools_data.append(
(tool_name, tool.label)
)
# Sort items by label
tools_data = []
for key, label in sorted(_tools_data, key=lambda item: item[1]):
tools_data.append({key: label})
# Make sure there is at least one item
if not tools_data:
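Both helpers now collect (name, label) tuples first so the single-key dicts can be emitted sorted by label. A quick sketch of that pass with hypothetical app data:

_app_definitions = [("nuke", "Nuke 12.2"), ("hiero", "Hiero 12.2")]
app_definitions = [
    {key: label}
    for key, label in sorted(_app_definitions, key=lambda item: item[1])
]
print(app_definitions)  # [{'hiero': 'Hiero 12.2'}, {'nuke': 'Nuke 12.2'}]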

View file

@@ -4,7 +4,7 @@ Requires:
context -> otioTimeline
Optional:
otioClip.metadata -> masterLayer
instance -> reviewTrack
Provides:
instance -> otioReviewClips
@@ -26,12 +26,12 @@ class CollectOcioReview(pyblish.api.InstancePlugin):
def process(self, instance):
# get basic variables
otio_review_clips = list()
otio_review_clips = []
otio_timeline = instance.context.data["otioTimeline"]
otio_clip = instance.data["otioClip"]
# optionally get `reviewTrack`
review_track_name = otio_clip.metadata.get("reviewTrack")
review_track_name = instance.data.get("reviewTrack")
# generate range in parent
otio_tl_range = otio_clip.range_in_parent()

View file

@@ -17,7 +17,8 @@ from openpype.lib import (
get_pype_execute_args,
should_decompress,
get_decompress_dir,
decompress
decompress,
CREATE_NO_WINDOW
)
@@ -253,9 +254,7 @@ class ExtractBurnin(openpype.api.Extractor):
"env": {}
}
if platform.system().lower() == "windows":
process_kwargs["creationflags"] = (
subprocess.CREATE_NO_WINDOW
)
process_kwargs["creationflags"] = CREATE_NO_WINDOW
openpype.api.run_subprocess(args, **process_kwargs)
# Remove the temporary json
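Importing the constant from openpype.lib avoids referencing subprocess.CREATE_NO_WINDOW, which is only defined on Windows builds of Python (3.7+). A standalone sketch of the pattern, with an illustrative command:

import platform
import subprocess

CREATE_NO_WINDOW = 0x08000000  # MSDN process creation flag (Windows only)

process_kwargs = {}
if platform.system().lower() == "windows":
    # Suppress the console window that would otherwise pop up.
    process_kwargs["creationflags"] = CREATE_NO_WINDOW
subprocess.run(["ffmpeg", "-version"], **process_kwargs)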

View file

@@ -69,6 +69,87 @@ def get_fps(str_value):
return str(fps)
def _prores_codec_args(ffprobe_data):
output = []
tags = ffprobe_data.get("tags") or {}
encoder = tags.get("encoder") or ""
if encoder.endswith("prores_ks"):
codec_name = "prores_ks"
elif encoder.endswith("prores_aw"):
codec_name = "prores_aw"
else:
codec_name = "prores"
output.extend(["-codec:v", codec_name])
pix_fmt = ffprobe_data.get("pix_fmt")
if pix_fmt:
output.extend(["-pix_fmt", pix_fmt])
# The rest of the arguments are prores_ks specific
if codec_name == "prores_ks":
codec_tag_to_profile_map = {
"apco": "proxy",
"apcs": "lt",
"apcn": "standard",
"apch": "hq",
"ap4h": "4444",
"ap4x": "4444xq"
}
codec_tag_str = ffprobe_data.get("codec_tag_string")
if codec_tag_str:
profile = codec_tag_to_profile_map.get(codec_tag_str)
if profile:
output.extend(["-profile:v", profile])
return output
def _h264_codec_args(ffprobe_data):
output = []
output.extend(["-codec:v", "h264"])
pix_fmt = ffprobe_data.get("pix_fmt")
if pix_fmt:
output.extend(["-pix_fmt", pix_fmt])
output.extend(["-intra"])
output.extend(["-g", "1"])
return output
def get_codec_args(ffprobe_data):
codec_name = ffprobe_data.get("codec_name")
# Codec "prores"
if codec_name == "prores":
return _prores_codec_args(ffprobe_data)
# Codec "h264"
if codec_name == "h264":
return _h264_codec_args(ffprobe_data)
output = []
if codec_name:
output.extend(["-codec:v", codec_name])
bit_rate = ffprobe_data.get("bit_rate")
if bit_rate:
output.extend(["-b:v", bit_rate])
pix_fmt = ffprobe_data.get("pix_fmt")
if pix_fmt:
output.extend(["-pix_fmt", pix_fmt])
output.extend(["-g", "1"])
return output
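Assuming ffprobe stream data like the following (values hypothetical), the dispatch picks the prores branch, detects the prores_ks encoder from the tag, and maps the "apch" codec tag to the "hq" profile:

ffprobe_data = {
    "codec_name": "prores",
    "codec_tag_string": "apch",
    "pix_fmt": "yuv422p10le",
    "tags": {"encoder": "Lavc58.54.100 prores_ks"},  # hypothetical encoder tag
}
print(get_codec_args(ffprobe_data))
# ['-codec:v', 'prores_ks', '-pix_fmt', 'yuv422p10le', '-profile:v', 'hq']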
class ModifiedBurnins(ffmpeg_burnins.Burnins):
'''
This is modification of OTIO FFmpeg Burnin adapter.
@@ -558,38 +639,13 @@ def burnins_from_data(
if codec_data:
# Use codec definition from method arguments
ffmpeg_args = codec_data
ffmpeg_args.append("-g 1")
else:
ffprobe_data = burnin._streams[0]
codec_name = ffprobe_data.get("codec_name")
if codec_name:
if codec_name == "prores":
tags = ffprobe_data.get("tags") or {}
encoder = tags.get("encoder") or ""
if encoder.endswith("prores_ks"):
codec_name = "prores_ks"
elif encoder.endswith("prores_aw"):
codec_name = "prores_aw"
ffmpeg_args.append("-codec:v {}".format(codec_name))
profile_name = ffprobe_data.get("profile")
if profile_name:
# lower profile name and repalce spaces with underscore
profile_name = profile_name.replace(" ", "_").lower()
ffmpeg_args.append("-profile:v {}".format(profile_name))
bit_rate = ffprobe_data.get("bit_rate")
if bit_rate:
ffmpeg_args.append("-b:v {}".format(bit_rate))
pix_fmt = ffprobe_data.get("pix_fmt")
if pix_fmt:
ffmpeg_args.append("-pix_fmt {}".format(pix_fmt))
ffmpeg_args.extend(get_codec_args(ffprobe_data))
# Use GOP size 1 (equivalent to the deprecated `-intra` argument)
ffmpeg_args.append("-g 1")
ffmpeg_args_str = " ".join(ffmpeg_args)
burnin.render(
output_path, args=ffmpeg_args_str, overwrite=overwrite, **data

View file

@@ -7,7 +7,7 @@
"enabled": true,
"ffmpeg_args": {
"input": [
"-gamma 2.2"
"-apply_trc gamma22"
],
"output": []
}
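`-apply_trc gamma22` is the ffmpeg input option that applies a gamma 2.2 transfer characteristic while decoding (typically for EXR sources), replacing the previous `-gamma 2.2` flag. Roughly, the input args from this setting land on the command line like this (paths hypothetical):

import subprocess

subprocess.run([
    "ffmpeg",
    "-apply_trc", "gamma22",   # input option: decode with gamma 2.2 transfer
    "-i", "plate.%04d.exr",    # hypothetical input sequence
    "review.mov",              # hypothetical output
])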

View file

@@ -32,7 +32,7 @@
},
"__dynamic_keys_labels__": {
"3-2": "3.2",
"3-1": "3.2"
"3-1": "3.1"
}
}
},

View file

@@ -61,39 +61,36 @@ class ListEntity(EndpointEntity):
def append(self, item):
child_obj = self.add_new_item(trigger_change=False)
child_obj.set(item)
self.on_change()
self.on_child_change(child_obj)
def extend(self, items):
for item in items:
self.append(item)
def clear(self):
self.children.clear()
self.on_change()
if not self.children:
return
first_item = self.children.pop(0)
while self.children:
self.children.pop(0)
self.on_child_change(first_item)
def pop(self, idx):
item = self.children.pop(idx)
self.on_change()
self.on_child_change(item)
return item
def remove(self, item):
for idx, child_obj in enumerate(self.children):
found = False
if isinstance(item, BaseEntity):
if child_obj is item:
found = True
elif child_obj.value == item:
found = True
if found:
self.pop(idx)
return
raise ValueError("ListEntity.remove(x): x not in ListEntity")
try:
self.pop(self.index(item))
except ValueError:
raise ValueError("ListEntity.remove(x): x not in ListEntity")
def insert(self, idx, item):
child_obj = self.add_new_item(idx, trigger_change=False)
child_obj.set(item)
self.on_change()
self.on_child_change(child_obj)
def _add_new_item(self, idx=None):
child_obj = self.create_schema_object(self.item_schema, self, True)
@@ -106,13 +103,9 @@
def add_new_item(self, idx=None, trigger_change=True):
child_obj = self._add_new_item(idx)
child_obj.set_override_state(self._override_state)
if self._override_state is OverrideState.STUDIO:
child_obj.add_to_studio_default([])
elif self._override_state is OverrideState.PROJECT:
child_obj.add_to_project_default([])
if trigger_change:
self.on_change()
self.on_child_change(child_obj)
return child_obj
def swap_items(self, item_1, item_2):
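The mutators now report the specific child that changed via on_child_change() instead of firing a blanket on_change(), and remove() mirrors list.remove() by delegating to index() and pop(). The same lookup semantics on a plain list, for comparison:

items = ["a", "b", "c"]
try:
    items.pop(items.index("b"))   # the lookup remove() now delegates to
except ValueError:
    raise ValueError("ListEntity.remove(x): x not in ListEntity")
print(items)  # ['a', 'c']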

View file

@@ -1,6 +1,10 @@
import sys
from Qt import QtWidgets, QtGui
from .lib import is_password_required
from .lib import (
is_password_required,
BTN_FIXED_SIZE,
CHILD_OFFSET
)
from .widgets import PasswordDialog
from .local_settings import LocalSettingsWindow
from .settings import (
@@ -32,7 +36,11 @@ def main(user_role=None):
__all__ = (
"is_password_required",
"BTN_FIXED_SIZE",
"CHILD_OFFSET",
"style",
"PasswordDialog",
"MainWidget",
"ProjectListWidget",

View file

@@ -1,3 +1,7 @@
CHILD_OFFSET = 15
BTN_FIXED_SIZE = 20
def is_password_required():
from openpype.settings import (
get_system_settings,

View file

@@ -4,7 +4,7 @@ from .widgets import (
Separator,
ExpandingWidget
)
from .constants import CHILD_OFFSET
from openpype.tools.settings import CHILD_OFFSET
class AppVariantWidget(QtWidgets.QWidget):

View file

@@ -14,8 +14,6 @@ LOCAL_APPS_KEY = "applications"
# Roots key constant
LOCAL_ROOTS_KEY = "roots"
# Child offset in expandable widget
CHILD_OFFSET = 15
__all__ = (
"LABEL_REMOVE_DEFAULT",

View file

@@ -1,5 +1,5 @@
from Qt import QtWidgets, QtCore
from openpype.tools.settings.settings.widgets.widgets import (
from openpype.tools.settings.settings.widgets import (
ExpandingWidget,
SpacerWidget
)

View file

@@ -7,6 +7,7 @@ from openpype.settings.lib import (
get_local_settings,
save_local_settings
)
from openpype.tools.settings import CHILD_OFFSET
from openpype.api import (
SystemSettings,
ProjectSettings
@@ -23,7 +24,6 @@ from .apps_widget import LocalApplicationsWidgets
from .projects_widget import ProjectSettingsWidget
from .constants import (
CHILD_OFFSET,
LOCAL_GENERAL_KEY,
LOCAL_PROJECTS_KEY,
LOCAL_APPS_KEY

View file

@@ -1,8 +1,6 @@
from . import style
from .widgets import (
MainWidget,
ProjectListWidget
)
from .window import MainWidget
from .widgets import ProjectListWidget
__all__ = (

View file

@@ -1,5 +1,5 @@
from Qt import QtWidgets, QtGui, QtCore
from .lib import CHILD_OFFSET
from openpype.tools.settings import CHILD_OFFSET
from .widgets import ExpandingWidget

View file

@@ -30,8 +30,6 @@ from openpype.settings.entities import (
from openpype.settings import SaveWarningExc
from .widgets import ProjectListWidget
from . import lib
from .base import GUIWidget
from .list_item_widget import ListWidget
from .list_strict_widget import ListStrictWidget

View file

@@ -8,7 +8,7 @@ from .widgets import (
IconButton,
SpacerWidget
)
from .lib import (
from openpype.tools.settings import (
BTN_FIXED_SIZE,
CHILD_OFFSET
)

View file

@@ -19,7 +19,7 @@ from .base import (
BaseWidget,
InputWidget
)
from .lib import CHILD_OFFSET
from openpype.tools.settings import CHILD_OFFSET
class DictImmutableKeysWidget(BaseWidget):

View file

@@ -2,7 +2,7 @@ from Qt import QtWidgets, QtCore
from .base import InputWidget
from .widgets import ExpandingWidget
from .lib import (
from openpype.tools.settings import (
BTN_FIXED_SIZE,
CHILD_OFFSET
)

View file

@@ -1,8 +0,0 @@
from .window import MainWidget
from .widgets import ProjectListWidget
__all__ = [
"MainWidget",
"ProjectListWidget"
]

View file

@@ -1,601 +0,0 @@
import os
import re
import json
import copy
from openpype.settings.constants import (
M_OVERRIDEN_KEY,
M_ENVIRONMENT_KEY,
M_DYNAMIC_KEY_LABEL
)
from queue import Queue
# Singleton database of available inputs
class TypeToKlass:
types = {}
NOT_SET = type("NOT_SET", (), {"__bool__": lambda obj: False})()
METADATA_KEY = type("METADATA_KEY", (), {})()
OVERRIDE_VERSION = 1
CHILD_OFFSET = 15
BTN_FIXED_SIZE = 20
key_pattern = re.compile(r"(\{.*?[^{0]*\})")
def convert_gui_data_with_metadata(data, ignored_keys=None):
if not data or not isinstance(data, dict):
return data
if ignored_keys is None:
ignored_keys = tuple()
output = {}
if METADATA_KEY in data:
metadata = data.pop(METADATA_KEY)
for key, value in metadata.items():
if key in ignored_keys or key == "groups":
continue
if key == "environments":
output[M_ENVIRONMENT_KEY] = value
elif key == "dynamic_key_label":
output[M_DYNAMIC_KEY_LABEL] = value
else:
raise KeyError("Unknown metadata key \"{}\"".format(key))
for key, value in data.items():
output[key] = convert_gui_data_with_metadata(value, ignored_keys)
return output
def convert_data_to_gui_data(data, first=True):
if not data or not isinstance(data, dict):
return data
output = {}
if M_ENVIRONMENT_KEY in data:
data.pop(M_ENVIRONMENT_KEY)
if M_DYNAMIC_KEY_LABEL in data:
if METADATA_KEY not in data:
data[METADATA_KEY] = {}
data[METADATA_KEY]["dynamic_key_label"] = data.pop(M_DYNAMIC_KEY_LABEL)
for key, value in data.items():
output[key] = convert_data_to_gui_data(value, False)
return output
def convert_gui_data_to_overrides(data, first=True):
if not data or not isinstance(data, dict):
return data
output = {}
if first:
output["__override_version__"] = OVERRIDE_VERSION
data = convert_gui_data_with_metadata(data)
if METADATA_KEY in data:
metadata = data.pop(METADATA_KEY)
for key, value in metadata.items():
if key == "groups":
output[M_OVERRIDEN_KEY] = value
else:
raise KeyError("Unknown metadata key \"{}\"".format(key))
for key, value in data.items():
output[key] = convert_gui_data_to_overrides(value, False)
return output
def convert_overrides_to_gui_data(data, first=True):
if not data or not isinstance(data, dict):
return data
if first:
data = convert_data_to_gui_data(data)
output = {}
if M_OVERRIDEN_KEY in data:
groups = data.pop(M_OVERRIDEN_KEY)
if METADATA_KEY not in output:
output[METADATA_KEY] = {}
output[METADATA_KEY]["groups"] = groups
for key, value in data.items():
output[key] = convert_overrides_to_gui_data(value, False)
return output
def _fill_schema_template_data(
template, template_data, required_keys=None, missing_keys=None
):
first = False
if required_keys is None:
first = True
required_keys = set()
missing_keys = set()
_template = []
default_values = {}
for item in template:
if isinstance(item, dict) and "__default_values__" in item:
default_values = item["__default_values__"]
else:
_template.append(item)
template = _template
for key, value in default_values.items():
if key not in template_data:
template_data[key] = value
if not template:
output = template
elif isinstance(template, list):
output = []
for item in template:
output.append(_fill_schema_template_data(
item, template_data, required_keys, missing_keys
))
elif isinstance(template, dict):
output = {}
for key, value in template.items():
output[key] = _fill_schema_template_data(
value, template_data, required_keys, missing_keys
)
elif isinstance(template, str):
# TODO find much better way how to handle filling template data
for replacement_string in key_pattern.findall(template):
key = str(replacement_string[1:-1])
required_keys.add(key)
if key not in template_data:
missing_keys.add(key)
continue
value = template_data[key]
if replacement_string == template:
# Replace the value with value from templates data
# - with this is possible to set value with different type
template = value
else:
# Only replace the key in string
template = template.replace(replacement_string, value)
output = template
else:
output = template
if first and missing_keys:
raise SchemaTemplateMissingKeys(missing_keys, required_keys)
return output
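Since this module is removed here, a condensed sketch of the replacement semantics above for reference: a template string that is exactly "{key}" takes the raw value (preserving its type), otherwise the key is substituted inside the string. Data values are hypothetical:

import re

key_pattern = re.compile(r"(\{.*?[^{0]*\})")
template_data = {"multipath": ["/mnt/a", "/mnt/b"], "host": "nuke"}

def fill(template):
    for replacement_string in key_pattern.findall(template):
        value = template_data[replacement_string[1:-1]]
        if replacement_string == template:
            return value  # whole-string match keeps the value's type
        template = template.replace(replacement_string, value)
    return template

print(fill("{multipath}"))           # ['/mnt/a', '/mnt/b']
print(fill("settings/{host}.json"))  # settings/nuke.json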
def _fill_schema_template(child_data, schema_collection, schema_templates):
template_name = child_data["name"]
template = schema_templates.get(template_name)
if template is None:
if template_name in schema_collection:
raise KeyError((
"Schema \"{}\" is used as `schema_template`"
).format(template_name))
raise KeyError("Schema template \"{}\" was not found".format(
template_name
))
# Default value must be dictionary (NOT list)
# - empty list would not add any item if `template_data` are not filled
template_data = child_data.get("template_data") or {}
if isinstance(template_data, dict):
template_data = [template_data]
output = []
for single_template_data in template_data:
try:
filled_child = _fill_schema_template_data(
template, single_template_data
)
except SchemaTemplateMissingKeys as exc:
raise SchemaTemplateMissingKeys(
exc.missing_keys, exc.required_keys, template_name
)
for item in filled_child:
filled_item = _fill_inner_schemas(
item, schema_collection, schema_templates
)
if filled_item["type"] == "schema_template":
output.extend(_fill_schema_template(
filled_item, schema_collection, schema_templates
))
else:
output.append(filled_item)
return output
def _fill_inner_schemas(schema_data, schema_collection, schema_templates):
if schema_data["type"] == "schema":
raise ValueError("First item in schema data can't be schema.")
children_key = "children"
object_type_key = "object_type"
for item_key in (children_key, object_type_key):
children = schema_data.get(item_key)
if not children:
continue
if object_type_key == item_key:
if not isinstance(children, dict):
continue
children = [children]
new_children = []
for child in children:
child_type = child["type"]
if child_type == "schema":
schema_name = child["name"]
if schema_name not in schema_collection:
if schema_name in schema_templates:
raise KeyError((
"Schema template \"{}\" is used as `schema`"
).format(schema_name))
raise KeyError(
"Schema \"{}\" was not found".format(schema_name)
)
filled_child = _fill_inner_schemas(
schema_collection[schema_name],
schema_collection,
schema_templates
)
elif child_type == "schema_template":
for filled_child in _fill_schema_template(
child, schema_collection, schema_templates
):
new_children.append(filled_child)
continue
else:
filled_child = _fill_inner_schemas(
child, schema_collection, schema_templates
)
new_children.append(filled_child)
if item_key == object_type_key:
if len(new_children) != 1:
raise KeyError((
"Failed to fill object type with type: {} | name {}"
).format(
child_type, str(child.get("name"))
))
new_children = new_children[0]
schema_data[item_key] = new_children
return schema_data
class SchemaTemplateMissingKeys(Exception):
def __init__(self, missing_keys, required_keys, template_name=None):
self.missing_keys = missing_keys
self.required_keys = required_keys
if template_name:
msg = f"Schema template \"{template_name}\" require more keys.\n"
else:
msg = ""
msg += "Required keys: {}\nMissing keys: {}".format(
self.join_keys(required_keys),
self.join_keys(missing_keys)
)
super(SchemaTemplateMissingKeys, self).__init__(msg)
def join_keys(self, keys):
return ", ".join([
f"\"{key}\"" for key in keys
])
class SchemaMissingFileInfo(Exception):
def __init__(self, invalid):
full_path_keys = []
for item in invalid:
full_path_keys.append("\"{}\"".format("/".join(item)))
msg = (
"Schema has missing definition of output file (\"is_file\" key)"
" for keys. [{}]"
).format(", ".join(full_path_keys))
super(SchemaMissingFileInfo, self).__init__(msg)
class SchemeGroupHierarchyBug(Exception):
def __init__(self, invalid):
full_path_keys = []
for item in invalid:
full_path_keys.append("\"{}\"".format("/".join(item)))
msg = (
"Items with attribute \"is_group\" can't have another item with"
" \"is_group\" attribute as child. Error happened for keys: [{}]"
).format(", ".join(full_path_keys))
super(SchemeGroupHierarchyBug, self).__init__(msg)
class SchemaDuplicatedKeys(Exception):
def __init__(self, invalid):
items = []
for key_path, keys in invalid.items():
joined_keys = ", ".join([
"\"{}\"".format(key) for key in keys
])
items.append("\"{}\" ({})".format(key_path, joined_keys))
msg = (
"Schema items contain duplicated keys in one hierarchy level. {}"
).format(" || ".join(items))
super(SchemaDuplicatedKeys, self).__init__(msg)
class SchemaDuplicatedEnvGroupKeys(Exception):
def __init__(self, invalid):
items = []
for key_path, keys in invalid.items():
joined_keys = ", ".join([
"\"{}\"".format(key) for key in keys
])
items.append("\"{}\" ({})".format(key_path, joined_keys))
msg = (
"Schema items contain duplicated environment group keys. {}"
).format(" || ".join(items))
super(SchemaDuplicatedEnvGroupKeys, self).__init__(msg)
def file_keys_from_schema(schema_data):
output = []
item_type = schema_data["type"]
klass = TypeToKlass.types[item_type]
if not klass.is_input_type:
return output
keys = []
key = schema_data.get("key")
if key:
keys.append(key)
for child in schema_data["children"]:
if child.get("is_file"):
_keys = copy.deepcopy(keys)
_keys.append(child["key"])
output.append(_keys)
continue
for result in file_keys_from_schema(child):
_keys = copy.deepcopy(keys)
_keys.extend(result)
output.append(_keys)
return output
def validate_all_has_ending_file(schema_data, is_top=True):
item_type = schema_data["type"]
klass = TypeToKlass.types[item_type]
if not klass.is_input_type:
return None
if schema_data.get("is_file"):
return None
children = schema_data.get("children")
if not children:
return [[schema_data["key"]]]
invalid = []
keyless = "key" not in schema_data
for child in children:
result = validate_all_has_ending_file(child, False)
if result is None:
continue
if keyless:
invalid.extend(result)
else:
for item in result:
new_invalid = [schema_data["key"]]
new_invalid.extend(item)
invalid.append(new_invalid)
if not invalid:
return None
if not is_top:
return invalid
raise SchemaMissingFileInfo(invalid)
def validate_is_group_is_unique_in_hierarchy(
schema_data, any_parent_is_group=False, keys=None
):
is_top = keys is None
if keys is None:
keys = []
keyless = "key" not in schema_data
if not keyless:
keys.append(schema_data["key"])
invalid = []
is_group = schema_data.get("is_group")
if is_group and any_parent_is_group:
invalid.append(copy.deepcopy(keys))
if is_group:
any_parent_is_group = is_group
children = schema_data.get("children")
if not children:
return invalid
for child in children:
result = validate_is_group_is_unique_in_hierarchy(
child, any_parent_is_group, copy.deepcopy(keys)
)
if not result:
continue
invalid.extend(result)
if invalid and is_group and keys not in invalid:
invalid.append(copy.deepcopy(keys))
if not is_top:
return invalid
if invalid:
raise SchemeGroupHierarchyBug(invalid)
def validate_keys_are_unique(schema_data, keys=None):
children = schema_data.get("children")
if not children:
return
is_top = keys is None
if keys is None:
keys = [schema_data["key"]]
else:
keys.append(schema_data["key"])
child_queue = Queue()
for child in children:
child_queue.put(child)
child_inputs = []
while not child_queue.empty():
child = child_queue.get()
if "key" not in child:
_children = child.get("children") or []
for _child in _children:
child_queue.put(_child)
else:
child_inputs.append(child)
duplicated_keys = set()
child_keys = set()
for child in child_inputs:
key = child["key"]
if key in child_keys:
duplicated_keys.add(key)
else:
child_keys.add(key)
invalid = {}
if duplicated_keys:
joined_keys = "/".join(keys)
invalid[joined_keys] = duplicated_keys
for child in child_inputs:
result = validate_keys_are_unique(child, copy.deepcopy(keys))
if result:
invalid.update(result)
if not is_top:
return invalid
if invalid:
raise SchemaDuplicatedKeys(invalid)
def validate_environment_groups_uniquenes(
schema_data, env_groups=None, keys=None
):
is_first = False
if env_groups is None:
is_first = True
env_groups = {}
keys = []
my_keys = copy.deepcopy(keys)
key = schema_data.get("key")
if key:
my_keys.append(key)
env_group_key = schema_data.get("env_group_key")
if env_group_key:
if env_group_key not in env_groups:
env_groups[env_group_key] = []
env_groups[env_group_key].append("/".join(my_keys))
children = schema_data.get("children")
if not children:
return
for child in children:
validate_environment_groups_uniquenes(
child, env_groups, copy.deepcopy(my_keys)
)
if is_first:
invalid = {}
for env_group_key, key_paths in env_groups.items():
if len(key_paths) > 1:
invalid[env_group_key] = key_paths
if invalid:
raise SchemaDuplicatedEnvGroupKeys(invalid)
def validate_schema(schema_data):
validate_all_has_ending_file(schema_data)
validate_is_group_is_unique_in_hierarchy(schema_data)
validate_keys_are_unique(schema_data)
validate_environment_groups_uniquenes(schema_data)
def gui_schema(subfolder, main_schema_name):
subfolder, main_schema_name
dirpath = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"gui_schemas",
subfolder
)
loaded_schemas = {}
loaded_schema_templates = {}
for root, _, filenames in os.walk(dirpath):
for filename in filenames:
basename, ext = os.path.splitext(filename)
if ext != ".json":
continue
filepath = os.path.join(root, filename)
with open(filepath, "r") as json_stream:
try:
schema_data = json.load(json_stream)
except Exception as exc:
raise Exception((
f"Unable to parse JSON file {filepath}\n{exc}"
)) from exc
if isinstance(schema_data, list):
loaded_schema_templates[basename] = schema_data
else:
loaded_schemas[basename] = schema_data
main_schema = _fill_inner_schemas(
loaded_schemas[main_schema_name],
loaded_schemas,
loaded_schema_templates
)
validate_schema(main_schema)
return main_schema

View file

@@ -5,7 +5,7 @@ from .categories import (
ProjectWidget
)
from .widgets import ShadowWidget
from .. import style
from . import style
from openpype.tools.settings import (
is_password_required,

View file

@@ -5,7 +5,7 @@ from .widgets import (
ExpandingWidget,
GridLabelWidget
)
from .lib import CHILD_OFFSET
from openpype.tools.settings import CHILD_OFFSET
class WrapperWidget(QtWidgets.QWidget):