Merge branch 'develop' of github.com:pypeclub/OpenPype into feature/OP-3426_Add-support-for-Deadline-for-automatic-tests

Commit 1114c86f15 by Petr Kalis, 2022-11-11 16:39:59 +01:00
16 changed files with 230 additions and 56 deletions

View file

@@ -2,7 +2,7 @@ name: Milestone - assign to PRs
on:
pull_request_target:
types: [opened, reopened, edited, synchronize]
types: [closed]
jobs:
run_if_release:

View file

@@ -225,7 +225,8 @@ class FlameMenuUniversal(_FlameMenuApp):
menu['actions'].append({
"name": "Load...",
"execute": lambda x: self.tools_helper.show_loader()
"execute": lambda x: callback_selection(
x, self.tools_helper.show_loader)
})
menu['actions'].append({
"name": "Manage...",

View file

@@ -1,3 +1,4 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
@@ -22,7 +23,7 @@ class LoadClipBatch(opfapi.ClipLoader):
# settings
reel_name = "OP_LoadedReel"
clip_name_template = "{asset}_{subset}<_{output}>"
clip_name_template = "{batch}_{asset}_{subset}<_{output}>"
def load(self, context, name, namespace, options):
@@ -40,8 +41,11 @@ class LoadClipBatch(opfapi.ClipLoader):
if not context["representation"]["context"].get("output"):
self.clip_name_template.replace("output", "representation")
formating_data = deepcopy(context["representation"]["context"])
formating_data["batch"] = self.batch.name.get_value()
clip_name = StringTemplate(self.clip_name_template).format(
context["representation"]["context"])
formating_data)
# TODO: settings in imageio
# convert colorspace with ocio to flame mapping
@@ -56,6 +60,7 @@ class LoadClipBatch(opfapi.ClipLoader):
openclip_path = os.path.join(
openclip_dir, clip_name + ".clip"
)
if not os.path.exists(openclip_dir):
os.makedirs(openclip_dir)

View file

@@ -170,7 +170,10 @@ class CreatorWidget(QtWidgets.QDialog):
for func, val in kwargs.items():
if getattr(item, func):
func_attr = getattr(item, func)
func_attr(val)
if isinstance(val, tuple):
func_attr(*val)
else:
func_attr(val)
# add to layout
layout.addRow(label, item)
@@ -273,8 +276,8 @@ class CreatorWidget(QtWidgets.QDialog):
elif v["type"] == "QSpinBox":
data[k]["value"] = self.create_row(
content_layout, "QSpinBox", v["label"],
setValue=v["value"], setMinimum=0,
setMaximum=100000, setToolTip=tool_tip)
setRange=(1, 9999999), setValue=v["value"],
setToolTip=tool_tip)
return data

View file

@@ -1,7 +1,8 @@
import os
import nuke
import qargparse
from pprint import pformat
from copy import deepcopy
from openpype.lib import Logger
from openpype.client import (
get_version_by_id,
get_last_version_by_subset_id,
@@ -28,6 +29,7 @@ class LoadClip(plugin.NukeLoader):
Either it is an image sequence or a video file.
"""
log = Logger.get_logger(__name__)
families = [
"source",
@@ -85,24 +87,19 @@ class LoadClip(plugin.NukeLoader):
+ plugin.get_review_presets_config()
)
def _fix_path_for_knob(self, filepath, repre_cont):
basename = os.path.basename(filepath)
dirname = os.path.dirname(filepath)
frame = repre_cont.get("frame")
assert frame, "Representation is not sequence"
padding = len(str(frame))
basename = basename.replace(frame, "#" * padding)
return os.path.join(dirname, basename).replace("\\", "/")
def load(self, context, name, namespace, options):
repre = context["representation"]
representation = context["representation"]
# reset container id so it is always unique for each instance
self.reset_container_id()
is_sequence = len(repre["files"]) > 1
is_sequence = len(representation["files"]) > 1
filepath = self.fname.replace("\\", "/")
if is_sequence:
representation = self._representation_with_hash_in_frame(
representation
)
filepath = get_representation_path(representation).replace("\\", "/")
self.log.debug("_ filepath: {}".format(filepath))
start_at_workfile = options.get(
"start_at_workfile", self.options_defaults["start_at_workfile"])
@@ -112,11 +109,10 @@ class LoadClip(plugin.NukeLoader):
version = context['version']
version_data = version.get("data", {})
repre_id = repre["_id"]
repre_id = representation["_id"]
repre_cont = repre["context"]
self.log.info("version_data: {}\n".format(version_data))
self.log.debug("_ version_data: {}\n".format(
pformat(version_data)))
self.log.debug(
"Representation id `{}` ".format(repre_id))
@@ -132,8 +128,6 @@ class LoadClip(plugin.NukeLoader):
duration = last - first
first = 1
last = first + duration
elif "#" not in filepath:
filepath = self._fix_path_for_knob(filepath, repre_cont)
# Fallback to asset name when namespace is None
if namespace is None:
@@ -144,7 +138,7 @@ class LoadClip(plugin.NukeLoader):
"Representation id `{}` is failing to load".format(repre_id))
return
read_name = self._get_node_name(repre)
read_name = self._get_node_name(representation)
# Create the Loader with the filename path set
read_node = nuke.createNode(
@@ -157,7 +151,7 @@ class LoadClip(plugin.NukeLoader):
read_node["file"].setValue(filepath)
used_colorspace = self._set_colorspace(
read_node, version_data, repre["data"])
read_node, version_data, representation["data"])
self._set_range_to_node(read_node, first, last, start_at_workfile)
@@ -179,7 +173,7 @@ class LoadClip(plugin.NukeLoader):
data_imprint[k] = version
elif k == 'colorspace':
colorspace = repre["data"].get(k)
colorspace = representation["data"].get(k)
colorspace = colorspace or version_data.get(k)
data_imprint["db_colorspace"] = colorspace
if used_colorspace:
@@ -213,6 +207,20 @@ class LoadClip(plugin.NukeLoader):
def switch(self, container, representation):
self.update(container, representation)
def _representation_with_hash_in_frame(self, representation):
"""Convert frame key value to padded hash
Args:
representation (dict): representation data
Returns:
dict: altered representation data
"""
representation = deepcopy(representation)
frame = representation["context"]["frame"]
representation["context"]["frame"] = "#" * len(str(frame))
return representation
def update(self, container, representation):
"""Update the Loader's path
@@ -225,7 +233,13 @@ class LoadClip(plugin.NukeLoader):
is_sequence = len(representation["files"]) > 1
read_node = nuke.toNode(container['objectName'])
if is_sequence:
representation = self._representation_with_hash_in_frame(
representation
)
filepath = get_representation_path(representation).replace("\\", "/")
self.log.debug("_ filepath: {}".format(filepath))
start_at_workfile = "start at" in read_node['frame_mode'].value()
@@ -240,8 +254,6 @@ class LoadClip(plugin.NukeLoader):
version_data = version_doc.get("data", {})
repre_id = representation["_id"]
repre_cont = representation["context"]
# colorspace profile
colorspace = representation["data"].get("colorspace")
colorspace = colorspace or version_data.get("colorspace")
@@ -258,8 +270,6 @@ class LoadClip(plugin.NukeLoader):
duration = last - first
first = 1
last = first + duration
elif "#" not in filepath:
filepath = self._fix_path_for_knob(filepath, repre_cont)
if not filepath:
self.log.warning(
@@ -348,8 +358,10 @@ class LoadClip(plugin.NukeLoader):
time_warp_nodes = version_data.get('timewarps', [])
last_node = None
source_id = self.get_container_id(parent_node)
self.log.info("__ source_id: {}".format(source_id))
self.log.info("__ members: {}".format(self.get_members(parent_node)))
self.log.debug("__ source_id: {}".format(source_id))
self.log.debug("__ members: {}".format(
self.get_members(parent_node)))
dependent_nodes = self.clear_members(parent_node)
with maintained_selection():

View file

@@ -470,9 +470,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
cam = [c for c in cameras if c in col.head]
if cam:
subset_name = '{}_{}_{}'.format(group_name, cam, aov)
if aov:
subset_name = '{}_{}_{}'.format(group_name, cam, aov)
else:
subset_name = '{}_{}'.format(group_name, cam)
else:
subset_name = '{}_{}'.format(group_name, aov)
if aov:
subset_name = '{}_{}'.format(group_name, aov)
else:
subset_name = '{}'.format(group_name)
if isinstance(col, (list, tuple)):
staging = os.path.dirname(col[0])

View file

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
import os
import re
import pyblish.api
class CollectKitsuUsername(pyblish.api.ContextPlugin):
"""Collect Kitsu username from the kitsu login"""
order = pyblish.api.CollectorOrder + 0.499
label = "Kitsu username"
def process(self, context):
kitsu_login = os.environ.get('KITSU_LOGIN')
if not kitsu_login:
return
kitsu_username = kitsu_login.split("@")[0].replace('.', ' ')
new_username = re.sub('[^a-zA-Z]', ' ', kitsu_username).title()
for instance in context:
# Don't override customData if it already exists
if 'customData' not in instance.data:
instance.data['customData'] = {}
instance.data['customData']["kitsuUsername"] = new_username

View file

@@ -3,26 +3,26 @@ import re
import copy
import json
import shutil
from abc import ABCMeta, abstractmethod
import six
import clique
import speedcopy
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_path,
get_ffprobe_streams,
path_to_subprocess_arg,
run_subprocess,
)
from openpype.lib.transcoding import (
IMAGE_EXTENSIONS,
get_ffprobe_streams,
should_convert_for_ffmpeg,
convert_input_paths_for_ffmpeg,
get_transcode_temp_directory
get_transcode_temp_directory,
)
import speedcopy
class ExtractReview(pyblish.api.InstancePlugin):
@@ -175,6 +175,26 @@ class ExtractReview(pyblish.api.InstancePlugin):
outputs_per_representations.append((repre, outputs))
return outputs_per_representations
def _single_frame_filter(self, input_filepaths, output_defs):
single_frame_image = False
if len(input_filepaths) == 1:
ext = os.path.splitext(input_filepaths[0])[-1]
single_frame_image = ext in IMAGE_EXTENSIONS
filtered_defs = []
for output_def in output_defs:
output_filters = output_def.get("filter") or {}
frame_filter = output_filters.get("single_frame_filter")
if (
(not single_frame_image and frame_filter == "single_frame")
or (single_frame_image and frame_filter == "multi_frame")
):
continue
filtered_defs.append(output_def)
return filtered_defs
@staticmethod
def get_instance_label(instance):
return (
@@ -195,7 +215,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
outputs_per_repres = self._get_outputs_per_representations(
instance, profile_outputs
)
for repre, outpu_defs in outputs_per_repres:
for repre, output_defs in outputs_per_repres:
# Check if input should be preconverted before processing
# Store original staging dir (its value may change)
src_repre_staging_dir = repre["stagingDir"]
@@ -216,6 +236,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
if first_input_path is None:
first_input_path = filepath
filtered_output_defs = self._single_frame_filter(
input_filepaths, output_defs
)
if not filtered_output_defs:
self.log.debug((
"Repre: {} - All output definitions were filtered"
" out by single frame filter. Skipping"
).format(repre["name"]))
continue
# Skip if file is not set
if first_input_path is None:
self.log.warning((
@@ -249,7 +279,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
try:
self._render_output_definitions(
instance, repre, src_repre_staging_dir, outpu_defs
instance,
repre,
src_repre_staging_dir,
filtered_output_defs
)
finally:
@@ -263,10 +296,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
shutil.rmtree(new_staging_dir)
def _render_output_definitions(
self, instance, repre, src_repre_staging_dir, outpu_defs
self, instance, repre, src_repre_staging_dir, output_defs
):
fill_data = copy.deepcopy(instance.data["anatomyData"])
for _output_def in outpu_defs:
for _output_def in output_defs:
output_def = copy.deepcopy(_output_def)
# Make sure output definition has "tags" key
if "tags" not in output_def:
@@ -1659,9 +1692,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
return True
return False
def filter_output_defs(
self, profile, subset_name, families
):
def filter_output_defs(self, profile, subset_name, families):
"""Return outputs matching input instance families.
Output definitions without a families filter are marked as valid.

View file

@@ -142,7 +142,7 @@
"exr16fpdwaa"
],
"reel_name": "OP_LoadedReel",
"clip_name_template": "{asset}_{subset}<_{output}>"
"clip_name_template": "{batch}_{asset}_{subset}<_{output}>"
}
}
}

View file

@@ -53,6 +53,62 @@
"families": [],
"hosts": [],
"outputs": {
"png": {
"ext": "png",
"tags": [
"ftrackreview"
],
"burnins": [],
"ffmpeg_args": {
"video_filters": [],
"audio_filters": [],
"input": [],
"output": []
},
"filter": {
"families": [
"render",
"review",
"ftrack"
],
"subsets": [],
"custom_tags": [],
"single_frame_filter": "single_frame"
},
"overscan_crop": "",
"overscan_color": [
0,
0,
0,
255
],
"width": 1920,
"height": 1080,
"scale_pixel_aspect": true,
"bg_color": [
0,
0,
0,
0
],
"letter_box": {
"enabled": false,
"ratio": 0.0,
"fill_color": [
0,
0,
0,
255
],
"line_thickness": 0,
"line_color": [
255,
0,
0,
255
]
}
},
"h264": {
"ext": "mp4",
"tags": [
@@ -79,7 +135,8 @@
"ftrack"
],
"subsets": [],
"custom_tags": []
"custom_tags": [],
"single_frame_filter": "multi_frame"
},
"overscan_crop": "",
"overscan_color": [

View file

@@ -304,6 +304,20 @@
"label": "Custom Tags",
"type": "list",
"object_type": "text"
},
{
"type": "label",
"label": "Use output <b>always</b> / only if input <b>is 1 frame</b> image / only if has <b>2+ frames</b> or <b>is video</b>"
},
{
"type": "enum",
"key": "single_frame_filter",
"default": "everytime",
"enum_items": [
{"everytime": "Always"},
{"single_frame": "Only if input has 1 image frame"},
{"multi_frame": "Only if input is video or sequence of frames"}
]
}
]
},

View file

@@ -27,6 +27,9 @@ class PluginLoadReportModel(QtGui.QStandardItemModel):
parent = self.invisibleRootItem()
parent.removeRows(0, parent.rowCount())
if report is None:
return
new_items = []
new_items_by_filepath = {}
for filepath in report.crashed_plugin_paths.keys():

View file

@@ -367,6 +367,7 @@ class LoadedFilesView(QtWidgets.QTreeView):
def _on_rows_inserted(self):
header = self.header()
header.resizeSections(header.ResizeToContents)
self._update_remove_btn()
def resizeEvent(self, event):
super(LoadedFilesView, self).resizeEvent(event)

View file

@@ -361,6 +361,13 @@ class PublisherWindow(QtWidgets.QDialog):
super(PublisherWindow, self).resizeEvent(event)
self._update_publish_frame_rect()
def keyPressEvent(self, event):
# Ignore the Escape key so it does not close the window
if event.key() == QtCore.Qt.Key_Escape:
event.accept()
return
super(PublisherWindow, self).keyPressEvent(event)
def _on_overlay_message(self, event):
self._overlay_object.add_message(
event["message"],

View file

@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.14.7-nightly.2"
__version__ = "3.14.7-nightly.3"

View file

@@ -135,6 +135,12 @@ Profile may generate multiple outputs from a single input. Each output must defi
- set alpha to `0` to disable this option entirely (in most cases the background stays black)
- an alpha value other than `0` draws the color as the background
- **`Additional filtering`**
- Profile filtering selects which group of output definitions is used, but individual output definitions may require more specific filters of their own.
- They may filter by subset name (regex can be used) or by publish families. Filtering by publish families is more advanced, as it requires knowledge of the code base.
- Filtering by custom tags is used to target output definitions from other extractors via settings (at the moment only the Nuke bake extractor can target definitions using custom tags).
- Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
- Filtering by input length. The input may be a video, an image sequence or a single image. For example, an `.mp4` output should be created only when the input is a video or a sequence, while a review `.png` should be created only when the input is a single frame. In other cases the output should be created regardless of whether the input has one frame or many; see the example below.
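A minimal sketch of such a configuration, based on the default `png` and `h264` output definitions added in this commit; every key except `filter` is omitted for brevity, and `single_frame_filter` accepts `everytime`, `single_frame` or `multi_frame`:

```json
{
    "png": {
        "filter": {
            "families": ["render", "review", "ftrack"],
            "subsets": [],
            "custom_tags": [],
            "single_frame_filter": "single_frame"
        }
    },
    "h264": {
        "filter": {
            "families": ["render", "review", "ftrack"],
            "subsets": [],
            "custom_tags": [],
            "single_frame_filter": "multi_frame"
        }
    }
}
```

With this setup the `png` output is generated only when the input is a single image, while `h264` is generated only for image sequences and videos; `everytime` keeps the previous behavior of always generating the output.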
### IntegrateAssetNew