Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
[Automated] Merged develop into main
Commit f20416a31a
6 changed files with 137 additions and 20 deletions
@@ -170,7 +170,10 @@ class CreatorWidget(QtWidgets.QDialog):
         for func, val in kwargs.items():
             if getattr(item, func):
                 func_attr = getattr(item, func)
-                func_attr(val)
+                if isinstance(val, tuple):
+                    func_attr(*val)
+                else:
+                    func_attr(val)

         # add to layout
         layout.addRow(label, item)
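The tuple branch exists so one kwargs entry can drive Qt setters that take several positional arguments, which is what the setRange change in the next hunk relies on. A minimal sketch of the idea, assuming a qtpy-style Qt binding and a made-up apply_kwargs helper (not the actual create_row implementation):

    import sys
    from qtpy import QtWidgets  # assumption: qtpy is the Qt wrapper in use

    def apply_kwargs(item, **kwargs):
        # Call each named setter on the widget; tuples are unpacked so
        # multi-argument setters such as setRange(min, max) can be used.
        for func, val in kwargs.items():
            func_attr = getattr(item, func, None)
            if not func_attr:
                continue
            if isinstance(val, tuple):
                func_attr(*val)   # setRange=(1, 9999999) -> setRange(1, 9999999)
            else:
                func_attr(val)    # setValue=25 -> setValue(25)

    app = QtWidgets.QApplication(sys.argv)
    spin = QtWidgets.QSpinBox()
    apply_kwargs(spin, setRange=(1, 9999999), setValue=25, setToolTip="frame end")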
@@ -273,8 +276,8 @@ class CreatorWidget(QtWidgets.QDialog):
             elif v["type"] == "QSpinBox":
                 data[k]["value"] = self.create_row(
                     content_layout, "QSpinBox", v["label"],
-                    setValue=v["value"], setMinimum=0,
-                    setMaximum=100000, setToolTip=tool_tip)
+                    setRange=(1, 9999999), setValue=v["value"],
+                    setToolTip=tool_tip)
         return data
@@ -457,9 +457,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

             cam = [c for c in cameras if c in col.head]
             if cam:
-                subset_name = '{}_{}_{}'.format(group_name, cam, aov)
+                if aov:
+                    subset_name = '{}_{}_{}'.format(group_name, cam, aov)
+                else:
+                    subset_name = '{}_{}'.format(group_name, cam)
             else:
-                subset_name = '{}_{}'.format(group_name, aov)
+                if aov:
+                    subset_name = '{}_{}'.format(group_name, aov)
+                else:
+                    subset_name = '{}'.format(group_name)

             if isinstance(col, (list, tuple)):
                 staging = os.path.dirname(col[0])
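The extra branches stop an empty aov (or missing camera) from leaving trailing underscores in the subset name. A rough illustration with made-up values (in the plugin itself cam is a list built from the collected cameras and aov comes from the render output):

    group_name = "renderMain"
    for cam, aov in [("persp", "beauty"), ("persp", ""), ("", "beauty"), ("", "")]:
        if cam:
            if aov:
                subset_name = '{}_{}_{}'.format(group_name, cam, aov)
            else:
                subset_name = '{}_{}'.format(group_name, cam)
        else:
            if aov:
                subset_name = '{}_{}'.format(group_name, aov)
            else:
                subset_name = '{}'.format(group_name)
        print(subset_name)
    # renderMain_persp_beauty, renderMain_persp, renderMain_beauty, renderMain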
@@ -3,26 +3,26 @@ import re
 import copy
 import json
 import shutil

 from abc import ABCMeta, abstractmethod

 import six

 import clique

+import speedcopy
 import pyblish.api

 from openpype.lib import (
     get_ffmpeg_tool_path,
-    get_ffprobe_streams,
     path_to_subprocess_arg,
     run_subprocess,
 )
 from openpype.lib.transcoding import (
+    IMAGE_EXTENSIONS,
+    get_ffprobe_streams,
     should_convert_for_ffmpeg,
     convert_input_paths_for_ffmpeg,
-    get_transcode_temp_directory
+    get_transcode_temp_directory,
 )
-import speedcopy


 class ExtractReview(pyblish.api.InstancePlugin):
@@ -175,6 +175,26 @@ class ExtractReview(pyblish.api.InstancePlugin):
             outputs_per_representations.append((repre, outputs))
         return outputs_per_representations

+    def _single_frame_filter(self, input_filepaths, output_defs):
+        single_frame_image = False
+        if len(input_filepaths) == 1:
+            ext = os.path.splitext(input_filepaths[0])[-1]
+            single_frame_image = ext in IMAGE_EXTENSIONS
+
+        filtered_defs = []
+        for output_def in output_defs:
+            output_filters = output_def.get("filter") or {}
+            frame_filter = output_filters.get("single_frame_filter")
+            if (
+                (not single_frame_image and frame_filter == "single_frame")
+                or (single_frame_image and frame_filter == "multi_frame")
+            ):
+                continue
+
+            filtered_defs.append(output_def)
+
+        return filtered_defs
+
     @staticmethod
     def get_instance_label(instance):
         return (
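A standalone sketch of the new filtering rule with made-up output definitions and file lists (the real method uses IMAGE_EXTENSIONS from openpype.lib.transcoding and runs on the collected representation files):

    import os

    IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".exr", ".tif", ".tiff"}  # illustrative subset

    def single_frame_filter(input_filepaths, output_defs):
        # Input counts as a single frame image only when there is exactly one
        # file and it has an image extension (one .mp4 file is still a video).
        single_frame_image = (
            len(input_filepaths) == 1
            and os.path.splitext(input_filepaths[0])[-1] in IMAGE_EXTENSIONS
        )
        filtered_defs = []
        for output_def in output_defs:
            frame_filter = (output_def.get("filter") or {}).get("single_frame_filter")
            if not single_frame_image and frame_filter == "single_frame":
                continue  # output wants a single still image, input is not one
            if single_frame_image and frame_filter == "multi_frame":
                continue  # output wants video/sequence, input is one still image
            filtered_defs.append(output_def)
        return filtered_defs

    defs = [
        {"name": "png", "filter": {"single_frame_filter": "single_frame"}},
        {"name": "h264", "filter": {"single_frame_filter": "multi_frame"}},
    ]
    print([d["name"] for d in single_frame_filter(["/tmp/thumb.png"], defs)])
    # ['png']
    print([d["name"] for d in single_frame_filter(["/tmp/sh010.1001.exr", "/tmp/sh010.1002.exr"], defs)])
    # ['h264']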
@@ -195,7 +215,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
         outputs_per_repres = self._get_outputs_per_representations(
             instance, profile_outputs
         )
-        for repre, outpu_defs in outputs_per_repres:
+        for repre, output_defs in outputs_per_repres:
             # Check if input should be preconverted before processing
             # Store original staging dir (it's value may change)
             src_repre_staging_dir = repre["stagingDir"]
@@ -216,6 +236,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 if first_input_path is None:
                     first_input_path = filepath

+            filtered_output_defs = self._single_frame_filter(
+                input_filepaths, output_defs
+            )
+            if not filtered_output_defs:
+                self.log.debug((
+                    "Repre: {} - All output definitions were filtered"
+                    " out by single frame filter. Skipping"
+                ).format(repre["name"]))
+                continue
+
             # Skip if file is not set
             if first_input_path is None:
                 self.log.warning((
@@ -249,7 +279,10 @@ class ExtractReview(pyblish.api.InstancePlugin):

             try:
                 self._render_output_definitions(
-                    instance, repre, src_repre_staging_dir, outpu_defs
+                    instance,
+                    repre,
+                    src_repre_staging_dir,
+                    filtered_output_defs
                 )

             finally:
@@ -263,10 +296,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 shutil.rmtree(new_staging_dir)

     def _render_output_definitions(
-        self, instance, repre, src_repre_staging_dir, outpu_defs
+        self, instance, repre, src_repre_staging_dir, output_defs
     ):
         fill_data = copy.deepcopy(instance.data["anatomyData"])
-        for _output_def in outpu_defs:
+        for _output_def in output_defs:
             output_def = copy.deepcopy(_output_def)
             # Make sure output definition has "tags" key
             if "tags" not in output_def:
@@ -1659,9 +1692,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
             return True
         return False

-    def filter_output_defs(
-        self, profile, subset_name, families
-    ):
+    def filter_output_defs(self, profile, subset_name, families):
        """Return outputs matching input instance families.

        Output definitions without families filter are marked as valid.
@@ -53,6 +53,62 @@
             "families": [],
             "hosts": [],
             "outputs": {
+                "png": {
+                    "ext": "png",
+                    "tags": [
+                        "ftrackreview"
+                    ],
+                    "burnins": [],
+                    "ffmpeg_args": {
+                        "video_filters": [],
+                        "audio_filters": [],
+                        "input": [],
+                        "output": []
+                    },
+                    "filter": {
+                        "families": [
+                            "render",
+                            "review",
+                            "ftrack"
+                        ],
+                        "subsets": [],
+                        "custom_tags": [],
+                        "single_frame_filter": "single_frame"
+                    },
+                    "overscan_crop": "",
+                    "overscan_color": [
+                        0,
+                        0,
+                        0,
+                        255
+                    ],
+                    "width": 1920,
+                    "height": 1080,
+                    "scale_pixel_aspect": true,
+                    "bg_color": [
+                        0,
+                        0,
+                        0,
+                        0
+                    ],
+                    "letter_box": {
+                        "enabled": false,
+                        "ratio": 0.0,
+                        "fill_color": [
+                            0,
+                            0,
+                            0,
+                            255
+                        ],
+                        "line_thickness": 0,
+                        "line_color": [
+                            255,
+                            0,
+                            0,
+                            255
+                        ]
+                    }
+                },
                 "h264": {
                     "ext": "mp4",
                     "tags": [
@@ -79,7 +135,8 @@
                             "ftrack"
                         ],
                         "subsets": [],
-                        "custom_tags": []
+                        "custom_tags": [],
+                        "single_frame_filter": "multi_frame"
                     },
                     "overscan_crop": "",
                     "overscan_color": [
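Taken together, the two settings hunks above give the default profile one still-image output and one video output. A trimmed, illustrative excerpt of how the new key splits them (all other output keys omitted):

    {
        "outputs": {
            "png":  {"ext": "png", "filter": {"single_frame_filter": "single_frame"}},
            "h264": {"ext": "mp4", "filter": {"single_frame_filter": "multi_frame"}}
        }
    }

With these values a publish of a single still image produces only the png review, while a frame sequence or movie produces only the h264 review; leaving the key at "everytime" keeps an output for both kinds of input.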
@@ -304,6 +304,20 @@
                     "label": "Custom Tags",
                     "type": "list",
                     "object_type": "text"
                 },
+                {
+                    "type": "label",
+                    "label": "Use output <b>always</b> / only if input <b>is 1 frame</b> image / only if has <b>2+ frames</b> or <b>is video</b>"
+                },
+                {
+                    "type": "enum",
+                    "key": "single_frame_filter",
+                    "default": "everytime",
+                    "enum_items": [
+                        {"everytime": "Always"},
+                        {"single_frame": "Only if input has 1 image frame"},
+                        {"multi_frame": "Only if input is video or sequence of frames"}
+                    ]
+                }
             ]
         },
@@ -135,6 +135,12 @@ Profile may generate multiple outputs from a single input. Each output must defi
     - set alpha to `0` to not use this option at all (in most of cases background stays black)
     - other than `0` alpha will draw color as background

+- **`Additional filtering`**
+    - Profile filtering defines which group of output definitions is used but output definitions may require more specific filters on their own.
+    - They may filter by subset name (regex can be used) or publish families. Publish families are more complex as are based on knowing code base.
+    - Filtering by custom tags -> this is used for targeting to output definitions from other extractors using settings (at this moment only Nuke bake extractor can target using custom tags).
+        - Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
+    - Filtering by input length. Input may be video, sequence or single image. It is possible that `.mp4` should be created only when input is video or sequence and to create review `.png` when input is single frame. In some cases the output should be created even if it's single frame or multi frame input.

 ### IntegrateAssetNew