Merge branch 'main' into release/3.15.x

Jakub Jezek 2022-11-10 18:03:31 +01:00
commit 85e5741385
No known key found for this signature in database
GPG key ID: 730D7C02726179A7
24 changed files with 279 additions and 223 deletions

View file

@@ -2,7 +2,7 @@ name: Milestone - assign to PRs
on:
pull_request_target:
types: [opened, reopened, edited, synchronize]
types: [closed]
jobs:
run_if_release:

View file

@@ -170,7 +170,10 @@ class CreatorWidget(QtWidgets.QDialog):
for func, val in kwargs.items():
if getattr(item, func):
func_attr = getattr(item, func)
func_attr(val)
if isinstance(val, tuple):
func_attr(*val)
else:
func_attr(val)
# add to layout
layout.addRow(label, item)
@@ -273,8 +276,8 @@ class CreatorWidget(QtWidgets.QDialog):
elif v["type"] == "QSpinBox":
data[k]["value"] = self.create_row(
content_layout, "QSpinBox", v["label"],
setValue=v["value"], setMinimum=0,
setMaximum=100000, setToolTip=tool_tip)
setRange=(1, 9999999), setValue=v["value"],
setToolTip=tool_tip)
return data

View file

@@ -1,4 +1,4 @@
import qargparse
from openpype.lib.attribute_definitions import BoolDef
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import execute_george_through_file
@@ -27,26 +27,28 @@ class ImportImage(plugin.Loader):
"preload": True
}
options = [
qargparse.Boolean(
"stretch",
label="Stretch to project size",
default=True,
help="Stretch loaded image/s to project resolution?"
),
qargparse.Boolean(
"timestretch",
label="Stretch to timeline length",
default=True,
help="Clip loaded image/s to timeline length?"
),
qargparse.Boolean(
"preload",
label="Preload loaded image/s",
default=True,
help="Preload image/s?"
)
]
@classmethod
def get_options(cls, contexts):
return [
BoolDef(
"stretch",
label="Stretch to project size",
default=cls.defaults["stretch"],
tooltip="Stretch loaded image/s to project resolution?"
),
BoolDef(
"timestretch",
label="Stretch to timeline length",
default=cls.defaults["timestretch"],
tooltip="Clip loaded image/s to timeline length?"
),
BoolDef(
"preload",
label="Preload loaded image/s",
default=cls.defaults["preload"],
tooltip="Preload image/s?"
)
]
def load(self, context, name, namespace, options):
stretch = options.get("stretch", self.defaults["stretch"])

View file

@@ -1,7 +1,6 @@
import collections
import qargparse
from openpype.lib.attribute_definitions import BoolDef
from openpype.pipeline import (
get_representation_context,
register_host,
@@ -42,26 +41,28 @@ class LoadImage(plugin.Loader):
"preload": True
}
options = [
qargparse.Boolean(
"stretch",
label="Stretch to project size",
default=True,
help="Stretch loaded image/s to project resolution?"
),
qargparse.Boolean(
"timestretch",
label="Stretch to timeline length",
default=True,
help="Clip loaded image/s to timeline length?"
),
qargparse.Boolean(
"preload",
label="Preload loaded image/s",
default=True,
help="Preload image/s?"
)
]
@classmethod
def get_options(cls, contexts):
return [
BoolDef(
"stretch",
label="Stretch to project size",
default=cls.defaults["stretch"],
tooltip="Stretch loaded image/s to project resolution?"
),
BoolDef(
"timestretch",
label="Stretch to timeline length",
default=cls.defaults["timestretch"],
tooltip="Clip loaded image/s to timeline length?"
),
BoolDef(
"preload",
label="Preload loaded image/s",
default=cls.defaults["preload"],
tooltip="Preload image/s?"
)
]
def load(self, context, name, namespace, options):
stretch = options.get("stretch", self.defaults["stretch"])

View file

@@ -83,8 +83,9 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
self.log.info("task_data:: {}".format(task_data))
is_sequence = len(task_data["files"]) > 1
first_file = task_data["files"][0]
_, extension = os.path.splitext(task_data["files"][0])
_, extension = os.path.splitext(first_file)
family, families, tags = self._get_family(
self.task_type_to_family,
task_type,
@@ -149,10 +150,13 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin):
self.log.warning("Unable to count frames "
"duration {}".format(no_of_frames))
# raise ValueError("STOP")
instance.data["handleStart"] = asset_doc["data"]["handleStart"]
instance.data["handleEnd"] = asset_doc["data"]["handleEnd"]
if "review" in tags:
first_file_path = os.path.join(task_dir, first_file)
instance.data["thumbnailSource"] = first_file_path
instances.append(instance)
self.log.info("instance.data:: {}".format(instance.data))

View file

@@ -1,137 +0,0 @@
import os
import shutil
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_path,
run_subprocess,
get_transcode_temp_directory,
convert_input_paths_for_ffmpeg,
should_convert_for_ffmpeg
)
class ExtractThumbnail(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from input using ffmpeg."""
label = "Extract Thumbnail"
order = pyblish.api.ExtractorOrder
families = [
"render",
"image"
]
hosts = ["webpublisher"]
targets = ["filespublish"]
def process(self, instance):
self.log.info("subset {}".format(instance.data['subset']))
filtered_repres = self._get_filtered_repres(instance)
for repre in filtered_repres:
repre_files = repre["files"]
if not isinstance(repre_files, (list, tuple)):
input_file = repre_files
else:
file_index = int(float(len(repre_files)) * 0.5)
input_file = repre_files[file_index]
stagingdir = os.path.normpath(repre["stagingDir"])
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("Input filepath: {}".format(full_input_path))
do_convert = should_convert_for_ffmpeg(full_input_path)
# If result is None the requirement of conversion can't be
# determined
if do_convert is None:
self.log.info((
"Can't determine if representation requires conversion."
" Skipped."
))
continue
# Do conversion if needed
# - change staging dir of source representation
# - must be set back after output definitions processing
convert_dir = None
if do_convert:
convert_dir = get_transcode_temp_directory()
filename = os.path.basename(full_input_path)
convert_input_paths_for_ffmpeg(
[full_input_path],
convert_dir,
self.log
)
full_input_path = os.path.join(convert_dir, filename)
filename = os.path.splitext(input_file)[0]
while filename.endswith("."):
filename = filename[:-1]
thumbnail_filename = filename + "_thumbnail.jpg"
full_output_path = os.path.join(stagingdir, thumbnail_filename)
self.log.info("output {}".format(full_output_path))
ffmpeg_args = [
get_ffmpeg_tool_path("ffmpeg"),
"-y",
"-i", full_input_path,
"-vframes", "1",
full_output_path
]
# run subprocess
self.log.debug("{}".format(" ".join(ffmpeg_args)))
try: # temporary until oiiotool is supported cross platform
run_subprocess(
ffmpeg_args, logger=self.log
)
except RuntimeError as exp:
if "Compression" in str(exp):
self.log.debug(
"Unsupported compression on input files. Skipping!!!"
)
return
self.log.warning("Conversion crashed", exc_info=True)
raise
new_repre = {
"name": "thumbnail",
"ext": "jpg",
"files": thumbnail_filename,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ["thumbnail"]
}
# adding representation
self.log.debug("Adding: {}".format(new_repre))
instance.data["representations"].append(new_repre)
# Cleanup temp folder
if convert_dir is not None and os.path.exists(convert_dir):
shutil.rmtree(convert_dir)
def _get_filtered_repres(self, instance):
filtered_repres = []
repres = instance.data.get("representations") or []
for repre in repres:
self.log.debug(repre)
tags = repre.get("tags") or []
# Skip instance if already has thumbnail representation
if "thumbnail" in tags:
return []
if "review" not in tags:
continue
if not repre.get("files"):
self.log.info((
"Representation \"{}\" don't have files. Skipping"
).format(repre["name"]))
continue
filtered_repres.append(repre)
return filtered_repres

View file

@@ -91,7 +91,7 @@ class AbstractAttrDefMeta(ABCMeta):
@six.add_metaclass(AbstractAttrDefMeta)
class AbtractAttrDef:
class AbtractAttrDef(object):
"""Abstraction of attribute definiton.
Each attribute definition must have implemented validation and

View file

@@ -457,9 +457,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
cam = [c for c in cameras if c in col.head]
if cam:
subset_name = '{}_{}_{}'.format(group_name, cam, aov)
if aov:
subset_name = '{}_{}_{}'.format(group_name, cam, aov)
else:
subset_name = '{}_{}'.format(group_name, cam)
else:
subset_name = '{}_{}'.format(group_name, aov)
if aov:
subset_name = '{}_{}'.format(group_name, aov)
else:
subset_name = '{}'.format(group_name)
if isinstance(col, (list, tuple)):
staging = os.path.dirname(col[0])

View file

@@ -3,26 +3,26 @@ import re
import copy
import json
import shutil
from abc import ABCMeta, abstractmethod
import six
import clique
import speedcopy
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_path,
get_ffprobe_streams,
path_to_subprocess_arg,
run_subprocess,
)
from openpype.lib.transcoding import (
IMAGE_EXTENSIONS,
get_ffprobe_streams,
should_convert_for_ffmpeg,
convert_input_paths_for_ffmpeg,
get_transcode_temp_directory
get_transcode_temp_directory,
)
import speedcopy
class ExtractReview(pyblish.api.InstancePlugin):
@@ -175,6 +175,26 @@ class ExtractReview(pyblish.api.InstancePlugin):
outputs_per_representations.append((repre, outputs))
return outputs_per_representations
def _single_frame_filter(self, input_filepaths, output_defs):
single_frame_image = False
if len(input_filepaths) == 1:
ext = os.path.splitext(input_filepaths[0])[-1]
single_frame_image = ext in IMAGE_EXTENSIONS
filtered_defs = []
for output_def in output_defs:
output_filters = output_def.get("filter") or {}
frame_filter = output_filters.get("single_frame_filter")
if (
(not single_frame_image and frame_filter == "single_frame")
or (single_frame_image and frame_filter == "multi_frame")
):
continue
filtered_defs.append(output_def)
return filtered_defs
@staticmethod
def get_instance_label(instance):
return (
@@ -195,7 +215,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
outputs_per_repres = self._get_outputs_per_representations(
instance, profile_outputs
)
for repre, outpu_defs in outputs_per_repres:
for repre, output_defs in outputs_per_repres:
# Check if input should be preconverted before processing
# Store original staging dir (it's value may change)
src_repre_staging_dir = repre["stagingDir"]
@@ -216,6 +236,16 @@ class ExtractReview(pyblish.api.InstancePlugin):
if first_input_path is None:
first_input_path = filepath
filtered_output_defs = self._single_frame_filter(
input_filepaths, output_defs
)
if not filtered_output_defs:
self.log.debug((
"Repre: {} - All output definitions were filtered"
" out by single frame filter. Skipping"
).format(repre["name"]))
continue
# Skip if file is not set
if first_input_path is None:
self.log.warning((
@@ -249,7 +279,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
try:
self._render_output_definitions(
instance, repre, src_repre_staging_dir, outpu_defs
instance,
repre,
src_repre_staging_dir,
filtered_output_defs
)
finally:
@@ -263,10 +296,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
shutil.rmtree(new_staging_dir)
def _render_output_definitions(
self, instance, repre, src_repre_staging_dir, outpu_defs
self, instance, repre, src_repre_staging_dir, output_defs
):
fill_data = copy.deepcopy(instance.data["anatomyData"])
for _output_def in outpu_defs:
for _output_def in output_defs:
output_def = copy.deepcopy(_output_def)
# Make sure output definition has "tags" key
if "tags" not in output_def:
@@ -1659,9 +1692,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
return True
return False
def filter_output_defs(
self, profile, subset_name, families
):
def filter_output_defs(self, profile, subset_name, families):
"""Return outputs matching input instance families.
Output definitions without families filter are marked as valid.

View file

@@ -21,9 +21,8 @@ class PreIntegrateThumbnails(pyblish.api.InstancePlugin):
label = "Override Integrate Thumbnail Representations"
order = pyblish.api.IntegratorOrder - 0.1
families = ["review"]
integrate_profiles = {}
integrate_profiles = []
def process(self, instance):
repres = instance.data.get("representations")

View file

@@ -53,6 +53,62 @@
"families": [],
"hosts": [],
"outputs": {
"png": {
"ext": "png",
"tags": [
"ftrackreview"
],
"burnins": [],
"ffmpeg_args": {
"video_filters": [],
"audio_filters": [],
"input": [],
"output": []
},
"filter": {
"families": [
"render",
"review",
"ftrack"
],
"subsets": [],
"custom_tags": [],
"single_frame_filter": "single_frame"
},
"overscan_crop": "",
"overscan_color": [
0,
0,
0,
255
],
"width": 1920,
"height": 1080,
"scale_pixel_aspect": true,
"bg_color": [
0,
0,
0,
0
],
"letter_box": {
"enabled": false,
"ratio": 0.0,
"fill_color": [
0,
0,
0,
255
],
"line_thickness": 0,
"line_color": [
255,
0,
0,
255
]
}
},
"h264": {
"ext": "mp4",
"tags": [
@@ -79,7 +135,8 @@
"ftrack"
],
"subsets": [],
"custom_tags": []
"custom_tags": [],
"single_frame_filter": "multi_frame"
},
"overscan_crop": "",
"overscan_color": [

View file

@@ -304,6 +304,20 @@
"label": "Custom Tags",
"type": "list",
"object_type": "text"
},
{
"type": "label",
"label": "Use output <b>always</b> / only if input <b>is 1 frame</b> image / only if has <b>2+ frames</b> or <b>is video</b>"
},
{
"type": "enum",
"key": "single_frame_filter",
"default": "everytime",
"enum_items": [
{"everytime": "Always"},
{"single_frame": "Only if input has 1 image frame"},
{"multi_frame": "Only if input is video or sequence of frames"}
]
}
]
},

View file

@@ -3,8 +3,14 @@ from .widgets import (
AttributeDefinitionsWidget,
)
from .dialog import (
AttributeDefinitionsDialog,
)
__all__ = (
"create_widget_for_attr_def",
"AttributeDefinitionsWidget",
"AttributeDefinitionsDialog",
)

View file

@@ -0,0 +1,33 @@
from Qt import QtWidgets
from .widgets import AttributeDefinitionsWidget
class AttributeDefinitionsDialog(QtWidgets.QDialog):
def __init__(self, attr_defs, parent=None):
super(AttributeDefinitionsDialog, self).__init__(parent)
attrs_widget = AttributeDefinitionsWidget(attr_defs, self)
btns_widget = QtWidgets.QWidget(self)
ok_btn = QtWidgets.QPushButton("OK", btns_widget)
cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget)
btns_layout = QtWidgets.QHBoxLayout(btns_widget)
btns_layout.setContentsMargins(0, 0, 0, 0)
btns_layout.addStretch(1)
btns_layout.addWidget(ok_btn, 0)
btns_layout.addWidget(cancel_btn, 0)
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.addWidget(attrs_widget, 0)
main_layout.addStretch(1)
main_layout.addWidget(btns_widget, 0)
ok_btn.clicked.connect(self.accept)
cancel_btn.clicked.connect(self.reject)
self._attrs_widget = attrs_widget
def get_values(self):
return self._attrs_widget.current_value()

View file

@@ -2,6 +2,8 @@ import inspect
from Qt import QtGui
import qtawesome
from openpype.lib.attribute_definitions import AbtractAttrDef
from openpype.tools.attribute_defs import AttributeDefinitionsDialog
from openpype.tools.utils.widgets import (
OptionalAction,
OptionDialog
@@ -34,21 +36,30 @@ def get_options(action, loader, parent, repre_contexts):
None when dialog was closed or cancelled, in all other cases {}
if no options
"""
# Pop option dialog
options = {}
loader_options = loader.get_options(repre_contexts)
if getattr(action, "optioned", False) and loader_options:
if not getattr(action, "optioned", False) or not loader_options:
return options
if isinstance(loader_options[0], AbtractAttrDef):
qargparse_options = False
dialog = AttributeDefinitionsDialog(loader_options, parent)
else:
qargparse_options = True
dialog = OptionDialog(parent)
dialog.setWindowTitle(action.label + " Options")
dialog.create(loader_options)
if not dialog.exec_():
return None
dialog.setWindowTitle(action.label + " Options")
# Get option
options = dialog.parse()
if not dialog.exec_():
return None
return options
# Get option
if qargparse_options:
return dialog.parse()
return dialog.get_values()
def add_representation_loaders_to_menu(loaders, menu, repre_contexts):

View file

@@ -90,9 +90,9 @@ class AssetDocsCache:
return
project_name = self._controller.project_name
asset_docs = get_assets(
asset_docs = list(get_assets(
project_name, fields=self.projection.keys()
)
))
asset_docs_by_name = {}
task_names_by_asset_name = {}
for asset_doc in asset_docs:

View file

@@ -1,6 +1,6 @@
from Qt import QtWidgets, QtCore
from openpype.widgets.attribute_defs import create_widget_for_attr_def
from openpype.tools.attribute_defs import create_widget_for_attr_def
class PreCreateWidget(QtWidgets.QWidget):

View file

@@ -9,7 +9,7 @@ import collections
from Qt import QtWidgets, QtCore, QtGui
import qtawesome
from openpype.widgets.attribute_defs import create_widget_for_attr_def
from openpype.tools.attribute_defs import create_widget_for_attr_def
from openpype.tools import resources
from openpype.tools.flickcharm import FlickCharm
from openpype.tools.utils import (
@@ -1229,7 +1229,7 @@ class CreatorAttrsWidget(QtWidgets.QWidget):
Attributes are defined on creator so are dynamic. Their look and type is
based on attribute definitions that are defined in
`~/openpype/pipeline/lib/attribute_definitions.py` and their widget
representation in `~/openpype/widgets/attribute_defs/*`.
representation in `~/openpype/tools/attribute_defs/*`.
Widgets are disabled if context of instance is not valid.
@@ -1353,7 +1353,7 @@ class PublishPluginAttrsWidget(QtWidgets.QWidget):
Look and type of attributes is based on attribute definitions that are
defined in `~/openpype/pipeline/lib/attribute_definitions.py` and their
widget representation in `~/openpype/widgets/attribute_defs/*`.
widget representation in `~/openpype/tools/attribute_defs/*`.
Widgets are disabled if context of instance is not valid.

View file

@@ -3,10 +3,12 @@ import logging
from Qt import QtWidgets, QtCore, QtGui
import qargparse
import qtawesome
from openpype.style import (
get_objected_colors,
get_style_image_path
)
from openpype.lib.attribute_definitions import AbtractAttrDef
log = logging.getLogger(__name__)
@@ -401,8 +403,26 @@ class OptionalAction(QtWidgets.QWidgetAction):
def set_option_tip(self, options):
sep = "\n\n"
mak = (lambda opt: opt["name"] + " :\n " + opt["help"])
self.option_tip = sep.join(mak(opt) for opt in options)
if not options or not isinstance(options[0], AbtractAttrDef):
mak = (lambda opt: opt["name"] + " :\n " + opt["help"])
self.option_tip = sep.join(mak(opt) for opt in options)
return
option_items = []
for option in options:
option_lines = []
if option.label:
option_lines.append(
"{} ({}) :".format(option.label, option.key)
)
else:
option_lines.append("{} :".format(option.key))
if option.tooltip:
option_lines.append(" - {}".format(option.tooltip))
option_items.append("\n".join(option_lines))
self.option_tip = sep.join(option_items)
def on_option(self):
self.optioned = True

View file

@@ -3,7 +3,7 @@ from Qt import QtWidgets
from openpype import style
from openpype.lib import Logger
from openpype.pipeline import legacy_io
from openpype.widgets.attribute_defs import AttributeDefinitionsWidget
from openpype.tools.attribute_defs import AttributeDefinitionsWidget
class WorkfileBuildPlaceholderDialog(QtWidgets.QDialog):

View file

@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.14.7-nightly.2"
__version__ = "3.14.7-nightly.3"

View file

@@ -135,6 +135,12 @@ Profile may generate multiple outputs from a single input. Each output must defi
- set alpha to `0` to not use this option at all (in most cases the background stays black)
- any alpha other than `0` will draw the color as the background
- **`Additional filtering`**
- Profile filtering defines which group of output definitions is used, but individual output definitions may require more specific filters of their own.
- They may filter by subset name (regex can be used) or by publish families. Filtering by publish families is more complex, as it requires knowledge of the code base.
- Filtering by custom tags -> used to target output definitions from other extractors via settings (at the moment only the Nuke bake extractor can target outputs using custom tags).
- Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
- Filtering by input length. Input may be a video, a sequence of frames or a single image. For example, an `.mp4` output may be created only when the input is a video or a sequence, while a review `.png` is created only when the input is a single frame. In other cases the output should be created regardless of the input length (see the sketch below).
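
To make the last bullet concrete, here is a minimal, self-contained sketch of how the `single_frame_filter` value decides which output definitions survive for a given input. It mirrors the `_single_frame_filter` method added to `ExtractReview` in this commit; the hard-coded extension set (the real plugin uses `openpype.lib.transcoding.IMAGE_EXTENSIONS`) and the two example output definitions (taken from the default `png`/`h264` settings above) are simplifications for illustration only.

```python
import os

# Still-image extensions; a short subset is hard-coded here only for
# illustration. OpenPype uses openpype.lib.transcoding.IMAGE_EXTENSIONS.
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".exr", ".tif", ".tiff"}

# Relevant parts of the default output definitions added in this commit.
OUTPUT_DEFS = [
    {"name": "png", "filter": {"single_frame_filter": "single_frame"}},
    {"name": "h264", "filter": {"single_frame_filter": "multi_frame"}},
]


def filter_by_input_length(input_filepaths, output_defs):
    """Keep output definitions whose 'single_frame_filter' matches the input.

    "single_frame" outputs are kept only for a single still image,
    "multi_frame" outputs only for videos or frame sequences, and any
    other value (e.g. "everytime", the schema default) is always kept.
    """
    single_frame_image = (
        len(input_filepaths) == 1
        and os.path.splitext(input_filepaths[0])[-1].lower() in IMAGE_EXTENSIONS
    )
    filtered = []
    for output_def in output_defs:
        frame_filter = (output_def.get("filter") or {}).get("single_frame_filter")
        if not single_frame_image and frame_filter == "single_frame":
            continue
        if single_frame_image and frame_filter == "multi_frame":
            continue
        filtered.append(output_def)
    return filtered


print([d["name"] for d in filter_by_input_length(["review.png"], OUTPUT_DEFS)])
# ['png']  -> a single still image gets only the png output
print([d["name"] for d in filter_by_input_length(
    ["shot.0001.exr", "shot.0002.exr"], OUTPUT_DEFS)])
# ['h264'] -> a sequence (or a video file) gets only the h264 output
```

With this configuration a single still image produces only the `png` review, a sequence or video produces only the `h264` review, and any output definition left at `everytime` would be created in both cases.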
### IntegrateAssetNew