Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
Merge branch 'develop' into bugfix/delivery_python_api_backwards_compatibility_fix
This commit is contained in: commit e10bbad7ca
44 changed files with 417 additions and 156 deletions
.github/workflows/assign_pr_to_project.yml (vendored, normal file, +15 lines)
@@ -0,0 +1,15 @@
+name: 🔸Auto assign pr
+on:
+  pull_request:
+    types:
+      - opened
+
+jobs:
+  auto-assign-pr:
+    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@develop
+    with:
+      repo: "${{ github.repository }}"
+      project_id: 16
+      pull_request_number: ${{ github.event.pull_request.number }}
+    secrets:
+      token: ${{ secrets.YNPUT_BOT_TOKEN }}
.github/workflows/validate_pr_labels.yml (vendored, normal file, +18 lines)
@@ -0,0 +1,18 @@
+name: 🔎 Validate PR Labels
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - labeled
+      - unlabeled
+
+jobs:
+  validate-type-label:
+    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@develop
+    with:
+      repo: "${{ github.repository }}"
+      pull_request_number: ${{ github.event.pull_request.number }}
+      query_prefix: "type: "
+    secrets:
+      token: ${{ secrets.YNPUT_BOT_TOKEN }}
@@ -535,8 +535,8 @@ class AYONAddon(ABC):
         Implementation of this method is optional.

         Note:
-            The logic can be similar to logic in tray, but tray does not require
-            to be logged in.
+            The logic can be similar to logic in tray, but tray does not
+            require to be logged in.

         Args:
             process_context (ProcessContext): Context of child
@@ -146,7 +146,8 @@ def publish_report_viewer():
 @main_cli.command()
 @click.argument("output_path")
 @click.option("--project", help="Define project context")
-@click.option("--folder", help="Define folder in project (project must be set)")
+@click.option(
+    "--folder", help="Define folder in project (project must be set)")
 @click.option(
     "--strict",
     is_flag=True,
@@ -616,7 +616,9 @@ class EnumDef(AbstractAttrDef):
         return data

     @staticmethod
-    def prepare_enum_items(items: "EnumItemsInputType") -> List["EnumItemDict"]:
+    def prepare_enum_items(
+        items: "EnumItemsInputType"
+    ) -> List["EnumItemDict"]:
         """Convert items to unified structure.

        Output is a list where each item is dictionary with 'value'
@@ -276,12 +276,7 @@ class ASettingRegistry(ABC):
     @abstractmethod
     def _delete_item(self, name):
         # type: (str) -> None
-        """Delete item from settings.
-
-        Note:
-            see :meth:`ayon_core.lib.user_settings.ARegistrySettings.delete_item`
-
-        """
+        """Delete item from settings."""
         pass

     def __delitem__(self, name):
@@ -433,12 +428,7 @@ class IniSettingRegistry(ASettingRegistry):
         config.write(cfg)

     def _delete_item(self, name):
-        """Delete item from default section.
-
-        Note:
-            See :meth:`~ayon_core.lib.IniSettingsRegistry.delete_item_from_section`
-
-        """
+        """Delete item from default section."""
         self.delete_item_from_section("MAIN", name)

@@ -1283,12 +1283,16 @@ class CreateContext:

     @contextmanager
     def bulk_pre_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("pre_create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "pre_create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
     def bulk_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
@@ -1946,9 +1950,9 @@ class CreateContext:
         creator are just removed from context.

         Args:
-            instances (List[CreatedInstance]): Instances that should be removed.
-                Remove logic is done using creator, which may require to
-                do other cleanup than just remove instance from context.
+            instances (List[CreatedInstance]): Instances that should be
+                removed. Remove logic is done using creator, which may require
+                to do other cleanup than just remove instance from context.
             sender (Optional[str]): Sender of the event.

         """
@@ -1,5 +1,9 @@
 import ayon_api
-from ayon_core.lib import StringTemplate, filter_profiles, prepare_template_data
+from ayon_core.lib import (
+    StringTemplate,
+    filter_profiles,
+    prepare_template_data,
+)
 from ayon_core.settings import get_project_settings

 from .constants import DEFAULT_PRODUCT_TEMPLATE
@@ -429,11 +429,18 @@ class CreatedInstance:
     __immutable_keys = (
         "id",
         "instance_id",
         "product_type",
         "productType",
         "creator_identifier",
         "creator_attributes",
         "publish_attributes"
     )
+    # Keys that can be changed, but should not be removed from instance
+    __required_keys = {
+        "folderPath": None,
+        "task": None,
+        "productName": None,
+        "active": True,
+    }

     def __init__(
         self,
@@ -515,6 +522,9 @@ class CreatedInstance:
         if data:
             self._data.update(data)

+        for key, default in self.__required_keys.items():
+            self._data.setdefault(key, default)
+
         if not self._data.get("instance_id"):
             self._data["instance_id"] = str(uuid4())
@@ -567,6 +577,8 @@ class CreatedInstance:
         has_key = key in self._data
         output = self._data.pop(key, *args, **kwargs)
         if has_key:
+            if key in self.__required_keys:
+                self._data[key] = self.__required_keys[key]
             self._create_context.instance_values_changed(
                 self.id, {key: None}
             )
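Taken together, the three CreatedInstance hunks above establish a small contract: required keys are seeded with defaults on construction, and popping one resets it to its default instead of removing it. A minimal standalone sketch of that behaviour, with hypothetical data (the real logic lives in the class itself):

REQUIRED_KEYS = {"folderPath": None, "task": None, "active": True}

data = {"folderPath": "/assets/hero", "productName": "modelMain"}
for key, default in REQUIRED_KEYS.items():
    data.setdefault(key, default)      # seed missing required keys

removed = data.pop("folderPath")
if "folderPath" in REQUIRED_KEYS:
    data["folderPath"] = REQUIRED_KEYS["folderPath"]  # reset, do not remove

print(removed)             # /assets/hero
print(data["folderPath"])  # None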
@@ -222,6 +222,9 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     source_range = otio_clip.source_range
     available_range_rate = available_range.start_time.rate
     media_in = available_range.start_time.value
+    available_range_start_frame = (
+        available_range.start_time.to_frames()
+    )

     # Temporary.
     # Some AYON custom OTIO exporter were implemented with relative
@@ -230,7 +233,7 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     # while we are updating those.
     if (
         is_clip_from_media_sequence(otio_clip)
-        and otio_clip.available_range().start_time.to_frames() == media_ref.start_frame
+        and available_range_start_frame == media_ref.start_frame
         and source_range.start_time.to_frames() < media_ref.start_frame
     ):
         media_in = 0
@@ -303,8 +306,12 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
     rounded_av_rate = round(available_range_rate, 2)
     rounded_src_rate = round(source_range.start_time.rate, 2)
     if rounded_av_rate != rounded_src_rate:
-        conformed_src_in = source_range.start_time.rescaled_to(available_range_rate)
-        conformed_src_duration = source_range.duration.rescaled_to(available_range_rate)
+        conformed_src_in = source_range.start_time.rescaled_to(
+            available_range_rate
+        )
+        conformed_src_duration = source_range.duration.rescaled_to(
+            available_range_rate
+        )
         conformed_source_range = otio.opentime.TimeRange(
             start_time=conformed_src_in,
             duration=conformed_src_duration
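For context, rescaled_to() converts a RationalTime between frame rates while preserving the moment in time it represents, which is what the rate-conform above relies on. A minimal OpenTimelineIO sketch with hypothetical values:

import opentimelineio as otio

# Half a second expressed at 24 fps.
src_in = otio.opentime.RationalTime(12, rate=24.0)

# Conform to a 25 fps available range, as the hunk above does.
conformed = src_in.rescaled_to(25.0)
print(conformed.value, conformed.rate)  # 12.5 25.0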
@@ -18,13 +18,13 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:

     Example:
         >>> parse_ayon_entity_uri(
-        >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"  # noqa: E501
+        >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"
         >>> )
         {'project': 'test', 'folderPath': '/char/villain',
         'product': 'modelMain', 'version': 1,
         'representation': 'usd'}
         >>> parse_ayon_entity_uri(
-        >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"  # noqa: E501
+        >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"
         >>> )
         {'project': 'project', 'folderPath': '/folder',
         'product': 'renderMain', 'version': 3,
@@ -34,7 +34,7 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
         dict[str, Union[str, int]]: The individual key with their values as
             found in the ayon entity URI.

-    """
+    """  # noqa: E501

    if not (uri.startswith("ayon+entity://") or uri.startswith("ayon://")):
        return {}
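To illustrate how such entity URIs decompose, here is a hypothetical stand-in built on the standard library; it is not the ayon-core implementation, only a sketch of the same idea:

from urllib.parse import urlparse, parse_qs

def sketch_parse(uri: str) -> dict:
    # Hypothetical helper: project sits in the netloc, folder path in the
    # path, and the entity filters arrive as query parameters.
    parsed = urlparse(uri)
    query = {key: values[0] for key, values in parse_qs(parsed.query).items()}
    if "version" in query:
        query["version"] = int(query["version"])
    return {
        "project": parsed.netloc,
        "folderPath": parsed.path,
        **query,
    }

print(sketch_parse(
    "ayon://test/char/villain?product=modelMain&version=2&representation=usd"
))
# {'project': 'test', 'folderPath': '/char/villain', 'product': 'modelMain',
#  'version': 2, 'representation': 'usd'}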
@@ -7,8 +7,11 @@ from copy import deepcopy
 import attr
 import ayon_api
 import clique
-from ayon_core.lib import Logger
-from ayon_core.pipeline import get_current_project_name, get_representation_path
+from ayon_core.lib import Logger, collect_frames
+from ayon_core.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+)
 from ayon_core.pipeline.create import get_product_name
 from ayon_core.pipeline.farm.patterning import match_aov_pattern
 from ayon_core.pipeline.publish import KnownPublishError
@@ -295,11 +298,17 @@ def _add_review_families(families):
     return families


-def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
-                            skip_integration_repre_list,
-                            do_not_add_review,
-                            context,
-                            color_managed_plugin):
+def prepare_representations(
+    skeleton_data,
+    exp_files,
+    anatomy,
+    aov_filter,
+    skip_integration_repre_list,
+    do_not_add_review,
+    context,
+    color_managed_plugin,
+    frames_to_render=None
+):
     """Create representations for file sequences.

     This will return representations of expected files if they are not
@@ -315,6 +324,8 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
         skip_integration_repre_list (list): exclude specific extensions,
         do_not_add_review (bool): explicitly skip review
         color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
+        frames_to_render (str): implicit or explicit range of frames to render
+            this value is sent to Deadline in JobInfo.Frames
     Returns:
         list of representations

@@ -325,6 +336,14 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,

     log = Logger.get_logger("farm_publishing")

+    if frames_to_render is not None:
+        frames_to_render = _get_real_frames_to_render(frames_to_render)
+    else:
+        # Backwards compatibility for older logic
+        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_end = int(skeleton_data.get("frameEndHandle"))
+        frames_to_render = list(range(frame_start, frame_end + 1))
+
     # create representation for every collected sequence
     for collection in collections:
         ext = collection.tail.lstrip(".")
@@ -361,18 +380,21 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

-        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_start = frames_to_render[0]
+        frame_end = frames_to_render[-1]
         if skeleton_data.get("slate"):
             frame_start -= 1

+        files = _get_real_files_to_rendered(collection, frames_to_render)
+
         # explicitly disable review by user
         preview = preview and not do_not_add_review
         rep = {
             "name": ext,
             "ext": ext,
-            "files": [os.path.basename(f) for f in list(collection)],
+            "files": files,
             "frameStart": frame_start,
-            "frameEnd": int(skeleton_data.get("frameEndHandle")),
+            "frameEnd": frame_end,
             # If expectedFile are absolute, we need only filenames
             "stagingDir": staging,
             "fps": skeleton_data.get("fps"),
@@ -413,10 +435,13 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

+        files = _get_real_files_to_rendered(
+            [os.path.basename(remainder)], frames_to_render)
+
         rep = {
             "name": ext,
             "ext": ext,
-            "files": os.path.basename(remainder),
+            "files": files[0],
             "stagingDir": staging,
         }

@@ -453,6 +478,53 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
     return representations


+def _get_real_frames_to_render(frames):
+    """Returns list of frames that should be rendered.
+
+    Artists could want to selectively render only particular frames
+    """
+    frames_to_render = []
+    for frame in frames.split(","):
+        if "-" in frame:
+            splitted = frame.split("-")
+            frames_to_render.extend(
+                range(int(splitted[0]), int(splitted[1])+1))
+        else:
+            frames_to_render.append(int(frame))
+    frames_to_render.sort()
+    return frames_to_render
+
+
+def _get_real_files_to_rendered(collection, frames_to_render):
+    """Use expected files based on real frames_to_render.
+
+    Artists might explicitly set frames they want to render via Publisher UI.
+    This uses this value to filter out files
+    Args:
+        frames_to_render (list): of str '1001'
+    """
+    files = [os.path.basename(f) for f in list(collection)]
+    file_name, extracted_frame = list(collect_frames(files).items())[0]
+
+    if not extracted_frame:
+        return files
+
+    found_frame_pattern_length = len(extracted_frame)
+    normalized_frames_to_render = {
+        str(frame_to_render).zfill(found_frame_pattern_length)
+        for frame_to_render in frames_to_render
+    }
+
+    return [
+        file_name
+        for file_name in files
+        if any(
+            frame in file_name
+            for frame in normalized_frames_to_render
+        )
+    ]
+
+
 def create_instances_for_aov(instance, skeleton, aov_filter,
                              skip_integration_repre_list,
                              do_not_add_review):
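To make the frame-selection behaviour above concrete, here is a small standalone sketch (with the range parsing inlined rather than importing ayon-core) of what the two new helpers produce for an explicit frame string:

# Mirrors _get_real_frames_to_render from the hunk above, outside ayon-core.
def expand_frames(frames: str) -> list:
    result = []
    for part in frames.split(","):
        if "-" in part:
            first, last = part.split("-")
            result.extend(range(int(first), int(last) + 1))
        else:
            result.append(int(part))
    result.sort()
    return result

print(expand_frames("1001-1003,1010"))
# [1001, 1002, 1003, 1010]

# File filtering then keeps only files whose zero-padded frame token matches
# one of those frames, as _get_real_files_to_rendered does via collect_frames.
frames = {str(f).zfill(4) for f in expand_frames("1001-1003,1010")}
files = ["render.1001.exr", "render.1005.exr", "render.1010.exr"]
print([f for f in files if any(fr in f for fr in frames)])
# ['render.1001.exr', 'render.1010.exr']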
@@ -702,9 +774,14 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,

     project_settings = instance.context.data.get("project_settings")

     use_legacy_product_name = True
     try:
-        use_legacy_product_name = project_settings["core"]["tools"]["creator"]["use_legacy_product_names_for_renders"]  # noqa: E501
+        use_legacy_product_name = (
+            project_settings
+            ["core"]
+            ["tools"]
+            ["creator"]
+            ["use_legacy_product_names_for_renders"]
+        )
     except KeyError:
         warnings.warn(
             ("use_legacy_for_renders not found in project settings. "
@@ -720,7 +797,9 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
             dynamic_data=dynamic_data)

     else:
-        product_name, group_name = get_product_name_and_group_from_template(
+        (
+            product_name, group_name
+        ) = get_product_name_and_group_from_template(
             task_entity=instance.data["taskEntity"],
             project_name=instance.context.data["projectName"],
             host_name=instance.context.data["hostName"],
@@ -863,7 +942,7 @@ def _collect_expected_files_for_aov(files):
     # but we really expect only one collection.
     # Nothing else make sense.
     if len(cols) != 1:
-        raise ValueError("Only one image sequence type is expected.")  # noqa: E501
+        raise ValueError("Only one image sequence type is expected.")
     return list(cols[0])

@@ -205,9 +205,9 @@ class AYONPyblishPluginMixin:
         if not cls.__instanceEnabled__:
             return False

-        for _ in pyblish.logic.plugins_by_families(
-            [cls], [instance.product_type]
-        ):
+        families = [instance.product_type]
+        families.extend(instance.get("families", []))
+        for _ in pyblish.logic.plugins_by_families([cls], families):
             return True
         return False

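The change above widens plugin matching: a plugin is now considered active for an instance when its families list intersects the instance's extra families, not only its product type. A minimal sketch of that rule, assuming pyblish-base's pyblish.logic.plugins_by_families:

import pyblish.api
import pyblish.logic

class CollectReview(pyblish.api.InstancePlugin):
    # Hypothetical plugin that targets only the "review" family.
    families = ["review"]

families = ["render"]        # stands in for instance.product_type
families.extend(["review"])  # stands in for instance.data["families"]

print(bool(list(pyblish.logic.plugins_by_families([CollectReview], families))))
# True: the plugin is picked up thanks to the added families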
@@ -413,14 +413,16 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
             # Backwards compatible (Deprecated since 24/06/06)
             or instance.data.get("newAssetPublishing")
         ):
-            hierarchy = instance.data["hierarchy"]
-            anatomy_data["hierarchy"] = hierarchy
+            folder_path = instance.data["folderPath"]
+            parents = folder_path.lstrip("/").split("/")
+            folder_name = parents.pop(-1)

             parent_name = project_entity["name"]
-            if hierarchy:
-                parent_name = hierarchy.split("/")[-1]
+            hierarchy = ""
+            if parents:
+                parent_name = parents[-1]
+                hierarchy = "/".join(parents)

-            folder_name = instance.data["folderPath"].split("/")[-1]
             anatomy_data.update({
                 "asset": folder_name,
                 "hierarchy": hierarchy,
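A worked example of the folderPath decomposition introduced above, with hypothetical values:

project_name = "myProject"
folder_path = "/shots/sq01/sh010"

parents = folder_path.lstrip("/").split("/")   # ['shots', 'sq01', 'sh010']
folder_name = parents.pop(-1)                  # 'sh010'

parent_name = project_name
hierarchy = ""
if parents:
    parent_name = parents[-1]                  # 'sq01'
    hierarchy = "/".join(parents)              # 'shots/sq01'

print(folder_name, parent_name, hierarchy)
# sh010 sq01 shots/sq01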
@@ -13,8 +13,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin):

     label = "Collect Hierarchy"
     order = pyblish.api.CollectorOrder - 0.076
-    families = ["shot"]
-    hosts = ["resolve", "hiero", "flame"]
+    hosts = ["resolve", "hiero", "flame", "traypublisher"]

     def process(self, context):
         project_name = context.data["projectName"]
@@ -32,36 +31,50 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
             product_type = instance.data["productType"]
             families = instance.data["families"]

-            # exclude other families then self.families with intersection
-            if not set(self.families).intersection(
-                set(families + [product_type])
-            ):
+            # exclude other families then "shot" with intersection
+            if "shot" not in (families + [product_type]):
                 self.log.debug("Skipping not a shot: {}".format(families))
                 continue

-            # exclude if not masterLayer True
+            # Skip if is not a hero track
             if not instance.data.get("heroTrack"):
                 self.log.debug("Skipping not a shot from hero track")
                 continue

             shot_data = {
                 "entity_type": "folder",
-                # WARNING Default folder type is hardcoded
-                # suppose that all instances are Shots
-                "folder_type": "Shot",
+                # WARNING unless overwritten, default folder type is hardcoded
+                # to shot
+                "folder_type": instance.data.get("folder_type") or "Shot",
                 "tasks": instance.data.get("tasks") or {},
                 "comments": instance.data.get("comments", []),
-                "attributes": {
-                    "handleStart": instance.data["handleStart"],
-                    "handleEnd": instance.data["handleEnd"],
-                    "frameStart": instance.data["frameStart"],
-                    "frameEnd": instance.data["frameEnd"],
-                    "clipIn": instance.data["clipIn"],
-                    "clipOut": instance.data["clipOut"],
-                    "fps": instance.data["fps"],
-                    "resolutionWidth": instance.data["resolutionWidth"],
-                    "resolutionHeight": instance.data["resolutionHeight"],
-                    "pixelAspect": instance.data["pixelAspect"],
-                },
             }

+            shot_data["attributes"] = {}
+            SHOT_ATTRS = (
+                "handleStart",
+                "handleEnd",
+                "frameStart",
+                "frameEnd",
+                "clipIn",
+                "clipOut",
+                "fps",
+                "resolutionWidth",
+                "resolutionHeight",
+                "pixelAspect",
+            )
+            for shot_attr in SHOT_ATTRS:
+                attr_value = instance.data.get(shot_attr)
+                if attr_value is None:
+                    # Shot attribute might not be defined (e.g. CSV ingest)
+                    self.log.debug(
+                        "%s shot attribute is not defined for instance.",
+                        shot_attr
+                    )
+                    continue
+
+                shot_data["attributes"][shot_attr] = attr_value
+
             # Split by '/' for AYON where asset is a path
             name = instance.data["folderPath"].split("/")[-1]
             actual = {name: shot_data}
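A standalone sketch of the tolerant attribute collection above: attributes missing from the instance (a CSV ingest, for example) are skipped instead of raising KeyError. Data values here are hypothetical:

SHOT_ATTRS = ("handleStart", "handleEnd", "frameStart", "frameEnd", "fps")

instance_data = {"frameStart": 1001, "frameEnd": 1050, "fps": 25.0}

attributes = {}
for shot_attr in SHOT_ATTRS:
    attr_value = instance_data.get(shot_attr)
    if attr_value is None:
        continue  # the plugin logs and skips undefined attributes
    attributes[shot_attr] = attr_value

print(attributes)
# {'frameStart': 1001, 'frameEnd': 1050, 'fps': 25.0}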
@@ -29,6 +29,10 @@ class CollectOtioFrameRanges(pyblish.api.InstancePlugin):
            otio_range_with_handles
        )

+        if not instance.data.get("otioClip"):
+            self.log.debug("Skipping collect OTIO frame range.")
+            return
+
        # get basic variables
        otio_clip = instance.data["otioClip"]
        workfile_start = instance.data["workfileFrameStart"]
@@ -95,9 +95,42 @@ class CollectOtioReview(pyblish.api.InstancePlugin):
         instance.data["label"] = label + " (review)"
         instance.data["families"] += ["review", "ftrack"]
         instance.data["otioReviewClips"] = otio_review_clips

         self.log.info(
             "Creating review track: {}".format(otio_review_clips))

+        # get colorspace from metadata if available
+        # get metadata from first clip with media reference
+        r_otio_cl = next(
+            (
+                clip
+                for clip in otio_review_clips
+                if (
+                    isinstance(clip, otio.schema.Clip)
+                    and clip.media_reference
+                )
+            ),
+            None
+        )
+        if r_otio_cl is not None:
+            media_ref = r_otio_cl.media_reference
+            media_metadata = media_ref.metadata
+
+            # TODO: we might need some alternative method since
+            # native OTIO exports do not support ayon metadata
+            review_colorspace = media_metadata.get(
+                "ayon.source.colorspace"
+            )
+            if review_colorspace is None:
+                # Backwards compatibility for older scenes
+                review_colorspace = media_metadata.get(
+                    "openpype.source.colourtransform"
+                )
+            if review_colorspace:
+                instance.data["reviewColorspace"] = review_colorspace
+                self.log.info(
+                    "Review colorspace: {}".format(review_colorspace))
+
         self.log.debug(
             "_ instance.data: {}".format(pformat(instance.data)))
         self.log.debug(
@@ -10,12 +10,16 @@ import os
 import clique
 import pyblish.api

 from ayon_core.pipeline import publish
+from ayon_core.pipeline.publish import (
+    get_publish_template_name
+)


-class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
+class CollectOtioSubsetResources(
+    pyblish.api.InstancePlugin,
+    publish.ColormanagedPyblishPluginMixin
+):
     """Get Resources for a product version"""

     label = "Collect OTIO Subset Resources"
@@ -190,9 +194,13 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin):
             instance.data["originalDirname"] = self.staging_dir

         if repre:
+            colorspace = instance.data.get("colorspace")
+            # add colorspace data to representation
+            self.set_representation_colorspace(
+                repre, instance.context, colorspace)
+
             # add representation to instance data
             instance.data["representations"].append(repre)
             self.log.debug(">>>>>>>> {}".format(repre))

         self.log.debug(instance.data)
@@ -5,7 +5,6 @@ import pyblish.api

 from ayon_core.pipeline import publish
 from ayon_core.lib import (
-
     is_oiio_supported,
 )

@@ -154,12 +153,15 @@ class ExtractOIIOTranscode(publish.Extractor):

                 files_to_convert = self._translate_to_sequence(
                     files_to_convert)
+                self.log.debug("Files to convert: {}".format(files_to_convert))
                 for file_name in files_to_convert:
+                    self.log.debug("Transcoding file: `{}`".format(file_name))
                     input_path = os.path.join(original_staging_dir,
                                               file_name)
                     output_path = self._get_output_file_path(input_path,
                                                              new_staging_dir,
                                                              output_extension)
+
                     convert_colorspace(
                         input_path,
                         output_path,
@@ -37,6 +37,9 @@ class ExtractColorspaceData(publish.Extractor,
         # get colorspace settings
         context = instance.context

+        # colorspace name could be kept in instance.data
+        colorspace = instance.data.get("colorspace")
+
         # loop representations
         for representation in representations:
             # skip if colorspaceData is already at representation
@@ -44,5 +47,4 @@ class ExtractColorspaceData(publish.Extractor,
                 continue

             self.set_representation_colorspace(
-                representation, context
-            )
+                representation, context, colorspace)
@@ -22,7 +22,6 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):

     order = pyblish.api.ExtractorOrder - 0.01
     label = "Extract Hierarchy To AYON"
-    families = ["clip", "shot"]

     def process(self, context):
         if not context.data.get("hierarchyContext"):
@@ -26,7 +26,10 @@ from ayon_core.lib import (
 from ayon_core.pipeline import publish


-class ExtractOTIOReview(publish.Extractor):
+class ExtractOTIOReview(
+    publish.Extractor,
+    publish.ColormanagedPyblishPluginMixin
+):
     """
     Extract OTIO timeline into one concuted image sequence file.

@@ -71,14 +74,19 @@ class ExtractOTIOReview(publish.Extractor):
         # TODO: what if handles are different in `versionData`?
         handle_start = instance.data["handleStart"]
         handle_end = instance.data["handleEnd"]
-        otio_review_clips = instance.data["otioReviewClips"]
+        otio_review_clips = instance.data.get("otioReviewClips")
+
+        if otio_review_clips is None:
+            self.log.info(f"Instance `{instance}` has no otioReviewClips")

         # add plugin wide attributes
         self.representation_files = []
         self.used_frames = []
         self.workfile_start = int(instance.data.get(
             "workfileFrameStart", 1001)) - handle_start
-        self.padding = len(str(self.workfile_start))
+        # NOTE: padding has to be converted from
+        # end frame since start could be lower then 1000
+        self.padding = len(str(instance.data.get("frameEnd", 1001)))
         self.used_frames.append(self.workfile_start)
         self.to_width = instance.data.get(
             "resolutionWidth") or self.to_width
@@ -86,8 +94,10 @@ class ExtractOTIOReview(publish.Extractor):
             "resolutionHeight") or self.to_height

         # skip instance if no reviewable data available
-        if (not isinstance(otio_review_clips[0], otio.schema.Clip)) \
-                and (len(otio_review_clips) == 1):
+        if (
+            not isinstance(otio_review_clips[0], otio.schema.Clip)
+            and len(otio_review_clips) == 1
+        ):
             self.log.warning(
                 "Instance `{}` has nothing to process".format(instance))
             return
@@ -119,26 +129,33 @@ class ExtractOTIOReview(publish.Extractor):
                     res_data[key] = value
                     break

-        self.to_width, self.to_height = res_data["width"], res_data["height"]
-        self.log.debug("> self.to_width x self.to_height: {} x {}".format(
-            self.to_width, self.to_height
-        ))
+        self.to_width, self.to_height = (
+            res_data["width"], res_data["height"]
+        )
+        self.log.debug(
+            "> self.to_width x self.to_height:"
+            f" {self.to_width} x {self.to_height}"
+        )

         available_range = r_otio_cl.available_range()
+        available_range_start_frame = (
+            available_range.start_time.to_frames()
+        )
         processing_range = None
         self.actual_fps = available_range.duration.rate
         start = src_range.start_time.rescaled_to(self.actual_fps)
         duration = src_range.duration.rescaled_to(self.actual_fps)
+        src_frame_start = src_range.start_time.to_frames()

         # Temporary.
-        # Some AYON custom OTIO exporter were implemented with relative
-        # source range for image sequence. Following code maintain
-        # backward-compatibility by adjusting available range
+        # Some AYON custom OTIO exporter were implemented with
+        # relative source range for image sequence. Following code
+        # maintain backward-compatibility by adjusting available range
         # while we are updating those.
         if (
             is_clip_from_media_sequence(r_otio_cl)
-            and available_range.start_time.to_frames() == media_ref.start_frame
-            and src_range.start_time.to_frames() < media_ref.start_frame
+            and available_range_start_frame == media_ref.start_frame
+            and src_frame_start < media_ref.start_frame
         ):
             available_range = otio.opentime.TimeRange(
                 otio.opentime.RationalTime(0, rate=self.actual_fps),
@@ -168,7 +185,7 @@ class ExtractOTIOReview(publish.Extractor):
                 start -= clip_handle_start
                 duration += clip_handle_start
             elif len(otio_review_clips) > 1 \
-                and (index == len(otio_review_clips) - 1):
+                    and (index == len(otio_review_clips) - 1):
                 # more clips | last clip reframing with handle
                 duration += clip_handle_end
             elif len(otio_review_clips) == 1:
|
|||
# Extraction via FFmpeg.
|
||||
else:
|
||||
path = media_ref.target_url
|
||||
# Set extract range from 0 (FFmpeg ignores embedded timecode).
|
||||
# Set extract range from 0 (FFmpeg ignores
|
||||
# embedded timecode).
|
||||
extract_range = otio.opentime.TimeRange(
|
||||
otio.opentime.RationalTime(
|
||||
(
|
||||
|
|
@@ -263,6 +281,13 @@ class ExtractOTIOReview(publish.Extractor):

         # creating and registering representation
         representation = self._create_representation(start, duration)
+
+        # add colorspace data to representation
+        if colorspace := instance.data.get("reviewColorspace"):
+            self.set_representation_colorspace(
+                representation, instance.context, colorspace
+            )
+
         instance.data["representations"].append(representation)
         self.log.info("Adding representation: {}".format(representation))

@@ -397,7 +422,8 @@ class ExtractOTIOReview(publish.Extractor):
         to defined image sequence format.

         Args:
-            sequence (list): input dir path string, collection object, fps in list
+            sequence (list): input dir path string, collection object,
+                fps in list.
             video (list)[optional]: video_path string, otio_range in list
             gap (int)[optional]: gap duration
             end_offset (int)[optional]: offset gap frame start in frames
@@ -11,8 +11,8 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
     """Validate all product names are unique.

     This only validates whether the instances currently set to publish from
-    the workfile overlap one another for the folder + product they are publishing
-    to.
+    the workfile overlap one another for the folder + product they are
+    publishing to.

     This does not perform any check against existing publishes in the database
     since it is allowed to publish into existing products resulting in
@@ -72,8 +72,10 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
             # All is ok
             return

-        msg = ("Instance product names {} are not unique. ".format(non_unique) +
-               "Please remove or rename duplicates.")
+        msg = (
+            f"Instance product names {non_unique} are not unique."
+            " Please remove or rename duplicates."
+        )
         formatting_data = {
             "non_unique": ",".join(non_unique)
         }
@@ -79,7 +79,8 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
    - Datatypes explanation:
    <color> string format must be supported by FFmpeg.
        Examples: "#000000", "0x000000", "black"
-    <font> must be accesible by ffmpeg = name of registered Font in system or path to font file.
+    <font> must be accesible by ffmpeg = name of registered Font in system
+        or path to font file.
        Examples: "Arial", "C:/Windows/Fonts/arial.ttf"

    - Possible keys:
@@ -87,17 +88,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
    "bg_opacity" - Opacity of background (box around text) - <float, Range:0-1>
    "bg_color" - Background color - <color>
    "bg_padding" - Background padding in pixels - <int>
-    "x_offset" - offsets burnin vertically by entered pixels from border - <int>
-    "y_offset" - offsets burnin horizontally by entered pixels from border - <int>
+    "x_offset" - offsets burnin vertically by entered pixels
+        from border - <int>
+    "y_offset" - offsets burnin horizontally by entered pixels
+        from border - <int>
    - x_offset & y_offset should be set at least to same value as bg_padding!!
    "font" - Font Family for text - <font>
    "font_size" - Font size in pixels - <int>
    "font_color" - Color of text - <color>
    "frame_offset" - Default start frame - <int>
-        - required IF start frame is not set when using frames or timecode burnins
+        - required IF start frame is not set when using frames
+            or timecode burnins

-    On initializing class can be set General options through "options_init" arg.
-    General can be overridden when adding burnin
+    On initializing class can be set General options through
+    "options_init" arg.
+    General options can be overridden when adding burnin.

    '''
    TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
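An illustrative options dictionary assembled from the keys documented above; the values are hypothetical, and it would be passed as the "options_init" argument when constructing ModifiedBurnins:

options_init = {
    "bg_opacity": 0.5,
    "bg_color": "#000000",
    "bg_padding": 5,
    "x_offset": 5,      # keep at least equal to bg_padding
    "y_offset": 5,      # keep at least equal to bg_padding
    "font": "Arial",
    "font_size": 24,
    "font_color": "#FFFFFF",
    "frame_offset": 1001,
}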
@@ -190,6 +190,7 @@ def get_current_project_settings():
     project_name = os.environ.get("AYON_PROJECT_NAME")
     if not project_name:
         raise ValueError(
-            "Missing context project in environemt variable `AYON_PROJECT_NAME`."
+            "Missing context project in environment"
+            " variable `AYON_PROJECT_NAME`."
         )
     return get_project_settings(project_name)
@@ -104,7 +104,7 @@ class ProductNameValidator(RegularExpressionValidatorClass):

     def validate(self, text, pos):
         results = super(ProductNameValidator, self).validate(text, pos)
-        if results[0] == self.Invalid:
+        if results[0] == RegularExpressionValidatorClass.Invalid:
             self.invalid.emit(self.invalid_chars(text))
         return results

@@ -217,7 +217,9 @@ class ProductTypeDescriptionWidget(QtWidgets.QWidget):

         product_type_label = QtWidgets.QLabel(self)
         product_type_label.setObjectName("CreatorProductTypeLabel")
-        product_type_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
+        product_type_label.setAlignment(
+            QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft
+        )

         help_label = QtWidgets.QLabel(self)
         help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
@@ -21,9 +21,9 @@ except ImportError:

     Application action based on 'ApplicationManager' system.

-    Handling of applications in launcher is not ideal and should be completely
-    redone from scratch. This is just a temporary solution to keep backwards
-    compatibility with AYON launcher.
+    Handling of applications in launcher is not ideal and should be
+    completely redone from scratch. This is just a temporary solution
+    to keep backwards compatibility with AYON launcher.

     Todos:
         Move handling of errors to frontend.
@@ -517,7 +517,11 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
     def setItemCheckState(self, index, state):
         self.setItemData(index, state, QtCore.Qt.CheckStateRole)

-    def set_value(self, values: Optional[Iterable[Any]], role: Optional[int] = None):
+    def set_value(
+        self,
+        values: Optional[Iterable[Any]],
+        role: Optional[int] = None,
+    ):
         if role is None:
             role = self._value_role

@@ -222,6 +222,7 @@ class VersionDelegate(QtWidgets.QStyledItemDelegate):

         editor = VersionComboBox(product_id, parent)
         editor.setProperty("itemId", item_id)
+        editor.setFocusPolicy(QtCore.Qt.NoFocus)

         editor.value_changed.connect(self._on_editor_change)
         editor.destroyed.connect(self._on_destroy)
@@ -499,8 +499,10 @@ class ProductsModel(QtGui.QStandardItemModel):
             version_item.version_id
             for version_item in last_version_by_product_id.values()
         }
-        repre_count_by_version_id = self._controller.get_versions_representation_count(
-            project_name, version_ids
+        repre_count_by_version_id = (
+            self._controller.get_versions_representation_count(
+                project_name, version_ids
+            )
         )
         sync_availability_by_version_id = (
             self._controller.get_version_sync_availability(
@@ -296,7 +296,7 @@ class InstanceItem:
         return InstanceItem(
             instance.id,
             instance.creator_identifier,
-            instance.label,
+            instance.label or "N/A",
             instance.group_label,
             instance.product_type,
             instance.product_name,
@@ -339,7 +339,9 @@ class OverviewWidget(QtWidgets.QFrame):
         self._change_visibility_for_state()

         self._product_content_layout.addWidget(self._create_widget, 7)
         self._product_content_layout.addWidget(self._product_views_widget, 3)
-        self._product_content_layout.addWidget(self._product_attributes_wrap, 7)
+        self._product_content_layout.addWidget(
+            self._product_attributes_wrap, 7
+        )

     def _change_visibility_for_state(self):
         self._create_widget.setVisible(
@@ -214,8 +214,8 @@ class TasksCombobox(QtWidgets.QComboBox):
     Combobox gives ability to select only from intersection of task names for
     folder paths in selected instances.

-    If folder paths in selected instances does not have same tasks then combobox
-    will be empty.
+    If folder paths in selected instances does not have same tasks
+    then combobox will be empty.
     """
     value_changed = QtCore.Signal()

@@ -604,7 +604,7 @@ class VariantInputWidget(PlaceholderLineEdit):


 class GlobalAttrsWidget(QtWidgets.QWidget):
-    """Global attributes mainly to define context and product name of instances.
+    """Global attributes to define context and product name of instances.

     product name is or may be affected on context. Gives abiity to modify
     context and product name of instance. This change is not autopromoted but
@@ -22,8 +22,8 @@ class TasksModel(QtGui.QStandardItemModel):
     tasks with same names then model is empty too.

     Args:
-        controller (AbstractPublisherFrontend): Controller which handles creation and
-            publishing.
+        controller (AbstractPublisherFrontend): Controller which handles
+            creation and publishing.

     """
     def __init__(
@@ -998,7 +998,11 @@ class PublisherWindow(QtWidgets.QDialog):
             new_item["label"] = new_item.pop("creator_label")
             new_item["identifier"] = new_item.pop("creator_identifier")
             new_failed_info.append(new_item)
-        self.add_error_message_dialog(event["title"], new_failed_info, "Creator:")
+        self.add_error_message_dialog(
+            event["title"],
+            new_failed_info,
+            "Creator:"
+        )

     def _on_convertor_error(self, event):
         new_failed_info = []
@@ -366,8 +366,8 @@ class ContainersModel:
             try:
                 uuid.UUID(repre_id)
             except (ValueError, TypeError, AttributeError):
-                # Fake not existing representation id so container is shown in UI
-                # but as invalid
+                # Fake not existing representation id so container
+                # is shown in UI but as invalid
                 item.representation_id = invalid_ids_mapping.setdefault(
                     repre_id, uuid.uuid4().hex
                 )
@@ -556,9 +556,10 @@ class _IconsCache:
                 log.info("Didn't find icon \"{}\"".format(icon_name))

             elif used_variant != icon_name:
-                log.debug("Icon \"{}\" was not found \"{}\" is used instead".format(
-                    icon_name, used_variant
-                ))
+                log.debug(
+                    f"Icon \"{icon_name}\" was not found"
+                    f" \"{used_variant}\" is used instead"
+                )

         cls._qtawesome_cache[full_icon_name] = icon
         return icon
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'core' version."""
-__version__ = "1.0.7+dev"
+__version__ = "1.0.9+dev"
@@ -15,6 +15,6 @@ qtawesome = "0.7.3"
 aiohttp-middlewares = "^2.0.0"
 Click = "^8"
 OpenTimelineIO = "0.16.0"
-opencolorio = "^2.3.2"
+opencolorio = "^2.3.2,<2.4.0"
 Pillow = "9.5.0"
 websocket-client = ">=0.40.0,<2"
@@ -1,6 +1,6 @@
 name = "core"
 title = "Core"
-version = "1.0.7+dev"
+version = "1.0.9+dev"

 client_dir = "ayon_core"

@@ -5,7 +5,7 @@

 [tool.poetry]
 name = "ayon-core"
-version = "1.0.7+dev"
+version = "1.0.9+dev"
 description = ""
 authors = ["Ynput Team <team@ynput.io>"]
 readme = "README.md"
@@ -68,7 +68,7 @@ target-version = "py39"

 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
-select = ["E4", "E7", "E9", "F", "W"]
+select = ["E", "F", "W"]
 ignore = []

 # Allow fix for all enabled rules (when `--fix`) is provided.
@@ -358,7 +358,10 @@ class ExtractOIIOTranscodeOutputModel(BaseSettingsModel):
     custom_tags: list[str] = SettingsField(
         default_factory=list,
         title="Custom Tags",
-        description="Additional custom tags that will be added to the created representation."
+        description=(
+            "Additional custom tags that will be added"
+            " to the created representation."
+        )
     )

@@ -892,9 +895,11 @@ class PublishPuginsModel(BaseSettingsModel):
         default_factory=CollectFramesFixDefModel,
         title="Collect Frames to Fix",
     )
-    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = SettingsField(
-        default_factory=CollectUSDLayerContributionsModel,
-        title="Collect USD Layer Contributions",
+    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = (
+        SettingsField(
+            default_factory=CollectUSDLayerContributionsModel,
+            title="Collect USD Layer Contributions",
+        )
     )
     ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
         default_factory=ValidateBaseModel,
@@ -1214,7 +1219,9 @@ DEFAULT_PUBLISH_VALUES = {
         "TOP_RIGHT": "{anatomy[version]}",
         "BOTTOM_LEFT": "{username}",
         "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
         "filter": {
             "families": [],
             "tags": []
@@ -1240,7 +1247,9 @@ DEFAULT_PUBLISH_VALUES = {
         "TOP_RIGHT": "{anatomy[version]}",
         "BOTTOM_LEFT": "{username}",
         "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
         "filter": {
             "families": [],
             "tags": []
@@ -83,8 +83,8 @@ class CreatorToolModel(BaseSettingsModel):
     filter_creator_profiles: list[FilterCreatorProfile] = SettingsField(
         default_factory=list,
         title="Filter creator profiles",
-        description="Allowed list of creator labels that will be only shown if "
-                    "profile matches context."
+        description="Allowed list of creator labels that will be only shown"
+                    " if profile matches context."
     )

     @validator("product_types_smart_select")
@@ -426,7 +426,9 @@ DEFAULT_TOOLS_VALUES = {
             ],
             "task_types": [],
             "tasks": [],
-            "template": "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+            "template": (
+                "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+            )
         },
         {
             "product_types": [
@@ -130,19 +130,20 @@ def test_image_sequence_and_handles_out_of_range():

     expected = [
         # 5 head black frames generated from gap (991-995)
-        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",

         # 9 tail back frames generated from gap (1097-1105)
-        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 1097 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 1097 C:/result/output.%03d.jpg",

         # Report from source tiff (996-1096)
         # 996-1000 = additional 5 head frames
         # 1001-1095 = source range conformed to 25fps
         # 1096-1096 = additional 1 tail frames
         "/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
-        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996 C:/result/output.%03d.jpg"
+        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
+        f" C:/result/output.%03d.jpg"
     ]

     assert calls == expected
|
|||
|
||||
expected = [
|
||||
# 10 head black frames generated from gap (991-1000)
|
||||
"/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
|
||||
"stillimage -start_number 991 C:/result/output.%03d.jpg",
|
||||
"/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
|
||||
" -tune stillimage -start_number 991 C:/result/output.%03d.jpg",
|
||||
|
||||
# source range + 10 tail frames
|
||||
# duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
|
||||
"/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -start_number 1001 "
|
||||
"C:/result/output.%03d.jpg"
|
||||
"/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
|
||||
" -start_number 1001 C:/result/output.%03d.jpg"
|
||||
]
|
||||
|
||||
assert calls == expected
|
||||
|
|
@ -208,7 +209,8 @@ def test_short_movie_tail_gap_handles():
|
|||
# 10 head frames + source range
|
||||
# duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
|
||||
"/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
|
||||
"C:\\data\\qt_no_tc_24fps.mov -start_number 991 C:/result/output.%03d.jpg"
|
||||
"C:\\data\\qt_no_tc_24fps.mov -start_number 991"
|
||||
" C:/result/output.%03d.jpg"
|
||||
]
|
||||
|
||||
assert calls == expected
|
||||
|
|
@ -234,10 +236,12 @@ def test_multiple_review_clips_no_gap():
|
|||
|
||||
expected = [
|
||||
# 10 head black frames generated from gap (991-1000)
|
||||
'/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune '
|
||||
'/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
|
||||
' -i color=c=black:s=1280x720 -tune '
|
||||
'stillimage -start_number 991 C:/result/output.%03d.jpg',
|
||||
|
||||
# Alternance 25fps tiff sequence and 24fps exr sequence for 100 frames each
|
||||
# Alternance 25fps tiff sequence and 24fps exr sequence
|
||||
# for 100 frames each
|
||||
'/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
|
||||
f'C:\\no_tc{os.sep}output.%04d.tif '
|
||||
'-start_number 1001 C:/result/output.%03d.jpg',
|
||||
|
|
@ -315,7 +319,8 @@ def test_multiple_review_clips_with_gap():
|
|||
|
||||
expected = [
|
||||
# Gap on review track (12 frames)
|
||||
'/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi -i color=c=black:s=1280x720 -tune '
|
||||
'/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
|
||||
' -i color=c=black:s=1280x720 -tune '
|
||||
'stillimage -start_number 991 C:/result/output.%03d.jpg',
|
||||
|
||||
'/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
|
||||
|
|
|
|||
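The gap durations in these test expectations follow directly from frame count divided by frame rate, which is what ffmpeg's -t argument encodes here. A quick sanity check (not part of the test suite):

# Gap duration passed to ffmpeg's -t is frames / fps.
print(5 / 25.0)    # 0.2  -> "-t 0.2"  (5-frame head gap at 25 fps)
print(9 / 25.0)    # 0.36 -> "-t 0.36" (9-frame tail gap at 25 fps)
print(12 / 24.0)   # 0.5  -> "-t 0.5"  (12-frame gap at 24 fps)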