mirror of https://github.com/ynput/ayon-core.git, synced 2026-01-01 16:34:53 +01:00

commit 0074da126d
Merge remote-tracking branch 'origin/develop' into feature/911-new-traits-based-integrator

39 changed files with 374 additions and 144 deletions
.github/workflows/assign_pr_to_project.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
+name: 🔸Auto assign pr
+on:
+  workflow_dispatch:
+    inputs:
+      pr_number:
+        type: number
+        description: "Run workflow for this PR number"
+        required: true
+      project_id:
+        type: number
+        description: "Github Project Number"
+        required: true
+        default: 16
+  pull_request:
+    types:
+      - opened
+
+jobs:
+  auto-assign-pr:
+    if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
+    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@main
+    with:
+      repo: "${{ github.repository }}"
+      project_id: "${{ inputs.project_id }}"
+      pull_request_number: "${{ github.event.pull_request.number || inputs.pr_number }}"
+    secrets:
+      token: ${{ secrets.YNPUT_BOT_TOKEN }}
.github/workflows/validate_pr_labels.yml (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
+name: 🔎 Validate PR Labels
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - labeled
+      - unlabeled
+
+jobs:
+  validate-type-label:
+    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@main
+    with:
+      repo: "${{ github.repository }}"
+      pull_request_number: ${{ github.event.pull_request.number }}
+      query_prefix: "type: "
+    secrets:
+      token: ${{ secrets.YNPUT_BOT_TOKEN }}
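Presumably this makes every PR require at least one label starting with the "type: " prefix (e.g. a hypothetical "type: bug"); the actual check lives in the reusable workflow in ynput/ops-repo-automation.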
@@ -535,8 +535,8 @@ class AYONAddon(ABC):
         Implementation of this method is optional.

         Note:
-            The logic can be similar to logic in tray, but tray does not require
-            to be logged in.
+            The logic can be similar to logic in tray, but tray does not
+            require to be logged in.

         Args:
             process_context (ProcessContext): Context of child
@@ -146,7 +146,8 @@ def publish_report_viewer():
 @main_cli.command()
 @click.argument("output_path")
 @click.option("--project", help="Define project context")
-@click.option("--folder", help="Define folder in project (project must be set)")
+@click.option(
+    "--folder", help="Define folder in project (project must be set)")
 @click.option(
     "--strict",
     is_flag=True,
@@ -616,7 +616,9 @@ class EnumDef(AbstractAttrDef):
         return data

     @staticmethod
-    def prepare_enum_items(items: "EnumItemsInputType") -> List["EnumItemDict"]:
+    def prepare_enum_items(
+        items: "EnumItemsInputType"
+    ) -> List["EnumItemDict"]:
         """Convert items to unified structure.

         Output is a list where each item is dictionary with 'value'
@@ -276,12 +276,7 @@ class ASettingRegistry(ABC):
     @abstractmethod
     def _delete_item(self, name):
         # type: (str) -> None
-        """Delete item from settings.
-
-        Note:
-            see :meth:`ayon_core.lib.user_settings.ARegistrySettings.delete_item`
-
-        """
+        """Delete item from settings."""
         pass

     def __delitem__(self, name):
@@ -433,12 +428,7 @@ class IniSettingRegistry(ASettingRegistry):
         config.write(cfg)

     def _delete_item(self, name):
-        """Delete item from default section.
-
-        Note:
-            See :meth:`~ayon_core.lib.IniSettingsRegistry.delete_item_from_section`
-
-        """
+        """Delete item from default section."""
         self.delete_item_from_section("MAIN", name)
@@ -1283,12 +1283,16 @@ class CreateContext:

     @contextmanager
     def bulk_pre_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("pre_create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "pre_create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
     def bulk_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
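For readers unfamiliar with the pattern these wrappers reformat, a generic sketch of a bulk-change context manager (not the real CreateContext API) could look like this:

from contextlib import contextmanager

# Callers append changes inside the block; a single notification fires
# once when the block exits instead of one event per change.
@contextmanager
def bulk_change(collected):
    yield collected
    print("emitting one change event for:", collected)

with bulk_change([]) as bulk_info:
    bulk_info.append("instance-1")
    bulk_info.append("instance-2")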
@@ -1946,9 +1950,9 @@ class CreateContext:
         creator are just removed from context.

         Args:
-            instances (List[CreatedInstance]): Instances that should be removed.
-                Remove logic is done using creator, which may require to
-                do other cleanup than just remove instance from context.
+            instances (List[CreatedInstance]): Instances that should be
+                removed. Remove logic is done using creator, which may require
+                to do other cleanup than just remove instance from context.
             sender (Optional[str]): Sender of the event.

         """
@@ -1,5 +1,9 @@
 import ayon_api
-from ayon_core.lib import StringTemplate, filter_profiles, prepare_template_data
+from ayon_core.lib import (
+    StringTemplate,
+    filter_profiles,
+    prepare_template_data,
+)
 from ayon_core.settings import get_project_settings

 from .constants import DEFAULT_PRODUCT_TEMPLATE
@@ -387,7 +387,7 @@ def get_representations_delivery_template_data(
     # convert representation entity. Fixed in 'ayon_api' 1.0.10.
     if isinstance(template_data, str):
         con = ayon_api.get_server_api_connection()
-        repre_entity = con._representation_conversion(repre_entity)
+        con._representation_conversion(repre_entity)
         template_data = repre_entity["context"]

     template_data.update(copy.deepcopy(general_template_data))
@@ -222,6 +222,9 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     source_range = otio_clip.source_range
     available_range_rate = available_range.start_time.rate
     media_in = available_range.start_time.value
+    available_range_start_frame = (
+        available_range.start_time.to_frames()
+    )

     # Temporary.
     # Some AYON custom OTIO exporter were implemented with relative
@@ -230,7 +233,7 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     # while we are updating those.
     if (
         is_clip_from_media_sequence(otio_clip)
-        and otio_clip.available_range().start_time.to_frames() == media_ref.start_frame
+        and available_range_start_frame == media_ref.start_frame
         and source_range.start_time.to_frames() < media_ref.start_frame
     ):
         media_in = 0
@@ -303,8 +306,12 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
     rounded_av_rate = round(available_range_rate, 2)
     rounded_src_rate = round(source_range.start_time.rate, 2)
     if rounded_av_rate != rounded_src_rate:
-        conformed_src_in = source_range.start_time.rescaled_to(available_range_rate)
-        conformed_src_duration = source_range.duration.rescaled_to(available_range_rate)
+        conformed_src_in = source_range.start_time.rescaled_to(
+            available_range_rate
+        )
+        conformed_src_duration = source_range.duration.rescaled_to(
+            available_range_rate
+        )
         conformed_source_range = otio.opentime.TimeRange(
             start_time=conformed_src_in,
             duration=conformed_src_duration
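As a small illustration of the rate conform performed above (numbers are made up): rescaled_to changes the frame value with the rate while keeping the wall-clock time identical.

import opentimelineio as otio

# Frame 48 at 24 fps is 2.0 seconds; re-expressed at 25 fps it becomes
# frame 50, still 2.0 seconds.
src_in = otio.opentime.RationalTime(48, 24)
conformed = src_in.rescaled_to(25)
print(conformed.value, conformed.rate)                # 50.0 25.0
print(src_in.to_seconds() == conformed.to_seconds())  # True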
@@ -18,13 +18,13 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:

     Example:
         >>> parse_ayon_entity_uri(
-        >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"  # noqa: E501
+        >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"
         >>> )
         {'project': 'test', 'folderPath': '/char/villain',
          'product': 'modelMain', 'version': 1,
          'representation': 'usd'}
         >>> parse_ayon_entity_uri(
-        >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"  # noqa: E501
+        >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"
         >>> )
         {'project': 'project', 'folderPath': '/folder',
          'product': 'renderMain', 'version': 3,
@@ -34,7 +34,7 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
         dict[str, Union[str, int]]: The individual key with their values as
             found in the ayon entity URI.

-    """
+    """  # noqa: E501

     if not (uri.startswith("ayon+entity://") or uri.startswith("ayon://")):
         return {}
@@ -7,8 +7,11 @@ from copy import deepcopy
 import attr
 import ayon_api
 import clique
-from ayon_core.lib import Logger
-from ayon_core.pipeline import get_current_project_name, get_representation_path
+from ayon_core.lib import Logger, collect_frames
+from ayon_core.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+)
 from ayon_core.pipeline.create import get_product_name
 from ayon_core.pipeline.farm.patterning import match_aov_pattern
 from ayon_core.pipeline.publish import KnownPublishError
@@ -295,11 +298,17 @@ def _add_review_families(families):
     return families


-def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
-                            skip_integration_repre_list,
-                            do_not_add_review,
-                            context,
-                            color_managed_plugin):
+def prepare_representations(
+    skeleton_data,
+    exp_files,
+    anatomy,
+    aov_filter,
+    skip_integration_repre_list,
+    do_not_add_review,
+    context,
+    color_managed_plugin,
+    frames_to_render=None
+):
     """Create representations for file sequences.

     This will return representations of expected files if they are not
@@ -315,6 +324,8 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
         skip_integration_repre_list (list): exclude specific extensions,
         do_not_add_review (bool): explicitly skip review
         color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
+        frames_to_render (str): implicit or explicit range of frames to render
+            this value is sent to Deadline in JobInfo.Frames
     Returns:
         list of representations
@@ -325,6 +336,14 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,

     log = Logger.get_logger("farm_publishing")

+    if frames_to_render is not None:
+        frames_to_render = _get_real_frames_to_render(frames_to_render)
+    else:
+        # Backwards compatibility for older logic
+        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_end = int(skeleton_data.get("frameEndHandle"))
+        frames_to_render = list(range(frame_start, frame_end + 1))
+
     # create representation for every collected sequence
     for collection in collections:
         ext = collection.tail.lstrip(".")
@@ -361,18 +380,21 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

-        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_start = frames_to_render[0]
+        frame_end = frames_to_render[-1]
         if skeleton_data.get("slate"):
             frame_start -= 1

+        files = _get_real_files_to_rendered(collection, frames_to_render)
+
         # explicitly disable review by user
         preview = preview and not do_not_add_review
         rep = {
             "name": ext,
             "ext": ext,
-            "files": [os.path.basename(f) for f in list(collection)],
+            "files": files,
             "frameStart": frame_start,
-            "frameEnd": int(skeleton_data.get("frameEndHandle")),
+            "frameEnd": frame_end,
             # If expectedFile are absolute, we need only filenames
             "stagingDir": staging,
             "fps": skeleton_data.get("fps"),
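A hypothetical illustration of the file filtering applied here: the requested frames are zero-padded to the width found in the collected file names before the substring membership test (file names and frames below are made up).

files = ["shot.1001.exr", "shot.1002.exr", "shot.1003.exr"]
frames_to_render = [1001, 1003]
pad_length = 4  # width of the frame token in the names above
wanted = {str(frame).zfill(pad_length) for frame in frames_to_render}
print([name for name in files if any(token in name for token in wanted)])
# -> ['shot.1001.exr', 'shot.1003.exr']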
@@ -413,10 +435,13 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

+        files = _get_real_files_to_rendered(
+            [os.path.basename(remainder)], frames_to_render)
+
         rep = {
             "name": ext,
             "ext": ext,
-            "files": os.path.basename(remainder),
+            "files": files[0],
             "stagingDir": staging,
         }
@@ -453,6 +478,53 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
     return representations


+def _get_real_frames_to_render(frames):
+    """Returns list of frames that should be rendered.
+
+    Artists could want to selectively render only particular frames
+    """
+    frames_to_render = []
+    for frame in frames.split(","):
+        if "-" in frame:
+            splitted = frame.split("-")
+            frames_to_render.extend(
+                range(int(splitted[0]), int(splitted[1])+1))
+        else:
+            frames_to_render.append(int(frame))
+    frames_to_render.sort()
+    return frames_to_render
+
+
+def _get_real_files_to_rendered(collection, frames_to_render):
+    """Use expected files based on real frames_to_render.
+
+    Artists might explicitly set frames they want to render via Publisher UI.
+    This uses this value to filter out files
+    Args:
+        frames_to_render (list): of str '1001'
+    """
+    files = [os.path.basename(f) for f in list(collection)]
+    file_name, extracted_frame = list(collect_frames(files).items())[0]
+
+    if not extracted_frame:
+        return files
+
+    found_frame_pattern_length = len(extracted_frame)
+    normalized_frames_to_render = {
+        str(frame_to_render).zfill(found_frame_pattern_length)
+        for frame_to_render in frames_to_render
+    }
+
+    return [
+        file_name
+        for file_name in files
+        if any(
+            frame in file_name
+            for frame in normalized_frames_to_render
+        )
+    ]
+
+
 def create_instances_for_aov(instance, skeleton, aov_filter,
                              skip_integration_repre_list,
                              do_not_add_review):
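A standalone rerun of the parsing logic added in _get_real_frames_to_render: tokens are single frames or inclusive "start-end" ranges, comma-separated (the input string below is made up).

def parse_frames_to_render(frames):
    frames_to_render = []
    for token in frames.split(","):
        if "-" in token:
            start, end = token.split("-")
            frames_to_render.extend(range(int(start), int(end) + 1))
        else:
            frames_to_render.append(int(token))
    frames_to_render.sort()
    return frames_to_render

print(parse_frames_to_render("1001-1003,1010"))
# -> [1001, 1002, 1003, 1010]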
@@ -702,9 +774,14 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,

     project_settings = instance.context.data.get("project_settings")

-    use_legacy_product_name = True
     try:
-        use_legacy_product_name = project_settings["core"]["tools"]["creator"]["use_legacy_product_names_for_renders"]  # noqa: E501
+        use_legacy_product_name = (
+            project_settings
+            ["core"]
+            ["tools"]
+            ["creator"]
+            ["use_legacy_product_names_for_renders"]
+        )
     except KeyError:
         warnings.warn(
             ("use_legacy_for_renders not found in project settings. "
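A standalone sketch of the chained settings lookup above, with a hypothetical settings dict: a missing key anywhere in the chain raises KeyError, which the caller catches to fall back to legacy behaviour.

project_settings = {
    "core": {"tools": {"creator": {
        "use_legacy_product_names_for_renders": False,
    }}},
}
try:
    use_legacy = (
        project_settings["core"]["tools"]["creator"]
        ["use_legacy_product_names_for_renders"]
    )
except KeyError:
    use_legacy = True  # illustrative fallback only
print(use_legacy)  # False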
@@ -720,7 +797,9 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
             dynamic_data=dynamic_data)

     else:
-        product_name, group_name = get_product_name_and_group_from_template(
+        (
+            product_name, group_name
+        ) = get_product_name_and_group_from_template(
             task_entity=instance.data["taskEntity"],
             project_name=instance.context.data["projectName"],
             host_name=instance.context.data["hostName"],
@@ -863,7 +942,7 @@ def _collect_expected_files_for_aov(files):
     # but we really expect only one collection.
     # Nothing else make sense.
     if len(cols) != 1:
-        raise ValueError("Only one image sequence type is expected.")  # noqa: E501
+        raise ValueError("Only one image sequence type is expected.")
     return list(cols[0])
@@ -3,6 +3,7 @@ from .constants import (
     ValidateContentsOrder,
     ValidateSceneOrder,
     ValidateMeshOrder,
+    FARM_JOB_ENV_DATA_KEY,
 )

 from .publish_plugins import (
@@ -59,6 +60,7 @@ __all__ = (
     "ValidateContentsOrder",
     "ValidateSceneOrder",
     "ValidateMeshOrder",
+    "FARM_JOB_ENV_DATA_KEY",

     "AbstractMetaInstancePlugin",
     "AbstractMetaContextPlugin",
@@ -9,3 +9,5 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
 DEFAULT_PUBLISH_TEMPLATE = "default"
 DEFAULT_HERO_PUBLISH_TEMPLATE = "default"
 TRANSIENT_DIR_TEMPLATE = "default"
+
+FARM_JOB_ENV_DATA_KEY: str = "farmJobEnv"
@@ -413,14 +413,16 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
             # Backwards compatible (Deprecated since 24/06/06)
             or instance.data.get("newAssetPublishing")
         ):
-            hierarchy = instance.data["hierarchy"]
-            anatomy_data["hierarchy"] = hierarchy
+            folder_path = instance.data["folderPath"]
+            parents = folder_path.lstrip("/").split("/")
+            folder_name = parents.pop(-1)

             parent_name = project_entity["name"]
-            if hierarchy:
-                parent_name = hierarchy.split("/")[-1]
+            hierarchy = ""
+            if parents:
+                parent_name = parents[-1]
+                hierarchy = "/".join(parents)

-            folder_name = instance.data["folderPath"].split("/")[-1]
             anatomy_data.update({
                 "asset": folder_name,
                 "hierarchy": hierarchy,
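A worked example of the new hierarchy derivation, using a hypothetical folder path: the last path component becomes the folder name, the remaining parents become "hierarchy", and the direct parent (or the project name when there is none) becomes the parent name.

folder_path = "/shots/sq01/sh010"  # illustrative path
parents = folder_path.lstrip("/").split("/")
folder_name = parents.pop(-1)
parent_name = "demo_project"  # stands in for the project-name fallback
hierarchy = ""
if parents:
    parent_name = parents[-1]
    hierarchy = "/".join(parents)
print(folder_name, parent_name, hierarchy)  # sh010 sq01 shots/sq01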
@@ -0,0 +1,43 @@
+import os
+
+import pyblish.api
+
+from ayon_core.lib import get_ayon_username
+from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY
+
+
+class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
+    """Collect set of environment variables to submit with deadline jobs"""
+
+    order = pyblish.api.CollectorOrder - 0.45
+    label = "AYON core Farm Environment Variables"
+    targets = ["local"]
+
+    def process(self, context):
+        env = context.data.setdefault(FARM_JOB_ENV_DATA_KEY, {})
+
+        # Disable colored logs on farm
+        for key, value in (
+            ("AYON_LOG_NO_COLORS", "1"),
+            ("AYON_PROJECT_NAME", context.data["projectName"]),
+            ("AYON_FOLDER_PATH", context.data.get("folderPath")),
+            ("AYON_TASK_NAME", context.data.get("task")),
+            # NOTE we should use 'context.data["user"]' but that has higher
+            # order.
+            ("AYON_USERNAME", get_ayon_username()),
+        ):
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
+
+        for key in [
+            "AYON_BUNDLE_NAME",
+            "AYON_DEFAULT_SETTINGS_VARIANT",
+            "AYON_IN_TESTS",
+            # NOTE Not sure why workdir is needed?
+            "AYON_WORKDIR",
+        ]:
+            value = os.getenv(key)
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
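A sketch of the accumulation pattern this new plugin relies on: collectors share one mapping under the same context key, so repeated setdefault calls return the same dict ("context_data" below stands in for the pyblish context.data).

context_data = {}
env = context_data.setdefault("farmJobEnv", {})
env["AYON_LOG_NO_COLORS"] = "1"
same_env = context_data.setdefault("farmJobEnv", {})
same_env["AYON_PROJECT_NAME"] = "demo_project"  # illustrative value
print(env is same_env)            # True
print(context_data["farmJobEnv"])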
@@ -43,7 +43,8 @@ class CollectHierarchy(pyblish.api.ContextPlugin):

         shot_data = {
             "entity_type": "folder",
-            # WARNING unless overwritten, default folder type is hardcoded to shot
+            # WARNING unless overwritten, default folder type is hardcoded
+            # to shot
             "folder_type": instance.data.get("folder_type") or "Shot",
             "tasks": instance.data.get("tasks") or {},
             "comments": instance.data.get("comments", []),
@@ -71,20 +71,18 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             name = inst.data["folderPath"]

             recycling_file = [f for f in created_files if name in f]
-            # frameranges
-            timeline_in_h = inst.data["clipInH"]
-            timeline_out_h = inst.data["clipOutH"]
-            fps = inst.data["fps"]
-
-            # create duration
-            duration = (timeline_out_h - timeline_in_h) + 1
+            audio_clip = inst.data["otioClip"]
+            audio_range = audio_clip.range_in_parent()
+            duration = audio_range.duration.to_frames()

             # ffmpeg generate new file only if doesn't exists already
             if not recycling_file:
-                # convert to seconds
-                start_sec = float(timeline_in_h / fps)
-                duration_sec = float(duration / fps)
+                parent_track = audio_clip.parent()
+                parent_track_start = parent_track.range_in_parent().start_time
+                relative_start_time = (
+                    audio_range.start_time - parent_track_start)
+                start_sec = relative_start_time.to_seconds()
+                duration_sec = audio_range.duration.to_seconds()

                 # temp audio file
                 audio_fpath = self.create_temp_file(name)
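Illustrative numbers for the new start-time math above: a clip starting 48 frames into the timeline, on a track that itself starts at frame 24, should feed ffmpeg a start 24 frames (1 second at 24 fps) in.

import opentimelineio as otio

rate = 24.0
clip_start = otio.opentime.RationalTime(48, rate)
parent_track_start = otio.opentime.RationalTime(24, rate)
relative_start_time = clip_start - parent_track_start
print(relative_start_time.to_seconds())  # 1.0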
@@ -163,34 +161,36 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):

         output = []
         # go trough all audio tracks
-        for otio_track in otio_timeline.tracks:
-            if "Audio" not in otio_track.kind:
-                continue
+        for otio_track in otio_timeline.audio_tracks():
             self.log.debug("_" * 50)
             playhead = 0
             for otio_clip in otio_track:
                 self.log.debug(otio_clip)
-                if isinstance(otio_clip, otio.schema.Gap):
-                    playhead += otio_clip.source_range.duration.value
-                elif isinstance(otio_clip, otio.schema.Clip):
-                    start = otio_clip.source_range.start_time.value
-                    duration = otio_clip.source_range.duration.value
-                    fps = otio_clip.source_range.start_time.rate
+                if (isinstance(otio_clip, otio.schema.Clip) and
+                        not otio_clip.media_reference.is_missing_reference):
+                    media_av_start = otio_clip.available_range().start_time
+                    clip_start = otio_clip.source_range.start_time
+                    fps = clip_start.rate
+                    conformed_av_start = media_av_start.rescaled_to(fps)
+                    # ffmpeg ignores embedded tc
+                    start = clip_start - conformed_av_start
+                    duration = otio_clip.source_range.duration
                     media_path = otio_clip.media_reference.target_url
                     input = {
                         "mediaPath": media_path,
                         "delayFrame": playhead,
-                        "startFrame": start,
-                        "durationFrame": duration,
+                        "startFrame": start.to_frames(),
+                        "durationFrame": duration.to_frames(),
                         "delayMilSec": int(float(playhead / fps) * 1000),
-                        "startSec": float(start / fps),
-                        "durationSec": float(duration / fps),
-                        "fps": fps
+                        "startSec": start.to_seconds(),
+                        "durationSec": duration.to_seconds(),
+                        "fps": float(fps)
                     }
                     if input not in output:
                         output.append(input)
                     self.log.debug("__ input: {}".format(input))
-                    playhead += otio_clip.source_range.duration.value
+
+                playhead += otio_clip.source_range.duration.value

         return output
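A minimal sketch of the iteration change above: Timeline.audio_tracks() already filters tracks by kind, so the manual 'if "Audio" not in otio_track.kind' guard becomes unnecessary.

import opentimelineio as otio

timeline = otio.schema.Timeline(name="demo")
video = otio.schema.Track(name="V1", kind=otio.schema.TrackKind.Video)
audio = otio.schema.Track(name="A1", kind=otio.schema.TrackKind.Audio)
timeline.tracks.extend([video, audio])
print([track.name for track in timeline.audio_tracks()])  # ['A1']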
@@ -129,26 +129,33 @@ class ExtractOTIOReview(
                 res_data[key] = value
                 break

-        self.to_width, self.to_height = res_data["width"], res_data["height"]
-        self.log.debug("> self.to_width x self.to_height: {} x {}".format(
-            self.to_width, self.to_height
-        ))
+        self.to_width, self.to_height = (
+            res_data["width"], res_data["height"]
+        )
+        self.log.debug(
+            "> self.to_width x self.to_height:"
+            f" {self.to_width} x {self.to_height}"
+        )

         available_range = r_otio_cl.available_range()
+        available_range_start_frame = (
+            available_range.start_time.to_frames()
+        )
         processing_range = None
         self.actual_fps = available_range.duration.rate
         start = src_range.start_time.rescaled_to(self.actual_fps)
         duration = src_range.duration.rescaled_to(self.actual_fps)
+        src_frame_start = src_range.start_time.to_frames()

         # Temporary.
-        # Some AYON custom OTIO exporter were implemented with relative
-        # source range for image sequence. Following code maintain
-        # backward-compatibility by adjusting available range
+        # Some AYON custom OTIO exporter were implemented with
+        # relative source range for image sequence. Following code
+        # maintain backward-compatibility by adjusting available range
         # while we are updating those.
         if (
             is_clip_from_media_sequence(r_otio_cl)
-            and available_range.start_time.to_frames() == media_ref.start_frame
-            and src_range.start_time.to_frames() < media_ref.start_frame
+            and available_range_start_frame == media_ref.start_frame
+            and src_frame_start < media_ref.start_frame
         ):
             available_range = otio.opentime.TimeRange(
                 otio.opentime.RationalTime(0, rate=self.actual_fps),
@@ -246,7 +253,8 @@ class ExtractOTIOReview(
         # Extraction via FFmpeg.
         else:
             path = media_ref.target_url
-            # Set extract range from 0 (FFmpeg ignores embedded timecode).
+            # Set extract range from 0 (FFmpeg ignores
+            # embedded timecode).
             extract_range = otio.opentime.TimeRange(
                 otio.opentime.RationalTime(
                     (
@@ -414,7 +422,8 @@ class ExtractOTIOReview(
         to defined image sequence format.

         Args:
-            sequence (list): input dir path string, collection object, fps in list
+            sequence (list): input dir path string, collection object,
+                fps in list.
             video (list)[optional]: video_path string, otio_range in list
             gap (int)[optional]: gap duration
             end_offset (int)[optional]: offset gap frame start in frames
@@ -11,8 +11,8 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
     """Validate all product names are unique.

     This only validates whether the instances currently set to publish from
-    the workfile overlap one another for the folder + product they are publishing
-    to.
+    the workfile overlap one another for the folder + product they are
+    publishing to.

     This does not perform any check against existing publishes in the database
     since it is allowed to publish into existing products resulting in
@@ -72,8 +72,10 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
             # All is ok
             return

-        msg = ("Instance product names {} are not unique. ".format(non_unique) +
-               "Please remove or rename duplicates.")
+        msg = (
+            f"Instance product names {non_unique} are not unique."
+            " Please remove or rename duplicates."
+        )
         formatting_data = {
             "non_unique": ",".join(non_unique)
         }
@@ -79,7 +79,8 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     - Datatypes explanation:
     <color> string format must be supported by FFmpeg.
         Examples: "#000000", "0x000000", "black"
-    <font> must be accesible by ffmpeg = name of registered Font in system or path to font file.
+    <font> must be accesible by ffmpeg = name of registered Font in system
+        or path to font file.
         Examples: "Arial", "C:/Windows/Fonts/arial.ttf"

     - Possible keys:
@@ -87,17 +88,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     "bg_opacity" - Opacity of background (box around text) - <float, Range:0-1>
     "bg_color" - Background color - <color>
     "bg_padding" - Background padding in pixels - <int>
-    "x_offset" - offsets burnin vertically by entered pixels from border - <int>
-    "y_offset" - offsets burnin horizontally by entered pixels from border - <int>
+    "x_offset" - offsets burnin vertically by entered pixels
+        from border - <int>
+    "y_offset" - offsets burnin horizontally by entered pixels
+        from border - <int>
     - x_offset & y_offset should be set at least to same value as bg_padding!!
     "font" - Font Family for text - <font>
     "font_size" - Font size in pixels - <int>
     "font_color" - Color of text - <color>
     "frame_offset" - Default start frame - <int>
-        - required IF start frame is not set when using frames or timecode burnins
+        - required IF start frame is not set when using frames
+          or timecode burnins

-    On initializing class can be set General options through "options_init" arg.
-    General can be overridden when adding burnin
+    On initializing class can be set General options through
+    "options_init" arg.
+    General options can be overridden when adding burnin.

     '''
     TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
@@ -190,6 +190,7 @@ def get_current_project_settings():
     project_name = os.environ.get("AYON_PROJECT_NAME")
     if not project_name:
         raise ValueError(
-            "Missing context project in environemt variable `AYON_PROJECT_NAME`."
+            "Missing context project in environment"
+            " variable `AYON_PROJECT_NAME`."
         )
     return get_project_settings(project_name)
@@ -104,7 +104,7 @@ class ProductNameValidator(RegularExpressionValidatorClass):

     def validate(self, text, pos):
         results = super(ProductNameValidator, self).validate(text, pos)
-        if results[0] == self.Invalid:
+        if results[0] == RegularExpressionValidatorClass.Invalid:
             self.invalid.emit(self.invalid_chars(text))
         return results
@@ -217,7 +217,9 @@ class ProductTypeDescriptionWidget(QtWidgets.QWidget):

         product_type_label = QtWidgets.QLabel(self)
         product_type_label.setObjectName("CreatorProductTypeLabel")
-        product_type_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
+        product_type_label.setAlignment(
+            QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft
+        )

         help_label = QtWidgets.QLabel(self)
         help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
@@ -21,9 +21,9 @@ except ImportError:

     Application action based on 'ApplicationManager' system.

-    Handling of applications in launcher is not ideal and should be completely
-    redone from scratch. This is just a temporary solution to keep backwards
-    compatibility with AYON launcher.
+    Handling of applications in launcher is not ideal and should be
+    completely redone from scratch. This is just a temporary solution
+    to keep backwards compatibility with AYON launcher.

     Todos:
         Move handling of errors to frontend.
@@ -517,7 +517,11 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
     def setItemCheckState(self, index, state):
         self.setItemData(index, state, QtCore.Qt.CheckStateRole)

-    def set_value(self, values: Optional[Iterable[Any]], role: Optional[int] = None):
+    def set_value(
+        self,
+        values: Optional[Iterable[Any]],
+        role: Optional[int] = None,
+    ):
         if role is None:
             role = self._value_role
@@ -499,8 +499,10 @@ class ProductsModel(QtGui.QStandardItemModel):
             version_item.version_id
             for version_item in last_version_by_product_id.values()
         }
-        repre_count_by_version_id = self._controller.get_versions_representation_count(
-            project_name, version_ids
+        repre_count_by_version_id = (
+            self._controller.get_versions_representation_count(
+                project_name, version_ids
+            )
         )
         sync_availability_by_version_id = (
             self._controller.get_version_sync_availability(
@@ -339,7 +339,9 @@ class OverviewWidget(QtWidgets.QFrame):
         self._change_visibility_for_state()
         self._product_content_layout.addWidget(self._create_widget, 7)
         self._product_content_layout.addWidget(self._product_views_widget, 3)
-        self._product_content_layout.addWidget(self._product_attributes_wrap, 7)
+        self._product_content_layout.addWidget(
+            self._product_attributes_wrap, 7
+        )

     def _change_visibility_for_state(self):
         self._create_widget.setVisible(
@@ -214,8 +214,8 @@ class TasksCombobox(QtWidgets.QComboBox):
     Combobox gives ability to select only from intersection of task names for
     folder paths in selected instances.

-    If folder paths in selected instances does not have same tasks then combobox
-    will be empty.
+    If folder paths in selected instances does not have same tasks
+    then combobox will be empty.
     """
     value_changed = QtCore.Signal()
@@ -604,7 +604,7 @@ class VariantInputWidget(PlaceholderLineEdit):


 class GlobalAttrsWidget(QtWidgets.QWidget):
-    """Global attributes mainly to define context and product name of instances.
+    """Global attributes to define context and product name of instances.

     product name is or may be affected on context. Gives abiity to modify
     context and product name of instance. This change is not autopromoted but
@@ -22,8 +22,8 @@ class TasksModel(QtGui.QStandardItemModel):
     tasks with same names then model is empty too.

     Args:
-        controller (AbstractPublisherFrontend): Controller which handles creation and
-            publishing.
+        controller (AbstractPublisherFrontend): Controller which handles
+            creation and publishing.

     """
     def __init__(
@@ -998,7 +998,11 @@ class PublisherWindow(QtWidgets.QDialog):
             new_item["label"] = new_item.pop("creator_label")
             new_item["identifier"] = new_item.pop("creator_identifier")
             new_failed_info.append(new_item)
-        self.add_error_message_dialog(event["title"], new_failed_info, "Creator:")
+        self.add_error_message_dialog(
+            event["title"],
+            new_failed_info,
+            "Creator:"
+        )

     def _on_convertor_error(self, event):
         new_failed_info = []
@@ -366,8 +366,8 @@ class ContainersModel:
         try:
             uuid.UUID(repre_id)
         except (ValueError, TypeError, AttributeError):
-            # Fake not existing representation id so container is shown in UI
-            # but as invalid
+            # Fake not existing representation id so container
+            # is shown in UI but as invalid
             item.representation_id = invalid_ids_mapping.setdefault(
                 repre_id, uuid.uuid4().hex
             )
@@ -556,9 +556,10 @@ class _IconsCache:
             log.info("Didn't find icon \"{}\"".format(icon_name))

         elif used_variant != icon_name:
-            log.debug("Icon \"{}\" was not found \"{}\" is used instead".format(
-                icon_name, used_variant
-            ))
+            log.debug(
+                f"Icon \"{icon_name}\" was not found"
+                f" \"{used_variant}\" is used instead"
+            )

         cls._qtawesome_cache[full_icon_name] = icon
         return icon
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'core' version."""
-__version__ = "1.0.8+dev"
+__version__ = "1.0.10+dev"
@@ -16,6 +16,6 @@ pydantic = "^2.9.2"
 aiohttp-middlewares = "^2.0.0"
 Click = "^8"
 OpenTimelineIO = "0.16.0"
-opencolorio = "^2.3.2"
+opencolorio = "^2.3.2,<2.4.0"
 Pillow = "9.5.0"
 websocket-client = ">=0.40.0,<2"
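Under Poetry's caret semantics "^2.3.2" alone would accept anything below 3.0.0, so the added ",<2.4.0" presumably pins the resolver to the 2.3.x series of OpenColorIO.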
@@ -1,6 +1,6 @@
 name = "core"
 title = "Core"
-version = "1.0.8+dev"
+version = "1.0.10+dev"

 client_dir = "ayon_core"
@@ -5,7 +5,7 @@

 [tool.poetry]
 name = "ayon-core"
-version = "1.0.8+dev"
+version = "1.0.10+dev"
 description = ""
 authors = ["Ynput Team <team@ynput.io>"]
 readme = "README.md"
@@ -358,7 +358,10 @@ class ExtractOIIOTranscodeOutputModel(BaseSettingsModel):
     custom_tags: list[str] = SettingsField(
         default_factory=list,
         title="Custom Tags",
-        description="Additional custom tags that will be added to the created representation."
+        description=(
+            "Additional custom tags that will be added"
+            " to the created representation."
+        )
     )
@@ -892,9 +895,11 @@ class PublishPuginsModel(BaseSettingsModel):
         default_factory=CollectFramesFixDefModel,
         title="Collect Frames to Fix",
     )
-    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = SettingsField(
-        default_factory=CollectUSDLayerContributionsModel,
-        title="Collect USD Layer Contributions",
+    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = (
+        SettingsField(
+            default_factory=CollectUSDLayerContributionsModel,
+            title="Collect USD Layer Contributions",
+        )
     )
     ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
         default_factory=ValidateBaseModel,
@@ -1214,7 +1219,9 @@ DEFAULT_PUBLISH_VALUES = {
         "TOP_RIGHT": "{anatomy[version]}",
         "BOTTOM_LEFT": "{username}",
         "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
         "filter": {
             "families": [],
             "tags": []
@@ -1240,7 +1247,9 @@ DEFAULT_PUBLISH_VALUES = {
         "TOP_RIGHT": "{anatomy[version]}",
         "BOTTOM_LEFT": "{username}",
         "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
         "filter": {
             "families": [],
             "tags": []
@@ -83,8 +83,8 @@ class CreatorToolModel(BaseSettingsModel):
     filter_creator_profiles: list[FilterCreatorProfile] = SettingsField(
         default_factory=list,
         title="Filter creator profiles",
-        description="Allowed list of creator labels that will be only shown if "
-                    "profile matches context."
+        description="Allowed list of creator labels that will be only shown"
+                    " if profile matches context."
     )

     @validator("product_types_smart_select")
@@ -426,7 +426,9 @@ DEFAULT_TOOLS_VALUES = {
             ],
             "task_types": [],
             "tasks": [],
-            "template": "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+            "template": (
+                "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+            )
         },
         {
             "product_types": [
@@ -130,19 +130,20 @@ def test_image_sequence_and_handles_out_of_range():

     expected = [
         # 5 head black frames generated from gap (991-995)
-        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",

         # 9 tail back frames generated from gap (1097-1105)
-        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 1097 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 1097 C:/result/output.%03d.jpg",

         # Report from source tiff (996-1096)
         # 996-1000 = additional 5 head frames
         # 1001-1095 = source range conformed to 25fps
         # 1096-1096 = additional 1 tail frames
         "/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
-        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996 C:/result/output.%03d.jpg"
+        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
+        f" C:/result/output.%03d.jpg"
     ]

     assert calls == expected
@@ -179,13 +180,13 @@ def test_short_movie_head_gap_handles():

     expected = [
         # 10 head black frames generated from gap (991-1000)
-        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",

         # source range + 10 tail frames
         # duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
-        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -start_number 1001 "
-        "C:/result/output.%03d.jpg"
+        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
+        " -start_number 1001 C:/result/output.%03d.jpg"
     ]

     assert calls == expected
@@ -208,7 +209,8 @@ def test_short_movie_tail_gap_handles():
         # 10 head frames + source range
         # duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
         "/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
-        "C:\\data\\qt_no_tc_24fps.mov -start_number 991 C:/result/output.%03d.jpg"
+        "C:\\data\\qt_no_tc_24fps.mov -start_number 991"
+        " C:/result/output.%03d.jpg"
     ]

     assert calls == expected
@@ -234,10 +236,12 @@ def test_multiple_review_clips_no_gap():

     expected = [
         # 10 head black frames generated from gap (991-1000)
-        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune '
-        'stillimage -start_number 991 C:/result/output.%03d.jpg',
+        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
+        ' -i color=c=black:s=1280x720 -tune '
+        'stillimage -start_number 991 C:/result/output.%03d.jpg',

-        # Alternance 25fps tiff sequence and 24fps exr sequence for 100 frames each
+        # Alternance 25fps tiff sequence and 24fps exr sequence
+        # for 100 frames each
         '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
         f'C:\\no_tc{os.sep}output.%04d.tif '
         '-start_number 1001 C:/result/output.%03d.jpg',
@@ -315,7 +319,8 @@ def test_multiple_review_clips_with_gap():

     expected = [
         # Gap on review track (12 frames)
-        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi -i color=c=black:s=1280x720 -tune '
-        'stillimage -start_number 991 C:/result/output.%03d.jpg',
+        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
+        ' -i color=c=black:s=1280x720 -tune '
+        'stillimage -start_number 991 C:/result/output.%03d.jpg',

         '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '