mirror of https://github.com/ynput/ayon-core.git
synced 2025-12-24 21:04:40 +01:00

Merge branch 'develop' into enhancement/Load-and-manage-products-from-a-library-project

commit 51ec36e66f
41 changed files with 633 additions and 275 deletions
48  .github/workflows/assign_pr_to_project.yml  (vendored, new file)

@@ -0,0 +1,48 @@
+name: 🔸Auto assign pr
+on:
+  workflow_dispatch:
+    inputs:
+      pr_number:
+        type: string
+        description: "Run workflow for this PR number"
+        required: true
+      project_id:
+        type: string
+        description: "Github Project Number"
+        required: true
+        default: "16"
+  pull_request:
+    types:
+      - opened
+
+env:
+  GH_TOKEN: ${{ github.token }}
+
+jobs:
+  get-pr-repo:
+    runs-on: ubuntu-latest
+    outputs:
+      pr_repo_name: ${{ steps.get-repo-name.outputs.repo_name || github.event.pull_request.head.repo.full_name }}
+
+    # INFO `github.event.pull_request.head.repo.full_name` is not available on manual triggered (dispatched) runs
+    steps:
+      - name: Get PR repo name
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        id: get-repo-name
+        run: |
+          repo_name=$(gh pr view ${{ inputs.pr_number }} --json headRepository,headRepositoryOwner --repo ${{ github.repository }} | jq -r '.headRepositoryOwner.login + "/" + .headRepository.name')
+          echo "repo_name=$repo_name" >> $GITHUB_OUTPUT
+
+  auto-assign-pr:
+    needs:
+      - get-pr-repo
+    if: ${{ needs.get-pr-repo.outputs.pr_repo_name == github.repository }}
+    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@main
+    with:
+      repo: "${{ github.repository }}"
+      project_id: ${{ inputs.project_id != '' && fromJSON(inputs.project_id) || 16 }}
+      pull_request_number: ${{ github.event.pull_request.number || fromJSON(inputs.pr_number) }}
+    secrets:
+      # INFO fallback to default `github.token` is required for PRs from forks
+      # INFO organization secrets won't be available to forks
+      token: ${{ secrets.YNPUT_BOT_TOKEN || github.token}}
18  .github/workflows/validate_pr_labels.yml  (vendored, new file)

@@ -0,0 +1,18 @@
+name: 🔎 Validate PR Labels
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - labeled
+      - unlabeled
+
+jobs:
+  validate-type-label:
+    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@main
+    with:
+      repo: "${{ github.repository }}"
+      pull_request_number: ${{ github.event.pull_request.number }}
+      query_prefix: "type: "
+    secrets:
+      token: ${{ secrets.YNPUT_BOT_TOKEN }}
@@ -535,8 +535,8 @@ class AYONAddon(ABC):
         Implementation of this method is optional.

         Note:
-            The logic can be similar to logic in tray, but tray does not require
-            to be logged in.
+            The logic can be similar to logic in tray, but tray does not
+            require to be logged in.

         Args:
             process_context (ProcessContext): Context of child
@@ -146,7 +146,8 @@ def publish_report_viewer():
 @main_cli.command()
 @click.argument("output_path")
 @click.option("--project", help="Define project context")
-@click.option("--folder", help="Define folder in project (project must be set)")
+@click.option(
+    "--folder", help="Define folder in project (project must be set)")
 @click.option(
     "--strict",
     is_flag=True,
@@ -616,7 +616,9 @@ class EnumDef(AbstractAttrDef):
         return data

     @staticmethod
-    def prepare_enum_items(items: "EnumItemsInputType") -> List["EnumItemDict"]:
+    def prepare_enum_items(
+        items: "EnumItemsInputType"
+    ) -> List["EnumItemDict"]:
         """Convert items to unified structure.

         Output is a list where each item is dictionary with 'value'
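The unified structure is easiest to see by example. A minimal sketch; the accepted input shapes here are an assumption based on the docstring and the `EnumItemsInputType` alias, not spelled out by this hunk:

    # Hypothetical inputs; the normalized output shape is assumed.
    EnumDef.prepare_enum_items(["model", "rig"])
    # -> [{"value": "model", "label": "model"}, {"value": "rig", "label": "rig"}]
    EnumDef.prepare_enum_items({"model": "Model"})
    # -> [{"value": "model", "label": "Model"}]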
@@ -276,12 +276,7 @@ class ASettingRegistry(ABC):
     @abstractmethod
     def _delete_item(self, name):
         # type: (str) -> None
-        """Delete item from settings.
-
-        Note:
-            see :meth:`ayon_core.lib.user_settings.ARegistrySettings.delete_item`
-
-        """
+        """Delete item from settings."""
         pass

     def __delitem__(self, name):
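The mapping protocol above is the public entry point; a short sketch of the call path, using a hypothetical concrete registry instance:

    # `del` on the mapping interface ends in the subclass `_delete_item`.
    del registry["last_project"]   # -> registry._delete_item("last_project")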
@@ -433,12 +428,7 @@ class IniSettingRegistry(ASettingRegistry):
            config.write(cfg)

    def _delete_item(self, name):
-        """Delete item from default section.
-
-        Note:
-            See :meth:`~ayon_core.lib.IniSettingsRegistry.delete_item_from_section`
-
-        """
+        """Delete item from default section."""
        self.delete_item_from_section("MAIN", name)
@@ -1,9 +1,15 @@
 import os
 import re
 import copy
 import numbers
+import warnings
+from string import Formatter
+import typing
+from typing import List, Dict, Any, Set
+
+if typing.TYPE_CHECKING:
+    from typing import Union

 KEY_PATTERN = re.compile(r"(\{.*?[^{0]*\})")
 KEY_PADDING_PATTERN = re.compile(r"([^:]+)\S+[><]\S+")
 SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)")
 OPTIONAL_PATTERN = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
@@ -18,9 +24,7 @@ class TemplateUnsolved(Exception):
     def __init__(self, template, missing_keys, invalid_types):
         invalid_type_items = []
         for _key, _type in invalid_types.items():
-            invalid_type_items.append(
-                "\"{0}\" {1}".format(_key, str(_type))
-            )
+            invalid_type_items.append(f"\"{_key}\" {str(_type)}")

         invalid_types_msg = ""
         if invalid_type_items:
@@ -33,31 +37,32 @@ class TemplateUnsolved(Exception):
         missing_keys_msg = self.missing_keys_msg.format(
             ", ".join(missing_keys)
         )
-        super(TemplateUnsolved, self).__init__(
+        super().__init__(
             self.msg.format(template, missing_keys_msg, invalid_types_msg)
         )


 class StringTemplate:
     """String that can be formatted."""
-    def __init__(self, template):
+    def __init__(self, template: str):
         if not isinstance(template, str):
-            raise TypeError("<{}> argument must be a string, not {}.".format(
-                self.__class__.__name__, str(type(template))
-            ))
+            raise TypeError(
+                f"<{self.__class__.__name__}> argument must be a string,"
+                f" not {str(type(template))}."
+            )

-        self._template = template
+        self._template: str = template
         parts = []
-        last_end_idx = 0
-        for item in KEY_PATTERN.finditer(template):
-            start, end = item.span()
-            if start > last_end_idx:
-                parts.append(template[last_end_idx:start])
-            parts.append(FormattingPart(template[start:end]))
-            last_end_idx = end
+        formatter = Formatter()

-        if last_end_idx < len(template):
-            parts.append(template[last_end_idx:len(template)])
+        for item in formatter.parse(template):
+            literal_text, field_name, format_spec, conversion = item
+            if literal_text:
+                parts.append(literal_text)
+            if field_name:
+                parts.append(
+                    FormattingPart(field_name, format_spec, conversion)
+                )

         new_parts = []
         for part in parts:
@@ -77,15 +82,17 @@ class StringTemplate:
             if substr:
                 new_parts.append(substr)

-        self._parts = self.find_optional_parts(new_parts)
+        self._parts: List["Union[str, OptionalPart, FormattingPart]"] = (
+            self.find_optional_parts(new_parts)
+        )

-    def __str__(self):
+    def __str__(self) -> str:
         return self.template

-    def __repr__(self):
-        return "<{}> {}".format(self.__class__.__name__, self.template)
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}> {self.template}"

-    def __contains__(self, other):
+    def __contains__(self, other: str) -> bool:
         return other in self.template

     def replace(self, *args, **kwargs):
@@ -93,10 +100,10 @@ class StringTemplate:
         return self

     @property
-    def template(self):
+    def template(self) -> str:
         return self._template

-    def format(self, data):
+    def format(self, data: Dict[str, Any]) -> "TemplateResult":
         """ Figure out with whole formatting.

         Separate advanced keys (*Like '{project[name]}') from string which must
@@ -108,6 +115,7 @@ class StringTemplate:
         Returns:
             TemplateResult: Filled or partially filled template containing all
                 data needed or missing for filling template.
+
         """
         result = TemplatePartResult()
         for part in self._parts:
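A minimal usage sketch of `format`, based only on the API visible in this diff (the padded output value assumes the `{key:0>3}` padding behavior referenced elsewhere in this file):

    template = StringTemplate("{folder[name]}_v{version:0>3}")
    result = template.format({"folder": {"name": "chair"}, "version": 5})
    str(result)     # "chair_v005" -- TemplateResult subclasses str
    result.solved   # True once every key was filled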
@@ -135,23 +143,29 @@ class StringTemplate:
             invalid_types
         )

-    def format_strict(self, *args, **kwargs):
-        result = self.format(*args, **kwargs)
+    def format_strict(self, data: Dict[str, Any]) -> "TemplateResult":
+        result = self.format(data)
         result.validate()
         return result

     @classmethod
-    def format_template(cls, template, data):
+    def format_template(
+        cls, template: str, data: Dict[str, Any]
+    ) -> "TemplateResult":
         objected_template = cls(template)
         return objected_template.format(data)

     @classmethod
-    def format_strict_template(cls, template, data):
+    def format_strict_template(
+        cls, template: str, data: Dict[str, Any]
+    ) -> "TemplateResult":
         objected_template = cls(template)
         return objected_template.format_strict(data)

     @staticmethod
-    def find_optional_parts(parts):
+    def find_optional_parts(
+        parts: List["Union[str, FormattingPart]"]
+    ) -> List["Union[str, OptionalPart, FormattingPart]"]:
         new_parts = []
         tmp_parts = {}
         counted_symb = -1
@@ -216,11 +230,11 @@ class TemplateResult(str):
             of number.
     """

-    used_values = None
-    solved = None
-    template = None
-    missing_keys = None
-    invalid_types = None
+    used_values: Dict[str, Any] = None
+    solved: bool = None
+    template: str = None
+    missing_keys: List[str] = None
+    invalid_types: Dict[str, Any] = None

     def __new__(
         cls, filled_template, template, solved,
@@ -248,7 +262,7 @@ class TemplateResult(str):
             self.invalid_types
         )

-    def copy(self):
+    def copy(self) -> "TemplateResult":
         cls = self.__class__
         return cls(
             str(self),
@@ -259,7 +273,7 @@ class TemplateResult(str):
             self.invalid_types
         )

-    def normalized(self):
+    def normalized(self) -> "TemplateResult":
         """Convert to normalized path."""

         cls = self.__class__
@@ -275,27 +289,28 @@

 class TemplatePartResult:
     """Result to store result of template parts."""
-    def __init__(self, optional=False):
+    def __init__(self, optional: bool = False):
         # Missing keys or invalid value types of required keys
-        self._missing_keys = set()
-        self._invalid_types = {}
+        self._missing_keys: Set[str] = set()
+        self._invalid_types: Dict[str, Any] = {}
         # Missing keys or invalid value types of optional keys
-        self._missing_optional_keys = set()
-        self._invalid_optional_types = {}
+        self._missing_optional_keys: Set[str] = set()
+        self._invalid_optional_types: Dict[str, Any] = {}

         # Used values stored by key with origin type
         # - key without any padding or key modifiers
         # - value from filling data
         # Example: {"version": 1}
-        self._used_values = {}
+        self._used_values: Dict[str, Any] = {}
         # Used values stored by key with all modifirs
         # - value is already formatted string
         # Example: {"version:0>3": "001"}
-        self._realy_used_values = {}
+        self._really_used_values: Dict[str, Any] = {}
         # Concatenated string output after formatting
-        self._output = ""
+        self._output: str = ""
         # Is this result from optional part
-        self._optional = True
+        # TODO find out why we don't use 'optional' from args
+        self._optional: bool = True

     def add_output(self, other):
         if isinstance(other, str):
@@ -313,7 +328,7 @@ class TemplatePartResult:
             if other.optional and not other.solved:
                 return
             self._used_values.update(other.used_values)
-            self._realy_used_values.update(other.realy_used_values)
+            self._really_used_values.update(other.really_used_values)

         else:
             raise TypeError("Cannot add data from \"{}\" to \"{}\"".format(
@@ -321,7 +336,7 @@ class TemplatePartResult:
         )

     @property
-    def solved(self):
+    def solved(self) -> bool:
         if self.optional:
             if (
                 len(self.missing_optional_keys) > 0
@@ -334,45 +349,53 @@ class TemplatePartResult:
         )

     @property
-    def optional(self):
+    def optional(self) -> bool:
         return self._optional

     @property
-    def output(self):
+    def output(self) -> str:
         return self._output

     @property
-    def missing_keys(self):
+    def missing_keys(self) -> Set[str]:
         return self._missing_keys

     @property
-    def missing_optional_keys(self):
+    def missing_optional_keys(self) -> Set[str]:
         return self._missing_optional_keys

     @property
-    def invalid_types(self):
+    def invalid_types(self) -> Dict[str, Any]:
         return self._invalid_types

     @property
-    def invalid_optional_types(self):
+    def invalid_optional_types(self) -> Dict[str, Any]:
         return self._invalid_optional_types

     @property
-    def realy_used_values(self):
-        return self._realy_used_values
+    def really_used_values(self) -> Dict[str, Any]:
+        return self._really_used_values

     @property
-    def used_values(self):
+    def realy_used_values(self) -> Dict[str, Any]:
+        warnings.warn(
+            "Property 'realy_used_values' is deprecated."
+            " Use 'really_used_values' instead.",
+            DeprecationWarning
+        )
+        return self._really_used_values
+
+    @property
+    def used_values(self) -> Dict[str, Any]:
         return self._used_values

     @staticmethod
-    def split_keys_to_subdicts(values):
+    def split_keys_to_subdicts(values: Dict[str, Any]) -> Dict[str, Any]:
         output = {}
+        formatter = Formatter()
         for key, value in values.items():
             key_padding = list(KEY_PADDING_PATTERN.findall(key))
             if key_padding:
                 key = key_padding[0]
-            key_subdict = list(SUB_DICT_PATTERN.findall(key))
+            _, field_name, _, _ = next(formatter.parse(f"{{{key}}}"))
+            key_subdict = list(SUB_DICT_PATTERN.findall(field_name))
             data = output
             last_key = key_subdict.pop(-1)
             for subkey in key_subdict:
@@ -382,7 +405,7 @@ class TemplatePartResult:
             data[last_key] = value
         return output

-    def get_clean_used_values(self):
+    def get_clean_used_values(self) -> Dict[str, Any]:
         new_used_values = {}
         for key, value in self.used_values.items():
             if isinstance(value, FormatObject):
@@ -391,19 +414,27 @@ class TemplatePartResult:

         return self.split_keys_to_subdicts(new_used_values)

-    def add_realy_used_value(self, key, value):
-        self._realy_used_values[key] = value
+    def add_really_used_value(self, key: str, value: Any):
+        self._really_used_values[key] = value

-    def add_used_value(self, key, value):
+    def add_realy_used_value(self, key: str, value: Any):
+        warnings.warn(
+            "Method 'add_realy_used_value' is deprecated."
+            " Use 'add_really_used_value' instead.",
+            DeprecationWarning
+        )
+        self.add_really_used_value(key, value)
+
+    def add_used_value(self, key: str, value: Any):
         self._used_values[key] = value

-    def add_missing_key(self, key):
+    def add_missing_key(self, key: str):
         if self._optional:
             self._missing_optional_keys.add(key)
         else:
             self._missing_keys.add(key)

-    def add_invalid_type(self, key, value):
+    def add_invalid_type(self, key: str, value: Any):
         if self._optional:
             self._invalid_optional_types[key] = type(value)
         else:
@@ -421,10 +452,10 @@ class FormatObject:
     def __format__(self, *args, **kwargs):
         return self.value.__format__(*args, **kwargs)

-    def __str__(self):
+    def __str__(self) -> str:
         return str(self.value)

-    def __repr__(self):
+    def __repr__(self) -> str:
         return self.__str__()

@@ -434,23 +465,44 @@ class FormattingPart:
     Containt only single key to format e.g. "{project[name]}".

     Args:
-        template(str): String containing the formatting key.
+        field_name (str): Name of key.
+        format_spec (str): Format specification.
+        conversion (Union[str, None]): Conversion type.

     """
-    def __init__(self, template):
-        self._template = template
+    def __init__(
+        self,
+        field_name: str,
+        format_spec: str,
+        conversion: "Union[str, None]",
+    ):
+        format_spec_v = ""
+        if format_spec:
+            format_spec_v = f":{format_spec}"
+        conversion_v = ""
+        if conversion:
+            conversion_v = f"!{conversion}"
+
+        self._field_name: str = field_name
+        self._format_spec: str = format_spec_v
+        self._conversion: str = conversion_v
+
+        template_base = f"{field_name}{format_spec_v}{conversion_v}"
+        self._template_base: str = template_base
+        self._template: str = f"{{{template_base}}}"

     @property
-    def template(self):
+    def template(self) -> str:
         return self._template

-    def __repr__(self):
+    def __repr__(self) -> str:
         return "<Format:{}>".format(self._template)

-    def __str__(self):
+    def __str__(self) -> str:
         return self._template

     @staticmethod
-    def validate_value_type(value):
+    def validate_value_type(value: Any) -> bool:
         """Check if value can be used for formatting of single key."""
         if isinstance(value, (numbers.Number, FormatObject)):
             return True
@@ -461,7 +513,7 @@ class FormattingPart:
         return False

     @staticmethod
-    def validate_key_is_matched(key):
+    def validate_key_is_matched(key: str) -> bool:
         """Validate that opening has closing at correct place.
         Future-proof, only square brackets are currently used in keys.

@@ -488,16 +540,29 @@ class FormattingPart:
                 return False
         return not queue

-    def format(self, data, result):
+    @staticmethod
+    def keys_to_template_base(keys: List[str]):
+        if not keys:
+            return None
+        # Create copy of keys
+        keys = list(keys)
+        template_base = keys.pop(0)
+        joined_keys = "".join([f"[{key}]" for key in keys])
+        return f"{template_base}{joined_keys}"
+
+    def format(
+        self, data: Dict[str, Any], result: TemplatePartResult
+    ) -> TemplatePartResult:
         """Format the formattings string.

         Args:
             data(dict): Data that should be used for formatting.
             result(TemplatePartResult): Object where result is stored.

         """
-        key = self.template[1:-1]
-        if key in result.realy_used_values:
-            result.add_output(result.realy_used_values[key])
+        key = self._template_base
+        if key in result.really_used_values:
+            result.add_output(result.really_used_values[key])
             return result

         # ensure key is properly formed [({})] properly closed.
@@ -507,17 +572,38 @@ class FormattingPart:
             return result

         # check if key expects subdictionary keys (e.g. project[name])
-        existence_check = key
-        key_padding = list(KEY_PADDING_PATTERN.findall(existence_check))
-        if key_padding:
-            existence_check = key_padding[0]
-        key_subdict = list(SUB_DICT_PATTERN.findall(existence_check))
+        key_subdict = list(SUB_DICT_PATTERN.findall(self._field_name))

         value = data
         missing_key = False
         invalid_type = False
         used_keys = []
+        keys_to_value = None
+        used_value = None

         for sub_key in key_subdict:
+            if isinstance(value, list):
+                if not sub_key.lstrip("-").isdigit():
+                    invalid_type = True
+                    break
+                sub_key = int(sub_key)
+                if sub_key < 0:
+                    sub_key = len(value) + sub_key
+
+                invalid = 0 > sub_key < len(data)
+                if invalid:
+                    used_keys.append(sub_key)
+                    missing_key = True
+                    break
+
+                used_keys.append(sub_key)
+                if keys_to_value is None:
+                    keys_to_value = list(used_keys)
+                    keys_to_value.pop(-1)
+                    used_value = copy.deepcopy(value)
+                value = value[sub_key]
+                continue
+
             if (
                 value is None
                 or (hasattr(value, "items") and sub_key not in value)
@@ -533,45 +619,57 @@ class FormattingPart:
             used_keys.append(sub_key)
             value = value.get(sub_key)

-        if missing_key or invalid_type:
-            if len(used_keys) == 0:
-                invalid_key = key_subdict[0]
-            else:
-                invalid_key = used_keys[0]
-                for idx, sub_key in enumerate(used_keys):
-                    if idx == 0:
-                        continue
-                    invalid_key += "[{0}]".format(sub_key)
+        field_name = key_subdict[0]
+        if used_keys:
+            field_name = self.keys_to_template_base(used_keys)

+        if missing_key or invalid_type:
             if missing_key:
-                result.add_missing_key(invalid_key)
+                result.add_missing_key(field_name)

             elif invalid_type:
-                result.add_invalid_type(invalid_key, value)
+                result.add_invalid_type(field_name, value)

             result.add_output(self.template)
             return result

-        if self.validate_value_type(value):
-            fill_data = {}
-            first_value = True
-            for used_key in reversed(used_keys):
-                if first_value:
-                    first_value = False
-                    fill_data[used_key] = value
-                else:
-                    _fill_data = {used_key: fill_data}
-                    fill_data = _fill_data
-
-            formatted_value = self.template.format(**fill_data)
-            result.add_realy_used_value(key, formatted_value)
-            result.add_used_value(existence_check, formatted_value)
-            result.add_output(formatted_value)
+        if not self.validate_value_type(value):
+            result.add_invalid_type(key, value)
+            result.add_output(self.template)
             return result

-        result.add_invalid_type(key, value)
-        result.add_output(self.template)
+        fill_data = root_fill_data = {}
+        parent_fill_data = None
+        parent_key = None
+        fill_value = data
+        value_filled = False
+        for used_key in used_keys:
+            if isinstance(fill_value, list):
+                parent_fill_data[parent_key] = fill_value
+                value_filled = True
+                break
+            fill_value = fill_value[used_key]
+            parent_fill_data = fill_data
+            fill_data = parent_fill_data.setdefault(used_key, {})
+            parent_key = used_key
+
+        if not value_filled:
+            parent_fill_data[used_keys[-1]] = value
+
+        template = f"{{{field_name}{self._format_spec}{self._conversion}}}"
+        formatted_value = template.format(**root_fill_data)
+        used_key = key
+        if keys_to_value is not None:
+            used_key = self.keys_to_template_base(keys_to_value)
+
+        if used_value is None:
+            if isinstance(value, numbers.Number):
+                used_value = value
+            else:
+                used_value = formatted_value
+        result.add_really_used_value(self._field_name, used_value)
+        result.add_used_value(used_key, used_value)
+        result.add_output(formatted_value)
         return result
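For orientation, the `keys_to_template_base` helper introduced above simply re-joins a key chain into a template base; its behavior follows directly from the code:

    FormattingPart.keys_to_template_base(["project", "name"])
    # -> "project[name]"
    FormattingPart.keys_to_template_base([])
    # -> None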
@@ -585,20 +683,27 @@ class OptionalPart:
         'FormattingPart'.
     """

-    def __init__(self, parts):
-        self._parts = parts
+    def __init__(
+        self,
+        parts: List["Union[str, OptionalPart, FormattingPart]"]
+    ):
+        self._parts: List["Union[str, OptionalPart, FormattingPart]"] = parts

     @property
-    def parts(self):
+    def parts(self) -> List["Union[str, OptionalPart, FormattingPart]"]:
         return self._parts

-    def __str__(self):
+    def __str__(self) -> str:
         return "<{}>".format("".join([str(p) for p in self._parts]))

-    def __repr__(self):
+    def __repr__(self) -> str:
         return "<Optional:{}>".format("".join([str(p) for p in self._parts]))

-    def format(self, data, result):
+    def format(
+        self,
+        data: Dict[str, Any],
+        result: TemplatePartResult,
+    ) -> TemplatePartResult:
         new_result = TemplatePartResult(True)
         for part in self._parts:
             if isinstance(part, str):
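Optional parts are the `<...>` sections of a template: when their keys are missing, the whole section drops out instead of failing. A sketch of the intended behavior, assumed from `OPTIONAL_PATTERN` and this class rather than demonstrated by the hunk:

    template = StringTemplate("{folder[name]}<_{variant}>")
    str(template.format({"folder": {"name": "chair"}}))
    # -> "chair"
    str(template.format({"folder": {"name": "chair"}, "variant": "hi"}))
    # -> "chair_hi"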
@@ -1283,12 +1283,16 @@ class CreateContext:

     @contextmanager
     def bulk_pre_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("pre_create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "pre_create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
     def bulk_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
@@ -1946,9 +1950,9 @@ class CreateContext:
         creator are just removed from context.

         Args:
-            instances (List[CreatedInstance]): Instances that should be removed.
-                Remove logic is done using creator, which may require to
-                do other cleanup than just remove instance from context.
+            instances (List[CreatedInstance]): Instances that should be
+                removed. Remove logic is done using creator, which may require
+                to do other cleanup than just remove instance from context.
             sender (Optional[str]): Sender of the event.

         """
@@ -1,5 +1,9 @@
 import ayon_api
-from ayon_core.lib import StringTemplate, filter_profiles, prepare_template_data
+from ayon_core.lib import (
+    StringTemplate,
+    filter_profiles,
+    prepare_template_data,
+)
 from ayon_core.settings import get_project_settings

 from .constants import DEFAULT_PRODUCT_TEMPLATE
@@ -387,7 +387,7 @@ def get_representations_delivery_template_data(
     # convert representation entity. Fixed in 'ayon_api' 1.0.10.
     if isinstance(template_data, str):
         con = ayon_api.get_server_api_connection()
-        repre_entity = con._representation_conversion(repre_entity)
+        con._representation_conversion(repre_entity)
         template_data = repre_entity["context"]

     template_data.update(copy.deepcopy(general_template_data))
@@ -222,6 +222,9 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     source_range = otio_clip.source_range
     available_range_rate = available_range.start_time.rate
     media_in = available_range.start_time.value
+    available_range_start_frame = (
+        available_range.start_time.to_frames()
+    )

     # Temporary.
     # Some AYON custom OTIO exporter were implemented with relative
@@ -230,7 +233,7 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     # while we are updating those.
     if (
         is_clip_from_media_sequence(otio_clip)
-        and otio_clip.available_range().start_time.to_frames() == media_ref.start_frame
+        and available_range_start_frame == media_ref.start_frame
         and source_range.start_time.to_frames() < media_ref.start_frame
     ):
         media_in = 0
@@ -303,8 +306,12 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
     rounded_av_rate = round(available_range_rate, 2)
     rounded_src_rate = round(source_range.start_time.rate, 2)
     if rounded_av_rate != rounded_src_rate:
-        conformed_src_in = source_range.start_time.rescaled_to(available_range_rate)
-        conformed_src_duration = source_range.duration.rescaled_to(available_range_rate)
+        conformed_src_in = source_range.start_time.rescaled_to(
+            available_range_rate
+        )
+        conformed_src_duration = source_range.duration.rescaled_to(
+            available_range_rate
+        )
         conformed_source_range = otio.opentime.TimeRange(
             start_time=conformed_src_in,
             duration=conformed_src_duration
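`rescaled_to` changes the rate while preserving the wall-clock time, which is what makes the conform above safe across mixed frame rates:

    import opentimelineio as otio

    src_in = otio.opentime.RationalTime(48, 24)  # frame 48 at 24 fps
    conformed = src_in.rescaled_to(25)           # same moment, expressed at 25 fps
    (conformed.value, conformed.rate)            # (50.0, 25.0)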
@@ -18,13 +18,13 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:

     Example:
         >>> parse_ayon_entity_uri(
-        >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"  # noqa: E501
+        >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"
         >>> )
         {'project': 'test', 'folderPath': '/char/villain',
          'product': 'modelMain', 'version': 1,
          'representation': 'usd'}
         >>> parse_ayon_entity_uri(
-        >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"  # noqa: E501
+        >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"
        >>> )
        {'project': 'project', 'folderPath': '/folder',
         'product': 'renderMain', 'version': 3,
@@ -34,7 +34,7 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
         dict[str, Union[str, int]]: The individual key with their values as
             found in the ayon entity URI.

-    """
+    """  # noqa: E501

     if not (uri.startswith("ayon+entity://") or uri.startswith("ayon://")):
         return {}
@@ -7,8 +7,11 @@ from copy import deepcopy
 import attr
 import ayon_api
 import clique
-from ayon_core.lib import Logger
-from ayon_core.pipeline import get_current_project_name, get_representation_path
+from ayon_core.lib import Logger, collect_frames
+from ayon_core.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+)
 from ayon_core.pipeline.create import get_product_name
 from ayon_core.pipeline.farm.patterning import match_aov_pattern
 from ayon_core.pipeline.publish import KnownPublishError
@@ -295,11 +298,17 @@ def _add_review_families(families):
     return families


-def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
-                            skip_integration_repre_list,
-                            do_not_add_review,
-                            context,
-                            color_managed_plugin):
+def prepare_representations(
+    skeleton_data,
+    exp_files,
+    anatomy,
+    aov_filter,
+    skip_integration_repre_list,
+    do_not_add_review,
+    context,
+    color_managed_plugin,
+    frames_to_render=None
+):
     """Create representations for file sequences.

     This will return representations of expected files if they are not
@@ -315,6 +324,8 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
         skip_integration_repre_list (list): exclude specific extensions,
         do_not_add_review (bool): explicitly skip review
         color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
+        frames_to_render (str): implicit or explicit range of frames to render
+            this value is sent to Deadline in JobInfo.Frames
     Returns:
         list of representations
@@ -325,6 +336,14 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,

     log = Logger.get_logger("farm_publishing")

+    if frames_to_render is not None:
+        frames_to_render = _get_real_frames_to_render(frames_to_render)
+    else:
+        # Backwards compatibility for older logic
+        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_end = int(skeleton_data.get("frameEndHandle"))
+        frames_to_render = list(range(frame_start, frame_end + 1))
+
     # create representation for every collected sequence
     for collection in collections:
         ext = collection.tail.lstrip(".")
@@ -361,18 +380,21 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

-        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_start = frames_to_render[0]
+        frame_end = frames_to_render[-1]
         if skeleton_data.get("slate"):
             frame_start -= 1

+        files = _get_real_files_to_rendered(collection, frames_to_render)
+
         # explicitly disable review by user
         preview = preview and not do_not_add_review
         rep = {
             "name": ext,
             "ext": ext,
-            "files": [os.path.basename(f) for f in list(collection)],
+            "files": files,
             "frameStart": frame_start,
-            "frameEnd": int(skeleton_data.get("frameEndHandle")),
+            "frameEnd": frame_end,
             # If expectedFile are absolute, we need only filenames
             "stagingDir": staging,
             "fps": skeleton_data.get("fps"),
|
|||
" This may cause issues on farm."
|
||||
).format(staging))
|
||||
|
||||
files = _get_real_files_to_rendered(
|
||||
[os.path.basename(remainder)], frames_to_render)
|
||||
|
||||
rep = {
|
||||
"name": ext,
|
||||
"ext": ext,
|
||||
"files": os.path.basename(remainder),
|
||||
"files": files[0],
|
||||
"stagingDir": staging,
|
||||
}
|
||||
|
||||
|
|
@@ -453,6 +478,53 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
     return representations


+def _get_real_frames_to_render(frames):
+    """Returns list of frames that should be rendered.
+
+    Artists could want to selectively render only particular frames
+    """
+    frames_to_render = []
+    for frame in frames.split(","):
+        if "-" in frame:
+            splitted = frame.split("-")
+            frames_to_render.extend(
+                range(int(splitted[0]), int(splitted[1])+1))
+        else:
+            frames_to_render.append(int(frame))
+    frames_to_render.sort()
+    return frames_to_render
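The parser accepts Deadline-style frame strings mixing single frames and dash ranges, for example:

    _get_real_frames_to_render("1-3,5,10-11")
    # -> [1, 2, 3, 5, 10, 11]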
+def _get_real_files_to_rendered(collection, frames_to_render):
+    """Use expected files based on real frames_to_render.
+
+    Artists might explicitly set frames they want to render via Publisher UI.
+    This uses this value to filter out files
+    Args:
+        frames_to_render (list): of str '1001'
+    """
+    files = [os.path.basename(f) for f in list(collection)]
+    file_name, extracted_frame = list(collect_frames(files).items())[0]
+
+    if not extracted_frame:
+        return files
+
+    found_frame_pattern_length = len(extracted_frame)
+    normalized_frames_to_render = {
+        str(frame_to_render).zfill(found_frame_pattern_length)
+        for frame_to_render in frames_to_render
+    }
+
+    return [
+        file_name
+        for file_name in files
+        if any(
+            frame in file_name
+            for frame in normalized_frames_to_render
+        )
+    ]
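With hypothetical file names, the filtering keeps only files whose zero-padded frame was requested (assuming `collect_frames` maps each file name to its frame string):

    files = ["render.1001.exr", "render.1002.exr", "render.1003.exr"]
    _get_real_files_to_rendered(files, [1001, 1003])
    # -> ["render.1001.exr", "render.1003.exr"]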
 def create_instances_for_aov(instance, skeleton, aov_filter,
                              skip_integration_repre_list,
                              do_not_add_review):
@@ -702,9 +774,14 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,

     project_settings = instance.context.data.get("project_settings")

     use_legacy_product_name = True
     try:
-        use_legacy_product_name = project_settings["core"]["tools"]["creator"]["use_legacy_product_names_for_renders"]  # noqa: E501
+        use_legacy_product_name = (
+            project_settings
+            ["core"]
+            ["tools"]
+            ["creator"]
+            ["use_legacy_product_names_for_renders"]
+        )
     except KeyError:
         warnings.warn(
             ("use_legacy_for_renders not found in project settings. "
@@ -720,7 +797,9 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
             dynamic_data=dynamic_data)

     else:
-        product_name, group_name = get_product_name_and_group_from_template(
+        (
+            product_name, group_name
+        ) = get_product_name_and_group_from_template(
             task_entity=instance.data["taskEntity"],
             project_name=instance.context.data["projectName"],
             host_name=instance.context.data["hostName"],
@@ -863,7 +942,7 @@ def _collect_expected_files_for_aov(files):
     # but we really expect only one collection.
     # Nothing else make sense.
     if len(cols) != 1:
-        raise ValueError("Only one image sequence type is expected.")  # noqa: E501
+        raise ValueError("Only one image sequence type is expected.")
     return list(cols[0])
@@ -3,6 +3,7 @@ from .constants import (
     ValidateContentsOrder,
     ValidateSceneOrder,
     ValidateMeshOrder,
+    FARM_JOB_ENV_DATA_KEY,
 )

 from .publish_plugins import (
@@ -59,6 +60,7 @@ __all__ = (
     "ValidateContentsOrder",
     "ValidateSceneOrder",
     "ValidateMeshOrder",
+    "FARM_JOB_ENV_DATA_KEY",

     "AbstractMetaInstancePlugin",
     "AbstractMetaContextPlugin",
@@ -9,3 +9,5 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
 DEFAULT_PUBLISH_TEMPLATE = "default"
 DEFAULT_HERO_PUBLISH_TEMPLATE = "default"
 TRANSIENT_DIR_TEMPLATE = "default"
+
+FARM_JOB_ENV_DATA_KEY: str = "farmJobEnv"
@@ -87,14 +87,13 @@ def get_folder_template_data(folder_entity, project_name):
     """

     path = folder_entity["path"]
-    hierarchy_parts = path.split("/")
-    # Remove empty string from the beginning
-    hierarchy_parts.pop(0)
+    # Remove empty string from the beginning and split by '/'
+    parents = path.lstrip("/").split("/")
     # Remove last part which is folder name
-    folder_name = hierarchy_parts.pop(-1)
-    hierarchy = "/".join(hierarchy_parts)
-    if hierarchy_parts:
-        parent_name = hierarchy_parts[-1]
+    folder_name = parents.pop(-1)
+    hierarchy = "/".join(parents)
+    if parents:
+        parent_name = parents[-1]
     else:
         parent_name = project_name
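For a hypothetical folder path the parsing above resolves as:

    # path = "/assets/chars/villain"
    # parents     -> ["assets", "chars"]   (after popping the folder name)
    # folder_name -> "villain"
    # hierarchy   -> "assets/chars"
    # parent_name -> "chars"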
@@ -103,6 +102,7 @@ def get_folder_template_data(folder_entity, project_name):
             "name": folder_name,
             "type": folder_entity["folderType"],
             "path": path,
+            "parents": parents,
         },
         "asset": folder_name,
         "hierarchy": hierarchy,
@@ -413,14 +413,16 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
             # Backwards compatible (Deprecated since 24/06/06)
             or instance.data.get("newAssetPublishing")
         ):
-            hierarchy = instance.data["hierarchy"]
-            anatomy_data["hierarchy"] = hierarchy
+            folder_path = instance.data["folderPath"]
+            parents = folder_path.lstrip("/").split("/")
+            folder_name = parents.pop(-1)

             parent_name = project_entity["name"]
-            if hierarchy:
-                parent_name = hierarchy.split("/")[-1]
+            hierarchy = ""
+            if parents:
+                parent_name = parents[-1]
+                hierarchy = "/".join(parents)

-            folder_name = instance.data["folderPath"].split("/")[-1]
             anatomy_data.update({
                 "asset": folder_name,
                 "hierarchy": hierarchy,
|
|||
# Using 'Shot' is current default behavior of editorial
|
||||
# (or 'newHierarchyIntegration') publishing.
|
||||
"type": "Shot",
|
||||
"parents": parents,
|
||||
},
|
||||
})
|
||||
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,43 @@
+import os
+
+import pyblish.api
+
+from ayon_core.lib import get_ayon_username
+from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY
+
+
+class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
+    """Collect set of environment variables to submit with deadline jobs"""
+    order = pyblish.api.CollectorOrder - 0.45
+    label = "AYON core Farm Environment Variables"
+    targets = ["local"]
+
+    def process(self, context):
+        env = context.data.setdefault(FARM_JOB_ENV_DATA_KEY, {})
+
+        # Disable colored logs on farm
+        for key, value in (
+            ("AYON_LOG_NO_COLORS", "1"),
+            ("AYON_PROJECT_NAME", context.data["projectName"]),
+            ("AYON_FOLDER_PATH", context.data.get("folderPath")),
+            ("AYON_TASK_NAME", context.data.get("task")),
+            # NOTE we should use 'context.data["user"]' but that has higher
+            #   order.
+            ("AYON_USERNAME", get_ayon_username()),
+        ):
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
+
+        for key in [
+            "AYON_BUNDLE_NAME",
+            "AYON_DEFAULT_SETTINGS_VARIANT",
+            "AYON_IN_TESTS",
+            # NOTE Not sure why workdir is needed?
+            "AYON_WORKDIR",
+        ]:
+            value = os.getenv(key)
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
@@ -43,7 +43,8 @@ class CollectHierarchy(pyblish.api.ContextPlugin):

         shot_data = {
             "entity_type": "folder",
-            # WARNING unless overwritten, default folder type is hardcoded to shot
+            # WARNING unless overwritten, default folder type is hardcoded
+            #   to shot
             "folder_type": instance.data.get("folder_type") or "Shot",
             "tasks": instance.data.get("tasks") or {},
             "comments": instance.data.get("comments", []),
@@ -71,20 +71,18 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             name = inst.data["folderPath"]

             recycling_file = [f for f in created_files if name in f]

-            # frameranges
-            timeline_in_h = inst.data["clipInH"]
-            timeline_out_h = inst.data["clipOutH"]
-            fps = inst.data["fps"]
-
-            # create duration
-            duration = (timeline_out_h - timeline_in_h) + 1
+            audio_clip = inst.data["otioClip"]
+            audio_range = audio_clip.range_in_parent()
+            duration = audio_range.duration.to_frames()

             # ffmpeg generate new file only if doesn't exists already
             if not recycling_file:
-                # convert to seconds
-                start_sec = float(timeline_in_h / fps)
-                duration_sec = float(duration / fps)
+                parent_track = audio_clip.parent()
+                parent_track_start = parent_track.range_in_parent().start_time
+                relative_start_time = (
+                    audio_range.start_time - parent_track_start)
+                start_sec = relative_start_time.to_seconds()
+                duration_sec = audio_range.duration.to_seconds()

                 # temp audio file
                 audio_fpath = self.create_temp_file(name)
@@ -163,34 +161,36 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):

         output = []
         # go trough all audio tracks
-        for otio_track in otio_timeline.tracks:
-            if "Audio" not in otio_track.kind:
-                continue
+        for otio_track in otio_timeline.audio_tracks():
             self.log.debug("_" * 50)
             playhead = 0
             for otio_clip in otio_track:
                 self.log.debug(otio_clip)
                 if isinstance(otio_clip, otio.schema.Gap):
                     playhead += otio_clip.source_range.duration.value
-                elif isinstance(otio_clip, otio.schema.Clip):
-                    start = otio_clip.source_range.start_time.value
-                    duration = otio_clip.source_range.duration.value
-                    fps = otio_clip.source_range.start_time.rate
+                if (isinstance(otio_clip, otio.schema.Clip) and
+                        not otio_clip.media_reference.is_missing_reference):
+                    media_av_start = otio_clip.available_range().start_time
+                    clip_start = otio_clip.source_range.start_time
+                    fps = clip_start.rate
+                    conformed_av_start = media_av_start.rescaled_to(fps)
+                    # ffmpeg ignores embedded tc
+                    start = clip_start - conformed_av_start
+                    duration = otio_clip.source_range.duration
                     media_path = otio_clip.media_reference.target_url
                     input = {
                         "mediaPath": media_path,
                         "delayFrame": playhead,
-                        "startFrame": start,
-                        "durationFrame": duration,
+                        "startFrame": start.to_frames(),
+                        "durationFrame": duration.to_frames(),
                         "delayMilSec": int(float(playhead / fps) * 1000),
-                        "startSec": float(start / fps),
-                        "durationSec": float(duration / fps),
-                        "fps": fps
+                        "startSec": start.to_seconds(),
+                        "durationSec": duration.to_seconds(),
+                        "fps": float(fps)
                     }
                     if input not in output:
                         output.append(input)
                         self.log.debug("__ input: {}".format(input))
-                    playhead += otio_clip.source_range.duration.value
+
+                playhead += otio_clip.source_range.duration.value

         return output
|
|
@ -78,6 +78,7 @@ class ExtractOTIOReview(
|
|||
|
||||
if otio_review_clips is None:
|
||||
self.log.info(f"Instance `{instance}` has no otioReviewClips")
|
||||
return
|
||||
|
||||
# add plugin wide attributes
|
||||
self.representation_files = []
|
||||
|
|
@@ -129,26 +130,33 @@ class ExtractOTIOReview(
                     res_data[key] = value
                     break

-        self.to_width, self.to_height = res_data["width"], res_data["height"]
-        self.log.debug("> self.to_width x self.to_height: {} x {}".format(
-            self.to_width, self.to_height
-        ))
+        self.to_width, self.to_height = (
+            res_data["width"], res_data["height"]
+        )
+        self.log.debug(
+            "> self.to_width x self.to_height:"
+            f" {self.to_width} x {self.to_height}"
+        )

         available_range = r_otio_cl.available_range()
+        available_range_start_frame = (
+            available_range.start_time.to_frames()
+        )
         processing_range = None
         self.actual_fps = available_range.duration.rate
         start = src_range.start_time.rescaled_to(self.actual_fps)
         duration = src_range.duration.rescaled_to(self.actual_fps)
+        src_frame_start = src_range.start_time.to_frames()

         # Temporary.
-        # Some AYON custom OTIO exporter were implemented with relative
-        # source range for image sequence. Following code maintain
-        # backward-compatibility by adjusting available range
+        # Some AYON custom OTIO exporter were implemented with
+        # relative source range for image sequence. Following code
+        # maintain backward-compatibility by adjusting available range
         # while we are updating those.
         if (
             is_clip_from_media_sequence(r_otio_cl)
-            and available_range.start_time.to_frames() == media_ref.start_frame
-            and src_range.start_time.to_frames() < media_ref.start_frame
+            and available_range_start_frame == media_ref.start_frame
+            and src_frame_start < media_ref.start_frame
         ):
             available_range = otio.opentime.TimeRange(
                 otio.opentime.RationalTime(0, rate=self.actual_fps),
@@ -246,7 +254,8 @@ class ExtractOTIOReview(
             # Extraction via FFmpeg.
             else:
                 path = media_ref.target_url
-                # Set extract range from 0 (FFmpeg ignores embedded timecode).
+                # Set extract range from 0 (FFmpeg ignores
+                #   embedded timecode).
                 extract_range = otio.opentime.TimeRange(
                     otio.opentime.RationalTime(
                         (
@@ -414,7 +423,8 @@ class ExtractOTIOReview(
         to defined image sequence format.

         Args:
-            sequence (list): input dir path string, collection object, fps in list
+            sequence (list): input dir path string, collection object,
+                fps in list.
             video (list)[optional]: video_path string, otio_range in list
             gap (int)[optional]: gap duration
             end_offset (int)[optional]: offset gap frame start in frames
@@ -11,8 +11,8 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
     """Validate all product names are unique.

     This only validates whether the instances currently set to publish from
-    the workfile overlap one another for the folder + product they are publishing
-    to.
+    the workfile overlap one another for the folder + product they are
+    publishing to.

     This does not perform any check against existing publishes in the database
     since it is allowed to publish into existing products resulting in
@@ -72,8 +72,10 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
             # All is ok
             return

-        msg = ("Instance product names {} are not unique. ".format(non_unique) +
-               "Please remove or rename duplicates.")
+        msg = (
+            f"Instance product names {non_unique} are not unique."
+            " Please remove or rename duplicates."
+        )
         formatting_data = {
             "non_unique": ",".join(non_unique)
         }
@@ -79,7 +79,8 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     - Datatypes explanation:
     <color> string format must be supported by FFmpeg.
         Examples: "#000000", "0x000000", "black"
-    <font> must be accesible by ffmpeg = name of registered Font in system or path to font file.
+    <font> must be accesible by ffmpeg = name of registered Font in system
+        or path to font file.
         Examples: "Arial", "C:/Windows/Fonts/arial.ttf"

     - Possible keys:
@@ -87,17 +88,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     "bg_opacity" - Opacity of background (box around text) - <float, Range:0-1>
     "bg_color" - Background color - <color>
     "bg_padding" - Background padding in pixels - <int>
-    "x_offset" - offsets burnin vertically by entered pixels from border - <int>
-    "y_offset" - offsets burnin horizontally by entered pixels from border - <int>
+    "x_offset" - offsets burnin vertically by entered pixels
+        from border - <int>
+    "y_offset" - offsets burnin horizontally by entered pixels
+        from border - <int>
     - x_offset & y_offset should be set at least to same value as bg_padding!!
     "font" - Font Family for text - <font>
     "font_size" - Font size in pixels - <int>
     "font_color" - Color of text - <color>
     "frame_offset" - Default start frame - <int>
-    - required IF start frame is not set when using frames or timecode burnins
+    - required IF start frame is not set when using frames
+        or timecode burnins

-    On initializing class can be set General options through "options_init" arg.
-    General can be overridden when adding burnin
+    On initializing class can be set General options through
+    "options_init" arg.
+    General options can be overridden when adding burnin.

     '''
     TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
|
|||
|
|
@ -190,6 +190,7 @@ def get_current_project_settings():
|
|||
project_name = os.environ.get("AYON_PROJECT_NAME")
|
||||
if not project_name:
|
||||
raise ValueError(
|
||||
"Missing context project in environemt variable `AYON_PROJECT_NAME`."
|
||||
"Missing context project in environment"
|
||||
" variable `AYON_PROJECT_NAME`."
|
||||
)
|
||||
return get_project_settings(project_name)
|
||||
|
|
|
|||
|
|
@@ -104,7 +104,7 @@ class ProductNameValidator(RegularExpressionValidatorClass):

     def validate(self, text, pos):
         results = super(ProductNameValidator, self).validate(text, pos)
-        if results[0] == self.Invalid:
+        if results[0] == RegularExpressionValidatorClass.Invalid:
             self.invalid.emit(self.invalid_chars(text))
         return results
@@ -217,7 +217,9 @@ class ProductTypeDescriptionWidget(QtWidgets.QWidget):

         product_type_label = QtWidgets.QLabel(self)
         product_type_label.setObjectName("CreatorProductTypeLabel")
-        product_type_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
+        product_type_label.setAlignment(
+            QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft
+        )

         help_label = QtWidgets.QLabel(self)
         help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
|
|
@ -21,9 +21,9 @@ except ImportError:
|
|||
|
||||
Application action based on 'ApplicationManager' system.
|
||||
|
||||
Handling of applications in launcher is not ideal and should be completely
|
||||
redone from scratch. This is just a temporary solution to keep backwards
|
||||
compatibility with AYON launcher.
|
||||
Handling of applications in launcher is not ideal and should be
|
||||
completely redone from scratch. This is just a temporary solution
|
||||
to keep backwards compatibility with AYON launcher.
|
||||
|
||||
Todos:
|
||||
Move handling of errors to frontend.
|
||||
|
|
|
|||
|
|
@@ -517,7 +517,11 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
     def setItemCheckState(self, index, state):
         self.setItemData(index, state, QtCore.Qt.CheckStateRole)

-    def set_value(self, values: Optional[Iterable[Any]], role: Optional[int] = None):
+    def set_value(
+        self,
+        values: Optional[Iterable[Any]],
+        role: Optional[int] = None,
+    ):
         if role is None:
             role = self._value_role
|
|||
version_item.version_id
|
||||
for version_item in last_version_by_product_id.values()
|
||||
}
|
||||
repre_count_by_version_id = self._controller.get_versions_representation_count(
|
||||
project_name, version_ids
|
||||
repre_count_by_version_id = (
|
||||
self._controller.get_versions_representation_count(
|
||||
project_name, version_ids
|
||||
)
|
||||
)
|
||||
sync_availability_by_version_id = (
|
||||
self._controller.get_version_sync_availability(
|
||||
|
|
|
|||
|
|
@@ -339,7 +339,9 @@ class OverviewWidget(QtWidgets.QFrame):
         self._change_visibility_for_state()
         self._product_content_layout.addWidget(self._create_widget, 7)
         self._product_content_layout.addWidget(self._product_views_widget, 3)
-        self._product_content_layout.addWidget(self._product_attributes_wrap, 7)
+        self._product_content_layout.addWidget(
+            self._product_attributes_wrap, 7
+        )

     def _change_visibility_for_state(self):
         self._create_widget.setVisible(
@@ -214,8 +214,8 @@ class TasksCombobox(QtWidgets.QComboBox):
     Combobox gives ability to select only from intersection of task names for
     folder paths in selected instances.

-    If folder paths in selected instances does not have same tasks then combobox
-    will be empty.
+    If folder paths in selected instances does not have same tasks
+    then combobox will be empty.
     """
     value_changed = QtCore.Signal()
@@ -604,7 +604,7 @@ class VariantInputWidget(PlaceholderLineEdit):


 class GlobalAttrsWidget(QtWidgets.QWidget):
-    """Global attributes mainly to define context and product name of instances.
+    """Global attributes to define context and product name of instances.

     product name is or may be affected on context. Gives abiity to modify
     context and product name of instance. This change is not autopromoted but
@@ -22,8 +22,8 @@ class TasksModel(QtGui.QStandardItemModel):
     tasks with same names then model is empty too.

     Args:
-        controller (AbstractPublisherFrontend): Controller which handles creation and
-            publishing.
+        controller (AbstractPublisherFrontend): Controller which handles
+            creation and publishing.

     """
     def __init__(
@@ -998,7 +998,11 @@ class PublisherWindow(QtWidgets.QDialog):
             new_item["label"] = new_item.pop("creator_label")
             new_item["identifier"] = new_item.pop("creator_identifier")
             new_failed_info.append(new_item)
-        self.add_error_message_dialog(event["title"], new_failed_info, "Creator:")
+        self.add_error_message_dialog(
+            event["title"],
+            new_failed_info,
+            "Creator:"
+        )

     def _on_convertor_error(self, event):
         new_failed_info = []
@ -368,8 +368,8 @@ class ContainersModel:
        try:
            uuid.UUID(repre_id)
        except (ValueError, TypeError, AttributeError):
-            # Fake not existing representation id so container is shown in UI
-            # but as invalid
+            # Fake not existing representation id so container
+            # is shown in UI but as invalid
            item.representation_id = invalid_ids_mapping.setdefault(
                repre_id, uuid.uuid4().hex
            )
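The reworded comment documents a handy pattern: ids that fail `uuid.UUID` validation are swapped for generated ones so the container still shows up in the UI (flagged invalid), and `setdefault` keeps the substitution stable for repeated lookups of the same bad id. A self-contained sketch of the same idea; the function name is an assumption:

    import uuid

    invalid_ids_mapping = {}

    def safe_representation_id(repre_id):
        try:
            uuid.UUID(repre_id)
            return repre_id
        except (ValueError, TypeError, AttributeError):
            # The same invalid input always maps to the same fake id.
            return invalid_ids_mapping.setdefault(repre_id, uuid.uuid4().hex)

    print(safe_representation_id("not-a-uuid"))
    print(safe_representation_id("not-a-uuid"))  # same fake id as above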
@ -556,9 +556,10 @@ class _IconsCache:
            log.info("Didn't find icon \"{}\"".format(icon_name))

        elif used_variant != icon_name:
-            log.debug("Icon \"{}\" was not found \"{}\" is used instead".format(
-                icon_name, used_variant
-            ))
+            log.debug(
+                f"Icon \"{icon_name}\" was not found"
+                f" \"{used_variant}\" is used instead"
+            )

        cls._qtawesome_cache[full_icon_name] = icon
        return icon
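The replacement relies on Python's implicit concatenation of adjacent string literals, which works for f-strings too. A quick standalone demonstration with sample values:

    icon_name, used_variant = "fa.play", "fa5s.play"  # sample values

    message = (
        f"Icon \"{icon_name}\" was not found"
        f" \"{used_variant}\" is used instead"
    )
    # Adjacent literals are joined into a single string at compile time.
    print(message)  # Icon "fa.play" was not found "fa5s.play" is used instead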
@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'core' version."""
-__version__ = "1.0.8+dev"
+__version__ = "1.0.10+dev"
@ -15,6 +15,6 @@ qtawesome = "0.7.3"
aiohttp-middlewares = "^2.0.0"
Click = "^8"
OpenTimelineIO = "0.16.0"
-opencolorio = "^2.3.2"
+opencolorio = "^2.3.2,<2.4.0"
Pillow = "9.5.0"
websocket-client = ">=0.40.0,<2"
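The tightened pin narrows Poetry's caret range: `^2.3.2` alone permits anything from 2.3.2 up to (but excluding) 3.0.0, while the added `<2.4.0` caps it below 2.4. A rough equivalent expressed as a PEP 440 range with the `packaging` library; the translated specifier is my reading of Poetry's caret semantics, not taken from this diff:

    from packaging.specifiers import SpecifierSet

    # Approximate PEP 440 form of Poetry's "^2.3.2,<2.4.0".
    spec = SpecifierSet(">=2.3.2,<2.4.0")

    print(spec.contains("2.3.5"))  # True
    print(spec.contains("2.4.0"))  # False, excluded by the new cap
    print(spec.contains("2.9.0"))  # False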
@ -1,6 +1,6 @@
name = "core"
title = "Core"
-version = "1.0.8+dev"
+version = "1.0.10+dev"

client_dir = "ayon_core"
@ -5,7 +5,7 @@

[tool.poetry]
name = "ayon-core"
-version = "1.0.8+dev"
+version = "1.0.10+dev"
description = ""
authors = ["Ynput Team <team@ynput.io>"]
readme = "README.md"
@ -68,7 +68,7 @@ target-version = "py39"

[tool.ruff.lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
-select = ["E4", "E7", "E9", "F", "W"]
+select = ["E", "F", "W"]
ignore = []

# Allow fix for all enabled rules (when `--fix`) is provided.
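Widening `select` from `["E4", "E7", "E9", "F", "W"]` to `["E", "F", "W"]` turns on the full pycodestyle `E` set, including `E501` (line too long), which plausibly motivated the many line-wrapping edits elsewhere in this commit. A sketch of the kind of line `E501` flags under the project's configured line length, with a wrapped form that passes:

    # Too long for E501 under a typical configured limit:
    # description = "Additional custom tags that will be added to the created representation."

    # Wrapped form using implicit string concatenation:
    description = (
        "Additional custom tags that will be added"
        " to the created representation."
    )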
@ -358,7 +358,10 @@ class ExtractOIIOTranscodeOutputModel(BaseSettingsModel):
    custom_tags: list[str] = SettingsField(
        default_factory=list,
        title="Custom Tags",
-        description="Additional custom tags that will be added to the created representation."
+        description=(
+            "Additional custom tags that will be added"
+            " to the created representation."
+        )
    )

@ -892,9 +895,11 @@ class PublishPuginsModel(BaseSettingsModel):
        default_factory=CollectFramesFixDefModel,
        title="Collect Frames to Fix",
    )
-    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = SettingsField(
-        default_factory=CollectUSDLayerContributionsModel,
-        title="Collect USD Layer Contributions",
+    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = (
+        SettingsField(
+            default_factory=CollectUSDLayerContributionsModel,
+            title="Collect USD Layer Contributions",
+        )
    )
    ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
        default_factory=ValidateBaseModel,
@ -1214,7 +1219,9 @@ DEFAULT_PUBLISH_VALUES = {
        "TOP_RIGHT": "{anatomy[version]}",
        "BOTTOM_LEFT": "{username}",
        "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
        "filter": {
            "families": [],
            "tags": []
@ -1240,7 +1247,9 @@ DEFAULT_PUBLISH_VALUES = {
        "TOP_RIGHT": "{anatomy[version]}",
        "BOTTOM_LEFT": "{username}",
        "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
        "filter": {
            "families": [],
            "tags": []
@ -83,8 +83,8 @@ class CreatorToolModel(BaseSettingsModel):
    filter_creator_profiles: list[FilterCreatorProfile] = SettingsField(
        default_factory=list,
        title="Filter creator profiles",
-        description="Allowed list of creator labels that will be only shown if "
-                    "profile matches context."
+        description="Allowed list of creator labels that will be only shown"
+                    " if profile matches context."
    )

    @validator("product_types_smart_select")
@ -426,7 +426,9 @@ DEFAULT_TOOLS_VALUES = {
            ],
            "task_types": [],
            "tasks": [],
-            "template": "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+            "template": (
+                "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+            )
        },
        {
            "product_types": [
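The wrapped template uses `str.format`-style keyed lookups: `{product[type]}` indexes into a mapping passed as `product`. A small demonstration with hypothetical values:

    template = "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"

    result = template.format(
        product={"type": "render"},
        Task={"name": "Compositing"},
        Renderlayer="beauty",
        Renderpass="diffuse",
    )
    print(result)  # renderCompositing_beauty_diffuse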
@ -130,19 +130,20 @@ def test_image_sequence_and_handles_out_of_range():

    expected = [
        # 5 head black frames generated from gap (991-995)
-        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",

        # 9 tail back frames generated from gap (1097-1105)
-        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 1097 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 1097 C:/result/output.%03d.jpg",

        # Report from source tiff (996-1096)
        # 996-1000 = additional 5 head frames
        # 1001-1095 = source range conformed to 25fps
        # 1096-1096 = additional 1 tail frames
        "/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
-        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996 C:/result/output.%03d.jpg"
+        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
+        f" C:/result/output.%03d.jpg"
    ]

    assert calls == expected
@ -179,13 +180,13 @@ def test_short_movie_head_gap_handles():

    expected = [
        # 10 head black frames generated from gap (991-1000)
-        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",

        # source range + 10 tail frames
        # duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
-        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -start_number 1001 "
-        "C:/result/output.%03d.jpg"
+        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
+        " -start_number 1001 C:/result/output.%03d.jpg"
    ]

    assert calls == expected
@ -208,7 +209,8 @@ def test_short_movie_tail_gap_handles():
        # 10 head frames + source range
        # duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
        "/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
-        "C:\\data\\qt_no_tc_24fps.mov -start_number 991 C:/result/output.%03d.jpg"
+        "C:\\data\\qt_no_tc_24fps.mov -start_number 991"
+        " C:/result/output.%03d.jpg"
    ]

    assert calls == expected
@ -234,10 +236,12 @@ def test_multiple_review_clips_no_gap():

    expected = [
        # 10 head black frames generated from gap (991-1000)
-        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune '
+        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
+        ' -i color=c=black:s=1280x720 -tune '
        'stillimage -start_number 991 C:/result/output.%03d.jpg',

-        # Alternance 25fps tiff sequence and 24fps exr sequence for 100 frames each
+        # Alternance 25fps tiff sequence and 24fps exr sequence
+        # for 100 frames each
        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
        f'C:\\no_tc{os.sep}output.%04d.tif '
        '-start_number 1001 C:/result/output.%03d.jpg',
@ -315,7 +319,8 @@ def test_multiple_review_clips_with_gap():

    expected = [
        # Gap on review track (12 frames)
-        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi -i color=c=black:s=1280x720 -tune '
+        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
+        ' -i color=c=black:s=1280x720 -tune '
        'stillimage -start_number 991 C:/result/output.%03d.jpg',

        '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '
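The expected commands in these tests all follow one conversion: duration in seconds equals frame count divided by fps, which is where the `-t` values come from (5/25 = 0.2, 9/25 = 0.36, 76/24 ≈ 3.1667). A tiny helper that reproduces them; the function name is mine:

    def frames_to_seconds(frame_count, fps):
        """Seconds ffmpeg expects for `-t` when covering N frames at fps."""
        return frame_count / fps

    print(frames_to_seconds(5, 25.0))   # 0.2, matches "-t 0.2 -r 25.0"
    print(frames_to_seconds(9, 25.0))   # 0.36, matches "-t 0.36 -r 25.0"
    print(frames_to_seconds(60, 25.0))  # 2.4, matches "-t 2.4"
    print(frames_to_seconds(76, 24.0))  # 3.1666666666666665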