Merge branch 'develop' into enhancement/Load-and-manage-products-from-a-library-project

Commit 51ec36e66f

41 changed files with 633 additions and 275 deletions
.github/workflows/assign_pr_to_project.yml (vendored, new file, +48)
@@ -0,0 +1,48 @@
+name: 🔸Auto assign pr
+on:
+  workflow_dispatch:
+    inputs:
+      pr_number:
+        type: string
+        description: "Run workflow for this PR number"
+        required: true
+      project_id:
+        type: string
+        description: "Github Project Number"
+        required: true
+        default: "16"
+  pull_request:
+    types:
+      - opened
+
+env:
+  GH_TOKEN: ${{ github.token }}
+
+jobs:
+  get-pr-repo:
+    runs-on: ubuntu-latest
+    outputs:
+      pr_repo_name: ${{ steps.get-repo-name.outputs.repo_name || github.event.pull_request.head.repo.full_name }}
+
+    # INFO `github.event.pull_request.head.repo.full_name` is not available on manual triggered (dispatched) runs
+    steps:
+      - name: Get PR repo name
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        id: get-repo-name
+        run: |
+          repo_name=$(gh pr view ${{ inputs.pr_number }} --json headRepository,headRepositoryOwner --repo ${{ github.repository }} | jq -r '.headRepositoryOwner.login + "/" + .headRepository.name')
+          echo "repo_name=$repo_name" >> $GITHUB_OUTPUT
+
+  auto-assign-pr:
+    needs:
+      - get-pr-repo
+    if: ${{ needs.get-pr-repo.outputs.pr_repo_name == github.repository }}
+    uses: ynput/ops-repo-automation/.github/workflows/pr_to_project.yml@main
+    with:
+      repo: "${{ github.repository }}"
+      project_id: ${{ inputs.project_id != '' && fromJSON(inputs.project_id) || 16 }}
+      pull_request_number: ${{ github.event.pull_request.number || fromJSON(inputs.pr_number) }}
+    secrets:
+      # INFO fallback to default `github.token` is required for PRs from forks
+      # INFO organization secrets won't be available to forks
+      token: ${{ secrets.YNPUT_BOT_TOKEN || github.token}}
.github/workflows/validate_pr_labels.yml (vendored, new file, +18)
@@ -0,0 +1,18 @@
+name: 🔎 Validate PR Labels
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - labeled
+      - unlabeled
+
+jobs:
+  validate-type-label:
+    uses: ynput/ops-repo-automation/.github/workflows/validate_pr_labels.yml@main
+    with:
+      repo: "${{ github.repository }}"
+      pull_request_number: ${{ github.event.pull_request.number }}
+      query_prefix: "type: "
+    secrets:
+      token: ${{ secrets.YNPUT_BOT_TOKEN }}
@@ -535,8 +535,8 @@ class AYONAddon(ABC):
         Implementation of this method is optional.

         Note:
-            The logic can be similar to logic in tray, but tray does not require
-            to be logged in.
+            The logic can be similar to logic in tray, but tray does not
+            require to be logged in.

         Args:
             process_context (ProcessContext): Context of child
@@ -146,7 +146,8 @@ def publish_report_viewer():
 @main_cli.command()
 @click.argument("output_path")
 @click.option("--project", help="Define project context")
-@click.option("--folder", help="Define folder in project (project must be set)")
+@click.option(
+    "--folder", help="Define folder in project (project must be set)")
 @click.option(
     "--strict",
     is_flag=True,
@@ -616,7 +616,9 @@ class EnumDef(AbstractAttrDef):
         return data

     @staticmethod
-    def prepare_enum_items(items: "EnumItemsInputType") -> List["EnumItemDict"]:
+    def prepare_enum_items(
+        items: "EnumItemsInputType"
+    ) -> List["EnumItemDict"]:
         """Convert items to unified structure.

         Output is a list where each item is dictionary with 'value'
@@ -276,12 +276,7 @@ class ASettingRegistry(ABC):
     @abstractmethod
     def _delete_item(self, name):
         # type: (str) -> None
-        """Delete item from settings.
-
-        Note:
-            see :meth:`ayon_core.lib.user_settings.ARegistrySettings.delete_item`
-
-        """
+        """Delete item from settings."""
         pass

     def __delitem__(self, name):
@@ -433,12 +428,7 @@ class IniSettingRegistry(ASettingRegistry):
         config.write(cfg)

     def _delete_item(self, name):
-        """Delete item from default section.
-
-        Note:
-            See :meth:`~ayon_core.lib.IniSettingsRegistry.delete_item_from_section`
-
-        """
+        """Delete item from default section."""
         self.delete_item_from_section("MAIN", name)
@@ -1,9 +1,15 @@
 import os
 import re
+import copy
 import numbers
+import warnings
+from string import Formatter
+import typing
+from typing import List, Dict, Any, Set
+
+if typing.TYPE_CHECKING:
+    from typing import Union

-KEY_PATTERN = re.compile(r"(\{.*?[^{0]*\})")
-KEY_PADDING_PATTERN = re.compile(r"([^:]+)\S+[><]\S+")
 SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)")
 OPTIONAL_PATTERN = re.compile(r"(<.*?[^{0]*>)[^0-9]*?")
@@ -18,9 +24,7 @@ class TemplateUnsolved(Exception):
     def __init__(self, template, missing_keys, invalid_types):
         invalid_type_items = []
         for _key, _type in invalid_types.items():
-            invalid_type_items.append(
-                "\"{0}\" {1}".format(_key, str(_type))
-            )
+            invalid_type_items.append(f"\"{_key}\" {str(_type)}")

         invalid_types_msg = ""
         if invalid_type_items:
@@ -33,31 +37,32 @@ class TemplateUnsolved(Exception):
         missing_keys_msg = self.missing_keys_msg.format(
             ", ".join(missing_keys)
         )
-        super(TemplateUnsolved, self).__init__(
+        super().__init__(
             self.msg.format(template, missing_keys_msg, invalid_types_msg)
         )


 class StringTemplate:
     """String that can be formatted."""
-    def __init__(self, template):
+    def __init__(self, template: str):
         if not isinstance(template, str):
-            raise TypeError("<{}> argument must be a string, not {}.".format(
-                self.__class__.__name__, str(type(template))
-            ))
+            raise TypeError(
+                f"<{self.__class__.__name__}> argument must be a string,"
+                f" not {str(type(template))}."
+            )

-        self._template = template
+        self._template: str = template
         parts = []
-        last_end_idx = 0
-        for item in KEY_PATTERN.finditer(template):
-            start, end = item.span()
-            if start > last_end_idx:
-                parts.append(template[last_end_idx:start])
-            parts.append(FormattingPart(template[start:end]))
-            last_end_idx = end
-
-        if last_end_idx < len(template):
-            parts.append(template[last_end_idx:len(template)])
+        formatter = Formatter()
+        for item in formatter.parse(template):
+            literal_text, field_name, format_spec, conversion = item
+            if literal_text:
+                parts.append(literal_text)
+            if field_name:
+                parts.append(
+                    FormattingPart(field_name, format_spec, conversion)
+                )

         new_parts = []
         for part in parts:
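The constructor now tokenizes templates with the standard library's `string.Formatter` instead of the custom regexes removed above. For orientation, this is what `Formatter.parse` yields for a typical template (standard library behavior, runnable as-is):

    from string import Formatter

    # Each tuple is (literal_text, field_name, format_spec, conversion),
    # exactly the unpacking used in StringTemplate.__init__ above.
    for item in Formatter().parse("v{version:0>3}_{project[name]}"):
        print(item)
    # ('v', 'version', '0>3', None)
    # ('_', 'project[name]', '', None)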
@@ -77,15 +82,17 @@ class StringTemplate:
             if substr:
                 new_parts.append(substr)

-        self._parts = self.find_optional_parts(new_parts)
+        self._parts: List["Union[str, OptionalPart, FormattingPart]"] = (
+            self.find_optional_parts(new_parts)
+        )

-    def __str__(self):
+    def __str__(self) -> str:
         return self.template

-    def __repr__(self):
-        return "<{}> {}".format(self.__class__.__name__, self.template)
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}> {self.template}"

-    def __contains__(self, other):
+    def __contains__(self, other: str) -> bool:
         return other in self.template

     def replace(self, *args, **kwargs):
@@ -93,10 +100,10 @@ class StringTemplate:
             return self

     @property
-    def template(self):
+    def template(self) -> str:
         return self._template

-    def format(self, data):
+    def format(self, data: Dict[str, Any]) -> "TemplateResult":
         """ Figure out with whole formatting.

         Separate advanced keys (*Like '{project[name]}') from string which must
@@ -108,6 +115,7 @@ class StringTemplate:
         Returns:
             TemplateResult: Filled or partially filled template containing all
                 data needed or missing for filling template.
+
         """
         result = TemplatePartResult()
         for part in self._parts:
@@ -135,23 +143,29 @@ class StringTemplate:
             invalid_types
         )

-    def format_strict(self, *args, **kwargs):
-        result = self.format(*args, **kwargs)
+    def format_strict(self, data: Dict[str, Any]) -> "TemplateResult":
+        result = self.format(data)
         result.validate()
         return result

     @classmethod
-    def format_template(cls, template, data):
+    def format_template(
+        cls, template: str, data: Dict[str, Any]
+    ) -> "TemplateResult":
         objected_template = cls(template)
         return objected_template.format(data)

     @classmethod
-    def format_strict_template(cls, template, data):
+    def format_strict_template(
+        cls, template: str, data: Dict[str, Any]
+    ) -> "TemplateResult":
         objected_template = cls(template)
         return objected_template.format_strict(data)

     @staticmethod
-    def find_optional_parts(parts):
+    def find_optional_parts(
+        parts: List["Union[str, FormattingPart]"]
+    ) -> List["Union[str, OptionalPart, FormattingPart]"]:
         new_parts = []
         tmp_parts = {}
         counted_symb = -1
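Taken together, the typed API reads roughly as follows. This usage sketch is illustrative only; the exact content of the missing-key report is inferred, not quoted from the diff (the `ayon_core.lib` import itself appears in a later hunk):

    from ayon_core.lib import StringTemplate

    template = StringTemplate("{project[name]}_v{version:0>3}")
    result = template.format({"project": {"name": "demo"}, "version": 7})
    print(str(result))    # "demo_v007"
    print(result.solved)  # True

    # 'format_strict' validates the result and raises when keys are
    # missing, instead of returning a partially filled result.
    partial = StringTemplate.format_template(
        "{project[name]}_v{version:0>3}", {"version": 7}
    )
    print(partial.solved)        # False
    print(partial.missing_keys)  # reports the unresolved project key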
@@ -216,11 +230,11 @@ class TemplateResult(str):
         of number.
     """

-    used_values = None
-    solved = None
-    template = None
-    missing_keys = None
-    invalid_types = None
+    used_values: Dict[str, Any] = None
+    solved: bool = None
+    template: str = None
+    missing_keys: List[str] = None
+    invalid_types: Dict[str, Any] = None

     def __new__(
         cls, filled_template, template, solved,
@@ -248,7 +262,7 @@ class TemplateResult(str):
             self.invalid_types
         )

-    def copy(self):
+    def copy(self) -> "TemplateResult":
         cls = self.__class__
         return cls(
             str(self),
@@ -259,7 +273,7 @@ class TemplateResult(str):
             self.invalid_types
         )

-    def normalized(self):
+    def normalized(self) -> "TemplateResult":
         """Convert to normalized path."""

         cls = self.__class__
@@ -275,27 +289,28 @@ class TemplateResult(str):

 class TemplatePartResult:
     """Result to store result of template parts."""
-    def __init__(self, optional=False):
+    def __init__(self, optional: bool = False):
         # Missing keys or invalid value types of required keys
-        self._missing_keys = set()
-        self._invalid_types = {}
+        self._missing_keys: Set[str] = set()
+        self._invalid_types: Dict[str, Any] = {}
         # Missing keys or invalid value types of optional keys
-        self._missing_optional_keys = set()
-        self._invalid_optional_types = {}
+        self._missing_optional_keys: Set[str] = set()
+        self._invalid_optional_types: Dict[str, Any] = {}

         # Used values stored by key with origin type
         # - key without any padding or key modifiers
         # - value from filling data
         # Example: {"version": 1}
-        self._used_values = {}
+        self._used_values: Dict[str, Any] = {}
         # Used values stored by key with all modifirs
         # - value is already formatted string
         # Example: {"version:0>3": "001"}
-        self._realy_used_values = {}
+        self._really_used_values: Dict[str, Any] = {}
         # Concatenated string output after formatting
-        self._output = ""
+        self._output: str = ""
         # Is this result from optional part
-        self._optional = True
+        # TODO find out why we don't use 'optional' from args
+        self._optional: bool = True

     def add_output(self, other):
         if isinstance(other, str):
@@ -313,7 +328,7 @@ class TemplatePartResult:
             if other.optional and not other.solved:
                 return
             self._used_values.update(other.used_values)
-            self._realy_used_values.update(other.realy_used_values)
+            self._really_used_values.update(other.really_used_values)

         else:
             raise TypeError("Cannot add data from \"{}\" to \"{}\"".format(
@@ -321,7 +336,7 @@ class TemplatePartResult:
         )

     @property
-    def solved(self):
+    def solved(self) -> bool:
         if self.optional:
             if (
                 len(self.missing_optional_keys) > 0
@@ -334,45 +349,53 @@ class TemplatePartResult:
         )

     @property
-    def optional(self):
+    def optional(self) -> bool:
         return self._optional

     @property
-    def output(self):
+    def output(self) -> str:
         return self._output

     @property
-    def missing_keys(self):
+    def missing_keys(self) -> Set[str]:
         return self._missing_keys

     @property
-    def missing_optional_keys(self):
+    def missing_optional_keys(self) -> Set[str]:
         return self._missing_optional_keys

     @property
-    def invalid_types(self):
+    def invalid_types(self) -> Dict[str, Any]:
         return self._invalid_types

     @property
-    def invalid_optional_types(self):
+    def invalid_optional_types(self) -> Dict[str, Any]:
         return self._invalid_optional_types

     @property
-    def realy_used_values(self):
-        return self._realy_used_values
+    def really_used_values(self) -> Dict[str, Any]:
+        return self._really_used_values

     @property
-    def used_values(self):
+    def realy_used_values(self) -> Dict[str, Any]:
+        warnings.warn(
+            "Property 'realy_used_values' is deprecated."
+            " Use 'really_used_values' instead.",
+            DeprecationWarning
+        )
+        return self._really_used_values
+
+    @property
+    def used_values(self) -> Dict[str, Any]:
         return self._used_values

     @staticmethod
-    def split_keys_to_subdicts(values):
+    def split_keys_to_subdicts(values: Dict[str, Any]) -> Dict[str, Any]:
         output = {}
+        formatter = Formatter()
         for key, value in values.items():
-            key_padding = list(KEY_PADDING_PATTERN.findall(key))
-            if key_padding:
-                key = key_padding[0]
-            key_subdict = list(SUB_DICT_PATTERN.findall(key))
+            _, field_name, _, _ = next(formatter.parse(f"{{{key}}}"))
+            key_subdict = list(SUB_DICT_PATTERN.findall(field_name))
             data = output
             last_key = key_subdict.pop(-1)
             for subkey in key_subdict:
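The misspelled `realy_used_values` accessor is kept as a shim that forwards to the corrected name. A minimal standalone sketch of the same deprecation pattern, showing that old call sites keep working but now emit a DeprecationWarning:

    import warnings

    class Example:
        @property
        def really_used_values(self):
            return {"version:0>3": "007"}

        @property
        def realy_used_values(self):  # deprecated misspelling, kept for compat
            warnings.warn(
                "Property 'realy_used_values' is deprecated."
                " Use 'really_used_values' instead.",
                DeprecationWarning
            )
            return self.really_used_values

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        values = Example().realy_used_values  # still works
    assert caught[0].category is DeprecationWarning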
@@ -382,7 +405,7 @@ class TemplatePartResult:
             data[last_key] = value
         return output

-    def get_clean_used_values(self):
+    def get_clean_used_values(self) -> Dict[str, Any]:
         new_used_values = {}
         for key, value in self.used_values.items():
             if isinstance(value, FormatObject):
@@ -391,19 +414,27 @@ class TemplatePartResult:

         return self.split_keys_to_subdicts(new_used_values)

-    def add_realy_used_value(self, key, value):
-        self._realy_used_values[key] = value
+    def add_really_used_value(self, key: str, value: Any):
+        self._really_used_values[key] = value

-    def add_used_value(self, key, value):
+    def add_realy_used_value(self, key: str, value: Any):
+        warnings.warn(
+            "Method 'add_realy_used_value' is deprecated."
+            " Use 'add_really_used_value' instead.",
+            DeprecationWarning
+        )
+        self.add_really_used_value(key, value)
+
+    def add_used_value(self, key: str, value: Any):
         self._used_values[key] = value

-    def add_missing_key(self, key):
+    def add_missing_key(self, key: str):
         if self._optional:
             self._missing_optional_keys.add(key)
         else:
             self._missing_keys.add(key)

-    def add_invalid_type(self, key, value):
+    def add_invalid_type(self, key: str, value: Any):
         if self._optional:
             self._invalid_optional_types[key] = type(value)
         else:
@@ -421,10 +452,10 @@ class FormatObject:
     def __format__(self, *args, **kwargs):
         return self.value.__format__(*args, **kwargs)

-    def __str__(self):
+    def __str__(self) -> str:
         return str(self.value)

-    def __repr__(self):
+    def __repr__(self) -> str:
         return self.__str__()
@@ -434,23 +465,44 @@ class FormattingPart:
     Containt only single key to format e.g. "{project[name]}".

     Args:
-        template(str): String containing the formatting key.
+        field_name (str): Name of key.
+        format_spec (str): Format specification.
+        conversion (Union[str, None]): Conversion type.

     """
-    def __init__(self, template):
-        self._template = template
+    def __init__(
+        self,
+        field_name: str,
+        format_spec: str,
+        conversion: "Union[str, None]",
+    ):
+        format_spec_v = ""
+        if format_spec:
+            format_spec_v = f":{format_spec}"
+        conversion_v = ""
+        if conversion:
+            conversion_v = f"!{conversion}"
+
+        self._field_name: str = field_name
+        self._format_spec: str = format_spec_v
+        self._conversion: str = conversion_v
+
+        template_base = f"{field_name}{format_spec_v}{conversion_v}"
+        self._template_base: str = template_base
+        self._template: str = f"{{{template_base}}}"

     @property
-    def template(self):
+    def template(self) -> str:
         return self._template

-    def __repr__(self):
+    def __repr__(self) -> str:
         return "<Format:{}>".format(self._template)

-    def __str__(self):
+    def __str__(self) -> str:
         return self._template

     @staticmethod
-    def validate_value_type(value):
+    def validate_value_type(value: Any) -> bool:
         """Check if value can be used for formatting of single key."""
         if isinstance(value, (numbers.Number, FormatObject)):
             return True
@@ -461,7 +513,7 @@ class FormattingPart:
         return False

     @staticmethod
-    def validate_key_is_matched(key):
+    def validate_key_is_matched(key: str) -> bool:
         """Validate that opening has closing at correct place.

         Future-proof, only square brackets are currently used in keys.
@@ -488,16 +540,29 @@ class FormattingPart:
                 return False
         return not queue

-    def format(self, data, result):
+    @staticmethod
+    def keys_to_template_base(keys: List[str]):
+        if not keys:
+            return None
+        # Create copy of keys
+        keys = list(keys)
+        template_base = keys.pop(0)
+        joined_keys = "".join([f"[{key}]" for key in keys])
+        return f"{template_base}{joined_keys}"
+
+    def format(
+        self, data: Dict[str, Any], result: TemplatePartResult
+    ) -> TemplatePartResult:
         """Format the formattings string.

         Args:
             data(dict): Data that should be used for formatting.
             result(TemplatePartResult): Object where result is stored.

         """
-        key = self.template[1:-1]
-        if key in result.realy_used_values:
-            result.add_output(result.realy_used_values[key])
+        key = self._template_base
+        if key in result.really_used_values:
+            result.add_output(result.really_used_values[key])
             return result

         # ensure key is properly formed [({})] properly closed.
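The new `keys_to_template_base` helper rebuilds the textual field name from a resolved key chain; its behavior follows directly from the code above and can be checked in isolation:

    def keys_to_template_base(keys):
        # Same logic as FormattingPart.keys_to_template_base in this diff.
        if not keys:
            return None
        keys = list(keys)
        template_base = keys.pop(0)
        joined_keys = "".join([f"[{key}]" for key in keys])
        return f"{template_base}{joined_keys}"

    print(keys_to_template_base(["project", "name"]))  # project[name]
    print(keys_to_template_base(["version"]))          # version
    print(keys_to_template_base([]))                   # None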
@@ -507,17 +572,38 @@ class FormattingPart:
             return result

         # check if key expects subdictionary keys (e.g. project[name])
-        existence_check = key
-        key_padding = list(KEY_PADDING_PATTERN.findall(existence_check))
-        if key_padding:
-            existence_check = key_padding[0]
-        key_subdict = list(SUB_DICT_PATTERN.findall(existence_check))
+        key_subdict = list(SUB_DICT_PATTERN.findall(self._field_name))

         value = data
         missing_key = False
         invalid_type = False
         used_keys = []
+        keys_to_value = None
+        used_value = None
+
         for sub_key in key_subdict:
+            if isinstance(value, list):
+                if not sub_key.lstrip("-").isdigit():
+                    invalid_type = True
+                    break
+                sub_key = int(sub_key)
+                if sub_key < 0:
+                    sub_key = len(value) + sub_key
+
+                invalid = 0 > sub_key < len(data)
+                if invalid:
+                    used_keys.append(sub_key)
+                    missing_key = True
+                    break
+
+                used_keys.append(sub_key)
+                if keys_to_value is None:
+                    keys_to_value = list(used_keys)
+                    keys_to_value.pop(-1)
+                    used_value = copy.deepcopy(value)
+                value = value[sub_key]
+                continue
+
             if (
                 value is None
                 or (hasattr(value, "items") and sub_key not in value)
@@ -533,45 +619,57 @@ class FormattingPart:
             used_keys.append(sub_key)
             value = value.get(sub_key)

+        field_name = key_subdict[0]
+        if used_keys:
+            field_name = self.keys_to_template_base(used_keys)
+
         if missing_key or invalid_type:
-            if len(used_keys) == 0:
-                invalid_key = key_subdict[0]
-            else:
-                invalid_key = used_keys[0]
-                for idx, sub_key in enumerate(used_keys):
-                    if idx == 0:
-                        continue
-                    invalid_key += "[{0}]".format(sub_key)
-
             if missing_key:
-                result.add_missing_key(invalid_key)
+                result.add_missing_key(field_name)

             elif invalid_type:
-                result.add_invalid_type(invalid_key, value)
+                result.add_invalid_type(field_name, value)

             result.add_output(self.template)
             return result

-        if self.validate_value_type(value):
-            fill_data = {}
-            first_value = True
-            for used_key in reversed(used_keys):
-                if first_value:
-                    first_value = False
-                    fill_data[used_key] = value
-                else:
-                    _fill_data = {used_key: fill_data}
-                    fill_data = _fill_data
-
-            formatted_value = self.template.format(**fill_data)
-            result.add_realy_used_value(key, formatted_value)
-            result.add_used_value(existence_check, formatted_value)
-            result.add_output(formatted_value)
+        if not self.validate_value_type(value):
+            result.add_invalid_type(key, value)
+            result.add_output(self.template)
             return result

-        result.add_invalid_type(key, value)
-        result.add_output(self.template)
+        fill_data = root_fill_data = {}
+        parent_fill_data = None
+        parent_key = None
+        fill_value = data
+        value_filled = False
+        for used_key in used_keys:
+            if isinstance(fill_value, list):
+                parent_fill_data[parent_key] = fill_value
+                value_filled = True
+                break
+            fill_value = fill_value[used_key]
+            parent_fill_data = fill_data
+            fill_data = parent_fill_data.setdefault(used_key, {})
+            parent_key = used_key
+
+        if not value_filled:
+            parent_fill_data[used_keys[-1]] = value
+
+        template = f"{{{field_name}{self._format_spec}{self._conversion}}}"
+        formatted_value = template.format(**root_fill_data)
+        used_key = key
+        if keys_to_value is not None:
+            used_key = self.keys_to_template_base(keys_to_value)
+
+        if used_value is None:
+            if isinstance(value, numbers.Number):
+                used_value = value
+            else:
+                used_value = formatted_value
+        result.add_really_used_value(self._field_name, used_value)
+        result.add_used_value(used_key, used_value)
+        result.add_output(formatted_value)
         return result
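The rewritten body adds list-aware sub-key handling, including negative indices, which plain `str.format` does not support on lists. A quick comparison; the standard-library behavior is shown as runnable code, while the StringTemplate outcome is inferred from the list branch above:

    data = {"frames": [1001, 1002, 1003]}

    # Plain str.format treats the "-1" element index as a string key,
    # which fails on a list value:
    try:
        "{frames[-1]}".format(**data)
    except TypeError:
        print("str.format cannot index a list with -1")

    # FormattingPart.format converts such sub-keys to ints first
    # ("sub_key = len(value) + sub_key" for negatives), so a template
    # like "{frames[-1]}" can resolve to 1003 through StringTemplate.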
@@ -585,20 +683,27 @@ class OptionalPart:
     'FormattingPart'.
     """

-    def __init__(self, parts):
-        self._parts = parts
+    def __init__(
+        self,
+        parts: List["Union[str, OptionalPart, FormattingPart]"]
+    ):
+        self._parts: List["Union[str, OptionalPart, FormattingPart]"] = parts

     @property
-    def parts(self):
+    def parts(self) -> List["Union[str, OptionalPart, FormattingPart]"]:
         return self._parts

-    def __str__(self):
+    def __str__(self) -> str:
         return "<{}>".format("".join([str(p) for p in self._parts]))

-    def __repr__(self):
+    def __repr__(self) -> str:
         return "<Optional:{}>".format("".join([str(p) for p in self._parts]))

-    def format(self, data, result):
+    def format(
+        self,
+        data: Dict[str, Any],
+        result: TemplatePartResult,
+    ) -> TemplatePartResult:
         new_result = TemplatePartResult(True)
         for part in self._parts:
             if isinstance(part, str):
@@ -1283,12 +1283,16 @@ class CreateContext:

     @contextmanager
     def bulk_pre_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("pre_create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "pre_create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
     def bulk_create_attr_defs_change(self, sender=None):
-        with self._bulk_context("create_attrs_change", sender) as bulk_info:
+        with self._bulk_context(
+            "create_attrs_change", sender
+        ) as bulk_info:
             yield bulk_info

     @contextmanager
@@ -1946,9 +1950,9 @@ class CreateContext:
         creator are just removed from context.

         Args:
-            instances (List[CreatedInstance]): Instances that should be removed.
-                Remove logic is done using creator, which may require to
-                do other cleanup than just remove instance from context.
+            instances (List[CreatedInstance]): Instances that should be
+                removed. Remove logic is done using creator, which may require
+                to do other cleanup than just remove instance from context.
             sender (Optional[str]): Sender of the event.

         """
@@ -1,5 +1,9 @@
 import ayon_api
-from ayon_core.lib import StringTemplate, filter_profiles, prepare_template_data
+from ayon_core.lib import (
+    StringTemplate,
+    filter_profiles,
+    prepare_template_data,
+)
 from ayon_core.settings import get_project_settings

 from .constants import DEFAULT_PRODUCT_TEMPLATE
@@ -387,7 +387,7 @@ def get_representations_delivery_template_data(
     # convert representation entity. Fixed in 'ayon_api' 1.0.10.
     if isinstance(template_data, str):
         con = ayon_api.get_server_api_connection()
-        repre_entity = con._representation_conversion(repre_entity)
+        con._representation_conversion(repre_entity)
         template_data = repre_entity["context"]

     template_data.update(copy.deepcopy(general_template_data))
@@ -222,6 +222,9 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     source_range = otio_clip.source_range
     available_range_rate = available_range.start_time.rate
     media_in = available_range.start_time.value
+    available_range_start_frame = (
+        available_range.start_time.to_frames()
+    )

     # Temporary.
     # Some AYON custom OTIO exporter were implemented with relative
@@ -230,7 +233,7 @@ def remap_range_on_file_sequence(otio_clip, in_out_range):
     # while we are updating those.
     if (
         is_clip_from_media_sequence(otio_clip)
-        and otio_clip.available_range().start_time.to_frames() == media_ref.start_frame
+        and available_range_start_frame == media_ref.start_frame
         and source_range.start_time.to_frames() < media_ref.start_frame
     ):
         media_in = 0
@@ -303,8 +306,12 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
     rounded_av_rate = round(available_range_rate, 2)
     rounded_src_rate = round(source_range.start_time.rate, 2)
     if rounded_av_rate != rounded_src_rate:
-        conformed_src_in = source_range.start_time.rescaled_to(available_range_rate)
-        conformed_src_duration = source_range.duration.rescaled_to(available_range_rate)
+        conformed_src_in = source_range.start_time.rescaled_to(
+            available_range_rate
+        )
+        conformed_src_duration = source_range.duration.rescaled_to(
+            available_range_rate
+        )
         conformed_source_range = otio.opentime.TimeRange(
             start_time=conformed_src_in,
             duration=conformed_src_duration
@@ -18,13 +18,13 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:

     Example:
     >>> parse_ayon_entity_uri(
-    >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"  # noqa: E501
+    >>>     "ayon://test/char/villain?product=modelMain&version=2&representation=usd"
     >>> )
     {'project': 'test', 'folderPath': '/char/villain',
      'product': 'modelMain', 'version': 1,
      'representation': 'usd'}
     >>> parse_ayon_entity_uri(
-    >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"  # noqa: E501
+    >>>     "ayon+entity://project/folder?product=renderMain&version=3&representation=exr"
     >>> )
     {'project': 'project', 'folderPath': '/folder',
      'product': 'renderMain', 'version': 3,
@@ -34,7 +34,7 @@ def parse_ayon_entity_uri(uri: str) -> Optional[dict]:
         dict[str, Union[str, int]]: The individual key with their values as
             found in the ayon entity URI.

-    """
+    """  # noqa: E501

     if not (uri.startswith("ayon+entity://") or uri.startswith("ayon://")):
         return {}
@@ -7,8 +7,11 @@ from copy import deepcopy
 import attr
 import ayon_api
 import clique
-from ayon_core.lib import Logger
-from ayon_core.pipeline import get_current_project_name, get_representation_path
+from ayon_core.lib import Logger, collect_frames
+from ayon_core.pipeline import (
+    get_current_project_name,
+    get_representation_path,
+)
 from ayon_core.pipeline.create import get_product_name
 from ayon_core.pipeline.farm.patterning import match_aov_pattern
 from ayon_core.pipeline.publish import KnownPublishError
@@ -295,11 +298,17 @@ def _add_review_families(families):
     return families


-def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
-                            skip_integration_repre_list,
-                            do_not_add_review,
-                            context,
-                            color_managed_plugin):
+def prepare_representations(
+    skeleton_data,
+    exp_files,
+    anatomy,
+    aov_filter,
+    skip_integration_repre_list,
+    do_not_add_review,
+    context,
+    color_managed_plugin,
+    frames_to_render=None
+):
     """Create representations for file sequences.

     This will return representations of expected files if they are not
@@ -315,6 +324,8 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
         skip_integration_repre_list (list): exclude specific extensions,
         do_not_add_review (bool): explicitly skip review
         color_managed_plugin (publish.ColormanagedPyblishPluginMixin)
+        frames_to_render (str): implicit or explicit range of frames to render
+            this value is sent to Deadline in JobInfo.Frames
     Returns:
         list of representations
@@ -325,6 +336,14 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,

     log = Logger.get_logger("farm_publishing")

+    if frames_to_render is not None:
+        frames_to_render = _get_real_frames_to_render(frames_to_render)
+    else:
+        # Backwards compatibility for older logic
+        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_end = int(skeleton_data.get("frameEndHandle"))
+        frames_to_render = list(range(frame_start, frame_end + 1))
+
     # create representation for every collected sequence
     for collection in collections:
         ext = collection.tail.lstrip(".")
@@ -361,18 +380,21 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

-        frame_start = int(skeleton_data.get("frameStartHandle"))
+        frame_start = frames_to_render[0]
+        frame_end = frames_to_render[-1]
         if skeleton_data.get("slate"):
             frame_start -= 1

+        files = _get_real_files_to_rendered(collection, frames_to_render)
+
         # explicitly disable review by user
         preview = preview and not do_not_add_review
         rep = {
             "name": ext,
             "ext": ext,
-            "files": [os.path.basename(f) for f in list(collection)],
+            "files": files,
             "frameStart": frame_start,
-            "frameEnd": int(skeleton_data.get("frameEndHandle")),
+            "frameEnd": frame_end,
             # If expectedFile are absolute, we need only filenames
             "stagingDir": staging,
             "fps": skeleton_data.get("fps"),
@@ -413,10 +435,13 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
                 " This may cause issues on farm."
             ).format(staging))

+        files = _get_real_files_to_rendered(
+            [os.path.basename(remainder)], frames_to_render)
+
         rep = {
             "name": ext,
             "ext": ext,
-            "files": os.path.basename(remainder),
+            "files": files[0],
             "stagingDir": staging,
         }
@@ -453,6 +478,53 @@ def prepare_representations(skeleton_data, exp_files, anatomy, aov_filter,
     return representations


+def _get_real_frames_to_render(frames):
+    """Returns list of frames that should be rendered.
+
+    Artists could want to selectively render only particular frames
+    """
+    frames_to_render = []
+    for frame in frames.split(","):
+        if "-" in frame:
+            splitted = frame.split("-")
+            frames_to_render.extend(
+                range(int(splitted[0]), int(splitted[1])+1))
+        else:
+            frames_to_render.append(int(frame))
+    frames_to_render.sort()
+    return frames_to_render
+
+
+def _get_real_files_to_rendered(collection, frames_to_render):
+    """Use expected files based on real frames_to_render.
+
+    Artists might explicitly set frames they want to render via Publisher UI.
+    This uses this value to filter out files
+
+    Args:
+        frames_to_render (list): of str '1001'
+    """
+    files = [os.path.basename(f) for f in list(collection)]
+    file_name, extracted_frame = list(collect_frames(files).items())[0]
+
+    if not extracted_frame:
+        return files
+
+    found_frame_pattern_length = len(extracted_frame)
+    normalized_frames_to_render = {
+        str(frame_to_render).zfill(found_frame_pattern_length)
+        for frame_to_render in frames_to_render
+    }
+
+    return [
+        file_name
+        for file_name in files
+        if any(
+            frame in file_name
+            for frame in normalized_frames_to_render
+        )
+    ]
+
+
 def create_instances_for_aov(instance, skeleton, aov_filter,
                              skip_integration_repre_list,
                              do_not_add_review):
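The frame-list string accepted by `frames_to_render` uses Deadline-style syntax ("1001-1003,1005"); extracted for a quick check, the parsing above behaves like this:

    def get_real_frames_to_render(frames):
        # Same parsing as the private helper in the diff above.
        frames_to_render = []
        for frame in frames.split(","):
            if "-" in frame:
                splitted = frame.split("-")
                frames_to_render.extend(
                    range(int(splitted[0]), int(splitted[1]) + 1))
            else:
                frames_to_render.append(int(frame))
        frames_to_render.sort()
        return frames_to_render

    print(get_real_frames_to_render("1001-1003,1005"))
    # [1001, 1002, 1003, 1005]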
@@ -702,9 +774,14 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,

     project_settings = instance.context.data.get("project_settings")

     use_legacy_product_name = True
     try:
-        use_legacy_product_name = project_settings["core"]["tools"]["creator"]["use_legacy_product_names_for_renders"]  # noqa: E501
+        use_legacy_product_name = (
+            project_settings
+            ["core"]
+            ["tools"]
+            ["creator"]
+            ["use_legacy_product_names_for_renders"]
+        )
     except KeyError:
         warnings.warn(
             ("use_legacy_for_renders not found in project settings. "
@@ -720,7 +797,9 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
             dynamic_data=dynamic_data)

     else:
-        product_name, group_name = get_product_name_and_group_from_template(
+        (
+            product_name, group_name
+        ) = get_product_name_and_group_from_template(
             task_entity=instance.data["taskEntity"],
             project_name=instance.context.data["projectName"],
             host_name=instance.context.data["hostName"],
@@ -863,7 +942,7 @@ def _collect_expected_files_for_aov(files):
     # but we really expect only one collection.
     # Nothing else make sense.
     if len(cols) != 1:
-        raise ValueError("Only one image sequence type is expected.")  # noqa: E501
+        raise ValueError("Only one image sequence type is expected.")
     return list(cols[0])
@@ -3,6 +3,7 @@ from .constants import (
     ValidateContentsOrder,
     ValidateSceneOrder,
     ValidateMeshOrder,
+    FARM_JOB_ENV_DATA_KEY,
 )

 from .publish_plugins import (
|
||||||
"ValidateContentsOrder",
|
"ValidateContentsOrder",
|
||||||
"ValidateSceneOrder",
|
"ValidateSceneOrder",
|
||||||
"ValidateMeshOrder",
|
"ValidateMeshOrder",
|
||||||
|
"FARM_JOB_ENV_DATA_KEY",
|
||||||
|
|
||||||
"AbstractMetaInstancePlugin",
|
"AbstractMetaInstancePlugin",
|
||||||
"AbstractMetaContextPlugin",
|
"AbstractMetaContextPlugin",
|
||||||
|
|
|
||||||
|
|
@@ -9,3 +9,5 @@ ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
 DEFAULT_PUBLISH_TEMPLATE = "default"
 DEFAULT_HERO_PUBLISH_TEMPLATE = "default"
 TRANSIENT_DIR_TEMPLATE = "default"
+
+FARM_JOB_ENV_DATA_KEY: str = "farmJobEnv"
@@ -87,14 +87,13 @@ def get_folder_template_data(folder_entity, project_name):
     """

     path = folder_entity["path"]
-    hierarchy_parts = path.split("/")
-    # Remove empty string from the beginning
-    hierarchy_parts.pop(0)
-    # Remove last part which is folder name
-    folder_name = hierarchy_parts.pop(-1)
-    hierarchy = "/".join(hierarchy_parts)
-    if hierarchy_parts:
-        parent_name = hierarchy_parts[-1]
+    # Remove empty string from the beginning and split by '/'
+    parents = path.lstrip("/").split("/")
+    # Remove last part which is folder name
+    folder_name = parents.pop(-1)
+    hierarchy = "/".join(parents)
+    if parents:
+        parent_name = parents[-1]
     else:
         parent_name = project_name
@@ -103,6 +102,7 @@ def get_folder_template_data(folder_entity, project_name):
             "name": folder_name,
             "type": folder_entity["folderType"],
             "path": path,
+            "parents": parents,
         },
         "asset": folder_name,
         "hierarchy": hierarchy,
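With the rework, every hierarchy field is derived from the entity path in one pass; for an illustrative path the pieces come out as follows (values are examples, not from the commit):

    path = "/shots/sq01/sh010"

    parents = path.lstrip("/").split("/")  # ["shots", "sq01", "sh010"]
    folder_name = parents.pop(-1)          # "sh010"; parents -> ["shots", "sq01"]
    hierarchy = "/".join(parents)          # "shots/sq01"
    # Falls back to the project name when the folder sits at the root:
    parent_name = parents[-1] if parents else "<project name>"  # "sq01"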
@@ -413,14 +413,16 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
             # Backwards compatible (Deprecated since 24/06/06)
             or instance.data.get("newAssetPublishing")
         ):
-            hierarchy = instance.data["hierarchy"]
-            anatomy_data["hierarchy"] = hierarchy
+            folder_path = instance.data["folderPath"]
+            parents = folder_path.lstrip("/").split("/")
+            folder_name = parents.pop(-1)

             parent_name = project_entity["name"]
-            if hierarchy:
-                parent_name = hierarchy.split("/")[-1]
+            hierarchy = ""
+            if parents:
+                parent_name = parents[-1]
+                hierarchy = "/".join(parents)

-            folder_name = instance.data["folderPath"].split("/")[-1]
             anatomy_data.update({
                 "asset": folder_name,
                 "hierarchy": hierarchy,
@@ -432,6 +434,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
                 # Using 'Shot' is current default behavior of editorial
                 # (or 'newHierarchyIntegration') publishing.
                 "type": "Shot",
+                "parents": parents,
             },
         })
@@ -0,0 +1,43 @@
+import os
+
+import pyblish.api
+
+from ayon_core.lib import get_ayon_username
+from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY
+
+
+class CollectCoreJobEnvVars(pyblish.api.ContextPlugin):
+    """Collect set of environment variables to submit with deadline jobs"""
+    order = pyblish.api.CollectorOrder - 0.45
+    label = "AYON core Farm Environment Variables"
+    targets = ["local"]
+
+    def process(self, context):
+        env = context.data.setdefault(FARM_JOB_ENV_DATA_KEY, {})
+
+        # Disable colored logs on farm
+        for key, value in (
+            ("AYON_LOG_NO_COLORS", "1"),
+            ("AYON_PROJECT_NAME", context.data["projectName"]),
+            ("AYON_FOLDER_PATH", context.data.get("folderPath")),
+            ("AYON_TASK_NAME", context.data.get("task")),
+            # NOTE we should use 'context.data["user"]' but that has higher
+            #   order.
+            ("AYON_USERNAME", get_ayon_username()),
+        ):
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
+
+        for key in [
+            "AYON_BUNDLE_NAME",
+            "AYON_DEFAULT_SETTINGS_VARIANT",
+            "AYON_IN_TESTS",
+            # NOTE Not sure why workdir is needed?
+            "AYON_WORKDIR",
+        ]:
+            value = os.getenv(key)
+            if value:
+                self.log.debug(f"Setting job env: {key}: {value}")
+                env[key] = value
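Downstream, a farm submitter can read the collected variables back from context data under the same key. A hypothetical consumer sketch; `job_info` and its `EnvironmentKeyValue` mapping are illustrative stand-ins, not part of this diff:

    from ayon_core.pipeline.publish import FARM_JOB_ENV_DATA_KEY

    def apply_farm_job_env(context, job_info):
        # 'job_info.EnvironmentKeyValue' stands in for whatever mapping the
        # render manager integration exposes for per-job environment.
        for key, value in context.data.get(FARM_JOB_ENV_DATA_KEY, {}).items():
            job_info.EnvironmentKeyValue[key] = value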
@@ -43,7 +43,8 @@ class CollectHierarchy(pyblish.api.ContextPlugin):

         shot_data = {
             "entity_type": "folder",
-            # WARNING unless overwritten, default folder type is hardcoded to shot
+            # WARNING unless overwritten, default folder type is hardcoded
+            #   to shot
             "folder_type": instance.data.get("folder_type") or "Shot",
             "tasks": instance.data.get("tasks") or {},
             "comments": instance.data.get("comments", []),
@@ -71,20 +71,18 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
             name = inst.data["folderPath"]
 
             recycling_file = [f for f in created_files if name in f]
-
-            # frameranges
-            timeline_in_h = inst.data["clipInH"]
-            timeline_out_h = inst.data["clipOutH"]
-            fps = inst.data["fps"]
-
-            # create duration
-            duration = (timeline_out_h - timeline_in_h) + 1
+            audio_clip = inst.data["otioClip"]
+            audio_range = audio_clip.range_in_parent()
+            duration = audio_range.duration.to_frames()
 
             # ffmpeg generate new file only if doesn't exists already
             if not recycling_file:
-                # convert to seconds
-                start_sec = float(timeline_in_h / fps)
-                duration_sec = float(duration / fps)
+                parent_track = audio_clip.parent()
+                parent_track_start = parent_track.range_in_parent().start_time
+                relative_start_time = (
+                    audio_range.start_time - parent_track_start)
+                start_sec = relative_start_time.to_seconds()
+                duration_sec = audio_range.duration.to_seconds()
 
                 # temp audio file
                 audio_fpath = self.create_temp_file(name)
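The replaced arithmetic is easy to sanity-check with plain opentimelineio. A minimal sketch, assuming a 25 fps clip that starts 50 frames into its parent track and lasts 100 frames (all values are illustrative):

import opentimelineio as otio

fps = 25
audio_range = otio.opentime.TimeRange(
    start_time=otio.opentime.RationalTime(50, fps),
    duration=otio.opentime.RationalTime(100, fps),
)
# The track itself starts at frame 0 of the timeline.
parent_track_start = otio.opentime.RationalTime(0, fps)

relative_start_time = audio_range.start_time - parent_track_start
print(relative_start_time.to_seconds())   # 2.0
print(audio_range.duration.to_frames())   # 100
print(audio_range.duration.to_seconds())  # 4.0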
@@ -163,34 +161,36 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin):
 
         output = []
         # go trough all audio tracks
-        for otio_track in otio_timeline.tracks:
-            if "Audio" not in otio_track.kind:
-                continue
+        for otio_track in otio_timeline.audio_tracks():
             self.log.debug("_" * 50)
             playhead = 0
             for otio_clip in otio_track:
                 self.log.debug(otio_clip)
-                if isinstance(otio_clip, otio.schema.Gap):
-                    playhead += otio_clip.source_range.duration.value
-                elif isinstance(otio_clip, otio.schema.Clip):
-                    start = otio_clip.source_range.start_time.value
-                    duration = otio_clip.source_range.duration.value
-                    fps = otio_clip.source_range.start_time.rate
+                if (isinstance(otio_clip, otio.schema.Clip) and
+                        not otio_clip.media_reference.is_missing_reference):
+                    media_av_start = otio_clip.available_range().start_time
+                    clip_start = otio_clip.source_range.start_time
+                    fps = clip_start.rate
+                    conformed_av_start = media_av_start.rescaled_to(fps)
+                    # ffmpeg ignores embedded tc
+                    start = clip_start - conformed_av_start
+                    duration = otio_clip.source_range.duration
                     media_path = otio_clip.media_reference.target_url
                     input = {
                         "mediaPath": media_path,
                         "delayFrame": playhead,
-                        "startFrame": start,
-                        "durationFrame": duration,
+                        "startFrame": start.to_frames(),
+                        "durationFrame": duration.to_frames(),
                         "delayMilSec": int(float(playhead / fps) * 1000),
-                        "startSec": float(start / fps),
-                        "durationSec": float(duration / fps),
-                        "fps": fps
+                        "startSec": start.to_seconds(),
+                        "durationSec": duration.to_seconds(),
+                        "fps": float(fps)
                     }
                     if input not in output:
                         output.append(input)
                         self.log.debug("__ input: {}".format(input))
-                    playhead += otio_clip.source_range.duration.value
+
+                playhead += otio_clip.source_range.duration.value
 
         return output
 
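Timeline.audio_tracks() already filters tracks by kind, and the is_missing_reference guard keeps offline media out of the FFmpeg inputs. A self-contained sketch of both helpers on a synthetic timeline (names and paths are made up):

import opentimelineio as otio

timeline = otio.schema.Timeline(name="example")
track = otio.schema.Track(name="A1", kind=otio.schema.TrackKind.Audio)
timeline.tracks.append(track)
track.append(otio.schema.Clip(
    name="dialog",
    media_reference=otio.schema.ExternalReference(
        target_url="/tmp/dialog.wav"
    ),
    source_range=otio.opentime.TimeRange(
        otio.opentime.RationalTime(0, 25),
        otio.opentime.RationalTime(50, 25),
    ),
))

for audio_track in timeline.audio_tracks():
    for item in audio_track:
        if (isinstance(item, otio.schema.Clip)
                and not item.media_reference.is_missing_reference):
            print(item.name, item.media_reference.target_url)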
@@ -78,6 +78,7 @@ class ExtractOTIOReview(
 
         if otio_review_clips is None:
             self.log.info(f"Instance `{instance}` has no otioReviewClips")
+            return
 
         # add plugin wide attributes
         self.representation_files = []
@@ -129,26 +130,33 @@ class ExtractOTIOReview(
                 res_data[key] = value
                 break
 
-        self.to_width, self.to_height = res_data["width"], res_data["height"]
-        self.log.debug("> self.to_width x self.to_height: {} x {}".format(
-            self.to_width, self.to_height
-        ))
+        self.to_width, self.to_height = (
+            res_data["width"], res_data["height"]
+        )
+        self.log.debug(
+            "> self.to_width x self.to_height:"
+            f" {self.to_width} x {self.to_height}"
+        )
 
         available_range = r_otio_cl.available_range()
+        available_range_start_frame = (
+            available_range.start_time.to_frames()
+        )
         processing_range = None
         self.actual_fps = available_range.duration.rate
         start = src_range.start_time.rescaled_to(self.actual_fps)
         duration = src_range.duration.rescaled_to(self.actual_fps)
+        src_frame_start = src_range.start_time.to_frames()
 
         # Temporary.
-        # Some AYON custom OTIO exporter were implemented with relative
-        # source range for image sequence. Following code maintain
-        # backward-compatibility by adjusting available range
+        # Some AYON custom OTIO exporter were implemented with
+        # relative source range for image sequence. Following code
+        # maintain backward-compatibility by adjusting available range
         # while we are updating those.
         if (
             is_clip_from_media_sequence(r_otio_cl)
-            and available_range.start_time.to_frames() == media_ref.start_frame
-            and src_range.start_time.to_frames() < media_ref.start_frame
+            and available_range_start_frame == media_ref.start_frame
+            and src_frame_start < media_ref.start_frame
         ):
             available_range = otio.opentime.TimeRange(
                 otio.opentime.RationalTime(0, rate=self.actual_fps),
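The new helper variables only cache to_frames() results; the rescaling logic is unchanged. For reference, rescaled_to() converts a time to another rate without moving the instant it represents (values below are illustrative):

import opentimelineio as otio

# Frame 12 at 24 fps...
src_start = otio.opentime.RationalTime(12, 24)

# ...is frame 24 at 48 fps: same instant, different rate.
start = src_start.rescaled_to(48)
print(start.value, start.rate)                       # 24.0 48.0
print(src_start.to_seconds() == start.to_seconds())  # True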
@@ -246,7 +254,8 @@ class ExtractOTIOReview(
             # Extraction via FFmpeg.
             else:
                 path = media_ref.target_url
-                # Set extract range from 0 (FFmpeg ignores embedded timecode).
+                # Set extract range from 0 (FFmpeg ignores
+                # embedded timecode).
                 extract_range = otio.opentime.TimeRange(
                     otio.opentime.RationalTime(
                         (
@@ -414,7 +423,8 @@ class ExtractOTIOReview(
         to defined image sequence format.
 
         Args:
-            sequence (list): input dir path string, collection object, fps in list
+            sequence (list): input dir path string, collection object,
+                fps in list.
             video (list)[optional]: video_path string, otio_range in list
             gap (int)[optional]: gap duration
             end_offset (int)[optional]: offset gap frame start in frames
@@ -11,8 +11,8 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
     """Validate all product names are unique.
 
     This only validates whether the instances currently set to publish from
-    the workfile overlap one another for the folder + product they are publishing
-    to.
+    the workfile overlap one another for the folder + product they are
+    publishing to.
 
     This does not perform any check against existing publishes in the database
     since it is allowed to publish into existing products resulting in
@@ -72,8 +72,10 @@ class ValidateProductUniqueness(pyblish.api.ContextPlugin):
             # All is ok
             return
 
-        msg = ("Instance product names {} are not unique. ".format(non_unique) +
-               "Please remove or rename duplicates.")
+        msg = (
+            f"Instance product names {non_unique} are not unique."
+            " Please remove or rename duplicates."
+        )
         formatting_data = {
             "non_unique": ",".join(non_unique)
         }
@@ -79,7 +79,8 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     - Datatypes explanation:
     <color> string format must be supported by FFmpeg.
         Examples: "#000000", "0x000000", "black"
-    <font> must be accesible by ffmpeg = name of registered Font in system or path to font file.
+    <font> must be accesible by ffmpeg = name of registered Font in system
+        or path to font file.
         Examples: "Arial", "C:/Windows/Fonts/arial.ttf"
 
     - Possible keys:
@@ -87,17 +88,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     "bg_opacity" - Opacity of background (box around text) - <float, Range:0-1>
     "bg_color" - Background color - <color>
     "bg_padding" - Background padding in pixels - <int>
-    "x_offset" - offsets burnin vertically by entered pixels from border - <int>
-    "y_offset" - offsets burnin horizontally by entered pixels from border - <int>
+    "x_offset" - offsets burnin vertically by entered pixels
+        from border - <int>
+    "y_offset" - offsets burnin horizontally by entered pixels
+        from border - <int>
     - x_offset & y_offset should be set at least to same value as bg_padding!!
     "font" - Font Family for text - <font>
     "font_size" - Font size in pixels - <int>
     "font_color" - Color of text - <color>
     "frame_offset" - Default start frame - <int>
-        - required IF start frame is not set when using frames or timecode burnins
+        - required IF start frame is not set when using frames
+            or timecode burnins
 
-    On initializing class can be set General options through "options_init" arg.
-    General can be overridden when adding burnin
+    On initializing class can be set General options through
+    "options_init" arg.
+    General options can be overridden when adding burnin.
 
     '''
     TOP_CENTERED = ffmpeg_burnins.TOP_CENTERED
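For orientation, a burnin options dict using only the keys documented above might look like this (values are illustrative, not shipped defaults):

options_init = {
    "bg_opacity": 0.5,     # half-transparent box behind the text
    "bg_color": "black",   # <color> accepted by FFmpeg
    "bg_padding": 5,       # pixels around the text
    "x_offset": 5,         # keep >= bg_padding, see note above
    "y_offset": 5,         # keep >= bg_padding, see note above
    "font_size": 42,
    "font_color": "white",
}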
@@ -190,6 +190,7 @@ def get_current_project_settings():
     project_name = os.environ.get("AYON_PROJECT_NAME")
     if not project_name:
         raise ValueError(
-            "Missing context project in environemt variable `AYON_PROJECT_NAME`."
+            "Missing context project in environment"
+            " variable `AYON_PROJECT_NAME`."
         )
     return get_project_settings(project_name)
@@ -104,7 +104,7 @@ class ProductNameValidator(RegularExpressionValidatorClass):
 
     def validate(self, text, pos):
         results = super(ProductNameValidator, self).validate(text, pos)
-        if results[0] == self.Invalid:
+        if results[0] == RegularExpressionValidatorClass.Invalid:
             self.invalid.emit(self.invalid_chars(text))
         return results
 
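Pointing at the class attribute instead of self.Invalid makes it explicit that the state enum comes from the Qt validator base class, which is an alias resolved at import time. A standalone sketch of the same pattern (the qtpy import and subclass name here are assumptions, not this module's code):

from qtpy import QtGui

# Alias resolved once at import, mirroring the widget module's convention.
RegularExpressionValidatorClass = QtGui.QRegularExpressionValidator


class NameValidatorSketch(RegularExpressionValidatorClass):
    def validate(self, text, pos):
        results = super().validate(text, pos)
        # State enum referenced through the class alias, not the instance.
        if results[0] == RegularExpressionValidatorClass.Invalid:
            print(f"invalid input: {text!r}")
        return results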
@@ -217,7 +217,9 @@ class ProductTypeDescriptionWidget(QtWidgets.QWidget):
 
         product_type_label = QtWidgets.QLabel(self)
         product_type_label.setObjectName("CreatorProductTypeLabel")
-        product_type_label.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft)
+        product_type_label.setAlignment(
+            QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft
+        )
 
         help_label = QtWidgets.QLabel(self)
         help_label.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
@@ -21,9 +21,9 @@ except ImportError:
 
     Application action based on 'ApplicationManager' system.
 
-    Handling of applications in launcher is not ideal and should be completely
-    redone from scratch. This is just a temporary solution to keep backwards
-    compatibility with AYON launcher.
+    Handling of applications in launcher is not ideal and should be
+    completely redone from scratch. This is just a temporary solution
+    to keep backwards compatibility with AYON launcher.
 
     Todos:
         Move handling of errors to frontend.
@@ -517,7 +517,11 @@ class CustomPaintMultiselectComboBox(QtWidgets.QComboBox):
     def setItemCheckState(self, index, state):
         self.setItemData(index, state, QtCore.Qt.CheckStateRole)
 
-    def set_value(self, values: Optional[Iterable[Any]], role: Optional[int] = None):
+    def set_value(
+        self,
+        values: Optional[Iterable[Any]],
+        role: Optional[int] = None,
+    ):
         if role is None:
             role = self._value_role
@@ -499,8 +499,10 @@ class ProductsModel(QtGui.QStandardItemModel):
             version_item.version_id
             for version_item in last_version_by_product_id.values()
         }
-        repre_count_by_version_id = self._controller.get_versions_representation_count(
-            project_name, version_ids
+        repre_count_by_version_id = (
+            self._controller.get_versions_representation_count(
+                project_name, version_ids
+            )
         )
         sync_availability_by_version_id = (
             self._controller.get_version_sync_availability(
@@ -339,7 +339,9 @@ class OverviewWidget(QtWidgets.QFrame):
         self._change_visibility_for_state()
         self._product_content_layout.addWidget(self._create_widget, 7)
         self._product_content_layout.addWidget(self._product_views_widget, 3)
-        self._product_content_layout.addWidget(self._product_attributes_wrap, 7)
+        self._product_content_layout.addWidget(
+            self._product_attributes_wrap, 7
+        )
 
     def _change_visibility_for_state(self):
         self._create_widget.setVisible(
@@ -214,8 +214,8 @@ class TasksCombobox(QtWidgets.QComboBox):
     Combobox gives ability to select only from intersection of task names for
     folder paths in selected instances.
 
-    If folder paths in selected instances does not have same tasks then combobox
-    will be empty.
+    If folder paths in selected instances does not have same tasks
+    then combobox will be empty.
     """
     value_changed = QtCore.Signal()
@@ -604,7 +604,7 @@ class VariantInputWidget(PlaceholderLineEdit):
 
 
 class GlobalAttrsWidget(QtWidgets.QWidget):
-    """Global attributes mainly to define context and product name of instances.
+    """Global attributes to define context and product name of instances.
 
     product name is or may be affected on context. Gives abiity to modify
     context and product name of instance. This change is not autopromoted but
@@ -22,8 +22,8 @@ class TasksModel(QtGui.QStandardItemModel):
     tasks with same names then model is empty too.
 
     Args:
-        controller (AbstractPublisherFrontend): Controller which handles creation and
-            publishing.
+        controller (AbstractPublisherFrontend): Controller which handles
+            creation and publishing.
 
     """
     def __init__(
@@ -998,7 +998,11 @@ class PublisherWindow(QtWidgets.QDialog):
             new_item["label"] = new_item.pop("creator_label")
             new_item["identifier"] = new_item.pop("creator_identifier")
             new_failed_info.append(new_item)
-        self.add_error_message_dialog(event["title"], new_failed_info, "Creator:")
+        self.add_error_message_dialog(
+            event["title"],
+            new_failed_info,
+            "Creator:"
+        )
 
     def _on_convertor_error(self, event):
         new_failed_info = []
@@ -368,8 +368,8 @@ class ContainersModel:
             try:
                 uuid.UUID(repre_id)
             except (ValueError, TypeError, AttributeError):
-                # Fake not existing representation id so container is shown in UI
-                # but as invalid
+                # Fake not existing representation id so container
+                # is shown in UI but as invalid
                 item.representation_id = invalid_ids_mapping.setdefault(
                     repre_id, uuid.uuid4().hex
                 )
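The setdefault call gives every malformed id a stable stand-in, so repeated refreshes keep pointing at the same placeholder container. A small sketch of the idea in isolation (the function name is made up):

import uuid

invalid_ids_mapping = {}

def ensure_uuid(repre_id):
    """Return valid ids untouched, map invalid ones to a stable fake hex."""
    try:
        uuid.UUID(repre_id)
        return repre_id
    except (ValueError, TypeError, AttributeError):
        return invalid_ids_mapping.setdefault(repre_id, uuid.uuid4().hex)

print(ensure_uuid("not-a-uuid") == ensure_uuid("not-a-uuid"))  # True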
@@ -556,9 +556,10 @@ class _IconsCache:
             log.info("Didn't find icon \"{}\"".format(icon_name))
 
         elif used_variant != icon_name:
-            log.debug("Icon \"{}\" was not found \"{}\" is used instead".format(
-                icon_name, used_variant
-            ))
+            log.debug(
+                f"Icon \"{icon_name}\" was not found"
+                f" \"{used_variant}\" is used instead"
+            )
 
         cls._qtawesome_cache[full_icon_name] = icon
         return icon
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring AYON addon 'core' version."""
-__version__ = "1.0.8+dev"
+__version__ = "1.0.10+dev"
@@ -15,6 +15,6 @@ qtawesome = "0.7.3"
 aiohttp-middlewares = "^2.0.0"
 Click = "^8"
 OpenTimelineIO = "0.16.0"
-opencolorio = "^2.3.2"
+opencolorio = "^2.3.2,<2.4.0"
 Pillow = "9.5.0"
 websocket-client = ">=0.40.0,<2"
@@ -1,6 +1,6 @@
 name = "core"
 title = "Core"
-version = "1.0.8+dev"
+version = "1.0.10+dev"
 
 client_dir = "ayon_core"
 
@@ -5,7 +5,7 @@
 
 [tool.poetry]
 name = "ayon-core"
-version = "1.0.8+dev"
+version = "1.0.10+dev"
 description = ""
 authors = ["Ynput Team <team@ynput.io>"]
 readme = "README.md"
@@ -68,7 +68,7 @@ target-version = "py39"
 
 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
-select = ["E4", "E7", "E9", "F", "W"]
+select = ["E", "F", "W"]
 ignore = []
 
 # Allow fix for all enabled rules (when `--fix`) is provided.
@@ -358,7 +358,10 @@ class ExtractOIIOTranscodeOutputModel(BaseSettingsModel):
     custom_tags: list[str] = SettingsField(
         default_factory=list,
         title="Custom Tags",
-        description="Additional custom tags that will be added to the created representation."
+        description=(
+            "Additional custom tags that will be added"
+            " to the created representation."
+        )
     )
 
@@ -892,9 +895,11 @@ class PublishPuginsModel(BaseSettingsModel):
         default_factory=CollectFramesFixDefModel,
         title="Collect Frames to Fix",
     )
-    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = SettingsField(
-        default_factory=CollectUSDLayerContributionsModel,
-        title="Collect USD Layer Contributions",
+    CollectUSDLayerContributions: CollectUSDLayerContributionsModel = (
+        SettingsField(
+            default_factory=CollectUSDLayerContributionsModel,
+            title="Collect USD Layer Contributions",
+        )
     )
     ValidateEditorialAssetName: ValidateBaseModel = SettingsField(
         default_factory=ValidateBaseModel,
@@ -1214,7 +1219,9 @@ DEFAULT_PUBLISH_VALUES = {
         "TOP_RIGHT": "{anatomy[version]}",
         "BOTTOM_LEFT": "{username}",
         "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
         "filter": {
             "families": [],
             "tags": []
@@ -1240,7 +1247,9 @@ DEFAULT_PUBLISH_VALUES = {
         "TOP_RIGHT": "{anatomy[version]}",
         "BOTTOM_LEFT": "{username}",
         "BOTTOM_CENTERED": "{folder[name]}",
-        "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}",
+        "BOTTOM_RIGHT": (
+            "{frame_start}-{current_frame}-{frame_end}"
+        ),
         "filter": {
             "families": [],
             "tags": []
@@ -83,8 +83,8 @@ class CreatorToolModel(BaseSettingsModel):
     filter_creator_profiles: list[FilterCreatorProfile] = SettingsField(
         default_factory=list,
         title="Filter creator profiles",
-        description="Allowed list of creator labels that will be only shown if "
-                    "profile matches context."
+        description="Allowed list of creator labels that will be only shown"
+                    " if profile matches context."
     )
 
     @validator("product_types_smart_select")
@@ -426,7 +426,9 @@ DEFAULT_TOOLS_VALUES = {
         ],
         "task_types": [],
         "tasks": [],
-        "template": "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+        "template": (
+            "{product[type]}{Task[name]}_{Renderlayer}_{Renderpass}"
+        )
     },
     {
         "product_types": [
@@ -130,19 +130,20 @@ def test_image_sequence_and_handles_out_of_range():
 
     expected = [
         # 5 head black frames generated from gap (991-995)
-        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.2 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",
 
         # 9 tail back frames generated from gap (1097-1105)
-        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 1097 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.36 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 1097 C:/result/output.%03d.jpg",
 
         # Report from source tiff (996-1096)
         # 996-1000 = additional 5 head frames
         # 1001-1095 = source range conformed to 25fps
         # 1096-1096 = additional 1 tail frames
         "/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i "
-        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996 C:/result/output.%03d.jpg"
+        f"C:\\tif_seq{os.sep}output.%04d.tif -start_number 996"
+        f" C:/result/output.%03d.jpg"
     ]
 
     assert calls == expected
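The durations in these expected commands follow directly from the frame counts in the comments, e.g. for the gaps at 25 fps:

# 5 head gap frames (991-995) -> the '-t 0.2' in the first command
head_frames = 995 - 991 + 1
fps = 25.0
print(head_frames / fps)  # 0.2

# 9 tail gap frames (1097-1105) -> '-t 0.36'
print((1105 - 1097 + 1) / fps)  # 0.36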
@@ -179,13 +180,13 @@ def test_short_movie_head_gap_handles():
 
     expected = [
         # 10 head black frames generated from gap (991-1000)
-        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune "
-        "stillimage -start_number 991 C:/result/output.%03d.jpg",
+        "/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720"
+        " -tune stillimage -start_number 991 C:/result/output.%03d.jpg",
 
         # source range + 10 tail frames
         # duration = 50fr (source) + 10fr (tail handle) = 60 fr = 2.4s
-        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4 -start_number 1001 "
-        "C:/result/output.%03d.jpg"
+        "/path/to/ffmpeg -ss 0.0 -t 2.4 -i C:\\data\\movie.mp4"
+        " -start_number 1001 C:/result/output.%03d.jpg"
     ]
 
     assert calls == expected
@@ -208,7 +209,8 @@ def test_short_movie_tail_gap_handles():
         # 10 head frames + source range
         # duration = 10fr (head handle) + 66fr (source) = 76fr = 3.16s
         "/path/to/ffmpeg -ss 1.0416666666666667 -t 3.1666666666666665 -i "
-        "C:\\data\\qt_no_tc_24fps.mov -start_number 991 C:/result/output.%03d.jpg"
+        "C:\\data\\qt_no_tc_24fps.mov -start_number 991"
+        " C:/result/output.%03d.jpg"
     ]
 
     assert calls == expected
@@ -234,10 +236,12 @@ def test_multiple_review_clips_no_gap():
 
     expected = [
         # 10 head black frames generated from gap (991-1000)
-        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi -i color=c=black:s=1280x720 -tune '
+        '/path/to/ffmpeg -t 0.4 -r 25.0 -f lavfi'
+        ' -i color=c=black:s=1280x720 -tune '
         'stillimage -start_number 991 C:/result/output.%03d.jpg',
 
-        # Alternance 25fps tiff sequence and 24fps exr sequence for 100 frames each
+        # Alternance 25fps tiff sequence and 24fps exr sequence
+        # for 100 frames each
        '/path/to/ffmpeg -start_number 1000 -framerate 25.0 -i '
         f'C:\\no_tc{os.sep}output.%04d.tif '
         '-start_number 1001 C:/result/output.%03d.jpg',
@@ -315,7 +319,8 @@ def test_multiple_review_clips_with_gap():
 
     expected = [
         # Gap on review track (12 frames)
-        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi -i color=c=black:s=1280x720 -tune '
+        '/path/to/ffmpeg -t 0.5 -r 24.0 -f lavfi'
+        ' -i color=c=black:s=1280x720 -tune '
         'stillimage -start_number 991 C:/result/output.%03d.jpg',
 
         '/path/to/ffmpeg -start_number 1000 -framerate 24.0 -i '