Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-25 05:14:40 +01:00
[Automated] Merged develop into main
Commit 84c637176f
31 changed files with 296 additions and 210 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored)

@@ -35,6 +35,7 @@ body:
      label: Version
      description: What version are you running? Look to OpenPype Tray
      options:
+       - 3.16.6-nightly.1
        - 3.16.5
        - 3.16.5-nightly.5
        - 3.16.5-nightly.4

@@ -134,7 +135,6 @@ body:
        - 3.14.9-nightly.4
        - 3.14.9-nightly.3
        - 3.14.9-nightly.2
        - 3.14.9-nightly.1
    validations:
      required: true
  - type: dropdown
Binary file not shown.
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.26"
+<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.27"
    ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <ExtensionList>
        <Extension Id="com.openpype.AE.panel" Version="1.0" />

@@ -10,22 +10,22 @@
    <!-- Photoshop -->
    <!--<Host Name="PHXS" Version="[14.0,19.0]" /> -->
    <!-- <Host Name="PHSP" Version="[14.0,19.0]" /> -->

    <!-- Illustrator -->
    <!-- <Host Name="ILST" Version="[18.0,22.0]" /> -->

    <!-- InDesign -->
-   <!-- <Host Name="IDSN" Version="[10.0,13.0]" /> -->
+   <!-- <Host Name="IDSN" Version="[10.0,13.0]" /> -->

    <!-- Premiere -->
    <!-- <Host Name="PPRO" Version="[8.0,12.0]" /> -->

    <!-- AfterEffects -->
    <Host Name="AEFT" Version="[13.0,99.0]" />

-   <!-- PRELUDE -->
+   <!-- PRELUDE -->
    <!-- <Host Name="PRLD" Version="[3.0,7.0]" /> -->

    <!-- FLASH Pro -->
    <!-- <Host Name="FLPR" Version="[14.0,18.0]" /> -->

@@ -63,7 +63,7 @@
        <Height>550</Height>
        <Width>400</Width>
        </MaxSize>-->

    </Geometry>
    <Icons>
        <Icon Type="Normal">./icons/iconNormal.png</Icon>

@@ -71,9 +71,9 @@
        <Icon Type="Disabled">./icons/iconDisabled.png</Icon>
        <Icon Type="DarkNormal">./icons/iconDarkNormal.png</Icon>
        <Icon Type="DarkRollOver">./icons/iconDarkRollover.png</Icon>
-   </Icons>
+   </Icons>
    </UI>
    </DispatchInfo>
    </Extension>
</DispatchInfoList>
-</ExtensionManifest>
+</ExtensionManifest>
@@ -215,6 +215,8 @@ function _getItem(item, comps, folders, footages){
     * Refactor
     */
    var item_type = '';
+   var path = '';
+   var containing_comps = [];
    if (item instanceof FolderItem){
        item_type = 'folder';
        if (!folders){

@@ -222,10 +224,18 @@ function _getItem(item, comps, folders, footages){
        }
    }
    if (item instanceof FootageItem){
-       item_type = 'footage';
        if (!footages){
            return "{}";
        }
+       item_type = 'footage';
+       if (item.file){
+           path = item.file.fsName;
+       }
+       if (item.usedIn){
+           for (j = 0; j < item.usedIn.length; ++j){
+               containing_comps.push(item.usedIn[j].id);
+           }
+       }
    }
    if (item instanceof CompItem){
        item_type = 'comp';

@@ -236,7 +246,9 @@ function _getItem(item, comps, folders, footages){

    var item = {"name": item.name,
                "id": item.id,
-               "type": item_type};
+               "type": item_type,
+               "path": path,
+               "containing_comps": containing_comps};
    return JSON.stringify(item);
}
@@ -37,6 +37,9 @@ class AEItem(object):
    height = attr.ib(default=None)
    is_placeholder = attr.ib(default=False)
    uuid = attr.ib(default=False)
+   path = attr.ib(default=False)  # path to FootageItem to validate
+   # list of composition Footage is in
+   containing_comps = attr.ib(factory=list)


class AfterEffectsServerStub():

@@ -704,7 +707,10 @@ class AfterEffectsServerStub():
                d.get("instance_id"),
                d.get("width"),
                d.get("height"),
-               d.get("is_placeholder"))
+               d.get("is_placeholder"),
+               d.get("uuid"),
+               d.get("path"),
+               d.get("containing_comps"),)

            ret.append(item)
        return ret
@@ -31,13 +31,8 @@ class FileLoader(api.AfterEffectsLoader):

        path = self.filepath_from_context(context)

-       repr_cont = context["representation"]["context"]
-       if "#" not in path:
-           frame = repr_cont.get("frame")
-           if frame:
-               padding = len(frame)
-               path = path.replace(frame, "#" * padding)
-               import_options['sequence'] = True
+       if len(context["representation"]["files"]) > 1:
+           import_options['sequence'] = True

        if not path:
            repr_id = context["representation"]["_id"]
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<root>
+    <error id="main">
+        <title>Footage item missing</title>
+        <description>
+## Footage item missing
+
+FootageItem `{name}` contains missing `{path}`. Render will not produce any frames and AE will stop react to any integration
+### How to repair?
+
+Remove `{name}` or provide missing file.
+        </description>
+    </error>
+</root>
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""Validate presence of footage items in composition
+
+Requires:
+"""
+import os
+
+import pyblish.api
+
+from openpype.pipeline import (
+    PublishXmlValidationError
+)
+from openpype.hosts.aftereffects.api import get_stub
+
+
+class ValidateFootageItems(pyblish.api.InstancePlugin):
+    """
+    Validates if FootageItems contained in composition exist.
+
+    AE fails silently and doesn't render anything if footage item file is
+    missing. This will result in nonresponsiveness of AE UI as it expects
+    reaction from user, but it will not provide dialog.
+    This validator tries to check existence of the files.
+    It will not protect from missing frame in multiframes though
+    (as AE api doesn't provide this information and it cannot be told how many
+    frames should be there easily). Missing frame is replaced by placeholder.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Footage Items"
+    families = ["render.farm", "render.local", "render"]
+    hosts = ["aftereffects"]
+    optional = True
+
+    def process(self, instance):
+        """Plugin entry point."""
+
+        comp_id = instance.data["comp_id"]
+        for footage_item in get_stub().get_items(comps=False, folders=False,
+                                                 footages=True):
+            self.log.info(footage_item)
+            if comp_id not in footage_item.containing_comps:
+                continue
+
+            path = footage_item.path
+            if path and not os.path.exists(path):
+                msg = f"File {path} not found."
+                formatting = {"name": footage_item.name, "path": path}
+                raise PublishXmlValidationError(self, msg,
+                                                formatting_data=formatting)
@@ -39,15 +39,11 @@ class CollectReview(pyblish.api.InstancePlugin):
        ]

        if not instance.data.get("remove"):

            task = instance.context.data["task"]

            # Store focal length in `burninDataMembers`
            burninData = instance.data.setdefault("burninDataMembers", {})
            burninData["focalLength"] = focal_length

            instance.data.update({
                "subset": f"{task}Review",
                "review_camera": camera,
                "frameStart": instance.context.data["frameStart"],
                "frameEnd": instance.context.data["frameEnd"],
@@ -4,6 +4,7 @@ from openpype.lib import BoolDef
import openpype.hosts.photoshop.api as api
from openpype.hosts.photoshop.lib import PSAutoCreator
from openpype.pipeline.create import get_subset_name
+from openpype.lib import prepare_template_data
from openpype.client import get_asset_by_name


@@ -37,19 +38,14 @@ class AutoImageCreator(PSAutoCreator):
        asset_doc = get_asset_by_name(project_name, asset_name)

        if existing_instance is None:
-           subset_name = get_subset_name(
-               self.family, self.default_variant, task_name, asset_doc,
+           subset_name = self.get_subset_name(
+               self.default_variant, task_name, asset_doc,
                project_name, host_name
            )

-           publishable_ids = [layer.id for layer in api.stub().get_layers()
-                              if layer.visible]
            data = {
                "asset": asset_name,
                "task": task_name,
-               # ids are "virtual" layers, won't get grouped as 'members' do
-               # same difference in color coded layers in WP
-               "ids": publishable_ids
            }

            if not self.active_on_create:

@@ -69,8 +65,8 @@ class AutoImageCreator(PSAutoCreator):
            existing_instance["asset"] != asset_name
            or existing_instance["task"] != task_name
        ):
-           subset_name = get_subset_name(
-               self.family, self.default_variant, task_name, asset_doc,
+           subset_name = self.get_subset_name(
+               self.default_variant, task_name, asset_doc,
                project_name, host_name
            )

@@ -118,3 +114,29 @@ class AutoImageCreator(PSAutoCreator):
        Artist might disable this instance from publishing or from creating
        review for it though.
        """
+
+   def get_subset_name(
+       self,
+       variant,
+       task_name,
+       asset_doc,
+       project_name,
+       host_name=None,
+       instance=None
+   ):
+       dynamic_data = prepare_template_data({"layer": "{layer}"})
+       subset_name = get_subset_name(
+           self.family, variant, task_name, asset_doc,
+           project_name, host_name, dynamic_data=dynamic_data
+       )
+       return self._clean_subset_name(subset_name)
+
+   def _clean_subset_name(self, subset_name):
+       """Clean all variants leftover {layer} from subset name."""
+       dynamic_data = prepare_template_data({"layer": "{layer}"})
+       for value in dynamic_data.values():
+           if value in subset_name:
+               return (subset_name.replace(value, "")
+                       .replace("__", "_")
+                       .replace("..", "."))
+       return subset_name
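The `get_subset_name` override above injects a literal `{layer}` placeholder through `prepare_template_data` and then strips whatever variant of it survives template formatting. A minimal, self-contained sketch of that cleanup step (illustrative only, assuming the leftover placeholder is the literal `{layer}` string; not code from the repository):

```python
def clean_subset_name(subset_name, placeholder="{layer}"):
    # Drop an unresolved placeholder and tidy the separators around it,
    # mirroring the replace chain in the new _clean_subset_name above.
    if placeholder in subset_name:
        return (subset_name.replace(placeholder, "")
                .replace("__", "_")
                .replace("..", "."))
    return subset_name


print(clean_subset_name("image{layer}_Beauty"))  # -> "image_Beauty"
```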
@@ -94,12 +94,17 @@ class ImageCreator(Creator):
            name = self._clean_highlights(stub, directory)
            layer_names_in_hierarchy.append(name)

-       data.update({"subset": subset_name})
-       data.update({"members": [str(group.id)]})
-       data.update({"layer_name": layer_name})
-       data.update({"long_name": "_".join(layer_names_in_hierarchy)})
+       data_update = {
+           "subset": subset_name,
+           "members": [str(group.id)],
+           "layer_name": layer_name,
+           "long_name": "_".join(layer_names_in_hierarchy)
+       }
+       data.update(data_update)

-       creator_attributes = {"mark_for_review": self.mark_for_review}
+       mark_for_review = (pre_create_data.get("mark_for_review") or
+                          self.mark_for_review)
+       creator_attributes = {"mark_for_review": mark_for_review}
        data.update({"creator_attributes": creator_attributes})

        if not self.active_on_create:

@@ -124,8 +129,6 @@ class ImageCreator(Creator):

        if creator_id == self.identifier:
            instance_data = self._handle_legacy(instance_data)
-           layer = api.stub().get_layer(instance_data["members"][0])
-           instance_data["layer"] = layer
            instance = CreatedInstance.from_existing(
                instance_data, self
            )
@@ -0,0 +1,24 @@
+import pyblish.api
+
+from openpype.hosts.photoshop import api as photoshop
+
+
+class CollectAutoImageRefresh(pyblish.api.ContextPlugin):
+    """Refreshes auto_image instance with currently visible layers..
+    """
+
+    label = "Collect Auto Image Refresh"
+    order = pyblish.api.CollectorOrder
+    hosts = ["photoshop"]
+    order = pyblish.api.CollectorOrder + 0.2
+
+    def process(self, context):
+        for instance in context:
+            creator_identifier = instance.data.get("creator_identifier")
+            if creator_identifier and creator_identifier == "auto_image":
+                self.log.debug("Auto image instance found, won't create new")
+                # refresh existing auto image instance with current visible
+                publishable_ids = [layer.id for layer in photoshop.stub().get_layers()  # noqa
+                                   if layer.visible]
+                instance.data["ids"] = publishable_ids
+                return
openpype/hosts/photoshop/plugins/publish/collect_image.py (new file, 20 lines)

@@ -0,0 +1,20 @@
+import pyblish.api
+
+from openpype.hosts.photoshop import api
+
+
+class CollectImage(pyblish.api.InstancePlugin):
+    """Collect layer metadata into a instance.
+
+    Used later in validation
+    """
+    order = pyblish.api.CollectorOrder + 0.200
+    label = 'Collect Image'
+
+    hosts = ["photoshop"]
+    families = ["image"]
+
+    def process(self, instance):
+        if instance.data.get("members"):
+            layer = api.stub().get_layer(instance.data["members"][0])
+            instance.data["layer"] = layer
@@ -45,9 +45,11 @@ class ExtractImage(pyblish.api.ContextPlugin):
        # Perform extraction
        files = {}
        ids = set()
        layer = instance.data.get("layer")
        if layer:
            ids.add(layer.id)
        # real layers and groups
        members = instance.data("members")
        if members:
            ids.update(set([int(member) for member in members]))
        # virtual groups collected by color coding or auto_image
        add_ids = instance.data.pop("ids", None)
        if add_ids:
            ids.update(set(add_ids))
@@ -1,4 +1,5 @@
import os
+import shutil
from PIL import Image

from openpype.lib import (

@@ -55,6 +56,7 @@ class ExtractReview(publish.Extractor):
        }

        if instance.data["family"] != "review":
            self.log.debug("Existing extracted file from image family used.")
            # enable creation of review, without this jpg review would clash
            # with jpg of the image family
            output_name = repre_name

@@ -62,8 +64,15 @@ class ExtractReview(publish.Extractor):
        repre_skeleton.update({"name": repre_name,
                               "outputName": output_name})

        if self.make_image_sequence and len(layers) > 1:
            self.log.info("Extract layers to image sequence.")
            img_file = self.output_seq_filename % 0
            self._prepare_file_for_image_family(img_file, instance,
                                                staging_dir)
            repre_skeleton.update({
                "files": img_file,
            })
            processed_img_names = [img_file]
        elif self.make_image_sequence and len(layers) > 1:
            self.log.debug("Extract layers to image sequence.")
            img_list = self._save_sequence_images(staging_dir, layers)

            repre_skeleton.update({

@@ -72,17 +81,17 @@ class ExtractReview(publish.Extractor):
                "fps": fps,
                "files": img_list,
            })
-           instance.data["representations"].append(repre_skeleton)
            processed_img_names = img_list
        else:
-           self.log.info("Extract layers to flatten image.")
-           img_list = self._save_flatten_image(staging_dir, layers)
+           self.log.debug("Extract layers to flatten image.")
+           img_file = self._save_flatten_image(staging_dir, layers)

            repre_skeleton.update({
-               "files": img_list,
+               "files": img_file,
            })
-           instance.data["representations"].append(repre_skeleton)
-           processed_img_names = [img_list]
+           processed_img_names = [img_file]
+
+       instance.data["representations"].append(repre_skeleton)

        ffmpeg_args = get_ffmpeg_tool_args("ffmpeg")

@@ -111,6 +120,35 @@ class ExtractReview(publish.Extractor):

        self.log.info(f"Extracted {instance} to {staging_dir}")

+    def _prepare_file_for_image_family(self, img_file, instance, staging_dir):
+        """Converts existing file for image family to .jpg
+
+        Image instance could have its own separate review (instance per layer
+        for example). This uses extracted file instead of extracting again.
+        Args:
+            img_file (str): name of output file (with 0000 value for ffmpeg
+                later)
+            instance:
+            staging_dir (str): temporary folder where extracted file is located
+        """
+        repre_file = instance.data["representations"][0]
+        source_file_path = os.path.join(repre_file["stagingDir"],
+                                        repre_file["files"])
+        if not os.path.exists(source_file_path):
+            raise RuntimeError(f"{source_file_path} doesn't exist for "
+                               "review to create from")
+        _, ext = os.path.splitext(repre_file["files"])
+        if ext != ".jpg":
+            im = Image.open(source_file_path)
+            # without this it produces messy low quality jpg
+            rgb_im = Image.new("RGBA", (im.width, im.height), "#ffffff")
+            rgb_im.alpha_composite(im)
+            rgb_im.convert("RGB").save(os.path.join(staging_dir, img_file))
+        else:
+            # handles already .jpg
+            shutil.copy(source_file_path,
+                        os.path.join(staging_dir, img_file))

    def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames,
                      source_files_pattern, staging_dir):
        """Generates .mov to upload to Ftrack.

@@ -218,6 +256,11 @@ class ExtractReview(publish.Extractor):
            (list) of PSItem
        """
        layers = []
+       # creating review for existing 'image' instance
+       if instance.data["family"] == "image" and instance.data.get("layer"):
+           layers.append(instance.data["layer"])
+           return layers
+
        for image_instance in instance.context:
            if image_instance.data["family"] != "image":
                continue
@@ -11,7 +11,7 @@ import filecmp
import tempfile
import threading
import shutil
from queue import Queue

from contextlib import closing

from aiohttp import web

@@ -319,19 +319,19 @@ class QtTVPaintRpc(BaseTVPaintRpc):
    async def workfiles_tool(self):
        log.info("Triggering Workfile tool")
        item = MainThreadItem(self.tools_helper.show_workfiles)
-       self._execute_in_main_thread(item)
+       self._execute_in_main_thread(item, wait=False)
        return

    async def loader_tool(self):
        log.info("Triggering Loader tool")
        item = MainThreadItem(self.tools_helper.show_loader)
-       self._execute_in_main_thread(item)
+       self._execute_in_main_thread(item, wait=False)
        return

    async def publish_tool(self):
        log.info("Triggering Publish tool")
        item = MainThreadItem(self.tools_helper.show_publisher_tool)
-       self._execute_in_main_thread(item)
+       self._execute_in_main_thread(item, wait=False)
        return

    async def scene_inventory_tool(self):

@@ -350,13 +350,13 @@ class QtTVPaintRpc(BaseTVPaintRpc):
    async def library_loader_tool(self):
        log.info("Triggering Library loader tool")
        item = MainThreadItem(self.tools_helper.show_library_loader)
-       self._execute_in_main_thread(item)
+       self._execute_in_main_thread(item, wait=False)
        return

    async def experimental_tools(self):
        log.info("Triggering Library loader tool")
        item = MainThreadItem(self.tools_helper.show_experimental_tools_dialog)
-       self._execute_in_main_thread(item)
+       self._execute_in_main_thread(item, wait=False)
        return

    async def _async_execute_in_main_thread(self, item, **kwargs):

@@ -867,7 +867,7 @@ class QtCommunicator(BaseCommunicator):

    def __init__(self, qt_app):
        super().__init__()
-       self.callback_queue = Queue()
+       self.callback_queue = collections.deque()
        self.qt_app = qt_app

    def _create_routes(self):

@@ -880,14 +880,14 @@ class QtCommunicator(BaseCommunicator):

    def execute_in_main_thread(self, main_thread_item, wait=True):
        """Add `MainThreadItem` to callback queue and wait for result."""
-       self.callback_queue.put(main_thread_item)
+       self.callback_queue.append(main_thread_item)
        if wait:
            return main_thread_item.wait()
        return

    async def async_execute_in_main_thread(self, main_thread_item, wait=True):
        """Add `MainThreadItem` to callback queue and wait for result."""
-       self.callback_queue.put(main_thread_item)
+       self.callback_queue.append(main_thread_item)
        if wait:
            return await main_thread_item.async_wait()

@@ -904,9 +904,9 @@ class QtCommunicator(BaseCommunicator):
            self._exit()
            return None

-       if self.callback_queue.empty():
-           return None
-       return self.callback_queue.get()
+       if self.callback_queue:
+           return self.callback_queue.popleft()
+       return None

    def _on_client_connect(self):
        super()._on_client_connect()
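The hunks above replace the blocking `queue.Queue` with `collections.deque` and schedule tool dialogs with `wait=False`. A minimal, self-contained sketch of the resulting non-blocking polling pattern (illustrative names only, not the module's actual API):

```python
import collections

callback_queue = collections.deque()


def schedule(callback):
    # Producer side: called from the web-server thread.
    callback_queue.append(callback)


def pop_next_callback():
    # Consumer side: polled periodically from the Qt main thread;
    # returns None instead of blocking when nothing is queued.
    if callback_queue:
        return callback_queue.popleft()
    return None


schedule(lambda: print("show workfiles tool"))
item = pop_next_callback()
if item is not None:
    item()
```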
@@ -28,11 +28,7 @@
        "colorManagement": "Nuke",
        "OCIO_config": "nuke-default",
        "workingSpaceLUT": "linear",
-       "monitorLut": "sRGB",
-       "int8Lut": "sRGB",
-       "int16Lut": "sRGB",
-       "logLut": "Cineon",
-       "floatLut": "linear"
+       "monitorLut": "sRGB"
    },
    "nodes": {
        "requiredNodes": [

@@ -106,26 +106,6 @@
                "type": "text",
                "key": "monitorLut",
                "label": "monitor"
            },
-           {
-               "type": "text",
-               "key": "int8Lut",
-               "label": "8-bit files"
-           },
-           {
-               "type": "text",
-               "key": "int16Lut",
-               "label": "16-bit files"
-           },
-           {
-               "type": "text",
-               "key": "logLut",
-               "label": "log files"
-           },
-           {
-               "type": "text",
-               "key": "floatLut",
-               "label": "float files"
-           }
        ]
    }
@@ -89,12 +89,6 @@ knob_types_enum = [


class KnobModel(BaseSettingsModel):
-   """# TODO: new data structure
-   - v3 was having type, name, value but
-   ayon is not able to make it the same. Current model is
-   defining `type` as `text` and instead of `value` the key is `text`.
-   So if `type` is `boolean` then key is `boolean` (value).
-   """
    _layout = "expanded"

    type: str = Field(

@@ -16,13 +16,10 @@ def instance_attributes_enum():


class PrenodeModel(BaseSettingsModel):
    # TODO: missing in host api
    # - good for `dependency`
    name: str = Field(
        title="Node name"
    )

    # TODO: `nodeclass` should be renamed to `nuke_node_class`
    nodeclass: str = Field(
        "",
        title="Node class"

@@ -32,11 +29,8 @@ class PrenodeModel(BaseSettingsModel):
        title="Incoming dependency"
    )

    """# TODO: Changes in host api:
    - Need complete rework of knob types in nuke integration.
    - We could not support v3 style of settings.
    """
    knobs: list[KnobModel] = Field(
        default_factory=list,
        title="Knobs",
    )

@@ -61,11 +55,8 @@ class CreateWriteRenderModel(BaseSettingsModel):
        title="Instance attributes"
    )

    """# TODO: Changes in host api:
    - prenodes key was originally dict and now is list
    (we could not support v3 style of settings)
    """
    prenodes: list[PrenodeModel] = Field(
        default_factory=list,
        title="Preceding nodes",
    )

@@ -90,11 +81,8 @@ class CreateWritePrerenderModel(BaseSettingsModel):
        title="Instance attributes"
    )

    """# TODO: Changes in host api:
    - prenodes key was originally dict and now is list
    (we could not support v3 style of settings)
    """
    prenodes: list[PrenodeModel] = Field(
        default_factory=list,
        title="Preceding nodes",
    )

@@ -119,11 +107,8 @@ class CreateWriteImageModel(BaseSettingsModel):
        title="Instance attributes"
    )

    """# TODO: Changes in host api:
    - prenodes key was originally dict and now is list
    (we could not support v3 style of settings)
    """
    prenodes: list[PrenodeModel] = Field(
        default_factory=list,
        title="Preceding nodes",
    )
@@ -25,19 +25,6 @@ class DirmapSettings(BaseSettingsModel):
    )


-   """# TODO:
-   nuke is having originally implemented
-   following data inputs:
-
-   "nuke-dirmap": {
-       "enabled": false,
-       "paths": {
-           "source-path": [],
-           "destination-path": []
-       }
-   }
-   """

DEFAULT_DIRMAP_SETTINGS = {
    "enabled": False,
    "paths": {
@@ -9,22 +9,17 @@ from .common import KnobModel


class NodesModel(BaseSettingsModel):
-   """# TODO: This needs to be somehow labeled in settings panel
-   or at least it could show gist of configuration
-   """
    _layout = "expanded"
    plugins: list[str] = Field(
        default_factory=list,
        title="Used in plugins"
    )
+   # TODO: rename `nukeNodeClass` to `nuke_node_class`
    nukeNodeClass: str = Field(
        title="Nuke Node Class",
    )

-   """ # TODO: Need complete rework of knob types
-   in nuke integration. We could not support v3 style of settings.
-   """
    knobs: list[KnobModel] = Field(
        default_factory=list,
        title="Knobs",
    )

@@ -66,22 +61,6 @@ def ocio_configs_switcher_enum():

class WorkfileColorspaceSettings(BaseSettingsModel):
    """Nuke workfile colorspace preset. """
-   """# TODO: enhance settings with host api:
-   we need to add mapping to resolve properly keys.
-   Nuke is excpecting camel case key names,
-   but for better code consistency we need to
-   be using snake_case:
-
-   color_management = colorManagement
-   ocio_config = OCIO_config
-   working_space_name = workingSpaceLUT
-   monitor_name = monitorLut
-   monitor_out_name = monitorOutLut
-   int_8_name = int8Lut
-   int_16_name = int16Lut
-   log_name = logLut
-   float_name = floatLut
-   """

    colorManagement: Literal["Nuke", "OCIO"] = Field(
        title="Color Management"
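The removed TODO above records the mapping between the camelCase knob names Nuke expects and the snake_case names the settings code would prefer. A small illustrative sketch of such a key-mapping helper (an assumption for illustration, not code from the repository):

```python
# Hypothetical mapping, copied from the removed TODO text above.
SNAKE_TO_NUKE_KEY = {
    "color_management": "colorManagement",
    "ocio_config": "OCIO_config",
    "working_space_name": "workingSpaceLUT",
    "monitor_name": "monitorLut",
    "monitor_out_name": "monitorOutLut",
    "int_8_name": "int8Lut",
    "int_16_name": "int16Lut",
    "log_name": "logLut",
    "float_name": "floatLut",
}


def to_nuke_keys(settings: dict) -> dict:
    # Rename snake_case settings keys to the camelCase names Nuke reads,
    # passing through any key that has no mapping.
    return {SNAKE_TO_NUKE_KEY.get(key, key): value
            for key, value in settings.items()}


print(to_nuke_keys({"working_space_name": "linear", "monitor_name": "sRGB"}))
# -> {'workingSpaceLUT': 'linear', 'monitorLut': 'sRGB'}
```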
@@ -100,18 +79,6 @@ class WorkfileColorspaceSettings(BaseSettingsModel):
    monitorLut: str = Field(
        title="Monitor"
    )
-   int8Lut: str = Field(
-       title="8-bit files"
-   )
-   int16Lut: str = Field(
-       title="16-bit files"
-   )
-   logLut: str = Field(
-       title="Log files"
-   )
-   floatLut: str = Field(
-       title="Float files"
-   )


class ReadColorspaceRulesItems(BaseSettingsModel):

@@ -170,7 +137,7 @@ class ImageIOSettings(BaseSettingsModel):
    _isGroup: bool = True

    """# TODO: enhance settings with host api:
-   to restruture settings for simplification.
+   to restructure settings for simplification.

    now: nuke/imageio/viewer/viewerProcess
    future: nuke/imageio/viewer

@@ -193,7 +160,7 @@ class ImageIOSettings(BaseSettingsModel):
    )

    """# TODO: enhance settings with host api:
-   to restruture settings for simplification.
+   to restructure settings for simplification.

    now: nuke/imageio/baking/viewerProcess
    future: nuke/imageio/baking

@@ -215,9 +182,9 @@ class ImageIOSettings(BaseSettingsModel):
        title="Nodes"
    )
    """# TODO: enhance settings with host api:
-   - old settings are using `regexInputs` key but we
+   - [ ] old settings are using `regexInputs` key but we
    need to rename to `regex_inputs`
-   - no need for `inputs` middle part. It can stay
+   - [ ] no need for `inputs` middle part. It can stay
    directly on `regex_inputs`
    """
    regexInputs: RegexInputsModel = Field(

@@ -238,10 +205,6 @@ DEFAULT_IMAGEIO_SETTINGS = {
        "OCIO_config": "nuke-default",
        "workingSpaceLUT": "linear",
        "monitorLut": "sRGB",
-       "int8Lut": "sRGB",
-       "int16Lut": "sRGB",
-       "logLut": "Cineon",
-       "floatLut": "linear"
    },
    "nodes": {
        "requiredNodes": [
@@ -6,10 +6,6 @@ class LoadImageModel(BaseSettingsModel):
    enabled: bool = Field(
        title="Enabled"
    )
-   """# TODO: v3 api used `_representation`
-   New api is hiding it so it had to be renamed
-   to `representations_include`
-   """
    representations_include: list[str] = Field(
        default_factory=list,
        title="Include representations"

@@ -33,10 +29,6 @@ class LoadClipModel(BaseSettingsModel):
    enabled: bool = Field(
        title="Enabled"
    )
-   """# TODO: v3 api used `_representation`
-   New api is hiding it so it had to be renamed
-   to `representations_include`
-   """
    representations_include: list[str] = Field(
        default_factory=list,
        title="Include representations"

@@ -59,9 +59,7 @@ class NukeSettings(BaseSettingsModel):
        default_factory=ImageIOSettings,
        title="Color Management (imageio)",
    )
    """# TODO: fix host api:
    - rename `nuke-dirmap` to `dirmap` was inevitable
    """

    dirmap: DirmapSettings = Field(
        default_factory=DirmapSettings,
        title="Nuke Directory Mapping",
@@ -28,11 +28,9 @@ def nuke_product_types_enum():


class NodeModel(BaseSettingsModel):
    # TODO: missing in host api
    name: str = Field(
        title="Node name"
    )
    # TODO: `nodeclass` rename to `nuke_node_class`
    nodeclass: str = Field(
        "",
        title="Node class"

@@ -41,11 +39,8 @@ class NodeModel(BaseSettingsModel):
        "",
        title="Incoming dependency"
    )
    """# TODO: Changes in host api:
    - Need complete rework of knob types in nuke integration.
    - We could not support v3 style of settings.
    """
    knobs: list[KnobModel] = Field(
        default_factory=list,
        title="Knobs",
    )

@@ -99,12 +94,9 @@ class ExtractThumbnailModel(BaseSettingsModel):
    use_rendered: bool = Field(title="Use rendered images")
    bake_viewer_process: bool = Field(title="Bake view process")
    bake_viewer_input_process: bool = Field(title="Bake viewer input process")
    """# TODO: needs to rewrite from v3 to ayon
    - `nodes` in v3 was dict but now `prenodes` is list of dict
    - also later `nodes` should be `prenodes`
    """
    nodes: list[NodeModel] = Field(
        default_factory=list,
        title="Nodes (deprecated)"
    )
    reposition_nodes: list[ThumbnailRepositionNodeModel] = Field(

@@ -177,6 +169,7 @@ class ExtractReviewDataMovModel(BaseSettingsModel):
    enabled: bool = Field(title="Enabled")
    viewer_lut_raw: bool = Field(title="Viewer lut raw")
    outputs: list[BakingStreamModel] = Field(
        default_factory=list,
        title="Baking streams"
    )

@@ -213,12 +206,6 @@ class ExctractSlateFrameParamModel(BaseSettingsModel):

class ExtractSlateFrameModel(BaseSettingsModel):
    viewer_lut_raw: bool = Field(title="Viewer lut raw")
-   """# TODO: v3 api different model:
-   - not possible to replicate v3 model:
-   {"name": [bool, str]}
-   - not it is:
-   {"name": {"enabled": bool, "template": str}}
-   """
    key_value_mapping: ExctractSlateFrameParamModel = Field(
        title="Key value mapping",
        default_factory=ExctractSlateFrameParamModel

@@ -287,7 +274,6 @@ class PublishPuginsModel(BaseSettingsModel):
        title="Extract Slate Frame",
        default_factory=ExtractSlateFrameModel
    )
-   # TODO: plugin should be renamed - `workfile` not `script`
    IncrementScriptVersion: IncrementScriptVersionModel = Field(
        title="Increment Workfile Version",
        default_factory=IncrementScriptVersionModel,
@@ -17,7 +17,6 @@ class ScriptsmenuSettings(BaseSettingsModel):
    """Nuke script menu project settings."""
    _isGroup = True

-   # TODO: in api rename key `name` to `menu_name`
    name: str = Field(title="Menu Name")
    definition: list[ScriptsmenuSubmodel] = Field(
        default_factory=list,

@@ -28,6 +28,7 @@ class TemplatedWorkfileProfileModel(BaseSettingsModel):


class TemplatedWorkfileBuildModel(BaseSettingsModel):
+   """Settings for templated workfile builder."""
    profiles: list[TemplatedWorkfileProfileModel] = Field(
        default_factory=list
    )

@@ -48,20 +48,32 @@ class BuilderProfileModel(BaseSettingsModel):
        title="Task names"
    )
    current_context: list[BuilderProfileItemModel] = Field(
-       title="Current context")
+       default_factory=list,
+       title="Current context"
+   )
    linked_assets: list[BuilderProfileItemModel] = Field(
-       title="Linked assets/shots")
+       default_factory=list,
+       title="Linked assets/shots"
+   )


class WorkfileBuilderModel(BaseSettingsModel):
+   """[deprecated] use Template Workfile Build Settings instead.
+   """
    create_first_version: bool = Field(
        title="Create first workfile")
    custom_templates: list[CustomTemplateModel] = Field(
-       title="Custom templates")
+       default_factory=list,
+       title="Custom templates"
+   )
    builder_on_start: bool = Field(
-       title="Run Builder at first workfile")
+       default=False,
+       title="Run Builder at first workfile"
+   )
    profiles: list[BuilderProfileModel] = Field(
-       title="Builder profiles")
+       default_factory=list,
+       title="Builder profiles"
+   )


DEFAULT_WORKFILE_BUILDER_SETTINGS = {
@@ -18,6 +18,10 @@ Location: Settings > Project > AfterEffects

## Publish plugins

+### Collect Review
+
+Enable/disable creation of auto instance of review.
+
### Validate Scene Settings

#### Skip Resolution Check for Tasks

@@ -28,6 +32,10 @@ Set regex pattern(s) to look for in a Task name to skip resolution check against

Set regex pattern(s) to look for in a Task name to skip `frameStart`, `frameEnd` check against values from DB.

+### ValidateContainers
+
+By default this validator will look loaded items with lower version than latest. This validator is context wide so it must be disabled in Context button.
+
### AfterEffects Submit to Deadline

* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one.
@@ -33,7 +33,6 @@ Provides list of [variants](artist_concepts.md#variant) that will be shown to an

Provides simplified publishing process. It will create single `image` instance for artist automatically. This instance will
produce flatten image from all visible layers in a workfile.

- Subset template for flatten image - provide template for subset name for this instance (example `imageBeauty`)
- Review - should be separate review created for this instance

### Create Review

@@ -111,11 +110,11 @@ Set Byte limit for review file. Applicable if gigantic `image` instances are pro

#### Extract jpg Options

-Handles tags for produced `.jpg` representation. `Create review` and `Add review to Ftrack` are defaults.
+Handles tags for produced `.jpg` representation. `Create review` and `Add review to Ftrack` are defaults.

#### Extract mov Options

-Handles tags for produced `.mov` representation. `Create review` and `Add review to Ftrack` are defaults.
+Handles tags for produced `.mov` representation. `Create review` and `Add review to Ftrack` are defaults.


### Workfile Builder

@@ -124,4 +123,4 @@ Allows to open prepared workfile for an artist when no workfile exists. Useful t

Could be configured per `Task type`, eg. `composition` task type could use different `.psd` template file than `art` task.
Workfile template must be accessible for all artists.
-(Currently not handled by [SiteSync](module_site_sync.md))
+(Currently not handled by [SiteSync](module_site_sync.md))
Binary file not shown.
Before: Size 14 KiB | After: Size 16 KiB