Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)
Merge branch 'develop' into bugfix/OP-6359_Maya-camera-state-beforeafter-publish

Commit fee4b80020
68 changed files with 1985 additions and 572 deletions
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 8 changes)
@ -35,6 +35,10 @@ body:
      label: Version
      description: What version are you running? Look to OpenPype Tray
      options:
        - 3.18.4-nightly.1
        - 3.18.3
        - 3.18.3-nightly.2
        - 3.18.3-nightly.1
        - 3.18.2
        - 3.18.2-nightly.6
        - 3.18.2-nightly.5
@ -131,10 +135,6 @@ body:
        - 3.15.7-nightly.2
        - 3.15.7-nightly.1
        - 3.15.6
        - 3.15.6-nightly.3
        - 3.15.6-nightly.2
        - 3.15.6-nightly.1
        - 3.15.5
    validations:
      required: true
  - type: dropdown
CHANGELOG.md (256 changes)
@ -1,6 +1,262 @@
# Changelog

## [3.18.3](https://github.com/ynput/OpenPype/tree/3.18.3)

[Full Changelog](https://github.com/ynput/OpenPype/compare/3.18.2...3.18.3)

### **🚀 Enhancements**

<details>
<summary>Maya: Apply initial viewport shader for Redshift Proxy after loading <a href="https://github.com/ynput/OpenPype/pull/6102">#6102</a></summary>

When a published Redshift proxy is loaded, its viewport shader is missing. This differs from a manual load, where the user creates a Redshift proxy for the file by hand. This PR assigns the default lambert to the loaded proxy, replicating what happens when the user manually loads the proxy from a file path.

___

</details>
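A minimal sketch of the idea (not the exact PR code), assuming a Maya session with `maya.cmds` available; the proxy shape name is hypothetical. Adding the shape to `initialShadingGroup` gives it the default lambert, matching a manual load:

```python
from maya import cmds

def assign_default_shader(proxy_shape):
    """Put the loaded Redshift proxy shape into the default shading group."""
    cmds.sets(proxy_shape, forceElement="initialShadingGroup")

assign_default_shader("assetMain_rsProxyShape")  # hypothetical node name
```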
<details>
<summary>General: We should keep current subset version when we switch only the representation type <a href="https://github.com/ynput/OpenPype/pull/4629">#4629</a></summary>

When only the representation type of a subset is switched, the representation should come from the current version, not from the subset's latest version.

___

</details>
<details>
<summary>Houdini: Add loader for redshift proxy family <a href="https://github.com/ynput/OpenPype/pull/5948">#5948</a></summary>

Loader for Redshift Proxy in Houdini (thanks to @BigRoy for the contribution).

___

</details>
<details>
<summary>AfterEffects: exposing Deadline pools fields in Publisher UI <a href="https://github.com/ynput/OpenPype/pull/6079">#6079</a></summary>

Deadline pools can be set ad hoc by an artist during publishing; the AfterEffects implementation wasn't exposing this.

___

</details>
<details>
<summary>Chore: Event callbacks can have order <a href="https://github.com/ynput/OpenPype/pull/6080">#6080</a></summary>

Event callbacks can now define the order in which they are called. Also fixed an issue with resolving the callback's function name and source file when a `partial` is used as the callback.

___

</details>
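An illustrative sketch only (not the exact ayon-core API): callbacks are kept as `(order, callback)` pairs and invoked sorted by order, so lower orders run first, and `functools.partial` objects work as callbacks:

```python
from functools import partial

class Events:
    def __init__(self):
        self._callbacks = []  # list of (order, callback)

    def add_callback(self, callback, order=0):
        self._callbacks.append((order, callback))

    def emit(self, data):
        # Stable sort keeps registration order for equal 'order' values
        for _order, callback in sorted(self._callbacks, key=lambda c: c[0]):
            callback(data)

events = Events()
events.add_callback(lambda data: print("runs second"), order=100)
events.add_callback(partial(print, "runs first:"), order=-100)
events.emit({"topic": "example"})
```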
<details>
<summary>AYON: OpenPype addon defines runtime dependencies <a href="https://github.com/ynput/OpenPype/pull/6095">#6095</a></summary>

Moved runtime dependencies from ayon-launcher to the openpype addon.

___

</details>
<details>
<summary>Max: User's setting for scene unit scale <a href="https://github.com/ynput/OpenPype/pull/6097">#6097</a></summary>

Adds options for users to set the default scene unit scale for their scenes (configurable in both AYON and legacy OpenPype settings).

___

</details>
<details>
<summary>Chore: Remove deprecated templates profiles <a href="https://github.com/ynput/OpenPype/pull/6103">#6103</a></summary>

Remove deprecated usage of template profiles from settings.

___

</details>
<details>
<summary>Publisher: Window is not always on top <a href="https://github.com/ynput/OpenPype/pull/6107">#6107</a></summary>

The goal of this PR is to avoid using `WindowStaysOnTopHint`, which causes issues: especially when a DCC shows a popup dialog behind the Publisher window, both the Publisher and the DCC appear frozen and there is nothing the user can do.

___

</details>
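A hedged illustration of the failure mode, using `qtpy` as elsewhere in this codebase: with `WindowStaysOnTopHint` set, a modal dialog opened by the host DCC can end up underneath the always-on-top window, so neither can be interacted with. The fix is to drop the hint and raise the window on demand instead:

```python
from qtpy import QtCore, QtWidgets

app = QtWidgets.QApplication([])
window = QtWidgets.QWidget()

# Problematic variant (kept commented out): pins the window above the host's
# modal dialogs, which can deadlock the UI.
# window.setWindowFlags(window.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)

window.show()
window.raise_()           # bring to front once, without pinning on top
window.activateWindow()
```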
<details>
<summary>Houdini: add split job export support for Redshift ROP <a href="https://github.com/ynput/OpenPype/pull/6108">#6108</a></summary>

Adds support for splitting export and render jobs for Redshift, as already implemented for V-Ray, Mantra and Arnold.

___

</details>
<details>
<summary>Fusion: automatic installation of PySide2 <a href="https://github.com/ynput/OpenPype/pull/6111">#6111</a></summary>

This PR adds a hook that checks whether PySide2 is installed in the Python distribution used by Fusion and, if not, tries to install it automatically.

___

</details>
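A rough sketch of what the hook does (see the new `pre_pyside_install.py` hook later in this diff for the real implementation): probe Fusion's own Python for a Qt binding, and pip-install PySide2 into it when the import fails. The executable path here is a placeholder:

```python
import subprocess

python_executable = "/opt/fusion/python3/python3"  # hypothetical path
probe = subprocess.run(
    [python_executable, "-c", "from qtpy import QtWidgets"],
    capture_output=True,
)
if probe.returncode != 0:
    subprocess.run(
        [python_executable, "-m", "pip", "install",
         "--ignore-installed", "PySide2"],
        check=True,
    )
```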
<details>
<summary>AYON: OpenPype addon dependencies <a href="https://github.com/ynput/OpenPype/pull/6113">#6113</a></summary>

Added `click` and `six` to the requirements of the openpype addon, and removed the `Qt.py` requirement, which is not used anywhere.

___

</details>
<details>
<summary>Chore: Thumbnail representation has 'outputName' <a href="https://github.com/ynput/OpenPype/pull/6114">#6114</a></summary>

Add an output name to the thumbnail representation to prevent identical output filenames during integration.

___

</details>
<details>
<summary>Kitsu: Clear credentials is safe <a href="https://github.com/ynput/OpenPype/pull/6116">#6116</a></summary>

Do not attempt to remove keyring items that do not exist.

___

</details>
### **🐛 Bug fixes**

<details>
<summary>Maya: bug fix the playblast without textures <a href="https://github.com/ynput/OpenPype/pull/5942">#5942</a></summary>

Fixes textures not being displayed in playblasts when texture placement is enabled in the OP/AYON settings.

___

</details>
<details>
<summary>Blender: Workfile instance update fix <a href="https://github.com/ynput/OpenPype/pull/6048">#6048</a></summary>

Make sure the workfile instance always has 'instance_node' available in its transient data.

___

</details>
<details>
<summary>Publisher: Fix issue with parenting of widgets <a href="https://github.com/ynput/OpenPype/pull/6106">#6106</a></summary>

Don't use the Publisher window's parent (usually the main DCC window) as the parent of the report widget.

___

</details>
<details>
<summary>:wrench: fix and update pydocstyle configuration <a href="https://github.com/ynput/OpenPype/pull/6109">#6109</a></summary>

Fix the pydocstyle configuration and move it to `pyproject.toml`.

___

</details>
<details>
<summary>Nuke: Create camera node with the latest camera node class in Nuke 14 <a href="https://github.com/ynput/OpenPype/pull/6118">#6118</a></summary>

Creating an instance fails for certain cameras, apparently only in Nuke 14. The cause is the new camera node class `Camera4`, while the camera creator still works with the `Camera2` class.

___

</details>
<details>
<summary>Site Sync: small fixes in Loader <a href="https://github.com/ynput/OpenPype/pull/6119">#6119</a></summary>

Resolves issues:
- the local and studio icons were the same; they should be different
- a `TypeError: string indices must be integers` error when downloading/uploading workfiles

___

</details>
<details>
<summary>Chore: Template data for editorial publishing <a href="https://github.com/ynput/OpenPype/pull/6120">#6120</a></summary>

Template data for editorial publishing is filled during `CollectInstanceAnatomyData`. The editorial structure is determined there, as the ExtractHierarchy AYON/OpenPype plugins require it.

___

</details>
<details>
<summary>SceneInventory: Fix site sync icon conversion <a href="https://github.com/ynput/OpenPype/pull/6123">#6123</a></summary>

Use 'get_qt_icon' to convert icon definitions from site sync.

___

</details>
## [3.18.2](https://github.com/ynput/OpenPype/tree/3.18.2)
@ -124,23 +124,24 @@ def get_linked_representation_id(
        if not versions_to_check:
            break

-       links = con.get_versions_links(
+       versions_links = con.get_versions_links(
            project_name,
            versions_to_check,
            link_types=link_types,
            link_direction="out")

        versions_to_check = set()
-       for link in links:
-           # Care only about version links
-           if link["entityType"] != "version":
-               continue
-           entity_id = link["entityId"]
-           # Skip already found linked version ids
-           if entity_id in linked_version_ids:
-               continue
-           linked_version_ids.add(entity_id)
-           versions_to_check.add(entity_id)
+       for links in versions_links.values():
+           for link in links:
+               # Care only about version links
+               if link["entityType"] != "version":
+                   continue
+               entity_id = link["entityId"]
+               # Skip already found linked version ids
+               if entity_id in linked_version_ids:
+                   continue
+               linked_version_ids.add(entity_id)
+               versions_to_check.add(entity_id)

    linked_version_ids.remove(version_id)
    if not linked_version_ids:
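A hedged sketch of why the fix was needed: `get_versions_links()` returns a mapping of queried version id to a list of link dicts, not a flat list, so the links must be read from the mapping's values. The data below is illustrative:

```python
versions_links = {
    "version-id-1": [
        {"entityType": "version", "entityId": "linked-id-a"},
        {"entityType": "representation", "entityId": "ignored"},
    ],
}

linked_version_ids = set()
for links in versions_links.values():
    for link in links:
        if link["entityType"] != "version":  # only version links matter
            continue
        linked_version_ids.add(link["entityId"])

print(linked_version_ids)  # {'linked-id-a'}
```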
@ -36,6 +36,12 @@ def prepare_scene_name(
    if namespace:
        name = f"{name}_{namespace}"
    name = f"{name}_{subset}"

+   # Blender name for a collection or object cannot be longer than 63
+   # characters. If the name is longer, it will raise an error.
+   if len(name) > 63:
+       raise ValueError(f"Scene name '{name}' would be too long.")

    return name
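Why the explicit check matters: Blender silently truncates data-block names to 63 characters, so two long scene names could collide after truncation; raising early makes the problem visible. A quick demonstration, assuming a running `bpy` session:

```python
import bpy

name = "x" * 70
collection = bpy.data.collections.new(name=name)
print(len(collection.name))  # 63 -- Blender truncated the name silently
```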
@ -226,7 +232,7 @@ class BaseCreator(Creator):

        # Create asset group
        if AYON_SERVER_ENABLED:
-           asset_name = instance_data["folderPath"]
+           asset_name = instance_data["folderPath"].split("/")[-1]
        else:
            asset_name = instance_data["asset"]

@ -305,12 +311,16 @@ class BaseCreator(Creator):
            )
            return

-       # Rename the instance node in the scene if subset or asset changed
+       # Rename the instance node in the scene if subset or asset changed.
+       # Do not rename the instance if the family is workfile, as the
+       # workfile instance is included in the AVALON_CONTAINER collection.
        if (
            "subset" in changes.changed_keys
            or asset_name_key in changes.changed_keys
-       ):
+       ) and created_instance.family != "workfile":
            asset_name = data[asset_name_key]
            if AYON_SERVER_ENABLED:
                asset_name = asset_name.split("/")[-1]
            name = prepare_scene_name(
                asset=asset_name, subset=data["subset"]
            )
@ -25,7 +25,7 @@ class CreateWorkfile(BaseCreator, AutoCreator):

    def create(self):
        """Create workfile instances."""
-       existing_instance = next(
+       workfile_instance = next(
            (
                instance for instance in self.create_context.instances
                if instance.creator_identifier == self.identifier

@ -39,14 +39,14 @@ class CreateWorkfile(BaseCreator, AutoCreator):
        host_name = self.create_context.host_name

        existing_asset_name = None
-       if existing_instance is not None:
+       if workfile_instance is not None:
            if AYON_SERVER_ENABLED:
-               existing_asset_name = existing_instance.get("folderPath")
+               existing_asset_name = workfile_instance.get("folderPath")

            if existing_asset_name is None:
-               existing_asset_name = existing_instance["asset"]
+               existing_asset_name = workfile_instance["asset"]

-       if not existing_instance:
+       if not workfile_instance:
            asset_doc = get_asset_by_name(project_name, asset_name)
            subset_name = self.get_subset_name(
                task_name, task_name, asset_doc, project_name, host_name

@ -66,19 +66,18 @@ class CreateWorkfile(BaseCreator, AutoCreator):
                    asset_doc,
                    project_name,
                    host_name,
-                   existing_instance,
+                   workfile_instance,
                )
            )
            self.log.info("Auto-creating workfile instance...")
-           current_instance = CreatedInstance(
+           workfile_instance = CreatedInstance(
                self.family, subset_name, data, self
            )
-           instance_node = bpy.data.collections.get(AVALON_CONTAINERS, {})
-           current_instance.transient_data["instance_node"] = instance_node
-           self._add_instance_to_context(current_instance)
+           self._add_instance_to_context(workfile_instance)

        elif (
            existing_asset_name != asset_name
-           or existing_instance["task"] != task_name
+           or workfile_instance["task"] != task_name
        ):
            # Update instance context if it's different
            asset_doc = get_asset_by_name(project_name, asset_name)

@ -86,12 +85,17 @@ class CreateWorkfile(BaseCreator, AutoCreator):
                task_name, task_name, asset_doc, project_name, host_name
            )
            if AYON_SERVER_ENABLED:
-               existing_instance["folderPath"] = asset_name
+               workfile_instance["folderPath"] = asset_name
            else:
-               existing_instance["asset"] = asset_name
+               workfile_instance["asset"] = asset_name

-           existing_instance["task"] = task_name
-           existing_instance["subset"] = subset_name
+           workfile_instance["task"] = task_name
+           workfile_instance["subset"] = subset_name

+       instance_node = bpy.data.collections.get(AVALON_CONTAINERS)
+       if not instance_node:
+           instance_node = bpy.data.collections.new(name=AVALON_CONTAINERS)
+       workfile_instance.transient_data["instance_node"] = instance_node

    def collect_instances(self):
@ -61,5 +61,10 @@ class BlendAnimationLoader(plugin.AssetLoader):

        bpy.data.objects.remove(container)

-       library = bpy.data.libraries.get(bpy.path.basename(libpath))
+       filename = bpy.path.basename(libpath)
+       # Blender has a limit of 63 characters for any data name.
+       # If the filename is longer, it will be truncated.
+       if len(filename) > 63:
+           filename = filename[:63]
+       library = bpy.data.libraries.get(filename)
        bpy.data.libraries.remove(library)

@ -106,7 +106,12 @@ class BlendLoader(plugin.AssetLoader):
            bpy.context.scene.collection.objects.link(obj)

        # Remove the library from the blend file
-       library = bpy.data.libraries.get(bpy.path.basename(libpath))
+       filepath = bpy.path.basename(libpath)
+       # Blender has a limit of 63 characters for any data name.
+       # If the filepath is longer, it will be truncated.
+       if len(filepath) > 63:
+           filepath = filepath[:63]
+       library = bpy.data.libraries.get(filepath)
        bpy.data.libraries.remove(library)

        return container, members

@ -60,7 +60,12 @@ class BlendSceneLoader(plugin.AssetLoader):
            bpy.context.scene.collection.children.link(container)

        # Remove the library from the blend file
-       library = bpy.data.libraries.get(bpy.path.basename(libpath))
+       filepath = bpy.path.basename(libpath)
+       # Blender has a limit of 63 characters for any data name.
+       # If the filepath is longer, it will be truncated.
+       if len(filepath) > 63:
+           filepath = filepath[:63]
+       library = bpy.data.libraries.get(filepath)
        bpy.data.libraries.remove(library)

        return container, members
openpype/hosts/fusion/api/plugin.py (new file, 221 lines)
@ -0,0 +1,221 @@
from copy import deepcopy
import os

from openpype.hosts.fusion.api import (
    get_current_comp,
    comp_lock_and_undo_chunk,
)

from openpype.lib import (
    BoolDef,
    EnumDef,
)
from openpype.pipeline import (
    legacy_io,
    Creator,
    CreatedInstance
)


class GenericCreateSaver(Creator):
    default_variants = ["Main", "Mask"]
    description = "Fusion Saver to generate image sequence"
    icon = "fa5.eye"

    instance_attributes = [
        "reviewable"
    ]

    settings_category = "fusion"

    image_format = "exr"

    # TODO: This should be renamed together with Nuke so it is aligned
    temp_rendering_path_template = (
        "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}")

    def create(self, subset_name, instance_data, pre_create_data):
        self.pass_pre_attributes_to_instance(instance_data, pre_create_data)

        instance = CreatedInstance(
            family=self.family,
            subset_name=subset_name,
            data=instance_data,
            creator=self,
        )
        data = instance.data_to_store()
        comp = get_current_comp()
        with comp_lock_and_undo_chunk(comp):
            args = (-32768, -32768)  # Magical position numbers
            saver = comp.AddTool("Saver", *args)

            self._update_tool_with_data(saver, data=data)

            # Register the CreatedInstance
            self._imprint(saver, data)

            # Insert the transient data
            instance.transient_data["tool"] = saver

            self._add_instance_to_context(instance)

        return instance

    def collect_instances(self):
        comp = get_current_comp()
        tools = comp.GetToolList(False, "Saver").values()
        for tool in tools:
            data = self.get_managed_tool_data(tool)
            if not data:
                continue

            # Add instance
            created_instance = CreatedInstance.from_existing(data, self)

            # Collect transient data
            created_instance.transient_data["tool"] = tool

            self._add_instance_to_context(created_instance)

    def update_instances(self, update_list):
        for created_inst, _changes in update_list:
            new_data = created_inst.data_to_store()
            tool = created_inst.transient_data["tool"]
            self._update_tool_with_data(tool, new_data)
            self._imprint(tool, new_data)

    def remove_instances(self, instances):
        for instance in instances:
            # Remove the tool from the scene
            tool = instance.transient_data["tool"]
            if tool:
                tool.Delete()

            # Remove the collected CreatedInstance to remove from UI directly
            self._remove_instance_from_context(instance)

    def _imprint(self, tool, data):
        # Save all data in a "openpype.{key}" = value data

        # Instance id is the tool's name so we don't need to imprint as data
        data.pop("instance_id", None)

        active = data.pop("active", None)
        if active is not None:
            # Use active value to set the passthrough state
            tool.SetAttrs({"TOOLB_PassThrough": not active})

        for key, value in data.items():
            tool.SetData(f"openpype.{key}", value)

    def _update_tool_with_data(self, tool, data):
        """Update tool node name and output path based on subset data"""
        if "subset" not in data:
            return

        original_subset = tool.GetData("openpype.subset")
        original_format = tool.GetData(
            "openpype.creator_attributes.image_format"
        )

        subset = data["subset"]
        if (
            original_subset != subset
            or original_format != data["creator_attributes"]["image_format"]
        ):
            self._configure_saver_tool(data, tool, subset)

    def _configure_saver_tool(self, data, tool, subset):
        formatting_data = deepcopy(data)

        # get frame padding from anatomy templates
        frame_padding = self.project_anatomy.templates["frame_padding"]

        # get output format
        ext = data["creator_attributes"]["image_format"]

        # Subset change detected
        workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
        formatting_data.update({
            "workdir": workdir,
            "frame": "0" * frame_padding,
            "ext": ext,
            "product": {
                "name": formatting_data["subset"],
                "type": formatting_data["family"],
            },
        })

        # build file path to render
        filepath = self.temp_rendering_path_template.format(**formatting_data)

        comp = get_current_comp()
        tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))

        # Rename tool
        if tool.Name != subset:
            print(f"Renaming {tool.Name} -> {subset}")
            tool.SetAttrs({"TOOLS_Name": subset})

    def get_managed_tool_data(self, tool):
        """Return data of the tool if it matches creator identifier"""
        data = tool.GetData("openpype")
        if not isinstance(data, dict):
            return

        required = {
            "id": "pyblish.avalon.instance",
            "creator_identifier": self.identifier,
        }
        for key, value in required.items():
            if key not in data or data[key] != value:
                return

        # Get active state from the actual tool state
        attrs = tool.GetAttrs()
        passthrough = attrs["TOOLB_PassThrough"]
        data["active"] = not passthrough

        # Override publisher's UUID generation because tool names are
        # already unique in Fusion in a comp
        data["instance_id"] = tool.Name

        return data

    def get_instance_attr_defs(self):
        """Settings for publish page"""
        return self.get_pre_create_attr_defs()

    def pass_pre_attributes_to_instance(self, instance_data, pre_create_data):
        creator_attrs = instance_data["creator_attributes"] = {}
        for pass_key in pre_create_data.keys():
            creator_attrs[pass_key] = pre_create_data[pass_key]

    def _get_render_target_enum(self):
        rendering_targets = {
            "local": "Local machine rendering",
            "frames": "Use existing frames",
        }
        if "farm_rendering" in self.instance_attributes:
            rendering_targets["farm"] = "Farm rendering"

        return EnumDef(
            "render_target", items=rendering_targets, label="Render target"
        )

    def _get_reviewable_bool(self):
        return BoolDef(
            "review",
            default=("reviewable" in self.instance_attributes),
            label="Review",
        )

    def _get_image_format_enum(self):
        image_format_options = ["exr", "tga", "tif", "png", "jpg"]
        return EnumDef(
            "image_format",
            items=image_format_options,
            default=self.image_format,
            label="Output Image Format",
        )
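A small worked example of the saver path template used above (the values are illustrative): `temp_rendering_path_template` is formatted with the instance data plus a zero-padded frame placeholder, so the Saver's "Clip" ends up with a predictable per-subset render path:

```python
template = "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}"
path = template.format(
    workdir="C:/proj/shot010/work",  # hypothetical workdir
    subset="renderMain",
    frame="0" * 4,  # frame_padding of 4 -> "0000"
    ext="exr",
)
print(path)
# C:/proj/shot010/work/renders/fusion/renderMain/renderMain.0000.exr
```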
@ -64,5 +64,8 @@ class FusionPrelaunch(PreLaunchHook):

        self.launch_context.env[py3_var] = py3_dir

+       # for hook installing PySide2
+       self.data["fusion_python3_home"] = py3_dir

        self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}")
        self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR
openpype/hosts/fusion/hooks/pre_pyside_install.py (new file, 186 lines)
@ -0,0 +1,186 @@
import os
import subprocess
import platform
import uuid

from openpype.lib.applications import PreLaunchHook, LaunchTypes


class InstallPySideToFusion(PreLaunchHook):
    """Automatically installs Qt binding to Fusion's python packages.

    Checks whether Fusion has PySide2 installed and tries to install it
    if not.

    The pipeline implementation requires a Qt binding to be installed in
    Fusion's python packages.
    """

    app_groups = {"fusion"}
    order = 2
    launch_types = {LaunchTypes.local}

    def execute(self):
        # Prelaunch hook is not crucial
        try:
            settings = self.data["project_settings"][self.host_name]
            if not settings["hooks"]["InstallPySideToFusion"]["enabled"]:
                return
            self.inner_execute()
        except Exception:
            self.log.warning(
                "Processing of {} crashed.".format(self.__class__.__name__),
                exc_info=True
            )

    def inner_execute(self):
        self.log.debug("Check for PySide2 installation.")

        fusion_python3_home = self.data.get("fusion_python3_home")
        if not fusion_python3_home:
            self.log.warning("'fusion_python3_home' was not provided. "
                             "Installation of PySide2 not possible")
            return

        if platform.system().lower() == "windows":
            exe_filenames = ["python.exe"]
        else:
            exe_filenames = ["python3", "python"]

        for exe_filename in exe_filenames:
            python_executable = os.path.join(fusion_python3_home, exe_filename)
            if os.path.exists(python_executable):
                break

        if not os.path.exists(python_executable):
            self.log.warning(
                "Couldn't find python executable for fusion. {}".format(
                    python_executable
                )
            )
            return

        # Check if PySide2 is installed and skip if yes
        if self._is_pyside_installed(python_executable):
            self.log.debug("Fusion has already installed PySide2.")
            return

        self.log.debug("Installing PySide2.")
        # Install PySide2 in fusion's python
        if self._windows_require_permissions(
                os.path.dirname(python_executable)):
            result = self._install_pyside_windows(python_executable)
        else:
            result = self._install_pyside(python_executable)

        if result:
            self.log.info("Successfully installed PySide2 module to fusion.")
        else:
            self.log.warning("Failed to install PySide2 module to fusion.")

    def _install_pyside_windows(self, python_executable):
        """Install PySide2 python module to fusion's python.

        Installation requires administration rights; that's why the
        "pywin32" module is used, which can execute a command and ask for
        administration rights.
        """
        try:
            import win32api
            import win32con
            import win32process
            import win32event
            import pywintypes
            from win32comext.shell.shell import ShellExecuteEx
            from win32comext.shell import shellcon
        except Exception:
            self.log.warning("Couldn't import \"pywin32\" modules")
            return False

        try:
            # Parameters
            # - use "-m pip" as module pip to install PySide2 and argument
            #   "--ignore-installed" is to force install module to fusion's
            #   site-packages and make sure it is binary compatible
            parameters = "-m pip install --ignore-installed PySide2"

            # Execute command and ask for administrator's rights
            process_info = ShellExecuteEx(
                nShow=win32con.SW_SHOWNORMAL,
                fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
                lpVerb="runas",
                lpFile=python_executable,
                lpParameters=parameters,
                lpDirectory=os.path.dirname(python_executable)
            )
            process_handle = process_info["hProcess"]
            win32event.WaitForSingleObject(process_handle,
                                           win32event.INFINITE)
            returncode = win32process.GetExitCodeProcess(process_handle)
            return returncode == 0
        except pywintypes.error:
            return False

    def _install_pyside(self, python_executable):
        """Install PySide2 python module to fusion's python."""
        try:
            # Parameters
            # - use "-m pip" as module pip to install PySide2 and argument
            #   "--ignore-installed" is to force install module to fusion's
            #   site-packages and make sure it is binary compatible
            env = dict(os.environ)
            del env['PYTHONPATH']
            args = [
                python_executable,
                "-m",
                "pip",
                "install",
                "--ignore-installed",
                "PySide2",
            ]
            process = subprocess.Popen(
                args, stdout=subprocess.PIPE, universal_newlines=True,
                env=env
            )
            process.communicate()
            return process.returncode == 0
        except PermissionError:
            self.log.warning(
                "Permission denied with command:"
                "\"{}\".".format(" ".join(args))
            )
        except OSError as error:
            self.log.warning(f"OS error has occurred: \"{error}\".")
        except subprocess.SubprocessError:
            pass

    def _is_pyside_installed(self, python_executable):
        """Check if a Qt binding is importable from fusion's python."""
        args = [python_executable, "-c", "from qtpy import QtWidgets"]
        process = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        _, stderr = process.communicate()
        stderr = stderr.decode()
        if stderr:
            return False
        return True

    def _windows_require_permissions(self, dirpath):
        if platform.system().lower() != "windows":
            return False

        try:
            # Attempt to create a temporary file in the folder
            temp_file_path = os.path.join(dirpath, uuid.uuid4().hex)
            with open(temp_file_path, "w"):
                pass
            os.remove(temp_file_path)  # Clean up temporary file
            return False

        except PermissionError:
            return True

        except BaseException as exc:
            print(("Failed to determine if root requires permissions."
                   "Unexpected error: {}").format(exc))
            return False
openpype/hosts/fusion/plugins/create/create_image_saver.py (new file, 64 lines)
@ -0,0 +1,64 @@
from openpype.lib import NumberDef

from openpype.hosts.fusion.api.plugin import GenericCreateSaver
from openpype.hosts.fusion.api import get_current_comp


class CreateImageSaver(GenericCreateSaver):
    """Fusion Saver to generate a single image.

    Created to explicitly separate single frame ('image') and
    multi frame ('render') outputs.

    This might be a temporary creator until 'alias' functionality is
    implemented to limit creation of additional product types with
    similar, but not the same, workflows.
    """
    identifier = "io.openpype.creators.fusion.imagesaver"
    label = "Image (saver)"
    name = "image"
    family = "image"
    description = "Fusion Saver to generate image"

    default_frame = 0

    def get_detail_description(self):
        return """Fusion Saver to generate single image.

        This creator is meant for publishing a single frame `image`
        product type.

        The artist should provide a frame number (integer) to specify
        which frame should be published. It must be inside the global
        timeline frame range.

        Supports local and Deadline rendering.

        Supports selection from a predefined set of output file extensions:
        - exr
        - tga
        - png
        - tif
        - jpg

        Created to explicitly separate single frame ('image') and
        multi frame ('render') outputs.
        """

    def get_pre_create_attr_defs(self):
        """Settings for create page"""
        attr_defs = [
            self._get_render_target_enum(),
            self._get_reviewable_bool(),
            self._get_frame_int(),
            self._get_image_format_enum(),
        ]
        return attr_defs

    def _get_frame_int(self):
        return NumberDef(
            "frame",
            default=self.default_frame,
            label="Frame",
            tooltip="Set frame to be rendered, must be inside of global "
                    "timeline range"
        )
@ -1,187 +1,42 @@
-from copy import deepcopy
-import os
+from openpype.lib import EnumDef

-from openpype.hosts.fusion.api import (
-    get_current_comp,
-    comp_lock_and_undo_chunk,
-)
-
-from openpype.lib import (
-    BoolDef,
-    EnumDef,
-)
-from openpype.pipeline import (
-    legacy_io,
-    Creator as NewCreator,
-    CreatedInstance,
-    Anatomy,
-)
+from openpype.hosts.fusion.api.plugin import GenericCreateSaver


-class CreateSaver(NewCreator):
+class CreateSaver(GenericCreateSaver):
+    """Fusion Saver to generate image sequence of 'render' product type.
+
+    Original Saver creator targeted for 'render' product type. It uses
+    the original, not too descriptive, name because of values in Settings.
+    """
    identifier = "io.openpype.creators.fusion.saver"
    label = "Render (saver)"
    name = "render"
    family = "render"
-   default_variants = ["Main", "Mask"]
-   description = "Fusion Saver to generate image sequence"
-   icon = "fa5.eye"
-
-   instance_attributes = ["reviewable"]
-   image_format = "exr"
    default_frame_range_option = "asset_db"

-   # TODO: This should be renamed together with Nuke so it is aligned
-   temp_rendering_path_template = (
-       "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}"
-   )
+   def get_detail_description(self):
+       return """Fusion Saver to generate image sequence.
+
+       This creator is expected for publishing of image sequences for
+       'render' product type. (But can publish even single frame 'render'.)
+
+       Select what should be source of render range:
+       - "Current asset context" - values set on Asset in DB (Ftrack)
+       - "From render in/out" - from node itself
+       - "From composition timeline" - from timeline
+
+       Supports local and farm rendering.
+
+       Supports selection from predefined set of output file extensions:
+       - exr
+       - tga
+       - png
+       - tif
+       - jpg
+       """

-   def create(self, subset_name, instance_data, pre_create_data):
-       self.pass_pre_attributes_to_instance(instance_data, pre_create_data)
-
-       instance_data.update(
-           {"id": "pyblish.avalon.instance", "subset": subset_name}
-       )
-
-       comp = get_current_comp()
-       with comp_lock_and_undo_chunk(comp):
-           args = (-32768, -32768)  # Magical position numbers
-           saver = comp.AddTool("Saver", *args)
-
-           self._update_tool_with_data(saver, data=instance_data)
-
-           # Register the CreatedInstance
-           instance = CreatedInstance(
-               family=self.family,
-               subset_name=subset_name,
-               data=instance_data,
-               creator=self,
-           )
-           data = instance.data_to_store()
-           self._imprint(saver, data)
-
-           # Insert the transient data
-           instance.transient_data["tool"] = saver
-
-           self._add_instance_to_context(instance)
-
-       return instance
-
-   def collect_instances(self):
-       comp = get_current_comp()
-       tools = comp.GetToolList(False, "Saver").values()
-       for tool in tools:
-           data = self.get_managed_tool_data(tool)
-           if not data:
-               continue
-
-           # Add instance
-           created_instance = CreatedInstance.from_existing(data, self)
-
-           # Collect transient data
-           created_instance.transient_data["tool"] = tool
-
-           self._add_instance_to_context(created_instance)
-
-   def update_instances(self, update_list):
-       for created_inst, _changes in update_list:
-           new_data = created_inst.data_to_store()
-           tool = created_inst.transient_data["tool"]
-           self._update_tool_with_data(tool, new_data)
-           self._imprint(tool, new_data)
-
-   def remove_instances(self, instances):
-       for instance in instances:
-           # Remove the tool from the scene
-           tool = instance.transient_data["tool"]
-           if tool:
-               tool.Delete()
-
-           # Remove the collected CreatedInstance to remove from UI directly
-           self._remove_instance_from_context(instance)
-
-   def _imprint(self, tool, data):
-       # Save all data in a "openpype.{key}" = value data
-
-       # Instance id is the tool's name so we don't need to imprint as data
-       data.pop("instance_id", None)
-
-       active = data.pop("active", None)
-       if active is not None:
-           # Use active value to set the passthrough state
-           tool.SetAttrs({"TOOLB_PassThrough": not active})
-
-       for key, value in data.items():
-           tool.SetData(f"openpype.{key}", value)
-
-   def _update_tool_with_data(self, tool, data):
-       """Update tool node name and output path based on subset data"""
-       if "subset" not in data:
-           return
-
-       original_subset = tool.GetData("openpype.subset")
-       original_format = tool.GetData(
-           "openpype.creator_attributes.image_format"
-       )
-
-       subset = data["subset"]
-       if (
-           original_subset != subset
-           or original_format != data["creator_attributes"]["image_format"]
-       ):
-           self._configure_saver_tool(data, tool, subset)
-
-   def _configure_saver_tool(self, data, tool, subset):
-       formatting_data = deepcopy(data)
-
-       # get frame padding from anatomy templates
-       anatomy = Anatomy()
-       frame_padding = anatomy.templates["frame_padding"]
-
-       # get output format
-       ext = data["creator_attributes"]["image_format"]
-
-       # Subset change detected
-       workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
-       formatting_data.update(
-           {"workdir": workdir, "frame": "0" * frame_padding, "ext": ext}
-       )
-
-       # build file path to render
-       filepath = self.temp_rendering_path_template.format(**formatting_data)
-
-       comp = get_current_comp()
-       tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))
-
-       # Rename tool
-       if tool.Name != subset:
-           print(f"Renaming {tool.Name} -> {subset}")
-           tool.SetAttrs({"TOOLS_Name": subset})
-
-   def get_managed_tool_data(self, tool):
-       """Return data of the tool if it matches creator identifier"""
-       data = tool.GetData("openpype")
-       if not isinstance(data, dict):
-           return
-
-       required = {
-           "id": "pyblish.avalon.instance",
-           "creator_identifier": self.identifier,
-       }
-       for key, value in required.items():
-           if key not in data or data[key] != value:
-               return
-
-       # Get active state from the actual tool state
-       attrs = tool.GetAttrs()
-       passthrough = attrs["TOOLB_PassThrough"]
-       data["active"] = not passthrough
-
-       # Override publisher's UUID generation because tool names are
-       # already unique in Fusion in a comp
-       data["instance_id"] = tool.Name
-
-       return data

    def get_pre_create_attr_defs(self):
        """Settings for create page"""

@ -193,29 +48,6 @@ class CreateSaver(NewCreator):
        ]
        return attr_defs

-   def get_instance_attr_defs(self):
-       """Settings for publish page"""
-       return self.get_pre_create_attr_defs()
-
-   def pass_pre_attributes_to_instance(self, instance_data, pre_create_data):
-       creator_attrs = instance_data["creator_attributes"] = {}
-       for pass_key in pre_create_data.keys():
-           creator_attrs[pass_key] = pre_create_data[pass_key]
-
-   # These functions below should be moved to another file
-   # so it can be used by other plugins. plugin.py ?
-   def _get_render_target_enum(self):
-       rendering_targets = {
-           "local": "Local machine rendering",
-           "frames": "Use existing frames",
-       }
-       if "farm_rendering" in self.instance_attributes:
-           rendering_targets["farm"] = "Farm rendering"
-
-       return EnumDef(
-           "render_target", items=rendering_targets, label="Render target"
-       )
-
    def _get_frame_range_enum(self):
        frame_range_options = {
            "asset_db": "Current asset context",

@ -227,42 +59,5 @@ class CreateSaver(NewCreator):
            "frame_range_source",
            items=frame_range_options,
            label="Frame range source",
+           default=self.default_frame_range_option
        )

-   def _get_reviewable_bool(self):
-       return BoolDef(
-           "review",
-           default=("reviewable" in self.instance_attributes),
-           label="Review",
-       )
-
-   def _get_image_format_enum(self):
-       image_format_options = ["exr", "tga", "tif", "png", "jpg"]
-       return EnumDef(
-           "image_format",
-           items=image_format_options,
-           default=self.image_format,
-           label="Output Image Format",
-       )
-
-   def apply_settings(self, project_settings):
-       """Method called on initialization of plugin to apply settings."""
-
-       # plugin settings
-       plugin_settings = project_settings["fusion"]["create"][
-           self.__class__.__name__
-       ]
-
-       # individual attributes
-       self.instance_attributes = plugin_settings.get(
-           "instance_attributes", self.instance_attributes
-       )
-       self.default_variants = plugin_settings.get(
-           "default_variants", self.default_variants
-       )
-       self.temp_rendering_path_template = plugin_settings.get(
-           "temp_rendering_path_template", self.temp_rendering_path_template
-       )
-       self.image_format = plugin_settings.get(
-           "image_format", self.image_format
-       )
@ -95,7 +95,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin):
    label = "Collect Inputs"
    order = pyblish.api.CollectorOrder + 0.2
    hosts = ["fusion"]
-   families = ["render"]
+   families = ["render", "image"]

    def process(self, instance):
@ -57,6 +57,18 @@ class CollectInstanceData(pyblish.api.InstancePlugin):
            start_with_handle = comp_start
            end_with_handle = comp_end

+           frame = instance.data["creator_attributes"].get("frame")
+           # explicitly publishing only single frame
+           if frame is not None:
+               frame = int(frame)
+
+               start = frame
+               end = frame
+               handle_start = 0
+               handle_end = 0
+               start_with_handle = frame
+               end_with_handle = frame

        # Include start and end render frame in label
        subset = instance.data["subset"]
        label = (
@ -50,7 +50,7 @@ class CollectFusionRender(
                continue

            family = inst.data["family"]
-           if family != "render":
+           if family not in ["render", "image"]:
                continue

            task_name = context.data["task"]

@ -59,7 +59,7 @@ class CollectFusionRender(
            instance_families = inst.data.get("families", [])
            subset_name = inst.data["subset"]
            instance = FusionRenderInstance(
-               family="render",
+               family=family,
                tool=tool,
                workfileComp=comp,
                families=instance_families,
@ -7,7 +7,7 @@ class FusionSaveComp(pyblish.api.ContextPlugin):
    label = "Save current file"
    order = pyblish.api.ExtractorOrder - 0.49
    hosts = ["fusion"]
-   families = ["render", "workfile"]
+   families = ["render", "image", "workfile"]

    def process(self, context):
@ -17,7 +17,7 @@ class ValidateBackgroundDepth(
    order = pyblish.api.ValidatorOrder
    label = "Validate Background Depth 32 bit"
    hosts = ["fusion"]
-   families = ["render"]
+   families = ["render", "image"]
    optional = True

    actions = [SelectInvalidAction, publish.RepairAction]
@ -9,7 +9,7 @@ class ValidateFusionCompSaved(pyblish.api.ContextPlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Comp Saved"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]

    def process(self, context):
@ -15,7 +15,7 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Create Folder Checked"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]
    actions = [RepairAction, SelectInvalidAction]
@ -17,7 +17,7 @@ class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Filename Has Extension"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]
@ -0,0 +1,27 @@
import pyblish.api

from openpype.pipeline import PublishValidationError


class ValidateImageFrame(pyblish.api.InstancePlugin):
    """Validates that `image` product type contains only single frame."""

    order = pyblish.api.ValidatorOrder
    label = "Validate Image Frame"
    families = ["image"]
    hosts = ["fusion"]

    def process(self, instance):
        render_start = instance.data["frameStartHandle"]
        render_end = instance.data["frameEndHandle"]
        too_many_frames = (isinstance(instance.data["expectedFiles"], list)
                           and len(instance.data["expectedFiles"]) > 1)

        if render_end - render_start > 0 or too_many_frames:
            desc = ("Trying to render multiple frames. 'image' product type "
                    "is meant for single frame. Please use 'render' creator.")
            raise PublishValidationError(
                title="Frame range outside of comp range",
                message=desc,
                description=desc
            )
@ -7,8 +7,8 @@ class ValidateInstanceFrameRange(pyblish.api.InstancePlugin):
    """Validate instance frame range is within comp's global render range."""

    order = pyblish.api.ValidatorOrder
-   label = "Validate Filename Has Extension"
-   families = ["render"]
+   label = "Validate Frame Range"
+   families = ["render", "image"]
    hosts = ["fusion"]

    def process(self, instance):
@ -13,7 +13,7 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Saver Has Input"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]
@ -9,7 +9,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Saver Passthrough"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]
@ -64,7 +64,7 @@ class ValidateSaverResolution(

    order = pyblish.api.ValidatorOrder
    label = "Validate Asset Resolution"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]
    optional = True
    actions = [SelectInvalidAction]
@ -11,7 +11,7 @@ class ValidateUniqueSubsets(pyblish.api.ContextPlugin):

    order = pyblish.api.ValidatorOrder
    label = "Validate Unique Subsets"
-   families = ["render"]
+   families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]
@ -15,6 +15,9 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
    icon = "magic"
    ext = "exr"

+   # Default to split export and render jobs
+   split_render = True

    def create(self, subset_name, instance_data, pre_create_data):

        instance_data.pop("active", None)

@ -36,12 +39,15 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
        # Also create the linked Redshift IPR Rop
        try:
            ipr_rop = instance_node.parent().createNode(
-               "Redshift_IPR", node_name=basename + "_IPR"
+               "Redshift_IPR", node_name=f"{basename}_IPR"
            )
-       except hou.OperationFailed:
-           raise plugin.OpenPypeCreatorError(
-               ("Cannot create Redshift node. Is Redshift "
-                "installed and enabled?"))
+       except hou.OperationFailed as e:
+           raise plugin.OpenPypeCreatorError(
+               (
+                   "Cannot create Redshift node. Is Redshift "
+                   "installed and enabled?"
+               )
+           ) from e

        # Move it to directly under the Redshift ROP
        ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))

@ -74,8 +80,15 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
        for node in self.selected_nodes:
            if node.type().name() == "cam":
                camera = node.path()
-       parms.update({
-           "RS_renderCamera": camera or ""})
+       parms["RS_renderCamera"] = camera or ""

+       export_dir = hou.text.expandString("$HIP/pyblish/rs/")
+       rs_filepath = f"{export_dir}{subset_name}/{subset_name}.$F4.rs"
+       parms["RS_archive_file"] = rs_filepath
+
+       if pre_create_data.get("split_render", self.split_render):
+           parms["RS_archive_enable"] = 1

        instance_node.setParms(parms)

        # Lock some Avalon attributes

@ -102,6 +115,9 @@ class CreateRedshiftROP(plugin.HoudiniCreator):
            BoolDef("farm",
                    label="Submitting to Farm",
                    default=True),
+           BoolDef("split_render",
+                   label="Split export and render jobs",
+                   default=self.split_render),
            EnumDef("image_format",
                    image_format_enum,
                    default=self.ext,
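An illustrative sketch of what the split-render setup amounts to (not the exact plugin code): point `RS_archive_file` at a `.rs` sequence and turn on `RS_archive_enable`, so an export job writes archives and a render job consumes them. Assumes a Houdini session with Redshift; the node path is hypothetical:

```python
import hou

rop = hou.node("/out/Redshift_ROP1")  # hypothetical node path
export_path = hou.text.expandString("$HIP/pyblish/rs/main/main.$F4.rs")
rop.setParms({
    "RS_archive_file": export_path,
    "RS_archive_enable": 1,  # export .rs archives instead of rendering directly
})
```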
openpype/hosts/houdini/plugins/load/load_redshift_proxy.py (new file, 112 lines)
@ -0,0 +1,112 @@
import os
import re
from openpype.pipeline import (
    load,
    get_representation_path,
)
from openpype.hosts.houdini.api import pipeline
from openpype.pipeline.load import LoadError

import hou


class RedshiftProxyLoader(load.LoaderPlugin):
    """Load Redshift Proxy"""

    families = ["redshiftproxy"]
    label = "Load Redshift Proxy"
    representations = ["rs"]
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name=None, namespace=None, data=None):

        # Get the root node
        obj = hou.node("/obj")

        # Define node name
        namespace = namespace if namespace else context["asset"]["name"]
        node_name = "{}_{}".format(namespace, name) if namespace else name

        # Create a new geo node
        container = obj.createNode("geo", node_name=node_name)

        # Check whether the Redshift parameters exist - if not, then likely
        # redshift is not set up or initialized correctly
        if not container.parm("RS_objprop_proxy_enable"):
            container.destroy()
            raise LoadError("Unable to initialize geo node with Redshift "
                            "attributes. Make sure you have the Redshift "
                            "plug-in set up correctly for Houdini.")

        # Enable by default
        container.setParms({
            "RS_objprop_proxy_enable": True,
            "RS_objprop_proxy_file": self.format_path(
                self.filepath_from_context(context),
                context["representation"])
        })

        # Remove the file node, it only loads static meshes
        # Houdini 17 has removed the file node from the geo node
        file_node = container.node("file1")
        if file_node:
            file_node.destroy()

        # Add this stub node inside so it previews ok
        proxy_sop = container.createNode("redshift_proxySOP",
                                         node_name=node_name)
        proxy_sop.setDisplayFlag(True)

        nodes = [container, proxy_sop]

        self[:] = nodes

        return pipeline.containerise(
            node_name,
            namespace,
            nodes,
            context,
            self.__class__.__name__,
            suffix="",
        )

    def update(self, container, representation):

        # Update the file path
        file_path = get_representation_path(representation)

        node = container["node"]
        node.setParms({
            "RS_objprop_proxy_file": self.format_path(
                file_path, representation)
        })

        # Update attribute
        node.setParms({"representation": str(representation["_id"])})

    def remove(self, container):

        node = container["node"]
        node.destroy()

    @staticmethod
    def format_path(path, representation):
        """Format file path correctly for single redshift proxy
        or redshift proxy sequence."""
        if not os.path.exists(path):
            raise RuntimeError("Path does not exist: %s" % path)

        is_sequence = bool(representation["context"].get("frame"))
        # The path is either a single file or sequence in a folder.
        if is_sequence:
            filename = re.sub(r"(.*)\.(\d+)\.(rs.*)", "\\1.$F4.\\3", path)
            filename = os.path.join(path, filename)
        else:
            filename = path

        filename = os.path.normpath(filename)
        filename = filename.replace("\\", "/")

        return filename
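A worked example of the sequence substitution in `format_path` above (the path is illustrative): the frame number in a published `.rs` file name is replaced with Houdini's `$F4` token so the proxy parameter evaluates per frame:

```python
import re

path = "/proj/publish/asset_proxy.1001.rs"  # hypothetical publish path
print(re.sub(r"(.*)\.(\d+)\.(rs.*)", "\\1.$F4.\\3", path))
# /proj/publish/asset_proxy.$F4.rs
```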
@ -31,7 +31,6 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
    families = ["redshift_rop"]

    def process(self, instance):
        rop = hou.node(instance.data.get("instance_node"))

        # Collect chunkSize

@ -43,13 +42,29 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):

        default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
        beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
-       render_products = []
+       # Store whether we are splitting the render job (export + render)
+       split_render = bool(rop.parm("RS_archive_enable").eval())
+       instance.data["splitRender"] = split_render
+       export_products = []
+       if split_render:
+           export_prefix = evalParmNoFrame(
+               rop, "RS_archive_file", pad_character="0"
+           )
+           beauty_export_product = self.get_render_product_name(
+               prefix=export_prefix,
+               suffix=None)
+           export_products.append(beauty_export_product)
+           self.log.debug(
+               "Found export product: {}".format(beauty_export_product)
+           )
+           instance.data["ifdFile"] = beauty_export_product
+           instance.data["exportFiles"] = list(export_products)

        # Default beauty AOV
        beauty_product = self.get_render_product_name(
            prefix=default_prefix, suffix=beauty_suffix
        )
-       render_products.append(beauty_product)
+       render_products = [beauty_product]
        files_by_aov = {
            "_": self.generate_expected_files(instance,
                                              beauty_product)}

@ -59,11 +74,11 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
            i = index + 1

            # Skip disabled AOVs
-           if not rop.evalParm("RS_aovEnable_%s" % i):
+           if not rop.evalParm(f"RS_aovEnable_{i}"):
                continue

-           aov_suffix = rop.evalParm("RS_aovSuffix_%s" % i)
-           aov_prefix = evalParmNoFrame(rop, "RS_aovCustomPrefix_%s" % i)
+           aov_suffix = rop.evalParm(f"RS_aovSuffix_{i}")
+           aov_prefix = evalParmNoFrame(rop, f"RS_aovCustomPrefix_{i}")
            if not aov_prefix:
                aov_prefix = default_prefix

@ -85,7 +100,7 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
        instance.data["attachTo"] = []  # stub required data

        if "expectedFiles" not in instance.data:
-           instance.data["expectedFiles"] = list()
+           instance.data["expectedFiles"] = []
        instance.data["expectedFiles"].append(files_by_aov)

        # update the colorspace data
@@ -37,6 +37,95 @@ class RenderProducts(object):
            )
        }

    def get_multiple_beauty(self, outputs, cameras):
        beauty_output_frames = dict()
        for output, camera in zip(outputs, cameras):
            filename, ext = os.path.splitext(output)
            filename = filename.replace(".", "")
            ext = ext.replace(".", "")
            start_frame = int(rt.rendStart)
            end_frame = int(rt.rendEnd) + 1
            new_beauty = self.get_expected_beauty(
                filename, start_frame, end_frame, ext
            )
            beauty_output = ({
                f"{camera}_beauty": new_beauty
            })
            beauty_output_frames.update(beauty_output)
        return beauty_output_frames

    def get_multiple_aovs(self, outputs, cameras):
        renderer_class = get_current_renderer()
        renderer = str(renderer_class).split(":")[0]
        aovs_frames = {}
        for output, camera in zip(outputs, cameras):
            filename, ext = os.path.splitext(output)
            filename = filename.replace(".", "")
            ext = ext.replace(".", "")
            start_frame = int(rt.rendStart)
            end_frame = int(rt.rendEnd) + 1

            if renderer in [
                "ART_Renderer",
                "V_Ray_6_Hotfix_3",
                "V_Ray_GPU_6_Hotfix_3",
                "Default_Scanline_Renderer",
                "Quicksilver_Hardware_Renderer",
            ]:
                render_name = self.get_render_elements_name()
                if render_name:
                    for name in render_name:
                        aovs_frames.update({
                            f"{camera}_{name}": self.get_expected_aovs(
                                filename, name, start_frame,
                                end_frame, ext)
                        })
            elif renderer == "Redshift_Renderer":
                render_name = self.get_render_elements_name()
                if render_name:
                    rs_aov_files = rt.Execute("renderers.current.separateAovFiles")  # noqa
                    # this doesn't work, always returns False
                    # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles
                    if ext == "exr" and not rs_aov_files:
                        for name in render_name:
                            if name == "RsCryptomatte":
                                aovs_frames.update({
                                    f"{camera}_{name}": self.get_expected_aovs(
                                        filename, name, start_frame,
                                        end_frame, ext)
                                })
                    else:
                        for name in render_name:
                            aovs_frames.update({
                                f"{camera}_{name}": self.get_expected_aovs(
                                    filename, name, start_frame,
                                    end_frame, ext)
                            })
            elif renderer == "Arnold":
                render_name = self.get_arnold_product_name()
                if render_name:
                    for name in render_name:
                        aovs_frames.update({
                            f"{camera}_{name}": self.get_expected_arnold_product(  # noqa
                                filename, name, start_frame,
                                end_frame, ext)
                        })
            elif renderer in [
                "V_Ray_6_Hotfix_3",
                "V_Ray_GPU_6_Hotfix_3"
            ]:
                if ext != "exr":
                    render_name = self.get_render_elements_name()
                    if render_name:
                        for name in render_name:
                            aovs_frames.update({
                                f"{camera}_{name}": self.get_expected_aovs(
                                    filename, name, start_frame,
                                    end_frame, ext)
                            })

        return aovs_frames

    def get_aovs(self, container):
        render_dir = os.path.dirname(rt.rendOutputFilename)
@@ -63,7 +152,7 @@ class RenderProducts(object):
        if render_name:
            for name in render_name:
                render_dict.update({
                    name: self.get_expected_render_elements(
                    name: self.get_expected_aovs(
                        output_file, name, start_frame,
                        end_frame, img_fmt)
                })
@@ -77,14 +166,14 @@ class RenderProducts(object):
            for name in render_name:
                if name == "RsCryptomatte":
                    render_dict.update({
                        name: self.get_expected_render_elements(
                        name: self.get_expected_aovs(
                            output_file, name, start_frame,
                            end_frame, img_fmt)
                    })
        else:
            for name in render_name:
                render_dict.update({
                    name: self.get_expected_render_elements(
                    name: self.get_expected_aovs(
                        output_file, name, start_frame,
                        end_frame, img_fmt)
                })
@@ -95,7 +184,8 @@ class RenderProducts(object):
            for name in render_name:
                render_dict.update({
                    name: self.get_expected_arnold_product(
                        output_file, name, start_frame, end_frame, img_fmt)
                        output_file, name, start_frame,
                        end_frame, img_fmt)
                })
        elif renderer in [
            "V_Ray_6_Hotfix_3",
@@ -106,7 +196,7 @@ class RenderProducts(object):
        if render_name:
            for name in render_name:
                render_dict.update({
                    name: self.get_expected_render_elements(
                    name: self.get_expected_aovs(
                        output_file, name, start_frame,
                        end_frame, img_fmt)  # noqa
                })
@@ -169,8 +259,8 @@ class RenderProducts(object):
        return render_name

    def get_expected_render_elements(self, folder, name,
                                     start_frame, end_frame, fmt):
    def get_expected_aovs(self, folder, name,
                          start_frame, end_frame, fmt):
        """Get all the expected render element output files."""
        render_elements = []
        for f in range(start_frame, end_frame):
@@ -74,13 +74,13 @@ class RenderSettings(object):
        output = os.path.join(output_dir, container)
        try:
            aov_separator = self._aov_chars[(
                self._project_settings["maya"]
                self._project_settings["max"]
                ["RenderSettings"]
                ["aov_separator"]
            )]
        except KeyError:
            aov_separator = "."
        output_filename = "{0}..{1}".format(output, img_fmt)
        output_filename = f"{output}..{img_fmt}"
        output_filename = output_filename.replace("{aov_separator}",
                                                  aov_separator)
        rt.rendOutputFilename = output_filename
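A small illustration of the `{aov_separator}` replacement above, with assumed values:

```python
# Hypothetical output path that carries the placeholder from settings
output_filename = "C:/renders/beauty{aov_separator}main..exr"
aov_separator = "."  # the fallback chosen on KeyError above
print(output_filename.replace("{aov_separator}", aov_separator))
# C:/renders/beauty.main..exr
```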
@@ -146,13 +146,13 @@ class RenderSettings(object):
        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext)
            aov_name = f"{dir}_{renderpass}..{ext}"
            render_elem.SetRenderElementFileName(i, aov_name)

    def get_render_output(self, container, output_dir):
        output = os.path.join(output_dir, container)
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        output_filename = "{0}..{1}".format(output, img_fmt)
        output_filename = f"{output}..{img_fmt}"
        return output_filename

    def get_render_element(self):
@@ -167,3 +167,61 @@ class RenderSettings(object):
            orig_render_elem.append(render_element)

        return orig_render_elem

    def get_batch_render_elements(self, container,
                                  output_dir, camera):
        render_element_list = list()
        output = os.path.join(output_dir, container)
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa

        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            aov_name = f"{output}_{camera}_{renderpass}..{img_fmt}"
            render_element_list.append(aov_name)
        return render_element_list

    def get_batch_render_output(self, camera):
        target_layer_no = rt.batchRenderMgr.FindView(camera)
        target_layer = rt.batchRenderMgr.GetView(target_layer_no)
        return target_layer.outputFilename

    def batch_render_elements(self, camera):
        target_layer_no = rt.batchRenderMgr.FindView(camera)
        target_layer = rt.batchRenderMgr.GetView(target_layer_no)
        outputfilename = target_layer.outputFilename
        directory = os.path.dirname(outputfilename)
        render_elem = rt.maxOps.GetCurRenderElementMgr()
        render_elem_num = render_elem.NumRenderElements()
        if render_elem_num < 0:
            return
        ext = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa

        for i in range(render_elem_num):
            renderlayer_name = render_elem.GetRenderElement(i)
            target, renderpass = str(renderlayer_name).split(":")
            aov_name = f"{directory}_{camera}_{renderpass}..{ext}"
            render_elem.SetRenderElementFileName(i, aov_name)

    def batch_render_layer(self, container,
                           output_dir, cameras):
        outputs = list()
        output = os.path.join(output_dir, container)
        img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"]  # noqa
        for cam in cameras:
            camera = rt.getNodeByName(cam)
            layer_no = rt.batchRenderMgr.FindView(cam)
            renderlayer = None
            if layer_no == 0:
                renderlayer = rt.batchRenderMgr.CreateView(camera)
            else:
                renderlayer = rt.batchRenderMgr.GetView(layer_no)
            # use camera name as renderlayer name
            renderlayer.name = cam
            renderlayer.outputFilename = f"{output}_{cam}..{img_fmt}"
            outputs.append(renderlayer.outputFilename)
        return outputs
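The naming convention `batch_render_layer` applies can be previewed with plain string formatting; the directory, container, and camera names below are assumptions for illustration:

```python
import os

output = os.path.join("C:/renders", "renderMain")  # output_dir, container
img_fmt = "exr"
for cam in ["Cam01", "Cam02"]:
    # One batch-render view per camera, named after the camera
    print(f"{output}_{cam}..{img_fmt}".replace("\\", "/"))
# C:/renders/renderMain_Cam01..exr
# C:/renders/renderMain_Cam02..exr
```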
@@ -2,6 +2,7 @@
"""Creator plugin for creating camera."""
import os
from openpype.hosts.max.api import plugin
from openpype.lib import BoolDef
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
@@ -17,15 +18,33 @@ class CreateRender(plugin.MaxCreator):
        file = rt.maxFileName
        filename, _ = os.path.splitext(file)
        instance_data["AssetName"] = filename
        instance_data["multiCamera"] = pre_create_data.get("multi_cam")
        num_of_renderlayer = rt.batchRenderMgr.numViews
        if num_of_renderlayer > 0:
            rt.batchRenderMgr.DeleteView(num_of_renderlayer)

        instance = super(CreateRender, self).create(
            subset_name,
            instance_data,
            pre_create_data)

        container_name = instance.data.get("instance_node")
        sel_obj = self.selected_nodes
        if sel_obj:
            # set viewport camera for rendering (mandatory for Deadline)
            RenderSettings(self.project_settings).set_render_camera(sel_obj)
        # set output paths for rendering (mandatory for Deadline)
        RenderSettings().render_output(container_name)
        # TODO: create multiple camera options
        if self.selected_nodes:
            selected_nodes_name = []
            for sel in self.selected_nodes:
                name = sel.name
                selected_nodes_name.append(name)
            RenderSettings().batch_render_layer(
                container_name, filename,
                selected_nodes_name)

    def get_pre_create_attr_defs(self):
        attrs = super(CreateRender, self).get_pre_create_attr_defs()
        return attrs + [
            BoolDef("multi_cam",
                    label="Multiple Cameras Submission",
                    default=False),
        ]
@@ -4,8 +4,10 @@ import os
import pyblish.api

from pymxs import runtime as rt
from openpype.pipeline.publish import KnownPublishError
from openpype.hosts.max.api import colorspace
from openpype.hosts.max.api.lib import get_max_version, get_current_renderer
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
from openpype.hosts.max.api.lib_renderproducts import RenderProducts
@@ -23,7 +25,6 @@ class CollectRender(pyblish.api.InstancePlugin):
        file = rt.maxFileName
        current_file = os.path.join(folder, file)
        filepath = current_file.replace("\\", "/")

        context.data['currentFile'] = current_file

        files_by_aov = RenderProducts().get_beauty(instance.name)
@@ -39,6 +40,28 @@ class CollectRender(pyblish.api.InstancePlugin):
        instance.data["cameras"] = [camera.name] if camera else None  # noqa

        if instance.data.get("multiCamera"):
            cameras = instance.data.get("members")
            if not cameras:
                raise KnownPublishError("There should be at least"
                                        " one renderable camera in container")
            sel_cam = [
                c.name for c in cameras
                if rt.classOf(c) in rt.Camera.classes]
            container_name = instance.data.get("instance_node")
            render_dir = os.path.dirname(rt.rendOutputFilename)
            outputs = RenderSettings().batch_render_layer(
                container_name, render_dir, sel_cam
            )

            instance.data["cameras"] = sel_cam

            files_by_aov = RenderProducts().get_multiple_beauty(
                outputs, sel_cam)
            aovs = RenderProducts().get_multiple_aovs(
                outputs, sel_cam)
            files_by_aov.update(aovs)

        if "expectedFiles" not in instance.data:
            instance.data["expectedFiles"] = list()
            instance.data["files"] = list()
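The keys produced by `get_multiple_beauty` and `get_multiple_aovs` follow the `{camera}_beauty` and `{camera}_{name}` patterns shown earlier, so the merged mapping looks roughly like this; the file names are assumptions for illustration:

```python
# Illustrative only - shape of the merged files_by_aov mapping
files_by_aov = {
    "Cam01_beauty": ["renderMain_Cam01.0001.exr", "renderMain_Cam01.0002.exr"],
    "Cam01_RsCryptomatte": ["renderMain_Cam01_RsCryptomatte.0001.exr"],
}
```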
openpype/hosts/max/plugins/publish/save_scenes_for_cameras.py (new file, 100 lines)

@@ -0,0 +1,100 @@
import pyblish.api
import os
import sys
import tempfile

from pymxs import runtime as rt
from openpype.lib import run_subprocess
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
from openpype.hosts.max.api.lib_renderproducts import RenderProducts


class SaveScenesForCamera(pyblish.api.InstancePlugin):
    """Save scene files for multiple cameras without editing
    the original scene before Deadline submission.
    """

    label = "Save Scene files for cameras"
    order = pyblish.api.ExtractorOrder - 0.48
    hosts = ["max"]
    families = ["maxrender"]

    def process(self, instance):
        current_folder = rt.maxFilePath
        current_filename = rt.maxFileName
        current_filepath = os.path.join(current_folder, current_filename)
        camera_scene_files = []
        scripts = []
        filename, ext = os.path.splitext(current_filename)
        fmt = RenderProducts().image_format()
        cameras = instance.data.get("cameras")
        if not cameras:
            return
        new_folder = f"{current_folder}_{filename}"
        os.makedirs(new_folder, exist_ok=True)
        for camera in cameras:
            new_output = RenderSettings().get_batch_render_output(camera)  # noqa
            new_output = new_output.replace("\\", "/")
            new_filename = f"{filename}_{camera}{ext}"
            new_filepath = os.path.join(new_folder, new_filename)
            new_filepath = new_filepath.replace("\\", "/")
            camera_scene_files.append(new_filepath)
            RenderSettings().batch_render_elements(camera)
            rt.rendOutputFilename = new_output
            rt.saveMaxFile(current_filepath)
            script = ("""
from pymxs import runtime as rt
import os
filename = "{filename}"
new_filepath = "{new_filepath}"
new_output = "{new_output}"
camera = "{camera}"
rt.rendOutputFilename = new_output
directory = os.path.dirname(rt.rendOutputFilename)
directory = os.path.join(directory, filename)
render_elem = rt.maxOps.GetCurRenderElementMgr()
render_elem_num = render_elem.NumRenderElements()
if render_elem_num > 0:
    ext = "{ext}"
    for i in range(render_elem_num):
        renderlayer_name = render_elem.GetRenderElement(i)
        target, renderpass = str(renderlayer_name).split(":")
        aov_name = f"{{directory}}_{camera}_{{renderpass}}..{ext}"
        render_elem.SetRenderElementFileName(i, aov_name)
rt.saveMaxFile(new_filepath)
""").format(filename=instance.name,
            new_filepath=new_filepath,
            new_output=new_output,
            camera=camera,
            ext=fmt)
            scripts.append(script)

        maxbatch_exe = os.path.join(
            os.path.dirname(sys.executable), "3dsmaxbatch")
        maxbatch_exe = maxbatch_exe.replace("\\", "/")
        if sys.platform == "win32":
            maxbatch_exe += ".exe"
        maxbatch_exe = os.path.normpath(maxbatch_exe)
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            tmp_script_path = os.path.join(
                tmp_dir_name, "extract_scene_files.py")
            self.log.info("Using script file: {}".format(tmp_script_path))

            with open(tmp_script_path, "wt") as tmp:
                for script in scripts:
                    tmp.write(script + "\n")

            try:
                current_filepath = current_filepath.replace("\\", "/")
                tmp_script_path = tmp_script_path.replace("\\", "/")
                run_subprocess([maxbatch_exe, tmp_script_path,
                                "-sceneFile", current_filepath])
            except RuntimeError:
                self.log.debug("Checking whether the scene files exist")

        for camera_scene in camera_scene_files:
            if not os.path.exists(camera_scene):
                self.log.error("Camera scene file does not exist yet!")
                raise RuntimeError("3dsmaxbatch did not run as expected")
            self.log.debug(f"Found camera scene: {camera_scene}")
@@ -137,6 +137,11 @@ class RedshiftProxyLoader(load.LoaderPlugin):
        cmds.connectAttr("{}.outMesh".format(rs_mesh),
                         "{}.inMesh".format(mesh_shape))

        # TODO: use the assigned shading group as the shader if one exists
        # assign default shader to redshift proxy
        if cmds.ls("initialShadingGroup", type="shadingEngine"):
            cmds.sets(mesh_shape, forceElement="initialShadingGroup")

        group_node = cmds.group(empty=True, name="{}_GRP".format(name))
        mesh_transform = cmds.listRelatives(mesh_shape,
                                            parent=True, fullPath=True)
@@ -3483,3 +3483,19 @@ def get_filenames_without_hash(filename, frame_start, frame_end):
        new_filename = filename_without_hashes.format(frame)
        filenames.append(new_filename)
    return filenames


def create_camera_node_by_version():
    """Create a camera with the newest available node class.

    For Nuke 14.0 and later the Camera4 node class is used;
    for earlier versions the Camera2 node class is used.

    Returns:
        Node: camera node
    """
    nuke_number_version = nuke.NUKE_VERSION_MAJOR
    if nuke_number_version >= 14:
        return nuke.createNode("Camera4")
    else:
        return nuke.createNode("Camera2")
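A minimal usage sketch of the helper above inside a Nuke session:

```python
import nuke

camera = create_camera_node_by_version()
# "Camera4" on Nuke 14+, "Camera2" on older releases
print(camera.Class())
```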
@@ -259,9 +259,7 @@ def _install_menu():
    menu.addCommand(
        "Create...",
        lambda: host_tools.show_publisher(
            parent=(
                main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
            ),
            parent=main_window,
            tab="create"
        )
    )

@@ -270,9 +268,7 @@ def _install_menu():
    menu.addCommand(
        "Publish...",
        lambda: host_tools.show_publisher(
            parent=(
                main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
            ),
            parent=main_window,
            tab="publish"
        )
    )
@@ -4,6 +4,9 @@ from openpype.hosts.nuke.api import (
    NukeCreatorError,
    maintained_selection
)
from openpype.hosts.nuke.api.lib import (
    create_camera_node_by_version
)


class CreateCamera(NukeCreator):
@@ -32,7 +35,7 @@ class CreateCamera(NukeCreator):
                "Creator error: Select only camera node type")
            created_node = self.selected_nodes[0]
        else:
            created_node = nuke.createNode("Camera2")
            created_node = create_camera_node_by_version()

        created_node["tile_color"].setValue(
            int(self.node_color, 16))
@@ -80,6 +80,7 @@ class ValidateOutputMaps(pyblish.api.InstancePlugin):
                self.log.warning(f"Disabling texture instance: "
                                 f"{image_instance}")
                image_instance.data["active"] = False
                image_instance.data["publish"] = False
                image_instance.data["integrate"] = False
                representation.setdefault("tags", []).append("delete")
                continue
@@ -1,7 +1,4 @@
# -*- coding: utf-8 -*-
"""Collect Deadline pools. Choose default one from Settings

"""
import pyblish.api
from openpype.lib import TextDef
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
@@ -9,11 +6,35 @@ from openpype.pipeline.publish import OpenPypePyblishPluginMixin

class CollectDeadlinePools(pyblish.api.InstancePlugin,
                           OpenPypePyblishPluginMixin):
    """Collect pools from instance if present, from Setting otherwise."""
    """Collect pools from the instance or Publisher attributes, falling
    back to Settings otherwise.

    Pools are used to control which Deadline workers may render the job.

    Pools might be set:
    - directly on the instance (set directly in the DCC)
    - from Publisher attributes
    - from defaults in Settings.

    Publisher attributes could be shown even for instances that should be
    rendered locally, as visibility is driven by the product type of the
    instance (which will most likely be `render`).
    (This might be resolved in the future; the class attribute 'families'
    should then be cleaned up.)
    """

    order = pyblish.api.CollectorOrder + 0.420
    label = "Collect Deadline Pools"
    families = ["rendering",
    hosts = ["aftereffects",
             "fusion",
             "harmony",
             "nuke",
             "maya",
             "max"]

    families = ["render",
                "rendering",
                "render.farm",
                "renderFarm",
                "renderlayer",
@@ -30,7 +51,6 @@ class CollectDeadlinePools(pyblish.api.InstancePlugin,
        cls.secondary_pool = settings.get("secondary_pool", None)

    def process(self, instance):
        attr_values = self.get_attr_values_from_data(instance.data)
        if not instance.data.get("primaryPool"):
            instance.data["primaryPool"] = (
@@ -60,8 +80,12 @@ class CollectDeadlinePools(pyblish.api.InstancePlugin,
        return [
            TextDef("primaryPool",
                    label="Primary Pool",
                    default=cls.primary_pool),
                    default=cls.primary_pool,
                    tooltip="Deadline primary pool, "
                            "applicable for farm rendering"),
            TextDef("secondaryPool",
                    label="Secondary Pool",
                    default=cls.secondary_pool)
                    default=cls.secondary_pool,
                    tooltip="Deadline secondary pool, "
                            "applicable for farm rendering")
        ]
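Putting the docstring and the attribute definitions together, the effective resolution is a simple chained fallback. A sketch only, using names borrowed from the `process` method above:

```python
def resolve_pool(instance_data, attr_values, settings_default):
    """Sketch of the fallback order described in the docstring:
    instance data wins, then the Publisher attribute, then Settings."""
    return (
        instance_data.get("primaryPool")
        or attr_values.get("primaryPool")
        or settings_default
    )

print(resolve_pool({}, {"primaryPool": "gpu_pool"}, "main"))  # gpu_pool
```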
@@ -15,6 +15,7 @@ from openpype.lib import (
    NumberDef
)


@attr.s
class DeadlinePluginInfo():
    SceneFile = attr.ib(default=None)

@@ -41,6 +42,12 @@ class VrayRenderPluginInfo():
    SeparateFilesPerFrame = attr.ib(default=True)


@attr.s
class RedshiftRenderPluginInfo():
    SceneFile = attr.ib(default=None)
    Version = attr.ib(default=None)


class HoudiniSubmitDeadline(
    abstract_submit_deadline.AbstractSubmitDeadline,
    OpenPypePyblishPluginMixin
@@ -262,6 +269,25 @@ class HoudiniSubmitDeadline(
            plugin_info = VrayRenderPluginInfo(
                InputFilename=instance.data["ifdFile"],
            )
        elif family == "redshift_rop":
            plugin_info = RedshiftRenderPluginInfo(
                SceneFile=instance.data["ifdFile"]
            )
            # Note: To use different versions of Redshift on Deadline
            # set the `REDSHIFT_VERSION` env variable in the Tools
            # settings in the AYON Application plugin. You will also
            # need to set that version in the `Redshift.param` file
            # of the Redshift Deadline plugin:
            #   [Redshift_Executable_*]
            # where * is the version number.
            if os.getenv("REDSHIFT_VERSION"):
                plugin_info.Version = os.getenv("REDSHIFT_VERSION")
            else:
                self.log.warning((
                    "REDSHIFT_VERSION env variable is not set"
                    " - using version configured in Deadline"
                ))

        else:
            self.log.error(
                "Family '%s' not supported yet to split render job",
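To illustrate the note above (the version number is a hypothetical example), the environment variable and the Deadline plugin section have to agree:

```python
import os

# Hypothetical version number - it must match a corresponding
# [Redshift_Executable_3.5.17] section in the Redshift Deadline
# plugin's Redshift.param file, as the comment above describes.
os.environ["REDSHIFT_VERSION"] = "3.5.17"
```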
@@ -15,6 +15,12 @@ from openpype.pipeline import (
from openpype.pipeline.publish.lib import (
    replace_with_published_scene_path
)
from openpype.pipeline.publish import KnownPublishError
from openpype.hosts.max.api.lib import (
    get_current_renderer,
    get_multipass_setting
)
from openpype.hosts.max.api.lib_rendersettings import RenderSettings
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import is_running_from_build
@@ -54,7 +60,7 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
            cls.priority)
        cls.chunk_size = settings.get("chunk_size", cls.chunk_size)
        cls.group = settings.get("group", cls.group)

    # TODO: multiple camera instance, separate job infos
    def get_job_info(self):
        job_info = DeadlineJobInfo(Plugin="3dsmax")
@@ -71,7 +77,6 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
        src_filepath = context.data["currentFile"]
        src_filename = os.path.basename(src_filepath)

        job_info.Name = "%s - %s" % (src_filename, instance.name)
        job_info.BatchName = src_filename
        job_info.Plugin = instance.data["plugin"]
@@ -134,11 +139,11 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
        # Add list of expected files to job
        # ---------------------------------
        exp = instance.data.get("expectedFiles")

        for filepath in self._iter_expected_files(exp):
            job_info.OutputDirectory += os.path.dirname(filepath)
            job_info.OutputFilename += os.path.basename(filepath)
        if not instance.data.get("multiCamera"):
            exp = instance.data.get("expectedFiles")
            for filepath in self._iter_expected_files(exp):
                job_info.OutputDirectory += os.path.dirname(filepath)
                job_info.OutputFilename += os.path.basename(filepath)

        return job_info
@@ -163,11 +168,11 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
    def process_submission(self):
        instance = self._instance
        filepath = self.scene_path
        filepath = instance.context.data["currentFile"]

        files = instance.data["expectedFiles"]
        if not files:
            raise RuntimeError("No Render Elements found!")
            raise KnownPublishError("No Render Elements found!")
        first_file = next(self._iter_expected_files(files))
        output_dir = os.path.dirname(first_file)
        instance.data["outputDir"] = output_dir
@@ -181,9 +186,17 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
        self.log.debug("Submitting 3dsMax render..")
        project_settings = instance.context.data["project_settings"]
        payload = self._use_published_name(payload_data, project_settings)
        job_info, plugin_info = payload
        self.submit(self.assemble_payload(job_info, plugin_info))
        if instance.data.get("multiCamera"):
            self.log.debug("Submitting jobs for multiple cameras..")
            payload = self._use_published_name_for_multiples(
                payload_data, project_settings)
            job_infos, plugin_infos = payload
            for job_info, plugin_info in zip(job_infos, plugin_infos):
                self.submit(self.assemble_payload(job_info, plugin_info))
        else:
            payload = self._use_published_name(payload_data, project_settings)
            job_info, plugin_info = payload
            self.submit(self.assemble_payload(job_info, plugin_info))

    def _use_published_name(self, data, project_settings):
        # Not all hosts can import these modules.
@@ -206,7 +219,7 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
        files = instance.data.get("expectedFiles")
        if not files:
            raise RuntimeError("No render elements found")
            raise KnownPublishError("No render elements found")
        first_file = next(self._iter_expected_files(files))
        old_output_dir = os.path.dirname(first_file)
        output_beauty = RenderSettings().get_render_output(instance.name,
@@ -218,6 +231,7 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
        plugin_data["RenderOutput"] = beauty_name
        # as 3ds Max ships versions in different languages
        plugin_data["Language"] = "ENU"

        renderer_class = get_current_renderer()

        renderer = str(renderer_class).split(":")[0]
@@ -249,6 +263,120 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
        return job_info, plugin_info

    def get_job_info_through_camera(self, camera):
        """Get the job parameters for Deadline submission when
        multi-camera is enabled.

        Args:
            camera (str): Camera name.
        """
        instance = self._instance
        context = instance.context
        job_info = copy.deepcopy(self.job_info)
        exp = instance.data.get("expectedFiles")

        src_filepath = context.data["currentFile"]
        src_filename = os.path.basename(src_filepath)
        job_info.Name = "%s - %s - %s" % (
            src_filename, instance.name, camera)
        # set the output filepath with the related camera
        for filepath in self._iter_expected_files(exp):
            if camera not in filepath:
                continue
            job_info.OutputDirectory += os.path.dirname(filepath)
            job_info.OutputFilename += os.path.basename(filepath)

        return job_info

    def get_plugin_info_through_camera(self, camera):
        """Get the plugin parameters for Deadline submission when
        multi-camera is enabled.

        Args:
            camera (str): Camera name.
        """
        instance = self._instance
        # set the target camera
        plugin_info = copy.deepcopy(self.plugin_info)

        plugin_data = {}
        # set the output filepath with the related camera
        if instance.data.get("multiCamera"):
            scene_filepath = instance.context.data["currentFile"]
            scene_filename = os.path.basename(scene_filepath)
            scene_directory = os.path.dirname(scene_filepath)
            current_filename, ext = os.path.splitext(scene_filename)
            camera_scene_name = f"{current_filename}_{camera}{ext}"
            camera_scene_filepath = os.path.join(
                scene_directory, f"_{current_filename}", camera_scene_name)
            plugin_data["SceneFile"] = camera_scene_filepath

        files = instance.data.get("expectedFiles")
        if not files:
            raise KnownPublishError("No render elements found")
        first_file = next(self._iter_expected_files(files))
        old_output_dir = os.path.dirname(first_file)
        rgb_output = RenderSettings().get_batch_render_output(camera)  # noqa
        rgb_bname = os.path.basename(rgb_output)
        dir = os.path.dirname(first_file)
        beauty_name = f"{dir}/{rgb_bname}"
        beauty_name = beauty_name.replace("\\", "/")
        plugin_info["RenderOutput"] = beauty_name
        renderer_class = get_current_renderer()

        renderer = str(renderer_class).split(":")[0]
        if renderer in [
            "ART_Renderer",
            "Redshift_Renderer",
            "V_Ray_6_Hotfix_3",
            "V_Ray_GPU_6_Hotfix_3",
            "Default_Scanline_Renderer",
            "Quicksilver_Hardware_Renderer",
        ]:
            render_elem_list = RenderSettings().get_batch_render_elements(
                instance.name, old_output_dir, camera
            )
            for i, element in enumerate(render_elem_list):
                if camera in element:
                    elem_bname = os.path.basename(element)
                    new_elem = f"{dir}/{elem_bname}"
                    new_elem = new_elem.replace("/", "\\")
                    plugin_info["RenderElementOutputFilename%d" % i] = new_elem  # noqa

        if camera:
            # set the default camera and target camera
            # (weird parameters from max)
            plugin_data["Camera"] = camera
            plugin_data["Camera1"] = camera
            plugin_data["Camera0"] = None

        plugin_info.update(plugin_data)
        return plugin_info

    def _use_published_name_for_multiples(self, data, project_settings):
        """Process the submission parameters for Deadline when
        the user enables the multi-camera option.

        Returns:
            tuple[list, list]: A list of job infos and a list of plugin
                infos, one pair per camera.
        """
        job_info_list = []
        plugin_info_list = []
        instance = self._instance
        cameras = instance.data.get("cameras", [])
        plugin_data = {}
        multipass = get_multipass_setting(project_settings)
        if multipass:
            plugin_data["DisableMultipass"] = 0
        else:
            plugin_data["DisableMultipass"] = 1
        for cam in cameras:
            job_info = self.get_job_info_through_camera(cam)
            plugin_info = self.get_plugin_info_through_camera(cam)
            plugin_info.update(plugin_data)
            job_info_list.append(job_info)
            plugin_info_list.append(plugin_info)

        return job_info_list, plugin_info_list

    def from_published_scene(self, replace_in_path=True):
        instance = self._instance
        if instance.data["renderer"] == "Redshift_Renderer":
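For clarity, the per-camera job names assembled by `get_job_info_through_camera` follow the pattern below; the scene, instance, and camera names are assumed:

```python
src_filename, instance_name = "shot010.max", "renderMain"
for camera in ["Cam01", "Cam02"]:
    # Same "%s - %s - %s" pattern as in the method above
    print("%s - %s - %s" % (src_filename, instance_name, camera))
# shot010.max - renderMain - Cam01
# shot010.max - renderMain - Cam02
```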
@@ -64,8 +64,10 @@ def clear_credentials():
    user_registry = OpenPypeSecureRegistry("kitsu_user")

    # Set local settings
    user_registry.delete_item("login")
    user_registry.delete_item("password")
    if user_registry.get_item("login", None) is not None:
        user_registry.delete_item("login")
    if user_registry.get_item("password", None) is not None:
        user_registry.delete_item("password")


def save_credentials(login: str, password: str):
@@ -92,8 +94,9 @@ def load_credentials() -> Tuple[str, str]:
    # Get user registry
    user_registry = OpenPypeSecureRegistry("kitsu_user")

    return user_registry.get_item("login", None), user_registry.get_item(
        "password", None
    return (
        user_registry.get_item("login", None),
        user_registry.get_item("password", None)
    )
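A minimal usage sketch of the credential helpers above; the credential values are placeholders, not real data:

```python
login, password = load_credentials()
if login is None or password is None:
    # hypothetical placeholder values
    save_credentials("artist@studio.com", "secret")
```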
@@ -582,16 +582,17 @@ def _create_instances_for_aov(instance, skeleton, aov_filter, additional_data,
            group_name = subset

            # if there are multiple cameras, we need to add the camera name
            if isinstance(col, (list, tuple)):
                cam = [c for c in cameras if c in col[0]]
            else:
                # in case of a single frame
                cam = [c for c in cameras if c in col]
            if cam:
                if aov:
                    subset_name = '{}_{}_{}'.format(group_name, cam, aov)
                else:
                    subset_name = '{}_{}'.format(group_name, cam)
            expected_filepath = col[0] if isinstance(col, (list, tuple)) else col
            cams = [cam for cam in cameras if cam in expected_filepath]
            if cams:
                for cam in cams:
                    if aov:
                        if not aov.startswith(cam):
                            subset_name = '{}_{}_{}'.format(group_name, cam, aov)
                        else:
                            subset_name = "{}_{}".format(group_name, aov)
                    else:
                        subset_name = '{}_{}'.format(group_name, cam)
            else:
                if aov:
                    subset_name = '{}_{}'.format(group_name, aov)
@@ -58,41 +58,13 @@ def get_template_name_profiles(
    if not project_settings:
        project_settings = get_project_settings(project_name)

    profiles = (
    return copy.deepcopy(
        project_settings
        ["global"]
        ["tools"]
        ["publish"]
        ["template_name_profiles"]
    )
    if profiles:
        return copy.deepcopy(profiles)

    # Use legacy approach for cases new settings are not filled yet for the
    # project
    legacy_profiles = (
        project_settings
        ["global"]
        ["publish"]
        ["IntegrateAssetNew"]
        ["template_name_profiles"]
    )
    if legacy_profiles:
        if not logger:
            logger = Logger.get_logger("get_template_name_profiles")

        logger.warning((
            "Project \"{}\" is using legacy access to publish template."
            " It is recommended to move settings to new location"
            " 'project_settings/global/tools/publish/template_name_profiles'."
        ).format(project_name))

    # Replace "tasks" key with "task_names"
    profiles = []
    for profile in copy.deepcopy(legacy_profiles):
        profile["task_names"] = profile.pop("tasks", [])
        profiles.append(profile)
    return profiles


def get_hero_template_name_profiles(
@@ -121,36 +93,13 @@ def get_hero_template_name_profiles(
    if not project_settings:
        project_settings = get_project_settings(project_name)

    profiles = (
    return copy.deepcopy(
        project_settings
        ["global"]
        ["tools"]
        ["publish"]
        ["hero_template_name_profiles"]
    )
    if profiles:
        return copy.deepcopy(profiles)

    # Use legacy approach for cases new settings are not filled yet for the
    # project
    legacy_profiles = copy.deepcopy(
        project_settings
        ["global"]
        ["publish"]
        ["IntegrateHeroVersion"]
        ["template_name_profiles"]
    )
    if legacy_profiles:
        if not logger:
            logger = Logger.get_logger("get_hero_template_name_profiles")

        logger.warning((
            "Project \"{}\" is using legacy access to hero publish template."
            " It is recommended to move settings to new location"
            " 'project_settings/global/tools/publish/"
            "hero_template_name_profiles'."
        ).format(project_name))
    return legacy_profiles


def get_publish_template_name(
@@ -190,47 +190,18 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
        project_task_types = project_doc["config"]["tasks"]

        for instance in context:
            asset_doc = instance.data.get("assetEntity")
            anatomy_updates = {
            anatomy_data = copy.deepcopy(context.data["anatomyData"])
            anatomy_data.update({
                "family": instance.data["family"],
                "subset": instance.data["subset"],
            }
            if asset_doc:
                parents = asset_doc["data"].get("parents") or list()
                parent_name = project_doc["name"]
                if parents:
                    parent_name = parents[-1]
            })

                hierarchy = "/".join(parents)
                anatomy_updates.update({
                    "asset": asset_doc["name"],
                    "hierarchy": hierarchy,
                    "parent": parent_name,
                    "folder": {
                        "name": asset_doc["name"],
                    },
                })

            # Task
            task_type = None
            task_name = instance.data.get("task")
            if task_name:
                asset_tasks = asset_doc["data"]["tasks"]
                task_type = asset_tasks.get(task_name, {}).get("type")
                task_code = (
                    project_task_types
                    .get(task_type, {})
                    .get("short_name")
                )
                anatomy_updates["task"] = {
                    "name": task_name,
                    "type": task_type,
                    "short": task_code
                }
            self._fill_asset_data(instance, project_doc, anatomy_data)
            self._fill_task_data(instance, project_task_types, anatomy_data)

            # Define version
            if self.follow_workfile_version:
                version_number = context.data('version')
                version_number = context.data("version")
            else:
                version_number = instance.data.get("version")
@@ -242,6 +213,9 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
            # If version is not specified for instance or context
            if version_number is None:
                task_data = anatomy_data.get("task") or {}
                task_name = task_data.get("name")
                task_type = task_data.get("type")
                version_number = get_versioning_start(
                    context.data["projectName"],
                    instance.context.data["hostName"],
@@ -250,29 +224,26 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
                    family=instance.data["family"],
                    subset=instance.data["subset"]
                )
            anatomy_updates["version"] = version_number
            anatomy_data["version"] = version_number

            # Additional data
            resolution_width = instance.data.get("resolutionWidth")
            if resolution_width:
                anatomy_updates["resolution_width"] = resolution_width
                anatomy_data["resolution_width"] = resolution_width

            resolution_height = instance.data.get("resolutionHeight")
            if resolution_height:
                anatomy_updates["resolution_height"] = resolution_height
                anatomy_data["resolution_height"] = resolution_height

            pixel_aspect = instance.data.get("pixelAspect")
            if pixel_aspect:
                anatomy_updates["pixel_aspect"] = float(
                anatomy_data["pixel_aspect"] = float(
                    "{:0.2f}".format(float(pixel_aspect))
                )

            fps = instance.data.get("fps")
            if fps:
                anatomy_updates["fps"] = float("{:0.2f}".format(float(fps)))

            anatomy_data = copy.deepcopy(context.data["anatomyData"])
            anatomy_data.update(anatomy_updates)
                anatomy_data["fps"] = float("{:0.2f}".format(float(fps)))

            # Store anatomy data
            instance.data["projectEntity"] = project_doc
@@ -288,3 +259,157 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin):
            instance_name,
            json.dumps(anatomy_data, indent=4)
        ))

    def _fill_asset_data(self, instance, project_doc, anatomy_data):
        # QUESTION should we make sure that all asset data are popped if
        # asset data cannot be found?
        # - 'asset', 'hierarchy', 'parent', 'folder'
        asset_doc = instance.data.get("assetEntity")
        if asset_doc:
            parents = asset_doc["data"].get("parents") or list()
            parent_name = project_doc["name"]
            if parents:
                parent_name = parents[-1]

            hierarchy = "/".join(parents)
            anatomy_data.update({
                "asset": asset_doc["name"],
                "hierarchy": hierarchy,
                "parent": parent_name,
                "folder": {
                    "name": asset_doc["name"],
                },
            })
            return

        if instance.data.get("newAssetPublishing"):
            hierarchy = instance.data["hierarchy"]
            anatomy_data["hierarchy"] = hierarchy

            parent_name = project_doc["name"]
            if hierarchy:
                parent_name = hierarchy.split("/")[-1]

            asset_name = instance.data["asset"].split("/")[-1]
            anatomy_data.update({
                "asset": asset_name,
                "hierarchy": hierarchy,
                "parent": parent_name,
                "folder": {
                    "name": asset_name,
                },
            })

    def _fill_task_data(self, instance, project_task_types, anatomy_data):
        # QUESTION should we make sure that all task data are popped if task
        # data cannot be resolved?
        # - 'task'

        # Skip if there is no task
        task_name = instance.data.get("task")
        if not task_name:
            return

        # Find task data based on asset entity
        asset_doc = instance.data.get("assetEntity")
        task_data = self._get_task_data_from_asset(
            asset_doc, task_name, project_task_types
        )
        if task_data:
            # Fill task data
            # - if we're in editorial, make sure the task type is filled
            if (
                not instance.data.get("newAssetPublishing")
                or task_data["type"]
            ):
                anatomy_data["task"] = task_data
                return

        # New hierarchy is not created, so we can only skip the rest
        # of the logic
        if not instance.data.get("newAssetPublishing"):
            return

        # Try to find task data based on hierarchy context and asset name
        hierarchy_context = instance.context.data.get("hierarchyContext")
        asset_name = instance.data.get("asset")
        if not hierarchy_context or not asset_name:
            return

        project_name = instance.context.data["projectName"]
        # OpenPype approach vs AYON approach
        if "/" not in asset_name:
            tasks_info = self._find_tasks_info_in_hierarchy(
                hierarchy_context, asset_name
            )
        else:
            current_data = hierarchy_context.get(project_name, {})
            for key in asset_name.split("/"):
                if key:
                    current_data = current_data.get("childs", {}).get(key, {})
            tasks_info = current_data.get("tasks", {})

        task_info = tasks_info.get(task_name, {})
        task_type = task_info.get("type")
        task_code = (
            project_task_types
            .get(task_type, {})
            .get("short_name")
        )
        anatomy_data["task"] = {
            "name": task_name,
            "type": task_type,
            "short": task_code
        }

    def _get_task_data_from_asset(
        self, asset_doc, task_name, project_task_types
    ):
        """
        Args:
            asset_doc (Union[dict[str, Any], None]): Asset document.
            task_name (Union[str, None]): Task name.
            project_task_types (dict[str, dict[str, Any]]): Project task
                types.

        Returns:
            Union[dict[str, str], None]: Task data or None if not found.
        """

        if not asset_doc or not task_name:
            return None

        asset_tasks = asset_doc["data"]["tasks"]
        task_type = asset_tasks.get(task_name, {}).get("type")
        task_code = (
            project_task_types
            .get(task_type, {})
            .get("short_name")
        )
        return {
            "name": task_name,
            "type": task_type,
            "short": task_code
        }

    def _find_tasks_info_in_hierarchy(self, hierarchy_context, asset_name):
        """Find tasks info for an asset in editorial hierarchy.

        Args:
            hierarchy_context (dict[str, Any]): Editorial hierarchy context.
            asset_name (str): Asset name.

        Returns:
            dict[str, dict[str, Any]]: Tasks info by name.
        """

        hierarchy_queue = collections.deque()
        hierarchy_queue.append(hierarchy_context)
        while hierarchy_queue:
            item = hierarchy_queue.popleft()
            if asset_name in item:
                return item[asset_name].get("tasks") or {}

            for subitem in item.values():
                hierarchy_queue.extend(subitem.get("childs") or [])
        return {}
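The BFS in `_find_tasks_info_in_hierarchy` walks a nested name-to-data mapping. A sketch of the assumed `hierarchyContext` shape; all names are illustrative, not values from the repository:

```python
# Illustrative shape only - actual keys come from editorial publishing
hierarchy_context = {
    "MyProject": {
        "childs": {
            "seq01": {
                "childs": {
                    "sh010": {
                        "tasks": {"comp": {"type": "Compositing"}}
                    }
                }
            }
        }
    }
}
```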
@@ -79,19 +79,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
            "representation": "TEMP"
        })

        # Add fill keys for editorial publishing creating new entity
        # TODO handle in editorial plugin
        if instance.data.get("newAssetPublishing"):
            if "hierarchy" not in template_data:
                template_data["hierarchy"] = instance.data["hierarchy"]

            if "asset" not in template_data:
                asset_name = instance.data["asset"].split("/")[-1]
                template_data["asset"] = asset_name
                template_data["folder"] = {
                    "name": asset_name
                }

        publish_templates = anatomy.templates_obj["publish"]
        if "folder" in publish_templates:
            publish_folder = publish_templates["folder"].format_strict(
@@ -65,7 +65,8 @@ class ExtractThumbnailFromSource(pyblish.api.InstancePlugin):
            "files": dst_filename,
            "stagingDir": dst_staging,
            "thumbnail": True,
            "tags": ["thumbnail"]
            "tags": ["thumbnail"],
            "outputName": "thumbnail",
        }

        # adding representation
@@ -54,7 +54,6 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin):
    # permissions error on files (files were used or user didn't have perms)
    # *but all other plugins must be successfully completed

    template_name_profiles = []
    _default_template_name = "hero"

    def process(self, instance):
@@ -15,6 +15,11 @@
    "copy_status": false,
    "force_sync": false
  },
  "hooks": {
    "InstallPySideToFusion": {
      "enabled": true
    }
  },
  "create": {
    "CreateSaver": {
      "temp_rendering_path_template": "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}",

@@ -27,6 +32,18 @@
      "farm_rendering"
    ],
    "image_format": "exr"
  },
  "CreateImageSaver": {
    "temp_rendering_path_template": "{workdir}/renders/fusion/{subset}/{subset}.{ext}",
    "default_variants": [
      "Main",
      "Mask"
    ],
    "instance_attributes": [
      "reviewable",
      "farm_rendering"
    ],
    "image_format": "exr"
  }
},
"publish": {
@@ -41,6 +41,29 @@
      }
    ]
  },
  {
    "type": "dict",
    "collapsible": true,
    "key": "hooks",
    "label": "Hooks",
    "children": [
      {
        "type": "dict",
        "collapsible": true,
        "checkbox_key": "enabled",
        "key": "InstallPySideToFusion",
        "label": "Install PySide2",
        "is_group": true,
        "children": [
          {
            "type": "boolean",
            "key": "enabled",
            "label": "Enabled"
          }
        ]
      }
    ]
  },
  {
    "type": "dict",
    "collapsible": true,
@@ -51,7 +74,56 @@
    "type": "dict",
    "collapsible": true,
    "key": "CreateSaver",
    "label": "Create Saver",
    "label": "Create Render Saver",
    "is_group": true,
    "children": [
      {
        "type": "text",
        "key": "temp_rendering_path_template",
        "label": "Temporary rendering path template"
      },
      {
        "type": "list",
        "key": "default_variants",
        "label": "Default variants",
        "object_type": {
          "type": "text"
        }
      },
      {
        "key": "instance_attributes",
        "label": "Instance attributes",
        "type": "enum",
        "multiselection": true,
        "enum_items": [
          {
            "reviewable": "Reviewable"
          },
          {
            "farm_rendering": "Farm rendering"
          }
        ]
      },
      {
        "key": "image_format",
        "label": "Output Image Format",
        "type": "enum",
        "multiselect": false,
        "enum_items": [
          {"exr": "exr"},
          {"tga": "tga"},
          {"png": "png"},
          {"tif": "tif"},
          {"jpg": "jpg"}
        ]
      }
    ]
  },
  {
    "type": "dict",
    "collapsible": true,
    "key": "CreateImageSaver",
    "label": "Create Image Saver",
    "is_group": true,
    "children": [
      {
@@ -1023,49 +1023,6 @@
  {
    "type": "label",
    "label": "<b>NOTE:</b> Hero publish template profiles settings were moved to <a href=\"settings://project_settings/global/tools/publish/hero_template_name_profiles\"><b>Tools/Publish/Hero template name profiles</b></a>. Please move values there."
  },
  {
    "type": "list",
    "key": "template_name_profiles",
    "label": "Template name profiles (DEPRECATED)",
    "use_label_wrap": true,
    "object_type": {
      "type": "dict",
      "children": [
        {
          "key": "families",
          "label": "Families",
          "type": "list",
          "object_type": "text"
        },
        {
          "type": "hosts-enum",
          "key": "hosts",
          "label": "Hosts",
          "multiselection": true
        },
        {
          "key": "task_types",
          "label": "Task types",
          "type": "task-types-enum"
        },
        {
          "key": "task_names",
          "label": "Task names",
          "type": "list",
          "object_type": "text"
        },
        {
          "type": "separator"
        },
        {
          "type": "text",
          "key": "template_name",
          "label": "Template name",
          "tooltip": "Name of template from Anatomy templates"
        }
      ]
    }
  }
]
},
@@ -140,12 +140,10 @@ class SiteSyncModel:
            Union[dict[str, Any], None]: Site icon definition.
        """

        if not project_name:
        if not project_name or not self.is_site_sync_enabled(project_name):
            return None

        active_site = self.get_active_site(project_name)
        provider = self._get_provider_for_site(project_name, active_site)
        return self._get_provider_icon(provider)
        return self._get_site_icon_def(project_name, active_site)

    def get_remote_site_icon_def(self, project_name):
        """Remote site icon definition.
@@ -160,7 +158,14 @@ class SiteSyncModel:
        if not project_name or not self.is_site_sync_enabled(project_name):
            return None
        remote_site = self.get_remote_site(project_name)
        provider = self._get_provider_for_site(project_name, remote_site)
        return self._get_site_icon_def(project_name, remote_site)

    def _get_site_icon_def(self, project_name, site_name):
        # use a different icon for studio even if the provider is 'local_drive'
        if site_name == self._site_sync_addon.DEFAULT_SITE:
            provider = "studio"
        else:
            provider = self._get_provider_for_site(project_name, site_name)
        return self._get_provider_icon(provider)

    def get_version_sync_availability(self, project_name, version_ids):
@@ -84,9 +84,9 @@ class SceneInventoryController:
    def get_containers(self):
        host = self._host
        if isinstance(host, ILoadHost):
            return host.get_containers()
            return list(host.get_containers())
        elif hasattr(host, "ls"):
            return host.ls()
            return list(host.ls())
        return []

    # Site Sync methods
@@ -23,6 +23,7 @@ from openpype.pipeline import (
)
from openpype.style import get_default_entity_icon_color
from openpype.tools.utils.models import TreeModel, Item
from openpype.tools.ayon_utils.widgets import get_qt_icon


def walk_hierarchy(node):
@@ -71,8 +72,8 @@ class InventoryModel(TreeModel):
        site_icons = self._controller.get_site_provider_icons()

        self._site_icons = {
            provider: QtGui.QIcon(icon_path)
            for provider, icon_path in site_icons.items()
            provider: get_qt_icon(icon_def)
            for provider, icon_def in site_icons.items()
        }

    def outdated(self, item):
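A hedged sketch of the icon definition dictionaries that replace plain icon paths here; the exact schema is an assumption based on other OpenPype tools, not verified against `get_qt_icon`:

```python
# Assumed icon definition shape (illustrative only)
icon_def = {
    "type": "awesome-font",
    "name": "fa.share-alt",
    "color": "#BABABA",
}
```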
@@ -42,8 +42,8 @@ class SiteSyncModel:
        if not self.is_sync_server_enabled():
            return {}
        site_sync = self._get_sync_server_module()
        return site_sync.get_site_icons()
        site_sync_addon = self._get_sync_server_module()
        return site_sync_addon.get_site_icons()

    def get_sites_information(self):
        return {
@@ -150,23 +150,23 @@ class SiteSyncModel:
        return self._remote_site_provider

    def _cache_sites(self):
        site_sync = self._get_sync_server_module()
        active_site = None
        remote_site = None
        active_site_provider = None
        remote_site_provider = None
        if site_sync is not None:
        if self.is_sync_server_enabled():
            site_sync = self._get_sync_server_module()
            project_name = self._controller.get_current_project_name()
            active_site = site_sync.get_active_site(project_name)
            remote_site = site_sync.get_remote_site(project_name)
            active_site_provider = "studio"
            remote_site_provider = "studio"
            if active_site != "studio":
                active_site_provider = site_sync.get_active_provider(
                active_site_provider = site_sync.get_provider_for_site(
                    project_name, active_site
                )
            if remote_site != "studio":
                remote_site_provider = site_sync.get_active_provider(
                remote_site_provider = site_sync.get_provider_for_site(
                    project_name, remote_site
                )
@@ -18,10 +18,11 @@ class ScreenMarquee(QtWidgets.QDialog):
        super(ScreenMarquee, self).__init__(parent=parent)

        self.setWindowFlags(
            QtCore.Qt.FramelessWindowHint
            QtCore.Qt.Window
            | QtCore.Qt.FramelessWindowHint
            | QtCore.Qt.WindowStaysOnTopHint
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.Tool)
        )
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.setCursor(QtCore.Qt.CrossCursor)
        self.setMouseTracking(True)
@@ -210,6 +211,9 @@ class ScreenMarquee(QtWidgets.QDialog):
        """

        tool = cls()
        # Activate so the Escape key event is not ignored.
        tool.setWindowState(QtCore.Qt.WindowActive)
        # Exec dialog and return the captured pixmap.
        tool.exec_()
        return tool.get_captured_pixmap()
@@ -42,7 +42,7 @@ from .widgets import (
)


class PublisherWindow(QtWidgets.QDialog):
class PublisherWindow(QtWidgets.QWidget):
    """Main window of publisher."""
    default_width = 1300
    default_height = 800
@@ -50,7 +50,7 @@ class PublisherWindow(QtWidgets.QDialog):
     publish_footer_spacer = 2

     def __init__(self, parent=None, controller=None, reset_on_show=None):
-        super(PublisherWindow, self).__init__(parent)
+        super(PublisherWindow, self).__init__()

         self.setObjectName("PublishWindow")

@@ -64,17 +64,12 @@ class PublisherWindow(QtWidgets.QDialog):
         if reset_on_show is None:
             reset_on_show = True

-        if parent is None:
-            on_top_flag = QtCore.Qt.WindowStaysOnTopHint
-        else:
-            on_top_flag = QtCore.Qt.Dialog
-
         self.setWindowFlags(
-            QtCore.Qt.WindowTitleHint
+            QtCore.Qt.Window
+            | QtCore.Qt.WindowTitleHint
             | QtCore.Qt.WindowMaximizeButtonHint
             | QtCore.Qt.WindowMinimizeButtonHint
             | QtCore.Qt.WindowCloseButtonHint
-            | on_top_flag
         )

         if controller is None:

@@ -189,7 +184,7 @@ class PublisherWindow(QtWidgets.QDialog):
             controller, content_stacked_widget
         )

-        report_widget = ReportPageWidget(controller, parent)
+        report_widget = ReportPageWidget(controller, content_stacked_widget)

         # Details - Publish details
         publish_details_widget = PublishReportViewerWidget(

@@ -299,6 +294,12 @@ class PublisherWindow(QtWidgets.QDialog):
         controller.event_system.add_callback(
             "publish.process.stopped", self._on_publish_stop
         )
+        controller.event_system.add_callback(
+            "publish.process.instance.changed", self._on_instance_change
+        )
+        controller.event_system.add_callback(
+            "publish.process.plugin.changed", self._on_plugin_change
+        )
         controller.event_system.add_callback(
             "show.card.message", self._on_overlay_message
         )

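These registrations follow the controller's publish/subscribe pattern: the window never polls publish state, it reacts to topic callbacks. A stripped-down stand-in for that pattern, not the real openpype event system:

    # Minimal stand-in event system with string topics.
    class EventSystem:
        def __init__(self):
            self._callbacks = {}

        def add_callback(self, topic, callback):
            self._callbacks.setdefault(topic, []).append(callback)

        def emit(self, topic):
            for callback in self._callbacks.get(topic, []):
                callback()

    events = EventSystem()
    events.add_callback(
        "publish.process.instance.changed",
        lambda: print("instance changed -> raise window"),
    )
    events.emit("publish.process.instance.changed")
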
@@ -557,6 +558,18 @@ class PublisherWindow(QtWidgets.QDialog):
             self._reset_on_show = False
             self.reset()

+    def _make_sure_on_top(self):
+        """Raise window to top and activate it.
+
+        This may not work for some DCCs without Qt.
+        """
+
+        if not self._window_is_visible:
+            self.show()
+
+        self.setWindowState(QtCore.Qt.WindowActive)
+        self.raise_()
+
     def _checks_before_save(self, explicit_save):
         """Save of changes may trigger some issues.

@@ -869,6 +882,12 @@ class PublisherWindow(QtWidgets.QDialog):
         if self._is_on_create_tab():
             self._go_to_publish_tab()

+    def _on_instance_change(self):
+        self._make_sure_on_top()
+
+    def _on_plugin_change(self):
+        self._make_sure_on_top()
+
     def _on_publish_validated_change(self, event):
         if event["value"]:
             self._validate_btn.setEnabled(False)

@@ -879,6 +898,7 @@ class PublisherWindow(QtWidgets.QDialog):
         self._comment_input.setText("")

     def _on_publish_stop(self):
+        self._make_sure_on_top()
         self._set_publish_overlay_visibility(False)
         self._reset_btn.setEnabled(True)
         self._stop_btn.setEnabled(False)

@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring Pype version."""
-__version__ = "3.18.2"
+__version__ = "3.18.4-nightly.1"

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "OpenPype"
-version = "3.18.2" # OpenPype
+version = "3.18.3" # OpenPype
 description = "Open VFX and Animation pipeline with support."
 authors = ["OpenPype Team <info@openpype.io>"]
 license = "MIT License"

@@ -181,3 +181,8 @@ reportMissingTypeStubs = false

 [tool.poetry.extras]
 docs = ["Sphinx", "furo", "sphinxcontrib-napoleon"]
+
+[tool.pydocstyle]
+inherit = false
+convetion = "google"
+match = "(?!test_).*\\.py"

@@ -697,13 +697,6 @@ class IntegrateHeroVersionModel(BaseSettingsModel):
     optional: bool = Field(False, title="Optional")
     active: bool = Field(True, title="Active")
     families: list[str] = Field(default_factory=list, title="Families")
-    # TODO remove when removed from client code
-    template_name_profiles: list[IntegrateHeroTemplateNameProfileModel] = (
-        Field(
-            default_factory=list,
-            title="Template name profiles"
-        )
-    )


 class CleanUpModel(BaseSettingsModel):

@@ -1049,19 +1042,6 @@ DEFAULT_PUBLISH_VALUES = {
             "layout",
             "mayaScene",
             "simpleUnrealTexture"
         ],
-        "template_name_profiles": [
-            {
-                "product_types": [
-                    "simpleUnrealTexture"
-                ],
-                "hosts": [
-                    "standalonepublisher"
-                ],
-                "task_types": [],
-                "task_names": [],
-                "template_name": "simpleUnrealTextureHero"
-            }
-        ]
     },
     "CleanUp": {

@@ -1 +1 @@
-__version__ = "0.1.5"
+__version__ = "0.1.6"

@@ -35,6 +35,14 @@ def _image_format_enum():
     ]


+def _frame_range_options_enum():
+    return [
+        {"value": "asset_db", "label": "Current asset context"},
+        {"value": "render_range", "label": "From render in/out"},
+        {"value": "comp_range", "label": "From composition timeline"},
+    ]
+
+
 class CreateSaverPluginModel(BaseSettingsModel):
     _isGroup = True
     temp_rendering_path_template: str = Field(

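Enum resolvers like the one added above return value/label pairs that the settings UI presents as a dropdown; only the "value" string is persisted. A small sketch of validating a stored value against such a resolver (the helper is illustrative):

    def _frame_range_options_enum():
        return [
            {"value": "asset_db", "label": "Current asset context"},
            {"value": "render_range", "label": "From render in/out"},
            {"value": "comp_range", "label": "From composition timeline"},
        ]

    def is_valid_option(value):
        # Only the "value" keys are what settings actually store.
        return value in {item["value"] for item in _frame_range_options_enum()}

    assert is_valid_option("asset_db")
    assert not is_valid_option("whole_timeline")
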
@@ -49,16 +57,49 @@ class CreateSaverPluginModel(BaseSettingsModel):
         enum_resolver=_create_saver_instance_attributes_enum,
         title="Instance attributes"
     )
-    image_format: str = Field(
-        enum_resolver=_image_format_enum,
-        title="Output Image Format"
+    output_formats: list[str] = Field(
+        default_factory=list,
+        title="Output formats"
     )


+class HookOptionalModel(BaseSettingsModel):
+    enabled: bool = Field(
+        True,
+        title="Enabled"
+    )
+
+
+class HooksModel(BaseSettingsModel):
+    InstallPySideToFusion: HookOptionalModel = Field(
+        default_factory=HookOptionalModel,
+        title="Install PySide2"
+    )
+
+
+class CreateSaverModel(CreateSaverPluginModel):
+    default_frame_range_option: str = Field(
+        default="asset_db",
+        enum_resolver=_frame_range_options_enum,
+        title="Default frame range source"
+    )
+
+
+class CreateImageSaverModel(CreateSaverPluginModel):
+    default_frame: int = Field(
+        0,
+        title="Default rendered frame"
+    )
 class CreatPluginsModel(BaseSettingsModel):
-    CreateSaver: CreateSaverPluginModel = Field(
-        default_factory=CreateSaverPluginModel,
-        title="Create Saver"
+    CreateSaver: CreateSaverModel = Field(
+        default_factory=CreateSaverModel,
+        title="Create Saver",
+        description="Creator for render product type (eg. sequence)"
     )
+    CreateImageSaver: CreateImageSaverModel = Field(
+        default_factory=CreateImageSaverModel,
+        title="Create Image Saver",
+        description="Creator for image product type (eg. single)"
+    )

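The CreateSaverModel/CreateImageSaverModel split keeps shared saver options in the base class while each creator adds its own field. BaseSettingsModel is AYON's pydantic wrapper, so the same layering can be sketched with plain pydantic; this is an approximation, not the server code:

    from typing import List

    from pydantic import BaseModel, Field

    class CreateSaverPluginModel(BaseModel):
        # Options shared by every saver creator.
        temp_rendering_path_template: str = ""
        default_variants: List[str] = Field(default_factory=list)

    class CreateSaverModel(CreateSaverPluginModel):
        # Only the render (sequence) creator chooses a frame range source.
        default_frame_range_option: str = "asset_db"

    class CreateImageSaverModel(CreateSaverPluginModel):
        # Only the single-image creator needs one frame to render.
        default_frame: int = 0

    # Each creator gets the shared fields plus its own:
    print(CreateSaverModel().default_frame_range_option)  # asset_db
    print(CreateImageSaverModel().default_frame)          # 0
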
@@ -71,6 +112,10 @@ class FusionSettings(BaseSettingsModel):
         default_factory=CopyFusionSettingsModel,
         title="Local Fusion profile settings"
     )
+    hooks: HooksModel = Field(
+        default_factory=HooksModel,
+        title="Hooks"
+    )
     create: CreatPluginsModel = Field(
         default_factory=CreatPluginsModel,
         title="Creator plugins"

@@ -93,6 +138,11 @@ DEFAULT_VALUES = {
         "copy_status": False,
         "force_sync": False
     },
+    "hooks": {
+        "InstallPySideToFusion": {
+            "enabled": True
+        }
+    },
     "create": {
         "CreateSaver": {
             "temp_rendering_path_template": "{workdir}/renders/fusion/{product[name]}/{product[name]}.{frame}.{ext}",

@@ -104,7 +154,21 @@ DEFAULT_VALUES = {
                 "reviewable",
                 "farm_rendering"
             ],
-            "image_format": "exr"
+            "image_format": "exr",
+            "default_frame_range_option": "asset_db"
+        },
+        "CreateImageSaver": {
+            "temp_rendering_path_template": "{workdir}/renders/fusion/{product[name]}/{product[name]}.{ext}",
+            "default_variants": [
+                "Main",
+                "Mask"
+            ],
+            "instance_attributes": [
+                "reviewable",
+                "farm_rendering"
+            ],
+            "image_format": "exr",
+            "default_frame": 0
         }
     }
 }

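The temp_rendering_path_template defaults use nested keys such as {product[name]}, which plain str.format resolves through item access on the passed mapping. A worked example with the CreateImageSaver default; the data values are illustrative:

    template = (
        "{workdir}/renders/fusion/{product[name]}/{product[name]}.{ext}"
    )
    path = template.format(
        workdir="/proj/shot010/work/comp",
        product={"name": "imageMain"},
        ext="exr",
    )
    print(path)
    # /proj/shot010/work/comp/renders/fusion/imageMain/imageMain.exr
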
@@ -1 +1 @@
-__version__ = "0.1.1"
+__version__ = "0.1.3"

@@ -1 +1 @@
-__version__ = "0.2.10"
+__version__ = "0.2.11"

@@ -7,15 +7,16 @@ python = ">=3.9.1,<3.10"
 aiohttp_json_rpc = "*" # TVPaint server
 aiohttp-middlewares = "^2.0.0"
 wsrpc_aiohttp = "^3.1.1" # websocket server
 Click = "^8"
 clique = "1.6.*"
 jsonschema = "^2.6.0"
 pymongo = "^3.11.2"
 log4mongo = "^1.7"
 pyblish-base = "^1.8.11"
 pynput = "^1.7.2" # Timers manager - TODO remove
 "Qt.py" = "^1.3.3"
-qtawesome = "0.7.3"
 speedcopy = "^2.1"
 six = "^1.15"
+qtawesome = "0.7.3"
+
+[ayon.runtimeDependencies]
+OpenTimelineIO = "0.14.1"

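The new [ayon.runtimeDependencies] table is plain TOML, so tooling can read it without going through Poetry. A hedged sketch using the standard-library parser; tomllib requires Python 3.11, so the tomli backport is assumed on the 3.9 interpreter pinned above:

    try:
        import tomllib  # Python 3.11+
    except ModuleNotFoundError:
        import tomli as tomllib  # assumption: backport installed on older Pythons

    with open("pyproject.toml", "rb") as stream:
        data = tomllib.load(stream)

    runtime_deps = data.get("ayon", {}).get("runtimeDependencies", {})
    print(runtime_deps)  # e.g. {'OpenTimelineIO': '0.14.1'}
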
@@ -16,10 +16,6 @@ max-complexity = 30
 [pylint.'MESSAGES CONTROL']
 disable = no-member

-[pydocstyle]
-convention = google
-ignore = D107
-
 [coverage:run]
 branch = True
 omit = /tests