Merge branch 'develop' into bugfix/OP-7463_Maya-Yeti-publish-textures
20
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
|
|
@ -35,6 +35,16 @@ body:
|
|||
label: Version
|
||||
description: What version are you running? Look to OpenPype Tray
|
||||
options:
|
||||
- 3.18.2-nightly.2
|
||||
- 3.18.2-nightly.1
|
||||
- 3.18.1
|
||||
- 3.18.1-nightly.1
|
||||
- 3.18.0
|
||||
- 3.17.7
|
||||
- 3.17.7-nightly.7
|
||||
- 3.17.7-nightly.6
|
||||
- 3.17.7-nightly.5
|
||||
- 3.17.7-nightly.4
|
||||
- 3.17.7-nightly.3
|
||||
- 3.17.7-nightly.2
|
||||
- 3.17.7-nightly.1
|
||||
|
|
@ -125,16 +135,6 @@ body:
|
|||
- 3.15.4
|
||||
- 3.15.4-nightly.3
|
||||
- 3.15.4-nightly.2
|
||||
- 3.15.4-nightly.1
|
||||
- 3.15.3
|
||||
- 3.15.3-nightly.4
|
||||
- 3.15.3-nightly.3
|
||||
- 3.15.3-nightly.2
|
||||
- 3.15.3-nightly.1
|
||||
- 3.15.2
|
||||
- 3.15.2-nightly.6
|
||||
- 3.15.2-nightly.5
|
||||
- 3.15.2-nightly.4
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
|
|
|
|||
1140
CHANGELOG.md
|
|
@ -7,6 +7,10 @@ OpenPype
|
|||
|
||||
[](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) 
|
||||
|
||||
## Important Notice!
|
||||
|
||||
OpenPype as a standalone product has reach end of it's life and this repository is now used as a pipeline core code for [AYON](https://ynput.io/ayon/). You can read more details about the end of life process here https://community.ynput.io/t/openpype-end-of-life-timeline/877
|
||||
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
|
|
|||
|
|
@ -296,12 +296,15 @@ def run(script):
|
|||
@click.option("--mongo_url",
|
||||
help="MongoDB for testing.",
|
||||
default=None)
|
||||
@click.option("--dump_databases",
|
||||
help="Dump all databases to data folder.",
|
||||
default=None)
|
||||
def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
|
||||
timeout, setup_only, mongo_url, app_group):
|
||||
timeout, setup_only, mongo_url, app_group, dump_databases):
|
||||
"""Run all automatic tests after proper initialization via start.py"""
|
||||
PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
|
||||
persist, app_variant, timeout, setup_only,
|
||||
mongo_url, app_group)
|
||||
mongo_url, app_group, dump_databases)
|
||||
|
||||
|
||||
@main.command(help="DEPRECATED - run sync server")
|
||||
|
|
|
|||
|
|
@ -606,7 +606,7 @@ def convert_v4_version_to_v3(version):
|
|||
output_data[dst_key] = version[src_key]
|
||||
|
||||
if "createdAt" in version:
|
||||
created_at = arrow.get(version["createdAt"])
|
||||
created_at = arrow.get(version["createdAt"]).to("local")
|
||||
output_data["time"] = created_at.strftime("%Y%m%dT%H%M%SZ")
|
||||
|
||||
output["data"] = output_data
|
||||
|
|
|
|||
|
|
@ -80,8 +80,8 @@ def _get_subsets(
|
|||
|
||||
for subset in con.get_products(
|
||||
project_name,
|
||||
subset_ids,
|
||||
subset_names,
|
||||
product_ids=subset_ids,
|
||||
product_names=subset_names,
|
||||
folder_ids=folder_ids,
|
||||
names_by_folder_ids=names_by_folder_ids,
|
||||
active=active,
|
||||
|
|
@ -113,23 +113,23 @@ def _get_versions(
|
|||
|
||||
queried_versions = con.get_versions(
|
||||
project_name,
|
||||
version_ids,
|
||||
subset_ids,
|
||||
versions,
|
||||
hero,
|
||||
standard,
|
||||
latest,
|
||||
version_ids=version_ids,
|
||||
product_ids=subset_ids,
|
||||
versions=versions,
|
||||
hero=hero,
|
||||
standard=standard,
|
||||
latest=latest,
|
||||
active=active,
|
||||
fields=fields
|
||||
)
|
||||
|
||||
versions = []
|
||||
version_entities = []
|
||||
hero_versions = []
|
||||
for version in queried_versions:
|
||||
if version["version"] < 0:
|
||||
hero_versions.append(version)
|
||||
else:
|
||||
versions.append(convert_v4_version_to_v3(version))
|
||||
version_entities.append(convert_v4_version_to_v3(version))
|
||||
|
||||
if hero_versions:
|
||||
subset_ids = set()
|
||||
|
|
@ -159,9 +159,9 @@ def _get_versions(
|
|||
break
|
||||
conv_hero = convert_v4_version_to_v3(hero_version)
|
||||
conv_hero["version_id"] = version_id
|
||||
versions.append(conv_hero)
|
||||
version_entities.append(conv_hero)
|
||||
|
||||
return versions
|
||||
return version_entities
|
||||
|
||||
|
||||
def get_asset_by_id(project_name, asset_id, fields=None):
|
||||
|
|
@ -539,11 +539,11 @@ def get_representations(
|
|||
|
||||
representations = con.get_representations(
|
||||
project_name,
|
||||
representation_ids,
|
||||
representation_names,
|
||||
version_ids,
|
||||
names_by_version_ids,
|
||||
active,
|
||||
representation_ids=representation_ids,
|
||||
representation_names=representation_names,
|
||||
version_ids=version_ids,
|
||||
names_by_version_ids=names_by_version_ids,
|
||||
active=active,
|
||||
fields=fields
|
||||
)
|
||||
for representation in representations:
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
|
|||
"tvpaint",
|
||||
"substancepainter",
|
||||
"aftereffects",
|
||||
"wrap"
|
||||
}
|
||||
launch_types = {LaunchTypes.local}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,7 +19,8 @@ class CopyTemplateWorkfile(PreLaunchHook):
|
|||
|
||||
# Before `AddLastWorkfileToLaunchArgs`
|
||||
order = 0
|
||||
app_groups = {"blender", "photoshop", "tvpaint", "aftereffects"}
|
||||
app_groups = {"blender", "photoshop", "tvpaint", "aftereffects",
|
||||
"wrap"}
|
||||
launch_types = {LaunchTypes.local}
|
||||
|
||||
def execute(self):
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
# AfterEffects Integration
|
||||
|
||||
Requirements: This extension requires use of Javascript engine, which is
|
||||
Requirements: This extension requires use of Javascript engine, which is
|
||||
available since CC 16.0.
|
||||
Please check your File>Project Settings>Expressions>Expressions Engine
|
||||
|
||||
|
|
@ -13,26 +13,28 @@ The After Effects integration requires two components to work; `extension` and `
|
|||
To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
|
||||
|
||||
```
|
||||
ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
|
||||
ExManCmd /install {path to addon}/api/extension.zxp
|
||||
```
|
||||
OR
|
||||
download [Anastasiy’s Extension Manager](https://install.anastasiy.com/)
|
||||
|
||||
`{path to addon}` will be most likely in your AppData (on Windows, in your user data folder in Linux and MacOS.)
|
||||
|
||||
### Server
|
||||
|
||||
The easiest way to get the server and After Effects launch is with:
|
||||
|
||||
```
|
||||
python -c ^"import avalon.photoshop;avalon.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
|
||||
python -c ^"import openpype.hosts.photoshop;openpype.hosts..aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
|
||||
```
|
||||
|
||||
`avalon.aftereffects.launch` launches the application and server, and also closes the server when After Effects exists.
|
||||
|
||||
## Usage
|
||||
|
||||
The After Effects extension can be found under `Window > Extensions > OpenPype`. Once launched you should be presented with a panel like this:
|
||||
The After Effects extension can be found under `Window > Extensions > AYON`. Once launched you should be presented with a panel like this:
|
||||
|
||||

|
||||

|
||||
|
||||
|
||||
## Developing
|
||||
|
|
@ -43,8 +45,8 @@ When developing the extension you can load it [unsigned](https://github.com/Adob
|
|||
When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).
|
||||
|
||||
```
|
||||
ZXPSignCmd -selfSignedCert NA NA Avalon Avalon-After-Effects avalon extension.p12
|
||||
ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to avalon-core}\avalon\aftereffects\extension.zxp extension.p12 avalon
|
||||
ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12
|
||||
ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon
|
||||
```
|
||||
|
||||
### Plugin Examples
|
||||
|
|
@ -52,14 +54,14 @@ ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to av
|
|||
These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).
|
||||
|
||||
Expected deployed extension location on default Windows:
|
||||
`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\com.openpype.AE.panel`
|
||||
`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel`
|
||||
|
||||
For easier debugging of Javascript:
|
||||
https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
|
||||
Add (optional) --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome
|
||||
then localhost:8092
|
||||
|
||||
Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
|
||||
Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
|
||||
## Resources
|
||||
- https://javascript-tools-guide.readthedocs.io/introduction/index.html
|
||||
- https://github.com/Adobe-CEP/Getting-Started-guides
|
||||
|
|
|
|||
|
|
@ -1,32 +1,31 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ExtensionList>
|
||||
<Extension Id="com.openpype.AE.panel">
|
||||
<Extension Id="io.ynput.AE.panel">
|
||||
<HostList>
|
||||
|
||||
|
||||
<!-- Comment Host tags according to the apps you want your panel to support -->
|
||||
|
||||
|
||||
<!-- Photoshop -->
|
||||
<Host Name="PHXS" Port="8088"/>
|
||||
|
||||
|
||||
<!-- Illustrator -->
|
||||
<Host Name="ILST" Port="8089"/>
|
||||
|
||||
<!-- InDesign -->
|
||||
<Host Name="IDSN" Port="8090" />
|
||||
|
||||
|
||||
<!-- Premiere -->
|
||||
<Host Name="PPRO" Port="8091" />
|
||||
|
||||
|
||||
<!-- AfterEffects -->
|
||||
<Host Name="AEFT" Port="8092" />
|
||||
|
||||
|
||||
<!-- PRELUDE -->
|
||||
<Host Name="PRLD" Port="8093" />
|
||||
|
||||
|
||||
<!-- FLASH Pro -->
|
||||
<Host Name="FLPR" Port="8094" />
|
||||
|
||||
|
||||
</HostList>
|
||||
</Extension>
|
||||
</ExtensionList>
|
||||
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.27"
|
||||
ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<ExtensionManifest Version="8.0" ExtensionBundleId="io.ynput.AE.panel" ExtensionBundleVersion="1.1.0"
|
||||
ExtensionBundleName="io.ynput.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
|
||||
<ExtensionList>
|
||||
<Extension Id="com.openpype.AE.panel" Version="1.0" />
|
||||
<Extension Id="io.ynput.AE.panel" Version="1.0" />
|
||||
</ExtensionList>
|
||||
<ExecutionEnvironment>
|
||||
<HostList>
|
||||
|
|
@ -38,7 +38,7 @@
|
|||
</RequiredRuntimeList>
|
||||
</ExecutionEnvironment>
|
||||
<DispatchInfoList>
|
||||
<Extension Id="com.openpype.AE.panel">
|
||||
<Extension Id="io.ynput.AE.panel">
|
||||
<DispatchInfo >
|
||||
<Resources>
|
||||
<MainPath>./index.html</MainPath>
|
||||
|
|
@ -49,7 +49,7 @@
|
|||
</Lifecycle>
|
||||
<UI>
|
||||
<Type>Panel</Type>
|
||||
<Menu>OpenPype</Menu>
|
||||
<Menu>AYON</Menu>
|
||||
<Geometry>
|
||||
<Size>
|
||||
<Height>200</Height>
|
||||
|
|
@ -66,7 +66,7 @@
|
|||
|
||||
</Geometry>
|
||||
<Icons>
|
||||
<Icon Type="Normal">./icons/iconNormal.png</Icon>
|
||||
<Icon Type="Normal">./icons/ayon_logo.png</Icon>
|
||||
<Icon Type="RollOver">./icons/iconRollover.png</Icon>
|
||||
<Icon Type="Disabled">./icons/iconDisabled.png</Icon>
|
||||
<Icon Type="DarkNormal">./icons/iconDarkNormal.png</Icon>
|
||||
|
|
|
|||
BIN
openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png
Normal file
|
After Width: | Height: | Size: 3.5 KiB |
BIN
openpype/hosts/aftereffects/api/panel.png
Normal file
|
After Width: | Height: | Size: 16 KiB |
|
Before Width: | Height: | Size: 13 KiB |
BIN
openpype/hosts/aftereffects/api/panel_failure.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
|
|
@ -56,16 +56,15 @@ class RenderCreator(Creator):
|
|||
use_composition_name = (pre_create_data.get("use_composition_name") or
|
||||
len(comps) > 1)
|
||||
for comp in comps:
|
||||
composition_name = re.sub(
|
||||
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
|
||||
"",
|
||||
comp.name
|
||||
)
|
||||
if use_composition_name:
|
||||
if "{composition}" not in subset_name_from_ui.lower():
|
||||
subset_name_from_ui += "{Composition}"
|
||||
|
||||
composition_name = re.sub(
|
||||
"[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
|
||||
"",
|
||||
comp.name
|
||||
)
|
||||
|
||||
dynamic_fill = prepare_template_data({"composition":
|
||||
composition_name})
|
||||
subset_name = subset_name_from_ui.format(**dynamic_fill)
|
||||
|
|
@ -81,6 +80,8 @@ class RenderCreator(Creator):
|
|||
inst.subset_name))
|
||||
|
||||
data["members"] = [comp.id]
|
||||
data["orig_comp_name"] = composition_name
|
||||
|
||||
new_instance = CreatedInstance(self.family, subset_name, data,
|
||||
self)
|
||||
if "farm" in pre_create_data:
|
||||
|
|
@ -88,7 +89,7 @@ class RenderCreator(Creator):
|
|||
new_instance.creator_attributes["farm"] = use_farm
|
||||
|
||||
review = pre_create_data["mark_for_review"]
|
||||
new_instance.creator_attributes["mark_for_review"] = review
|
||||
new_instance. creator_attributes["mark_for_review"] = review
|
||||
|
||||
api.get_stub().imprint(new_instance.id,
|
||||
new_instance.data_to_store())
|
||||
|
|
@ -150,16 +151,18 @@ class RenderCreator(Creator):
|
|||
subset_change.new_value)
|
||||
|
||||
def remove_instances(self, instances):
|
||||
"""Removes metadata and renames to original comp name if available."""
|
||||
for instance in instances:
|
||||
self._remove_instance_from_context(instance)
|
||||
self.host.remove_instance(instance)
|
||||
|
||||
subset = instance.data["subset"]
|
||||
comp_id = instance.data["members"][0]
|
||||
comp = api.get_stub().get_item(comp_id)
|
||||
orig_comp_name = instance.data.get("orig_comp_name")
|
||||
if comp:
|
||||
new_comp_name = comp.name.replace(subset, '')
|
||||
if not new_comp_name:
|
||||
if orig_comp_name:
|
||||
new_comp_name = orig_comp_name
|
||||
else:
|
||||
new_comp_name = "dummyCompName"
|
||||
api.get_stub().rename_item(comp_id,
|
||||
new_comp_name)
|
||||
|
|
|
|||
|
|
@ -60,8 +60,9 @@ class ExtractLocalRender(publish.Extractor):
|
|||
first_repre = not representations
|
||||
if instance.data["review"] and first_repre:
|
||||
repre_data["tags"] = ["review"]
|
||||
thumbnail_path = os.path.join(staging_dir, files[0])
|
||||
instance.data["thumbnailSource"] = thumbnail_path
|
||||
# TODO return back when Extract from source same as regular
|
||||
# thumbnail_path = os.path.join(staging_dir, files[0])
|
||||
# instance.data["thumbnailSource"] = thumbnail_path
|
||||
|
||||
representations.append(repre_data)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import bpy
|
||||
|
||||
|
|
@ -59,7 +59,7 @@ def get_render_product(output_path, name, aov_sep):
|
|||
instance (pyblish.api.Instance): The instance to publish.
|
||||
ext (str): The image format to render.
|
||||
"""
|
||||
filepath = os.path.join(output_path, name)
|
||||
filepath = output_path / name.lstrip("/")
|
||||
render_product = f"{filepath}{aov_sep}beauty.####"
|
||||
render_product = render_product.replace("\\", "/")
|
||||
|
||||
|
|
@ -180,7 +180,7 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
|
|||
return []
|
||||
|
||||
output.file_slots.clear()
|
||||
output.base_path = output_path
|
||||
output.base_path = str(output_path)
|
||||
|
||||
aov_file_products = []
|
||||
|
||||
|
|
@ -191,8 +191,9 @@ def set_node_tree(output_path, name, aov_sep, ext, multilayer):
|
|||
|
||||
output.file_slots.new(filepath)
|
||||
|
||||
aov_file_products.append(
|
||||
(render_pass.name, os.path.join(output_path, filepath)))
|
||||
filename = str(output_path / filepath.lstrip("/"))
|
||||
|
||||
aov_file_products.append((render_pass.name, filename))
|
||||
|
||||
node_input = output.inputs[-1]
|
||||
|
||||
|
|
@ -214,12 +215,11 @@ def imprint_render_settings(node, data):
|
|||
def prepare_rendering(asset_group):
|
||||
name = asset_group.name
|
||||
|
||||
filepath = bpy.data.filepath
|
||||
filepath = Path(bpy.data.filepath)
|
||||
assert filepath, "Workfile not saved. Please save the file first."
|
||||
|
||||
file_path = os.path.dirname(filepath)
|
||||
file_name = os.path.basename(filepath)
|
||||
file_name, _ = os.path.splitext(file_name)
|
||||
dirpath = filepath.parent
|
||||
file_name = Path(filepath.name).stem
|
||||
|
||||
project = get_current_project_name()
|
||||
settings = get_project_settings(project)
|
||||
|
|
@ -232,7 +232,7 @@ def prepare_rendering(asset_group):
|
|||
set_render_format(ext, multilayer)
|
||||
aov_list, custom_passes = set_render_passes(settings)
|
||||
|
||||
output_path = os.path.join(file_path, render_folder, file_name)
|
||||
output_path = Path.joinpath(dirpath, render_folder, file_name)
|
||||
|
||||
render_product = get_render_product(output_path, name, aov_sep)
|
||||
aov_file_product = set_node_tree(
|
||||
|
|
|
|||
|
|
@ -11,12 +11,12 @@ import pyblish.api
|
|||
|
||||
|
||||
class CollectBlenderRender(pyblish.api.InstancePlugin):
|
||||
"""Gather all publishable render layers from renderSetup."""
|
||||
"""Gather all publishable render instances."""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.01
|
||||
hosts = ["blender"]
|
||||
families = ["render"]
|
||||
label = "Collect Render Layers"
|
||||
label = "Collect Render"
|
||||
sync_workfile_version = False
|
||||
|
||||
@staticmethod
|
||||
|
|
@ -78,8 +78,6 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
|
|||
|
||||
assert render_data, "No render data found."
|
||||
|
||||
self.log.debug(f"render_data: {dict(render_data)}")
|
||||
|
||||
render_product = render_data.get("render_product")
|
||||
aov_file_product = render_data.get("aov_file_product")
|
||||
ext = render_data.get("image_format")
|
||||
|
|
@ -101,7 +99,7 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
|
|||
expected_files = expected_beauty | expected_aovs
|
||||
|
||||
instance.data.update({
|
||||
"family": "render.farm",
|
||||
"families": ["render", "render.farm"],
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"frameStartHandle": frame_handle_start,
|
||||
|
|
@ -120,5 +118,3 @@ class CollectBlenderRender(pyblish.api.InstancePlugin):
|
|||
"colorspaceView": "ACES 1.0 SDR-video",
|
||||
"renderProducts": colorspace.ARenderProduct(),
|
||||
})
|
||||
|
||||
self.log.debug(f"data: {instance.data}")
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ from openpype.hosts.blender.api.pipeline import AVALON_PROPERTY
|
|||
class ExtractLayout(publish.Extractor, publish.OptionalPyblishPluginMixin):
|
||||
"""Extract a layout."""
|
||||
|
||||
label = "Extract Layout"
|
||||
label = "Extract Layout (JSON)"
|
||||
hosts = ["blender"]
|
||||
families = ["layout"]
|
||||
optional = True
|
||||
|
|
|
|||
|
|
@ -26,6 +26,10 @@ class ExtractThumbnail(publish.Extractor):
|
|||
def process(self, instance):
|
||||
self.log.debug("Extracting capture..")
|
||||
|
||||
if instance.data.get("thumbnailSource"):
|
||||
self.log.debug("Thumbnail source found, skipping...")
|
||||
return
|
||||
|
||||
stagingdir = self.staging_dir(instance)
|
||||
asset_name = instance.data["assetEntity"]["name"]
|
||||
subset = instance.data["subset"]
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ class IncrementWorkfileVersion(
|
|||
optional = True
|
||||
hosts = ["blender"]
|
||||
families = ["animation", "model", "rig", "action", "layout", "blendScene",
|
||||
"pointcache", "render"]
|
||||
"pointcache", "render.farm"]
|
||||
|
||||
def process(self, context):
|
||||
if not self.is_active(context.data):
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
|
|||
"""
|
||||
|
||||
order = ValidateContentsOrder
|
||||
families = ["render.farm"]
|
||||
families = ["render"]
|
||||
hosts = ["blender"]
|
||||
label = "Validate Render Output for Deadline"
|
||||
optional = True
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
import os
|
||||
import sys
|
||||
|
||||
from qtpy import QtWidgets, QtCore, QtGui
|
||||
|
|
@ -18,6 +19,10 @@ from openpype.resources import get_openpype_icon_filepath
|
|||
from .pipeline import FusionEventHandler
|
||||
from .pulse import FusionPulse
|
||||
|
||||
|
||||
MENU_LABEL = os.environ["AVALON_LABEL"]
|
||||
|
||||
|
||||
self = sys.modules[__name__]
|
||||
self.menu = None
|
||||
|
||||
|
|
@ -26,7 +31,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
def __init__(self, *args, **kwargs):
|
||||
super(OpenPypeMenu, self).__init__(*args, **kwargs)
|
||||
|
||||
self.setObjectName("OpenPypeMenu")
|
||||
self.setObjectName(f"{MENU_LABEL}Menu")
|
||||
|
||||
icon_path = get_openpype_icon_filepath()
|
||||
icon = QtGui.QIcon(icon_path)
|
||||
|
|
@ -41,7 +46,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
| QtCore.Qt.WindowStaysOnTopHint
|
||||
)
|
||||
self.render_mode_widget = None
|
||||
self.setWindowTitle("OpenPype")
|
||||
self.setWindowTitle(MENU_LABEL)
|
||||
|
||||
asset_label = QtWidgets.QLabel("Context", self)
|
||||
asset_label.setStyleSheet(
|
||||
|
|
|
|||
60
openpype/hosts/fusion/deploy/ayon/Config/menu.fu
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
{
|
||||
Action
|
||||
{
|
||||
ID = "AYON_Menu",
|
||||
Category = "AYON",
|
||||
Name = "AYON Menu",
|
||||
|
||||
Targets =
|
||||
{
|
||||
Composition =
|
||||
{
|
||||
Execute = _Lua [=[
|
||||
local scriptPath = app:MapPath("AYON:../MenuScripts/launch_menu.py")
|
||||
if bmd.fileexists(scriptPath) == false then
|
||||
print("[AYON Error] Can't run file: " .. scriptPath)
|
||||
else
|
||||
target:RunScript(scriptPath)
|
||||
end
|
||||
]=],
|
||||
},
|
||||
},
|
||||
},
|
||||
Action
|
||||
{
|
||||
ID = "AYON_Install_PySide2",
|
||||
Category = "AYON",
|
||||
Name = "Install PySide2",
|
||||
|
||||
Targets =
|
||||
{
|
||||
Composition =
|
||||
{
|
||||
Execute = _Lua [=[
|
||||
local scriptPath = app:MapPath("AYON:../MenuScripts/install_pyside2.py")
|
||||
if bmd.fileexists(scriptPath) == false then
|
||||
print("[AYON Error] Can't run file: " .. scriptPath)
|
||||
else
|
||||
target:RunScript(scriptPath)
|
||||
end
|
||||
]=],
|
||||
},
|
||||
},
|
||||
},
|
||||
Menus
|
||||
{
|
||||
Target = "ChildFrame",
|
||||
|
||||
Before "Help"
|
||||
{
|
||||
Sub "AYON"
|
||||
{
|
||||
"AYON_Menu{}",
|
||||
"_",
|
||||
Sub "Admin" {
|
||||
"AYON_Install_PySide2{}"
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
19
openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
{
|
||||
Locked = true,
|
||||
Global = {
|
||||
Paths = {
|
||||
Map = {
|
||||
["AYON:"] = "$(OPENPYPE_FUSION)/deploy/ayon",
|
||||
["Config:"] = "UserPaths:Config;AYON:Config",
|
||||
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
|
||||
},
|
||||
},
|
||||
Script = {
|
||||
PythonVersion = 3,
|
||||
Python3Forced = true
|
||||
},
|
||||
UserInterface = {
|
||||
Language = "en_US"
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -10,7 +10,7 @@
|
|||
Composition =
|
||||
{
|
||||
Execute = _Lua [=[
|
||||
local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py")
|
||||
local scriptPath = app:MapPath("OpenPype:../MenuScripts/launch_menu.py")
|
||||
if bmd.fileexists(scriptPath) == false then
|
||||
print("[OpenPype Error] Can't run file: " .. scriptPath)
|
||||
else
|
||||
|
|
@ -31,7 +31,7 @@
|
|||
Composition =
|
||||
{
|
||||
Execute = _Lua [=[
|
||||
local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py")
|
||||
local scriptPath = app:MapPath("OpenPype:../MenuScripts/install_pyside2.py")
|
||||
if bmd.fileexists(scriptPath) == false then
|
||||
print("[OpenPype Error] Can't run file: " .. scriptPath)
|
||||
else
|
||||
|
|
@ -3,7 +3,7 @@ Locked = true,
|
|||
Global = {
|
||||
Paths = {
|
||||
Map = {
|
||||
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
|
||||
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy/openpype",
|
||||
["Config:"] = "UserPaths:Config;OpenPype:Config",
|
||||
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
|
||||
},
|
||||
|
|
@ -2,6 +2,7 @@ import os
|
|||
import shutil
|
||||
import platform
|
||||
from pathlib import Path
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
from openpype.hosts.fusion import (
|
||||
FUSION_HOST_DIR,
|
||||
FUSION_VERSIONS_DICT,
|
||||
|
|
@ -161,6 +162,13 @@ class FusionCopyPrefsPrelaunch(PreLaunchHook):
|
|||
# profile directory variables to customize Fusion
|
||||
# to define where it can read custom scripts and tools from
|
||||
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
|
||||
master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
|
||||
|
||||
if AYON_SERVER_ENABLED:
|
||||
master_prefs = Path(
|
||||
FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs")
|
||||
else:
|
||||
master_prefs = Path(
|
||||
FUSION_HOST_DIR, "deploy", "openpype", "fusion_shared.prefs")
|
||||
|
||||
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
|
||||
self.launch_context.env[master_prefs_variable] = str(master_prefs)
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ from openpype.pipeline import (
|
|||
legacy_io,
|
||||
Creator as NewCreator,
|
||||
CreatedInstance,
|
||||
Anatomy
|
||||
Anatomy,
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -27,28 +27,21 @@ class CreateSaver(NewCreator):
|
|||
description = "Fusion Saver to generate image sequence"
|
||||
icon = "fa5.eye"
|
||||
|
||||
instance_attributes = [
|
||||
"reviewable"
|
||||
]
|
||||
instance_attributes = ["reviewable"]
|
||||
image_format = "exr"
|
||||
|
||||
# TODO: This should be renamed together with Nuke so it is aligned
|
||||
temp_rendering_path_template = (
|
||||
"{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}")
|
||||
"{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}"
|
||||
)
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
self.pass_pre_attributes_to_instance(
|
||||
instance_data,
|
||||
pre_create_data
|
||||
self.pass_pre_attributes_to_instance(instance_data, pre_create_data)
|
||||
|
||||
instance_data.update(
|
||||
{"id": "pyblish.avalon.instance", "subset": subset_name}
|
||||
)
|
||||
|
||||
instance_data.update({
|
||||
"id": "pyblish.avalon.instance",
|
||||
"subset": subset_name
|
||||
})
|
||||
|
||||
# TODO: Add pre_create attributes to choose file format?
|
||||
file_format = "OpenEXRFormat"
|
||||
|
||||
comp = get_current_comp()
|
||||
with comp_lock_and_undo_chunk(comp):
|
||||
args = (-32768, -32768) # Magical position numbers
|
||||
|
|
@ -56,19 +49,6 @@ class CreateSaver(NewCreator):
|
|||
|
||||
self._update_tool_with_data(saver, data=instance_data)
|
||||
|
||||
saver["OutputFormat"] = file_format
|
||||
|
||||
# Check file format settings are available
|
||||
if saver[file_format] is None:
|
||||
raise RuntimeError(
|
||||
f"File format is not set to {file_format}, this is a bug"
|
||||
)
|
||||
|
||||
# Set file format attributes
|
||||
saver[file_format]["Depth"] = 0 # Auto | float16 | float32
|
||||
# TODO Is this needed?
|
||||
saver[file_format]["SaveAlpha"] = 1
|
||||
|
||||
# Register the CreatedInstance
|
||||
instance = CreatedInstance(
|
||||
family=self.family,
|
||||
|
|
@ -140,8 +120,15 @@ class CreateSaver(NewCreator):
|
|||
return
|
||||
|
||||
original_subset = tool.GetData("openpype.subset")
|
||||
original_format = tool.GetData(
|
||||
"openpype.creator_attributes.image_format"
|
||||
)
|
||||
|
||||
subset = data["subset"]
|
||||
if original_subset != subset:
|
||||
if (
|
||||
original_subset != subset
|
||||
or original_format != data["creator_attributes"]["image_format"]
|
||||
):
|
||||
self._configure_saver_tool(data, tool, subset)
|
||||
|
||||
def _configure_saver_tool(self, data, tool, subset):
|
||||
|
|
@ -151,17 +138,17 @@ class CreateSaver(NewCreator):
|
|||
anatomy = Anatomy()
|
||||
frame_padding = anatomy.templates["frame_padding"]
|
||||
|
||||
# get output format
|
||||
ext = data["creator_attributes"]["image_format"]
|
||||
|
||||
# Subset change detected
|
||||
workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
|
||||
formatting_data.update({
|
||||
"workdir": workdir,
|
||||
"frame": "0" * frame_padding,
|
||||
"ext": "exr"
|
||||
})
|
||||
formatting_data.update(
|
||||
{"workdir": workdir, "frame": "0" * frame_padding, "ext": ext}
|
||||
)
|
||||
|
||||
# build file path to render
|
||||
filepath = self.temp_rendering_path_template.format(
|
||||
**formatting_data)
|
||||
filepath = self.temp_rendering_path_template.format(**formatting_data)
|
||||
|
||||
comp = get_current_comp()
|
||||
tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))
|
||||
|
|
@ -201,7 +188,8 @@ class CreateSaver(NewCreator):
|
|||
attr_defs = [
|
||||
self._get_render_target_enum(),
|
||||
self._get_reviewable_bool(),
|
||||
self._get_frame_range_enum()
|
||||
self._get_frame_range_enum(),
|
||||
self._get_image_format_enum(),
|
||||
]
|
||||
return attr_defs
|
||||
|
||||
|
|
@ -209,11 +197,7 @@ class CreateSaver(NewCreator):
|
|||
"""Settings for publish page"""
|
||||
return self.get_pre_create_attr_defs()
|
||||
|
||||
def pass_pre_attributes_to_instance(
|
||||
self,
|
||||
instance_data,
|
||||
pre_create_data
|
||||
):
|
||||
def pass_pre_attributes_to_instance(self, instance_data, pre_create_data):
|
||||
creator_attrs = instance_data["creator_attributes"] = {}
|
||||
for pass_key in pre_create_data.keys():
|
||||
creator_attrs[pass_key] = pre_create_data[pass_key]
|
||||
|
|
@ -236,13 +220,13 @@ class CreateSaver(NewCreator):
|
|||
frame_range_options = {
|
||||
"asset_db": "Current asset context",
|
||||
"render_range": "From render in/out",
|
||||
"comp_range": "From composition timeline"
|
||||
"comp_range": "From composition timeline",
|
||||
}
|
||||
|
||||
return EnumDef(
|
||||
"frame_range_source",
|
||||
items=frame_range_options,
|
||||
label="Frame range source"
|
||||
label="Frame range source",
|
||||
)
|
||||
|
||||
def _get_reviewable_bool(self):
|
||||
|
|
@ -252,20 +236,33 @@ class CreateSaver(NewCreator):
|
|||
label="Review",
|
||||
)
|
||||
|
||||
def _get_image_format_enum(self):
|
||||
image_format_options = ["exr", "tga", "tif", "png", "jpg"]
|
||||
return EnumDef(
|
||||
"image_format",
|
||||
items=image_format_options,
|
||||
default=self.image_format,
|
||||
label="Output Image Format",
|
||||
)
|
||||
|
||||
def apply_settings(self, project_settings):
|
||||
"""Method called on initialization of plugin to apply settings."""
|
||||
|
||||
# plugin settings
|
||||
plugin_settings = (
|
||||
project_settings["fusion"]["create"][self.__class__.__name__]
|
||||
)
|
||||
plugin_settings = project_settings["fusion"]["create"][
|
||||
self.__class__.__name__
|
||||
]
|
||||
|
||||
# individual attributes
|
||||
self.instance_attributes = plugin_settings.get(
|
||||
"instance_attributes") or self.instance_attributes
|
||||
self.default_variants = plugin_settings.get(
|
||||
"default_variants") or self.default_variants
|
||||
self.temp_rendering_path_template = (
|
||||
plugin_settings.get("temp_rendering_path_template")
|
||||
or self.temp_rendering_path_template
|
||||
"instance_attributes", self.instance_attributes
|
||||
)
|
||||
self.default_variants = plugin_settings.get(
|
||||
"default_variants", self.default_variants
|
||||
)
|
||||
self.temp_rendering_path_template = plugin_settings.get(
|
||||
"temp_rendering_path_template", self.temp_rendering_path_template
|
||||
)
|
||||
self.image_format = plugin_settings.get(
|
||||
"image_format", self.image_format
|
||||
)
|
||||
|
|
|
|||
|
|
@ -25,20 +25,24 @@ def enabled_savers(comp, savers):
|
|||
"""
|
||||
passthrough_key = "TOOLB_PassThrough"
|
||||
original_states = {}
|
||||
enabled_save_names = {saver.Name for saver in savers}
|
||||
enabled_saver_names = {saver.Name for saver in savers}
|
||||
|
||||
all_savers = comp.GetToolList(False, "Saver").values()
|
||||
savers_by_name = {saver.Name: saver for saver in all_savers}
|
||||
|
||||
try:
|
||||
all_savers = comp.GetToolList(False, "Saver").values()
|
||||
for saver in all_savers:
|
||||
original_state = saver.GetAttrs()[passthrough_key]
|
||||
original_states[saver] = original_state
|
||||
original_states[saver.Name] = original_state
|
||||
|
||||
# The passthrough state we want to set (passthrough != enabled)
|
||||
state = saver.Name not in enabled_save_names
|
||||
state = saver.Name not in enabled_saver_names
|
||||
if state != original_state:
|
||||
saver.SetAttrs({passthrough_key: state})
|
||||
yield
|
||||
finally:
|
||||
for saver, original_state in original_states.items():
|
||||
for saver_name, original_state in original_states.items():
|
||||
saver = savers_by_name[saver_name]
|
||||
saver.SetAttrs({"TOOLB_PassThrough": original_state})
|
||||
|
||||
|
||||
|
|
@ -142,11 +146,15 @@ class FusionRenderLocal(
|
|||
|
||||
staging_dir = os.path.dirname(path)
|
||||
|
||||
files = [os.path.basename(f) for f in expected_files]
|
||||
if len(expected_files) == 1:
|
||||
files = files[0]
|
||||
|
||||
repre = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"frameStart": f"%0{padding}d" % start,
|
||||
"files": [os.path.basename(f) for f in expected_files],
|
||||
"files": files,
|
||||
"stagingDir": staging_dir,
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ var LD_OPENHARMONY_PATH = System.getenv('LIB_OPENHARMONY_PATH');
|
|||
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH + '/openHarmony.js';
|
||||
LD_OPENHARMONY_PATH = LD_OPENHARMONY_PATH.replace(/\\/g, "/");
|
||||
include(LD_OPENHARMONY_PATH);
|
||||
this.__proto__['$'] = $;
|
||||
//this.__proto__['$'] = $;
|
||||
|
||||
function Client() {
|
||||
var self = this;
|
||||
|
|
|
|||
|
|
@ -59,8 +59,8 @@ class ExtractRender(pyblish.api.InstancePlugin):
|
|||
|
||||
args = [application_path, "-batch",
|
||||
"-frames", str(frame_start), str(frame_end),
|
||||
"-scene", scene_path]
|
||||
self.log.info(f"running [ {application_path} {' '.join(args)}")
|
||||
scene_path]
|
||||
self.log.info(f"running: {' '.join(args)}")
|
||||
proc = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.PIPE,
|
||||
|
|
|
|||
|
|
@ -95,18 +95,18 @@ def menu_install():
|
|||
|
||||
menu.addSeparator()
|
||||
|
||||
publish_action = menu.addAction("Publish...")
|
||||
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
|
||||
publish_action.triggered.connect(
|
||||
lambda *args: publish(hiero.ui.mainWindow())
|
||||
)
|
||||
|
||||
creator_action = menu.addAction("Create...")
|
||||
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
|
||||
creator_action.triggered.connect(
|
||||
lambda: host_tools.show_creator(parent=main_window)
|
||||
)
|
||||
|
||||
publish_action = menu.addAction("Publish...")
|
||||
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
|
||||
publish_action.triggered.connect(
|
||||
lambda *args: publish(hiero.ui.mainWindow())
|
||||
)
|
||||
|
||||
loader_action = menu.addAction("Load...")
|
||||
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
|
||||
loader_action.triggered.connect(
|
||||
|
|
|
|||
|
|
@ -121,8 +121,8 @@ def get_id_required_nodes():
|
|||
return list(nodes)
|
||||
|
||||
|
||||
def get_output_parameter(node):
|
||||
"""Return the render output parameter name of the given node
|
||||
def get_export_parameter(node):
|
||||
"""Return the export output parameter of the given node
|
||||
|
||||
Example:
|
||||
root = hou.node("/obj")
|
||||
|
|
@ -137,13 +137,70 @@ def get_output_parameter(node):
|
|||
hou.Parm
|
||||
|
||||
"""
|
||||
node_type = node.type().description()
|
||||
|
||||
node_type = node.type().name()
|
||||
if node_type == "geometry":
|
||||
# Ensures the proper Take is selected for each ROP to retrieve the correct
|
||||
# ifd
|
||||
try:
|
||||
rop_take = hou.takes.findTake(node.parm("take").eval())
|
||||
if rop_take is not None:
|
||||
hou.takes.setCurrentTake(rop_take)
|
||||
except AttributeError:
|
||||
# hou object doesn't always have the 'takes' attribute
|
||||
pass
|
||||
|
||||
if node_type == "Mantra" and node.parm("soho_outputmode").eval():
|
||||
return node.parm("soho_diskfile")
|
||||
elif node_type == "Alfred":
|
||||
return node.parm("alf_diskfile")
|
||||
elif (node_type == "RenderMan" or node_type == "RenderMan RIS"):
|
||||
pre_ris22 = node.parm("rib_outputmode") and \
|
||||
node.parm("rib_outputmode").eval()
|
||||
ris22 = node.parm("diskfile") and node.parm("diskfile").eval()
|
||||
if pre_ris22 or ris22:
|
||||
return node.parm("soho_diskfile")
|
||||
elif node_type == "Redshift" and node.parm("RS_archive_enable").eval():
|
||||
return node.parm("RS_archive_file")
|
||||
elif node_type == "Wedge" and node.parm("driver").eval():
|
||||
return get_export_parameter(node.node(node.parm("driver").eval()))
|
||||
elif node_type == "Arnold":
|
||||
return node.parm("ar_ass_file")
|
||||
elif node_type == "Alembic" and node.parm("use_sop_path").eval():
|
||||
return node.parm("sop_path")
|
||||
elif node_type == "Shotgun Mantra" and node.parm("soho_outputmode").eval():
|
||||
return node.parm("sgtk_soho_diskfile")
|
||||
elif node_type == "Shotgun Alembic" and node.parm("use_sop_path").eval():
|
||||
return node.parm("sop_path")
|
||||
elif node.type().nameWithCategory() == "Driver/vray_renderer":
|
||||
return node.parm("render_export_filepath")
|
||||
|
||||
raise TypeError("Node type '%s' not supported" % node_type)
|
||||
|
||||
|
||||
def get_output_parameter(node):
|
||||
"""Return the render output parameter of the given node
|
||||
|
||||
Example:
|
||||
root = hou.node("/obj")
|
||||
my_alembic_node = root.createNode("alembic")
|
||||
get_output_parameter(my_alembic_node)
|
||||
# Result: "output"
|
||||
|
||||
Args:
|
||||
node(hou.Node): node instance
|
||||
|
||||
Returns:
|
||||
hou.Parm
|
||||
|
||||
"""
|
||||
node_type = node.type().description()
|
||||
category = node.type().category().name()
|
||||
|
||||
# Figure out which type of node is being rendered
|
||||
if node_type == "Geometry" or node_type == "Filmbox FBX" or \
|
||||
(node_type == "ROP Output Driver" and category == "Sop"):
|
||||
return node.parm("sopoutput")
|
||||
elif node_type == "alembic":
|
||||
return node.parm("filename")
|
||||
elif node_type == "comp":
|
||||
elif node_type == "Composite":
|
||||
return node.parm("copoutput")
|
||||
elif node_type == "opengl":
|
||||
return node.parm("picture")
|
||||
|
|
@ -155,6 +212,15 @@ def get_output_parameter(node):
|
|||
elif node_type == "ifd":
|
||||
if node.evalParm("soho_outputmode"):
|
||||
return node.parm("soho_diskfile")
|
||||
elif node_type == "Octane":
|
||||
return node.parm("HO_img_fileName")
|
||||
elif node_type == "Fetch":
|
||||
inner_node = node.node(node.parm("source").eval())
|
||||
if inner_node:
|
||||
return get_output_parameter(inner_node)
|
||||
elif node.type().nameWithCategory() == "Driver/vray_renderer":
|
||||
return node.parm("SettingsOutput_img_file_path")
|
||||
|
||||
raise TypeError("Node type '%s' not supported" % node_type)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -13,6 +13,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
|
|||
# Default extension
|
||||
ext = "exr"
|
||||
|
||||
# Default to split export and render jobs
|
||||
export_job = True
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
import hou
|
||||
|
||||
|
|
@ -48,6 +51,15 @@ class CreateArnoldRop(plugin.HoudiniCreator):
|
|||
"ar_exr_half_precision": 1 # half precision
|
||||
}
|
||||
|
||||
if pre_create_data.get("export_job"):
|
||||
ass_filepath = \
|
||||
"{export_dir}{subset_name}/{subset_name}.$F4.ass".format(
|
||||
export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
|
||||
subset_name=subset_name,
|
||||
)
|
||||
parms["ar_ass_export_enable"] = 1
|
||||
parms["ar_ass_file"] = ass_filepath
|
||||
|
||||
instance_node.setParms(parms)
|
||||
|
||||
# Lock any parameters in this list
|
||||
|
|
@ -66,6 +78,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
|
|||
BoolDef("farm",
|
||||
label="Submitting to Farm",
|
||||
default=True),
|
||||
BoolDef("export_job",
|
||||
label="Split export and render jobs",
|
||||
default=self.export_job),
|
||||
EnumDef("image_format",
|
||||
image_format_enum,
|
||||
default=self.ext,
|
||||
|
|
|
|||
|
|
@ -12,6 +12,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
|
|||
family = "mantra_rop"
|
||||
icon = "magic"
|
||||
|
||||
# Default to split export and render jobs
|
||||
export_job = True
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
import hou # noqa
|
||||
|
||||
|
|
@ -44,6 +47,15 @@ class CreateMantraROP(plugin.HoudiniCreator):
|
|||
"vm_picture": filepath,
|
||||
}
|
||||
|
||||
if pre_create_data.get("export_job"):
|
||||
ifd_filepath = \
|
||||
"{export_dir}{subset_name}/{subset_name}.$F4.ifd".format(
|
||||
export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
|
||||
subset_name=subset_name,
|
||||
)
|
||||
parms["soho_outputmode"] = 1
|
||||
parms["soho_diskfile"] = ifd_filepath
|
||||
|
||||
if self.selected_nodes:
|
||||
# If camera found in selection
|
||||
# we will use as render camera
|
||||
|
|
@ -78,6 +90,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
|
|||
BoolDef("farm",
|
||||
label="Submitting to Farm",
|
||||
default=True),
|
||||
BoolDef("export_job",
|
||||
label="Split export and render jobs",
|
||||
default=self.export_job),
|
||||
EnumDef("image_format",
|
||||
image_format_enum,
|
||||
default="exr",
|
||||
|
|
|
|||
|
|
@ -16,6 +16,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
|
|||
icon = "magic"
|
||||
ext = "exr"
|
||||
|
||||
# Default to split export and render jobs
|
||||
export_job = True
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
|
||||
instance_data.pop("active", None)
|
||||
|
|
@ -52,6 +55,17 @@ class CreateVrayROP(plugin.HoudiniCreator):
|
|||
"SettingsEXR_bits_per_channel": "16" # half precision
|
||||
}
|
||||
|
||||
if pre_create_data.get("export_job"):
|
||||
scene_filepath = \
|
||||
"{export_dir}{subset_name}/{subset_name}.$F4.vrscene".format(
|
||||
export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
|
||||
subset_name=subset_name,
|
||||
)
|
||||
# Setting render_export_mode to "2" because that's for
|
||||
# "Export only" ("1" is for "Export & Render")
|
||||
parms["render_export_mode"] = "2"
|
||||
parms["render_export_filepath"] = scene_filepath
|
||||
|
||||
if self.selected_nodes:
|
||||
# set up the render camera from the selected node
|
||||
camera = None
|
||||
|
|
@ -140,6 +154,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
|
|||
BoolDef("farm",
|
||||
label="Submitting to Farm",
|
||||
default=True),
|
||||
BoolDef("export_job",
|
||||
label="Split export and render jobs",
|
||||
default=self.export_job),
|
||||
EnumDef("image_format",
|
||||
image_format_enum,
|
||||
default=self.ext,
|
||||
|
|
|
|||
|
|
@ -40,6 +40,25 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
|
|||
default_prefix = evalParmNoFrame(rop, "ar_picture")
|
||||
render_products = []
|
||||
|
||||
# Store whether we are splitting the render job (export + render)
|
||||
export_job = bool(rop.parm("ar_ass_export_enable").eval())
|
||||
instance.data["exportJob"] = export_job
|
||||
export_prefix = None
|
||||
export_products = []
|
||||
if export_job:
|
||||
export_prefix = evalParmNoFrame(
|
||||
rop, "ar_ass_file", pad_character="0"
|
||||
)
|
||||
beauty_export_product = self.get_render_product_name(
|
||||
prefix=export_prefix,
|
||||
suffix=None)
|
||||
export_products.append(beauty_export_product)
|
||||
self.log.debug(
|
||||
"Found export product: {}".format(beauty_export_product)
|
||||
)
|
||||
instance.data["ifdFile"] = beauty_export_product
|
||||
instance.data["exportFiles"] = list(export_products)
|
||||
|
||||
# Default beauty AOV
|
||||
beauty_product = self.get_render_product_name(prefix=default_prefix,
|
||||
suffix=None)
|
||||
|
|
|
|||
|
|
@ -14,18 +14,13 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
|
|||
hosts = ["houdini"]
|
||||
targets = ["local", "remote"]
|
||||
label = "Collect Chunk Size"
|
||||
chunkSize = 999999
|
||||
chunk_size = 999999
|
||||
|
||||
def process(self, instance):
|
||||
# need to get the chunk size info from the setting
|
||||
attr_values = self.get_attr_values_from_data(instance.data)
|
||||
instance.data["chunkSize"] = attr_values.get("chunkSize")
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, project_settings):
|
||||
project_setting = project_settings["houdini"]["publish"]["CollectChunkSize"] # noqa
|
||||
cls.chunkSize = project_setting["chunk_size"]
|
||||
|
||||
@classmethod
|
||||
def get_attribute_defs(cls):
|
||||
return [
|
||||
|
|
@ -33,7 +28,6 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
|
|||
minimum=1,
|
||||
maximum=999999,
|
||||
decimals=0,
|
||||
default=cls.chunkSize,
|
||||
default=cls.chunk_size,
|
||||
label="Frame Per Task")
|
||||
|
||||
]
|
||||
|
|
|
|||
|
|
@ -44,6 +44,25 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
|
|||
default_prefix = evalParmNoFrame(rop, "vm_picture")
|
||||
render_products = []
|
||||
|
||||
# Store whether we are splitting the render job (export + render)
|
||||
export_job = bool(rop.parm("soho_outputmode").eval())
|
||||
instance.data["exportJob"] = export_job
|
||||
export_prefix = None
|
||||
export_products = []
|
||||
if export_job:
|
||||
export_prefix = evalParmNoFrame(
|
||||
rop, "soho_diskfile", pad_character="0"
|
||||
)
|
||||
beauty_export_product = self.get_render_product_name(
|
||||
prefix=export_prefix,
|
||||
suffix=None)
|
||||
export_products.append(beauty_export_product)
|
||||
self.log.debug(
|
||||
"Found export product: {}".format(beauty_export_product)
|
||||
)
|
||||
instance.data["ifdFile"] = beauty_export_product
|
||||
instance.data["exportFiles"] = list(export_products)
|
||||
|
||||
# Default beauty AOV
|
||||
beauty_product = self.get_render_product_name(
|
||||
prefix=default_prefix, suffix=None
|
||||
|
|
|
|||
|
|
@ -45,7 +45,26 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
|
|||
render_products = []
|
||||
# TODO: add render elements if render element
|
||||
|
||||
beauty_product = self.get_beauty_render_product(default_prefix)
|
||||
# Store whether we are splitting the render job in an export + render
|
||||
export_job = rop.parm("render_export_mode").eval() == "2"
|
||||
instance.data["exportJob"] = export_job
|
||||
export_prefix = None
|
||||
export_products = []
|
||||
if export_job:
|
||||
export_prefix = evalParmNoFrame(
|
||||
rop, "render_export_filepath", pad_character="0"
|
||||
)
|
||||
beauty_export_product = self.get_render_product_name(
|
||||
prefix=export_prefix,
|
||||
suffix=None)
|
||||
export_products.append(beauty_export_product)
|
||||
self.log.debug(
|
||||
"Found export product: {}".format(beauty_export_product)
|
||||
)
|
||||
instance.data["ifdFile"] = beauty_export_product
|
||||
instance.data["exportFiles"] = list(export_products)
|
||||
|
||||
beauty_product = self.get_render_product_name(default_prefix)
|
||||
render_products.append(beauty_product)
|
||||
files_by_aov = {
|
||||
"RGB Color": self.generate_expected_files(instance,
|
||||
|
|
@ -79,7 +98,7 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
|
|||
instance.data["colorspaceDisplay"] = colorspace_data["display"]
|
||||
instance.data["colorspaceView"] = colorspace_data["view"]
|
||||
|
||||
def get_beauty_render_product(self, prefix, suffix="<reName>"):
|
||||
def get_render_product_name(self, prefix, suffix="<reName>"):
|
||||
"""Return the beauty output filename if render element enabled
|
||||
"""
|
||||
# Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix`
|
||||
|
|
|
|||
|
|
@ -2,10 +2,12 @@
|
|||
"""OpenPype startup script."""
|
||||
from openpype.pipeline import install_host
|
||||
from openpype.hosts.houdini.api import HoudiniHost
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
|
||||
|
||||
def main():
|
||||
print("Installing OpenPype ...")
|
||||
print("Installing {} ...".format(
|
||||
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
|
||||
install_host(HoudiniHost())
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -2,10 +2,12 @@
|
|||
"""OpenPype startup script."""
|
||||
from openpype.pipeline import install_host
|
||||
from openpype.hosts.houdini.api import HoudiniHost
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
|
||||
|
||||
def main():
|
||||
print("Installing OpenPype ...")
|
||||
print("Installing {} ...".format(
|
||||
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
|
||||
install_host(HoudiniHost())
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -2,10 +2,12 @@
|
|||
"""OpenPype startup script."""
|
||||
from openpype.pipeline import install_host
|
||||
from openpype.hosts.houdini.api import HoudiniHost
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
|
||||
|
||||
def main():
|
||||
print("Installing OpenPype ...")
|
||||
print("Installing {} ...".format(
|
||||
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
|
||||
install_host(HoudiniHost())
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -2,10 +2,12 @@
|
|||
"""OpenPype startup script."""
|
||||
from openpype.pipeline import install_host
|
||||
from openpype.hosts.houdini.api import HoudiniHost
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
|
||||
|
||||
def main():
|
||||
print("Installing OpenPype ...")
|
||||
print("Installing {} ...".format(
|
||||
"AYON" if AYON_SERVER_ENABLED else "OpenPype"))
|
||||
install_host(HoudiniHost())
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -511,3 +511,20 @@ def render_resolution(width, height):
|
|||
finally:
|
||||
rt.renderWidth = current_renderWidth
|
||||
rt.renderHeight = current_renderHeight
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def suspended_refresh():
|
||||
"""Suspended refresh for scene and modify panel redraw.
|
||||
"""
|
||||
if is_headless():
|
||||
yield
|
||||
return
|
||||
rt.disableSceneRedraw()
|
||||
rt.suspendEditing()
|
||||
try:
|
||||
yield
|
||||
|
||||
finally:
|
||||
rt.enableSceneRedraw()
|
||||
rt.resumeEditing()
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""3dsmax menu definition of OpenPype."""
|
||||
"""3dsmax menu definition of AYON."""
|
||||
import os
|
||||
from qtpy import QtWidgets, QtCore
|
||||
from pymxs import runtime as rt
|
||||
|
||||
|
|
@ -8,7 +9,7 @@ from openpype.hosts.max.api import lib
|
|||
|
||||
|
||||
class OpenPypeMenu(object):
|
||||
"""Object representing OpenPype menu.
|
||||
"""Object representing OpenPype/AYON menu.
|
||||
|
||||
This is using "hack" to inject itself before "Help" menu of 3dsmax.
|
||||
For some reason `postLoadingMenus` event doesn't fire, and main menu
|
||||
|
|
@ -50,17 +51,17 @@ class OpenPypeMenu(object):
|
|||
return list(self.main_widget.findChildren(QtWidgets.QMenuBar))[0]
|
||||
|
||||
def get_or_create_openpype_menu(
|
||||
self, name: str = "&OpenPype",
|
||||
self, name: str = "&Openpype",
|
||||
before: str = "&Help") -> QtWidgets.QAction:
|
||||
"""Create OpenPype menu.
|
||||
"""Create AYON menu.
|
||||
|
||||
Args:
|
||||
name (str, Optional): OpenPypep menu name.
|
||||
name (str, Optional): AYON menu name.
|
||||
before (str, Optional): Name of the 3dsmax main menu item to
|
||||
add OpenPype menu before.
|
||||
add AYON menu before.
|
||||
|
||||
Returns:
|
||||
QtWidgets.QAction: OpenPype menu action.
|
||||
QtWidgets.QAction: AYON menu action.
|
||||
|
||||
"""
|
||||
if self.menu is not None:
|
||||
|
|
@ -77,15 +78,15 @@ class OpenPypeMenu(object):
|
|||
|
||||
if before in item.title():
|
||||
help_action = item.menuAction()
|
||||
|
||||
op_menu = QtWidgets.QMenu("&OpenPype")
|
||||
tab_menu_label = os.environ.get("AVALON_LABEL") or "AYON"
|
||||
op_menu = QtWidgets.QMenu("&{}".format(tab_menu_label))
|
||||
menu_bar.insertMenu(help_action, op_menu)
|
||||
|
||||
self.menu = op_menu
|
||||
return op_menu
|
||||
|
||||
def build_openpype_menu(self) -> QtWidgets.QAction:
|
||||
"""Build items in OpenPype menu."""
|
||||
"""Build items in AYON menu."""
|
||||
openpype_menu = self.get_or_create_openpype_menu()
|
||||
load_action = QtWidgets.QAction("Load...", openpype_menu)
|
||||
load_action.triggered.connect(self.load_callback)
|
||||
|
|
|
|||
|
|
@ -175,7 +175,7 @@ def containerise(name: str, nodes: list, context,
|
|||
|
||||
|
||||
def load_custom_attribute_data():
|
||||
"""Re-loading the Openpype/AYON custom parameter built by the creator
|
||||
"""Re-loading the AYON custom parameter built by the creator
|
||||
|
||||
Returns:
|
||||
attribute: re-loading the custom OP attributes set in Maxscript
|
||||
|
|
@ -213,7 +213,7 @@ def import_custom_attribute_data(container: str, selections: list):
|
|||
|
||||
|
||||
def update_custom_attribute_data(container: str, selections: list):
|
||||
"""Updating the Openpype/AYON custom parameter built by the creator
|
||||
"""Updating the AYON custom parameter built by the creator
|
||||
|
||||
Args:
|
||||
container (str): target container which adds custom attributes
|
||||
|
|
|
|||
|
|
@ -198,8 +198,8 @@ def _render_preview_animation_max_pre_2024(
|
|||
res_width, res_height, filename=filepath
|
||||
)
|
||||
dib = rt.gw.getViewportDib()
|
||||
dib_width = rt.renderWidth
|
||||
dib_height = rt.renderHeight
|
||||
dib_width = float(dib.width)
|
||||
dib_height = float(dib.height)
|
||||
# aspect ratio
|
||||
viewportRatio = dib_width / dib_height
|
||||
renderRatio = float(res_width / res_height)
|
||||
|
|
|
|||
|
|
@ -39,45 +39,41 @@ Note:
|
|||
"""
|
||||
import os
|
||||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
from openpype.pipeline import publish, OptionalPyblishPluginMixin
|
||||
from pymxs import runtime as rt
|
||||
from openpype.hosts.max.api import maintained_selection
|
||||
from openpype.hosts.max.api.lib import suspended_refresh
|
||||
from openpype.lib import BoolDef
|
||||
|
||||
|
||||
class ExtractAlembic(publish.Extractor):
|
||||
class ExtractAlembic(publish.Extractor,
|
||||
OptionalPyblishPluginMixin):
|
||||
order = pyblish.api.ExtractorOrder
|
||||
label = "Extract Pointcache"
|
||||
hosts = ["max"]
|
||||
families = ["pointcache"]
|
||||
optional = True
|
||||
|
||||
def process(self, instance):
|
||||
start = instance.data["frameStartHandle"]
|
||||
end = instance.data["frameEndHandle"]
|
||||
|
||||
self.log.debug("Extracting pointcache ...")
|
||||
if not self.is_active(instance.data):
|
||||
return
|
||||
|
||||
parent_dir = self.staging_dir(instance)
|
||||
file_name = "{name}.abc".format(**instance.data)
|
||||
path = os.path.join(parent_dir, file_name)
|
||||
|
||||
# We run the render
|
||||
self.log.info("Writing alembic '%s' to '%s'" % (file_name, parent_dir))
|
||||
|
||||
rt.AlembicExport.ArchiveType = rt.name("ogawa")
|
||||
rt.AlembicExport.CoordinateSystem = rt.name("maya")
|
||||
rt.AlembicExport.StartFrame = start
|
||||
rt.AlembicExport.EndFrame = end
|
||||
|
||||
with maintained_selection():
|
||||
# select and export
|
||||
node_list = instance.data["members"]
|
||||
rt.Select(node_list)
|
||||
rt.exportFile(
|
||||
path,
|
||||
rt.name("noPrompt"),
|
||||
selectedOnly=True,
|
||||
using=rt.AlembicExport,
|
||||
)
|
||||
with suspended_refresh():
|
||||
self._set_abc_attributes(instance)
|
||||
with maintained_selection():
|
||||
# select and export
|
||||
node_list = instance.data["members"]
|
||||
rt.Select(node_list)
|
||||
rt.exportFile(
|
||||
path,
|
||||
rt.name("noPrompt"),
|
||||
selectedOnly=True,
|
||||
using=rt.AlembicExport,
|
||||
)
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
|
@ -89,3 +85,51 @@ class ExtractAlembic(publish.Extractor):
"stagingDir": parent_dir,
}
instance.data["representations"].append(representation)

def _set_abc_attributes(self, instance):
start = instance.data["frameStartHandle"]
end = instance.data["frameEndHandle"]
attr_values = self.get_attr_values_from_data(instance.data)
custom_attrs = attr_values.get("custom_attrs", False)
if not custom_attrs:
self.log.debug(
"No Custom Attributes included in this abc export...")
rt.AlembicExport.ArchiveType = rt.Name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.Name("maya")
rt.AlembicExport.StartFrame = start
rt.AlembicExport.EndFrame = end
rt.AlembicExport.CustomAttributes = custom_attrs

@classmethod
def get_attribute_defs(cls):
return [
BoolDef("custom_attrs",
label="Custom Attributes",
default=False),
]


class ExtractCameraAlembic(ExtractAlembic):
"""Extract Camera with AlembicExport."""

label = "Extract Alembic Camera"
families = ["camera"]


class ExtractModel(ExtractAlembic):
"""Extract Geometry in Alembic Format"""
label = "Extract Geometry (Alembic)"
families = ["model"]

def _set_abc_attributes(self, instance):
attr_values = self.get_attr_values_from_data(instance.data)
custom_attrs = attr_values.get("custom_attrs", False)
if not custom_attrs:
self.log.debug(
"No Custom Attributes included in this abc export...")
rt.AlembicExport.ArchiveType = rt.name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.name("maya")
rt.AlembicExport.CustomAttributes = custom_attrs
rt.AlembicExport.UVs = True
rt.AlembicExport.VertexColors = True
rt.AlembicExport.PreserveInstances = True

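For context on the new optional toggle: plugins that mix in `OptionalPyblishPluginMixin` expose `get_attribute_defs()` in the publisher UI, and the chosen values come back through `get_attr_values_from_data()`. A hypothetical sketch of that round trip; the `publish_attributes` wrapping is an assumption about the shape of `instance.data`, not confirmed by this diff:

```python
# Hypothetical plumbing: how the "custom_attrs" BoolDef value could travel
# from the publisher UI into _set_abc_attributes().
instance_data = {
    "publish_attributes": {
        "ExtractAlembic": {"custom_attrs": True},  # value set by the artist
    },
}

def get_attr_values_from_data(data, plugin_name="ExtractAlembic"):
    # Simplified stand-in for the mixin's helper.
    return data.get("publish_attributes", {}).get(plugin_name, {})

attr_values = get_attr_values_from_data(instance_data)
custom_attrs = attr_values.get("custom_attrs", False)
print(custom_attrs)  # True -> rt.AlembicExport.CustomAttributes = True
```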
@ -1,64 +0,0 @@
import os

import pyblish.api
from pymxs import runtime as rt

from openpype.hosts.max.api import maintained_selection
from openpype.pipeline import OptionalPyblishPluginMixin, publish


class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
"""Extract Camera with AlembicExport."""

order = pyblish.api.ExtractorOrder - 0.1
label = "Extract Alembic Camera"
hosts = ["max"]
families = ["camera"]
optional = True

def process(self, instance):
if not self.is_active(instance.data):
return
start = instance.data["frameStartHandle"]
end = instance.data["frameEndHandle"]

self.log.info("Extracting Camera ...")

stagingdir = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
path = os.path.join(stagingdir, filename)

# We run the render
self.log.info(f"Writing alembic '{filename}' to '{stagingdir}'")

rt.AlembicExport.ArchiveType = rt.Name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.Name("maya")
rt.AlembicExport.StartFrame = start
rt.AlembicExport.EndFrame = end
rt.AlembicExport.CustomAttributes = True

with maintained_selection():
# select and export
node_list = instance.data["members"]
rt.Select(node_list)
rt.ExportFile(
path,
rt.Name("noPrompt"),
selectedOnly=True,
using=rt.AlembicExport,
)

self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []

representation = {
"name": "abc",
"ext": "abc",
"files": filename,
"stagingDir": stagingdir,
"frameStart": start,
"frameEnd": end,
}
instance.data["representations"].append(representation)
self.log.info(f"Extracted instance '{instance.name}' to: {path}")

@ -20,13 +20,10 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
if not self.is_active(instance.data):
return

self.log.debug("Extracting Camera ...")
stagingdir = self.staging_dir(instance)
filename = "{name}.fbx".format(**instance.data)

filepath = os.path.join(stagingdir, filename)
self.log.info(f"Writing fbx file '{filename}' to '{filepath}'")

rt.FBXExporterSetParam("Animation", True)
rt.FBXExporterSetParam("Cameras", True)
rt.FBXExporterSetParam("AxisConversionMethod", "Animation")

@ -26,7 +26,6 @@ class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):
filename = "{name}.max".format(**instance.data)

max_path = os.path.join(stagingdir, filename)
self.log.info("Writing max file '%s' to '%s'" % (filename, max_path))

if "representations" not in instance.data:
instance.data["representations"] = []

@ -1,63 +0,0 @@
import os
import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection


class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin):
"""
Extract Geometry in Alembic Format
"""

order = pyblish.api.ExtractorOrder - 0.1
label = "Extract Geometry (Alembic)"
hosts = ["max"]
families = ["model"]
optional = True

def process(self, instance):
if not self.is_active(instance.data):
return

self.log.debug("Extracting Geometry ...")

stagingdir = self.staging_dir(instance)
filename = "{name}.abc".format(**instance.data)
filepath = os.path.join(stagingdir, filename)

# We run the render
self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir))

rt.AlembicExport.ArchiveType = rt.name("ogawa")
rt.AlembicExport.CoordinateSystem = rt.name("maya")
rt.AlembicExport.CustomAttributes = True
rt.AlembicExport.UVs = True
rt.AlembicExport.VertexColors = True
rt.AlembicExport.PreserveInstances = True

with maintained_selection():
# select and export
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
filepath,
rt.name("noPrompt"),
selectedOnly=True,
using=rt.AlembicExport,
)

self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []

representation = {
"name": "abc",
"ext": "abc",
"files": filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info(
"Extracted instance '%s' to: %s" % (instance.name, filepath)
)

@ -20,12 +20,9 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
if not self.is_active(instance.data):
return

self.log.debug("Extracting Geometry ...")

stagingdir = self.staging_dir(instance)
filename = "{name}.fbx".format(**instance.data)
filepath = os.path.join(stagingdir, filename)
self.log.info("Writing FBX '%s' to '%s'" % (filepath, stagingdir))

rt.FBXExporterSetParam("Animation", False)
rt.FBXExporterSetParam("Cameras", False)

@ -46,7 +43,6 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
using=rt.FBXEXP,
)

self.log.info("Performing Extraction ...")
if "representations" not in instance.data:
instance.data["representations"] = []

@ -3,6 +3,7 @@ import pyblish.api
from openpype.pipeline import publish, OptionalPyblishPluginMixin
from pymxs import runtime as rt
from openpype.hosts.max.api import maintained_selection
from openpype.hosts.max.api.lib import suspended_refresh
from openpype.pipeline.publish import KnownPublishError

@ -21,25 +22,21 @@ class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):
if not self.is_active(instance.data):
return

self.log.debug("Extracting Geometry ...")

stagingdir = self.staging_dir(instance)
filename = "{name}.obj".format(**instance.data)
filepath = os.path.join(stagingdir, filename)
self.log.info("Writing OBJ '%s' to '%s'" % (filepath, stagingdir))

self.log.info("Performing Extraction ...")
with maintained_selection():
# select and export
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
filepath,
rt.name("noPrompt"),
selectedOnly=True,
using=rt.ObjExp,
)

with suspended_refresh():
with maintained_selection():
# select and export
node_list = instance.data["members"]
rt.Select(node_list)
rt.exportFile(
filepath,
rt.name("noPrompt"),
selectedOnly=True,
using=rt.ObjExp,
)
if not os.path.exists(filepath):
raise KnownPublishError(
"File {} wasn't produced by 3ds max, please check the logs.")

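Reviewer note: the error string in this hunk keeps a bare `{}` placeholder that is never filled. Presumably the intent is to interpolate the path; a sketch of the likely intended call, using the names already in scope in the hunk:

```python
# Presumed intent (not what the hunk currently does): fill the placeholder.
if not os.path.exists(filepath):
    raise KnownPublishError(
        "File {} wasn't produced by 3ds max, please check the logs.".format(
            filepath))
```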
@ -1,9 +1,12 @@
import pyblish.api
from pymxs import runtime as rt
from openpype.pipeline import (
PublishValidationError,
OptionalPyblishPluginMixin
)
from pymxs import runtime as rt
from openpype.pipeline.publish import (
RepairAction,
PublishValidationError
)
from openpype.hosts.max.api.lib import reset_scene_resolution

@ -16,6 +19,7 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
hosts = ["max"]
label = "Validate Resolution Setting"
optional = True
actions = [RepairAction]

def process(self, instance):
if not self.is_active(instance.data):

@ -33,7 +33,7 @@ class ImportModelRender(InventoryAction):
)

def process(self, containers):
from maya import cmds
from maya import cmds  # noqa: F401

project_name = get_current_project_name()
for container in containers:

@ -66,7 +66,7 @@ class ImportModelRender(InventoryAction):
None
"""

from maya import cmds
from maya import cmds  # noqa: F401

project_name = get_current_project_name()
repre_docs = get_representations(

@ -85,12 +85,7 @@ class ImportModelRender(InventoryAction):
if scene_type_regex.fullmatch(repre_name):
look_repres.append(repre_doc)

# QUESTION should we care if there is more then one look
# representation? (since it's based on regex match)
look_repre = None
if look_repres:
look_repre = look_repres[0]

look_repre = look_repres[0] if look_repres else None
# QUESTION shouldn't be json representation validated too?
if not look_repre:
print("No model render sets for this model version..")

@ -9,7 +9,7 @@ from openpype.pipeline import (
)
from openpype.pipeline.load.utils import get_representation_path_from_context
from openpype.pipeline.colorspace import (
get_imageio_colorspace_from_filepath,
get_imageio_file_rules_colorspace_from_filepath,
get_imageio_config,
get_imageio_file_rules
)

@ -285,10 +285,10 @@ class FileNodeLoader(load.LoaderPlugin):
)

path = get_representation_path_from_context(context)
colorspace = get_imageio_colorspace_from_filepath(
path=path,
host_name=host_name,
project_name=project_name,
colorspace = get_imageio_file_rules_colorspace_from_filepath(
path,
host_name,
project_name,
config_data=config_data,
file_rules=file_rules,
project_settings=project_settings

@ -265,6 +265,7 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
class MayaUSDReferenceLoader(ReferenceLoader):
"""Reference USD file to native Maya nodes using MayaUSDImport reference"""

label = "Reference Maya USD"
families = ["usd"]
representations = ["usd"]
extensions = {"usd", "usda", "usdc"}

@ -45,11 +45,23 @@ FILE_NODES = {
"PxrTexture": "filename"
}

RENDER_SET_TYPES = [
"VRayDisplacement",
"VRayLightMesh",
"VRayObjectProperties",
"RedshiftObjectId",
"RedshiftMeshParameters",
]

# Keep only node types that actually exist
all_node_types = set(cmds.allNodeTypes())
for node_type in list(FILE_NODES.keys()):
if node_type not in all_node_types:
FILE_NODES.pop(node_type)

for node_type in RENDER_SET_TYPES:
if node_type not in all_node_types:
RENDER_SET_TYPES.remove(node_type)
del all_node_types

# Cache pixar dependency node types so we can perform a type lookup against it

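Reviewer note: calling `list.remove()` while iterating over the same list skips the element that follows each removal, so two adjacent unavailable render set types would leave the second one in place. A safer in-place filter with identical intent:

```python
# Safe variant: rebuild the list instead of mutating it during iteration.
RENDER_SET_TYPES[:] = [
    node_type for node_type in RENDER_SET_TYPES
    if node_type in all_node_types
]
```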
@ -69,9 +81,7 @@ def get_attributes(dictionary, attr, node=None):
else:
val = dictionary.get(attr, [])

if not isinstance(val, list):
return [val]
return val
return val if isinstance(val, list) else [val]


def get_look_attrs(node):

@ -106,7 +116,7 @@ def get_look_attrs(node):


def node_uses_image_sequence(node, node_path):
# type: (str) -> bool
# type: (str, str) -> bool
"""Return whether file node uses an image sequence or single image.

Determine if a node uses an image sequence or just a single image,

@ -114,6 +124,7 @@ def node_uses_image_sequence(node, node_path):

Args:
node (str): Name of the Maya node
node_path (str): The file path of the node

Returns:
bool: True if node uses an image sequence

@ -247,7 +258,7 @@ def get_file_node_files(node):

# For sequences get all files and filter to only existing files
result = []
for index, path in enumerate(paths):
for path in paths:
if node_uses_image_sequence(node, path):
glob_pattern = seq_to_glob(path)
result.extend(glob.glob(glob_pattern))

@ -358,6 +369,7 @@ class CollectLook(pyblish.api.InstancePlugin):
for attr in shader_attrs:
if cmds.attributeQuery(attr, node=look, exists=True):
existing_attrs.append("{}.{}".format(look, attr))

materials = cmds.listConnections(existing_attrs,
source=True,
destination=False) or []

@ -367,30 +379,32 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.debug("Found the following sets:\n{}".format(look_sets))
# Get the entire node chain of the look sets
# history = cmds.listHistory(look_sets, allConnections=True)
history = cmds.listHistory(materials, allConnections=True)
# if materials list is empty, listHistory() will crash with
# RuntimeError
history = set()
if materials:
history = set(
cmds.listHistory(materials, allConnections=True))

# Since we retrieved history only of the connected materials
# connected to the look sets above we now add direct history
# for some of the look sets directly
# handling render attribute sets
render_set_types = [
"VRayDisplacement",
"VRayLightMesh",
"VRayObjectProperties",
"RedshiftObjectId",
"RedshiftMeshParameters",
]
render_sets = cmds.ls(look_sets, type=render_set_types)
if render_sets:
history.extend(
cmds.listHistory(render_sets,
future=False,
pruneDagObjects=True)
or []
)

# Maya (at least 2024) crashes with Warning when render set type
# isn't available. cmds.ls() will return empty list
if RENDER_SET_TYPES:
render_sets = cmds.ls(look_sets, type=RENDER_SET_TYPES)
if render_sets:
history.update(
cmds.listHistory(render_sets,
future=False,
pruneDagObjects=True)
or []
)

# Ensure unique entries only
history = list(set(history))
history = list(history)

files = cmds.ls(history,
# It's important only node types are passed that

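The switch from a list to a set above also changes the accumulator API (`extend` becomes `update`), and the final `list(set(history))` pass becomes a plain `list(history)`. A compact model of that flow, with made-up node names for illustration:

```python
# Toy model of the history gathering above; node names are hypothetical.
history = set()
materials = ["lambert2"]  # may be empty; guarded before listHistory
if materials:
    # stand-in for cmds.listHistory(materials, allConnections=True)
    history.update(["lambert2", "file1", "place2dTexture1"])

render_sets = ["VRayDisplacement1"]  # cmds.ls(look_sets, type=RENDER_SET_TYPES)
if render_sets:
    history.update(["VRayDisplacement1", "displacementShader1"])

history = list(history)  # already unique; no extra set() pass needed
```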
@ -3,60 +3,76 @@ from maya import cmds
import pyblish.api
from openpype.pipeline.publish import (
ValidateContentsOrder,
RepairContextAction,
PublishValidationError
)


class ValidateLookDefaultShadersConnections(pyblish.api.InstancePlugin):
class ValidateLookDefaultShadersConnections(pyblish.api.ContextPlugin):
"""Validate default shaders in the scene have their default connections.

For example the lambert1 could potentially be disconnected from the
initialShadingGroup. As such it's not lambert1 that will be identified
as the default shader which can have unpredictable results.
For example the standardSurface1 or lambert1 (maya 2023 and before) could
potentially be disconnected from the initialShadingGroup. As such it's not
lambert1 that will be identified as the default shader which can have
unpredictable results.

To fix the default connections need to be made again. See the logs for
more details on which connections are missing.

"""

order = ValidateContentsOrder
order = pyblish.api.ValidatorOrder - 0.4999
families = ['look']
hosts = ['maya']
label = 'Look Default Shader Connections'
actions = [RepairContextAction]

# The default connections to check
DEFAULTS = [("initialShadingGroup.surfaceShader", "lambert1"),
("initialParticleSE.surfaceShader", "lambert1"),
("initialParticleSE.volumeShader", "particleCloud1")
]
DEFAULTS = {
"initialShadingGroup.surfaceShader": ["standardSurface1.outColor",
"lambert1.outColor"],
"initialParticleSE.surfaceShader": ["standardSurface1.outColor",
"lambert1.outColor"],
"initialParticleSE.volumeShader": ["particleCloud1.outColor"]
}

def process(self, instance):
def process(self, context):

# Ensure check is run only once. We don't use ContextPlugin because
# of a bug where the ContextPlugin will always be visible. Even when
# the family is not present in an instance.
key = "__validate_look_default_shaders_connections_checked"
context = instance.context
is_run = context.data.get(key, False)
if is_run:
return
else:
context.data[key] = True
if self.get_invalid():
raise PublishValidationError(
"Default shaders in your scene do not have their "
"default shader connections. Please repair them to continue."
)

@classmethod
def get_invalid(cls):

# Process as usual
invalid = list()
for plug, input_node in self.DEFAULTS:
for plug, valid_inputs in cls.DEFAULTS.items():
inputs = cmds.listConnections(plug,
source=True,
destination=False) or None

if not inputs or inputs[0] != input_node:
self.log.error("{0} is not connected to {1}. "
"This can result in unexpected behavior. "
"Please reconnect to continue.".format(
plug,
input_node))
destination=False,
plugs=True) or None
if not inputs or inputs[0] not in valid_inputs:
cls.log.error(
"{0} is not connected to {1}. This can result in "
"unexpected behavior. Please reconnect to continue."
"".format(plug, " or ".join(valid_inputs))
)
invalid.append(plug)

if invalid:
raise PublishValidationError("Invalid connections.")
return invalid

@classmethod
def repair(cls, context):
invalid = cls.get_invalid()
for plug in invalid:
valid_inputs = cls.DEFAULTS[plug]
for valid_input in valid_inputs:
if cmds.objExists(valid_input):
cls.log.info(
"Connecting {} -> {}".format(valid_input, plug)
)
cmds.connectAttr(valid_input, plug, force=True)
break

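A compact model of the check-and-repair logic this hunk introduces: for each default plug, accept any of the listed source plugs, and on repair connect the first accepted input that exists in the scene. This is a sketch of the pattern only, runnable inside Maya where `maya.cmds` is available:

```python
# Sketch of the validate/repair pattern above (Maya only).
from maya import cmds

DEFAULTS = {
    "initialShadingGroup.surfaceShader": [
        "standardSurface1.outColor", "lambert1.outColor"],
}

for plug, valid_inputs in DEFAULTS.items():
    inputs = cmds.listConnections(
        plug, source=True, destination=False, plugs=True) or []
    if inputs and inputs[0] in valid_inputs:
        continue  # already wired to an accepted default
    for valid_input in valid_inputs:
        if cmds.objExists(valid_input):
            # repair: restore the default connection
            cmds.connectAttr(valid_input, plug, force=True)
            break
```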
@ -371,7 +371,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
continue
for node in data["nodes"]:
lib.set_attribute(data["attribute"], data["values"][0], node)

with lib.renderlayer(layer_node):

# Repair animation must be enabled

@ -392,13 +391,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
if renderer != "renderman":
prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
fname_prefix = default_prefix
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
cmds.setAttr(prefix_attr, fname_prefix, type="string")

# Repair padding
padding_attr = RenderSettings.get_padding_attr(renderer)
cmds.setAttr("{}.{}".format(node, padding_attr),
cls.DEFAULT_PADDING)
cmds.setAttr(padding_attr, cls.DEFAULT_PADDING)
else:
# renderman handles stuff differently
cmds.setAttr("rmanGlobals.imageFileFormat",

@ -260,7 +260,7 @@ def _install_menu():
"Create...",
lambda: host_tools.show_publisher(
parent=(
main_window if nuke.NUKE_VERSION_RELEASE >= 14 else None
main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
),
tab="create"
)

@ -271,7 +271,7 @@ def _install_menu():
"Publish...",
lambda: host_tools.show_publisher(
parent=(
main_window if nuke.NUKE_VERSION_RELEASE >= 14 else None
main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
),
tab="publish"
)

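Reviewer note: `nuke.NUKE_VERSION_RELEASE` is the release field of the version triple (for example the 3 in 14.0v3), so comparing it against 14 tested the wrong number; `NUKE_VERSION_MAJOR` is the right constant for a "Nuke 14 or newer" check. A one-line sketch, runnable only inside Nuke (`main_window` comes from the surrounding hunk):

```python
import nuke

parent = main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
```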
@ -21,6 +21,11 @@ from openpype.pipeline import (
CreatedInstance,
get_current_task_name
)
from openpype.pipeline.colorspace import (
get_display_view_colorspace_name,
get_colorspace_settings_from_publish_context,
set_colorspace_data_to_representation
)
from openpype.lib.transcoding import (
VIDEO_EXTENSIONS
)

@ -612,7 +617,7 @@ class ExporterReview(object):

def get_representation_data(
self, tags=None, range=False,
custom_tags=None
custom_tags=None, colorspace=None
):
""" Add representation data to self.data

@ -652,6 +657,14 @@ class ExporterReview(object):
if self.publish_on_farm:
repre["tags"].append("publish_on_farm")

# add colorspace data to representation
if colorspace:
set_colorspace_data_to_representation(
repre,
self.instance.context.data,
colorspace=colorspace,
log=self.log
)
self.data["representations"].append(repre)

def get_imageio_baking_profile(self):

@ -866,6 +879,13 @@ class ExporterReviewMov(ExporterReview):
return path

def generate_mov(self, farm=False, **kwargs):
# colorspace data
colorspace = None
# get colorspace settings
# get colorspace data from context
config_data, _ = get_colorspace_settings_from_publish_context(
self.instance.context.data)

add_tags = []
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]

@ -951,6 +971,14 @@ class ExporterReviewMov(ExporterReview):
# assign viewer
dag_node["view"].setValue(viewer)

if config_data:
# convert display and view to colorspace
colorspace = get_display_view_colorspace_name(
config_path=config_data["path"],
display=display,
view=viewer
)

self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`")
# Write node
write_node = nuke.createNode("Write")

@ -996,9 +1024,10 @@ class ExporterReviewMov(ExporterReview):

# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"] + add_tags,
tags=["review", "need_thumbnail", "delete"] + add_tags,
custom_tags=add_custom_tags,
range=True
range=True,
colorspace=colorspace
)

self.log.debug("Representation... `{}`".format(self.data))

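The three hunks above thread an OCIO display/view colorspace from the publish context into the baked review representation. A condensed sketch of the flow; function names come from the imports added earlier in this file, while the display/view values and the minimal `repre` dict are hypothetical:

```python
# Condensed model of the colorspace flow added above.
config_data, _ = get_colorspace_settings_from_publish_context(
    instance.context.data)

colorspace = None
if config_data:
    colorspace = get_display_view_colorspace_name(
        config_path=config_data["path"],
        display="default",  # hypothetical display
        view="sRGB",        # hypothetical view
    )

repre = {"name": "baked", "tags": []}  # minimal stand-in representation
if colorspace:
    # get_representation_data() stamps the resolved name onto the repre
    set_colorspace_data_to_representation(
        repre, instance.context.data, colorspace=colorspace)
```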
@ -276,7 +276,7 @@ class ExtractSlateFrame(publish.Extractor):

if not matching_repre:
self.log.info(
"Matching reresentation was not found."
"Matching representation was not found."
" Representation files were not filled with slate."
)
return

@ -294,7 +294,7 @@ class ExtractSlateFrame(publish.Extractor):
self.log.debug(
"__ matching_repre: {}".format(pformat(matching_repre)))

self.log.warning("Added slate frame to representation files")
self.log.info("Added slate frame to representation files")

def add_comment_slate_node(self, instance, node):

@ -1,216 +0,0 @@
import sys
import os
import nuke
import pyblish.api

from openpype.pipeline import publish
from openpype.hosts.nuke import api as napi
from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings


# Python 2/3 compatibility
if sys.version_info[0] >= 3:
unicode = str


class ExtractThumbnail(publish.Extractor):
"""Extracts movie and thumbnail with baked in luts

must be run after extract_render_local.py

"""

order = pyblish.api.ExtractorOrder + 0.011
label = "Extract Thumbnail"

families = ["review"]
hosts = ["nuke"]

# settings
use_rendered = False
bake_viewer_process = True
bake_viewer_input_process = True
nodes = {}
reposition_nodes = None

def process(self, instance):
if instance.data.get("farm"):
return

with napi.maintained_selection():
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))

if instance.data.get("bakePresets"):
for o_name, o_data in instance.data["bakePresets"].items():
self.render_thumbnail(instance, o_name, **o_data)
else:
viewer_process_switches = {
"bake_viewer_process": True,
"bake_viewer_input_process": True
}
self.render_thumbnail(
instance, None, **viewer_process_switches)

def render_thumbnail(self, instance, output_name=None, **kwargs):
first_frame = instance.data["frameStartHandle"]
last_frame = instance.data["frameEndHandle"]
colorspace = instance.data["colorspace"]

# find frame range and define middle thumb frame
mid_frame = int((last_frame - first_frame) / 2)

# solve output name if any is set
output_name = output_name or ""

bake_viewer_process = kwargs["bake_viewer_process"]
bake_viewer_input_process_node = kwargs[
"bake_viewer_input_process"]

node = instance.data["transientData"]["node"]  # group node
self.log.debug("Creating staging dir...")

if "representations" not in instance.data:
instance.data["representations"] = []

staging_dir = os.path.normpath(
os.path.dirname(instance.data['path']))

instance.data["stagingDir"] = staging_dir

self.log.debug(
"StagingDir `{0}`...".format(instance.data["stagingDir"]))

temporary_nodes = []

# try to connect already rendered images
previous_node = node
collection = instance.data.get("collection", None)
self.log.debug("__ collection: `{}`".format(collection))

if collection:
# get path
fhead = collection.format("{head}")

thumb_fname = list(collection)[mid_frame]
else:
fname = thumb_fname = os.path.basename(
instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."

self.log.debug("__ fhead: `{}`".format(fhead))

if "#" in fhead:
fhead = fhead.replace("#", "")[:-1]

path_render = os.path.join(
staging_dir, thumb_fname).replace("\\", "/")
self.log.debug("__ path_render: `{}`".format(path_render))

if self.use_rendered and os.path.isfile(path_render):
# check if file exist otherwise connect to write node
rnode = nuke.createNode("Read")
rnode["file"].setValue(path_render)
rnode["colorspace"].setValue(colorspace)

# turn it raw if none of baking is ON
if all([
not self.bake_viewer_input_process,
not self.bake_viewer_process
]):
rnode["raw"].setValue(True)

temporary_nodes.append(rnode)
previous_node = rnode

if self.reposition_nodes is None:
# [deprecated] create reformat node old way
reformat_node = nuke.createNode("Reformat")
ref_node = self.nodes.get("Reformat", None)
if ref_node:
for k, v in ref_node:
self.log.debug("k, v: {0}:{1}".format(k, v))
if isinstance(v, unicode):
v = str(v)
reformat_node[k].setValue(v)

reformat_node.setInput(0, previous_node)
previous_node = reformat_node
temporary_nodes.append(reformat_node)
else:
# create reformat node new way
for repo_node in self.reposition_nodes:
node_class = repo_node["node_class"]
knobs = repo_node["knobs"]
node = nuke.createNode(node_class)
set_node_knobs_from_settings(node, knobs)

# connect in order
node.setInput(0, previous_node)
previous_node = node
temporary_nodes.append(node)

# only create colorspace baking if toggled on
if bake_viewer_process:
if bake_viewer_input_process_node:
# get input process and connect it to baking
ipn = napi.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)

dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)

thumb_name = "thumbnail"
# only add output name and
# if there are more than one bake preset
if (
output_name
and len(instance.data.get("bakePresets", {}).keys()) > 1
):
thumb_name = "{}_{}".format(output_name, thumb_name)

# create write node
write_node = nuke.createNode("Write")
file = fhead[:-1] + thumb_name + ".jpg"
thumb_path = os.path.join(staging_dir, file).replace("\\", "/")

# add thumbnail to cleanup
instance.context.data["cleanupFullPaths"].append(thumb_path)

# make sure only one thumbnail path is set
# and it is existing file
instance_thumb_path = instance.data.get("thumbnailPath")
if not instance_thumb_path or not os.path.isfile(instance_thumb_path):
instance.data["thumbnailPath"] = thumb_path

write_node["file"].setValue(thumb_path)
write_node["file_type"].setValue("jpg")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)

repre = {
'name': thumb_name,
'ext': "jpg",
"outputName": thumb_name,
'files': file,
"stagingDir": staging_dir,
"tags": ["thumbnail", "publish_on_farm", "delete"]
}
instance.data["representations"].append(repre)

# Render frames
nuke.execute(write_node.name(), mid_frame, mid_frame)

self.log.debug(
"representations: {}".format(instance.data["representations"]))

# Clean up
for node in temporary_nodes:
nuke.delete(node)

@ -9,7 +9,7 @@ The Photoshop integration requires two components to work; `extension` and `server`.
To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).

```
ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
ExManCmd /install {path to addon}/api/extension.zxp
```

### Server

@ -17,16 +17,16 @@ ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
The easiest way to get the server and Photoshop launch is with:

```
python -c ^"import avalon.photoshop;avalon.photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
python -c ^"import openpype.hosts.photoshop;openpype.hosts.photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
```

`avalon.photoshop.launch` launches the application and server, and also closes the server when Photoshop exists.

## Usage

The Photoshop extension can be found under `Window > Extensions > Avalon`. Once launched you should be presented with a panel like this:
The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this:

![Avalon Panel](panel.PNG "Avalon Panel")
![Ayon Panel](panel.PNG "Ayon Panel")


## Developing

@ -37,7 +37,7 @@ When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources).
When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).

```
ZXPSignCmd -selfSignedCert NA NA Avalon Avalon-Photoshop avalon extension.p12
ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12
ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon
```

@ -1,9 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionList>
<Extension Id="com.openpype.PS.panel">
<Extension Id="io.ynput.PS.panel">
<HostList>
<Host Name="PHXS" Port="8078"/>
<Host Name="FLPR" Port="8078"/>
</HostList>
</Extension>
</ExtensionList>
</ExtensionList>

@ -1,7 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<ExtensionManifest ExtensionBundleId="com.openpype.PS.panel" ExtensionBundleVersion="1.0.12" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionManifest ExtensionBundleId="io.ynput.PS.panel" ExtensionBundleVersion="1.1.0" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="com.openpype.PS.panel" Version="1.0.1" />
<Extension Id="io.ynput.PS.panel" Version="1.0.1" />
</ExtensionList>
<ExecutionEnvironment>
<HostList>

@ -16,7 +16,7 @@
</RequiredRuntimeList>
</ExecutionEnvironment>
<DispatchInfoList>
<Extension Id="com.openpype.PS.panel">
<Extension Id="io.ynput.PS.panel">
<DispatchInfo>
<Resources>
<MainPath>./index.html</MainPath>

@ -32,7 +32,7 @@
</Lifecycle>
<UI>
<Type>Panel</Type>
<Menu>OpenPype</Menu>
<Menu>AYON</Menu>
<Geometry>
<Size>
<Width>300</Width>

@ -44,7 +44,7 @@
</MaxSize>
</Geometry>
<Icons>
<Icon Type="Normal">./icons/avalon-logo-48.png</Icon>
<Icon Type="Normal">./icons/ayon_logo.png</Icon>
</Icons>
</UI>
</DispatchInfo>

Before Width: | Height: | Size: 1.3 KiB
BIN
openpype/hosts/photoshop/api/extension/icons/ayon_logo.png
Normal file
After Width: | Height: | Size: 3.5 KiB
Before Width: | Height: | Size: 8.6 KiB
Before Width: | Height: | Size: 8.6 KiB After Width: | Height: | Size: 8.6 KiB
Before Width: | Height: | Size: 13 KiB
BIN
openpype/hosts/photoshop/api/panel_failure.png
Normal file
After Width: | Height: | Size: 13 KiB

@ -170,8 +170,7 @@ class ExtractReview(publish.Extractor):
# Generate mov.
mov_path = os.path.join(staging_dir, "review.mov")
self.log.info(f"Generate mov review: {mov_path}")
args = [
ffmpeg_path,
args = ffmpeg_path + [
"-y",
"-i", source_files_pattern,
"-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",

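Reviewer note: the change from `args = [ffmpeg_path, ...]` to `args = ffmpeg_path + [...]` implies `ffmpeg_path` is now a list (executable plus any default arguments) rather than a single string. A sketch of what the call site presumably builds; the executable path and input pattern are hypothetical:

```python
# Assuming a get_ffmpeg_tool_args()-style value: executable + default args.
ffmpeg_path = ["/usr/bin/ffmpeg"]          # hypothetical
args = ffmpeg_path + [
    "-y",
    "-i", "review.%04d.png",               # hypothetical input pattern
    "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
    "review.mov",
]
```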
@ -224,6 +223,7 @@ class ExtractReview(publish.Extractor):
"stagingDir": staging_dir,
"tags": ["thumbnail", "delete"]
})
instance.data["thumbnailPath"] = thumbnail_path

def _check_and_resize(self, processed_img_names, source_files_pattern,
staging_dir):

@ -298,7 +298,7 @@ def create_timeline_item(
if source_end:
clip_data["endFrame"] = source_end
if timecode_in:
clip_data["recordFrame"] = timecode_in
clip_data["recordFrame"] = timeline_in

# add to timeline
media_pool.AppendToTimeline([clip_data])

@ -7,6 +7,9 @@ from openpype.tools.utils import host_tools
from openpype.pipeline import registered_host


MENU_LABEL = os.environ["AVALON_LABEL"]


def load_stylesheet():
path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
if not os.path.exists(path):

@ -39,7 +42,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(OpenPypeMenu, self).__init__(*args, **kwargs)

self.setObjectName("OpenPypeMenu")
self.setObjectName(f"{MENU_LABEL}Menu")

self.setWindowFlags(
QtCore.Qt.Window

@ -49,7 +52,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
| QtCore.Qt.WindowStaysOnTopHint
)

self.setWindowTitle("OpenPype")
self.setWindowTitle(f"{MENU_LABEL}")
save_current_btn = QtWidgets.QPushButton("Save current file", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)

@ -406,26 +406,42 @@ class ClipLoader:
self.active_bin
)
_clip_property = media_pool_item.GetClipProperty
source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
source_duration = int(_clip_property("Frames"))

# get handles
handle_start = self.data["versionData"].get("handleStart")
handle_end = self.data["versionData"].get("handleEnd")
if handle_start is None:
handle_start = int(self.data["assetData"]["handleStart"])
if handle_end is None:
handle_end = int(self.data["assetData"]["handleEnd"])
if not self.with_handles:
# Load file without the handles of the source media
# We remove the handles from the source in and source out
# so that the handles are excluded in the timeline
handle_start = 0
handle_end = 0

# check frame duration from versionData or assetData
frame_start = self.data["versionData"].get("frameStart")
if frame_start is None:
frame_start = self.data["assetData"]["frameStart"]
# get version data frame data from db
version_data = self.data["versionData"]
frame_start = version_data.get("frameStart")
frame_end = version_data.get("frameEnd")

# check frame duration from versionData or assetData
frame_end = self.data["versionData"].get("frameEnd")
if frame_end is None:
frame_end = self.data["assetData"]["frameEnd"]

db_frame_duration = int(frame_end) - int(frame_start) + 1
# The version data usually stored the frame range + handles of the
# media however certain representations may be shorter because they
# exclude those handles intentionally. Unfortunately the
# representation does not store that in the database currently;
# so we should compensate for those cases. If the media is shorter
# than the frame range specified in the database we assume it is
# without handles and thus we do not need to remove the handles
# from source and out
if frame_start is not None and frame_end is not None:
# Version has frame range data, so we can compare media length
handle_start = version_data.get("handleStart", 0)
handle_end = version_data.get("handleEnd", 0)
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_start + handle_end
database_frame_duration = int(
frame_end_handle - frame_start_handle + 1
)
if source_duration >= database_frame_duration:
source_in += handle_start
source_out -= handle_end

# get timeline in
timeline_start = self.active_timeline.GetStartFrame()

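Reviewer note on the arithmetic above: as written, `frame_end_handle` is derived from `frame_start`, so `database_frame_duration` reduces to `handle_start + handle_end + 1` regardless of the clip length. If the intent is the full with-handles length of the version, the conventional computation would be:

```python
# Conventional with-handles duration, assuming frame_end is the intended base.
frame_start_handle = frame_start - handle_start
frame_end_handle = frame_end + handle_end
database_frame_duration = frame_end_handle - frame_start_handle + 1
```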
@ -437,24 +453,6 @@ class ClipLoader:
timeline_in = int(
timeline_start + self.data["assetData"]["clipIn"])

source_in = int(_clip_property("Start"))
source_out = int(_clip_property("End"))
source_duration = int(_clip_property("Frames"))

# check if source duration is shorter than db frame duration
source_with_handles = True
if source_duration < db_frame_duration:
source_with_handles = False

# only exclude handles if source has no handles or
# if user wants to load without handles
if (
not self.with_handles
or not source_with_handles
):
source_in += handle_start
source_out -= handle_end

# make track item from source in bin as item
timeline_item = lib.create_timeline_item(
media_pool_item,

@ -868,7 +866,7 @@ class PublishClip:
def _convert_to_entity(self, key):
""" Converting input key to key with type. """
# convert to entity type
entity_type = self.types.get(key, None)
entity_type = self.types.get(key)

assert entity_type, "Missing entity type for `{}`".format(
key

22
openpype/hosts/resolve/utility_scripts/AYON__Menu.py
Normal file

@ -0,0 +1,22 @@
import os
import sys

from openpype.pipeline import install_host
from openpype.lib import Logger

log = Logger.get_logger(__name__)


def main(env):
from openpype.hosts.resolve.api import ResolveHost, launch_pype_menu

# activate resolve from openpype
host = ResolveHost()
install_host(host)

launch_pype_menu()


if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

@ -2,6 +2,7 @@ import os
import shutil
from openpype.lib import Logger, is_running_from_build

from openpype import AYON_SERVER_ENABLED
RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

@ -54,6 +55,14 @@ def setup(env):
src = os.path.join(directory, script)
dst = os.path.join(util_scripts_dir, script)

# TODO: remove this once we have a proper solution
if AYON_SERVER_ENABLED:
if "OpenPype__Menu.py" == script:
continue
else:
if "AYON__Menu.py" == script:
continue

# TODO: Make this a less hacky workaround
if script == "openpype_startup.scriptlib":
# Handle special case for scriptlib that needs to be a folder

@ -257,8 +257,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
if 'shot' not in instance.data.get('family', ''):
continue

name = instance.data["asset"]

# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])

@ -286,6 +284,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
parents = instance.data.get('parents', [])
self.log.debug(f"parents: {pformat(parents)}")

# Split by '/' for AYON where asset is a path
name = instance.data["asset"].split("/")[-1]
actual = {name: in_info}

for parent in reversed(parents):

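In AYON the asset value is a slash-separated folder path, so only the leaf segment is a usable name for the hierarchy key. For example:

```python
# AYON passes asset as a path; the hierarchy wants only the leaf name.
asset = "shots/sq01/sh010"   # hypothetical AYON asset path
name = asset.split("/")[-1]  # -> "sh010"
```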
@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect original base name for use in templates."""
from pathlib import Path

import pyblish.api


class CollectOriginalBasename(pyblish.api.InstancePlugin):
"""Collect original file base name."""

order = pyblish.api.CollectorOrder + 0.498
label = "Collect Base Name"
hosts = ["standalonepublisher"]
families = ["simpleUnrealTexture"]

def process(self, instance):
file_name = Path(instance.data["representations"][0]["files"])
instance.data["originalBasename"] = file_name.stem

@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
"""Validator for correct file naming."""
import re
import pyblish.api

from openpype.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
)


class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin):
label = "Validate Unreal Texture Names"
hosts = ["standalonepublisher"]
families = ["simpleUnrealTexture"]
order = ValidateContentsOrder
regex = "^T_{asset}.*"

def process(self, instance):
file_name = instance.data.get("originalBasename")
self.log.info(file_name)
pattern = self.regex.format(asset=instance.data.get("asset"))
if not re.match(pattern, file_name):
msg = f"Invalid file name {file_name}"
raise PublishXmlValidationError(
self, msg, formatting_data={
"invalid_file": file_name,
"asset": instance.data.get("asset")
})

@ -583,18 +583,9 @@ def prompt_new_file_with_mesh(mesh_filepath):
file_dialog.setDirectory(os.path.dirname(mesh_filepath))
url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
file_dialog.selectUrl(url)

# Give the explorer window time to refresh to the folder and select
# the file
while not file_dialog.selectedFiles():
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
print(f"Selected: {file_dialog.selectedFiles()}")

# Set it again now we know the path is refreshed - without this
# accepting the dialog will often not trigger the correct filepath
file_dialog.setDirectory(os.path.dirname(mesh_filepath))
url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
file_dialog.selectUrl(url)
# TODO: find a way to improve the process event to
# load more complicated mesh
app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)

file_dialog.done(file_dialog.Accepted)
app.processEvents(QtCore.QEventLoop.AllEvents)

@ -628,7 +619,12 @@ def prompt_new_file_with_mesh(mesh_filepath):
mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel)
if not mesh_filename_label.text():
dialog.close()
raise RuntimeError(f"Failed to set mesh path: {mesh_filepath}")
substance_painter.logging.warning(
"Failed to set mesh path with the prompt dialog:"
f"{mesh_filepath}\n\n"
"Creating new project directly with the mesh path instead.")
else:
dialog.done(dialog.Accepted)

new_action = _get_new_project_action()
if not new_action:

@ -44,14 +44,22 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
# Get user inputs
import_cameras = data.get("import_cameras", True)
preserve_strokes = data.get("preserve_strokes", True)

sp_settings = substance_painter.project.Settings(
import_cameras=import_cameras
)
if not substance_painter.project.is_open():
# Allow to 'initialize' a new project
path = self.filepath_from_context(context)
# TODO: improve the prompt dialog function to not
# only works for simple polygon scene
result = prompt_new_file_with_mesh(mesh_filepath=path)
if not result:
self.log.info("User cancelled new project prompt.")
return
self.log.info("User cancelled new project prompt."
"Creating new project directly from"
" Substance Painter API Instead.")
settings = substance_painter.project.create(
mesh_file_path=path, settings=sp_settings
)

else:
# Reload the mesh

@ -663,7 +663,7 @@ or updating already created. Publishing will create OTIO file.
variant_name = instance_data["variant"]

# basic unique asset name
clip_name = os.path.splitext(otio_clip.name)[0].lower()
clip_name = os.path.splitext(otio_clip.name)[0]
project_doc = get_project(self.project_name)

shot_name, shot_metadata = self._shot_metadata_solver.generate_data(

@ -5,6 +5,7 @@ from openpype.pipeline import (
)
from openpype.lib import EnumDef
from openpype.pipeline import colorspace
from openpype.pipeline.publish import KnownPublishError


class CollectColorspace(pyblish.api.InstancePlugin,

@ -26,18 +27,44 @@ class CollectColorspace(pyblish.api.InstancePlugin,

def process(self, instance):
values = self.get_attr_values_from_data(instance.data)
colorspace = values.get("colorspace", None)
if colorspace is None:
colorspace_value = values.get("colorspace", None)
if colorspace_value is None:
return

self.log.debug("Explicit colorspace set to: {}".format(colorspace))
color_data = colorspace.convert_colorspace_enumerator_item(
colorspace_value, self.config_items)

colorspace_name = self._colorspace_name_by_type(color_data)
self.log.debug("Explicit colorspace name: {}".format(colorspace_name))

context = instance.context
for repre in instance.data.get("representations", {}):
self.set_representation_colorspace(
representation=repre,
context=context,
colorspace=colorspace
colorspace=colorspace_name
)

def _colorspace_name_by_type(self, colorspace_data):
"""
Returns colorspace name by type

Arguments:
colorspace_data (dict): colorspace data

Returns:
str: colorspace name
"""
if colorspace_data["type"] == "colorspaces":
return colorspace_data["name"]
elif colorspace_data["type"] == "roles":
return colorspace_data["colorspace"]
else:
raise KnownPublishError(
(
"Collecting of colorspace failed. used config is missing "
"colorspace type: '{}' . Please contact your pipeline TD."
).format(colorspace_data['type'])
)

@classmethod

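The new `_colorspace_name_by_type` expects the enumerator item to come in one of two shapes, matching the branches above. Illustrative inputs (the field values are hypothetical, the keys follow the hunk):

```python
# Two item shapes handled by _colorspace_name_by_type().
colorspaces_item = {"type": "colorspaces", "name": "ACEScg"}
roles_item = {"type": "roles", "name": "scene_linear",
              "colorspace": "ACEScg"}

# "colorspaces" items resolve to their own name,
# "roles" items resolve to the colorspace the role points at:
assert colorspaces_item["name"] == "ACEScg"
assert roles_item["colorspace"] == "ACEScg"
```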
@ -155,8 +155,6 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
else {}
)

asset_name = instance.data["asset"]

# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])

@ -177,6 +175,8 @@ class CollectShotInstance(pyblish.api.InstancePlugin):

parents = instance.data.get('parents', [])

# Split by '/' for AYON where asset is a path
asset_name = instance.data["asset"].split("/")[-1]
actual = {asset_name: in_info}

for parent in reversed(parents):

@ -33,7 +33,19 @@ class ValidateColorspace(pyblish.api.InstancePlugin,
config_path = colorspace_data["config"]["path"]
if config_path not in config_colorspaces:
colorspaces = get_ocio_config_colorspaces(config_path)
config_colorspaces[config_path] = set(colorspaces)
if not colorspaces.get("colorspaces"):
message = (
f"OCIO config '{config_path}' does not contain any "
"colorspaces. This is an error in the OCIO config. "
"Contact your pipeline TD.",
)
raise PublishValidationError(
title="Colorspace validation",
message=message,
description=message
)
config_colorspaces[config_path] = set(
colorspaces["colorspaces"])

colorspace = colorspace_data["colorspace"]
self.log.debug(

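Reviewer note: the trailing comma after the closing string in `message = (...)` above makes `message` a one-element tuple, so `PublishValidationError` receives a tuple rather than a string. Dropping the comma yields the intended value:

```python
# Without the trailing comma, message is a plain string.
message = (
    f"OCIO config '{config_path}' does not contain any "
    "colorspaces. This is an error in the OCIO config. "
    "Contact your pipeline TD."
)
```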
@ -73,7 +73,7 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
render_layer_id = creator_attributes["render_layer_instance_id"]
for in_data in instance.context.data["workfileInstances"]:
if (
in_data["creator_identifier"] == "render.layer"
in_data.get("creator_identifier") == "render.layer"
and in_data["instance_id"] == render_layer_id
):
render_layer_data = in_data

@ -111,6 +111,7 @@ from .transcoding import (
get_ffmpeg_format_args,
convert_ffprobe_fps_value,
convert_ffprobe_fps_to_float,
get_rescaled_command_arguments,
)

from .local_settings import (

@ -232,6 +233,7 @@ __all__ = [
"get_ffmpeg_format_args",
"convert_ffprobe_fps_value",
"convert_ffprobe_fps_to_float",
"get_rescaled_command_arguments",

"IniSettingRegistry",
"JSONSettingRegistry",

@ -140,7 +140,7 @@ def is_running_staging():
latest_version = get_latest_version(local=False, remote=True)
staging_version = latest_version

if current_version == production_version:
if current_version == staging_version:
return True

return is_staging_enabled()

@ -655,47 +655,6 @@ def convert_for_ffmpeg(
run_subprocess(oiio_cmd, logger=logger)


def get_oiio_input_and_channel_args(oiio_input_info):
"""Get input and channel arguments for oiiotool.

Args:
oiio_input_info (dict): Information about input from oiio tool.
Should be output of function `get_oiio_info_for_input`.

Returns:
tuple[str, str]: Tuple of input and channel arguments.
"""
channel_names = oiio_input_info["channelnames"]
review_channels = get_convert_rgb_channels(channel_names)

if review_channels is None:
raise ValueError(
"Couldn't find channels that can be used for conversion."
)

red, green, blue, alpha = review_channels
input_channels = [red, green, blue]

# TODO find subimage where rgba is available for multipart exrs
channels_arg = "R={},G={},B={}".format(red, green, blue)
if alpha is not None:
channels_arg += ",A={}".format(alpha)
input_channels.append(alpha)

input_channels_str = ",".join(input_channels)

subimages = oiio_input_info.get("subimages")
input_arg = "-i"
if subimages is None or subimages == 1:
# Tell oiiotool which channels should be loaded
# - other channels are not loaded to memory so helps to avoid memory
# leak issues
# - this option is crashing if used on multipart exrs
input_arg += ":ch={}".format(input_channels_str)

return input_arg, channels_arg


def convert_input_paths_for_ffmpeg(
input_paths,
output_dir,

@@ -1236,3 +1195,221 @@ def split_cmd_args(in_args):
             continue
         splitted_args.extend(arg.split(" "))
     return splitted_args
+
+
+def get_rescaled_command_arguments(
+    application,
+    input_path,
+    target_width,
+    target_height,
+    target_par=None,
+    bg_color=None,
+    log=None
+):
+    """Get command arguments for rescaling input to target size.
+
+    Args:
+        application (str): Application for which command should be created.
+            Currently supported are "ffmpeg" and "oiiotool".
+        input_path (str): Path to input file.
+        target_width (int): Width of target.
+        target_height (int): Height of target.
+        target_par (Optional[float]): Pixel aspect ratio of target.
+        bg_color (Optional[list[int]]): List of 8bit int values for
+            background color. Should be in range 0 - 255.
+        log (Optional[logging.Logger]): Logger used for logging.
+
+    Returns:
+        list[str]: List of command arguments.
+    """
+    command_args = []
+    target_par = target_par or 1.0
+    input_par = 1.0
+
+    # ffmpeg command
+    input_file_metadata = get_ffprobe_data(input_path, logger=log)
+    stream = input_file_metadata["streams"][0]
+    input_width = int(stream["width"])
+    input_height = int(stream["height"])
+    stream_input_par = stream.get("sample_aspect_ratio")
+    if stream_input_par:
+        input_par = (
+            float(stream_input_par.split(":")[0])
+            / float(stream_input_par.split(":")[1])
+        )
+    # recalculating input and target width
+    input_width = int(input_width * input_par)
+    target_width = int(target_width * target_par)
+
+    # calculate aspect ratios
+    target_aspect = float(target_width) / target_height
+    input_aspect = float(input_width) / input_height
+
+    # calculate scale size
+    scale_size = float(input_width) / target_width
+    if input_aspect < target_aspect:
+        scale_size = float(input_height) / target_height
+
+    # calculate rescaled width and height
+    rescaled_width = int(input_width / scale_size)
+    rescaled_height = int(input_height / scale_size)
+
+    # calculate width and height shift
+    rescaled_width_shift = int((target_width - rescaled_width) / 2)
+    rescaled_height_shift = int((target_height - rescaled_height) / 2)
+
+    if application == "ffmpeg":
+        # create scale command
+        scale = "scale={0}:{1}".format(input_width, input_height)
+        pad = "pad={0}:{1}:({2}-iw)/2:({3}-ih)/2".format(
+            target_width,
+            target_height,
+            target_width,
+            target_height
+        )
+        if input_width > target_width or input_height > target_height:
+            scale = "scale={0}:{1}".format(rescaled_width, rescaled_height)
+            pad = "pad={0}:{1}:{2}:{3}".format(
+                target_width,
+                target_height,
+                rescaled_width_shift,
+                rescaled_height_shift
+            )
+
+        if bg_color:
+            color = convert_color_values(application, bg_color)
+            pad += ":{0}".format(color)
+        command_args.extend(["-vf", "{0},{1}".format(scale, pad)])
+
+    elif application == "oiiotool":
+        input_info = get_oiio_info_for_input(input_path, logger=log)
+        # Collect channels to export
+        _, channels_arg = get_oiio_input_and_channel_args(
+            input_info, alpha_default=1.0)
+
+        command_args.extend([
+            # Tell oiiotool which channels should be put to top stack
+            #   (and output)
+            "--ch", channels_arg,
+            # Use first subimage
+            "--subimage", "0"
+        ])
+
+        if input_par != 1.0:
+            command_args.extend(["--pixelaspect", "1"])
+
+        width_shift = int((target_width - input_width) / 2)
+        height_shift = int((target_height - input_height) / 2)
+
+        # default resample is not scaling source image
+        resample = [
+            "--resize",
+            "{0}x{1}".format(input_width, input_height),
+            "--origin",
+            "+{0}+{1}".format(width_shift, height_shift),
+        ]
+        # scaled source image to target size
+        if input_width > target_width or input_height > target_height:
+            # form resample command
+            resample = [
+                "--resize:filter=lanczos3",
+                "{0}x{1}".format(rescaled_width, rescaled_height),
+                "--origin",
+                "+{0}+{1}".format(rescaled_width_shift, rescaled_height_shift),
+            ]
+        command_args.extend(resample)
+
+        fullsize = [
+            "--fullsize",
+            "{0}x{1}".format(target_width, target_height)
+        ]
+        if bg_color:
+            color = convert_color_values(application, bg_color)
+
+            fullsize.extend([
+                "--pattern",
+                "constant:color={0}".format(color),
+                "{0}x{1}".format(target_width, target_height),
+                "4",  # 4 channels
+                "--over"
+            ])
+        command_args.extend(fullsize)
+
+    else:
+        raise ValueError(
+            "\"application\" input argument should "
+            "be either \"ffmpeg\" or \"oiiotool\""
+        )
+
+    return command_args
+
+
+def convert_color_values(application, color_value):
+    """Get color mapping for ffmpeg and oiiotool.
+    Args:
+        application (str): Application for which command should be created.
+        color_value (list[int]): List of 8bit int values for RGBA.
+    Returns:
+        str: ffmpeg returns hex string, oiiotool is string with floats.
+    """
+    red, green, blue, alpha = color_value
+
+    if application == "ffmpeg":
+        return "{0:0>2X}{1:0>2X}{2:0>2X}@{3}".format(
+            red, green, blue, (alpha / 255.0)
+        )
+    elif application == "oiiotool":
+        red = float(red / 255)
+        green = float(green / 255)
+        blue = float(blue / 255)
+        alpha = float(alpha / 255)
+
+        return "{0:.3f},{1:.3f},{2:.3f},{3:.3f}".format(
+            red, green, blue, alpha)
+    else:
+        raise ValueError(
+            "\"application\" input argument should "
+            "be either \"ffmpeg\" or \"oiiotool\""
+        )
+
+
+def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
+    """Get input and channel arguments for oiiotool.
+    Args:
+        oiio_input_info (dict): Information about input from oiio tool.
+            Should be output of function `get_oiio_info_for_input`.
+        alpha_default (float, optional): Default value for alpha channel.
+    Returns:
+        tuple[str, str]: Tuple of input and channel arguments.
+    """
+    channel_names = oiio_input_info["channelnames"]
+    review_channels = get_convert_rgb_channels(channel_names)
+
+    if review_channels is None:
+        raise ValueError(
+            "Couldn't find channels that can be used for conversion."
+        )
+
+    red, green, blue, alpha = review_channels
+    input_channels = [red, green, blue]
+
+    channels_arg = "R={0},G={1},B={2}".format(red, green, blue)
+    if alpha is not None:
+        channels_arg += ",A={}".format(alpha)
+        input_channels.append(alpha)
+    elif alpha_default:
+        channels_arg += ",A={}".format(float(alpha_default))
+        input_channels.append("A")
+
+    input_channels_str = ",".join(input_channels)
+
+    subimages = oiio_input_info.get("subimages")
+    input_arg = "-i"
+    if subimages is None or subimages == 1:
+        # Tell oiiotool which channels should be loaded
+        # - other channels are not loaded to memory so helps to avoid memory
+        #   leak issues
+        # - this option is crashing if used on multipart exrs
+        input_arg += ":ch={}".format(input_channels_str)
+
+    return input_arg, channels_arg
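
To make the rescale arithmetic concrete, a hypothetical ffmpeg call fitting a 4096x2160 source into a 1920x1080 target (the input path and background color are made up; square pixels are assumed, so both pixel aspect ratios stay 1.0):

    # input_aspect (4096/2160 ~= 1.896) exceeds target_aspect (~= 1.778),
    # so scale_size = 4096 / 1920 ~= 2.133, which rescales the source to
    # 1920x1012 and pads vertically by (1080 - 1012) / 2 = 34 pixels.
    args = get_rescaled_command_arguments(
        "ffmpeg",
        "/tmp/review_input.mov",  # hypothetical input
        1920,
        1080,
        bg_color=[0, 0, 0, 255],
    )
    # -> ["-vf", "scale=1920:1012,pad=1920:1080:0:34:000000@1.0"]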
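
The background color is encoded differently per tool; a short usage sketch of convert_color_values with a made-up orange value:

    convert_color_values("ffmpeg", [255, 128, 0, 255])
    # -> "FF8000@1.0"  (hex RGB plus "@alpha" in the 0-1 range)

    convert_color_values("oiiotool", [255, 128, 0, 255])
    # -> "1.000,0.502,0.000,1.000"  (channels normalized to 0-1 floats)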
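
The new alpha_default argument covers sources without an alpha channel. Assuming get_convert_rgb_channels resolves plain "R", "G", "B" channel names to ("R", "G", "B", None), a hypothetical single-part input would behave like this:

    oiio_info = {"channelnames": ["R", "G", "B"], "subimages": 1}
    input_arg, channels_arg = get_oiio_input_and_channel_args(
        oiio_info, alpha_default=1.0
    )
    # input_arg     -> "-i:ch=R,G,B,A"      (only needed channels loaded)
    # channels_arg  -> "R=R,G=G,B=B,A=1.0"  (constant opaque alpha injected)
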
@@ -460,7 +460,21 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
         self.plugin_info = self.get_plugin_info()
         self.aux_files = self.get_aux_files()

-        self.process_submission()
+        job_id = self.process_submission()
+        self.log.info("Submitted job to Deadline: {}.".format(job_id))
+
+        # TODO: Find a way that's more generic and not render type specific
+        if "exportJob" in instance.data:
+            self.log.info("Splitting export and render in two jobs")
+            self.log.info("Export job id: %s", job_id)
+            render_job_info = self.get_job_info(dependency_job_ids=[job_id])
+            render_plugin_info = self.get_plugin_info(job_type="render")
+            payload = self.assemble_payload(
+                job_info=render_job_info,
+                plugin_info=render_plugin_info
+            )
+            render_job_id = self.submit(payload)
+            self.log.info("Render job id: %s", render_job_id)

     def process_submission(self):
         """Process data for submission.
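
For context, a hypothetical subclass sketch showing how the new two-job path is expected to be driven (the class name, plugin values, and the collector setting the flag are assumptions; only the "exportJob" key and the job_type keyword come from the hunk above):

    # A collector would mark the instance first (hypothetical):
    instance.data["exportJob"] = True

    class SubmitMyDCCToDeadline(AbstractSubmitDeadline):
        def get_plugin_info(self, job_type=None):
            # One plugin describes both jobs; the new keyword selects
            # which payload flavor is produced (values are made up).
            if job_type == "render":
                return {"Plugin": "MyDCC", "Mode": "render"}
            return {"Plugin": "MyDCC", "Mode": "export"}

The render job is submitted with dependency_job_ids=[job_id], so get_job_info implementations are presumably expected to write the export job id into the render job's dependency field, making Deadline hold the render until the export finishes.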