Merge branch 'develop' into enhancement/maya_scene_preserve_references
.github/ISSUE_TEMPLATE/bug_report.yml
@@ -35,6 +35,14 @@ body:
       label: Version
       description: What version are you running? Look to OpenPype Tray
       options:
+        - 3.18.2-nightly.2
+        - 3.18.2-nightly.1
+        - 3.18.1
+        - 3.18.1-nightly.1
+        - 3.18.0
+        - 3.17.7
+        - 3.17.7-nightly.7
+        - 3.17.7-nightly.6
         - 3.17.7-nightly.5
         - 3.17.7-nightly.4
         - 3.17.7-nightly.3
@@ -127,14 +135,6 @@ body:
         - 3.15.4
         - 3.15.4-nightly.3
         - 3.15.4-nightly.2
-        - 3.15.4-nightly.1
-        - 3.15.3
-        - 3.15.3-nightly.4
-        - 3.15.3-nightly.3
-        - 3.15.3-nightly.2
-        - 3.15.3-nightly.1
-        - 3.15.2
-        - 3.15.2-nightly.6
       validations:
         required: true
   - type: dropdown
CHANGELOG.md
@@ -7,6 +7,10 @@ OpenPype
 [](https://github.com/pypeclub/pype/actions/workflows/documentation.yml)

+## Important Notice!
+
+OpenPype as a standalone product has reached the end of its life and this repository is now used as the pipeline core code for [AYON](https://ynput.io/ayon/). You can read more details about the end-of-life process here: https://community.ynput.io/t/openpype-end-of-life-timeline/877
+
 Introduction
 ------------
@@ -296,12 +296,15 @@ def run(script):
 @click.option("--mongo_url",
               help="MongoDB for testing.",
               default=None)
+@click.option("--dump_databases",
+              help="Dump all databases to data folder.",
+              default=None)
 def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant,
-             timeout, setup_only, mongo_url, app_group):
+             timeout, setup_only, mongo_url, app_group, dump_databases):
     """Run all automatic tests after proper initialization via start.py"""
     PypeCommands().run_tests(folder, mark, pyargs, test_data_folder,
                              persist, app_variant, timeout, setup_only,
-                             mongo_url, app_group)
+                             mongo_url, app_group, dump_databases)


 @main.command(help="DEPRECATED - run sync server")
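A minimal sketch of the pattern in the hunk above, not the real `PypeCommands` implementation: a new Click option is declared, added to the command signature, and forwarded to the runner. `run_tests` here is a stand-in.

```python
import click


def run_tests(mongo_url=None, dump_databases=None):
    # Stand-in for PypeCommands().run_tests(...)
    print("mongo_url:", mongo_url, "dump_databases:", dump_databases)


@click.command()
@click.option("--mongo_url", help="MongoDB for testing.", default=None)
@click.option("--dump_databases",
              help="Dump all databases to data folder.", default=None)
def runtests(mongo_url, dump_databases):
    run_tests(mongo_url=mongo_url, dump_databases=dump_databases)


if __name__ == "__main__":
    runtests()
```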
@@ -606,7 +606,7 @@ def convert_v4_version_to_v3(version):
         output_data[dst_key] = version[src_key]

     if "createdAt" in version:
-        created_at = arrow.get(version["createdAt"])
+        created_at = arrow.get(version["createdAt"]).to("local")
         output_data["time"] = created_at.strftime("%Y%m%dT%H%M%SZ")

     output["data"] = output_data
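The one-line change above converts the stored UTC timestamp to the machine's local timezone before formatting. A small self-contained illustration of the difference (requires the `arrow` package; the timestamp is an example value):

```python
import arrow

created_at = "2023-10-05T12:00:00+00:00"

utc_stamp = arrow.get(created_at).strftime("%Y%m%dT%H%M%SZ")
local_stamp = arrow.get(created_at).to("local").strftime("%Y%m%dT%H%M%SZ")

# On a UTC+2 machine this prints 20231005T120000Z 20231005T140000Z;
# note the format string keeps a literal "Z" suffix either way.
print(utc_stamp, local_stamp)
```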
@@ -80,8 +80,8 @@ def _get_subsets(
     for subset in con.get_products(
         project_name,
-        subset_ids,
-        subset_names,
+        product_ids=subset_ids,
+        product_names=subset_names,
         folder_ids=folder_ids,
         names_by_folder_ids=names_by_folder_ids,
         active=active,
@@ -113,23 +113,23 @@ def _get_versions(
     queried_versions = con.get_versions(
         project_name,
-        version_ids,
-        subset_ids,
-        versions,
-        hero,
-        standard,
-        latest,
+        version_ids=version_ids,
+        product_ids=subset_ids,
+        versions=versions,
+        hero=hero,
+        standard=standard,
+        latest=latest,
         active=active,
         fields=fields
     )

-    versions = []
+    version_entities = []
     hero_versions = []
     for version in queried_versions:
         if version["version"] < 0:
             hero_versions.append(version)
         else:
-            versions.append(convert_v4_version_to_v3(version))
+            version_entities.append(convert_v4_version_to_v3(version))

     if hero_versions:
         subset_ids = set()
@@ -159,9 +159,9 @@ def _get_versions(
                 break
         conv_hero = convert_v4_version_to_v3(hero_version)
         conv_hero["version_id"] = version_id
-        versions.append(conv_hero)
+        version_entities.append(conv_hero)

-    return versions
+    return version_entities


 def get_asset_by_id(project_name, asset_id, fields=None):
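The `versions` → `version_entities` rename above is not cosmetic: the local list previously shadowed the `versions` parameter of `_get_versions`, so the caller's filter value was lost as soon as the list was created. A stripped-down illustration of the hazard:

```python
def _get_versions_before(versions=None):
    # The `versions` filter argument is immediately shadowed...
    versions = []
    # ...so whatever the caller passed is unreachable from here on.
    return versions


def _get_versions_after(versions=None):
    version_entities = []   # distinct name: the filter stays usable
    if versions:
        print("filtering to:", versions)
    return version_entities
```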
@@ -539,11 +539,11 @@ def get_representations(
     representations = con.get_representations(
         project_name,
-        representation_ids,
-        representation_names,
-        version_ids,
-        names_by_version_ids,
-        active,
+        representation_ids=representation_ids,
+        representation_names=representation_names,
+        version_ids=version_ids,
+        names_by_version_ids=names_by_version_ids,
+        active=active,
         fields=fields
     )
     for representation in representations:
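These three hunks (`get_products`, `get_versions`, `get_representations`) all make the same move: positional arguments become explicit keywords. That insulates the call sites from parameter renames and reordering in the underlying client, e.g. when `subset_ids` must now land on a parameter named `product_ids`. A generic sketch of why this matters (hypothetical client, not the real server-API signature):

```python
def get_products(project_name, product_ids=None, product_names=None):
    return {"ids": product_ids, "names": product_names}

names = ["modelMain"]

# Positional call: silently binds `names` to `product_ids` -- wrong bucket.
wrong = get_products("demo", names)

# Keyword call: survives parameter renames/reordering and reads clearly.
right = get_products("demo", product_names=names)

print(wrong, right)
```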
@@ -27,6 +27,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):
         "tvpaint",
         "substancepainter",
         "aftereffects",
+        "wrap"
     }
     launch_types = {LaunchTypes.local}
@@ -19,7 +19,8 @@ class CopyTemplateWorkfile(PreLaunchHook):

     # Before `AddLastWorkfileToLaunchArgs`
     order = 0
-    app_groups = {"blender", "photoshop", "tvpaint", "aftereffects"}
+    app_groups = {"blender", "photoshop", "tvpaint", "aftereffects",
+                  "wrap"}
     launch_types = {LaunchTypes.local}

     def execute(self):
@@ -1,6 +1,6 @@
 # AfterEffects Integration

-Requirements: This extension requires use of Javascript engine, which is 
+Requirements: This extension requires use of Javascript engine, which is
 available since CC 16.0.
 Please check your File>Project Settings>Expressions>Expressions Engine
@@ -13,26 +13,28 @@ The After Effects integration requires two components to work; `extension` and `server`.
 To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).

 ```
-ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
+ExManCmd /install {path to addon}/api/extension.zxp
 ```
 OR
 download [Anastasiy’s Extension Manager](https://install.anastasiy.com/)

+`{path to addon}` will most likely be in your AppData on Windows, or in your user data folder on Linux and macOS.
+
 ### Server

 The easiest way to get the server and After Effects launch is with:

 ```
-python -c ^"import avalon.photoshop;avalon.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
+python -c ^"import openpype.hosts.aftereffects;openpype.hosts.aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
 ```

 `avalon.aftereffects.launch` launches the application and server, and also closes the server when After Effects exits.

 ## Usage

-The After Effects extension can be found under `Window > Extensions > OpenPype`. Once launched you should be presented with a panel like this:
+The After Effects extension can be found under `Window > Extensions > AYON`. Once launched you should be presented with a panel like this:

-
+

 ## Developing
@@ -43,8 +45,8 @@ When developing the extension you can load it [unsigned](https://github.com/Adob
 When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).

 ```
-ZXPSignCmd -selfSignedCert NA NA Avalon Avalon-After-Effects avalon extension.p12
-ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to avalon-core}\avalon\aftereffects\extension.zxp extension.p12 avalon
+ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12
+ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon
 ```

 ### Plugin Examples
@@ -52,14 +54,14 @@ ZXPSignCmd -sign {path to avalon-core}\avalon\aftereffects\extension {path to av
 These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).

 Expected deployed extension location on default Windows:
-`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\com.openpype.AE.panel`
+`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel`

 For easier debugging of Javascript:
 https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
 Add (optional) --enable-blink-features=ShadowDOMV0,CustomElementsV0 when starting Chrome
 then localhost:8092

-Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
+Or use Visual Studio Code https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01

 ## Resources
 - https://javascript-tools-guide.readthedocs.io/introduction/index.html
 - https://github.com/Adobe-CEP/Getting-Started-guides
@@ -1,32 +1,31 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0" encoding="UTF-8"?>
 <ExtensionList>
-    <Extension Id="com.openpype.AE.panel">
+    <Extension Id="io.ynput.AE.panel">
         <HostList>

            <!-- Comment Host tags according to the apps you want your panel to support -->

            <!-- Photoshop -->
            <Host Name="PHXS" Port="8088"/>

            <!-- Illustrator -->
            <Host Name="ILST" Port="8089"/>

            <!-- InDesign -->
            <Host Name="IDSN" Port="8090" />

            <!-- Premiere -->
            <Host Name="PPRO" Port="8091" />

            <!-- AfterEffects -->
            <Host Name="AEFT" Port="8092" />

            <!-- PRELUDE -->
            <Host Name="PRLD" Port="8093" />

            <!-- FLASH Pro -->
            <Host Name="FLPR" Port="8094" />

         </HostList>
     </Extension>
 </ExtensionList>
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<ExtensionManifest Version="8.0" ExtensionBundleId="com.openpype.AE.panel" ExtensionBundleVersion="1.0.27"
-                   ExtensionBundleName="com.openpype.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+<ExtensionManifest Version="8.0" ExtensionBundleId="io.ynput.AE.panel" ExtensionBundleVersion="1.1.0"
+                   ExtensionBundleName="io.ynput.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
 <ExtensionList>
-    <Extension Id="com.openpype.AE.panel" Version="1.0" />
+    <Extension Id="io.ynput.AE.panel" Version="1.0" />
 </ExtensionList>
 <ExecutionEnvironment>
 <HostList>
@@ -38,7 +38,7 @@
 </RequiredRuntimeList>
 </ExecutionEnvironment>
 <DispatchInfoList>
-    <Extension Id="com.openpype.AE.panel">
+    <Extension Id="io.ynput.AE.panel">
     <DispatchInfo >
     <Resources>
     <MainPath>./index.html</MainPath>
@@ -49,7 +49,7 @@
 </Lifecycle>
 <UI>
 <Type>Panel</Type>
-<Menu>OpenPype</Menu>
+<Menu>AYON</Menu>
 <Geometry>
 <Size>
 <Height>200</Height>
@@ -66,7 +66,7 @@
 </Geometry>
 <Icons>
-<Icon Type="Normal">./icons/iconNormal.png</Icon>
+<Icon Type="Normal">./icons/ayon_logo.png</Icon>
 <Icon Type="RollOver">./icons/iconRollover.png</Icon>
 <Icon Type="Disabled">./icons/iconDisabled.png</Icon>
 <Icon Type="DarkNormal">./icons/iconDarkNormal.png</Icon>
BIN  openpype/hosts/aftereffects/api/extension/icons/ayon_logo.png  (new file, 3.5 KiB)
BIN  openpype/hosts/aftereffects/api/panel.png  (13 KiB -> 16 KiB)
BIN  openpype/hosts/aftereffects/api/panel_failure.png  (new file, 13 KiB)
@@ -56,16 +56,15 @@ class RenderCreator(Creator):
         use_composition_name = (pre_create_data.get("use_composition_name") or
                                 len(comps) > 1)
         for comp in comps:
-            composition_name = re.sub(
-                "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
-                "",
-                comp.name
-            )
             if use_composition_name:
                 if "{composition}" not in subset_name_from_ui.lower():
                     subset_name_from_ui += "{Composition}"

+            composition_name = re.sub(
+                "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
+                "",
+                comp.name
+            )
+
             dynamic_fill = prepare_template_data({"composition":
                                                   composition_name})
             subset_name = subset_name_from_ui.format(**dynamic_fill)
@@ -81,6 +80,8 @@ class RenderCreator(Creator):
                 inst.subset_name))

         data["members"] = [comp.id]
+        data["orig_comp_name"] = composition_name
+
         new_instance = CreatedInstance(self.family, subset_name, data,
                                        self)
         if "farm" in pre_create_data:
@@ -88,7 +89,7 @@ class RenderCreator(Creator):
             new_instance.creator_attributes["farm"] = use_farm

             review = pre_create_data["mark_for_review"]
-            new_instance.creator_attributes["mark_for_review"] = review
+            new_instance.creator_attributes["mark_for_review"] = review

             api.get_stub().imprint(new_instance.id,
                                    new_instance.data_to_store())
@@ -150,16 +151,18 @@ class RenderCreator(Creator):
             subset_change.new_value)

     def remove_instances(self, instances):
+        """Removes metadata and renames to original comp name if available."""
         for instance in instances:
             self._remove_instance_from_context(instance)
             self.host.remove_instance(instance)

             subset = instance.data["subset"]
             comp_id = instance.data["members"][0]
             comp = api.get_stub().get_item(comp_id)
+            orig_comp_name = instance.data.get("orig_comp_name")
             if comp:
                 new_comp_name = comp.name.replace(subset, '')
                 if not new_comp_name:
-                    new_comp_name = "dummyCompName"
+                    if orig_comp_name:
+                        new_comp_name = orig_comp_name
+                    else:
+                        new_comp_name = "dummyCompName"
                 api.get_stub().rename_item(comp_id,
                                            new_comp_name)
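The two `RenderCreator` hunks above form a pair: `create()` now stores the sanitized composition name under `orig_comp_name`, and `remove_instances()` reads it back so a removed instance can restore the composition's original name instead of falling back to `dummyCompName`. A minimal sketch of the roundtrip, with plain dicts standing in for the After Effects stub objects:

```python
# Plain-dict stand-ins for the AE stub objects used in the hunks above.
instance_data = {"subset": "renderMainComp", "members": [101]}

# create(): remember the name the composition had before it was renamed.
instance_data["orig_comp_name"] = "MainComp"

# remove_instances(): strip the subset from the current comp name and,
# if nothing is left, fall back to the stored original.
comp_name = "renderMainComp"
new_comp_name = comp_name.replace(instance_data["subset"], "")
if not new_comp_name:
    new_comp_name = instance_data.get("orig_comp_name") or "dummyCompName"

print(new_comp_name)  # -> "MainComp"
```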
@@ -60,8 +60,9 @@ class ExtractLocalRender(publish.Extractor):
             first_repre = not representations
             if instance.data["review"] and first_repre:
                 repre_data["tags"] = ["review"]
-                thumbnail_path = os.path.join(staging_dir, files[0])
-                instance.data["thumbnailSource"] = thumbnail_path
+                # TODO return back when Extract from source same as regular
+                # thumbnail_path = os.path.join(staging_dir, files[0])
+                # instance.data["thumbnailSource"] = thumbnail_path

             representations.append(repre_data)
@@ -26,6 +26,10 @@ class ExtractThumbnail(publish.Extractor):
     def process(self, instance):
         self.log.debug("Extracting capture..")

+        if instance.data.get("thumbnailSource"):
+            self.log.debug("Thumbnail source found, skipping...")
+            return
+
         stagingdir = self.staging_dir(instance)
         asset_name = instance.data["assetEntity"]["name"]
         subset = instance.data["subset"]
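The two hunks above cooperate: `ExtractLocalRender` stops setting `thumbnailSource` for now (the lines are kept as a TODO), while `ExtractThumbnail` learns to skip its own capture whenever an earlier extractor has already provided a source. A sketch of the guard, with a dict standing in for the pyblish instance:

```python
# Dict stand-in for a pyblish instance; real plugins receive an Instance.
def extract_thumbnail(data):
    if data.get("thumbnailSource"):
        print("Thumbnail source found, skipping...")
        return
    print("capturing viewport thumbnail...")


extract_thumbnail({"thumbnailSource": "/tmp/render.0001.png"})  # skips
extract_thumbnail({})                                           # captures
```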
@@ -1,3 +1,4 @@
+import os
 import sys

 from qtpy import QtWidgets, QtCore, QtGui

@@ -18,6 +19,10 @@ from openpype.resources import get_openpype_icon_filepath
 from .pipeline import FusionEventHandler
 from .pulse import FusionPulse

+
+MENU_LABEL = os.environ["AVALON_LABEL"]
+

 self = sys.modules[__name__]
 self.menu = None

@@ -26,7 +31,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
     def __init__(self, *args, **kwargs):
         super(OpenPypeMenu, self).__init__(*args, **kwargs)

-        self.setObjectName("OpenPypeMenu")
+        self.setObjectName(f"{MENU_LABEL}Menu")

         icon_path = get_openpype_icon_filepath()
         icon = QtGui.QIcon(icon_path)

@@ -41,7 +46,7 @@ class OpenPypeMenu(QtWidgets.QWidget):
             | QtCore.Qt.WindowStaysOnTopHint
         )
         self.render_mode_widget = None
-        self.setWindowTitle("OpenPype")
+        self.setWindowTitle(MENU_LABEL)

         asset_label = QtWidgets.QLabel("Context", self)
         asset_label.setStyleSheet(
openpype/hosts/fusion/deploy/ayon/Config/menu.fu  (new file, 60 lines)

@@ -0,0 +1,60 @@
{
    Action
    {
        ID = "AYON_Menu",
        Category = "AYON",
        Name = "AYON Menu",

        Targets =
        {
            Composition =
            {
                Execute = _Lua [=[
                    local scriptPath = app:MapPath("AYON:../MenuScripts/launch_menu.py")
                    if bmd.fileexists(scriptPath) == false then
                        print("[AYON Error] Can't run file: " .. scriptPath)
                    else
                        target:RunScript(scriptPath)
                    end
                ]=],
            },
        },
    },
    Action
    {
        ID = "AYON_Install_PySide2",
        Category = "AYON",
        Name = "Install PySide2",

        Targets =
        {
            Composition =
            {
                Execute = _Lua [=[
                    local scriptPath = app:MapPath("AYON:../MenuScripts/install_pyside2.py")
                    if bmd.fileexists(scriptPath) == false then
                        print("[AYON Error] Can't run file: " .. scriptPath)
                    else
                        target:RunScript(scriptPath)
                    end
                ]=],
            },
        },
    },
    Menus
    {
        Target = "ChildFrame",

        Before "Help"
        {
            Sub "AYON"
            {
                "AYON_Menu{}",
                "_",
                Sub "Admin" {
                    "AYON_Install_PySide2{}"
                }
            }
        },
    },
}
openpype/hosts/fusion/deploy/ayon/fusion_shared.prefs  (new file, 19 lines)

@@ -0,0 +1,19 @@
{
    Locked = true,
    Global = {
        Paths = {
            Map = {
                ["AYON:"] = "$(OPENPYPE_FUSION)/deploy/ayon",
                ["Config:"] = "UserPaths:Config;AYON:Config",
                ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
            },
        },
        Script = {
            PythonVersion = 3,
            Python3Forced = true
        },
        UserInterface = {
            Language = "en_US"
        },
    },
}
@@ -10,7 +10,7 @@
             Composition =
             {
                 Execute = _Lua [=[
-                    local scriptPath = app:MapPath("OpenPype:MenuScripts/openpype_menu.py")
+                    local scriptPath = app:MapPath("OpenPype:../MenuScripts/launch_menu.py")
                     if bmd.fileexists(scriptPath) == false then
                         print("[OpenPype Error] Can't run file: " .. scriptPath)
                     else

@@ -31,7 +31,7 @@
             Composition =
             {
                 Execute = _Lua [=[
-                    local scriptPath = app:MapPath("OpenPype:MenuScripts/install_pyside2.py")
+                    local scriptPath = app:MapPath("OpenPype:../MenuScripts/install_pyside2.py")
                     if bmd.fileexists(scriptPath) == false then
                         print("[OpenPype Error] Can't run file: " .. scriptPath)
                     else
@@ -3,7 +3,7 @@ Locked = true,
 Global = {
     Paths = {
         Map = {
-            ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
+            ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy/openpype",
             ["Config:"] = "UserPaths:Config;OpenPype:Config",
             ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
         },
@@ -2,6 +2,7 @@ import os
 import shutil
 import platform
 from pathlib import Path
+from openpype import AYON_SERVER_ENABLED
 from openpype.hosts.fusion import (
     FUSION_HOST_DIR,
     FUSION_VERSIONS_DICT,

@@ -161,6 +162,13 @@ class FusionCopyPrefsPrelaunch(PreLaunchHook):
         # profile directory variables to customize Fusion
         # to define where it can read custom scripts and tools from
         master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
-        master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
+
+        if AYON_SERVER_ENABLED:
+            master_prefs = Path(
+                FUSION_HOST_DIR, "deploy", "ayon", "fusion_shared.prefs")
+        else:
+            master_prefs = Path(
+                FUSION_HOST_DIR, "deploy", "openpype", "fusion_shared.prefs")
+
         self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
         self.launch_context.env[master_prefs_variable] = str(master_prefs)
@@ -14,7 +14,7 @@ from openpype.pipeline import (
     legacy_io,
     Creator as NewCreator,
     CreatedInstance,
-    Anatomy
+    Anatomy,
 )
@@ -27,28 +27,21 @@ class CreateSaver(NewCreator):
     description = "Fusion Saver to generate image sequence"
     icon = "fa5.eye"

-    instance_attributes = [
-        "reviewable"
-    ]
+    instance_attributes = ["reviewable"]
+    image_format = "exr"

     # TODO: This should be renamed together with Nuke so it is aligned
     temp_rendering_path_template = (
-        "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}")
+        "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}"
+    )

     def create(self, subset_name, instance_data, pre_create_data):
-        self.pass_pre_attributes_to_instance(
-            instance_data,
-            pre_create_data
-        )
+        self.pass_pre_attributes_to_instance(instance_data, pre_create_data)

-        instance_data.update({
-            "id": "pyblish.avalon.instance",
-            "subset": subset_name
-        })
+        instance_data.update(
+            {"id": "pyblish.avalon.instance", "subset": subset_name}
+        )

         # TODO: Add pre_create attributes to choose file format?
         file_format = "OpenEXRFormat"

         comp = get_current_comp()
         with comp_lock_and_undo_chunk(comp):
             args = (-32768, -32768)  # Magical position numbers
@@ -56,19 +49,6 @@ class CreateSaver(NewCreator):

         self._update_tool_with_data(saver, data=instance_data)

-        saver["OutputFormat"] = file_format
-
-        # Check file format settings are available
-        if saver[file_format] is None:
-            raise RuntimeError(
-                f"File format is not set to {file_format}, this is a bug"
-            )
-
-        # Set file format attributes
-        saver[file_format]["Depth"] = 0  # Auto | float16 | float32
-        # TODO Is this needed?
-        saver[file_format]["SaveAlpha"] = 1
-
         # Register the CreatedInstance
         instance = CreatedInstance(
             family=self.family,
@@ -140,8 +120,15 @@ class CreateSaver(NewCreator):
             return

         original_subset = tool.GetData("openpype.subset")
+        original_format = tool.GetData(
+            "openpype.creator_attributes.image_format"
+        )
+
         subset = data["subset"]
-        if original_subset != subset:
+        if (
+            original_subset != subset
+            or original_format != data["creator_attributes"]["image_format"]
+        ):
             self._configure_saver_tool(data, tool, subset)

     def _configure_saver_tool(self, data, tool, subset):
@@ -151,17 +138,17 @@ class CreateSaver(NewCreator):
         anatomy = Anatomy()
         frame_padding = anatomy.templates["frame_padding"]

+        # get output format
+        ext = data["creator_attributes"]["image_format"]
+
         # Subset change detected
         workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
-        formatting_data.update({
-            "workdir": workdir,
-            "frame": "0" * frame_padding,
-            "ext": "exr"
-        })
+        formatting_data.update(
+            {"workdir": workdir, "frame": "0" * frame_padding, "ext": ext}
+        )

         # build file path to render
-        filepath = self.temp_rendering_path_template.format(
-            **formatting_data)
+        filepath = self.temp_rendering_path_template.format(**formatting_data)

         comp = get_current_comp()
         tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))
@@ -201,7 +188,8 @@ class CreateSaver(NewCreator):
         attr_defs = [
             self._get_render_target_enum(),
             self._get_reviewable_bool(),
-            self._get_frame_range_enum()
+            self._get_frame_range_enum(),
+            self._get_image_format_enum(),
         ]
         return attr_defs

@@ -209,11 +197,7 @@ class CreateSaver(NewCreator):
         """Settings for publish page"""
         return self.get_pre_create_attr_defs()

-    def pass_pre_attributes_to_instance(
-        self,
-        instance_data,
-        pre_create_data
-    ):
+    def pass_pre_attributes_to_instance(self, instance_data, pre_create_data):
         creator_attrs = instance_data["creator_attributes"] = {}
         for pass_key in pre_create_data.keys():
             creator_attrs[pass_key] = pre_create_data[pass_key]

@@ -236,13 +220,13 @@ class CreateSaver(NewCreator):
         frame_range_options = {
             "asset_db": "Current asset context",
             "render_range": "From render in/out",
-            "comp_range": "From composition timeline"
+            "comp_range": "From composition timeline",
         }

         return EnumDef(
             "frame_range_source",
             items=frame_range_options,
-            label="Frame range source"
+            label="Frame range source",
         )

     def _get_reviewable_bool(self):

@@ -252,20 +236,33 @@ class CreateSaver(NewCreator):
             label="Review",
         )

+    def _get_image_format_enum(self):
+        image_format_options = ["exr", "tga", "tif", "png", "jpg"]
+        return EnumDef(
+            "image_format",
+            items=image_format_options,
+            default=self.image_format,
+            label="Output Image Format",
+        )
+
     def apply_settings(self, project_settings):
         """Method called on initialization of plugin to apply settings."""

         # plugin settings
-        plugin_settings = (
-            project_settings["fusion"]["create"][self.__class__.__name__]
-        )
+        plugin_settings = project_settings["fusion"]["create"][
+            self.__class__.__name__
+        ]

         # individual attributes
         self.instance_attributes = plugin_settings.get(
-            "instance_attributes") or self.instance_attributes
-        self.default_variants = plugin_settings.get(
-            "default_variants") or self.default_variants
-        self.temp_rendering_path_template = (
-            plugin_settings.get("temp_rendering_path_template")
-            or self.temp_rendering_path_template
+            "instance_attributes", self.instance_attributes
         )
+        self.default_variants = plugin_settings.get(
+            "default_variants", self.default_variants
+        )
+        self.temp_rendering_path_template = plugin_settings.get(
+            "temp_rendering_path_template", self.temp_rendering_path_template
+        )
+        self.image_format = plugin_settings.get(
+            "image_format", self.image_format
+        )
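The `apply_settings` rewrite above changes behavior, not just layout: `plugin_settings.get(key) or default` falls back whenever the stored value is falsy (empty list, empty string, 0), while `plugin_settings.get(key, default)` falls back only when the key is absent. The new form therefore lets studios deliberately configure empty values:

```python
plugin_settings = {"instance_attributes": []}  # deliberately emptied
default = ["reviewable"]

old_style = plugin_settings.get("instance_attributes") or default
new_style = plugin_settings.get("instance_attributes", default)

print(old_style)  # ['reviewable']  -- the empty setting is ignored
print(new_style)  # []              -- the empty setting is respected
```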
@@ -146,11 +146,15 @@ class FusionRenderLocal(
         staging_dir = os.path.dirname(path)

+        files = [os.path.basename(f) for f in expected_files]
+        if len(expected_files) == 1:
+            files = files[0]
+
         repre = {
             "name": ext[1:],
             "ext": ext[1:],
             "frameStart": f"%0{padding}d" % start,
-            "files": [os.path.basename(f) for f in expected_files],
+            "files": files,
             "stagingDir": staging_dir,
         }
@@ -121,8 +121,8 @@ def get_id_required_nodes():
     return list(nodes)


-def get_output_parameter(node):
-    """Return the render output parameter name of the given node
+def get_export_parameter(node):
+    """Return the export output parameter of the given node

     Example:
         root = hou.node("/obj")

@@ -137,13 +137,70 @@ def get_output_parameter(node):
     hou.Parm

     """
-    node_type = node.type().description()
-    if node_type == "geometry":
+    node_type = node.type().name()
+
+    # Ensures the proper Take is selected for each ROP to retrieve the
+    # correct ifd
+    try:
+        rop_take = hou.takes.findTake(node.parm("take").eval())
+        if rop_take is not None:
+            hou.takes.setCurrentTake(rop_take)
+    except AttributeError:
+        # hou object doesn't always have the 'takes' attribute
+        pass
+
+    if node_type == "Mantra" and node.parm("soho_outputmode").eval():
+        return node.parm("soho_diskfile")
+    elif node_type == "Alfred":
+        return node.parm("alf_diskfile")
+    elif (node_type == "RenderMan" or node_type == "RenderMan RIS"):
+        pre_ris22 = node.parm("rib_outputmode") and \
+            node.parm("rib_outputmode").eval()
+        ris22 = node.parm("diskfile") and node.parm("diskfile").eval()
+        if pre_ris22 or ris22:
+            return node.parm("soho_diskfile")
+    elif node_type == "Redshift" and node.parm("RS_archive_enable").eval():
+        return node.parm("RS_archive_file")
+    elif node_type == "Wedge" and node.parm("driver").eval():
+        return get_export_parameter(node.node(node.parm("driver").eval()))
+    elif node_type == "Arnold":
+        return node.parm("ar_ass_file")
+    elif node_type == "Alembic" and node.parm("use_sop_path").eval():
+        return node.parm("sop_path")
+    elif node_type == "Shotgun Mantra" and node.parm("soho_outputmode").eval():
+        return node.parm("sgtk_soho_diskfile")
+    elif node_type == "Shotgun Alembic" and node.parm("use_sop_path").eval():
+        return node.parm("sop_path")
+    elif node.type().nameWithCategory() == "Driver/vray_renderer":
+        return node.parm("render_export_filepath")
+
+    raise TypeError("Node type '%s' not supported" % node_type)
+
+
+def get_output_parameter(node):
+    """Return the render output parameter of the given node
+
+    Example:
+        root = hou.node("/obj")
+        my_alembic_node = root.createNode("alembic")
+        get_output_parameter(my_alembic_node)
+        # Result: "output"
+
+    Args:
+        node(hou.Node): node instance
+
+    Returns:
+        hou.Parm
+
+    """
+    node_type = node.type().description()
+    category = node.type().category().name()
+
+    # Figure out which type of node is being rendered
+    if node_type == "Geometry" or node_type == "Filmbox FBX" or \
+            (node_type == "ROP Output Driver" and category == "Sop"):
         return node.parm("sopoutput")
     elif node_type == "alembic":
         return node.parm("filename")
-    elif node_type == "comp":
+    elif node_type == "Composite":
         return node.parm("copoutput")
     elif node_type == "opengl":
         return node.parm("picture")

@@ -155,6 +212,15 @@ def get_output_parameter(node):
     elif node_type == "ifd":
         if node.evalParm("soho_outputmode"):
             return node.parm("soho_diskfile")
+    elif node_type == "Octane":
+        return node.parm("HO_img_fileName")
+    elif node_type == "Fetch":
+        inner_node = node.node(node.parm("source").eval())
+        if inner_node:
+            return get_output_parameter(inner_node)
+    elif node.type().nameWithCategory() == "Driver/vray_renderer":
+        return node.parm("SettingsOutput_img_file_path")

     raise TypeError("Node type '%s' not supported" % node_type)
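With the split above, `get_output_parameter` answers "where does this ROP write its final output?" while the new `get_export_parameter` answers "where does it write its intermediate scene file (.ass/.ifd/.vrscene)?". A usage sketch; this only runs inside a Houdini session where `hou` is importable, and the exact parameter name varies by node type:

```python
import hou  # available only inside Houdini

out = hou.node("/out")
alembic_rop = out.createNode("alembic")

# Where will this ROP write its final output?
parm = get_output_parameter(alembic_rop)
print(parm.name())  # e.g. "filename" for an Alembic ROP

# Unsupported node types raise TypeError, so callers can fail loudly
# instead of silently publishing nothing.
```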
@@ -13,6 +13,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
     # Default extension
     ext = "exr"

+    # Default to split export and render jobs
+    export_job = True
+
     def create(self, subset_name, instance_data, pre_create_data):
         import hou

@@ -48,6 +51,15 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             "ar_exr_half_precision": 1   # half precision
         }

+        if pre_create_data.get("export_job"):
+            ass_filepath = \
+                "{export_dir}{subset_name}/{subset_name}.$F4.ass".format(
+                    export_dir=hou.text.expandString("$HIP/pyblish/ass/"),
+                    subset_name=subset_name,
+                )
+            parms["ar_ass_export_enable"] = 1
+            parms["ar_ass_file"] = ass_filepath
+
         instance_node.setParms(parms)

         # Lock any parameters in this list

@@ -66,6 +78,9 @@ class CreateArnoldRop(plugin.HoudiniCreator):
             BoolDef("farm",
                     label="Submitting to Farm",
                     default=True),
+            BoolDef("export_job",
+                    label="Split export and render jobs",
+                    default=self.export_job),
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
@@ -12,6 +12,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
     family = "mantra_rop"
     icon = "magic"

+    # Default to split export and render jobs
+    export_job = True
+
     def create(self, subset_name, instance_data, pre_create_data):
         import hou  # noqa

@@ -44,6 +47,15 @@ class CreateMantraROP(plugin.HoudiniCreator):
             "vm_picture": filepath,
         }

+        if pre_create_data.get("export_job"):
+            ifd_filepath = \
+                "{export_dir}{subset_name}/{subset_name}.$F4.ifd".format(
+                    export_dir=hou.text.expandString("$HIP/pyblish/ifd/"),
+                    subset_name=subset_name,
+                )
+            parms["soho_outputmode"] = 1
+            parms["soho_diskfile"] = ifd_filepath
+
         if self.selected_nodes:
             # If camera found in selection
             # we will use as render camera

@@ -78,6 +90,9 @@ class CreateMantraROP(plugin.HoudiniCreator):
             BoolDef("farm",
                     label="Submitting to Farm",
                     default=True),
+            BoolDef("export_job",
+                    label="Split export and render jobs",
+                    default=self.export_job),
             EnumDef("image_format",
                     image_format_enum,
                     default="exr",
@@ -16,6 +16,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
     icon = "magic"
     ext = "exr"

+    # Default to split export and render jobs
+    export_job = True
+
     def create(self, subset_name, instance_data, pre_create_data):

         instance_data.pop("active", None)

@@ -52,6 +55,17 @@ class CreateVrayROP(plugin.HoudiniCreator):
             "SettingsEXR_bits_per_channel": "16"   # half precision
         }

+        if pre_create_data.get("export_job"):
+            scene_filepath = \
+                "{export_dir}{subset_name}/{subset_name}.$F4.vrscene".format(
+                    export_dir=hou.text.expandString("$HIP/pyblish/vrscene/"),
+                    subset_name=subset_name,
+                )
+            # Setting render_export_mode to "2" because that's for
+            # "Export only" ("1" is for "Export & Render")
+            parms["render_export_mode"] = "2"
+            parms["render_export_filepath"] = scene_filepath
+
         if self.selected_nodes:
             # set up the render camera from the selected node
             camera = None

@@ -140,6 +154,9 @@ class CreateVrayROP(plugin.HoudiniCreator):
             BoolDef("farm",
                     label="Submitting to Farm",
                     default=True),
+            BoolDef("export_job",
+                    label="Split export and render jobs",
+                    default=self.export_job),
             EnumDef("image_format",
                     image_format_enum,
                     default=self.ext,
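All three creators above (Arnold, Mantra, V-Ray) gain the same `export_job` pre-create toggle: when enabled, the ROP first writes a renderer scene file under `$HIP/pyblish/...`, so a farm can split the work into a cheap export job and a render job. A sketch of the path building each hunk uses, with `expand_string` standing in for `hou.text.expandString` so it runs outside Houdini:

```python
def expand_string(path):
    # Stand-in for hou.text.expandString; pretend $HIP has been expanded.
    return path.replace("$HIP", "/proj/shot010/houdini")


subset_name = "renderLightingMain"
ass_filepath = "{export_dir}{subset_name}/{subset_name}.$F4.ass".format(
    export_dir=expand_string("$HIP/pyblish/ass/"),
    subset_name=subset_name,
)
# -> /proj/shot010/houdini/pyblish/ass/renderLightingMain/renderLightingMain.$F4.ass
# ($F4 is left for Houdini to expand per frame at export time)
print(ass_filepath)
```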
@@ -40,6 +40,25 @@ class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "ar_picture")
         render_products = []

+        # Store whether we are splitting the render job (export + render)
+        export_job = bool(rop.parm("ar_ass_export_enable").eval())
+        instance.data["exportJob"] = export_job
+        export_prefix = None
+        export_products = []
+        if export_job:
+            export_prefix = evalParmNoFrame(
+                rop, "ar_ass_file", pad_character="0"
+            )
+            beauty_export_product = self.get_render_product_name(
+                prefix=export_prefix,
+                suffix=None)
+            export_products.append(beauty_export_product)
+            self.log.debug(
+                "Found export product: {}".format(beauty_export_product)
+            )
+            instance.data["ifdFile"] = beauty_export_product
+            instance.data["exportFiles"] = list(export_products)
+
         # Default beauty AOV
         beauty_product = self.get_render_product_name(prefix=default_prefix,
                                                       suffix=None)
@@ -14,18 +14,13 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
     hosts = ["houdini"]
     targets = ["local", "remote"]
     label = "Collect Chunk Size"
-    chunkSize = 999999
+    chunk_size = 999999

     def process(self, instance):
         # need to get the chunk size info from the setting
         attr_values = self.get_attr_values_from_data(instance.data)
         instance.data["chunkSize"] = attr_values.get("chunkSize")

-    @classmethod
-    def apply_settings(cls, project_settings):
-        project_setting = project_settings["houdini"]["publish"]["CollectChunkSize"]  # noqa
-        cls.chunkSize = project_setting["chunk_size"]
-
     @classmethod
     def get_attribute_defs(cls):
         return [

@@ -33,7 +28,6 @@ class CollectChunkSize(pyblish.api.InstancePlugin,
                     minimum=1,
                     maximum=999999,
                     decimals=0,
-                    default=cls.chunkSize,
+                    default=cls.chunk_size,
                     label="Frame Per Task")
         ]
@@ -44,6 +44,25 @@ class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin):
         default_prefix = evalParmNoFrame(rop, "vm_picture")
         render_products = []

+        # Store whether we are splitting the render job (export + render)
+        export_job = bool(rop.parm("soho_outputmode").eval())
+        instance.data["exportJob"] = export_job
+        export_prefix = None
+        export_products = []
+        if export_job:
+            export_prefix = evalParmNoFrame(
+                rop, "soho_diskfile", pad_character="0"
+            )
+            beauty_export_product = self.get_render_product_name(
+                prefix=export_prefix,
+                suffix=None)
+            export_products.append(beauty_export_product)
+            self.log.debug(
+                "Found export product: {}".format(beauty_export_product)
+            )
+            instance.data["ifdFile"] = beauty_export_product
+            instance.data["exportFiles"] = list(export_products)
+
         # Default beauty AOV
         beauty_product = self.get_render_product_name(
             prefix=default_prefix, suffix=None
@@ -45,7 +45,26 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         render_products = []
         # TODO: add render elements if render element

-        beauty_product = self.get_beauty_render_product(default_prefix)
+        # Store whether we are splitting the render job in an export + render
+        export_job = rop.parm("render_export_mode").eval() == "2"
+        instance.data["exportJob"] = export_job
+        export_prefix = None
+        export_products = []
+        if export_job:
+            export_prefix = evalParmNoFrame(
+                rop, "render_export_filepath", pad_character="0"
+            )
+            beauty_export_product = self.get_render_product_name(
+                prefix=export_prefix,
+                suffix=None)
+            export_products.append(beauty_export_product)
+            self.log.debug(
+                "Found export product: {}".format(beauty_export_product)
+            )
+            instance.data["ifdFile"] = beauty_export_product
+            instance.data["exportFiles"] = list(export_products)
+
+        beauty_product = self.get_render_product_name(default_prefix)
         render_products.append(beauty_product)
         files_by_aov = {
             "RGB Color": self.generate_expected_files(instance,

@@ -79,7 +98,7 @@ class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin):
         instance.data["colorspaceDisplay"] = colorspace_data["display"]
         instance.data["colorspaceView"] = colorspace_data["view"]

-    def get_beauty_render_product(self, prefix, suffix="<reName>"):
+    def get_render_product_name(self, prefix, suffix="<reName>"):
         """Return the beauty output filename if render element enabled
         """
         # Remove aov suffix from the product: `prefix.aov_suffix` -> `prefix`
@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())

@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())

@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())

@@ -2,10 +2,12 @@
 """OpenPype startup script."""
 from openpype.pipeline import install_host
 from openpype.hosts.houdini.api import HoudiniHost
+from openpype import AYON_SERVER_ENABLED


 def main():
-    print("Installing OpenPype ...")
+    print("Installing {} ...".format(
+        "AYON" if AYON_SERVER_ENABLED else "OpenPype"))
     install_host(HoudiniHost())
@@ -511,3 +511,20 @@ def render_resolution(width, height):
     finally:
         rt.renderWidth = current_renderWidth
         rt.renderHeight = current_renderHeight
+
+
+@contextlib.contextmanager
+def suspended_refresh():
+    """Suspended refresh for scene and modify panel redraw."""
+    if is_headless():
+        yield
+        return
+    rt.disableSceneRedraw()
+    rt.suspendEditing()
+    try:
+        yield
+    finally:
+        rt.enableSceneRedraw()
+        rt.resumeEditing()
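`suspended_refresh` is a standard 3ds Max speed-up: viewport redraw and modify-panel editing are paused for the duration of a batch operation and always restored in the `finally` block, with a headless short-circuit so it stays safe under batch rendering. The usage pattern, as the extractors later in this diff adopt it (a sketch; `rt` is `pymxs.runtime` and is only importable inside 3ds Max):

```python
# Inside a 3ds Max session, wrapping an export with both context managers:
with suspended_refresh():
    with maintained_selection():
        rt.Select(node_list)
        rt.exportFile(path, rt.name("noPrompt"),
                      selectedOnly=True, using=rt.AlembicExport)
```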
@@ -198,8 +198,8 @@ def _render_preview_animation_max_pre_2024(
             res_width, res_height, filename=filepath
         )
         dib = rt.gw.getViewportDib()
-        dib_width = rt.renderWidth
-        dib_height = rt.renderHeight
+        dib_width = float(dib.width)
+        dib_height = float(dib.height)
         # aspect ratio
         viewportRatio = dib_width / dib_height
         renderRatio = float(res_width / res_height)
@@ -39,45 +39,41 @@ Note:
 """
 import os
 import pyblish.api
-from openpype.pipeline import publish
+from openpype.pipeline import publish, OptionalPyblishPluginMixin
 from pymxs import runtime as rt
 from openpype.hosts.max.api import maintained_selection
+from openpype.hosts.max.api.lib import suspended_refresh
+from openpype.lib import BoolDef


-class ExtractAlembic(publish.Extractor):
+class ExtractAlembic(publish.Extractor,
+                     OptionalPyblishPluginMixin):
     order = pyblish.api.ExtractorOrder
     label = "Extract Pointcache"
     hosts = ["max"]
     families = ["pointcache"]
+    optional = True

     def process(self, instance):
         start = instance.data["frameStartHandle"]
         end = instance.data["frameEndHandle"]

-        self.log.debug("Extracting pointcache ...")
+        if not self.is_active(instance.data):
+            return

         parent_dir = self.staging_dir(instance)
         file_name = "{name}.abc".format(**instance.data)
         path = os.path.join(parent_dir, file_name)

-        # We run the render
-        self.log.info("Writing alembic '%s' to '%s'" % (file_name, parent_dir))
-
-        rt.AlembicExport.ArchiveType = rt.name("ogawa")
-        rt.AlembicExport.CoordinateSystem = rt.name("maya")
-        rt.AlembicExport.StartFrame = start
-        rt.AlembicExport.EndFrame = end
-
-        with maintained_selection():
-            # select and export
-            node_list = instance.data["members"]
-            rt.Select(node_list)
-            rt.exportFile(
-                path,
-                rt.name("noPrompt"),
-                selectedOnly=True,
-                using=rt.AlembicExport,
-            )
+        with suspended_refresh():
+            self._set_abc_attributes(instance)
+            with maintained_selection():
+                # select and export
+                node_list = instance.data["members"]
+                rt.Select(node_list)
+                rt.exportFile(
+                    path,
+                    rt.name("noPrompt"),
+                    selectedOnly=True,
+                    using=rt.AlembicExport,
+                )

         if "representations" not in instance.data:
             instance.data["representations"] = []
@@ -89,3 +85,51 @@ class ExtractAlembic(publish.Extractor):
             "stagingDir": parent_dir,
         }
         instance.data["representations"].append(representation)
+
+    def _set_abc_attributes(self, instance):
+        start = instance.data["frameStartHandle"]
+        end = instance.data["frameEndHandle"]
+        attr_values = self.get_attr_values_from_data(instance.data)
+        custom_attrs = attr_values.get("custom_attrs", False)
+        if not custom_attrs:
+            self.log.debug(
+                "No Custom Attributes included in this abc export...")
+        rt.AlembicExport.ArchiveType = rt.Name("ogawa")
+        rt.AlembicExport.CoordinateSystem = rt.Name("maya")
+        rt.AlembicExport.StartFrame = start
+        rt.AlembicExport.EndFrame = end
+        rt.AlembicExport.CustomAttributes = custom_attrs
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            BoolDef("custom_attrs",
+                    label="Custom Attributes",
+                    default=False),
+        ]
+
+
+class ExtractCameraAlembic(ExtractAlembic):
+    """Extract Camera with AlembicExport."""
+
+    label = "Extract Alembic Camera"
+    families = ["camera"]
+
+
+class ExtractModel(ExtractAlembic):
+    """Extract Geometry in Alembic Format"""
+    label = "Extract Geometry (Alembic)"
+    families = ["model"]
+
+    def _set_abc_attributes(self, instance):
+        attr_values = self.get_attr_values_from_data(instance.data)
+        custom_attrs = attr_values.get("custom_attrs", False)
+        if not custom_attrs:
+            self.log.debug(
+                "No Custom Attributes included in this abc export...")
+        rt.AlembicExport.ArchiveType = rt.name("ogawa")
+        rt.AlembicExport.CoordinateSystem = rt.name("maya")
+        rt.AlembicExport.CustomAttributes = custom_attrs
+        rt.AlembicExport.UVs = True
+        rt.AlembicExport.VertexColors = True
+        rt.AlembicExport.PreserveInstances = True
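The restructuring above replaces two standalone plugins (deleted below) with subclasses: the base `ExtractAlembic.process()` now delegates renderer setup to a `_set_abc_attributes()` hook, which `ExtractModel` overrides to add UVs, vertex colors and instance preservation. A skeletal, runnable view of the template-method pattern in play (the prints stand in for the pymxs calls):

```python
class ExtractAlembic:
    def process(self, instance):
        self._set_abc_attributes(instance)  # hook for subclasses
        self._export(instance)

    def _set_abc_attributes(self, instance):
        print("base: frame range + coordinate system")

    def _export(self, instance):
        print("exporting", instance)


class ExtractModel(ExtractAlembic):
    def _set_abc_attributes(self, instance):
        print("model: also UVs, vertex colors, preserved instances")


ExtractModel().process("modelMain")
```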
@@ -1,64 +0,0 @@
-import os
-
-import pyblish.api
-from pymxs import runtime as rt
-
-from openpype.hosts.max.api import maintained_selection
-from openpype.pipeline import OptionalPyblishPluginMixin, publish
-
-
-class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin):
-    """Extract Camera with AlembicExport."""
-
-    order = pyblish.api.ExtractorOrder - 0.1
-    label = "Extract Alembic Camera"
-    hosts = ["max"]
-    families = ["camera"]
-    optional = True
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-        start = instance.data["frameStartHandle"]
-        end = instance.data["frameEndHandle"]
-
-        self.log.info("Extracting Camera ...")
-
-        stagingdir = self.staging_dir(instance)
-        filename = "{name}.abc".format(**instance.data)
-        path = os.path.join(stagingdir, filename)
-
-        # We run the render
-        self.log.info(f"Writing alembic '{filename}' to '{stagingdir}'")
-
-        rt.AlembicExport.ArchiveType = rt.Name("ogawa")
-        rt.AlembicExport.CoordinateSystem = rt.Name("maya")
-        rt.AlembicExport.StartFrame = start
-        rt.AlembicExport.EndFrame = end
-        rt.AlembicExport.CustomAttributes = True
-
-        with maintained_selection():
-            # select and export
-            node_list = instance.data["members"]
-            rt.Select(node_list)
-            rt.ExportFile(
-                path,
-                rt.Name("noPrompt"),
-                selectedOnly=True,
-                using=rt.AlembicExport,
-            )
-
-        self.log.info("Performing Extraction ...")
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            "name": "abc",
-            "ext": "abc",
-            "files": filename,
-            "stagingDir": stagingdir,
-            "frameStart": start,
-            "frameEnd": end,
-        }
-        instance.data["representations"].append(representation)
-        self.log.info(f"Extracted instance '{instance.name}' to: {path}")
@@ -20,13 +20,10 @@ class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin):
         if not self.is_active(instance.data):
             return

-        self.log.debug("Extracting Camera ...")
         stagingdir = self.staging_dir(instance)
         filename = "{name}.fbx".format(**instance.data)
-
         filepath = os.path.join(stagingdir, filename)
-        self.log.info(f"Writing fbx file '{filename}' to '{filepath}'")

         rt.FBXExporterSetParam("Animation", True)
         rt.FBXExporterSetParam("Cameras", True)
         rt.FBXExporterSetParam("AxisConversionMethod", "Animation")
@@ -26,7 +26,6 @@ class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin):
         filename = "{name}.max".format(**instance.data)

         max_path = os.path.join(stagingdir, filename)
-        self.log.info("Writing max file '%s' to '%s'" % (filename, max_path))

         if "representations" not in instance.data:
             instance.data["representations"] = []
@@ -1,63 +0,0 @@
-import os
-import pyblish.api
-from openpype.pipeline import publish, OptionalPyblishPluginMixin
-from pymxs import runtime as rt
-from openpype.hosts.max.api import maintained_selection
-
-
-class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin):
-    """
-    Extract Geometry in Alembic Format
-    """
-
-    order = pyblish.api.ExtractorOrder - 0.1
-    label = "Extract Geometry (Alembic)"
-    hosts = ["max"]
-    families = ["model"]
-    optional = True
-
-    def process(self, instance):
-        if not self.is_active(instance.data):
-            return
-
-        self.log.debug("Extracting Geometry ...")
-
-        stagingdir = self.staging_dir(instance)
-        filename = "{name}.abc".format(**instance.data)
-        filepath = os.path.join(stagingdir, filename)
-
-        # We run the render
-        self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir))
-
-        rt.AlembicExport.ArchiveType = rt.name("ogawa")
-        rt.AlembicExport.CoordinateSystem = rt.name("maya")
-        rt.AlembicExport.CustomAttributes = True
-        rt.AlembicExport.UVs = True
-        rt.AlembicExport.VertexColors = True
-        rt.AlembicExport.PreserveInstances = True
-
-        with maintained_selection():
-            # select and export
-            node_list = instance.data["members"]
-            rt.Select(node_list)
-            rt.exportFile(
-                filepath,
-                rt.name("noPrompt"),
-                selectedOnly=True,
-                using=rt.AlembicExport,
-            )
-
-        self.log.info("Performing Extraction ...")
-        if "representations" not in instance.data:
-            instance.data["representations"] = []
-
-        representation = {
-            "name": "abc",
-            "ext": "abc",
-            "files": filename,
-            "stagingDir": stagingdir,
-        }
-        instance.data["representations"].append(representation)
-        self.log.info(
-            "Extracted instance '%s' to: %s" % (instance.name, filepath)
-        )
@@ -20,12 +20,9 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
         if not self.is_active(instance.data):
             return

-        self.log.debug("Extracting Geometry ...")
-
         stagingdir = self.staging_dir(instance)
         filename = "{name}.fbx".format(**instance.data)
         filepath = os.path.join(stagingdir, filename)
-        self.log.info("Writing FBX '%s' to '%s'" % (filepath, stagingdir))

         rt.FBXExporterSetParam("Animation", False)
         rt.FBXExporterSetParam("Cameras", False)

@@ -46,7 +43,6 @@ class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin):
             using=rt.FBXEXP,
         )

-        self.log.info("Performing Extraction ...")
         if "representations" not in instance.data:
             instance.data["representations"] = []
@@ -3,6 +3,7 @@ import pyblish.api
 from openpype.pipeline import publish, OptionalPyblishPluginMixin
 from pymxs import runtime as rt
 from openpype.hosts.max.api import maintained_selection
+from openpype.hosts.max.api.lib import suspended_refresh
 from openpype.pipeline.publish import KnownPublishError


@@ -21,25 +22,21 @@ class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin):
         if not self.is_active(instance.data):
             return

-        self.log.debug("Extracting Geometry ...")
-
         stagingdir = self.staging_dir(instance)
         filename = "{name}.obj".format(**instance.data)
         filepath = os.path.join(stagingdir, filename)
-        self.log.info("Writing OBJ '%s' to '%s'" % (filepath, stagingdir))

-        self.log.info("Performing Extraction ...")
-        with maintained_selection():
-            # select and export
-            node_list = instance.data["members"]
-            rt.Select(node_list)
-            rt.exportFile(
-                filepath,
-                rt.name("noPrompt"),
-                selectedOnly=True,
-                using=rt.ObjExp,
-            )
+        with suspended_refresh():
+            with maintained_selection():
+                # select and export
+                node_list = instance.data["members"]
+                rt.Select(node_list)
+                rt.exportFile(
+                    filepath,
+                    rt.name("noPrompt"),
+                    selectedOnly=True,
+                    using=rt.ObjExp,
+                )
         if not os.path.exists(filepath):
             raise KnownPublishError(
                 "File {} wasn't produced by 3ds max, please check the logs.")
@@ -1,9 +1,12 @@
 import pyblish.api
+from pymxs import runtime as rt
 from openpype.pipeline import (
-    PublishValidationError,
     OptionalPyblishPluginMixin
 )
-from pymxs import runtime as rt
+from openpype.pipeline.publish import (
+    RepairAction,
+    PublishValidationError
+)
+from openpype.hosts.max.api.lib import reset_scene_resolution


@@ -16,6 +19,7 @@ class ValidateResolutionSetting(pyblish.api.InstancePlugin,
     hosts = ["max"]
     label = "Validate Resolution Setting"
     optional = True
+    actions = [RepairAction]

     def process(self, instance):
         if not self.is_active(instance.data):
@@ -9,7 +9,7 @@ from openpype.pipeline import (
 )
 from openpype.pipeline.load.utils import get_representation_path_from_context
 from openpype.pipeline.colorspace import (
-    get_imageio_colorspace_from_filepath,
+    get_imageio_file_rules_colorspace_from_filepath,
     get_imageio_config,
     get_imageio_file_rules
 )

@@ -285,10 +285,10 @@ class FileNodeLoader(load.LoaderPlugin):
         )

         path = get_representation_path_from_context(context)
-        colorspace = get_imageio_colorspace_from_filepath(
-            path=path,
-            host_name=host_name,
-            project_name=project_name,
+        colorspace = get_imageio_file_rules_colorspace_from_filepath(
+            path,
+            host_name,
+            project_name,
             config_data=config_data,
             file_rules=file_rules,
             project_settings=project_settings
@@ -371,7 +371,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                 continue
             for node in data["nodes"]:
                 lib.set_attribute(data["attribute"], data["values"][0], node)
-
         with lib.renderlayer(layer_node):

             # Repair animation must be enabled

@@ -392,13 +391,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
             if renderer != "renderman":
                 prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
                 fname_prefix = default_prefix
-                cmds.setAttr("{}.{}".format(node, prefix_attr),
-                             fname_prefix, type="string")
+                cmds.setAttr(prefix_attr, fname_prefix, type="string")

                 # Repair padding
                 padding_attr = RenderSettings.get_padding_attr(renderer)
-                cmds.setAttr("{}.{}".format(node, padding_attr),
-                             cls.DEFAULT_PADDING)
+                cmds.setAttr(padding_attr, cls.DEFAULT_PADDING)
             else:
                 # renderman handles stuff differently
                 cmds.setAttr("rmanGlobals.imageFileFormat",
||||
|
|
|
|||
|
|
@@ -260,7 +260,7 @@ def _install_menu():
        "Create...",
        lambda: host_tools.show_publisher(
            parent=(
                main_window if nuke.NUKE_VERSION_RELEASE >= 14 else None
                main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
            ),
            tab="create"
        )

@@ -271,7 +271,7 @@ def _install_menu():
        "Publish...",
        lambda: host_tools.show_publisher(
            parent=(
                main_window if nuke.NUKE_VERSION_RELEASE >= 14 else None
                main_window if nuke.NUKE_VERSION_MAJOR >= 14 else None
            ),
            tab="publish"
        )

@@ -21,6 +21,11 @@ from openpype.pipeline import (
    CreatedInstance,
    get_current_task_name
)
from openpype.pipeline.colorspace import (
    get_display_view_colorspace_name,
    get_colorspace_settings_from_publish_context,
    set_colorspace_data_to_representation
)
from openpype.lib.transcoding import (
    VIDEO_EXTENSIONS
)

@@ -612,7 +617,7 @@ class ExporterReview(object):

    def get_representation_data(
        self, tags=None, range=False,
        custom_tags=None
        custom_tags=None, colorspace=None
    ):
        """ Add representation data to self.data

@@ -652,6 +657,14 @@ class ExporterReview(object):
        if self.publish_on_farm:
            repre["tags"].append("publish_on_farm")

        # add colorspace data to representation
        if colorspace:
            set_colorspace_data_to_representation(
                repre,
                self.instance.context.data,
                colorspace=colorspace,
                log=self.log
            )
        self.data["representations"].append(repre)

    def get_imageio_baking_profile(self):

@@ -866,6 +879,13 @@ class ExporterReviewMov(ExporterReview):
        return path

    def generate_mov(self, farm=False, **kwargs):
        # colorspace data
        colorspace = None
        # get colorspace settings
        # get colorspace data from context
        config_data, _ = get_colorspace_settings_from_publish_context(
            self.instance.context.data)

        add_tags = []
        self.publish_on_farm = farm
        read_raw = kwargs["read_raw"]

@@ -951,6 +971,14 @@ class ExporterReviewMov(ExporterReview):
            # assign viewer
            dag_node["view"].setValue(viewer)

            if config_data:
                # convert display and view to colorspace
                colorspace = get_display_view_colorspace_name(
                    config_path=config_data["path"],
                    display=display,
                    view=viewer
                )

        self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... `{}`")
        # Write node
        write_node = nuke.createNode("Write")

@@ -996,9 +1024,10 @@ class ExporterReviewMov(ExporterReview):

        # ---------- generate representation data
        self.get_representation_data(
            tags=["review", "delete"] + add_tags,
            tags=["review", "need_thumbnail", "delete"] + add_tags,
            custom_tags=add_custom_tags,
            range=True
            range=True,
            colorspace=colorspace
        )

        self.log.debug("Representation... `{}`".format(self.data))

@@ -276,7 +276,7 @@ class ExtractSlateFrame(publish.Extractor):

        if not matching_repre:
            self.log.info(
                "Matching reresentation was not found."
                "Matching representation was not found."
                " Representation files were not filled with slate."
            )
            return

@@ -294,7 +294,7 @@ class ExtractSlateFrame(publish.Extractor):
        self.log.debug(
            "__ matching_repre: {}".format(pformat(matching_repre)))

        self.log.warning("Added slate frame to representation files")
        self.log.info("Added slate frame to representation files")

    def add_comment_slate_node(self, instance, node):

@@ -1,216 +0,0 @@
import sys
import os
import nuke
import pyblish.api

from openpype.pipeline import publish
from openpype.hosts.nuke import api as napi
from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings


# Python 2/3 compatibility
if sys.version_info[0] >= 3:
    unicode = str


class ExtractThumbnail(publish.Extractor):
    """Extracts movie and thumbnail with baked in luts

    must be run after extract_render_local.py

    """

    order = pyblish.api.ExtractorOrder + 0.011
    label = "Extract Thumbnail"

    families = ["review"]
    hosts = ["nuke"]

    # settings
    use_rendered = False
    bake_viewer_process = True
    bake_viewer_input_process = True
    nodes = {}
    reposition_nodes = None

    def process(self, instance):
        if instance.data.get("farm"):
            return

        with napi.maintained_selection():
            self.log.debug("instance: {}".format(instance))
            self.log.debug("instance.data[families]: {}".format(
                instance.data["families"]))

            if instance.data.get("bakePresets"):
                for o_name, o_data in instance.data["bakePresets"].items():
                    self.render_thumbnail(instance, o_name, **o_data)
            else:
                viewer_process_switches = {
                    "bake_viewer_process": True,
                    "bake_viewer_input_process": True
                }
                self.render_thumbnail(
                    instance, None, **viewer_process_switches)

    def render_thumbnail(self, instance, output_name=None, **kwargs):
        first_frame = instance.data["frameStartHandle"]
        last_frame = instance.data["frameEndHandle"]
        colorspace = instance.data["colorspace"]

        # find frame range and define middle thumb frame
        mid_frame = int((last_frame - first_frame) / 2)

        # solve output name if any is set
        output_name = output_name or ""

        bake_viewer_process = kwargs["bake_viewer_process"]
        bake_viewer_input_process_node = kwargs[
            "bake_viewer_input_process"]

        node = instance.data["transientData"]["node"]  # group node
        self.log.debug("Creating staging dir...")

        if "representations" not in instance.data:
            instance.data["representations"] = []

        staging_dir = os.path.normpath(
            os.path.dirname(instance.data['path']))

        instance.data["stagingDir"] = staging_dir

        self.log.debug(
            "StagingDir `{0}`...".format(instance.data["stagingDir"]))

        temporary_nodes = []

        # try to connect already rendered images
        previous_node = node
        collection = instance.data.get("collection", None)
        self.log.debug("__ collection: `{}`".format(collection))

        if collection:
            # get path
            fhead = collection.format("{head}")

            thumb_fname = list(collection)[mid_frame]
        else:
            fname = thumb_fname = os.path.basename(
                instance.data.get("path", None))
            fhead = os.path.splitext(fname)[0] + "."

        self.log.debug("__ fhead: `{}`".format(fhead))

        if "#" in fhead:
            fhead = fhead.replace("#", "")[:-1]

        path_render = os.path.join(
            staging_dir, thumb_fname).replace("\\", "/")
        self.log.debug("__ path_render: `{}`".format(path_render))

        if self.use_rendered and os.path.isfile(path_render):
            # check if file exist otherwise connect to write node
            rnode = nuke.createNode("Read")
            rnode["file"].setValue(path_render)
            rnode["colorspace"].setValue(colorspace)

            # turn it raw if none of baking is ON
            if all([
                not self.bake_viewer_input_process,
                not self.bake_viewer_process
            ]):
                rnode["raw"].setValue(True)

            temporary_nodes.append(rnode)
            previous_node = rnode

        if self.reposition_nodes is None:
            # [deprecated] create reformat node old way
            reformat_node = nuke.createNode("Reformat")
            ref_node = self.nodes.get("Reformat", None)
            if ref_node:
                for k, v in ref_node:
                    self.log.debug("k, v: {0}:{1}".format(k, v))
                    if isinstance(v, unicode):
                        v = str(v)
                    reformat_node[k].setValue(v)

            reformat_node.setInput(0, previous_node)
            previous_node = reformat_node
            temporary_nodes.append(reformat_node)
        else:
            # create reformat node new way
            for repo_node in self.reposition_nodes:
                node_class = repo_node["node_class"]
                knobs = repo_node["knobs"]
                node = nuke.createNode(node_class)
                set_node_knobs_from_settings(node, knobs)

                # connect in order
                node.setInput(0, previous_node)
                previous_node = node
                temporary_nodes.append(node)

        # only create colorspace baking if toggled on
        if bake_viewer_process:
            if bake_viewer_input_process_node:
                # get input process and connect it to baking
                ipn = napi.get_view_process_node()
                if ipn is not None:
                    ipn.setInput(0, previous_node)
                    previous_node = ipn
                    temporary_nodes.append(ipn)

            dag_node = nuke.createNode("OCIODisplay")
            dag_node.setInput(0, previous_node)
            previous_node = dag_node
            temporary_nodes.append(dag_node)

        thumb_name = "thumbnail"
        # only add output name and
        # if there are more than one bake preset
        if (
            output_name
            and len(instance.data.get("bakePresets", {}).keys()) > 1
        ):
            thumb_name = "{}_{}".format(output_name, thumb_name)

        # create write node
        write_node = nuke.createNode("Write")
        file = fhead[:-1] + thumb_name + ".jpg"
        thumb_path = os.path.join(staging_dir, file).replace("\\", "/")

        # add thumbnail to cleanup
        instance.context.data["cleanupFullPaths"].append(thumb_path)

        # make sure only one thumbnail path is set
        # and it is existing file
        instance_thumb_path = instance.data.get("thumbnailPath")
        if not instance_thumb_path or not os.path.isfile(instance_thumb_path):
            instance.data["thumbnailPath"] = thumb_path

        write_node["file"].setValue(thumb_path)
        write_node["file_type"].setValue("jpg")
        write_node["raw"].setValue(1)
        write_node.setInput(0, previous_node)
        temporary_nodes.append(write_node)

        repre = {
            'name': thumb_name,
            'ext': "jpg",
            "outputName": thumb_name,
            'files': file,
            "stagingDir": staging_dir,
            "tags": ["thumbnail", "publish_on_farm", "delete"]
        }
        instance.data["representations"].append(repre)

        # Render frames
        nuke.execute(write_node.name(), mid_frame, mid_frame)

        self.log.debug(
            "representations: {}".format(instance.data["representations"]))

        # Clean up
        for node in temporary_nodes:
            nuke.delete(node)

@@ -9,7 +9,7 @@ The Photoshop integration requires two components to work; `extension` and `serv
To install the extension download [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).

```
ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
ExManCmd /install {path to addon}/api/extension.zxp
```

### Server

@@ -17,16 +17,16 @@ ExManCmd /install {path to avalon-core}\avalon\photoshop\extension.zxp
The easiest way to get the server and Photoshop launch is with:

```
python -c ^"import avalon.photoshop;avalon.photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
python -c ^"import openpype.hosts.photoshop;openpype.hosts.photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
```

`avalon.photoshop.launch` launches the application and server, and also closes the server when Photoshop exits.

## Usage

The Photoshop extension can be found under `Window > Extensions > Avalon`. Once launched you should be presented with a panel like this:
The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this:




## Developing

@@ -37,7 +37,7 @@ When developing the extension you can load it [unsigned](https://github.com/Adob
When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).

```
ZXPSignCmd -selfSignedCert NA NA Avalon Avalon-Photoshop avalon extension.p12
ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12
ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon
```

@@ -1,9 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionList>
    <Extension Id="com.openpype.PS.panel">
    <Extension Id="io.ynput.PS.panel">
        <HostList>
            <Host Name="PHXS" Port="8078"/>
            <Host Name="FLPR" Port="8078"/>
        </HostList>
    </Extension>
</ExtensionList>
</ExtensionList>

@@ -1,7 +1,7 @@
<?xml version='1.0' encoding='UTF-8'?>
<ExtensionManifest ExtensionBundleId="com.openpype.PS.panel" ExtensionBundleVersion="1.0.12" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionManifest ExtensionBundleId="io.ynput.PS.panel" ExtensionBundleVersion="1.1.0" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <ExtensionList>
        <Extension Id="com.openpype.PS.panel" Version="1.0.1" />
        <Extension Id="io.ynput.PS.panel" Version="1.0.1" />
    </ExtensionList>
    <ExecutionEnvironment>
        <HostList>

@@ -16,7 +16,7 @@
        </RequiredRuntimeList>
    </ExecutionEnvironment>
    <DispatchInfoList>
        <Extension Id="com.openpype.PS.panel">
        <Extension Id="io.ynput.PS.panel">
            <DispatchInfo>
                <Resources>
                    <MainPath>./index.html</MainPath>

@@ -32,7 +32,7 @@
                </Lifecycle>
                <UI>
                    <Type>Panel</Type>
                    <Menu>OpenPype</Menu>
                    <Menu>AYON</Menu>
                    <Geometry>
                        <Size>
                            <Width>300</Width>

@@ -44,7 +44,7 @@
                        </MaxSize>
                    </Geometry>
                    <Icons>
                        <Icon Type="Normal">./icons/avalon-logo-48.png</Icon>
                        <Icon Type="Normal">./icons/ayon_logo.png</Icon>
                    </Icons>
                </UI>
            </DispatchInfo>

BIN (image removed: 1.3 KiB)
BIN openpype/hosts/photoshop/api/extension/icons/ayon_logo.png (new file: 3.5 KiB)
BIN (image removed: 8.6 KiB)
BIN (image changed: 8.6 KiB -> 8.6 KiB)
BIN (image removed: 13 KiB)
BIN openpype/hosts/photoshop/api/panel_failure.png (new file: 13 KiB)

@@ -170,8 +170,7 @@ class ExtractReview(publish.Extractor):
        # Generate mov.
        mov_path = os.path.join(staging_dir, "review.mov")
        self.log.info(f"Generate mov review: {mov_path}")
        args = [
            ffmpeg_path,
        args = ffmpeg_path + [
            "-y",
            "-i", source_files_pattern,
            "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",

@@ -224,6 +223,7 @@ class ExtractReview(publish.Extractor):
            "stagingDir": staging_dir,
            "tags": ["thumbnail", "delete"]
        })
        instance.data["thumbnailPath"] = thumbnail_path

    def _check_and_resize(self, processed_img_names, source_files_pattern,
                          staging_dir):

@@ -257,8 +257,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
            if 'shot' not in instance.data.get('family', ''):
                continue

            name = instance.data["asset"]

            # get handles
            handle_start = int(instance.data["handleStart"])
            handle_end = int(instance.data["handleEnd"])

@@ -286,6 +284,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
            parents = instance.data.get('parents', [])
            self.log.debug(f"parents: {pformat(parents)}")

            # Split by '/' for AYON where asset is a path
            name = instance.data["asset"].split("/")[-1]
            actual = {name: in_info}

            for parent in reversed(parents):

@@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
"""Collect original base name for use in templates."""
from pathlib import Path

import pyblish.api


class CollectOriginalBasename(pyblish.api.InstancePlugin):
    """Collect original file base name."""

    order = pyblish.api.CollectorOrder + 0.498
    label = "Collect Base Name"
    hosts = ["standalonepublisher"]
    families = ["simpleUnrealTexture"]

    def process(self, instance):
        file_name = Path(instance.data["representations"][0]["files"])
        instance.data["originalBasename"] = file_name.stem

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
"""Validator for correct file naming."""
import re
import pyblish.api

from openpype.pipeline.publish import (
    ValidateContentsOrder,
    PublishXmlValidationError,
)


class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin):
    label = "Validate Unreal Texture Names"
    hosts = ["standalonepublisher"]
    families = ["simpleUnrealTexture"]
    order = ValidateContentsOrder
    regex = "^T_{asset}.*"

    def process(self, instance):
        file_name = instance.data.get("originalBasename")
        self.log.info(file_name)
        pattern = self.regex.format(asset=instance.data.get("asset"))
        if not re.match(pattern, file_name):
            msg = f"Invalid file name {file_name}"
            raise PublishXmlValidationError(
                self, msg, formatting_data={
                    "invalid_file": file_name,
                    "asset": instance.data.get("asset")
                })

@@ -583,18 +583,9 @@ def prompt_new_file_with_mesh(mesh_filepath):
    file_dialog.setDirectory(os.path.dirname(mesh_filepath))
    url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
    file_dialog.selectUrl(url)

    # Give the explorer window time to refresh to the folder and select
    # the file
    while not file_dialog.selectedFiles():
        app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
    print(f"Selected: {file_dialog.selectedFiles()}")

    # Set it again now we know the path is refreshed - without this
    # accepting the dialog will often not trigger the correct filepath
    file_dialog.setDirectory(os.path.dirname(mesh_filepath))
    url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath))
    file_dialog.selectUrl(url)
    # TODO: find a way to improve the process event to
    # load more complicated mesh
    app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 3000)

    file_dialog.done(file_dialog.Accepted)
    app.processEvents(QtCore.QEventLoop.AllEvents)

@@ -628,7 +619,12 @@ def prompt_new_file_with_mesh(mesh_filepath):
    mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel)
    if not mesh_filename_label.text():
        dialog.close()
        raise RuntimeError(f"Failed to set mesh path: {mesh_filepath}")
        substance_painter.logging.warning(
            "Failed to set mesh path with the prompt dialog:"
            f"{mesh_filepath}\n\n"
            "Creating new project directly with the mesh path instead.")
    else:
        dialog.done(dialog.Accepted)

    new_action = _get_new_project_action()
    if not new_action:

@@ -44,14 +44,22 @@ class SubstanceLoadProjectMesh(load.LoaderPlugin):
        # Get user inputs
        import_cameras = data.get("import_cameras", True)
        preserve_strokes = data.get("preserve_strokes", True)

        sp_settings = substance_painter.project.Settings(
            import_cameras=import_cameras
        )
        if not substance_painter.project.is_open():
            # Allow to 'initialize' a new project
            path = self.filepath_from_context(context)
            # TODO: improve the prompt dialog function to not
            # only works for simple polygon scene
            result = prompt_new_file_with_mesh(mesh_filepath=path)
            if not result:
                self.log.info("User cancelled new project prompt.")
                return
                self.log.info("User cancelled new project prompt. "
                              "Creating new project directly from"
                              " Substance Painter API instead.")
                settings = substance_painter.project.create(
                    mesh_file_path=path, settings=sp_settings
                )

        else:
            # Reload the mesh

@@ -663,7 +663,7 @@ or updating already created. Publishing will create OTIO file.
        variant_name = instance_data["variant"]

        # basic unique asset name
        clip_name = os.path.splitext(otio_clip.name)[0].lower()
        clip_name = os.path.splitext(otio_clip.name)[0]
        project_doc = get_project(self.project_name)

        shot_name, shot_metadata = self._shot_metadata_solver.generate_data(

@@ -5,6 +5,7 @@ from openpype.pipeline import (
)
from openpype.lib import EnumDef
from openpype.pipeline import colorspace
from openpype.pipeline.publish import KnownPublishError


class CollectColorspace(pyblish.api.InstancePlugin,

@@ -26,18 +27,44 @@ class CollectColorspace(pyblish.api.InstancePlugin,

    def process(self, instance):
        values = self.get_attr_values_from_data(instance.data)
        colorspace = values.get("colorspace", None)
        if colorspace is None:
        colorspace_value = values.get("colorspace", None)
        if colorspace_value is None:
            return

        self.log.debug("Explicit colorspace set to: {}".format(colorspace))
        color_data = colorspace.convert_colorspace_enumerator_item(
            colorspace_value, self.config_items)

        colorspace_name = self._colorspace_name_by_type(color_data)
        self.log.debug("Explicit colorspace name: {}".format(colorspace_name))

        context = instance.context
        for repre in instance.data.get("representations", {}):
            self.set_representation_colorspace(
                representation=repre,
                context=context,
                colorspace=colorspace
                colorspace=colorspace_name
            )

    def _colorspace_name_by_type(self, colorspace_data):
        """
        Returns colorspace name by type

        Arguments:
            colorspace_data (dict): colorspace data

        Returns:
            str: colorspace name
        """
        if colorspace_data["type"] == "colorspaces":
            return colorspace_data["name"]
        elif colorspace_data["type"] == "roles":
            return colorspace_data["colorspace"]
        else:
            raise KnownPublishError(
                (
                    "Collecting of colorspace failed. Used config is missing "
                    "colorspace type: '{}'. Please contact your pipeline TD."
                ).format(colorspace_data['type'])
            )

    @classmethod

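Note on the rename in the hunk above: the plugin now imports the `colorspace` module, so keeping a local variable of the same name would shadow the import and break the `convert_colorspace_enumerator_item` call. A minimal sketch of the collision (the `process_broken` function and its input are hypothetical, not part of the commit):

```python
from openpype.pipeline import colorspace

def process_broken(values):
    # Hypothetical repro: the local assignment shadows the imported module,
    # so the helper is looked up on a plain string and raises AttributeError.
    colorspace = values.get("colorspace")
    return colorspace.convert_colorspace_enumerator_item(colorspace, [])
```

Renaming the local to `colorspace_value`, as the hunk does, removes the collision.
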
@@ -155,8 +155,6 @@ class CollectShotInstance(pyblish.api.InstancePlugin):
            else {}
        )

        asset_name = instance.data["asset"]

        # get handles
        handle_start = int(instance.data["handleStart"])
        handle_end = int(instance.data["handleEnd"])

@@ -177,6 +175,8 @@ class CollectShotInstance(pyblish.api.InstancePlugin):

        parents = instance.data.get('parents', [])

        # Split by '/' for AYON where asset is a path
        asset_name = instance.data["asset"].split("/")[-1]
        actual = {asset_name: in_info}

        for parent in reversed(parents):

@@ -33,7 +33,19 @@ class ValidateColorspace(pyblish.api.InstancePlugin,
            config_path = colorspace_data["config"]["path"]
            if config_path not in config_colorspaces:
                colorspaces = get_ocio_config_colorspaces(config_path)
                config_colorspaces[config_path] = set(colorspaces)
                if not colorspaces.get("colorspaces"):
                    message = (
                        f"OCIO config '{config_path}' does not contain any "
                        "colorspaces. This is an error in the OCIO config. "
                        "Contact your pipeline TD."
                    )
                    raise PublishValidationError(
                        title="Colorspace validation",
                        message=message,
                        description=message
                    )
                config_colorspaces[config_path] = set(
                    colorspaces["colorspaces"])

            colorspace = colorspace_data["colorspace"]
            self.log.debug(

@@ -73,7 +73,7 @@ class CollectRenderInstances(pyblish.api.InstancePlugin):
        render_layer_id = creator_attributes["render_layer_instance_id"]
        for in_data in instance.context.data["workfileInstances"]:
            if (
                in_data["creator_identifier"] == "render.layer"
                in_data.get("creator_identifier") == "render.layer"
                and in_data["instance_id"] == render_layer_id
            ):
                render_layer_data = in_data

@@ -111,6 +111,7 @@ from .transcoding import (
    get_ffmpeg_format_args,
    convert_ffprobe_fps_value,
    convert_ffprobe_fps_to_float,
    get_rescaled_command_arguments,
)

from .local_settings import (

@@ -232,6 +233,7 @@ __all__ = [
    "get_ffmpeg_format_args",
    "convert_ffprobe_fps_value",
    "convert_ffprobe_fps_to_float",
    "get_rescaled_command_arguments",

    "IniSettingRegistry",
    "JSONSettingRegistry",

@@ -140,7 +140,7 @@ def is_running_staging():
    latest_version = get_latest_version(local=False, remote=True)
    staging_version = latest_version

    if current_version == production_version:
    if current_version == staging_version:
        return True

    return is_staging_enabled()

@@ -655,47 +655,6 @@ def convert_for_ffmpeg(
    run_subprocess(oiio_cmd, logger=logger)


def get_oiio_input_and_channel_args(oiio_input_info):
    """Get input and channel arguments for oiiotool.

    Args:
        oiio_input_info (dict): Information about input from oiio tool.
            Should be output of function `get_oiio_info_for_input`.

    Returns:
        tuple[str, str]: Tuple of input and channel arguments.
    """
    channel_names = oiio_input_info["channelnames"]
    review_channels = get_convert_rgb_channels(channel_names)

    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels
    input_channels = [red, green, blue]

    # TODO find subimage where rgba is available for multipart exrs
    channels_arg = "R={},G={},B={}".format(red, green, blue)
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
        input_channels.append(alpha)

    input_channels_str = ",".join(input_channels)

    subimages = oiio_input_info.get("subimages")
    input_arg = "-i"
    if subimages is None or subimages == 1:
        # Tell oiiotool which channels should be loaded
        # - other channels are not loaded to memory so helps to avoid memory
        #   leak issues
        # - this option is crashing if used on multipart exrs
        input_arg += ":ch={}".format(input_channels_str)

    return input_arg, channels_arg


def convert_input_paths_for_ffmpeg(
    input_paths,
    output_dir,

@@ -1236,3 +1195,221 @@ def split_cmd_args(in_args):
            continue
        splitted_args.extend(arg.split(" "))
    return splitted_args


def get_rescaled_command_arguments(
    application,
    input_path,
    target_width,
    target_height,
    target_par=None,
    bg_color=None,
    log=None
):
    """Get command arguments for rescaling input to target size.

    Args:
        application (str): Application for which command should be created.
            Currently supported are "ffmpeg" and "oiiotool".
        input_path (str): Path to input file.
        target_width (int): Width of target.
        target_height (int): Height of target.
        target_par (Optional[float]): Pixel aspect ratio of target.
        bg_color (Optional[list[int]]): List of 8bit int values for
            background color. Should be in range 0 - 255.
        log (Optional[logging.Logger]): Logger used for logging.

    Returns:
        list[str]: List of command arguments.
    """
    command_args = []
    target_par = target_par or 1.0
    input_par = 1.0

    # ffmpeg command
    input_file_metadata = get_ffprobe_data(input_path, logger=log)
    stream = input_file_metadata["streams"][0]
    input_width = int(stream["width"])
    input_height = int(stream["height"])
    stream_input_par = stream.get("sample_aspect_ratio")
    if stream_input_par:
        input_par = (
            float(stream_input_par.split(":")[0])
            / float(stream_input_par.split(":")[1])
        )
    # recalculating input and target width
    input_width = int(input_width * input_par)
    target_width = int(target_width * target_par)

    # calculate aspect ratios
    target_aspect = float(target_width) / target_height
    input_aspect = float(input_width) / input_height

    # calculate scale size
    scale_size = float(input_width) / target_width
    if input_aspect < target_aspect:
        scale_size = float(input_height) / target_height

    # calculate rescaled width and height
    rescaled_width = int(input_width / scale_size)
    rescaled_height = int(input_height / scale_size)

    # calculate width and height shift
    rescaled_width_shift = int((target_width - rescaled_width) / 2)
    rescaled_height_shift = int((target_height - rescaled_height) / 2)

    if application == "ffmpeg":
        # create scale command
        scale = "scale={0}:{1}".format(input_width, input_height)
        pad = "pad={0}:{1}:({2}-iw)/2:({3}-ih)/2".format(
            target_width,
            target_height,
            target_width,
            target_height
        )
        if input_width > target_width or input_height > target_height:
            scale = "scale={0}:{1}".format(rescaled_width, rescaled_height)
            pad = "pad={0}:{1}:{2}:{3}".format(
                target_width,
                target_height,
                rescaled_width_shift,
                rescaled_height_shift
            )

        if bg_color:
            color = convert_color_values(application, bg_color)
            pad += ":{0}".format(color)
        command_args.extend(["-vf", "{0},{1}".format(scale, pad)])

    elif application == "oiiotool":
        input_info = get_oiio_info_for_input(input_path, logger=log)
        # Collect channels to export
        _, channels_arg = get_oiio_input_and_channel_args(
            input_info, alpha_default=1.0)

        command_args.extend([
            # Tell oiiotool which channels should be put to top stack
            # (and output)
            "--ch", channels_arg,
            # Use first subimage
            "--subimage", "0"
        ])

        if input_par != 1.0:
            command_args.extend(["--pixelaspect", "1"])

        width_shift = int((target_width - input_width) / 2)
        height_shift = int((target_height - input_height) / 2)

        # default resample is not scaling source image
        resample = [
            "--resize",
            "{0}x{1}".format(input_width, input_height),
            "--origin",
            "+{0}+{1}".format(width_shift, height_shift),
        ]
        # scaled source image to target size
        if input_width > target_width or input_height > target_height:
            # form resample command
            resample = [
                "--resize:filter=lanczos3",
                "{0}x{1}".format(rescaled_width, rescaled_height),
                "--origin",
                "+{0}+{1}".format(rescaled_width_shift, rescaled_height_shift),
            ]
        command_args.extend(resample)

        fullsize = [
            "--fullsize",
            "{0}x{1}".format(target_width, target_height)
        ]
        if bg_color:
            color = convert_color_values(application, bg_color)

            fullsize.extend([
                "--pattern",
                "constant:color={0}".format(color),
                "{0}x{1}".format(target_width, target_height),
                "4",  # 4 channels
                "--over"
            ])
        command_args.extend(fullsize)

    else:
        raise ValueError(
            "\"application\" input argument should "
            "be either \"ffmpeg\" or \"oiiotool\""
        )

    return command_args


def convert_color_values(application, color_value):
    """Get color mapping for ffmpeg and oiiotool.

    Args:
        application (str): Application for which command should be created.
        color_value (list[int]): List of 8bit int values for RGBA.

    Returns:
        str: ffmpeg returns hex string, oiiotool is string with floats.
    """
    red, green, blue, alpha = color_value

    if application == "ffmpeg":
        return "{0:0>2X}{1:0>2X}{2:0>2X}@{3}".format(
            red, green, blue, (alpha / 255.0)
        )
    elif application == "oiiotool":
        red = float(red / 255)
        green = float(green / 255)
        blue = float(blue / 255)
        alpha = float(alpha / 255)

        return "{0:.3f},{1:.3f},{2:.3f},{3:.3f}".format(
            red, green, blue, alpha)
    else:
        raise ValueError(
            "\"application\" input argument should "
            "be either \"ffmpeg\" or \"oiiotool\""
        )


def get_oiio_input_and_channel_args(oiio_input_info, alpha_default=None):
    """Get input and channel arguments for oiiotool.

    Args:
        oiio_input_info (dict): Information about input from oiio tool.
            Should be output of function `get_oiio_info_for_input`.
        alpha_default (float, optional): Default value for alpha channel.

    Returns:
        tuple[str, str]: Tuple of input and channel arguments.
    """
    channel_names = oiio_input_info["channelnames"]
    review_channels = get_convert_rgb_channels(channel_names)

    if review_channels is None:
        raise ValueError(
            "Couldn't find channels that can be used for conversion."
        )

    red, green, blue, alpha = review_channels
    input_channels = [red, green, blue]

    channels_arg = "R={0},G={1},B={2}".format(red, green, blue)
    if alpha is not None:
        channels_arg += ",A={}".format(alpha)
        input_channels.append(alpha)
    elif alpha_default:
        channels_arg += ",A={}".format(float(alpha_default))
        input_channels.append("A")

    input_channels_str = ",".join(input_channels)

    subimages = oiio_input_info.get("subimages")
    input_arg = "-i"
    if subimages is None or subimages == 1:
        # Tell oiiotool which channels should be loaded
        # - other channels are not loaded to memory so helps to avoid memory
        #   leak issues
        # - this option is crashing if used on multipart exrs
        input_arg += ":ch={}".format(input_channels_str)

    return input_arg, channels_arg

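The helpers added above are deterministic given the probed input, which makes them easy to sanity-check in isolation. A small usage sketch, assuming the exports from the `openpype.lib` hunk earlier and, for the rescale call, an existing clip plus a working `ffprobe` (the file path is a placeholder):

```python
from openpype.lib.transcoding import (
    convert_color_values,
    get_rescaled_command_arguments,
)

# Color mapping: ffmpeg gets a hex string with an alpha suffix,
# oiiotool gets comma-separated floats in the 0.0-1.0 range.
assert convert_color_values("ffmpeg", [255, 0, 0, 255]) == "FF0000@1.0"
assert convert_color_values(
    "oiiotool", [255, 0, 0, 255]) == "1.000,0.000,0.000,1.000"

# Builds '-vf scale=...,pad=...' arguments that letterbox the input
# into a 1920x1080 frame, padded with opaque black.
args = get_rescaled_command_arguments(
    "ffmpeg", "/path/to/review.mov", 1920, 1080, bg_color=[0, 0, 0, 255]
)
print(args)  # -> ['-vf', 'scale=...,pad=...:000000@1.0']
```
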
@@ -460,7 +460,21 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin,
        self.plugin_info = self.get_plugin_info()
        self.aux_files = self.get_aux_files()

        self.process_submission()
        job_id = self.process_submission()
        self.log.info("Submitted job to Deadline: {}.".format(job_id))

        # TODO: Find a way that's more generic and not render type specific
        if "exportJob" in instance.data:
            self.log.info("Splitting export and render in two jobs")
            self.log.info("Export job id: %s", job_id)
            render_job_info = self.get_job_info(dependency_job_ids=[job_id])
            render_plugin_info = self.get_plugin_info(job_type="render")
            payload = self.assemble_payload(
                job_info=render_job_info,
                plugin_info=render_plugin_info
            )
            render_job_id = self.submit(payload)
            self.log.info("Render job id: %s", render_job_id)

    def process_submission(self):
        """Process data for submission.

@@ -2,6 +2,8 @@
"""Collect default Deadline server."""
import pyblish.api

from openpype import AYON_SERVER_ENABLED


class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
    """Collect default Deadline Webservice URL.

@@ -30,24 +32,26 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
            self.log.error("Cannot get OpenPype Deadline module.")
            raise AssertionError("OpenPype Deadline module not found.")

        # get default deadline webservice url from deadline module
        self.log.debug(deadline_module.deadline_urls)
        context.data["defaultDeadline"] = deadline_module.deadline_urls["default"]  # noqa: E501
        deadline_settings = context.data["project_settings"]["deadline"]
        deadline_server_name = None
        if AYON_SERVER_ENABLED:
            deadline_server_name = deadline_settings["deadline_server"]
        else:
            deadline_servers = deadline_settings["deadline_servers"]
            if deadline_servers:
                deadline_server_name = deadline_servers[0]

        context.data["deadlinePassMongoUrl"] = self.pass_mongo_url
        context.data["deadlinePassMongoUrl"] = self.pass_mongo_url

        deadline_servers = (context.data
                            ["project_settings"]
                            ["deadline"]
                            ["deadline_servers"])
        if deadline_servers:
            deadline_server_name = deadline_servers[0]
        deadline_webservice = None
        if deadline_server_name:
            deadline_webservice = deadline_module.deadline_urls.get(
                deadline_server_name)
            if deadline_webservice:
                context.data["defaultDeadline"] = deadline_webservice
                self.log.debug("Overriding from project settings with {}".format(  # noqa: E501
                    deadline_webservice))

        context.data["defaultDeadline"] = \
            context.data["defaultDeadline"].strip().rstrip("/")
        default_deadline_webservice = deadline_module.deadline_urls["default"]
        deadline_webservice = (
            deadline_webservice
            or default_deadline_webservice
        )

        context.data["defaultDeadline"] = deadline_webservice.strip().rstrip("/")  # noqa

@@ -2,8 +2,6 @@ import os
import getpass
from datetime import datetime

import hou

import attr
import pyblish.api
from openpype.lib import (

@@ -141,6 +139,9 @@ class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
        return job_info

    def get_plugin_info(self):
        # Not all hosts can import this module.
        import hou

        instance = self._instance
        version = hou.applicationVersionString()
        version = ".".join(version.split(".")[:2])

@@ -167,6 +168,9 @@ class HoudiniCacheSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline
        instance.data["toBeRenderedOn"] = "deadline"

    def get_rop_node(self, instance):
        # Not all hosts can import this module.
        import hou

        rop = instance.data.get("instance_node")
        rop_node = hou.node(rop)

@@ -5,12 +5,15 @@ from datetime import datetime

import pyblish.api

from openpype.pipeline import legacy_io
from openpype.pipeline import legacy_io, OpenPypePyblishPluginMixin
from openpype.tests.lib import is_in_tests
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
from openpype.lib import is_running_from_build

from openpype.lib import (
    is_running_from_build,
    BoolDef,
    NumberDef
)

@attr.s
class DeadlinePluginInfo():

@@ -20,8 +23,29 @@ class DeadlinePluginInfo():
    IgnoreInputs = attr.ib(default=True)


class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
    """Submit Solaris USD Render ROPs to Deadline.
@attr.s
class ArnoldRenderDeadlinePluginInfo():
    InputFile = attr.ib(default=None)
    Verbose = attr.ib(default=4)


@attr.s
class MantraRenderDeadlinePluginInfo():
    SceneFile = attr.ib(default=None)
    Version = attr.ib(default=None)


@attr.s
class VrayRenderPluginInfo():
    InputFilename = attr.ib(default=None)
    SeparateFilesPerFrame = attr.ib(default=True)


class HoudiniSubmitDeadline(
    abstract_submit_deadline.AbstractSubmitDeadline,
    OpenPypePyblishPluginMixin
):
    """Submit Render ROPs to Deadline.

    Renders are submitted to a Deadline Web Service as
    supplied via the environment variable AVALON_DEADLINE.

@@ -45,21 +69,95 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
    targets = ["local"]
    use_published = True

    def get_job_info(self):
        job_info = DeadlineJobInfo(Plugin="Houdini")
    # presets
    priority = 50
    chunk_size = 1
    export_priority = 50
    export_chunk_size = 10
    group = ""
    export_group = ""

    @classmethod
    def get_attribute_defs(cls):
        return [
            NumberDef(
                "priority",
                label="Priority",
                default=cls.priority,
                decimals=0
            ),
            NumberDef(
                "chunk",
                label="Frames Per Task",
                default=cls.chunk_size,
                decimals=0,
                minimum=1,
                maximum=1000
            ),
            NumberDef(
                "export_priority",
                label="Export Priority",
                default=cls.priority,
                decimals=0
            ),
            NumberDef(
                "export_chunk",
                label="Export Frames Per Task",
                default=cls.export_chunk_size,
                decimals=0,
                minimum=1,
                maximum=1000
            ),
            BoolDef(
                "suspend_publish",
                default=False,
                label="Suspend publish"
            )
        ]

    def get_job_info(self, dependency_job_ids=None):

        instance = self._instance
        context = instance.context

        attribute_values = self.get_attr_values_from_data(instance.data)

        # Whether Deadline render submission is being split in two
        # (extract + render)
        split_render_job = instance.data["exportJob"]

        # If there's some dependency job ids we can assume this is a render job
        # and not an export job
        is_export_job = True
        if dependency_job_ids:
            is_export_job = False

        if split_render_job and not is_export_job:
            # Convert from family to Deadline plugin name
            # i.e., arnold_rop -> Arnold
            plugin = instance.data["family"].replace("_rop", "").capitalize()
        else:
            plugin = "Houdini"

        job_info = DeadlineJobInfo(Plugin=plugin)

        filepath = context.data["currentFile"]
        filename = os.path.basename(filepath)

        job_info.Name = "{} - {}".format(filename, instance.name)
        job_info.BatchName = filename
        job_info.Plugin = "Houdini"

        job_info.UserName = context.data.get(
            "deadlineUser", getpass.getuser())

        if split_render_job and is_export_job:
            job_info.Priority = attribute_values.get(
                "export_priority", self.export_priority
            )
        else:
            job_info.Priority = attribute_values.get(
                "priority", self.priority
            )

        if is_in_tests():
            job_info.BatchName += datetime.now().strftime("%d%m%Y%H%M%S")

@@ -73,9 +171,23 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
        )
        job_info.Frames = frames

        # Make sure we make job frame dependent so render tasks pick up
        # as soon as export tasks are done
        if split_render_job and not is_export_job:
            job_info.IsFrameDependent = True

        job_info.Pool = instance.data.get("primaryPool")
        job_info.SecondaryPool = instance.data.get("secondaryPool")
        job_info.ChunkSize = instance.data.get("chunkSize", 10)
        job_info.Group = self.group
        if split_render_job and is_export_job:
            job_info.ChunkSize = attribute_values.get(
                "export_chunk", self.export_chunk_size
            )
        else:
            job_info.ChunkSize = attribute_values.get(
                "chunk", self.chunk_size
            )

        job_info.Comment = context.data.get("comment")

        keys = [

@@ -101,6 +213,7 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):

        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **legacy_io.Session)

        for key in keys:
            value = environment.get(key)
            if value:

@@ -115,25 +228,51 @@ class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
        job_info.OutputDirectory += dirname.replace("\\", "/")
        job_info.OutputFilename += fname

        # Add dependencies if given
        if dependency_job_ids:
            job_info.JobDependencies = ",".join(dependency_job_ids)

        return job_info

    def get_plugin_info(self):
    def get_plugin_info(self, job_type=None):
        # Not all hosts can import this module.
        import hou

        instance = self._instance
        context = instance.context

        # Output driver to render
        driver = hou.node(instance.data["instance_node"])
        hou_major_minor = hou.applicationVersionString().rsplit(".", 1)[0]

        plugin_info = DeadlinePluginInfo(
            SceneFile=context.data["currentFile"],
            OutputDriver=driver.path(),
            Version=hou_major_minor,
            IgnoreInputs=True
        )
        # Output driver to render
        if job_type == "render":
            family = instance.data.get("family")
            if family == "arnold_rop":
                plugin_info = ArnoldRenderDeadlinePluginInfo(
                    InputFile=instance.data["ifdFile"]
                )
            elif family == "mantra_rop":
                plugin_info = MantraRenderDeadlinePluginInfo(
                    SceneFile=instance.data["ifdFile"],
                    Version=hou_major_minor,
                )
            elif family == "vray_rop":
                plugin_info = VrayRenderPluginInfo(
                    InputFilename=instance.data["ifdFile"],
                )
            else:
                self.log.error(
                    "Family '%s' not supported yet to split render job",
                    family
                )
                return
        else:
            driver = hou.node(instance.data["instance_node"])
            plugin_info = DeadlinePluginInfo(
                SceneFile=context.data["currentFile"],
                OutputDriver=driver.path(),
                Version=hou_major_minor,
                IgnoreInputs=True
            )

        return attr.asdict(plugin_info)

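As a quick reference for the split-job branch above, the Deadline plugin name for the render job is derived directly from the instance family; the following standalone snippet shows exactly what the `replace`/`capitalize` chain yields for the three supported families:

```python
# Derivation used by get_job_info() for the render half of a split job.
for family in ("arnold_rop", "mantra_rop", "vray_rop"):
    print(family.replace("_rop", "").capitalize())
# Arnold
# Mantra
# Vray
```
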
@@ -370,10 +370,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
        environment = dict({key: os.environ[key] for key in keys
                            if key in os.environ}, **legacy_io.Session)

        for _path in os.environ:
            if _path.lower().startswith('openpype_'):
                environment[_path] = os.environ[_path]

        # to recognize render jobs
        if AYON_SERVER_ENABLED:
            environment["AYON_BUNDLE_NAME"] = os.environ["AYON_BUNDLE_NAME"]

@@ -402,7 +398,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
        self.log.debug("Submitting..")
        self.log.debug(json.dumps(payload, indent=4, sort_keys=True))

        # adding expectied files to instance.data
        # adding expected files to instance.data
        self.expected_files(
            instance,
            render_path,

@@ -458,7 +454,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
    def expected_files(
        self,
        instance,
        path,
        filepath,
        start_frame,
        end_frame
    ):

@@ -467,21 +463,44 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
        if not instance.data.get("expectedFiles"):
            instance.data["expectedFiles"] = []

        dirname = os.path.dirname(path)
        file = os.path.basename(path)
        dirname = os.path.dirname(filepath)
        file = os.path.basename(filepath)

        # since some files might be already tagged as publish_on_farm
        # we need to avoid adding them to expected files since those would be
        # duplicated into metadata.json file
        representations = instance.data.get("representations", [])
        # check if file is not in representations with publish_on_farm tag
        for repre in representations:
            # Skip if 'publish_on_farm' not available
            if "publish_on_farm" not in repre.get("tags", []):
                continue

            # in case where single file (video, image) is already in
            # representation file. Will be added to expected files via
            # submit_publish_job.py
            if file in repre.get("files", []):
                self.log.debug(
                    "Skipping expected file: {}".format(filepath))
                return

        # in case path is hashed sequence expression
        # (e.g. /path/to/file.####.png)
        if "#" in file:
            pparts = file.split("#")
            padding = "%0{}d".format(len(pparts) - 1)
            file = pparts[0] + padding + pparts[-1]

        # in case input path was single file (video or image)
        if "%" not in file:
            instance.data["expectedFiles"].append(path)
            instance.data["expectedFiles"].append(filepath)
            return

        # shift start frame by 1 if slate is present
        if instance.data.get("slate"):
            start_frame -= 1

        # add sequence files to expected files
        for i in range(start_frame, (end_frame + 1)):
            instance.data["expectedFiles"].append(
                os.path.join(dirname, (file % i)).replace("\\", "/"))

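The hash-padding branch above converts a Nuke-style `####` sequence expression into a printf-style pattern before frames are expanded; a standalone check of that conversion (the file name is a placeholder):

```python
file = "render.####.exr"
pparts = file.split("#")                   # ['render.', '', '', '', '.exr']
padding = "%0{}d".format(len(pparts) - 1)  # '%04d'
file = pparts[0] + padding + pparts[-1]

assert file == "render.%04d.exr"
assert (file % 12) == "render.0012.exr"
```
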
@@ -429,7 +429,7 @@ def inject_ayon_environment(deadlinePlugin):
            "separated list \"{}\"."
            "The path to the render executable can be configured"
            " from the Plugin Configuration in the Deadline Monitor."
        ).format(";".join(exe_list)))
        ).format(exe_list))

    print("--- Ayon executable: {}".format(exe))

@@ -66,7 +66,7 @@ def get_openpype_attr(session, split_hierarchical=True, query_keys=None):
        "select {}"
        " from CustomAttributeConfiguration"
        # Kept `pype` for Backwards Compatibility
        " where group.name in (\"pype\", \"{}\")"
        " where group.name in (\"pype\", \"ayon\", \"{}\")"
    ).format(", ".join(query_keys), CUST_ATTR_GROUP)
    all_avalon_attr = session.query(cust_attrs_query).all()
    for cust_attr in all_avalon_attr:

@@ -127,17 +127,25 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        other_representations = []
        has_movie_review = False
        for repre in instance_repres:
            self.log.debug("Representation {}".format(repre))
            repre_tags = repre.get("tags") or []
            # exclude representations which are going to be published on farm
            if "publish_on_farm" in repre_tags:
                continue

            self.log.debug("Representation {}".format(repre))

            # include only thumbnail representations
            if repre.get("thumbnail") or "thumbnail" in repre_tags:
                thumbnail_representations.append(repre)

            # include only review representations
            elif "ftrackreview" in repre_tags:
                review_representations.append(repre)
                if self._is_repre_video(repre):
                    has_movie_review = True

            else:
                # include all other representations
                other_representations.append(repre)

        # Prepare ftrack locations

@@ -230,6 +238,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
        # Create review components
        # Change asset name of each new component for review
        multiple_reviewable = len(review_representations) > 1
        extended_asset_name = None
        for index, repre in enumerate(review_representations):
            if not self._is_repre_video(repre) and has_movie_review:
                self.log.debug("Movie repre has priority "

@@ -343,7 +352,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):

            # add extended name if any
            if (
                not self.keep_first_subset_name_for_review
                multiple_reviewable
                and not self.keep_first_subset_name_for_review
                and extended_asset_name
            ):
                other_item["asset_data"]["name"] = extended_asset_name

@@ -21,7 +21,7 @@ def get_pype_attr(session, split_hierarchical=True):
        "select id, entity_type, object_type_id, is_hierarchical, default"
        " from CustomAttributeConfiguration"
        # Kept `pype` for Backwards Compatibility
        " where group.name in (\"pype\", \"{}\")"
        " where group.name in (\"pype\", \"ayon\", \"{}\")"
    ).format(CUST_ATTR_GROUP)
    all_avalon_attr = session.query(cust_attrs_query).all()
    for cust_attr in all_avalon_attr:

@ -8,10 +8,12 @@ import appdirs
|
|||
from qtpy import QtCore, QtWidgets, QtGui
|
||||
|
||||
from openpype import resources
|
||||
from openpype import AYON_SERVER_ENABLED
|
||||
from openpype.style import load_stylesheet
|
||||
from openpype.lib import JSONSettingRegistry
|
||||
|
||||
|
||||
|
||||
openpype_art = """
|
||||
. . .. . ..
|
||||
_oOOP3OPP3Op_. .
|
||||
|
|
@ -27,6 +29,18 @@ openpype_art = """
|
|||
~P3.OPPPO3OP~ . .. .
|
||||
. ' '. . .. . . . .. .
|
||||
|
||||
"""
|
||||
|
||||
ayon_art = r"""
|
||||
|
||||
▄██▄
|
||||
▄███▄ ▀██▄ ▀██▀ ▄██▀ ▄██▀▀▀██▄ ▀███▄ █▄
|
||||
▄▄ ▀██▄ ▀██▄ ▄██▀ ██▀ ▀██▄ ▄ ▀██▄ ███
|
||||
▄██▀ ██▄ ▀ ▄▄ ▀ ██ ▄██ ███ ▀██▄ ███
|
||||
▄██▀ ▀██▄ ██ ▀██▄ ▄██▀ ███ ▀██ ▀█▀
|
||||
▄██▀ ▀██▄ ▀█ ▀██▄▄▄▄██▀ █▀ ▀██▄
|
||||
|
||||
· · - =[ by YNPUT ]:[ http://ayon.ynput.io ]= - · ·
|
||||
|
||||
"""
|
||||
|
||||
|
|
@ -41,8 +55,12 @@ class PythonInterpreterRegistry(JSONSettingRegistry):
|
|||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.vendor = "pypeclub"
|
||||
self.product = "openpype"
|
||||
if AYON_SERVER_ENABLED:
|
||||
self.vendor = "ynput"
|
||||
self.product = "ayon"
|
||||
else:
|
||||
self.vendor = "pypeclub"
|
||||
self.product = "openpype"
|
||||
name = "python_interpreter_tool"
|
||||
path = appdirs.user_data_dir(self.product, self.vendor)
|
||||
super(PythonInterpreterRegistry, self).__init__(name, path)
|
||||
|
|
@ -336,10 +354,12 @@ class PythonInterpreterWidget(QtWidgets.QWidget):
|
|||
default_width = 1000
|
||||
default_height = 600
|
||||
|
||||
def __init__(self, parent=None):
|
||||
def __init__(self, allow_save_registry=True, parent=None):
|
||||
super(PythonInterpreterWidget, self).__init__(parent)
|
||||
|
||||
self.setWindowTitle("OpenPype Console")
|
||||
self.setWindowTitle("{} Console".format(
|
||||
"AYON" if AYON_SERVER_ENABLED else "OpenPype"
|
||||
))
|
||||
self.setWindowIcon(QtGui.QIcon(resources.get_openpype_icon_filepath()))
|
||||
|
||||
self.ansi_escape = re.compile(
|
||||
|
|
@@ -387,10 +407,15 @@ class PythonInterpreterWidget(QtWidgets.QWidget):
         self._tab_widget = tab_widget
         self._line_check_timer = line_check_timer

-        self._append_lines([openpype_art])
+        if AYON_SERVER_ENABLED:
+            self._append_lines([ayon_art])
+        else:
+            self._append_lines([openpype_art])

         self._first_show = True
         self._splitter_size_ratio = None
+        self._allow_save_registry = allow_save_registry
+        self._registry_saved = True

         self._init_from_registry()
@@ -434,6 +459,11 @@ class PythonInterpreterWidget(QtWidgets.QWidget):
             pass

     def save_registry(self):
+        # Window was not shown yet
+        if not self._allow_save_registry or self._registry_saved:
+            return
+
+        self._registry_saved = True
         setting_registry = PythonInterpreterRegistry()

         setting_registry.set_item("width", self.width())
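Note: a lifecycle sketch of the new guard, using only names from the diff; geometry is persisted at most once per shown period, and only when saving is allowed:

    w = PythonInterpreterWidget(allow_save_registry=False)
    w.show()           # showEvent() resets _registry_saved to False
    w.save_registry()  # returns early: allow_save_registry is False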
@@ -627,6 +657,7 @@ class PythonInterpreterWidget(QtWidgets.QWidget):

     def showEvent(self, event):
         self._line_check_timer.start()
+        self._registry_saved = False
         super(PythonInterpreterWidget, self).showEvent(event)
         # First show setup
         if self._first_show:
@@ -1,23 +1,28 @@
 # -*- coding: utf-8 -*-
 """Submitting render job to RoyalRender."""
 import os
-import re
 import json
 import platform
+import re
+import tempfile
+import uuid
+from datetime import datetime

 import pyblish.api
-from openpype.tests.lib import is_in_tests
-from openpype.pipeline.publish.lib import get_published_workfile_instance
-from openpype.pipeline.publish import KnownPublishError

-from openpype.lib import BoolDef, NumberDef, is_running_from_build
+from openpype.lib import (
+    is_running_from_build,
+    BoolDef,
+    NumberDef,
+)
+from openpype.lib.execute import run_openpype_process
 from openpype.modules.royalrender.api import Api as rrApi
-from openpype.modules.royalrender.rr_job import (
-    RRJob, CustomAttribute, get_rr_platform)
+from openpype.modules.royalrender.rr_job import (
+    CustomAttribute,
+    RRJob,
+    RREnvList,
+    get_rr_platform,
+)
 from openpype.pipeline import OpenPypePyblishPluginMixin
+from openpype.pipeline.publish import KnownPublishError
+from openpype.pipeline.publish.lib import get_published_workfile_instance
+from openpype.tests.lib import is_in_tests


 class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
@@ -302,3 +307,68 @@ class BaseCreateRoyalRenderJob(pyblish.api.InstancePlugin,
             path = path.replace(first_frame, "#" * padding)

         return path
+
+    def inject_environment(self, instance, job):
+        # type: (pyblish.api.Instance, RRJob) -> RRJob
+        """Inject environment variables for RR submission.
+
+        This function mimics the behaviour of the Deadline
+        integration. It is just a temporary solution until proper
+        runtime environment injection is implemented in RR.
+
+        Args:
+            instance (pyblish.api.Instance): Publishing instance.
+            job (RRJob): RRJob instance to be injected.
+
+        Returns:
+            RRJob: Injected RRJob instance.
+
+        Throws:
+            RuntimeError: If any of the required env vars is missing.
+
+        """
+
+        temp_file_name = "{}_{}.json".format(
+            datetime.utcnow().strftime('%Y%m%d%H%M%S%f'),
+            str(uuid.uuid1())
+        )
+
+        export_url = os.path.join(tempfile.gettempdir(), temp_file_name)
+        print(">>> Temporary path: {}".format(export_url))
+
+        args = [
+            "--headless",
+            "extractenvironments",
+            export_url
+        ]
+
+        anatomy_data = instance.context.data["anatomyData"]
+
+        add_kwargs = {
+            "project": anatomy_data["project"]["name"],
+            "asset": instance.context.data["asset"],
+            "task": anatomy_data["task"]["name"],
+            "app": instance.context.data.get("appName"),
+            "envgroup": "farm"
+        }
+
+        if os.getenv('IS_TEST'):
+            args.append("--automatic-tests")
+
+        if not all(add_kwargs.values()):
+            raise RuntimeError((
+                "Missing required env vars: AVALON_PROJECT, AVALON_ASSET,"
+                " AVALON_TASK, AVALON_APP_NAME"
+            ))
+
+        for key, value in add_kwargs.items():
+            args.extend([f"--{key}", value])
+
+        self.log.debug("Executing: {}".format(" ".join(args)))
+        run_openpype_process(*args, logger=self.log)
+
+        self.log.debug("Loading file ...")
+        with open(export_url) as fp:
+            contents = json.load(fp)
+
+        job.rrEnvList = RREnvList(contents).serialize()
+        return job
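Note: the assembled headless call would resemble the following; the executable name and all values are illustrative, the flags mirror the add_kwargs keys above:

    openpype_console --headless extractenvironments /tmp/<stamp>_<uuid>.json \
        --project demo --asset sh010 --task comp --app maya/2023 --envgroup farm

The JSON of environment variables written to that path is then serialized into the job's rrEnvList.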
@@ -2,7 +2,7 @@
 """Submitting render job to RoyalRender."""
 import os

-from maya.OpenMaya import MGlobal
+from maya.OpenMaya import MGlobal  # noqa: F401

 from openpype.modules.royalrender import lib
 from openpype.pipeline.farm.tools import iter_expected_files
@@ -38,5 +38,6 @@ class CreateMayaRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
         job = self.get_job(instance, self.scene_path, first_file_path,
                            layer_name)
         job = self.update_job_with_host_specific(instance, job)
+        job = self.inject_environment(instance, job)

         instance.data["rrJobs"].append(job)
@@ -25,6 +25,7 @@ class CreateNukeRoyalRenderJob(lib.BaseCreateRoyalRenderJob):
         jobs = self.create_jobs(instance)
         for job in jobs:
             job = self.update_job_with_host_specific(instance, job)
+            job = self.inject_environment(instance, job)

             instance.data["rrJobs"].append(job)
@@ -205,6 +205,9 @@ class CreatePublishRoyalRenderJob(pyblish.api.InstancePlugin,
         jobs_pre_ids = []
         for job in instance.data["rrJobs"]:  # type: RRJob
             if job.rrEnvList:
+                if len(job.rrEnvList) > 2000:
+                    self.log.warning(("Job environment is too long "
+                                      f"{len(job.rrEnvList)} > 2000"))
                 job_environ.update(
                     dict(RREnvList.parse(job.rrEnvList))
                 )
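Note: a sketch of how payloads from several jobs merge, with illustrative values; later jobs win on key clashes:

    job_environ = {}
    for payload in ("A=1~~~B=2", "B=3~~~C=4"):
        job_environ.update(dict(RREnvList.parse(payload)))
    # job_environ == {"A": "1", "B": "3", "C": "4"}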
@@ -32,7 +32,7 @@ class RREnvList(dict):
         """Parse rrEnvList string and return it as RREnvList object."""
         out = RREnvList()
         for var in data.split("~~~"):
-            k, v = var.split("=")
+            k, v = var.split("=", maxsplit=1)
             out[k] = v
         return out
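Note: a round-trip sketch of why maxsplit=1 matters, assuming serialize() joins "KEY=VALUE" pairs with "~~~" to mirror parse() above:

    env = RREnvList({"AVALON_PROJECT": "demo", "FOO": "a=b"})
    payload = env.serialize()           # "AVALON_PROJECT=demo~~~FOO=a=b"
    restored = RREnvList.parse(payload)
    assert restored["FOO"] == "a=b"     # the value keeps its own "=" intact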
@@ -172,7 +172,7 @@ class RRJob(object):

     # Environment
     # only used in RR 8.3 and newer
-    rrEnvList = attr.ib(default=None)  # type: str
+    rrEnvList = attr.ib(default=None, type=str)  # type: str


 class SubmitterParameter:
@@ -145,6 +145,9 @@ def get_transferable_representations(instance):

         trans_rep = representation.copy()

+        # remove publish_on_farm from representations tags
+        trans_rep["tags"].remove("publish_on_farm")
+
         staging_dir = trans_rep.get("stagingDir")

         if staging_dir:
@@ -61,8 +61,9 @@ class CollectHierarchy(pyblish.api.ContextPlugin):
             "resolutionHeight": instance.data["resolutionHeight"],
             "pixelAspect": instance.data["pixelAspect"]
         }

-        actual = {instance.data["asset"]: shot_data}
+        # Split by '/' for AYON where asset is a path
+        name = instance.data["asset"].split("/")[-1]
+        actual = {name: shot_data}

         for parent in reversed(instance.data["parents"]):
             next_dict = {}
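Note: an illustrative view of the split; in AYON the asset value can be a folder path, and only the leaf name keys the hierarchy:

    asset = "shots/sq01/sh010"       # hypothetical AYON-style value
    name = asset.split("/")[-1]      # "sh010"
    actual = {name: shot_data}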
@@ -68,12 +68,6 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
         ]

     def process(self, instance):
-        # editorial would fail since they might not be in database yet
-        new_asset_publishing = instance.data.get("newAssetPublishing")
-        if new_asset_publishing:
-            self.log.debug("Instance is creating new asset. Skipping.")
-            return
-
         anatomy = instance.context.data["anatomy"]

         template_data = copy.deepcopy(instance.data["anatomyData"])
@@ -85,11 +79,18 @@ class CollectResourcesPath(pyblish.api.InstancePlugin):
             "representation": "TEMP"
         })

-        # For the first time publish
-        if instance.data.get("hierarchy"):
-            template_data.update({
-                "hierarchy": instance.data["hierarchy"]
-            })
+        # Add fill keys for editorial publishing creating new entity
+        # TODO handle in editorial plugin
+        if instance.data.get("newAssetPublishing"):
+            if "hierarchy" not in template_data:
+                template_data["hierarchy"] = instance.data["hierarchy"]
+
+            if "asset" not in template_data:
+                asset_name = instance.data["asset"].split("/")[-1]
+                template_data["asset"] = asset_name
+                template_data["folder"] = {
+                    "name": asset_name
+                }

         publish_templates = anatomy.templates_obj["publish"]
         if "folder" in publish_templates:
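Note: a minimal sketch of the fill-key logic for a not-yet-existing editorial asset, with hypothetical data:

    instance_data = {
        "newAssetPublishing": True,
        "hierarchy": "shots/sq01",
        "asset": "shots/sq01/sh010",
    }
    template_data = {}
    if instance_data.get("newAssetPublishing"):
        if "hierarchy" not in template_data:
            template_data["hierarchy"] = instance_data["hierarchy"]
        if "asset" not in template_data:
            asset_name = instance_data["asset"].split("/")[-1]
            template_data["asset"] = asset_name
            template_data["folder"] = {"name": asset_name}
    # template_data == {"hierarchy": "shots/sq01", "asset": "sh010",
    #                   "folder": {"name": "sh010"}}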
@@ -89,8 +89,8 @@ class ExtractBurnin(publish.Extractor):

         self.main_process(instance)

-        # Remove any representations tagged for deletion.
-        # QUESTION Is possible to have representation with "delete" tag?
+        # Remove only representation tagged with both
+        # tags `delete` and `burnin`
         for repre in tuple(instance.data["representations"]):
             if all(x in repre.get("tags", []) for x in ['delete', 'burnin']):
                 self.log.debug("Removing representation: {}".format(repre))
@@ -204,7 +204,8 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):

         project_item = None
         project_children_context = None
-        for key, value in context.data["hierarchyContext"].items():
+        hierarchy_context = copy.deepcopy(context.data["hierarchyContext"])
+        for key, value in hierarchy_context.items():
             project_item = copy.deepcopy(value)
             project_children_context = project_item.pop("childs", None)
             project_item["name"] = key
@@ -223,23 +224,24 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):
         valid_ids = set()

         hierarchy_queue = collections.deque()
-        hierarchy_queue.append((project_id, project_children_context))
+        hierarchy_queue.append((project_id, "", project_children_context))
         while hierarchy_queue:
             queue_item = hierarchy_queue.popleft()
-            parent_id, children_context = queue_item
+            parent_id, parent_path, children_context = queue_item
             if not children_context:
                 continue

-            for asset, asset_info in children_context.items():
+            for folder_name, folder_info in children_context.items():
+                folder_path = "{}/{}".format(parent_path, folder_name)
                 if (
-                    asset not in active_folder_paths
-                    and not asset_info.get("childs")
+                    folder_path not in active_folder_paths
+                    and not folder_info.get("childs")
                 ):
                     continue
-                asset_name = asset.split("/")[-1]

                 item_id = uuid.uuid4().hex
-                new_item = copy.deepcopy(asset_info)
-                new_item["name"] = asset_name
+                new_item = copy.deepcopy(folder_info)
+                new_item["name"] = folder_name
                 new_item["children"] = []
                 new_children_context = new_item.pop("childs", None)
                 tasks = new_item.pop("tasks", {})
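Note: a standalone sketch of the breadth-first walk that now carries full folder paths; the hierarchy data is hypothetical:

    import collections

    hierarchy = {"shots": {"childs": {"sq01": {"childs": {"sh010": {}}}}}}
    queue = collections.deque([("", hierarchy)])
    paths = []
    while queue:
        parent_path, children = queue.popleft()
        for name, info in (children or {}).items():
            path = "{}/{}".format(parent_path, name)
            paths.append(path)
            queue.append((path, info.get("childs")))
    # paths == ["/shots", "/shots/sq01", "/shots/sq01/sh010"]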
@@ -253,9 +255,11 @@ class ExtractHierarchyToAYON(pyblish.api.ContextPlugin):
             items_by_id[item_id] = new_item
             parent_id_by_item_id[item_id] = parent_id

-            if asset in active_folder_paths:
+            if folder_path in active_folder_paths:
                 valid_ids.add(item_id)
-            hierarchy_queue.append((item_id, new_children_context))
+            hierarchy_queue.append(
+                (item_id, folder_path, new_children_context)
+            )

         if not valid_ids:
             return None