Merge branch 'develop' into feature/AY-742_copy-to-breakdown-project

This commit is contained in:
Jakub Jezek 2024-06-14 11:18:41 +02:00
commit 7e7995baf9
No known key found for this signature in database
GPG key ID: 06DBD609ADF27FD9
2249 changed files with 8698 additions and 194913 deletions

View file

@@ -0,0 +1,15 @@
from .version import __version__
from .addon import (
    AFTEREFFECTS_ADDON_ROOT,
    AfterEffectsAddon,
    get_launch_script_path,
)


__all__ = (
    "__version__",
    "AFTEREFFECTS_ADDON_ROOT",
    "AfterEffectsAddon",
    "get_launch_script_path",
)

View file

@@ -0,0 +1,39 @@
import os

from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

AFTEREFFECTS_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


class AfterEffectsAddon(AYONAddon, IHostAddon):
    name = "aftereffects"
    version = __version__
    host_name = "aftereffects"

    def add_implementation_envs(self, env, _app):
        """Modify environments to contain all required for implementation."""
        defaults = {
            "AYON_LOG_NO_COLORS": "1",
            "WEBSOCKET_URL": "ws://localhost:8097/ws/"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_workfile_extensions(self):
        return [".aep"]

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(AFTEREFFECTS_ADDON_ROOT, "hooks")
        ]


def get_launch_script_path():
    return os.path.join(
        AFTEREFFECTS_ADDON_ROOT, "api", "launch_script.py"
    )

View file

@@ -0,0 +1,68 @@
# AfterEffects Integration
Requirements: this extension requires use of the JavaScript expressions engine, which is
available since After Effects CC 16.0.
Check `File > Project Settings > Expressions > Expressions Engine`.
## Setup
The After Effects integration requires two components to work: the `extension` and the `server`.
### Extension
To install the extension, download the [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd).
```
ExManCmd /install {path to addon}/api/extension.zxp
```
or download [Anastasiy's Extension Manager](https://install.anastasiy.com/)

`{path to addon}` will most likely be in your AppData folder on Windows, or in your user data folder on Linux and macOS.
### Server
The easiest way to launch the server together with After Effects is:
```
python -c ^"import ayon_core.hosts.photoshop;ayon_aftereffects.launch(""c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"")^"
```
`ayon_aftereffects.launch` launches the application and the server, and also closes the server when After Effects exits.
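For orientation, here is a rough sketch of what such a launch helper does: start the websocket RPC server the extension connects to, spawn After Effects, and shut the server down once the application exits. The helper names below (`start_rpc_server`, `server.stop()`) are illustrative assumptions, not the actual `ayon_aftereffects` API.
```
# Sketch only -- not the actual ayon_aftereffects implementation.
import subprocess

AE_EXE = r"c:\Program Files\Adobe\Adobe After Effects 2020\Support Files\AfterFX.exe"
WEBSOCKET_URL = "ws://localhost:8097/ws/"  # default the addon sets in addon.py


def launch(executable=AE_EXE):
    # start_rpc_server is a hypothetical helper standing in for the addon's
    # websocket RPC server that the CEP extension talks to.
    server = start_rpc_server(WEBSOCKET_URL)
    try:
        process = subprocess.Popen([executable])  # start After Effects itself
        process.wait()                            # block until the user closes it
    finally:
        server.stop()                             # hypothetical: stop the server afterwards
```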
## Usage
The After Effects extension can be found under `Window > Extensions > AYON`. Once launched, you should be presented with a panel like this:
![Ayon Panel](panel.png "Ayon Panel")
## Developing
### Extension
When developing the extension, you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).
When signing the extension, you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).
```
ZXPSignCmd -selfSignedCert NA NA Ayon Avalon-After-Effects Ayon extension.p12
ZXPSignCmd -sign {path to addon}/api/extension {path to addon}/api/extension.zxp extension.p12 Ayon
```
### Plugin Examples
These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py); see the sketch below.
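As a rough illustration only (the class and attribute names follow common pyblish plugin conventions and are assumptions, not the actual polly config code), registering the extra family could look like this:
```
# Sketch only: a pyblish integrator that also accepts the "image" family.
import pyblish.api


class IntegrateAsset(pyblish.api.InstancePlugin):
    """Integrate published instances into the project structure."""
    label = "Integrate Asset"
    order = pyblish.api.IntegratorOrder
    families = ["model", "render", "image"]  # "image" added for After Effects output

    def process(self, instance):
        self.log.info("Integrating %s" % instance.data["name"])
```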
Expected deployed extension location on default Windows:
`c:\Program Files (x86)\Common Files\Adobe\CEP\extensions\io.ynput.AE.panel`
For easier debugging of JavaScript:
https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1
Optionally add `--enable-blink-features=ShadowDOMV0,CustomElementsV0` when starting Chrome,
then open `localhost:8092`.
Or use Visual Studio Code: https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01
## Resources
- https://javascript-tools-guide.readthedocs.io/introduction/index.html
- https://github.com/Adobe-CEP/Getting-Started-guides
- https://github.com/Adobe-CEP/CEP-Resources

View file

@@ -0,0 +1,46 @@
"""Public API
Anything that isn't defined here is INTERNAL and unreliable for external use.
"""
from .ws_stub import (
get_stub,
)
from .pipeline import (
AfterEffectsHost,
ls,
containerise
)
from .lib import (
maintained_selection,
get_extension_manifest_path,
get_folder_settings,
set_settings
)
from .plugin import (
AfterEffectsLoader
)
__all__ = [
# ws_stub
"get_stub",
# pipeline
"AfterEffectsHost",
"ls",
"containerise",
# lib
"maintained_selection",
"get_extension_manifest_path",
"get_folder_settings",
"set_settings",
# plugin
"AfterEffectsLoader"
]

View file

@@ -0,0 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionList>
<Extension Id="io.ynput.AE.panel">
<HostList>
<!-- Comment Host tags according to the apps you want your panel to support -->
<!-- Photoshop -->
<Host Name="PHXS" Port="8088"/>
<!-- Illustrator -->
<Host Name="ILST" Port="8089"/>
<!-- InDesign -->
<Host Name="IDSN" Port="8090" />
<!-- Premiere -->
<Host Name="PPRO" Port="8091" />
<!-- AfterEffects -->
<Host Name="AEFT" Port="8092" />
<!-- PRELUDE -->
<Host Name="PRLD" Port="8093" />
<!-- FLASH Pro -->
<Host Name="FLPR" Port="8094" />
</HostList>
</Extension>
</ExtensionList>

View file

@@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionManifest Version="8.0" ExtensionBundleId="io.ynput.AE.panel" ExtensionBundleVersion="1.1.0"
ExtensionBundleName="io.ynput.AE.panel" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ExtensionList>
<Extension Id="io.ynput.AE.panel" Version="1.0" />
</ExtensionList>
<ExecutionEnvironment>
<HostList>
<!-- Uncomment Host tags according to the apps you want your panel to support -->
<!-- Photoshop -->
<!--<Host Name="PHXS" Version="[14.0,19.0]" /> -->
<!-- <Host Name="PHSP" Version="[14.0,19.0]" /> -->
<!-- Illustrator -->
<!-- <Host Name="ILST" Version="[18.0,22.0]" /> -->
<!-- InDesign -->
<!-- <Host Name="IDSN" Version="[10.0,13.0]" /> -->
<!-- Premiere -->
<!-- <Host Name="PPRO" Version="[8.0,12.0]" /> -->
<!-- AfterEffects -->
<Host Name="AEFT" Version="[13.0,99.0]" />
<!-- PRELUDE -->
<!-- <Host Name="PRLD" Version="[3.0,7.0]" /> -->
<!-- FLASH Pro -->
<!-- <Host Name="FLPR" Version="[14.0,18.0]" /> -->
</HostList>
<LocaleList>
<Locale Code="All" />
</LocaleList>
<RequiredRuntimeList>
<RequiredRuntime Name="CSXS" Version="9.0" />
</RequiredRuntimeList>
</ExecutionEnvironment>
<DispatchInfoList>
<Extension Id="io.ynput.AE.panel">
<DispatchInfo >
<Resources>
<MainPath>./index.html</MainPath>
<ScriptPath>./jsx/hostscript.jsx</ScriptPath>
</Resources>
<Lifecycle>
<AutoVisible>true</AutoVisible>
</Lifecycle>
<UI>
<Type>Panel</Type>
<Menu>AYON</Menu>
<Geometry>
<Size>
<Height>200</Height>
<Width>100</Width>
</Size>
<!--<MinSize>
<Height>550</Height>
<Width>400</Width>
</MinSize>
<MaxSize>
<Height>550</Height>
<Width>400</Width>
</MaxSize>-->
</Geometry>
<Icons>
<Icon Type="Normal">./icons/ayon_logo.png</Icon>
<Icon Type="RollOver">./icons/iconRollover.png</Icon>
<Icon Type="Disabled">./icons/iconDisabled.png</Icon>
<Icon Type="DarkNormal">./icons/iconDarkNormal.png</Icon>
<Icon Type="DarkRollOver">./icons/iconDarkRollover.png</Icon>
</Icons>
</UI>
</DispatchInfo>
</Extension>
</DispatchInfoList>
</ExtensionManifest>

View file

@@ -0,0 +1,327 @@
/*
* HTML5 Boilerplate
*
* What follows is the result of much research on cross-browser styling.
* Credit left inline and big thanks to Nicolas Gallagher, Jonathan Neal,
* Kroc Camen, and the H5BP dev community and team.
*
* Detailed information about this CSS: h5bp.com/css
*
* ==|== normalize ==========================================================
*/
/* =============================================================================
HTML5 display definitions
========================================================================== */
article, aside, details, figcaption, figure, footer, header, hgroup, nav, section { display: block; }
audio, canvas, video { display: inline-block; *display: inline; *zoom: 1; }
audio:not([controls]) { display: none; }
[hidden] { display: none; }
/* =============================================================================
Base
========================================================================== */
/*
* 1. Correct text resizing oddly in IE6/7 when body font-size is set using em units
* 2. Force vertical scrollbar in non-IE
* 3. Prevent iOS text size adjust on device orientation change, without disabling user zoom: h5bp.com/g
*/
html { font-size: 100%; overflow-y: scroll; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; }
body { margin: 0; font-size: 100%; line-height: 1.231; }
body, button, input, select, textarea { font-family: helvetica, arial, "lucida grande", verdana, "メイリオ", "ＭＳ Ｐゴシック", sans-serif; color: #222; }
/*
* Remove text-shadow in selection highlight: h5bp.com/i
* These selection declarations have to be separate
* Also: hot pink! (or customize the background color to match your design)
*/
::selection { text-shadow: none; background-color: highlight; color: highlighttext; }
/* =============================================================================
Links
========================================================================== */
a { color: #00e; }
a:visited { color: #551a8b; }
a:hover { color: #06e; }
a:focus { outline: thin dotted; }
/* Improve readability when focused and hovered in all browsers: h5bp.com/h */
a:hover, a:active { outline: 0; }
/* =============================================================================
Typography
========================================================================== */
abbr[title] { border-bottom: 1px dotted; }
b, strong { font-weight: bold; }
blockquote { margin: 1em 40px; }
dfn { font-style: italic; }
hr { display: block; height: 1px; border: 0; border-top: 1px solid #ccc; margin: 1em 0; padding: 0; }
ins { background: #ff9; color: #000; text-decoration: none; }
mark { background: #ff0; color: #000; font-style: italic; font-weight: bold; }
/* Redeclare monospace font family: h5bp.com/j */
pre, code, kbd, samp { font-family: monospace, serif; _font-family: 'courier new', monospace; font-size: 1em; }
/* Improve readability of pre-formatted text in all browsers */
pre { white-space: pre; white-space: pre-wrap; word-wrap: break-word; }
q { quotes: none; }
q:before, q:after { content: ""; content: none; }
small { font-size: 85%; }
/* Position subscript and superscript content without affecting line-height: h5bp.com/k */
sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; }
sup { top: -0.5em; }
sub { bottom: -0.25em; }
/* =============================================================================
Lists
========================================================================== */
ul, ol { margin: 1em 0; padding: 0 0 0 40px; }
dd { margin: 0 0 0 40px; }
nav ul, nav ol { list-style: none; list-style-image: none; margin: 0; padding: 0; }
/* =============================================================================
Embedded content
========================================================================== */
/*
* 1. Improve image quality when scaled in IE7: h5bp.com/d
* 2. Remove the gap between images and borders on image containers: h5bp.com/e
*/
img { border: 0; -ms-interpolation-mode: bicubic; vertical-align: middle; }
/*
* Correct overflow not hidden in IE9
*/
svg:not(:root) { overflow: hidden; }
/* =============================================================================
Figures
========================================================================== */
figure { margin: 0; }
/* =============================================================================
Forms
========================================================================== */
form { margin: 0; }
fieldset { border: 0; margin: 0; padding: 0; }
/* Indicate that 'label' will shift focus to the associated form element */
label { cursor: pointer; }
/*
* 1. Correct color not inheriting in IE6/7/8/9
* 2. Correct alignment displayed oddly in IE6/7
*/
legend { border: 0; *margin-left: -7px; padding: 0; }
/*
* 1. Correct font-size not inheriting in all browsers
* 2. Remove margins in FF3/4 S5 Chrome
* 3. Define consistent vertical alignment display in all browsers
*/
button, input, select, textarea { font-size: 100%; margin: 0; vertical-align: baseline; *vertical-align: middle; }
/*
* 1. Define line-height as normal to match FF3/4 (set using !important in the UA stylesheet)
*/
button, input { line-height: normal; }
/*
* 1. Display hand cursor for clickable form elements
* 2. Allow styling of clickable form elements in iOS
* 3. Correct inner spacing displayed oddly in IE7 (doesn't effect IE6)
*/
button, input[type="button"], input[type="reset"], input[type="submit"] { cursor: pointer; -webkit-appearance: button; *overflow: visible; }
/*
* Consistent box sizing and appearance
*/
input[type="checkbox"], input[type="radio"] { box-sizing: border-box; padding: 0; }
input[type="search"] { -webkit-appearance: textfield; -moz-box-sizing: content-box; -webkit-box-sizing: content-box; box-sizing: content-box; }
input[type="search"]::-webkit-search-decoration { -webkit-appearance: none; }
/*
* Remove inner padding and border in FF3/4: h5bp.com/l
*/
button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; }
/*
* 1. Remove default vertical scrollbar in IE6/7/8/9
* 2. Allow only vertical resizing
*/
textarea { overflow: auto; vertical-align: top; resize: vertical; }
/* Colors for form validity */
input:valid, textarea:valid { }
input:invalid, textarea:invalid { background-color: #f0dddd; }
/* =============================================================================
Tables
========================================================================== */
table { border-collapse: collapse; border-spacing: 0; }
td { vertical-align: top; }
/* ==|== primary styles =====================================================
Author:
========================================================================== */
/* ==|== media queries ======================================================
PLACEHOLDER Media Queries for Responsive Design.
These override the primary ('mobile first') styles
Modify as content requires.
========================================================================== */
@media only screen and (min-width: 480px) {
/* Style adjustments for viewports 480px and over go here */
}
@media only screen and (min-width: 768px) {
/* Style adjustments for viewports 768px and over go here */
}
/* ==|== non-semantic helper classes ========================================
Please define your styles before this section.
========================================================================== */
/* For image replacement */
.ir { display: block; border: 0; text-indent: -999em; overflow: hidden; background-color: transparent; background-repeat: no-repeat; text-align: left; direction: ltr; }
.ir br { display: none; }
/* Hide from both screenreaders and browsers: h5bp.com/u */
.hidden { display: none !important; visibility: hidden; }
/* Hide only visually, but have it available for screenreaders: h5bp.com/v */
.visuallyhidden { border: 0; clip: rect(0 0 0 0); height: 1px; margin: -1px; overflow: hidden; padding: 0; position: absolute; width: 1px; }
/* Extends the .visuallyhidden class to allow the element to be focusable when navigated to via the keyboard: h5bp.com/p */
.visuallyhidden.focusable:active, .visuallyhidden.focusable:focus { clip: auto; height: auto; margin: 0; overflow: visible; position: static; width: auto; }
/* Hide visually and from screenreaders, but maintain layout */
.invisible { visibility: hidden; }
/* Contain floats: h5bp.com/q */
.clearfix:before, .clearfix:after { content: ""; display: table; }
.clearfix:after { clear: both; }
.clearfix { *zoom: 1; }
/* ==|== print styles =======================================================
Print styles.
Inlined to avoid required HTTP connection: h5bp.com/r
========================================================================== */
@media print {
* { background: transparent !important; color: black !important; box-shadow:none !important; text-shadow: none !important; filter:none !important; -ms-filter: none !important; } /* Black prints faster: h5bp.com/s */
a, a:visited { text-decoration: underline; }
a[href]:after { content: " (" attr(href) ")"; }
abbr[title]:after { content: " (" attr(title) ")"; }
.ir a:after, a[href^="javascript:"]:after, a[href^="#"]:after { content: ""; } /* Don't show links for images, or javascript/internal links */
pre, blockquote { border: 1px solid #999; page-break-inside: avoid; }
table { display: table-header-group; } /* h5bp.com/t */
tr, img { page-break-inside: avoid; }
img { max-width: 100% !important; }
@page { margin: 0.5cm; }
p, h2, h3 { orphans: 3; widows: 3; }
h2, h3 { page-break-after: avoid; }
}
/* reflow reset for -webkit-margin-before: 1em */
p { margin: 0; }
html {
overflow-y: auto;
background-color: transparent;
height: 100%;
}
body {
background: #fff;
font: normal 100%;
position: relative;
height: 100%;
}
body, div, img, p, button, input, select, textarea {
box-sizing: border-box;
}
.image {
display: block;
}
input {
cursor: default;
display: block;
}
input[type=button] {
background-color: #e5e9e8;
border: 1px solid #9daca9;
border-radius: 4px;
box-shadow: inset 0 1px #fff;
font: inherit;
letter-spacing: inherit;
text-indent: inherit;
color: inherit;
}
input[type=button]:hover {
background-color: #eff1f1;
}
input[type=button]:active {
background-color: #d2d6d6;
border: 1px solid #9daca9;
box-shadow: inset 0 1px rgba(0,0,0,0.1);
}
/* Reset anchor styles to an unstyled default to be in parity with design surface. It
is presumed that most link styles in real-world designs are custom (non-default). */
a, a:visited, a:hover, a:active {
color: inherit;
text-decoration: inherit;
}

View file

@@ -0,0 +1,51 @@
/*Your styles*/
body {
margin: 10px;
}
#content {
margin-right:auto;
margin-left:auto;
vertical-align:middle;
width:100%;
}
#btn_test{
width: 100%;
}
/*
Those classes will be edited at runtime with values specified
by the settings of the CC application
*/
.hostFontColor{}
.hostFontFamily{}
.hostFontSize{}
/*font family, color and size*/
.hostFont{}
/*background color*/
.hostBgd{}
/*lighter background color*/
.hostBgdLight{}
/*darker background color*/
.hostBgdDark{}
/*background color and font*/
.hostElt{}
.hostButton{
border:1px solid;
border-radius:2px;
height:20px;
vertical-align:bottom;
font-family:inherit;
color:inherit;
font-size:inherit;
}

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 18 KiB

View file

@@ -0,0 +1,187 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="css/topcoat-desktop-dark.min.css"/>
<link id="hostStyle" rel="stylesheet" href="css/styles.css"/>
<style type="text/css">
html, body, iframe {
width: 100%;
height: 100%;
border: 0px;
margin: 0px;
overflow: hidden;
}
button {width: 100%;}
</style>
<style>
button {width: 100%;}
body {margin:0; padding:0; height: 100%;}
html {height: 100%;}
</style>
<title></title>
<script src="js/libs/jquery-2.0.2.min.js"></script>
<script type=text/javascript>
$(function() {
$("a#workfiles-button").bind("click", function() {
RPC.call('AfterEffects.workfiles_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#loader-button").bind("click", function() {
RPC.call('AfterEffects.loader_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#publish-button").bind("click", function() {
RPC.call('AfterEffects.publish_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#sceneinventory-button").bind("click", function() {
RPC.call('AfterEffects.sceneinventory_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#setresolution-button").bind("click", function() {
RPC.call('AfterEffects.setresolution_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#setframes-button").bind("click", function() {
RPC.call('AfterEffects.setframes_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#setall-button").bind("click", function() {
RPC.call('AfterEffects.setall_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#create-placeholder-button").bind("click", function() {
RPC.call('AfterEffects.create_placeholder_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#update-placeholder-button").bind("click", function() {
RPC.call('AfterEffects.update_placeholder_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#build-workfile-button").bind("click", function() {
RPC.call('AfterEffects.build_workfile_template_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
<script type=text/javascript>
$(function() {
$("a#experimental-button").bind("click", function() {
RPC.call('AfterEffects.experimental_tools_route').then(function (data) {
}, function (error) {
alert(error);
});
});
});
</script>
</head>
<body class="hostElt">
<div id="content">
<div>
<div></div><a href=# id=workfiles-button><button class="hostFontSize">Workfiles...</button></a></div>
<div><a href=# id=loader-button><button class="hostFontSize">Load...</button></a></div>
<div><a href=# id=publish-button><button class="hostFontSize">Publish...</button></a></div>
<div><a href=# id=sceneinventory-button><button class="hostFontSize">Manage...</button></a></div>
<div><a href=# id=separator0><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=setresolution-button><button class="hostFontSize">Set Resolution</button></a></div>
<div><a href=# id=setframes-button><button class="hostFontSize">Set Frame Range</button></a></div>
<div><a href=# id=setall-button><button class="hostFontSize">Apply All Settings</button></a></div>
<div><a href=# id=separator1><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=create-placeholder-button><button class="hostFontSize">Create placeholder</button></a></div>
<div><a href=# id=update-placeholder-button><button class="hostFontSize">Update placeholder</button></a></div>
<div><a href=# id=build-workfile-button><button class="hostFontSize">Build Workfile from template</button></a></div>
<div><a href=# id=separator3><button class="hostFontSize">&nbsp;</button></a></div>
<div><a href=# id=experimental-button><button class="hostFontSize">Experimental Tools...</button></a></div>
</div>
</div>
<!-- <script src="js/libs/PlayerDebugMode"></script> -->
<script src="js/libs/wsrpc.js"></script>
<script src="js/libs/loglevel.min.js"></script>
<script src="js/libs/CSInterface.js"></script>
<script src="js/themeManager.js"></script>
<script src="js/main.js"></script>
</body>
</html>

File diff suppressed because one or more lines are too long

View file

@@ -0,0 +1,530 @@
// json2.js
// 2017-06-12
// Public Domain.
// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
// NOT CONTROL.
// This file creates a global JSON object containing two methods: stringify
// and parse. This file provides the ES5 JSON capability to ES3 systems.
// If a project might run on IE8 or earlier, then this file should be included.
// This file does nothing on ES5 systems.
// JSON.stringify(value, replacer, space)
// value any JavaScript value, usually an object or array.
// replacer an optional parameter that determines how object
// values are stringified for objects. It can be a
// function or an array of strings.
// space an optional parameter that specifies the indentation
// of nested structures. If it is omitted, the text will
// be packed without extra whitespace. If it is a number,
// it will specify the number of spaces to indent at each
// level. If it is a string (such as "\t" or "&nbsp;"),
// it contains the characters used to indent at each level.
// This method produces a JSON text from a JavaScript value.
// When an object value is found, if the object contains a toJSON
// method, its toJSON method will be called and the result will be
// stringified. A toJSON method does not serialize: it returns the
// value represented by the name/value pair that should be serialized,
// or undefined if nothing should be serialized. The toJSON method
// will be passed the key associated with the value, and this will be
// bound to the value.
// For example, this would serialize Dates as ISO strings.
// Date.prototype.toJSON = function (key) {
// function f(n) {
// // Format integers to have at least two digits.
// return (n < 10)
// ? "0" + n
// : n;
// }
// return this.getUTCFullYear() + "-" +
// f(this.getUTCMonth() + 1) + "-" +
// f(this.getUTCDate()) + "T" +
// f(this.getUTCHours()) + ":" +
// f(this.getUTCMinutes()) + ":" +
// f(this.getUTCSeconds()) + "Z";
// };
// You can provide an optional replacer method. It will be passed the
// key and value of each member, with this bound to the containing
// object. The value that is returned from your method will be
// serialized. If your method returns undefined, then the member will
// be excluded from the serialization.
// If the replacer parameter is an array of strings, then it will be
// used to select the members to be serialized. It filters the results
// such that only members with keys listed in the replacer array are
// stringified.
// Values that do not have JSON representations, such as undefined or
// functions, will not be serialized. Such values in objects will be
// dropped; in arrays they will be replaced with null. You can use
// a replacer function to replace those with JSON values.
// JSON.stringify(undefined) returns undefined.
// The optional space parameter produces a stringification of the
// value that is filled with line breaks and indentation to make it
// easier to read.
// If the space parameter is a non-empty string, then that string will
// be used for indentation. If the space parameter is a number, then
// the indentation will be that many spaces.
// Example:
// text = JSON.stringify(["e", {pluribus: "unum"}]);
// // text is '["e",{"pluribus":"unum"}]'
// text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t");
// // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
// text = JSON.stringify([new Date()], function (key, value) {
// return this[key] instanceof Date
// ? "Date(" + this[key] + ")"
// : value;
// });
// // text is '["Date(---current time---)"]'
// JSON.parse(text, reviver)
// This method parses a JSON text to produce an object or array.
// It can throw a SyntaxError exception.
// The optional reviver parameter is a function that can filter and
// transform the results. It receives each of the keys and values,
// and its return value is used instead of the original value.
// If it returns what it received, then the structure is not modified.
// If it returns undefined then the member is deleted.
// Example:
// // Parse the text. Values that look like ISO date strings will
// // be converted to Date objects.
// myData = JSON.parse(text, function (key, value) {
// var a;
// if (typeof value === "string") {
// a =
// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
// if (a) {
// return new Date(Date.UTC(
// +a[1], +a[2] - 1, +a[3], +a[4], +a[5], +a[6]
// ));
// }
// return value;
// }
// });
// myData = JSON.parse(
// "[\"Date(09/09/2001)\"]",
// function (key, value) {
// var d;
// if (
// typeof value === "string"
// && value.slice(0, 5) === "Date("
// && value.slice(-1) === ")"
// ) {
// d = new Date(value.slice(5, -1));
// if (d) {
// return d;
// }
// }
// return value;
// }
// );
// This is a reference implementation. You are free to copy, modify, or
// redistribute.
/*jslint
eval, for, this
*/
/*property
JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
lastIndex, length, parse, prototype, push, replace, slice, stringify,
test, toJSON, toString, valueOf
*/
// Create a JSON object only if one does not already exist. We create the
// methods in a closure to avoid creating global variables.
if (typeof JSON !== "object") {
JSON = {};
}
(function () {
"use strict";
var rx_one = /^[\],:{}\s]*$/;
var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g;
var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g;
var rx_four = /(?:^|:|,)(?:\s*\[)+/g;
var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
function f(n) {
// Format integers to have at least two digits.
return (n < 10)
? "0" + n
: n;
}
function this_value() {
return this.valueOf();
}
if (typeof Date.prototype.toJSON !== "function") {
Date.prototype.toJSON = function () {
return isFinite(this.valueOf())
? (
this.getUTCFullYear()
+ "-"
+ f(this.getUTCMonth() + 1)
+ "-"
+ f(this.getUTCDate())
+ "T"
+ f(this.getUTCHours())
+ ":"
+ f(this.getUTCMinutes())
+ ":"
+ f(this.getUTCSeconds())
+ "Z"
)
: null;
};
Boolean.prototype.toJSON = this_value;
Number.prototype.toJSON = this_value;
String.prototype.toJSON = this_value;
}
var gap;
var indent;
var meta;
var rep;
function quote(string) {
// If the string contains no control characters, no quote characters, and no
// backslash characters, then we can safely slap some quotes around it.
// Otherwise we must also replace the offending characters with safe escape
// sequences.
rx_escapable.lastIndex = 0;
return rx_escapable.test(string)
? "\"" + string.replace(rx_escapable, function (a) {
var c = meta[a];
return typeof c === "string"
? c
: "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
}) + "\""
: "\"" + string + "\"";
}
function str(key, holder) {
// Produce a string from holder[key].
var i; // The loop counter.
var k; // The member key.
var v; // The member value.
var length;
var mind = gap;
var partial;
var value = holder[key];
// If the value has a toJSON method, call it to obtain a replacement value.
if (
value
&& typeof value === "object"
&& typeof value.toJSON === "function"
) {
value = value.toJSON(key);
}
// If we were called with a replacer function, then call the replacer to
// obtain a replacement value.
if (typeof rep === "function") {
value = rep.call(holder, key, value);
}
// What happens next depends on the value's type.
switch (typeof value) {
case "string":
return quote(value);
case "number":
// JSON numbers must be finite. Encode non-finite numbers as null.
return (isFinite(value))
? String(value)
: "null";
case "boolean":
case "null":
// If the value is a boolean or null, convert it to a string. Note:
// typeof null does not produce "null". The case is included here in
// the remote chance that this gets fixed someday.
return String(value);
// If the type is "object", we might be dealing with an object or an array or
// null.
case "object":
// Due to a specification blunder in ECMAScript, typeof null is "object",
// so watch out for that case.
if (!value) {
return "null";
}
// Make an array to hold the partial results of stringifying this object value.
gap += indent;
partial = [];
// Is the value an array?
if (Object.prototype.toString.apply(value) === "[object Array]") {
// The value is an array. Stringify every element. Use null as a placeholder
// for non-JSON values.
length = value.length;
for (i = 0; i < length; i += 1) {
partial[i] = str(i, value) || "null";
}
// Join all of the elements together, separated with commas, and wrap them in
// brackets.
v = partial.length === 0
? "[]"
: gap
? (
"[\n"
+ gap
+ partial.join(",\n" + gap)
+ "\n"
+ mind
+ "]"
)
: "[" + partial.join(",") + "]";
gap = mind;
return v;
}
// If the replacer is an array, use it to select the members to be stringified.
if (rep && typeof rep === "object") {
length = rep.length;
for (i = 0; i < length; i += 1) {
if (typeof rep[i] === "string") {
k = rep[i];
v = str(k, value);
if (v) {
partial.push(quote(k) + (
(gap)
? ": "
: ":"
) + v);
}
}
}
} else {
// Otherwise, iterate through all of the keys in the object.
for (k in value) {
if (Object.prototype.hasOwnProperty.call(value, k)) {
v = str(k, value);
if (v) {
partial.push(quote(k) + (
(gap)
? ": "
: ":"
) + v);
}
}
}
}
// Join all of the member texts together, separated with commas,
// and wrap them in braces.
v = partial.length === 0
? "{}"
: gap
? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}"
: "{" + partial.join(",") + "}";
gap = mind;
return v;
}
}
// If the JSON object does not yet have a stringify method, give it one.
if (typeof JSON.stringify !== "function") {
meta = { // table of character substitutions
"\b": "\\b",
"\t": "\\t",
"\n": "\\n",
"\f": "\\f",
"\r": "\\r",
"\"": "\\\"",
"\\": "\\\\"
};
JSON.stringify = function (value, replacer, space) {
// The stringify method takes a value and an optional replacer, and an optional
// space parameter, and returns a JSON text. The replacer can be a function
// that can replace values, or an array of strings that will select the keys.
// A default replacer method can be provided. Use of the space parameter can
// produce text that is more easily readable.
var i;
gap = "";
indent = "";
// If the space parameter is a number, make an indent string containing that
// many spaces.
if (typeof space === "number") {
for (i = 0; i < space; i += 1) {
indent += " ";
}
// If the space parameter is a string, it will be used as the indent string.
} else if (typeof space === "string") {
indent = space;
}
// If there is a replacer, it must be a function or an array.
// Otherwise, throw an error.
rep = replacer;
if (replacer && typeof replacer !== "function" && (
typeof replacer !== "object"
|| typeof replacer.length !== "number"
)) {
throw new Error("JSON.stringify");
}
// Make a fake root object containing our value under the key of "".
// Return the result of stringifying the value.
return str("", {"": value});
};
}
// If the JSON object does not yet have a parse method, give it one.
if (typeof JSON.parse !== "function") {
JSON.parse = function (text, reviver) {
// The parse method takes a text and an optional reviver function, and returns
// a JavaScript value if the text is a valid JSON text.
var j;
function walk(holder, key) {
// The walk method is used to recursively walk the resulting structure so
// that modifications can be made.
var k;
var v;
var value = holder[key];
if (value && typeof value === "object") {
for (k in value) {
if (Object.prototype.hasOwnProperty.call(value, k)) {
v = walk(value, k);
if (v !== undefined) {
value[k] = v;
} else {
delete value[k];
}
}
}
}
return reviver.call(holder, key, value);
}
// Parsing happens in four stages. In the first stage, we replace certain
// Unicode characters with escape sequences. JavaScript handles many characters
// incorrectly, either silently deleting them, or treating them as line endings.
text = String(text);
rx_dangerous.lastIndex = 0;
if (rx_dangerous.test(text)) {
text = text.replace(rx_dangerous, function (a) {
return (
"\\u"
+ ("0000" + a.charCodeAt(0).toString(16)).slice(-4)
);
});
}
// In the second stage, we run the text against regular expressions that look
// for non-JSON patterns. We are especially concerned with "()" and "new"
// because they can cause invocation, and "=" because it can cause mutation.
// But just to be safe, we want to reject all unexpected forms.
// We split the second stage into 4 regexp operations in order to work around
// crippling inefficiencies in IE's and Safari's regexp engines. First we
// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we
// replace all simple value tokens with "]" characters. Third, we delete all
// open brackets that follow a colon or comma or that begin the text. Finally,
// we look to see that the remaining characters are only whitespace or "]" or
// "," or ":" or "{" or "}". If that is so, then the text is safe for eval.
if (
rx_one.test(
text
.replace(rx_two, "@")
.replace(rx_three, "]")
.replace(rx_four, "")
)
) {
// In the third stage we use the eval function to compile the text into a
// JavaScript structure. The "{" operator is subject to a syntactic ambiguity
// in JavaScript: it can begin a block or an object literal. We wrap the text
// in parens to eliminate the ambiguity.
j = eval("(" + text + ")");
// In the optional fourth stage, we recursively walk the new structure, passing
// each name/value pair to a reviver function for possible transformation.
return (typeof reviver === "function")
? walk({"": j}, "")
: j;
}
// If the text is not JSON parseable, then a SyntaxError is thrown.
throw new SyntaxError("JSON.parse");
};
}
}());

View file

@@ -0,0 +1,2 @@
/*! loglevel - v1.6.8 - https://github.com/pimterry/loglevel - (c) 2020 Tim Perry - licensed MIT */
!function(a,b){"use strict";"function"==typeof define&&define.amd?define(b):"object"==typeof module&&module.exports?module.exports=b():a.log=b()}(this,function(){"use strict";function a(a,b){var c=a[b];if("function"==typeof c.bind)return c.bind(a);try{return Function.prototype.bind.call(c,a)}catch(b){return function(){return Function.prototype.apply.apply(c,[a,arguments])}}}function b(){console.log&&(console.log.apply?console.log.apply(console,arguments):Function.prototype.apply.apply(console.log,[console,arguments])),console.trace&&console.trace()}function c(c){return"debug"===c&&(c="log"),typeof console!==i&&("trace"===c&&j?b:void 0!==console[c]?a(console,c):void 0!==console.log?a(console,"log"):h)}function d(a,b){for(var c=0;c<k.length;c++){var d=k[c];this[d]=c<a?h:this.methodFactory(d,a,b)}this.log=this.debug}function e(a,b,c){return function(){typeof console!==i&&(d.call(this,b,c),this[a].apply(this,arguments))}}function f(a,b,d){return c(a)||e.apply(this,arguments)}function g(a,b,c){function e(a){var b=(k[a]||"silent").toUpperCase();if(typeof window!==i){try{return void(window.localStorage[l]=b)}catch(a){}try{window.document.cookie=encodeURIComponent(l)+"="+b+";"}catch(a){}}}function g(){var a;if(typeof window!==i){try{a=window.localStorage[l]}catch(a){}if(typeof a===i)try{var b=window.document.cookie,c=b.indexOf(encodeURIComponent(l)+"=");-1!==c&&(a=/^([^;]+)/.exec(b.slice(c))[1])}catch(a){}return void 0===j.levels[a]&&(a=void 0),a}}var h,j=this,l="loglevel";a&&(l+=":"+a),j.name=a,j.levels={TRACE:0,DEBUG:1,INFO:2,WARN:3,ERROR:4,SILENT:5},j.methodFactory=c||f,j.getLevel=function(){return h},j.setLevel=function(b,c){if("string"==typeof b&&void 0!==j.levels[b.toUpperCase()]&&(b=j.levels[b.toUpperCase()]),!("number"==typeof b&&b>=0&&b<=j.levels.SILENT))throw"log.setLevel() called with invalid level: "+b;if(h=b,!1!==c&&e(b),d.call(j,b,a),typeof console===i&&b<j.levels.SILENT)return"No console available for logging"},j.setDefaultLevel=function(a){g()||j.setLevel(a,!1)},j.enableAll=function(a){j.setLevel(j.levels.TRACE,a)},j.disableAll=function(a){j.setLevel(j.levels.SILENT,a)};var m=g();null==m&&(m=null==b?"WARN":b),j.setLevel(m,!1)}var h=function(){},i="undefined",j=typeof window!==i&&typeof window.navigator!==i&&/Trident\/|MSIE /.test(window.navigator.userAgent),k=["trace","debug","info","warn","error"],l=new g,m={};l.getLogger=function(a){if("string"!=typeof a||""===a)throw new TypeError("You must supply a name when creating a logger.");var b=m[a];return b||(b=m[a]=new g(a,l.getLevel(),l.methodFactory)),b};var n=typeof window!==i?window.log:void 0;return l.noConflict=function(){return typeof window!==i&&window.log===l&&(window.log=n),l},l.getLoggers=function(){return m},l});

View file

@@ -0,0 +1,393 @@
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global = global || self, global.WSRPC = factory());
}(this, function () { 'use strict';
function _classCallCheck(instance, Constructor) {
if (!(instance instanceof Constructor)) {
throw new TypeError("Cannot call a class as a function");
}
}
var Deferred = function Deferred() {
_classCallCheck(this, Deferred);
var self = this;
self.resolve = null;
self.reject = null;
self.done = false;
function wrapper(func) {
return function () {
if (self.done) throw new Error('Promise already done');
self.done = true;
return func.apply(this, arguments);
};
}
self.promise = new Promise(function (resolve, reject) {
self.resolve = wrapper(resolve);
self.reject = wrapper(reject);
});
self.promise.isPending = function () {
return !self.done;
};
return self;
};
function logGroup(group, level, args) {
console.group(group);
console[level].apply(this, args);
console.groupEnd();
}
function log() {
if (!WSRPC.DEBUG) return;
logGroup('WSRPC.DEBUG', 'trace', arguments);
}
function trace(msg) {
if (!WSRPC.TRACE) return;
var payload = msg;
if ('data' in msg) payload = JSON.parse(msg.data);
logGroup("WSRPC.TRACE", 'trace', [payload]);
}
function getAbsoluteWsUrl(url) {
if (/^\w+:\/\//.test(url)) return url;
if (typeof window == 'undefined' && window.location.host.length < 1) throw new Error("Can not construct absolute URL from ".concat(window.location));
var scheme = window.location.protocol === "https:" ? "wss:" : "ws:";
var port = window.location.port === '' ? ":".concat(window.location.port) : '';
var host = window.location.host;
var path = url.replace(/^\/+/gm, '');
return "".concat(scheme, "//").concat(host).concat(port, "/").concat(path);
}
var readyState = Object.freeze({
0: 'CONNECTING',
1: 'OPEN',
2: 'CLOSING',
3: 'CLOSED'
});
var WSRPC = function WSRPC(URL) {
var reconnectTimeout = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 1000;
_classCallCheck(this, WSRPC);
var self = this;
URL = getAbsoluteWsUrl(URL);
self.id = 1;
self.eventId = 0;
self.socketStarted = false;
self.eventStore = {
onconnect: {},
onerror: {},
onclose: {},
onchange: {}
};
self.connectionNumber = 0;
self.oneTimeEventStore = {
onconnect: [],
onerror: [],
onclose: [],
onchange: []
};
self.callQueue = [];
function createSocket() {
var ws = new WebSocket(URL);
var rejectQueue = function rejectQueue() {
self.connectionNumber++; // rejects incoming calls
var deferred; //reject all pending calls
while (0 < self.callQueue.length) {
var callObj = self.callQueue.shift();
deferred = self.store[callObj.id];
delete self.store[callObj.id];
if (deferred && deferred.promise.isPending()) {
deferred.reject('WebSocket error occurred');
}
} // reject all from the store
for (var key in self.store) {
if (!self.store.hasOwnProperty(key)) continue;
deferred = self.store[key];
if (deferred && deferred.promise.isPending()) {
deferred.reject('WebSocket error occurred');
}
}
};
function reconnect(callEvents) {
setTimeout(function () {
try {
self.socket = createSocket();
self.id = 1;
} catch (exc) {
callEvents('onerror', exc);
delete self.socket;
console.error(exc);
}
}, reconnectTimeout);
}
ws.onclose = function (err) {
log('ONCLOSE CALLED', 'STATE', self.public.state());
trace(err);
for (var serial in self.store) {
if (!self.store.hasOwnProperty(serial)) continue;
if (self.store[serial].hasOwnProperty('reject')) {
self.store[serial].reject('Connection closed');
}
}
rejectQueue();
callEvents('onclose', err);
callEvents('onchange', err);
reconnect(callEvents);
};
ws.onerror = function (err) {
log('ONERROR CALLED', 'STATE', self.public.state());
trace(err);
rejectQueue();
callEvents('onerror', err);
callEvents('onchange', err);
log('WebSocket has been closed by error: ', err);
};
function tryCallEvent(func, event) {
try {
return func(event);
} catch (e) {
if (e.hasOwnProperty('stack')) {
log(e.stack);
} else {
log('Event function', func, 'raised unknown error:', e);
}
console.error(e);
}
}
function callEvents(evName, event) {
while (0 < self.oneTimeEventStore[evName].length) {
var deferred = self.oneTimeEventStore[evName].shift();
if (deferred.hasOwnProperty('resolve') && deferred.promise.isPending()) deferred.resolve();
}
for (var i in self.eventStore[evName]) {
if (!self.eventStore[evName].hasOwnProperty(i)) continue;
var cur = self.eventStore[evName][i];
tryCallEvent(cur, event);
}
}
ws.onopen = function (ev) {
log('ONOPEN CALLED', 'STATE', self.public.state());
trace(ev);
while (0 < self.callQueue.length) {
// noinspection JSUnresolvedFunction
self.socket.send(JSON.stringify(self.callQueue.shift(), 0, 1));
}
callEvents('onconnect', ev);
callEvents('onchange', ev);
};
function handleCall(self, data) {
if (!self.routes.hasOwnProperty(data.method)) throw new Error('Route not found');
var connectionNumber = self.connectionNumber;
var deferred = new Deferred();
deferred.promise.then(function (result) {
if (connectionNumber !== self.connectionNumber) return;
self.socket.send(JSON.stringify({
id: data.id,
result: result
}));
}, function (error) {
if (connectionNumber !== self.connectionNumber) return;
self.socket.send(JSON.stringify({
id: data.id,
error: error
}));
});
var func = self.routes[data.method];
if (self.asyncRoutes[data.method]) return func.apply(deferred, [data.params]);
function badPromise() {
throw new Error("You should register route with async flag.");
}
var promiseMock = {
resolve: badPromise,
reject: badPromise
};
try {
deferred.resolve(func.apply(promiseMock, [data.params]));
} catch (e) {
deferred.reject(e);
console.error(e);
}
}
function handleError(self, data) {
if (!self.store.hasOwnProperty(data.id)) return log('Unknown callback');
var deferred = self.store[data.id];
if (typeof deferred === 'undefined') return log('Confirmation without handler');
delete self.store[data.id];
log('REJECTING', data.error);
deferred.reject(data.error);
}
function handleResult(self, data) {
var deferred = self.store[data.id];
if (typeof deferred === 'undefined') return log('Confirmation without handler');
delete self.store[data.id];
if (data.hasOwnProperty('result')) {
return deferred.resolve(data.result);
}
return deferred.reject(data.error);
}
ws.onmessage = function (message) {
log('ONMESSAGE CALLED', 'STATE', self.public.state());
trace(message);
if (message.type !== 'message') return;
var data;
try {
data = JSON.parse(message.data);
log(data);
if (data.hasOwnProperty('method')) {
return handleCall(self, data);
} else if (data.hasOwnProperty('error') && data.error === null) {
return handleError(self, data);
} else {
return handleResult(self, data);
}
} catch (exception) {
var err = {
error: exception.message,
result: null,
id: data ? data.id : null
};
self.socket.send(JSON.stringify(err));
console.error(exception);
}
};
return ws;
}
function makeCall(func, args, params) {
self.id += 2;
var deferred = new Deferred();
var callObj = Object.freeze({
id: self.id,
method: func,
params: args
});
var state = self.public.state();
if (state === 'OPEN') {
self.store[self.id] = deferred;
self.socket.send(JSON.stringify(callObj));
} else if (state === 'CONNECTING') {
log('SOCKET IS', state);
self.store[self.id] = deferred;
self.callQueue.push(callObj);
} else {
log('SOCKET IS', state);
if (params && params['noWait']) {
deferred.reject("Socket is: ".concat(state));
} else {
self.store[self.id] = deferred;
self.callQueue.push(callObj);
}
}
return deferred.promise;
}
self.asyncRoutes = {};
self.routes = {};
self.store = {};
self.public = Object.freeze({
call: function call(func, args, params) {
return makeCall(func, args, params);
},
addRoute: function addRoute(route, callback, isAsync) {
self.asyncRoutes[route] = isAsync || false;
self.routes[route] = callback;
},
deleteRoute: function deleteRoute(route) {
delete self.asyncRoutes[route];
return delete self.routes[route];
},
addEventListener: function addEventListener(event, func) {
var eventId = self.eventId++;
self.eventStore[event][eventId] = func;
return eventId;
},
removeEventListener: function removeEventListener(event, index) {
if (self.eventStore[event].hasOwnProperty(index)) {
delete self.eventStore[event][index];
return true;
} else {
return false;
}
},
onEvent: function onEvent(event) {
var deferred = new Deferred();
self.oneTimeEventStore[event].push(deferred);
return deferred.promise;
},
destroy: function destroy() {
return self.socket.close();
},
state: function state() {
return readyState[this.stateCode()];
},
stateCode: function stateCode() {
if (self.socketStarted && self.socket) return self.socket.readyState;
return 3;
},
connect: function connect() {
self.socketStarted = true;
self.socket = createSocket();
}
});
self.public.addRoute('log', function (argsObj) {
//console.info("Websocket sent: ".concat(argsObj));
});
self.public.addRoute('ping', function (data) {
return data;
});
return self.public;
};
WSRPC.DEBUG = false;
WSRPC.TRACE = false;
return WSRPC;
}));
//# sourceMappingURL=wsrpc.js.map

File diff suppressed because one or more lines are too long

View file

@@ -0,0 +1,412 @@
/*jslint vars: true, plusplus: true, devel: true, nomen: true, regexp: true,
indent: 4, maxerr: 50 */
/*global $, window, location, CSInterface, SystemPath, themeManager*/
var csInterface = new CSInterface();
log.warn("script start");
WSRPC.DEBUG = false;
WSRPC.TRACE = false;
// get websocket server url from environment value
async function startUp(url){
promis = runEvalScript("getEnv('" + url + "')");
var res = await promis;
log.warn("res: " + res);
promis = runEvalScript("getEnv('AYON_DEBUG')");
var debug = await promis;
log.warn("debug: " + debug);
if (debug && debug.toString() == '3'){
WSRPC.DEBUG = true;
WSRPC.TRACE = true;
}
// run rest only after resolved promise
main(res);
}
function get_extension_version(){
/** Returns version number from extension manifest.xml **/
log.debug("get_extension_version")
var path = csInterface.getSystemPath(SystemPath.EXTENSION);
log.debug("extension path " + path);
var result = window.cep.fs.readFile(path + "/CSXS/manifest.xml");
var version = undefined;
if(result.err === 0){
if (window.DOMParser) {
const parser = new DOMParser();
const xmlDoc = parser.parseFromString(result.data.toString(),
'text/xml');
const children = xmlDoc.children;
for (let i = 0; i <= children.length; i++) {
if (children[i] &&
children[i].getAttribute('ExtensionBundleVersion')) {
version =
children[i].getAttribute('ExtensionBundleVersion');
}
}
}
}
return '{"result":"' + version + '"}'
}
function main(websocket_url){
// creates connection to 'websocket_url', registers routes
var default_url = 'ws://localhost:8099/ws/';
if (websocket_url == ''){
websocket_url = default_url;
}
RPC = new WSRPC(websocket_url, 5000); // spin connection
RPC.connect();
log.warn("connected");
RPC.addRoute('AfterEffects.open', function (data) {
log.warn('Server called client route "open":', data);
var escapedPath = EscapeStringForJSX(data.path);
return runEvalScript("fileOpen('" + escapedPath +"')")
.then(function(result){
log.warn("open: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_metadata', function (data) {
log.warn('Server called client route "get_metadata":', data);
return runEvalScript("getMetadata()")
.then(function(result){
log.warn("getMetadata: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_active_document_name', function (data) {
log.warn('Server called client route ' +
'"get_active_document_name":', data);
return runEvalScript("getActiveDocumentName()")
.then(function(result){
log.warn("get_active_document_name: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_active_document_full_name', function (data){
log.warn('Server called client route ' +
'"get_active_document_full_name":', data);
return runEvalScript("getActiveDocumentFullName()")
.then(function(result){
log.warn("get_active_document_full_name: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.add_item', function (data) {
log.warn('Server called client route "add_item":', data);
var escapedName = EscapeStringForJSX(data.name);
return runEvalScript("addItem('" + escapedName +"', " +
"'" + data.item_type + "')")
.then(function(result){
log.warn("get_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_items', function (data) {
log.warn('Server called client route "get_items":', data);
return runEvalScript("getItems(" + data.comps + "," +
data.folders + "," +
data.footages + ")")
.then(function(result){
log.warn("get_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.select_items', function (data) {
log.warn('Server called client route "select_items":', data);
return runEvalScript("selectItems(" + JSON.stringify(data.items) + ")")
.then(function(result){
log.warn("select_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_selected_items', function (data) {
log.warn('Server called client route "get_selected_items":', data);
return runEvalScript("getSelectedItems(" + data.comps + "," +
data.folders + "," +
data.footages + ")")
.then(function(result){
log.warn("get_items: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.import_file', function (data) {
log.warn('Server called client route "import_file":', data);
var escapedPath = EscapeStringForJSX(data.path);
return runEvalScript("importFile('" + escapedPath +"', " +
"'" + data.item_name + "'," +
"'" + JSON.stringify(
data.import_options) + "')")
.then(function(result){
log.warn("importFile: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.replace_item', function (data) {
log.warn('Server called client route "replace_item":', data);
var escapedPath = EscapeStringForJSX(data.path);
return runEvalScript("replaceItem(" + data.item_id + ", " +
"'" + escapedPath + "', " +
"'" + data.item_name + "')")
.then(function(result){
log.warn("replaceItem: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.rename_item', function (data) {
log.warn('Server called client route "rename_item":', data);
return runEvalScript("renameItem(" + data.item_id + ", " +
"'" + data.item_name + "')")
.then(function(result){
log.warn("renameItem: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.delete_item', function (data) {
log.warn('Server called client route "delete_item":', data);
return runEvalScript("deleteItem(" + data.item_id + ")")
.then(function(result){
log.warn("deleteItem: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.imprint', function (data) {
log.warn('Server called client route "imprint":', data);
var escaped = data.payload.replace(/\n/g, "\\n");
return runEvalScript("imprint('" + escaped +"')")
.then(function(result){
log.warn("imprint: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.set_label_color', function (data) {
log.warn('Server called client route "set_label_color":', data);
return runEvalScript("setLabelColor(" + data.item_id + "," +
data.color_idx + ")")
.then(function(result){
log.warn("imprint: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_comp_properties', function (data) {
log.warn('Server called client route "get_comp_properties":', data);
return runEvalScript("getCompProperties(" + data.item_id + ")")
.then(function(result){
log.warn("get_comp_properties: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.set_comp_properties', function (data) {
log.warn('Server called client route "set_work_area":', data);
return runEvalScript("setCompProperties(" + data.item_id + ',' +
data.start + ',' +
data.duration + ',' +
data.frame_rate + ',' +
data.width + ',' +
data.height + ")")
.then(function(result){
log.warn("set_comp_properties: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.saveAs', function (data) {
log.warn('Server called client route "saveAs":', data);
var escapedPath = EscapeStringForJSX(data.image_path);
return runEvalScript("saveAs('" + escapedPath + "', " +
data.as_copy + ")")
.then(function(result){
log.warn("saveAs: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.save', function (data) {
log.warn('Server called client route "save":', data);
return runEvalScript("save()")
.then(function(result){
log.warn("save: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_render_info', function (data) {
log.warn('Server called client route "get_render_info":', data);
return runEvalScript("getRenderInfo(" + data.comp_id +")")
.then(function(result){
log.warn("get_render_info: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_audio_url', function (data) {
log.warn('Server called client route "get_audio_url":', data);
return runEvalScript("getAudioUrlForComp(" + data.item_id + ")")
.then(function(result){
log.warn("getAudioUrlForComp: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.import_background', function (data) {
log.warn('Server called client route "import_background":', data);
return runEvalScript("importBackground(" + data.comp_id + ", " +
"'" + data.comp_name + "', " +
JSON.stringify(data.files) + ")")
.then(function(result){
log.warn("importBackground: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.reload_background', function (data) {
log.warn('Server called client route "reload_background":', data);
return runEvalScript("reloadBackground(" + data.comp_id + ", " +
"'" + data.comp_name + "', " +
JSON.stringify(data.files) + ")")
.then(function(result){
log.warn("reloadBackground: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.add_item_as_layer', function (data) {
log.warn('Server called client route "add_item_as_layer":', data);
return runEvalScript("addItemAsLayerToComp(" + data.comp_id + ", " +
data.item_id + "," +
" null )")
.then(function(result){
log.warn("addItemAsLayerToComp: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.add_item_instead_placeholder', function (data) {
log.warn('Server called client route "add_item_instead_placeholder":', data);
return runEvalScript("addItemInstead(" + data.placeholder_item_id + ", " +
data.item_id + ")")
.then(function(result){
log.warn("add_item_instead_placeholder: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.render', function (data) {
log.warn('Server called client route "render":', data);
var escapedPath = EscapeStringForJSX(data.folder_url);
return runEvalScript("render('" + escapedPath +"', " + data.comp_id + ")")
.then(function(result){
log.warn("render: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.get_extension_version', function (data) {
log.warn('Server called client route "get_extension_version":', data);
return get_extension_version();
});
RPC.addRoute('AfterEffects.get_app_version', function (data) {
log.warn('Server called client route "get_app_version":', data);
return runEvalScript("getAppVersion()")
.then(function(result){
log.warn("get_app_version: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.add_placeholder', function (data) {
log.warn('Server called client route "add_placeholder":', data);
var escapedName = EscapeStringForJSX(data.name);
return runEvalScript("addPlaceholder('" + escapedName +"',"+
data.width + ',' +
data.height + ',' +
data.fps + ',' +
data.duration + ")")
.then(function(result){
log.warn("add_placeholder: " + result);
return result;
});
});
RPC.addRoute('AfterEffects.close', function (data) {
log.warn('Server called client route "close":', data);
return runEvalScript("close()");
});
RPC.addRoute('AfterEffects.print_msg', function (data) {
log.warn('Server called client route "print_msg":', data);
var escaped_msg = EscapeStringForJSX(data.msg);
return runEvalScript("printMsg('" + escaped_msg +"')")
.then(function(result){
log.warn("print_msg: " + result);
return result;
});
});
}
/** main entry point **/
startUp("WEBSOCKET_URL");
(function () {
'use strict';
var csInterface = new CSInterface();
function init() {
themeManager.init();
$("#btn_test").click(function () {
csInterface.evalScript('sayHello()');
});
}
init();
}());
function EscapeStringForJSX(str){
// Replaces:
// \ with \\
// ' with \'
// " with \"
// See: https://stackoverflow.com/a/3967927/5285364
return str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g,'\\"');
}
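// Illustrative sketch of the escaping behaviour (example path assumed):
// a path like C:\renders\shot "v01".aep comes back as
// C:\\renders\\shot \"v01\".aep, so it can be embedded safely inside the
// evalScript string literals built in the routes above.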
function runEvalScript(script) {
// because of asynchronous nature of functions in jsx
// this waits for response
return new Promise(function(resolve, reject){
csInterface.evalScript(script, resolve);
});
}
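// Usage sketch: each route above follows this same pattern - evaluate a JSX
// helper and forward its stringified result back to the websocket server, e.g.
//   runEvalScript("getAppVersion()").then(function (result) {
//       log.warn("app version: " + result);
//   });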

View file

@ -0,0 +1,128 @@
/*jslint vars: true, plusplus: true, devel: true, nomen: true, regexp: true, indent: 4, maxerr: 50 */
/*global window, document, CSInterface*/
/*
Responsible for overwriting CSS at runtime according to CC app
settings as defined by the end user.
*/
var themeManager = (function () {
'use strict';
/**
* Convert the Color object to string in hexadecimal format;
*/
function toHex(color, delta) {
function computeValue(value, delta) {
var computedValue = !isNaN(delta) ? value + delta : value;
if (computedValue < 0) {
computedValue = 0;
} else if (computedValue > 255) {
computedValue = 255;
}
computedValue = Math.floor(computedValue);
computedValue = computedValue.toString(16);
return computedValue.length === 1 ? "0" + computedValue : computedValue;
}
var hex = "";
if (color) {
hex = computeValue(color.red, delta) + computeValue(color.green, delta) + computeValue(color.blue, delta);
}
return hex;
}
function reverseColor(color, delta) {
return toHex({
red: Math.abs(255 - color.red),
green: Math.abs(255 - color.green),
blue: Math.abs(255 - color.blue)
},
delta);
}
function addRule(stylesheetId, selector, rule) {
var stylesheet = document.getElementById(stylesheetId);
if (stylesheet) {
stylesheet = stylesheet.sheet;
if (stylesheet.addRule) {
stylesheet.addRule(selector, rule);
} else if (stylesheet.insertRule) {
stylesheet.insertRule(selector + ' { ' + rule + ' }', stylesheet.cssRules.length);
}
}
}
/**
* Update the theme with the AppSkinInfo retrieved from the host product.
*/
function updateThemeWithAppSkinInfo(appSkinInfo) {
var panelBgColor = appSkinInfo.panelBackgroundColor.color;
var bgdColor = toHex(panelBgColor);
var darkBgdColor = toHex(panelBgColor, 20);
var fontColor = "F0F0F0";
if (panelBgColor.red > 122) {
fontColor = "000000";
}
var lightBgdColor = toHex(panelBgColor, -100);
var styleId = "hostStyle";
addRule(styleId, ".hostElt", "background-color:" + "#" + bgdColor);
addRule(styleId, ".hostElt", "font-size:" + appSkinInfo.baseFontSize + "px;");
addRule(styleId, ".hostElt", "font-family:" + appSkinInfo.baseFontFamily);
addRule(styleId, ".hostElt", "color:" + "#" + fontColor);
addRule(styleId, ".hostBgd", "background-color:" + "#" + bgdColor);
addRule(styleId, ".hostBgdDark", "background-color: " + "#" + darkBgdColor);
addRule(styleId, ".hostBgdLight", "background-color: " + "#" + lightBgdColor);
addRule(styleId, ".hostFontSize", "font-size:" + appSkinInfo.baseFontSize + "px;");
addRule(styleId, ".hostFontFamily", "font-family:" + appSkinInfo.baseFontFamily);
addRule(styleId, ".hostFontColor", "color:" + "#" + fontColor);
addRule(styleId, ".hostFont", "font-size:" + appSkinInfo.baseFontSize + "px;");
addRule(styleId, ".hostFont", "font-family:" + appSkinInfo.baseFontFamily);
addRule(styleId, ".hostFont", "color:" + "#" + fontColor);
addRule(styleId, ".hostButton", "background-color:" + "#" + darkBgdColor);
addRule(styleId, ".hostButton:hover", "background-color:" + "#" + bgdColor);
addRule(styleId, ".hostButton:active", "background-color:" + "#" + darkBgdColor);
addRule(styleId, ".hostButton", "border-color: " + "#" + lightBgdColor);
}
function onAppThemeColorChanged(event) {
var skinInfo = JSON.parse(window.__adobe_cep__.getHostEnvironment()).appSkinInfo;
updateThemeWithAppSkinInfo(skinInfo);
}
function init() {
var csInterface = new CSInterface();
updateThemeWithAppSkinInfo(csInterface.hostEnvironment.appSkinInfo);
csInterface.addEventListener(CSInterface.THEME_COLOR_CHANGED_EVENT, onAppThemeColorChanged);
}
return {
init: init
};
}());

View file

@ -0,0 +1,946 @@
/*jslint vars: true, plusplus: true, devel: true, nomen: true, regexp: true,
indent: 4, maxerr: 50 */
/*global $, Folder*/
//@include "../js/libs/json.js"
/* All public API function should return JSON! */
app.preferences.savePrefAsBool("General Section", "Show Welcome Screen", false) ;
if(!Array.prototype.indexOf) {
Array.prototype.indexOf = function ( item ) {
var index = 0, length = this.length;
for ( ; index < length; index++ ) {
if ( this[index] === item )
return index;
}
return -1;
};
}
function sayHello(){
alert("hello from ExtendScript");
}
function getEnv(variable){
return $.getenv(variable);
}
function getMetadata(){
/**
* Returns payload in 'Label' field of project's metadata
*
**/
if (ExternalObject.AdobeXMPScript === undefined){
ExternalObject.AdobeXMPScript =
new ExternalObject('lib:AdobeXMPScript');
}
var proj = app.project;
var meta = new XMPMeta(app.project.xmpPacket);
var schemaNS = XMPMeta.getNamespaceURI("xmp");
var label = "xmp:Label";
if (meta.doesPropertyExist(schemaNS, label)){
var prop = meta.getProperty(schemaNS, label);
return prop.value;
}
return _prepareSingleValue([]);
}
function imprint(payload){
/**
* Stores payload in 'Label' field of project's metadata
*
* Args:
* payload (string): json content
*/
if (ExternalObject.AdobeXMPScript === undefined){
ExternalObject.AdobeXMPScript =
new ExternalObject('lib:AdobeXMPScript');
}
var proj = app.project;
var meta = new XMPMeta(app.project.xmpPacket);
var schemaNS = XMPMeta.getNamespaceURI("xmp");
var label = "xmp:Label";
meta.setProperty(schemaNS, label, payload);
app.project.xmpPacket = meta.serialize();
}
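// Round-trip sketch (payload content is illustrative): the pipeline stores
// its publish metadata as a JSON string in the XMP "Label" field, e.g.
//   imprint('[{"id": "publish_context", "variant": "Main"}]');
//   var payload = getMetadata(); // -> the JSON string stored above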
function fileOpen(path){
/**
* Opens (project) file on 'path'
*/
fp = new File(path);
return _prepareSingleValue(app.open(fp))
}
function getActiveDocumentName(){
/**
* Returns file name of active document
* */
var file = app.project.file;
if (file){
return _prepareSingleValue(file.name)
}
return _prepareError("No file open currently");
}
function getActiveDocumentFullName(){
/**
* Returns absolute path to current project
* */
var file = app.project.file;
if (file){
var f = new File(file.fullName);
var path = f.fsName;
f.close();
return _prepareSingleValue(path)
}
return _prepareError("No file open currently");
}
function addItem(name, item_type){
/**
* Adds comp or folder to project items.
*
* Could be called when creating publishable instance to prepare
* composition (and render queue).
*
* Args:
* name (str): composition name
* item_type (str): COMP|FOLDER
* Returns:
* SingleItemValue: eg {"result": VALUE}
*/
if (item_type == "COMP"){
// dummy values, will be rewritten later
item = app.project.items.addComp(name, 1920, 1060, 1, 10, 25);
}else if (item_type == "FOLDER"){
item = app.project.items.addFolder(name);
}else{
return _prepareError("Only 'COMP' or 'FOLDER' can be created");
}
return _prepareSingleValue(item.id);
}
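// Example call (name and returned id are illustrative):
//   addItem("renderMain", "COMP") -> '{"result":123}'
// The placeholder width/height/duration are expected to be corrected later
// via setCompProperties.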
function getItems(comps, folders, footages){
/**
* Returns JSON representation of compositions and
* if 'collectLayers' then layers in comps too.
*
* Args:
* comps (bool): return selected compositions
* folders (bool): return folders
* footages (bool): return FootageItem
* Returns:
* (list) of JSON items
*/
var items = []
for (i = 1; i <= app.project.items.length; ++i){
var item = app.project.items[i];
if (!item){
continue;
}
var ret = _getItem(item, comps, folders, footages);
if (ret){
items.push(ret);
}
}
return '[' + items.join() + ']';
}
function selectItems(items){
/**
* Select all items from `items`, deselect other.
*
* Args:
* items (list)
*/
for (i = 1; i <= app.project.items.length; ++i){
item = app.project.items[i];
if (items.indexOf(item.id) > -1){
item.selected = true;
}else{
item.selected = false;
}
}
}
function getSelectedItems(comps, folders, footages){
/**
* Returns list of selected items from Project menu
*
* Args:
* comps (bool): return selected compositions
* folders (bool): return folders
* footages (bool): return FootageItem
* Returns:
* (list) of JSON items
*/
var items = []
for (i = 0; i < app.project.selection.length; ++i){
var item = app.project.selection[i];
if (!item){
continue;
}
var ret = _getItem(item, comps, folders, footages);
if (ret){
items.push(ret);
}
}
return '[' + items.join() + ']';
}
function _getItem(item, comps, folders, footages){
/**
     * Auxiliary function; project items and selections
     * are indexed in different ways, so both code paths funnel through here.
     * TODO: refactor
*/
var item_type = '';
var path = '';
var containing_comps = [];
if (item instanceof FolderItem){
item_type = 'folder';
if (!folders){
return "{}";
}
}
if (item instanceof FootageItem){
if (!footages){
return "{}";
}
item_type = 'footage';
if (item.file){
path = item.file.fsName;
}
if (item.usedIn){
for (j = 0; j < item.usedIn.length; ++j){
containing_comps.push(item.usedIn[j].id);
}
}
}
if (item instanceof CompItem){
item_type = 'comp';
if (!comps){
return "{}";
}
}
var item = {"name": item.name,
"id": item.id,
"type": item_type,
"path": path,
"containing_comps": containing_comps};
return JSON.stringify(item);
}
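// A returned entry is a JSON object such as (values illustrative):
//   {"name": "bg_01", "id": 42, "type": "footage",
//    "path": "C:/assets/bg_01.png", "containing_comps": [12]}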
function importFile(path, item_name, import_options){
/**
* Imports file (image tested for now) as a FootageItem.
* Creates new composition
*
* Args:
* path (string): absolute path to image file
* item_name (string): label for composition
* Returns:
* JSON {name, id}
*/
var comp;
var ret = {};
try{
import_options = JSON.parse(import_options);
} catch (e){
return _prepareError("Couldn't parse import options " + import_options);
}
app.beginUndoGroup("Import File");
fp = new File(path);
if (fp.exists){
try {
im_opt = new ImportOptions(fp);
importAsType = import_options["ImportAsType"];
if ('ImportAsType' in import_options){ // refactor
if (importAsType.indexOf('COMP') > 0){
im_opt.importAs = ImportAsType.COMP;
}
if (importAsType.indexOf('FOOTAGE') > 0){
im_opt.importAs = ImportAsType.FOOTAGE;
}
if (importAsType.indexOf('COMP_CROPPED_LAYERS') > 0){
im_opt.importAs = ImportAsType.COMP_CROPPED_LAYERS;
}
if (importAsType.indexOf('PROJECT') > 0){
im_opt.importAs = ImportAsType.PROJECT;
}
}
if ('sequence' in import_options){
im_opt.sequence = true;
}
comp = app.project.importFile(im_opt);
if (app.project.selection.length == 2 &&
app.project.selection[0] instanceof FolderItem){
comp.parentFolder = app.project.selection[0]
}
} catch (error) {
            return _prepareError(error.toString() + " " + fp.fsName);
} finally {
fp.close();
}
}else{
return _prepareError("File " + path + " not found.");
}
if (comp){
comp.name = item_name;
comp.label = 9; // Green
ret = {"name": comp.name, "id": comp.id}
}
app.endUndoGroup();
return JSON.stringify(ret);
}
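// Example call (paths and ids illustrative), mirroring how _importItem below
// uses it:
//   importFile('C:/assets/bg_01.png', 'bg_01',
//              JSON.stringify({"ImportAsType": "FOOTAGE"}));
//   // -> '{"name":"bg_01","id":57}'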
function setLabelColor(comp_id, color_idx){
/**
* Set item_id label to 'color_idx' color
* Args:
* item_id (int): item id
* color_idx (int): 0-16 index from Label
*/
var item = app.project.itemByID(comp_id);
if (item){
item.label = color_idx;
}else{
return _prepareError("There is no composition with "+ comp_id);
}
}
function replaceItem(item_id, path, item_name){
/**
* Replaces loaded file with new file and updates name
*
* Args:
* item_id (int): id of composition, not a index!
* path (string): absolute path to new file
* item_name (string): new composition name
*/
app.beginUndoGroup("Replace File");
fp = new File(path);
if (!fp.exists){
return _prepareError("File " + path + " not found.");
}
var item = app.project.itemByID(item_id);
if (item){
try{
if (isFileSequence(item)) {
item.replaceWithSequence(fp, false);
}else{
item.replace(fp);
}
item.name = item_name;
} catch (error) {
return _prepareError(error.toString() + path);
} finally {
fp.close();
}
}else{
return _prepareError("There is no item with "+ item_id);
}
app.endUndoGroup();
}
function renameItem(item_id, new_name){
/**
* Renames item with 'item_id' to 'new_name'
*
* Args:
* item_id (int): id to search item
* new_name (str)
*/
var item = app.project.itemByID(item_id);
if (item){
item.name = new_name;
}else{
return _prepareError("There is no composition with "+ comp_id);
}
}
function deleteItem(item_id){
/**
* Delete any 'item_id'
*
* Not restricted only to comp, it could delete
* any item with 'id'
*/
var item = app.project.itemByID(item_id);
if (item){
item.remove();
}else{
return _prepareError("There is no composition with "+ comp_id);
}
}
function getCompProperties(comp_id){
/**
     * Returns information about the composition - the area that will be
     * rendered.
*
* Returns
* (dict)
*/
var comp = app.project.itemByID(comp_id);
if (!comp){
return _prepareError("There is no composition with "+ comp_id);
}
return JSON.stringify({
"id": comp.id,
"name": comp.name,
"frameStart": comp.displayStartFrame,
"framesDuration": comp.duration * comp.frameRate,
"frameRate": comp.frameRate,
"width": comp.width,
"height": comp.height});
}
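// Example response (values illustrative):
//   getCompProperties(42) -> {"id":42, "name":"renderMain", "frameStart":1001,
//   "framesDuration":50, "frameRate":25, "width":1920, "height":1080}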
function setCompProperties(comp_id, frameStart, framesCount, frameRate,
width, height){
/**
* Sets work area info from outside (from Ftrack via OpenPype)
*/
var comp = app.project.itemByID(comp_id);
if (!comp){
return _prepareError("There is no composition with "+ comp_id);
}
app.beginUndoGroup('change comp properties');
if (frameStart && framesCount && frameRate){
comp.displayStartFrame = frameStart;
comp.duration = framesCount / frameRate;
comp.frameRate = frameRate;
}
if (width && height){
var widthOld = comp.width;
var widthNew = width;
var widthDelta = widthNew - widthOld;
var heightOld = comp.height;
var heightNew = height;
var heightDelta = heightNew - heightOld;
var offset = [widthDelta / 2, heightDelta / 2];
comp.width = widthNew;
comp.height = heightNew;
for (var i = 1, il = comp.numLayers; i <= il; i++) {
var layer = comp.layer(i);
var positionProperty = layer.property('ADBE Transform Group').property('ADBE Position');
if (positionProperty.numKeys > 0) {
for (var j = 1, jl = positionProperty.numKeys; j <= jl; j++) {
var keyValue = positionProperty.keyValue(j);
positionProperty.setValueAtKey(j, keyValue + offset);
}
} else {
var positionValue = positionProperty.value;
positionProperty.setValue(positionValue + offset);
}
}
}
app.endUndoGroup();
}
function save(){
/**
* Saves current project
*/
app.project.save(); //TODO path is wrong, File instead
}
function saveAs(path){
/**
* Saves current project as 'path'
* */
app.project.save(fp = new File(path));
}
function getRenderInfo(comp_id){
/***
Get info from render queue.
Currently pulls only file name to parse extension and
if it is sequence in Python
Args:
comp_id (int): id of composition
Return:
(list) [{file_name:"xx.png", width:00, height:00}]
**/
var item = app.project.itemByID(comp_id);
if (!item){
return _prepareError("Composition with '" + comp_id + "' wasn't found! Recreate publishable instance(s)")
}
var comp_name = item.name;
var output_metadata = []
try{
// render_item.duplicate() should create new item on renderQueue
        // BUT it works only sometimes, there are some weird synchronization issues
// this method will be called always before render, so prepare items here
// for render to spare the hassle
for (i = 1; i <= app.project.renderQueue.numItems; ++i){
var render_item = app.project.renderQueue.item(i);
if (render_item.comp.id != comp_id){
continue;
}
if (render_item.status == RQItemStatus.DONE){
render_item.duplicate(); // create new, cannot change status if DONE
render_item.remove(); // remove existing to limit duplications
continue;
}
}
// properly validate as `numItems` won't change magically
var comp_id_count = 0;
for (i = 1; i <= app.project.renderQueue.numItems; ++i){
var render_item = app.project.renderQueue.item(i);
if (render_item.comp.id != comp_id){
continue;
}
comp_id_count += 1;
            for (j = 1; j <= render_item.numOutputModules; ++j){
                var output_module = render_item.outputModule(j);
                var file_url = output_module.file.toString();
output_metadata.push(
JSON.stringify({
"file_name": file_url,
"width": render_item.comp.width,
"height": render_item.comp.height
})
);
}
}
} catch (error) {
return _prepareError("There is no render queue, create one");
}
if (comp_id_count > 1){
return _prepareError("There cannot be more items in Render Queue for '" + comp_name + "'!")
}
if (comp_id_count == 0){
return _prepareError("There is no item in Render Queue for '" + comp_name + "'! Add composition to Render Queue.")
}
return '[' + output_metadata.join() + ']';
}
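// Example response for a comp with a single queue item (values illustrative):
//   [{"file_name":"C:/renders/shot_v001_[####].png", "width":1920, "height":1080}]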
function getAudioUrlForComp(comp_id){
/**
* Searches composition for audio layer
*
* Only single AVLayer is expected!
* Used for collecting Audio
*
* Args:
* comp_id (int): id of composition
* Return:
* (str) with url to audio content
*/
var item = app.project.itemByID(comp_id);
if (item){
for (i = 1; i <= item.numLayers; ++i){
var layer = item.layers[i];
if (layer instanceof AVLayer){
if (layer.hasAudio){
source_url = layer.source.file.fsName.toString()
return _prepareSingleValue(source_url);
}
}
}
}else{
return _prepareError("There is no composition with "+ comp_id);
}
}
function addItemAsLayerToComp(comp_id, item_id, found_comp){
/**
* Adds already imported FootageItem ('item_id') as a new
* layer to composition ('comp_id').
*
* Args:
* comp_id (int): id of target composition
* item_id (int): FootageItem.id
     *      found_comp (CompItem, optional): to limit querying if
* comp already found previously
*/
var comp = found_comp || app.project.itemByID(comp_id);
if (comp){
item = app.project.itemByID(item_id);
if (item){
comp.layers.add(item);
}else{
return _prepareError("There is no item with " + item_id);
}
}else{
return _prepareError("There is no composition with "+ comp_id);
}
}
function importBackground(comp_id, composition_name, files_to_import){
/**
* Imports backgrounds images to existing or new composition.
*
* If comp_id is not provided, new composition is created, basic
* values (width, heights, frameRatio) takes from first imported
* image.
*
* Args:
* comp_id (int): id of existing composition (null if new)
* composition_name (str): used when new composition
* files_to_import (list): list of absolute paths to import and
* add as layers
*
* Returns:
* (str): json representation (id, name, members)
*/
var comp;
var folder;
var imported_ids = [];
if (comp_id){
comp = app.project.itemByID(comp_id);
folder = comp.parentFolder;
}else{
if (app.project.selection.length > 1){
return _prepareError(
"Too many items selected, select only target composition!");
}else{
selected_item = app.project.activeItem;
if (selected_item instanceof Folder){
comp = selected_item;
folder = selected_item;
}
}
}
if (files_to_import){
for (i = 0; i < files_to_import.length; ++i){
item = _importItem(files_to_import[i]);
if (!item){
return _prepareError(
"No item for " + item_json["id"] +
". Import background failed.")
}
if (!comp){
folder = app.project.items.addFolder(composition_name);
imported_ids.push(folder.id);
comp = app.project.items.addComp(composition_name, item.width,
item.height, item.pixelAspect,
1, 26.7); // hardcode defaults
imported_ids.push(comp.id);
comp.parentFolder = folder;
}
imported_ids.push(item.id)
item.parentFolder = folder;
addItemAsLayerToComp(comp.id, item.id, comp);
}
}
var item = {"name": comp.name,
"id": folder.id,
"members": imported_ids};
return JSON.stringify(item);
}
function reloadBackground(comp_id, composition_name, files_to_import){
/**
* Reloads existing composition.
*
* It deletes complete composition with encompassing folder, recreates
* from scratch via 'importBackground' functionality.
*
* Args:
* comp_id (int): id of existing composition (null if new)
* composition_name (str): used when new composition
* files_to_import (list): list of absolute paths to import and
* add as layers
*
* Returns:
* (str): json representation (id, name, members)
*
*/
var imported_ids = []; // keep track of members of composition
comp = app.project.itemByID(comp_id);
folder = comp.parentFolder;
if (folder){
renameItem(folder.id, composition_name);
imported_ids.push(folder.id);
}
if (comp){
renameItem(comp.id, composition_name);
imported_ids.push(comp.id);
}
var existing_layer_names = [];
    var existing_layer_ids = []; // because ExtendScript doesn't have keys()
for (i = 1; i <= folder.items.length; ++i){
layer = folder.items[i];
        // because comp.layers[i] doesn't have 'id' accessible
if (layer instanceof CompItem){
continue;
}
existing_layer_names.push(layer.name);
existing_layer_ids.push(layer.id);
}
var new_filenames = [];
if (files_to_import){
for (i = 0; i < files_to_import.length; ++i){
file_name = _get_file_name(files_to_import[i]);
new_filenames.push(file_name);
idx = existing_layer_names.indexOf(file_name);
if (idx >= 0){ // update
var layer_id = existing_layer_ids[idx];
replaceItem(layer_id, files_to_import[i], file_name);
imported_ids.push(layer_id);
}else{ // new layer
item = _importItem(files_to_import[i]);
if (!item){
return _prepareError(
"No item for " + files_to_import[i] +
". Reload background failed.");
}
imported_ids.push(item.id);
item.parentFolder = folder;
addItemAsLayerToComp(comp.id, item.id, comp);
}
}
}
_delete_obsolete_items(folder, new_filenames);
var item = {"name": comp.name,
"id": folder.id,
"members": imported_ids};
return JSON.stringify(item);
}
function _get_file_name(file_url){
/**
* Returns file name without extension from 'file_url'
*
* Args:
* file_url (str): full absolute url
* Returns:
* (str)
*/
fp = new File(file_url);
file_name = fp.name.substring(0, fp.name.lastIndexOf("."));
return file_name;
}
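// Example: _get_file_name("C:/assets/bg_01.png") -> "bg_01"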
function _delete_obsolete_items(folder, new_filenames){
/***
* Goes through 'folder' and removes layers not in new
* background
*
* Args:
* folder (FolderItem)
* new_filenames (array): list of layer names in new bg
*/
// remove items in old, but not in new
delete_ids = []
for (i = 1; i <= folder.items.length; ++i){
layer = folder.items[i];
        // because comp.layers[i] doesn't have 'id' accessible
if (layer instanceof CompItem){
continue;
}
if (new_filenames.indexOf(layer.name) < 0){
delete_ids.push(layer.id);
}
}
for (i = 0; i < delete_ids.length; ++i){
deleteItem(delete_ids[i]);
}
}
function _importItem(file_url){
/**
* Imports 'file_url' as new FootageItem
*
* Args:
* file_url (str): file url with content
* Returns:
* (FootageItem)
*/
file_name = _get_file_name(file_url);
//importFile prepared previously to return json
item_json = importFile(file_url, file_name, JSON.stringify({"ImportAsType":"FOOTAGE"}));
item_json = JSON.parse(item_json);
item = app.project.itemByID(item_json["id"]);
return item;
}
function isFileSequence (item){
/**
* Check that item is a recognizable sequence
*/
if (item instanceof FootageItem && item.mainSource instanceof FileSource && !(item.mainSource.isStill) && item.hasVideo){
var extname = item.mainSource.file.fsName.split('.').pop();
return extname.match(new RegExp("(ai|bmp|bw|cin|cr2|crw|dcr|dng|dib|dpx|eps|erf|exr|gif|hdr|ico|icb|iff|jpe|jpeg|jpg|mos|mrw|nef|orf|pbm|pef|pct|pcx|pdf|pic|pict|png|ps|psd|pxr|raf|raw|rgb|rgbe|rla|rle|rpf|sgi|srf|tdi|tga|tif|tiff|vda|vst|x3f|xyze)", "i")) !== null;
}
return false;
}
function render(target_folder, comp_id){
var out_dir = new Folder(target_folder);
var out_dir = out_dir.fsName;
for (i = 1; i <= app.project.renderQueue.numItems; ++i){
var render_item = app.project.renderQueue.item(i);
var composition = render_item.comp;
if (composition.id == comp_id){
if (render_item.status == RQItemStatus.DONE){
var new_item = render_item.duplicate();
render_item.remove();
render_item = new_item;
}
render_item.render = true;
var om1 = app.project.renderQueue.item(i).outputModule(1);
var file_name = File.decode( om1.file.name ).replace('℗', ''); // Name contains special character, space?
var omItem1_settable_str = app.project.renderQueue.item(i).outputModule(1).getSettings( GetSettingsFormat.STRING_SETTABLE );
var targetFolder = new Folder(target_folder);
if (!targetFolder.exists) {
targetFolder.create();
}
om1.file = new File(targetFolder.fsName + '/' + file_name);
}else{
if (render_item.status != RQItemStatus.DONE){
render_item.render = false;
}
}
}
app.beginSuppressDialogs();
app.project.renderQueue.render();
app.endSuppressDialogs(false);
}
function close(){
app.project.close(CloseOptions.DO_NOT_SAVE_CHANGES);
app.quit();
}
function getAppVersion(){
return _prepareSingleValue(app.version);
}
function printMsg(msg){
alert(msg);
}
function addPlaceholder(name, width, height, fps, duration){
/** Add AE PlaceholderItem to Project list.
*
* PlaceholderItem chosen as it doesn't require existing file and
* might potentially allow nice functionality in the future.
*
*/
app.beginUndoGroup('change comp properties');
try{
item = app.project.importPlaceholder(name, width, height,
fps, duration);
return _prepareSingleValue(item.id);
}catch (error) {
writeLn(_prepareError("Cannot add placeholder " + error.toString()));
}
app.endUndoGroup();
}
function addItemInstead(placeholder_item_id, item_id){
/** Add new loaded item in place of load placeholder.
*
* Each placeholder could be placed multiple times into multiple
* composition. This loops through all compositions and
* places loaded item under placeholder.
* Placeholder item gets deleted later separately according
* to configuration in Settings.
*
* Args:
* placeholder_item_id (int)
* item_id (int)
*/
var item = app.project.itemByID(item_id);
if (!item){
return _prepareError("There is no item with "+ item_id);
}
app.beginUndoGroup('Add loaded items');
for (i = 1; i <= app.project.items.length; ++i){
var comp = app.project.items[i];
if (!(comp instanceof CompItem)){
continue
}
        var layer_idx = 1;  // separate counter so the outer project loop's 'i' is not clobbered
        while (layer_idx <= comp.numLayers) {
            var layer = comp.layer(layer_idx);
            var layer_source = layer.source;
            if (layer_source && layer_source.id == placeholder_item_id){
                var new_layer = comp.layers.add(item);
                new_layer.moveAfter(layer);
                // copy all(?) properties to new layer
                layer.property("ADBE Transform Group").copyToComp(new_layer);
                layer_idx = layer_idx + 1;  // skip over the newly added layer
            }
            layer_idx = layer_idx + 1;
}
}
app.endUndoGroup();
}
function _prepareSingleValue(value){
return JSON.stringify({"result": value})
}
function _prepareError(error_msg){
return JSON.stringify({"error": error_msg})
}
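// Every public function replies through one of the two wrappers above, e.g.
//   _prepareSingleValue("16.1") -> '{"result":"16.1"}'
//   _prepareError("No file open currently") -> '{"error":"No file open currently"}'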

View file

@ -0,0 +1,385 @@
import os
import sys
import subprocess
import collections
import logging
import asyncio
import functools
import traceback
from wsrpc_aiohttp import (
WebSocketRoute,
WebSocketAsync
)
from qtpy import QtCore
from ayon_core.lib import Logger, is_in_tests
from ayon_core.pipeline import install_host
from ayon_core.addon import AddonsManager
from ayon_core.tools.utils import host_tools, get_ayon_qt_app
from .webserver import WebServerTool
from .ws_stub import get_stub
from .lib import set_settings
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def safe_excepthook(*args):
traceback.print_exception(*args)
def main(*subprocess_args):
"""Main entrypoint to AE launching, called from pre hook."""
sys.excepthook = safe_excepthook
from ayon_aftereffects.api import AfterEffectsHost
host = AfterEffectsHost()
install_host(host)
os.environ["AYON_LOG_NO_COLORS"] = "0"
app = get_ayon_qt_app()
app.setQuitOnLastWindowClosed(False)
launcher = ProcessLauncher(subprocess_args)
launcher.start()
if os.environ.get("HEADLESS_PUBLISH"):
manager = AddonsManager()
webpublisher_addon = manager["webpublisher"]
launcher.execute_in_main_thread(
functools.partial(
webpublisher_addon.headless_publish,
log,
"CloseAE",
is_in_tests()
)
)
elif os.environ.get("AVALON_AFTEREFFECTS_WORKFILES_ON_LAUNCH", True):
save = False
if os.getenv("WORKFILES_SAVE_AS"):
save = True
launcher.execute_in_main_thread(
lambda: host_tools.show_tool_by_name("workfiles", save=save)
)
sys.exit(app.exec_())
def show_tool_by_name(tool_name):
kwargs = {}
if tool_name == "loader":
kwargs["use_context"] = True
host_tools.show_tool_by_name(tool_name, **kwargs)
class ProcessLauncher(QtCore.QObject):
"""Launches webserver, connects to it, runs main thread."""
route_name = "AfterEffects"
_main_thread_callbacks = collections.deque()
def __init__(self, subprocess_args):
self._subprocess_args = subprocess_args
self._log = None
super(ProcessLauncher, self).__init__()
        # Keep track if launcher was already started
self._started = False
self._process = None
self._websocket_server = None
start_process_timer = QtCore.QTimer()
start_process_timer.setInterval(100)
loop_timer = QtCore.QTimer()
loop_timer.setInterval(200)
start_process_timer.timeout.connect(self._on_start_process_timer)
loop_timer.timeout.connect(self._on_loop_timer)
self._start_process_timer = start_process_timer
self._loop_timer = loop_timer
@property
def log(self):
if self._log is None:
self._log = Logger.get_logger("{}-launcher".format(
self.route_name))
return self._log
@property
def websocket_server_is_running(self):
if self._websocket_server is not None:
return self._websocket_server.is_running
return False
@property
def is_process_running(self):
if self._process is not None:
return self._process.poll() is None
return False
@property
def is_host_connected(self):
"""Returns True if connected, False if app is not running at all."""
if not self.is_process_running:
return False
try:
_stub = get_stub()
if _stub:
return True
except Exception:
pass
return None
@classmethod
def execute_in_main_thread(cls, callback):
cls._main_thread_callbacks.append(callback)
def start(self):
if self._started:
return
self.log.info("Started launch logic of AfterEffects")
self._started = True
self._start_process_timer.start()
def exit(self):
""" Exit whole application. """
if self._start_process_timer.isActive():
self._start_process_timer.stop()
if self._loop_timer.isActive():
self._loop_timer.stop()
if self._websocket_server is not None:
self._websocket_server.stop()
if self._process:
self._process.kill()
self._process.wait()
QtCore.QCoreApplication.exit()
def _on_loop_timer(self):
# TODO find better way and catch errors
# Run only callbacks that are in queue at the moment
cls = self.__class__
for _ in range(len(cls._main_thread_callbacks)):
if cls._main_thread_callbacks:
callback = cls._main_thread_callbacks.popleft()
callback()
if not self.is_process_running:
self.log.info("Host process is not running. Closing")
self.exit()
elif not self.websocket_server_is_running:
self.log.info("Websocket server is not running. Closing")
self.exit()
def _on_start_process_timer(self):
# TODO add try except validations for each part in this method
# Start server as first thing
if self._websocket_server is None:
self._init_server()
return
# TODO add waiting time
# Wait for webserver
if not self.websocket_server_is_running:
return
# Start application process
if self._process is None:
self._start_process()
self.log.info("Waiting for host to connect")
return
# TODO add waiting time
# Wait until host is connected
if self.is_host_connected:
self._start_process_timer.stop()
self._loop_timer.start()
elif (
not self.is_process_running
or not self.websocket_server_is_running
):
self.exit()
def _init_server(self):
if self._websocket_server is not None:
return
self.log.debug(
"Initialization of websocket server for host communication"
)
self._websocket_server = websocket_server = WebServerTool()
if websocket_server.port_occupied(
websocket_server.host_name,
websocket_server.port
):
self.log.info(
"Server already running, sending actual context and exit."
)
asyncio.run(websocket_server.send_context_change(self.route_name))
self.exit()
return
# Add Websocket route
websocket_server.add_route("*", "/ws/", WebSocketAsync)
# Add after effects route to websocket handler
print("Adding {} route".format(self.route_name))
WebSocketAsync.add_route(
self.route_name, AfterEffectsRoute
)
self.log.info("Starting websocket server for host communication")
websocket_server.start_server()
def _start_process(self):
if self._process is not None:
return
self.log.info("Starting host process")
try:
self._process = subprocess.Popen(
self._subprocess_args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
except Exception:
self.log.info("exce", exc_info=True)
self.exit()
class AfterEffectsRoute(WebSocketRoute):
"""
One route, mimicking external application (like Harmony, etc).
All functions could be called from client.
'do_notify' function calls function on the client - mimicking
notification after long running job on the server or similar
"""
instance = None
def init(self, **kwargs):
# Python __init__ must be return "self".
# This method might return anything.
log.debug("someone called AfterEffects route")
self.instance = self
return kwargs
# server functions
async def ping(self):
log.debug("someone called AfterEffects route ping")
# This method calls function on the client side
# client functions
async def set_context(self, project, folder, task):
"""
Sets 'project', 'folder' and 'task' to envs, eg. setting context
Args:
project (str)
folder (str)
task (str)
"""
log.info("Setting context change")
log.info("project {} folder {} ".format(project, folder))
if project:
os.environ["AYON_PROJECT_NAME"] = project
if folder:
os.environ["AYON_FOLDER_PATH"] = folder
if task:
os.environ["AYON_TASK_NAME"] = task
async def read(self):
log.debug("aftereffects.read client calls server server calls "
"aftereffects client")
return await self.socket.call('aftereffects.read')
# panel routes for tools
async def workfiles_route(self):
self._tool_route("workfiles")
async def loader_route(self):
self._tool_route("loader")
async def publish_route(self):
self._tool_route("publisher")
async def sceneinventory_route(self):
self._tool_route("sceneinventory")
async def setresolution_route(self):
self._settings_route(False, True)
async def setframes_route(self):
self._settings_route(True, False)
async def setall_route(self):
self._settings_route(True, True)
async def experimental_tools_route(self):
self._tool_route("experimental_tools")
def _tool_route(self, _tool_name):
"""The address accessed when clicking on the buttons."""
partial_method = functools.partial(show_tool_by_name,
_tool_name)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def _settings_route(self, frames, resolution):
partial_method = functools.partial(set_settings,
frames,
resolution)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def create_placeholder_route(self):
from ayon_aftereffects.api.workfile_template_builder import \
create_placeholder
partial_method = functools.partial(create_placeholder)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def update_placeholder_route(self):
from ayon_aftereffects.api.workfile_template_builder import \
update_placeholder
partial_method = functools.partial(update_placeholder)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"
def build_workfile_template_route(self):
from ayon_aftereffects.api.workfile_template_builder import \
build_workfile_template
partial_method = functools.partial(build_workfile_template)
ProcessLauncher.execute_in_main_thread(partial_method)
# Required return statement.
return "nothing"

View file

@ -0,0 +1,93 @@
"""Script wraps launch mechanism of AfterEffects implementations.
Arguments passed to the script are passed to launch function in host
implementation. In all cases the arguments contain the host app executable
and may contain a workfile path or other arguments.
"""
import os
import sys
from ayon_aftereffects.api.launch_logic import main as host_main
# Get current file to locate start point of sys.argv
CURRENT_FILE = os.path.abspath(__file__)
def show_error_messagebox(title, message, detail_message=None):
"""Function will show message and process ends after closing it."""
from qtpy import QtWidgets, QtCore
from ayon_core import style
app = QtWidgets.QApplication([])
app.setStyleSheet(style.load_stylesheet())
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle(title)
msgbox.setText(message)
if detail_message:
msgbox.setDetailedText(detail_message)
msgbox.setWindowModality(QtCore.Qt.ApplicationModal)
msgbox.show()
sys.exit(app.exec_())
def on_invalid_args(script_not_found):
"""Show to user message box saying that something went wrong.
Tell user that arguments to launch implementation are invalid with
arguments details.
Args:
script_not_found (bool): Use different message based on this value.
"""
title = "Invalid arguments"
joined_args = ", ".join("\"{}\"".format(arg) for arg in sys.argv)
if script_not_found:
submsg = "Where couldn't find script path:\n\"{}\""
else:
submsg = "Expected Host executable after script path:\n\"{}\""
message = "BUG: Got invalid arguments so can't launch Host application."
detail_message = "Process was launched with arguments:\n{}\n\n{}".format(
joined_args,
submsg.format(CURRENT_FILE)
)
show_error_messagebox(title, message, detail_message)
def main(argv):
# Modify current file path to find match in sys.argv which may be different
# on windows (different letter cases and slashes).
modified_current_file = CURRENT_FILE.replace("\\", "/").lower()
# Create a copy of sys argv
sys_args = list(argv)
after_script_idx = None
# Find script path in sys.argv to know index of argv where host
# executable should be.
for idx, item in enumerate(sys_args):
if item.replace("\\", "/").lower() == modified_current_file:
after_script_idx = idx + 1
break
# Validate that there is at least one argument after script path
launch_args = None
if after_script_idx is not None:
launch_args = sys_args[after_script_idx:]
if launch_args:
# Launch host implementation
host_main(*launch_args)
else:
# Show message box
on_invalid_args(after_script_idx is None)
if __name__ == "__main__":
main(sys.argv)

View file

@ -0,0 +1,164 @@
import os
import re
import json
import contextlib
import logging
import ayon_api
from ayon_core.pipeline.context_tools import get_current_context
from .ws_stub import get_stub
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
@contextlib.contextmanager
def maintained_selection():
"""Maintain selection during context."""
selection = get_stub().get_selected_items(True, False, False)
try:
yield selection
finally:
pass
def get_extension_manifest_path():
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"extension",
"CSXS",
"manifest.xml"
)
def get_unique_layer_name(layers, name):
"""
Gets all layer names and if 'name' is present in them, increases
suffix by 1 (eg. creates unique layer name - for Loader)
Args:
layers (list): of strings, names only
name (string): checked value
Returns:
(string): name_00X (without version)
"""
names = {}
for layer in layers:
layer_name = re.sub(r'_\d{3}$', '', layer)
if layer_name in names.keys():
names[layer_name] = names[layer_name] + 1
else:
names[layer_name] = 1
occurrences = names.get(name, 0)
return "{}_{:0>3d}".format(name, occurrences + 1)
def get_background_layers(file_url):
"""
Pulls file name from background json file, enrich with folder url for
AE to be able import files.
Order is important, follows order in json.
Args:
file_url (str): abs url of background json
Returns:
(list): of abs paths to images
"""
with open(file_url) as json_file:
data = json.load(json_file)
layers = list()
bg_folder = os.path.dirname(file_url)
for child in data['children']:
if child.get("filename"):
layers.append(os.path.join(bg_folder, child.get("filename")).
replace("\\", "/"))
else:
for layer in child['children']:
if layer.get("filename"):
layers.append(os.path.join(bg_folder,
layer.get("filename")).
replace("\\", "/"))
return layers
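# Illustrative background json (hypothetical content) stored at
# /project/bg/bg.json:
#   {"children": [{"filename": "bg.png"},
#                 {"children": [{"filename": "cloud_01.png"}]}]}
# would yield ["/project/bg/bg.png", "/project/bg/cloud_01.png"]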
def get_folder_settings(folder_entity):
"""Get settings of current folder.
Returns:
dict: Scene data.
"""
folder_attributes = folder_entity["attrib"]
fps = folder_attributes.get("fps", 0)
frame_start = folder_attributes.get("frameStart", 0)
frame_end = folder_attributes.get("frameEnd", 0)
handle_start = folder_attributes.get("handleStart", 0)
handle_end = folder_attributes.get("handleEnd", 0)
resolution_width = folder_attributes.get("resolutionWidth", 0)
resolution_height = folder_attributes.get("resolutionHeight", 0)
duration = (frame_end - frame_start + 1) + handle_start + handle_end
return {
"fps": fps,
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": handle_start,
"handleEnd": handle_end,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"duration": duration
}
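# Example (attribute values assumed): frameStart=1001, frameEnd=1050,
# handleStart=10, handleEnd=10 -> duration = (1050 - 1001 + 1) + 10 + 10 = 70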
def set_settings(frames, resolution, comp_ids=None, print_msg=True):
"""Sets number of frames and resolution to selected comps.
Args:
frames (bool): True if set frame info
resolution (bool): True if set resolution
comp_ids (list): specific composition ids, if empty
it tries to look for currently selected
print_msg (bool): True throw JS alert with msg
"""
frame_start = frames_duration = fps = width = height = None
current_context = get_current_context()
folder_entity = ayon_api.get_folder_by_path(
current_context["project_name"],
current_context["folder_path"]
)
settings = get_folder_settings(folder_entity)
msg = ''
if frames:
frame_start = settings["frameStart"] - settings["handleStart"]
frames_duration = settings["duration"]
fps = settings["fps"]
msg += f"frame start:{frame_start}, duration:{frames_duration}, "\
f"fps:{fps}"
if resolution:
width = settings["resolutionWidth"]
height = settings["resolutionHeight"]
msg += f"width:{width} and height:{height}"
stub = get_stub()
if not comp_ids:
comps = stub.get_selected_items(True, False, False)
comp_ids = [comp.id for comp in comps]
if not comp_ids:
stub.print_msg("Select at least one composition to apply settings.")
return
for comp_id in comp_ids:
msg = f"Setting for comp {comp_id} " + msg
log.debug(msg)
stub.set_comp_properties(comp_id, frame_start, frames_duration,
fps, width, height)
if print_msg:
stub.print_msg(msg)

Binary file not shown (16 KiB)

Binary file not shown (13 KiB)

View file

@ -0,0 +1,286 @@
import os
from qtpy import QtWidgets
import pyblish.api
from ayon_core.lib import Logger, register_event_callback
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_workfile_build_plugin_path,
AVALON_CONTAINER_ID,
AVALON_INSTANCE_ID,
AYON_INSTANCE_ID,
)
from ayon_core.pipeline.load import any_outdated_containers
from ayon_core.host import (
HostBase,
IWorkfileHost,
ILoadHost,
IPublishHost
)
from ayon_core.tools.utils import get_ayon_qt_app
from ayon_aftereffects import AFTEREFFECTS_ADDON_ROOT
from .launch_logic import get_stub
from .ws_stub import ConnectionNotEstablishedYet
log = Logger.get_logger(__name__)
PLUGINS_DIR = os.path.join(AFTEREFFECTS_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
WORKFILE_BUILD_PATH = os.path.join(PLUGINS_DIR, "workfile_build")
class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "aftereffects"
def __init__(self):
self._stub = None
super(AfterEffectsHost, self).__init__()
@property
def stub(self):
"""
        Handle pulling stub from AE to run operations on host
Returns:
(AEServerStub) or None
"""
if self._stub:
return self._stub
try:
            stub = get_stub()  # only after After Effects is up
except ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
self._stub = stub
return self._stub
def install(self):
print("Installing Pype config...")
pyblish.api.register_host("aftereffects")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_workfile_build_plugin_path(WORKFILE_BUILD_PATH)
register_event_callback("application.launched", application_launch)
def get_workfile_extensions(self):
return [".aep"]
def save_workfile(self, dst_path=None):
self.stub.saveAs(dst_path, True)
def open_workfile(self, filepath):
self.stub.open(filepath)
return True
def get_current_workfile(self):
try:
full_name = get_stub().get_active_document_full_name()
if full_name and full_name != "null":
return os.path.normpath(full_name).replace("\\", "/")
except ValueError:
print("Nothing opened")
pass
return None
def get_containers(self):
return ls()
def get_context_data(self):
meta = self.stub.get_metadata()
for item in meta:
if item.get("id") == "publish_context":
item.pop("id")
return item
return {}
def update_context_data(self, data, changes):
item = data
item["id"] = "publish_context"
self.stub.imprint(item["id"], item)
# created instances section
def list_instances(self):
"""List all created instances from current workfile which
will be published.
Pulls from File > File Info
For SubsetManager
Returns:
(list) of dictionaries matching instances format
"""
stub = self.stub
if not stub:
return []
instances = []
layers_meta = stub.get_metadata()
for instance in layers_meta:
if instance.get("id") in {
AYON_INSTANCE_ID, AVALON_INSTANCE_ID
}:
instances.append(instance)
return instances
def remove_instance(self, instance):
"""Remove instance from current workfile metadata.
Updates metadata of current file in File > File Info and removes
icon highlight on group layer.
For SubsetManager
Args:
instance (dict): instance representation from subsetmanager model
"""
stub = self.stub
if not stub:
return
inst_id = instance.get("instance_id") or instance.get("uuid") # legacy
if not inst_id:
log.warning("No instance identifier for {}".format(instance))
return
stub.remove_instance(inst_id)
if instance.get("members"):
item = stub.get_item(instance["members"][0])
if item:
stub.rename_item(item.id,
item.name.replace(stub.PUBLISH_ICON, ''))
def application_launch():
"""Triggered after start of app"""
check_inventory()
def ls():
"""Yields containers from active AfterEffects document.
This is the host-equivalent of api.ls(), but instead of listing
assets on disk, it lists assets already loaded in AE; once loaded
they are called 'containers'. Used in Manage tool.
    Containers could be on multiple levels: single images/videos as a
    FootageItem, or multiple items - backgrounds (a folder with an
    automatically created composition and all imported layers).
Yields:
dict: container
"""
try:
stub = get_stub() # only after AfterEffects is up
except ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
layers_meta = stub.get_metadata()
for item in stub.get_items(comps=True,
folders=True,
footages=True):
data = stub.read(item, layers_meta)
# Skip non-tagged layers.
if not data:
continue
# Filter to only containers.
if "container" not in data["id"]:
continue
# Append transient data
data["objectName"] = item.name.replace(stub.LOADED_ICON, '')
data["layer"] = item
yield data
def check_inventory():
"""Checks loaded containers if they are of highest version"""
if not any_outdated_containers():
return
# Warn about outdated containers.
_app = get_ayon_qt_app()
message_box = QtWidgets.QMessageBox()
message_box.setIcon(QtWidgets.QMessageBox.Warning)
msg = "There are outdated containers in the scene."
message_box.setText(msg)
message_box.exec_()
def containerise(name,
namespace,
comp,
context,
loader=None,
suffix="_CON"):
"""
Containerisation enables a tracking of version, author and origin
for loaded assets.
    Creates dictionary payloads that get saved into file metadata. Each
    container records who loaded it (loader) and its members (single or
    multiple in case of a background).
Arguments:
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
comp (AEItem): Composition to containerise
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
suffix (str, optional): Suffix of container, defaults to `_CON`.
Returns:
container (str): Name of container assembly
"""
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace,
"loader": str(loader),
"representation": context["representation"]["id"],
"members": comp.members or [comp.id]
}
stub = get_stub()
stub.imprint(comp.id, data)
return comp
def cache_and_get_instances(creator):
"""Cache instances in shared data.
Storing all instances as a list as legacy instances might be still present.
Args:
creator (Creator): Plugin which would like to get instances from host.
Returns:
List[]: list of all instances stored in metadata
"""
shared_key = "openpype.photoshop.instances"
if shared_key not in creator.collection_shared_data:
creator.collection_shared_data[shared_key] = \
creator.host.list_instances()
return creator.collection_shared_data[shared_key]
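# Usage sketch (plugin details assumed): inside a Creator's collection method
# the cached list avoids re-querying the host for every plugin:
#   for instance_data in cache_and_get_instances(self):
#       ...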

View file

@ -0,0 +1,12 @@
import six
from abc import ABCMeta
from ayon_core.pipeline import LoaderPlugin
from .launch_logic import get_stub
@six.add_metaclass(ABCMeta)
class AfterEffectsLoader(LoaderPlugin):
@staticmethod
def get_stub():
return get_stub()

View file

@ -0,0 +1,241 @@
"""Webserver for communication with AfterEffects.
Aiohttp (Asyncio) based websocket server used for communication with host
application.
This webserver is started in spawned Python process that opens DCC during
its launch, waits for connection from DCC and handles communication going
forward. Server is closed before Python process is killed.
"""
import os
import logging
import urllib
import threading
import asyncio
import socket
from aiohttp import web
from wsrpc_aiohttp import WSRPCClient
from ayon_core.pipeline import get_global_context
log = logging.getLogger(__name__)
class WebServerTool:
"""
    Basic POC implementation of an asynchronous websocket RPC server.
Uses class in external_app_1.py to mimic implementation for single
external application.
'test_client' folder contains two test implementations of client
"""
_instance = None
def __init__(self):
WebServerTool._instance = self
self.client = None
self.handlers = {}
self.on_stop_callbacks = []
port = None
host_name = "localhost"
websocket_url = os.getenv("WEBSOCKET_URL")
if websocket_url:
parsed = urllib.parse.urlparse(websocket_url)
port = parsed.port
host_name = parsed.netloc.split(":")[0]
if not port:
port = 8098 # fallback
self.port = port
self.host_name = host_name
self.app = web.Application()
# add route with multiple methods for single "external app"
self.webserver_thread = WebServerThread(self, self.port)
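        # Example (assumed env value): WEBSOCKET_URL="ws://localhost:8097/ws/"
        # resolves to host_name "localhost" and port 8097; without the
        # variable the server falls back to port 8098.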
def add_route(self, *args, **kwargs):
self.app.router.add_route(*args, **kwargs)
def add_static(self, *args, **kwargs):
self.app.router.add_static(*args, **kwargs)
def start_server(self):
if self.webserver_thread and not self.webserver_thread.is_alive():
self.webserver_thread.start()
def stop_server(self):
self.stop()
async def send_context_change(self, host):
"""
Calls running webserver to inform about context change
Used when new PS/AE should be triggered,
but one already running, without
this publish would point to old context.
"""
client = WSRPCClient(os.getenv("WEBSOCKET_URL"),
loop=asyncio.get_event_loop())
await client.connect()
context = get_global_context()
project_name = context["project_name"]
folder_path = context["folder_path"]
task_name = context["task_name"]
log.info("Sending context change to {}{}/{}".format(
project_name, folder_path, task_name
))
await client.call(
'{}.set_context'.format(host),
project=project_name,
folder=folder_path,
task=task_name
)
await client.close()
def port_occupied(self, host_name, port):
"""
Check if 'url' is already occupied.
This could mean, that app is already running and we are trying open it
again. In that case, use existing running webserver.
Check here is easier than capturing exception from thread.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
result = con.connect_ex((host_name, port)) == 0
if result:
print(f"Port {port} is already in use")
return result
def call(self, func):
log.debug("websocket.call {}".format(func))
future = asyncio.run_coroutine_threadsafe(
func,
self.webserver_thread.loop
)
result = future.result()
return result
@staticmethod
def get_instance():
if WebServerTool._instance is None:
WebServerTool()
return WebServerTool._instance
@property
def is_running(self):
if not self.webserver_thread:
return False
return self.webserver_thread.is_running
def stop(self):
if not self.is_running:
return
try:
log.debug("Stopping websocket server")
self.webserver_thread.is_running = False
self.webserver_thread.stop()
except Exception:
log.warning(
"Error has happened during Killing websocket server",
exc_info=True
)
def thread_stopped(self):
for callback in self.on_stop_callbacks:
callback()
class WebServerThread(threading.Thread):
""" Listener for websocket rpc requests.
It would be probably better to "attach" this to main thread (as for
example Harmony needs to run something on main thread), but currently
it creates separate thread and separate asyncio event loop
"""
def __init__(self, module, port):
super(WebServerThread, self).__init__()
self.is_running = False
self.port = port
self.module = module
self.loop = None
self.runner = None
self.site = None
self.tasks = []
def run(self):
self.is_running = True
try:
log.info("Starting web server")
self.loop = asyncio.new_event_loop() # create new loop for thread
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self.start_server())
websocket_url = "ws://localhost:{}/ws".format(self.port)
log.debug(
"Running Websocket server on URL: \"{}\"".format(websocket_url)
)
asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
self.loop.run_forever()
except Exception:
self.is_running = False
log.warning(
"Websocket Server service has failed", exc_info=True
)
raise
finally:
self.loop.close() # optional
self.is_running = False
self.module.thread_stopped()
log.info("Websocket server stopped")
async def start_server(self):
""" Starts runner and TCPsite """
self.runner = web.AppRunner(self.module.app)
await self.runner.setup()
self.site = web.TCPSite(self.runner, 'localhost', self.port)
await self.site.start()
def stop(self):
"""Sets is_running flag to false, 'check_shutdown' shuts server down"""
self.is_running = False
async def check_shutdown(self):
""" Future that is running and checks if server should be running
periodically.
"""
while self.is_running:
while self.tasks:
task = self.tasks.pop(0)
log.debug("waiting for task {}".format(task))
await task
log.debug("returned value {}".format(task.result))
await asyncio.sleep(0.5)
log.debug("Starting shutdown")
await self.site.stop()
log.debug("Site stopped")
await self.runner.cleanup()
log.debug("Runner stopped")
tasks = [task for task in asyncio.all_tasks() if
task is not asyncio.current_task()]
list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
results = await asyncio.gather(*tasks, return_exceptions=True)
log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
await self.loop.shutdown_asyncgens()
# to really make sure everything else has time to stop
await asyncio.sleep(0.07)
self.loop.stop()
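The class above wraps an aiohttp application in its own thread. A minimal usage sketch, assuming the singleton was already constructed elsewhere (so `WEBSOCKET_URL` was parsed) and using a hypothetical `ping_handler` route purely for illustration:

```
from aiohttp import web

async def ping_handler(request):
    # hypothetical handler used only to illustrate add_route
    return web.json_response({"status": "ok"})

webserver = WebServerTool.get_instance()   # singleton access
if not webserver.port_occupied(webserver.host_name, webserver.port):
    webserver.add_route("GET", "/ping", ping_handler)
    webserver.start_server()               # starts WebServerThread
# later, when the host application exits
webserver.stop_server()
```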

View file

@ -0,0 +1,181 @@
import os.path
import uuid
import shutil
from abc import abstractmethod
from ayon_core.pipeline import registered_host
from ayon_core.tools.workfile_template_build import (
WorkfileBuildPlaceholderDialog,
)
from ayon_core.pipeline.workfile.workfile_template_builder import (
AbstractTemplateBuilder,
PlaceholderPlugin,
PlaceholderItem
)
from ayon_aftereffects.api import get_stub
PLACEHOLDER_SET = "PLACEHOLDERS_SET"
PLACEHOLDER_ID = "openpype.placeholder"
class AETemplateBuilder(AbstractTemplateBuilder):
"""Concrete implementation of AbstractTemplateBuilder for AE"""
def import_template(self, path):
"""Import template into current scene.
Block if a template is already loaded.
Args:
path (str): A path to current template (usually given by
get_template_preset implementation)
Returns:
bool: Whether the template was successfully imported or not
"""
stub = get_stub()
if not os.path.exists(path):
stub.print_msg(f"Template file on {path} doesn't exist.")
return
stub.save()
workfile_path = stub.get_active_document_full_name()
shutil.copy2(path, workfile_path)
stub.open(workfile_path)
return True
class AEPlaceholderPlugin(PlaceholderPlugin):
"""Contains generic methods for all PlaceholderPlugins."""
@abstractmethod
def _create_placeholder_item(self, item_data: dict) -> PlaceholderItem:
pass
def collect_placeholders(self):
"""Collect info from file metadata about created placeholders.
Returns:
(list) (LoadPlaceholderItem)
"""
output = []
scene_placeholders = self._collect_scene_placeholders()
for item in scene_placeholders:
if item.get("plugin_identifier") != self.identifier:
continue
item = self._create_placeholder_item(item)
output.append(item)
return output
def update_placeholder(self, placeholder_item, placeholder_data):
"""Resave changed properties for placeholders"""
item_id, metadata_item = self._get_item(placeholder_item)
stub = get_stub()
if not item_id:
stub.print_msg("Cannot find item for "
f"{placeholder_item.scene_identifier}")
return
metadata_item["data"] = placeholder_data
stub.imprint(item_id, metadata_item)
def _get_item(self, placeholder_item):
"""Returns item id and item metadata for placeholder from file meta"""
stub = get_stub()
placeholder_uuid = placeholder_item.scene_identifier
for metadata_item in stub.get_metadata():
if not metadata_item.get("is_placeholder"):
continue
if placeholder_uuid in metadata_item.get("uuid"):
return metadata_item["members"][0], metadata_item
return None, None
def _collect_scene_placeholders(self):
"""" Cache placeholder data to shared data.
Returns:
(list) of dicts
"""
placeholder_items = self.builder.get_shared_populate_data(
"placeholder_items"
)
if not placeholder_items:
placeholder_items = []
for item in get_stub().get_metadata():
if not item.get("is_placeholder"):
continue
placeholder_items.append(item)
self.builder.set_shared_populate_data(
"placeholder_items", placeholder_items
)
return placeholder_items
def _imprint_item(self, item_id, name, placeholder_data, stub):
if not item_id:
raise ValueError("Couldn't create a placeholder")
container_data = {
"id": "openpype.placeholder",
"name": name,
"is_placeholder": True,
"plugin_identifier": self.identifier,
"uuid": str(uuid.uuid4()), # scene_identifier
"data": placeholder_data,
"members": [item_id]
}
stub.imprint(item_id, container_data)
def build_workfile_template(*args, **kwargs):
builder = AETemplateBuilder(registered_host())
builder.build_template(*args, **kwargs)
def update_workfile_template(*args):
builder = AETemplateBuilder(registered_host())
builder.rebuild_template()
def create_placeholder(*args):
"""Called when new workile placeholder should be created."""
host = registered_host()
builder = AETemplateBuilder(host)
window = WorkfileBuildPlaceholderDialog(host, builder)
window.exec_()
def update_placeholder(*args):
"""Called after placeholder item is selected to modify it."""
host = registered_host()
builder = AETemplateBuilder(host)
stub = get_stub()
selected_items = stub.get_selected_items(True, True, True)
if len(selected_items) != 1:
stub.print_msg("Please select just 1 placeholder")
return
selected_id = selected_items[0].id
placeholder_item = None
placeholder_items_by_id = {
placeholder_item.scene_identifier: placeholder_item
for placeholder_item in builder.get_placeholders()
}
for metadata_item in stub.get_metadata():
if not metadata_item.get("is_placeholder"):
continue
if selected_id in metadata_item.get("members"):
placeholder_item = placeholder_items_by_id.get(
metadata_item["uuid"])
break
if not placeholder_item:
stub.print_msg("Didn't find placeholder metadata. "
"Remove and re-create placeholder.")
return
window = WorkfileBuildPlaceholderDialog(host, builder)
window.set_update_mode(placeholder_item)
window.exec_()
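For reference, the metadata record that `_imprint_item` writes (and that `_collect_scene_placeholders` later reads back from `stub.get_metadata()`) has roughly this shape; all concrete values below are hypothetical:

```
# Illustrative placeholder metadata record; keys mirror _imprint_item above,
# values are made up for the example.
example_placeholder_meta = {
    "id": "openpype.placeholder",
    "name": "PLACEHOLDER_IMAGE",
    "is_placeholder": True,
    "plugin_identifier": "aftereffects.load",  # hypothetical plugin identifier
    "uuid": "0e1f2a3b-4c5d-6e7f-8091-a2b3c4d5e6f7",  # scene_identifier
    "data": {"builder_type": "context_folder"},  # plugin-specific options (assumed)
    "members": [42],  # AE item id wrapped by the placeholder
}
```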

View file

@ -0,0 +1,732 @@
"""
Stub handling connection from server to client.
Used anywhere solution is calling client methods.
"""
import json
import logging
import attr
from wsrpc_aiohttp import WebSocketAsync
from .webserver import WebServerTool
class ConnectionNotEstablishedYet(Exception):
pass
@attr.s
class AEItem(object):
"""
Object denoting an Item in AE. Each item is created in AE by a Loader,
but contains the same fields, which are used in later processing.
"""
# metadata
id = attr.ib() # id created by AE, could be used for querying
name = attr.ib() # name of item
item_type = attr.ib(default=None) # item type (footage, folder, comp)
# all imported elements, single for
# regular image, array for Backgrounds
members = attr.ib(factory=list)
frameStart = attr.ib(default=None)
framesDuration = attr.ib(default=None)
frameRate = attr.ib(default=None)
file_name = attr.ib(default=None)
instance_id = attr.ib(default=None) # New Publisher
width = attr.ib(default=None)
height = attr.ib(default=None)
is_placeholder = attr.ib(default=False)
uuid = attr.ib(default=False)
path = attr.ib(default=False) # path to FootageItem to validate
# list of composition Footage is in
containing_comps = attr.ib(factory=list)
class AfterEffectsServerStub():
"""
Stub for calling functions on the client (AfterEffects JavaScript) side.
Expects that the client is already connected (started when the AYON
extension panel is opened).
'self.websocketserver.call' is used as async wrapper
"""
PUBLISH_ICON = '\u2117 '
LOADED_ICON = '\u25bc'
def __init__(self):
self.websocketserver = WebServerTool.get_instance()
self.client = self.get_client()
self.log = logging.getLogger(self.__class__.__name__)
@staticmethod
def get_client():
"""
Return first connected client to WebSocket
TODO implement selection by Route
:return: <WebSocketAsync> client
"""
clients = WebSocketAsync.get_clients()
client = None
if len(clients) > 0:
key = list(clients.keys())[0]
client = clients.get(key)
return client
def open(self, path):
"""
Open file located at 'path' (local).
Args:
path(string): file path locally
Returns: None
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.open', path=path))
return self._handle_return(res)
def get_metadata(self):
"""
Get complete stored JSON with metadata from AE.Metadata.Label
field.
It contains containers loaded by any Loader OR instances created
by Creator.
Returns:
(list)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_metadata'))
metadata = self._handle_return(res)
return metadata or []
def read(self, item, layers_meta=None):
"""
Parses item metadata from Label field of active document.
Used as filter to pick metadata for specific 'item' only.
Args:
item (AEItem): pulled info from AE
layers_meta (dict): full list from Headline
(load and inject for better performance in loops)
Returns:
(dict):
"""
if layers_meta is None:
layers_meta = self.get_metadata()
for item_meta in layers_meta:
if 'container' in item_meta.get('id') and \
str(item.id) == str(item_meta.get('members')[0]):
return item_meta
self.log.debug("Couldn't find layer metadata")
def imprint(self, item_id, data, all_items=None, items_meta=None):
"""
Save item metadata to the Label field of the active document's metadata
Args:
item_id (int|str): id of FootageItem or instance_id for workfiles
data(string): json representation for single layer
all_items (list of item): for performance, could be
injected for usage in loop, if not, single call will be
triggered
items_meta(string): json representation from Headline
(for performance - provide only if imprint is in
loop - value should be same)
Returns: None
"""
if not items_meta:
items_meta = self.get_metadata()
result_meta = []
# fix existing
is_new = True
for item_meta in items_meta:
if ((item_meta.get('members') and
str(item_id) == str(item_meta.get('members')[0])) or
item_meta.get("instance_id") == item_id):
is_new = False
if data:
item_meta.update(data)
result_meta.append(item_meta)
else:
result_meta.append(item_meta)
if is_new:
result_meta.append(data)
# Ensure only valid ids are stored.
if not all_items:
# loaders create FootageItem now
all_items = self.get_items(comps=True,
folders=True,
footages=True)
item_ids = [int(item.id) for item in all_items]
cleaned_data = []
for meta in result_meta:
# do not add instances with nonexistent item ids
if meta.get("members"):
if int(meta["members"][0]) not in item_ids:
continue
cleaned_data.append(meta)
payload = json.dumps(cleaned_data, indent=4)
res = self.websocketserver.call(self.client.call
('AfterEffects.imprint',
payload=payload))
return self._handle_return(res)
def get_active_document_full_name(self):
"""
Returns absolute path of active document via ws call
Returns(string): file name
"""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_active_document_full_name'))
return self._handle_return(res)
def get_active_document_name(self):
"""
Returns just a name of active document via ws call
Returns(string): file name
"""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_active_document_name'))
return self._handle_return(res)
def get_items(self, comps, folders=False, footages=False):
"""
Get all items from Project panel according to arguments.
There are multiple different types:
CompItem (could have multiple layers - source for Creator,
will be rendered)
FolderItem (collection type, currently used for Background
loading)
FootageItem (imported file - created by Loader)
Args:
comps (bool): return CompItems
folders (bool): return FolderItem
footages (bool): return FootageItem
Returns:
(list) of namedtuples
"""
res = self.websocketserver.call(
self.client.call('AfterEffects.get_items',
comps=comps,
folders=folders,
footages=footages)
)
return self._to_records(self._handle_return(res))
def select_items(self, items):
"""
Select items in Project list
Args:
items (list): of int item ids
"""
self.websocketserver.call(
self.client.call('AfterEffects.select_items', items=items))
def get_selected_items(self, comps, folders=False, footages=False):
"""
Same as get_items but using selected items only
Args:
comps (bool): return CompItems
folders (bool): return FolderItem
footages (bool): return FootageItem
Returns:
(list) of namedtuples
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_selected_items',
comps=comps,
folders=folders,
footages=footages)
)
return self._to_records(self._handle_return(res))
def add_item(self, name, item_type):
"""
Adds either composition or folder to project item list.
Args:
name (str)
item_type (str): COMP|FOLDER
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item',
name=name,
item_type=item_type))
return self._handle_return(res)
def get_item(self, item_id):
"""
Returns the item for a particular 'item_id' or None
Args:
item_id (int, or string)
"""
for item in self.get_items(True, True, True):
if str(item.id) == str(item_id):
return item
return None
def import_file(self, path, item_name, import_options=None):
"""
Imports file as a FootageItem. Used in Loader
Args:
path (string): absolute path for asset file
item_name (string): label for created FootageItem
import_options (dict): different files (img vs psd) need different
config
"""
res = self.websocketserver.call(
self.client.call('AfterEffects.import_file',
path=path,
item_name=item_name,
import_options=import_options)
)
records = self._to_records(self._handle_return(res))
if records:
return records.pop()
def replace_item(self, item_id, path, item_name):
""" Replace FootageItem with new file
Args:
item_id (int):
path (string):absolute path
item_name (string): label on item in Project list
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.replace_item',
item_id=item_id,
path=path, item_name=item_name))
return self._handle_return(res)
def rename_item(self, item_id, item_name):
""" Replace item with item_name
Args:
item_id (int):
item_name (string): label on item in Project list
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.rename_item',
item_id=item_id,
item_name=item_name))
return self._handle_return(res)
def delete_item(self, item_id):
""" Deletes *Item in a file
Args:
item_id (int):
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.delete_item',
item_id=item_id))
return self._handle_return(res)
def remove_instance(self, instance_id, metadata=None):
"""
Removes instance with 'instance_id' from the file's metadata and
saves it.
The matching item is kept in the file though.
Args:
instance_id(string): instance id
"""
cleaned_data = []
if metadata is None:
metadata = self.get_metadata()
for instance in metadata:
inst_id = instance.get("instance_id") or instance.get("uuid")
if inst_id != instance_id:
cleaned_data.append(instance)
payload = json.dumps(cleaned_data, indent=4)
res = self.websocketserver.call(self.client.call
('AfterEffects.imprint',
payload=payload))
return self._handle_return(res)
def is_saved(self):
# TODO
return True
def set_label_color(self, item_id, color_idx):
"""
Used for highlight additional information in Project panel.
Green color is loaded asset, blue is created asset
Args:
item_id (int):
color_idx (int): 0-16 Label colors from AE Project view
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.set_label_color',
item_id=item_id,
color_idx=color_idx))
return self._handle_return(res)
def get_comp_properties(self, comp_id):
""" Get composition information for render purposes
Returns startFrame, frameDuration, fps, width, height.
Args:
comp_id (int):
Returns:
(AEItem)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_comp_properties',
item_id=comp_id
))
records = self._to_records(self._handle_return(res))
if records:
return records.pop()
def set_comp_properties(self, comp_id, start, duration, frame_rate,
width, height):
"""
Set work area to predefined values (from Ftrack).
Work area directs what gets rendered.
Beware of rounding, AE expects seconds, not frames directly.
Args:
comp_id (int):
start (int): workAreaStart in frames
duration (int): in frames
frame_rate (float): frames per second
width (int): resolution width
height (int): resolution height
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.set_comp_properties',
item_id=comp_id,
start=start,
duration=duration,
frame_rate=frame_rate,
width=width,
height=height))
return self._handle_return(res)
def save(self):
"""
Saves active document
Returns: None
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.save'))
return self._handle_return(res)
def saveAs(self, project_path, as_copy):
"""
Saves the active project to 'project_path' (.aep), optionally as a copy.
Args:
project_path(string): full local path
as_copy: <boolean>
Returns: None
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.saveAs',
image_path=project_path,
as_copy=as_copy))
return self._handle_return(res)
def get_render_info(self, comp_id):
""" Get render queue info for render purposes
Returns:
(list) of (AEItem): with 'file_name' field
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_render_info',
comp_id=comp_id))
records = self._to_records(self._handle_return(res))
return records
def get_audio_url(self, item_id):
""" Get audio layer absolute url for comp
Args:
item_id (int): composition id
Returns:
(str): absolute path url
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.get_audio_url',
item_id=item_id))
return self._handle_return(res)
def import_background(self, comp_id, comp_name, files):
"""
Imports backgrounds images to existing or new composition.
If comp_id is not provided, new composition is created, basic
values (width, heights, frameRatio) takes from first imported
image.
All images from background json are imported as a FootageItem and
separate layer is created for each of them under composition.
Order of imported 'files' is important.
Args:
comp_id (int): id of existing composition (null if new)
comp_name (str): used when new composition
files (list): list of absolute paths to import and
add as layers
Returns:
(AEItem): object with id of created folder, all imported images
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.import_background',
comp_id=comp_id,
comp_name=comp_name,
files=files))
records = self._to_records(self._handle_return(res))
if records:
return records.pop()
def reload_background(self, comp_id, comp_name, files):
"""
Reloads backgrounds images to existing composition.
It actually deletes complete folder with imported images and
created composition for safety.
Args:
comp_id (int): id of existing composition to be overwritten
comp_name (str): new name of composition (could be same as old
if version up only)
files (list): list of absolute paths to import and
add as layers
Returns:
(AEItem): object with id of created folder, all imported images
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.reload_background',
comp_id=comp_id,
comp_name=comp_name,
files=files))
records = self._to_records(self._handle_return(res))
if records:
return records.pop()
def add_item_as_layer(self, comp_id, item_id):
"""
Adds already imported FootageItem ('item_id') as a new
layer to composition ('comp_id').
Args:
comp_id (int): id of target composition
item_id (int): FootageItem.id
comp already found previously
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item_as_layer',
comp_id=comp_id,
item_id=item_id))
records = self._to_records(self._handle_return(res))
if records:
return records.pop()
def add_item_instead_placeholder(self, placeholder_item_id, item_id):
"""
Adds item_id to layers where placeholder_item_id is present.
One placeholder could result in multiple loaded containers (e.g. items).
Args:
placeholder_item_id (int): id of placeholder item
item_id (int): loaded FootageItem id
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_item_instead_placeholder', # noqa
placeholder_item_id=placeholder_item_id, # noqa
item_id=item_id))
return self._handle_return(res)
def add_placeholder(self, name, width, height, fps, duration):
"""
Adds new FootageItem as a placeholder for workfile builder
Placeholder requires width etc, currently probably only hardcoded
values.
Args:
name (str)
width (int)
height (int)
fps (float)
duration (int)
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.add_placeholder',
name=name,
width=width,
height=height,
fps=fps,
duration=duration))
return self._handle_return(res)
def render(self, folder_url, comp_id):
"""
Render all render queue items to 'folder_url'
Args:
folder_url(string): local folder path for collecting
comp_id (int): id of composition to render
Returns: None
"""
res = self.websocketserver.call(self.client.call
('AfterEffects.render',
folder_url=folder_url,
comp_id=comp_id))
return self._handle_return(res)
def get_extension_version(self):
"""Returns version number of installed extension."""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_extension_version'))
return self._handle_return(res)
def get_app_version(self):
"""Returns version number of installed application (17.5...)."""
res = self.websocketserver.call(self.client.call(
'AfterEffects.get_app_version'))
return self._handle_return(res)
def close(self):
res = self.websocketserver.call(self.client.call('AfterEffects.close'))
return self._handle_return(res)
def print_msg(self, msg):
"""Triggers Javascript alert dialog."""
self.websocketserver.call(self.client.call
('AfterEffects.print_msg',
msg=msg))
def _handle_return(self, res):
"""Wraps return, throws ValueError if 'error' key is present."""
if res and isinstance(res, str) and res != "undefined":
try:
parsed = json.loads(res)
except json.decoder.JSONDecodeError:
raise ValueError("Received broken JSON {}".format(res))
if not parsed: # empty list
return parsed
first_item = parsed
if isinstance(parsed, list):
first_item = parsed[0]
if first_item:
if first_item.get("error"):
raise ValueError(first_item["error"])
# singular values (file name etc)
if first_item.get("result") is not None:
return first_item["result"]
return parsed # parsed
return res
def _to_records(self, payload):
"""
Converts a JSON string representation into a list of AEItem so that
dot-notation access works.
Args:
payload(dict): dictionary from json representation, expected to
come from _handle_return
Returns: <list of AEItem>
"""
if not payload:
return []
if isinstance(payload, str): # safety fallback
try:
payload = json.loads(payload)
except json.decoder.JSONDecodeError:
raise ValueError("Received broken JSON {}".format(payload))
if isinstance(payload, dict):
payload = [payload]
ret = []
# convert to AEItem to use dot notation
for d in payload:
if not d:
continue
# currently implemented and expected fields
item = AEItem(d.get('id'),
d.get('name'),
d.get('type'),
d.get('members'),
d.get('frameStart'),
d.get('framesDuration'),
d.get('frameRate'),
d.get('file_name'),
d.get("instance_id"),
d.get("width"),
d.get("height"),
d.get("is_placeholder"),
d.get("uuid"),
d.get("path"),
d.get("containing_comps"),)
ret.append(item)
return ret
def get_stub():
"""
Convenience function to get the server RPC stub to call methods directed
at the host (AfterEffects).
It expects an already created connection, started from the client.
Currently created when the panel is opened (AE: Window>Extensions>AYON).
:return: <AfterEffectsServerStub> where functions could be called from
"""
ae_stub = AfterEffectsServerStub()
if not ae_stub.client:
raise ConnectionNotEstablishedYet("Connection is not created yet")
return ae_stub
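A short sketch of typical stub usage from the Python side, assuming the extension panel is already open so a client is connected (otherwise `get_stub` raises `ConnectionNotEstablishedYet`):

```
from ayon_aftereffects.api import get_stub

stub = get_stub()
comps = stub.get_items(comps=True, folders=False, footages=False)
for comp in comps:
    print(comp.id, comp.name)

if comps:
    # highlight the first composition with an AE label color (0-16)
    stub.set_label_color(comps[0].id, 9)
    stub.save()
```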

View file

@ -0,0 +1,88 @@
import os
import platform
import subprocess
from ayon_core.lib import (
get_ayon_launcher_args,
is_using_ayon_console,
)
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_aftereffects import get_launch_script_path
def get_launch_kwargs(kwargs):
"""Explicit setting of kwargs for Popen for AfterEffects.
Expected behavior
- ayon_console opens window with logs
- ayon has stdout/stderr available for capturing
Args:
kwargs (Union[dict, None]): Current kwargs or None.
"""
if kwargs is None:
kwargs = {}
if platform.system().lower() != "windows":
return kwargs
if is_using_ayon_console():
kwargs.update({
"creationflags": subprocess.CREATE_NEW_CONSOLE
})
else:
kwargs.update({
"creationflags": subprocess.CREATE_NO_WINDOW,
"stdout": subprocess.DEVNULL,
"stderr": subprocess.DEVNULL
})
return kwargs
class AEPrelaunchHook(PreLaunchHook):
"""Launch arguments preparation.
Hook adds the python executable and script path of the AE implementation
before the AE executable, and adds the last workfile path to the launch
arguments.
Existence of the last workfile is checked. If the workfile does not
exist, it tries to copy a templated workfile from a predefined path.
"""
app_groups = {"aftereffects"}
order = 20
launch_types = {LaunchTypes.local}
def execute(self):
# Pop executable
executable_path = self.launch_context.launch_args.pop(0)
# Pop rest of launch arguments - There should not be other arguments!
remainders = []
while self.launch_context.launch_args:
remainders.append(self.launch_context.launch_args.pop(0))
script_path = get_launch_script_path()
new_launch_args = get_ayon_launcher_args(
"run", script_path, executable_path
)
# Add workfile path if exists
workfile_path = self.data["last_workfile_path"]
if (
self.data.get("start_last_workfile")
and workfile_path
and os.path.exists(workfile_path)
):
new_launch_args.append(workfile_path)
# Append as whole list as these arguments should not be separated
self.launch_context.launch_args.append(new_launch_args)
if remainders:
self.launch_context.launch_args.extend(remainders)
self.launch_context.kwargs = get_launch_kwargs(
self.launch_context.kwargs
)
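`get_launch_kwargs` above is self-contained; its effect on Windows can be summarized as:

```
# Sketch of get_launch_kwargs behaviour on Windows; on other platforms the
# kwargs are returned unchanged.
kwargs = get_launch_kwargs(None)
# with the ayon_console executable:
#   {"creationflags": subprocess.CREATE_NEW_CONSOLE}
# with the windowed ayon executable:
#   {"creationflags": subprocess.CREATE_NO_WINDOW,
#    "stdout": subprocess.DEVNULL,
#    "stderr": subprocess.DEVNULL}
```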

View file

@ -0,0 +1,260 @@
import re
from ayon_core import resources
from ayon_core.lib import BoolDef, UISeparatorDef
from ayon_core.pipeline import (
Creator,
CreatedInstance,
CreatorError
)
from ayon_core.lib import prepare_template_data
from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS
from ayon_aftereffects import api
from ayon_aftereffects.api.pipeline import cache_and_get_instances
from ayon_aftereffects.api.lib import set_settings
class RenderCreator(Creator):
"""Creates 'render' instance for publishing.
Result of a 'render' instance is a video or a sequence of images for a
particular composition, based on the configuration in its RenderQueue.
"""
identifier = "render"
label = "Render"
product_type = "render"
description = "Render creator"
create_allow_context_change = True
# Settings
mark_for_review = True
force_setting_values = True
def create(self, product_name, data, pre_create_data):
stub = api.get_stub() # only after After Effects is up
try:
_ = stub.get_active_document_full_name()
except ValueError:
raise CreatorError(
"Please save workfile via Workfile app first!"
)
if pre_create_data.get("use_selection"):
comps = stub.get_selected_items(
comps=True, folders=False, footages=False
)
else:
comps = stub.get_items(comps=True, folders=False, footages=False)
if not comps:
raise CreatorError(
"Nothing to create. Select composition in Project Bin if "
"'Use selection' is toggled or create at least "
"one composition."
)
use_composition_name = (pre_create_data.get("use_composition_name") or
len(comps) > 1)
for comp in comps:
composition_name = re.sub(
"[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS),
"",
comp.name
)
if use_composition_name:
if "{composition}" not in product_name.lower():
product_name += "{Composition}"
dynamic_fill = prepare_template_data({"composition":
composition_name})
comp_product_name = product_name.format(**dynamic_fill)
data["composition_name"] = composition_name
else:
comp_product_name = re.sub(
r"\{composition\}",
"",
product_name,
flags=re.IGNORECASE
)
for inst in self.create_context.instances:
if comp_product_name == inst.product_name:
raise CreatorError("{} already exists".format(
inst.product_name))
data["members"] = [comp.id]
data["orig_comp_name"] = composition_name
new_instance = CreatedInstance(
self.product_type, comp_product_name, data, self
)
if "farm" in pre_create_data:
use_farm = pre_create_data["farm"]
new_instance.creator_attributes["farm"] = use_farm
review = pre_create_data["mark_for_review"]
new_instance.creator_attributes["mark_for_review"] = review
api.get_stub().imprint(new_instance.id,
new_instance.data_to_store())
self._add_instance_to_context(new_instance)
stub.rename_item(comp.id, comp_product_name)
if self.force_setting_values:
set_settings(True, True, [comp.id], print_msg=False)
def get_pre_create_attr_defs(self):
output = [
BoolDef("use_selection",
tooltip="Composition for publishable instance should be "
"selected by default.",
default=True, label="Use selection"),
BoolDef("use_composition_name",
label="Use composition name in product"),
UISeparatorDef(),
BoolDef("farm", label="Render on farm"),
BoolDef(
"mark_for_review",
label="Review",
default=self.mark_for_review
)
]
return output
def get_instance_attr_defs(self):
return [
BoolDef("farm", label="Render on farm"),
BoolDef(
"mark_for_review",
label="Review",
default=False
)
]
def get_icon(self):
return resources.get_openpype_splash_filepath()
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
# legacy instances have product_type=='render' or 'renderLocal', use them
creator_id = instance_data.get("creator_identifier")
if not creator_id:
# NOTE this is for backwards compatibility but probably can be
# removed
creator_id = instance_data.get("family", "")
creator_id = creator_id.replace("Local", "")
if creator_id == self.identifier:
instance_data = self._handle_legacy(instance_data)
instance = CreatedInstance.from_existing(
instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
api.get_stub().imprint(created_inst.get("instance_id"),
created_inst.data_to_store())
name_change = _changes.get("productName")
if name_change:
api.get_stub().rename_item(created_inst.data["members"][0],
name_change.new_value)
def remove_instances(self, instances):
"""Removes metadata and renames to original comp name if available."""
for instance in instances:
self._remove_instance_from_context(instance)
self.host.remove_instance(instance)
comp_id = instance.data["members"][0]
comp = api.get_stub().get_item(comp_id)
orig_comp_name = instance.data.get("orig_comp_name")
if comp:
if orig_comp_name:
new_comp_name = orig_comp_name
else:
new_comp_name = "dummyCompName"
api.get_stub().rename_item(comp_id,
new_comp_name)
def apply_settings(self, project_settings):
plugin_settings = (
project_settings["aftereffects"]["create"]["RenderCreator"]
)
self.mark_for_review = plugin_settings["mark_for_review"]
self.default_variants = plugin_settings.get(
"default_variants",
plugin_settings.get("defaults") or []
)
def get_detail_description(self):
return """Creator for Render instances
Main publishable item in AfterEffects will be of `render` product type.
Result of this item (instance) is a picture sequence or video that could
be a final delivery product or loaded and used in other DCCs.
Select a single composition and create an instance of the 'render'
product type, or turn off 'Use selection' to create instances for all
compositions.
'Use composition name in product' allows explicitly adding the
composition name to the created product name.
Position of the composition name could be set in
`project_settings/global/tools/creator/product_name_profiles` with
some form of the '{composition}' placeholder.
The composition name will be used implicitly if multiple compositions
should be handled at the same time.
If the {composition} placeholder is not used in 'product_name_profiles',
the composition name will be capitalized and appended to the end of the
product name if necessary.
If the composition name should be used, it will be cleaned up of
characters that would cause an issue in published file names.
"""
def get_dynamic_data(
self,
project_name,
folder_entity,
task_entity,
variant,
host_name,
instance
):
dynamic_data = {}
if instance is not None:
composition_name = instance.get("composition_name")
if composition_name:
dynamic_data["composition"] = composition_name
else:
dynamic_data["composition"] = "{composition}"
return dynamic_data
def _handle_legacy(self, instance_data):
"""Converts old instances to new format."""
if not instance_data.get("members"):
instance_data["members"] = [instance_data.get("uuid")]
if instance_data.get("uuid"):
# uuid not needed, replaced with unique instance_id
api.get_stub().remove_instance(instance_data.get("uuid"))
instance_data.pop("uuid")
if not instance_data.get("task"):
instance_data["task"] = self.create_context.get_current_task_name()
if not instance_data.get("creator_attributes"):
is_old_farm = instance_data.get("family") != "renderLocal"
instance_data["creator_attributes"] = {"farm": is_old_farm}
instance_data["productType"] = self.product_type
if instance_data["creator_attributes"].get("mark_for_review") is None:
instance_data["creator_attributes"]["mark_for_review"] = True
return instance_data
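A small sketch of how the composition name ends up in the product name during `create`, assuming `prepare_template_data` expands the key into its capitalization variants ({composition}, {Composition}, ...):

```
from ayon_core.lib import prepare_template_data

product_name = "renderCompositingDefault"
if "{composition}" not in product_name.lower():
    product_name += "{Composition}"

# assumed behaviour: the helper returns capitalization variants of the key
dynamic_fill = prepare_template_data({"composition": "mainComp"})
comp_product_name = product_name.format(**dynamic_fill)
# expected result (assumption): "renderCompositingDefaultMainComp"
```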

View file

@ -0,0 +1,106 @@
import ayon_api
from ayon_core.pipeline import (
AutoCreator,
CreatedInstance
)
from ayon_aftereffects import api
from ayon_aftereffects.api.pipeline import cache_and_get_instances
class AEWorkfileCreator(AutoCreator):
identifier = "workfile"
product_type = "workfile"
default_variant = "Main"
def get_instance_attr_defs(self):
return []
def collect_instances(self):
for instance_data in cache_and_get_instances(self):
creator_id = instance_data.get("creator_identifier")
if creator_id == self.identifier:
product_name = instance_data["productName"]
instance = CreatedInstance(
self.product_type, product_name, instance_data, self
)
self._add_instance_to_context(instance)
def update_instances(self, update_list):
# nothing to change on workfiles
pass
def create(self, options=None):
existing_instance = None
for instance in self.create_context.instances:
if instance.product_type == self.product_type:
existing_instance = instance
break
context = self.create_context
project_name = context.get_current_project_name()
folder_path = context.get_current_folder_path()
task_name = context.get_current_task_name()
host_name = context.host_name
existing_folder_path = None
if existing_instance is not None:
existing_folder_path = existing_instance.get("folderPath")
if existing_instance is None:
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
data = {
"folderPath": folder_path,
"task": task_name,
"variant": self.default_variant,
}
data.update(self.get_dynamic_data(
project_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
None,
))
new_instance = CreatedInstance(
self.product_type, product_name, data, self
)
self._add_instance_to_context(new_instance)
api.get_stub().imprint(new_instance.get("instance_id"),
new_instance.data_to_store())
elif (
existing_folder_path != folder_path
or existing_instance["task"] != task_name
):
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
existing_instance["folderPath"] = folder_path
existing_instance["task"] = task_name
existing_instance["productName"] = product_name

View file

@ -0,0 +1,111 @@
import re
from ayon_core.pipeline import get_representation_path
from ayon_aftereffects import api
from ayon_aftereffects.api.lib import (
get_background_layers,
get_unique_layer_name,
)
class BackgroundLoader(api.AfterEffectsLoader):
"""
Load images from the Background product type.
Creates for each background a separate folder with all images imported
from the background json AND an automatically created composition with
layers, one layer per image.
For each load a container is created and stored in the project (.aep)
metadata.
"""
label = "Load JSON Background"
product_types = {"background"}
representations = {"json"}
def load(self, context, name=None, namespace=None, data=None):
stub = self.get_stub()
items = stub.get_items(comps=True)
existing_items = [layer.name.replace(stub.LOADED_ICON, '')
for layer in items]
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(context["folder"]["name"], name))
path = self.filepath_from_context(context)
layers = get_background_layers(path)
if not layers:
raise ValueError("No layers found in {}".format(path))
comp = stub.import_background(None, stub.LOADED_ICON + comp_name,
layers)
if not comp:
raise ValueError("Import background failed. "
"Please contact support")
self[:] = [comp]
namespace = namespace or comp_name
return api.containerise(
name,
namespace,
comp,
context,
self.__class__.__name__
)
def update(self, container, context):
""" Switch asset or change version """
stub = self.get_stub()
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
repre_entity = context["representation"]
_ = container.pop("layer")
# without iterator number (_001, 002...)
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
comp_name = "{}_{}".format(folder_name, product_name)
# switching assets
if namespace_from_container != comp_name:
items = stub.get_items(comps=True)
existing_items = [layer.name for layer in items]
comp_name = get_unique_layer_name(
existing_items,
"{}_{}".format(folder_name, product_name))
else: # switching version - keep same name
comp_name = container["namespace"]
path = get_representation_path(repre_entity)
layers = get_background_layers(path)
comp = stub.reload_background(container["members"][1],
stub.LOADED_ICON + comp_name,
layers)
# update container
container["representation"] = repre_entity["id"]
container["name"] = product_name
container["namespace"] = comp_name
container["members"] = comp.members
stub.imprint(comp.id, container)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from file
metadata.
Args:
container (dict): container to be removed - used to get layer_id
"""
stub = self.get_stub()
layer = container.pop("layer")
stub.imprint(layer.id, {})
stub.delete_item(layer.id)
def switch(self, container, context):
self.update(container, context)

View file

@ -0,0 +1,119 @@
import re
import os
from ayon_core.pipeline import get_representation_path
from ayon_aftereffects import api
from ayon_aftereffects.api.lib import get_unique_layer_name
class FileLoader(api.AfterEffectsLoader):
"""Load images
Stores the imported asset in a container named after the asset.
"""
label = "Load file"
product_types = {
"image",
"plate",
"render",
"prerender",
"review",
"audio",
}
representations = {"*"}
def load(self, context, name=None, namespace=None, data=None):
stub = self.get_stub()
selected_folders = stub.get_selected_items(
comps=False, folders=True, footages=False)
if selected_folders:
stub.select_items([folder.id for folder in selected_folders])
layers = stub.get_items(comps=True, folders=True, footages=True)
existing_layers = [layer.name for layer in layers]
comp_name = get_unique_layer_name(
existing_layers, "{}_{}".format(
context["folder"]["name"], name
)
)
import_options = {}
path = self.filepath_from_context(context)
if len(context["representation"]["files"]) > 1:
import_options['sequence'] = True
if not path:
repr_id = context["representation"]["id"]
self.log.warning(
"Representation id `{}` is failing to load".format(repr_id))
return
path = path.replace("\\", "/")
if '.psd' in path:
import_options['ImportAsType'] = 'ImportAsType.COMP'
comp = stub.import_file(path, stub.LOADED_ICON + comp_name,
import_options)
if not comp:
self.log.warning(
"Representation `{}` is failing to load".format(path))
self.log.warning("Check host app for alert error.")
return
self[:] = [comp]
namespace = namespace or comp_name
return api.containerise(
name,
namespace,
comp,
context,
self.__class__.__name__
)
def update(self, container, context):
""" Switch asset or change version """
stub = self.get_stub()
layer = container.pop("layer")
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
repre_entity = context["representation"]
namespace_from_container = re.sub(r'_\d{3}$', '',
container["namespace"])
layer_name = "{}_{}".format(folder_name, product_name)
# switching assets
if namespace_from_container != layer_name:
layers = stub.get_items(comps=True)
existing_layers = [layer.name for layer in layers]
layer_name = get_unique_layer_name(
existing_layers,
"{}_{}".format(folder_name, product_name))
else: # switching version - keep same name
layer_name = container["namespace"]
path = get_representation_path(repre_entity)
if len(repre_entity["files"]) > 1:
path = os.path.dirname(path)
# with aftereffects.maintained_selection(): # TODO
stub.replace_item(layer.id, path, stub.LOADED_ICON + layer_name)
stub.imprint(
layer.id, {"representation": repre_entity["id"],
"name": product_name,
"namespace": layer_name}
)
def remove(self, container):
"""
Removes element from scene: deletes layer + removes from Headline
Args:
container (dict): container to be removed - used to get layer_id
"""
stub = self.get_stub()
layer = container.pop("layer")
stub.imprint(layer.id, {})
stub.delete_item(layer.id)
def switch(self, container, context):
self.update(container, context)

View file

@ -0,0 +1,21 @@
import pyblish.api
from ayon_aftereffects.api import get_stub
class AddPublishHighlight(pyblish.api.InstancePlugin):
"""
Revert the rendered comp name back and add the publish highlight
"""
label = "Add render highlight"
order = pyblish.api.IntegratorOrder + 8.0
hosts = ["aftereffects"]
families = ["render.farm"]
optional = True
def process(self, instance):
stub = get_stub()
item = instance.data
# comp name contains highlight icon
stub.rename_item(item["comp_id"], item["comp_name"])

View file

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""Close AE after publish. For Webpublishing only."""
import pyblish.api
from ayon_aftereffects.api import get_stub
class CloseAE(pyblish.api.ContextPlugin):
"""Close AE after publish. For Webpublishing only.
"""
order = pyblish.api.IntegratorOrder + 14
label = "Close AE"
optional = True
active = True
hosts = ["aftereffects"]
targets = ["automated"]
def process(self, context):
self.log.info("CloseAE")
stub = get_stub()
self.log.info("Shutting down AE")
stub.save()
stub.close()
self.log.info("AE closed")

View file

@ -0,0 +1,27 @@
import os
import pyblish.api
from ayon_aftereffects.api import get_stub
class CollectAudio(pyblish.api.ContextPlugin):
"""Inject audio file url for rendered composition into context.
Needs to run AFTER 'collect_render'. Use collected comp_id to check
if there is an AVLayer in this composition
"""
order = pyblish.api.CollectorOrder + 0.499
label = "Collect Audio"
hosts = ["aftereffects"]
def process(self, context):
for instance in context:
if 'render.farm' in instance.data.get("families", []):
comp_id = instance.data["comp_id"]
if not comp_id:
self.log.debug("No comp_id filled in instance")
continue
context.data["audioFile"] = os.path.normpath(
get_stub().get_audio_url(comp_id)
).replace("\\", "/")

View file

@ -0,0 +1,18 @@
import os
import pyblish.api
from ayon_aftereffects.api import get_stub
class CollectCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
order = pyblish.api.CollectorOrder - 0.49
label = "Current File"
hosts = ["aftereffects"]
def process(self, context):
context.data["currentFile"] = os.path.normpath(
get_stub().get_active_document_full_name()
).replace("\\", "/")

View file

@ -0,0 +1,58 @@
import os
import re
import pyblish.api
from ayon_aftereffects.api import (
get_stub,
get_extension_manifest_path
)
class CollectExtensionVersion(pyblish.api.ContextPlugin):
""" Pulls and compares version of installed extension.
It is recommended to use the same extension version as provided with the
Openpype code.
Please use Anastasiys Extension Manager or ZXPInstaller to update the
extension in case of an error.
You can locate extension.zxp in your installed Openpype code in
`repos/avalon-core/avalon/aftereffects`
"""
# This technically should be a validator, but other collectors might be
# impacted by usage of an obsolete extension, so a collector that runs
# first was chosen.
order = pyblish.api.CollectorOrder - 0.5
label = "Collect extension version"
hosts = ["aftereffects"]
optional = True
active = True
def process(self, context):
installed_version = get_stub().get_extension_version()
if not installed_version:
raise ValueError("Unknown version, probably old extension")
manifest_url = get_extension_manifest_path()
if not os.path.exists(manifest_url):
self.log.debug("Unable to locate extension manifest, not checking")
return
expected_version = None
with open(manifest_url) as fp:
content = fp.read()
found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
content)
if found:
expected_version = found[0][1]
if expected_version != installed_version:
msg = (
"Expected version '{}' found '{}'\n Please update"
" your installed extension, it might not work properly."
).format(expected_version, installed_version)
raise ValueError(msg)
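The manifest check relies on a simple regular expression; a standalone illustration with a made-up manifest fragment:

```
import re

# hypothetical fragment of the extension manifest
content = '<ExtensionManifest ExtensionBundleVersion="1.0.1">'
found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")', content)
if found:
    print(found[0][1])  # prints "1.0.1"
```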

View file

@ -0,0 +1,225 @@
import os
import tempfile
import attr
import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import RenderInstance
from ayon_aftereffects.api import get_stub
@attr.s
class AERenderInstance(RenderInstance):
# extend generic, composition name is needed
comp_name = attr.ib(default=None)
comp_id = attr.ib(default=None)
fps = attr.ib(default=None)
projectEntity = attr.ib(default=None)
stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None)
publish_attributes = attr.ib(default={})
file_names = attr.ib(default=[])
class CollectAERender(publish.AbstractCollectRender):
order = pyblish.api.CollectorOrder + 0.100
label = "Collect After Effects Render Layers"
hosts = ["aftereffects"]
padding_width = 6
rendered_extension = 'png'
_stub = None
@classmethod
def get_stub(cls):
if not cls._stub:
cls._stub = get_stub()
return cls._stub
def get_instances(self, context):
instances = []
app_version = CollectAERender.get_stub().get_app_version()
app_version = app_version[0:4]
current_file = context.data["currentFile"]
version = context.data["version"]
project_entity = context.data["projectEntity"]
compositions = CollectAERender.get_stub().get_items(True)
compositions_by_id = {item.id: item for item in compositions}
for inst in context:
if not inst.data.get("active", True):
continue
product_type = inst.data["productType"]
if product_type not in ["render", "renderLocal"]: # legacy
continue
comp_id = int(inst.data["members"][0])
comp_info = CollectAERender.get_stub().get_comp_properties(
comp_id)
if not comp_info:
self.log.warning("Orphaned instance, deleting metadata")
inst_id = inst.data.get("instance_id") or str(comp_id)
CollectAERender.get_stub().remove_instance(inst_id)
continue
frame_start = comp_info.frameStart
frame_end = round(comp_info.frameStart +
comp_info.framesDuration) - 1
fps = comp_info.frameRate
# TODO add resolution when supported by extension
task_name = inst.data.get("task")
render_q = CollectAERender.get_stub().get_render_info(comp_id)
if not render_q:
raise ValueError("No file extension set in Render Queue")
render_item = render_q[0]
product_type = "render"
instance_families = inst.data.get("families", [])
instance_families.append(product_type)
product_name = inst.data["productName"]
instance = AERenderInstance(
productType=product_type,
family=product_type,
families=instance_families,
version=version,
time="",
source=current_file,
label="{} - {}".format(product_name, product_type),
productName=product_name,
folderPath=inst.data["folderPath"],
task=task_name,
attachTo=False,
setMembers='',
publish=True,
name=product_name,
resolutionWidth=render_item.width,
resolutionHeight=render_item.height,
pixelAspect=1,
tileRendering=False,
tilesX=0,
tilesY=0,
review="review" in instance_families,
frameStart=frame_start,
frameEnd=frame_end,
frameStep=1,
fps=fps,
app_version=app_version,
publish_attributes=inst.data.get("publish_attributes", {}),
file_names=[item.file_name for item in render_q],
# The source instance this render instance replaces
source_instance=inst
)
comp = compositions_by_id.get(comp_id)
if not comp:
raise ValueError("There is no composition for item {}".
format(comp_id))
instance.outputDir = self._get_output_dir(instance)
instance.comp_name = comp.name
instance.comp_id = comp_id
is_local = "renderLocal" in inst.data["family"] # legacy
if inst.data.get("creator_attributes"):
is_local = not inst.data["creator_attributes"].get("farm")
if is_local:
# for local renders
instance = self._update_for_local(instance, project_entity)
else:
fam = "render.farm"
if fam not in instance.families:
instance.families.append(fam)
instance.renderer = "aerender"
instance.farm = True # to skip integrate
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
instance.deadline = inst.data.get("deadline")
instances.append(instance)
return instances
def get_expected_files(self, render_instance):
"""
Returns list of rendered files that should be created by
Deadline. These are not published directly, they are source
for later 'submit_publish_job'.
Args:
render_instance (RenderInstance): to pull anatomy and parts used
in url
Returns:
(list) of absolute urls to rendered file
"""
start = render_instance.frameStart
end = render_instance.frameEnd
base_dir = self._get_output_dir(render_instance)
expected_files = []
for file_name in render_instance.file_names:
_, ext = os.path.splitext(os.path.basename(file_name))
ext = ext.replace('.', '')
version_str = "v{:03d}".format(render_instance.version)
if "#" not in file_name: # single frame (mov)W
path = os.path.join(base_dir, "{}_{}_{}.{}".format(
render_instance.folderPath,
render_instance.productName,
version_str,
ext
))
expected_files.append(path)
else:
for frame in range(start, end + 1):
path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format(
render_instance.folderPath,
render_instance.productName,
version_str,
str(frame).zfill(self.padding_width),
ext
))
expected_files.append(path)
return expected_files
def _get_output_dir(self, render_instance):
"""
Returns dir path of rendered files, used in submit_publish_job
for metadata.json location.
Should be in separate folder inside of work area.
Args:
render_instance (RenderInstance):
Returns:
(str): absolute path to rendered files
"""
# render to folder of workfile
base_dir = os.path.dirname(render_instance.source)
file_name, _ = os.path.splitext(
os.path.basename(render_instance.source))
base_dir = os.path.join(base_dir, 'renders', 'aftereffects', file_name)
# for submit_publish_job
return base_dir
def _update_for_local(self, instance, project_entity):
"""Update old saved instances to current publishing format"""
instance.stagingDir = tempfile.mkdtemp()
instance.projectEntity = project_entity
fam = "render.local"
if fam not in instance.families:
instance.families.append(fam)
return instance
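A worked example of the naming pattern produced by `get_expected_files`, with hypothetical values (folder path "sh010", product "renderCompositingMain", version 5, PNG sequence, default padding of 6):

```
# Hypothetical values only; mirrors the formatting used in get_expected_files.
base_dir = "C:/work/renders/aftereffects/sh010_compositing_v005"
folder_path = "sh010"
product_name = "renderCompositingMain"
version_str = "v{:03d}".format(5)

# multi-frame output ("#" present in the Render Queue file name)
frame_path = "{}/{}_{}_{}.{}.{}".format(
    base_dir, folder_path, product_name, version_str,
    str(1001).zfill(6), "png"
)
# -> ".../sh010_renderCompositingMain_v005.001001.png"
```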

View file

@ -0,0 +1,26 @@
"""
Requires:
None
Provides:
instance -> families ("review")
"""
import pyblish.api
class CollectReview(pyblish.api.ContextPlugin):
"""Add review to families if instance created with 'mark_for_review' flag
"""
label = "Collect Review"
hosts = ["aftereffects"]
order = pyblish.api.CollectorOrder + 0.1
settings_category = "aftereffects"
def process(self, context):
for instance in context:
creator_attributes = instance.data.get("creator_attributes") or {}
if (
creator_attributes.get("mark_for_review")
and "review" not in instance.data["families"]
):
instance.data["families"].append("review")

View file

@ -0,0 +1,35 @@
import os
import pyblish.api
class CollectWorkfile(pyblish.api.ContextPlugin):
""" Adds the AE render instances """
label = "Collect After Effects Workfile Instance"
order = pyblish.api.CollectorOrder + 0.1
default_variant = "Main"
def process(self, context):
workfile_instance = None
for instance in context:
if instance.data["productType"] == "workfile":
self.log.debug("Workfile instance found")
workfile_instance = instance
break
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
if workfile_instance is None:
self.log.debug("Workfile instance not found. Skipping")
return
# creating representation
workfile_instance.data["representations"].append({
"name": "aep",
"ext": "aep",
"files": scene_file,
"stagingDir": staging_dir,
})

View file

@ -0,0 +1,69 @@
import os
from ayon_core.pipeline import publish
from ayon_aftereffects.api import get_stub
class ExtractLocalRender(publish.Extractor):
"""Render RenderQueue locally."""
order = publish.Extractor.order - 0.47
label = "Extract Local Render"
hosts = ["aftereffects"]
families = ["renderLocal", "render.local"]
def process(self, instance):
stub = get_stub()
staging_dir = instance.data["stagingDir"]
self.log.debug("staging_dir::{}".format(staging_dir))
# pull file name collected value from Render Queue Output module
if not instance.data["file_names"]:
raise ValueError("No file extension set in Render Queue")
comp_id = instance.data['comp_id']
stub.render(staging_dir, comp_id)
representations = []
for file_name in instance.data["file_names"]:
_, ext = os.path.splitext(os.path.basename(file_name))
ext = ext[1:]
first_file_path = None
files = []
for found_file_name in os.listdir(staging_dir):
if not found_file_name.endswith(ext):
continue
files.append(found_file_name)
if first_file_path is None:
first_file_path = os.path.join(staging_dir,
found_file_name)
if not files:
self.log.info("no files")
return
# single file cannot be wrapped in array
resulting_files = files
if len(files) == 1:
resulting_files = files[0]
repre_data = {
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"name": ext,
"ext": ext,
"files": resulting_files,
"stagingDir": staging_dir
}
first_repre = not representations
if instance.data["review"] and first_repre:
repre_data["tags"] = ["review"]
# TODO return back when Extract from source same as regular
# thumbnail_path = os.path.join(staging_dir, files[0])
# instance.data["thumbnailSource"] = thumbnail_path
representations.append(repre_data)
instance.data["representations"] = representations
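For clarity, the representation dict this extractor appends for a reviewable PNG sequence looks roughly like this (all values hypothetical):

```
# Illustrative only; mirrors repre_data built above.
example_representation = {
    "frameStart": 1001,
    "frameEnd": 1025,
    "name": "png",
    "ext": "png",
    "files": ["render.001001.png", "render.001002.png"],  # a single file is not wrapped in a list
    "stagingDir": "C:/temp/ayon_render_tmp",
    "tags": ["review"],  # only on the first representation when review is enabled
}
```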

View file

@ -0,0 +1,16 @@
import pyblish.api
from ayon_core.pipeline import publish
from ayon_aftereffects.api import get_stub
class ExtractSaveScene(pyblish.api.ContextPlugin):
"""Save scene before extraction."""
order = publish.Extractor.order - 0.48
label = "Extract Save Scene"
hosts = ["aftereffects"]
def process(self, context):
stub = get_stub()
stub.save()

View file

@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Footage item missing</title>
<description>
## Footage item missing
FootageItem `{name}` references a missing file `{path}`. The render will not produce any frames and AE will stop reacting to the integration.
### How to repair?
Remove `{name}` or provide missing file.
</description>
</error>
</root>

View file

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Product context</title>
<description>
## Invalid product context
Context of the given product doesn't match your current scene.
### How to repair?
You can fix this with "repair" button on the right and refresh Publish at the bottom right.
</description>
<detail>
### __Detailed Info__ (optional)
This might happen if you reuse an old workfile and open it in a different context.
(E.g. you created product "renderCompositingDefault" from folder "Robot" in "your_project_Robot_compositing.aep", then opened this workfile in the context "Sloth", but the existing product for the "Robot" folder stayed in the workfile.)
</detail>
</error>
</root>

View file

@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
<error id="main">
<title>Scene setting</title>
<description>
## Invalid scene setting found
One of the settings in the scene doesn't match the folder settings in the database.
{invalid_setting_str}
### How to repair?
Change values for {invalid_keys_str} in the scene OR change them in the folder database if they are wrong there.
In the scene, right-click the published composition > `Composition Settings`.
</description>
<detail>
### __Detailed Info__ (optional)
This error is shown when, for example, the resolution in the scene doesn't match the resolution set on the folder in the database.
Either value in the database or in the scene is wrong.
</detail>
</error>
<error id="file_not_found">
<title>Scene file doesn't exist</title>
<description>
## Scene file doesn't exist
Collected scene {scene_url} doesn't exist.
### How to repair?
Re-save file, start publish from the beginning again.
</description>
</error>
</root>

View file

@ -0,0 +1,30 @@
import pyblish.api
from ayon_core.lib import version_up
from ayon_core.pipeline.publish import get_errored_plugins_from_context
from ayon_aftereffects.api import get_stub
class IncrementWorkfile(pyblish.api.InstancePlugin):
"""Increment the current workfile.
Saves the current scene with an increased version number.
"""
label = "Increment Workfile"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["aftereffects"]
families = ["workfile"]
optional = True
def process(self, instance):
errored_plugins = get_errored_plugins_from_context(instance.context)
if errored_plugins:
raise RuntimeError(
"Skipping incrementing current file because publishing failed."
)
scene_path = version_up(instance.context.data["currentFile"])
get_stub().saveAs(scene_path, True)
self.log.info("Incremented workfile to: {}".format(scene_path))

View file

@ -0,0 +1,24 @@
from ayon_core.pipeline import publish
from ayon_aftereffects.api import get_stub
class RemovePublishHighlight(publish.Extractor):
"""Clean utf characters which are not working in DL
Published compositions are marked with unicode icon which causes
problems on specific render environments. Clean it first, sent to
rendering, add it later back to avoid confusion.
"""
order = publish.Extractor.order - 0.49 # just before save
label = "Clean render comp"
hosts = ["aftereffects"]
families = ["render.farm"]
def process(self, instance):
stub = get_stub()
self.log.debug("instance::{}".format(instance.data))
item = instance.data
comp_name = item["comp_name"].replace(stub.PUBLISH_ICON, '')
stub.rename_item(item["comp_id"], comp_name)
instance.data["comp_name"] = comp_name

View file

@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-
"""Validate presence of footage items in composition
Requires:
"""
import os
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError
)
from ayon_aftereffects.api import get_stub
class ValidateFootageItems(pyblish.api.InstancePlugin):
"""
Validates that FootageItems contained in the composition exist.
AE fails silently and doesn't render anything if a footage item file is
missing. This results in an unresponsive AE UI as it expects a
reaction from the user, but never shows a dialog.
This validator checks the existence of the files.
It will not protect against a missing frame in multi-frame footage though
(the AE API doesn't provide this information and it cannot easily be told
how many frames should be there). A missing frame is replaced by a placeholder.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Footage Items"
families = ["render.farm", "render.local", "render"]
hosts = ["aftereffects"]
optional = True
def process(self, instance):
"""Plugin entry point."""
comp_id = instance.data["comp_id"]
for footage_item in get_stub().get_items(comps=False, folders=False,
footages=True):
self.log.info(footage_item)
if comp_id not in footage_item.containing_comps:
continue
path = footage_item.path
if path and not os.path.exists(path):
msg = f"File {path} not found."
formatting = {"name": footage_item.name, "path": path}
raise PublishXmlValidationError(self, msg,
formatting_data=formatting)

View file

@ -0,0 +1,64 @@
import pyblish.api
from ayon_core.pipeline import get_current_folder_path
from ayon_core.pipeline.publish import (
ValidateContentsOrder,
PublishXmlValidationError,
)
from ayon_aftereffects.api import get_stub
class ValidateInstanceFolderRepair(pyblish.api.Action):
"""Repair the instance folder with value from Context."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = get_stub()
for instance in instances:
data = stub.read(instance[0])
data["folderPath"] = get_current_folder_path()
stub.imprint(instance[0].instance_id, data)
class ValidateInstanceFolder(pyblish.api.InstancePlugin):
"""Validate the instance folder is the current selected context folder.
As it might happen that multiple worfiles are opened at same time,
switching between them would mess with selected context. (From Launcher
or Ftrack).
In that case outputs might be output under wrong folder!
Repair action will use Context folder value (from Workfiles or Launcher)
Closing and reopening with Workfiles will refresh Context value.
"""
label = "Validate Instance Folder"
hosts = ["aftereffects"]
actions = [ValidateInstanceFolderRepair]
order = ValidateContentsOrder
def process(self, instance):
instance_folder = instance.data["folderPath"]
current_folder = get_current_folder_path()
msg = (
f"Instance folder {instance_folder} is not the same "
f"as current context {current_folder}."
)
if instance_folder != current_folder:
raise PublishXmlValidationError(self, msg)

View file

@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
"""Validate scene settings.
Requires:
instance -> folderEntity
instance -> anatomyData
"""
import os
import re
import pyblish.api
from ayon_core.pipeline import (
PublishXmlValidationError,
OptionalPyblishPluginMixin
)
from ayon_aftereffects.api import get_folder_settings
class ValidateSceneSettings(OptionalPyblishPluginMixin,
pyblish.api.InstancePlugin):
"""
Ensures that Composition Settings (right-click on the comp) are the same as
on the task in FTrack.
By default it checks only the duration - how many frames should be rendered.
Compares:
Frame end - Frame start + 1 from FTrack
against
Duration in Composition Settings.
If this complains:
Check the error message to see where the discrepancy is.
Check the 'pype' section of task attributes on the FTrack task for the
expected values.
Check/modify the Composition Settings of the rendered composition.
If you know what you are doing, run publishing again with this
validation unchecked before the Validation phase.
"""
"""
Dev docu:
Could be configured by 'presets/plugins/aftereffects/publish'
skip_timelines_check - fill in task names for which to skip validation of
    frameStart
    frameEnd
    fps
    handleStart
    handleEnd
skip_resolution_check - fill in entity type ('folder') to skip validation of
    resolutionWidth
    resolutionHeight
TODO support in extension is missing for now
By default validates the duration (how many frames should be published)
"""
order = pyblish.api.ValidatorOrder
label = "Validate Scene Settings"
families = ["render.farm", "render.local", "render"]
hosts = ["aftereffects"]
settings_category = "aftereffects"
optional = True
skip_timelines_check = [".*"] # * >> skip for all
skip_resolution_check = [".*"]
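# A hedged sketch of how these defaults could be narrowed in settings
# (task-name patterns below are hypothetical examples, not shipped values):
#   skip_timelines_check = ["^animation$", "^layout.*"]
#   skip_resolution_check = ["^concept.*"]
# Any task whose name matches one of the regexes is excluded from the
# corresponding check in process() below.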
def process(self, instance):
"""Plugin entry point."""
# Skip the instance if is not active by data on the instance
if not self.is_active(instance.data):
return
folder_entity = instance.data["folderEntity"]
expected_settings = get_folder_settings(folder_entity)
self.log.info("config from DB::{}".format(expected_settings))
task_name = instance.data["task"]
if any(re.search(pattern, task_name)
for pattern in self.skip_resolution_check):
expected_settings.pop("resolutionWidth")
expected_settings.pop("resolutionHeight")
if any(re.search(pattern, task_name)
for pattern in self.skip_timelines_check):
expected_settings.pop('fps', None)
expected_settings.pop('frameStart', None)
expected_settings.pop('frameEnd', None)
expected_settings.pop('handleStart', None)
expected_settings.pop('handleEnd', None)
# handle case where ftrack uses only two decimal places
# 23.976023976023978 vs. 23.98
fps = instance.data.get("fps")
if fps:
if isinstance(fps, float):
fps = float(
"{:.2f}".format(fps))
expected_settings["fps"] = fps
duration = instance.data.get("frameEndHandle") - \
instance.data.get("frameStartHandle") + 1
self.log.debug("validated items::{}".format(expected_settings))
current_settings = {
"fps": fps,
"frameStart": instance.data.get("frameStart"),
"frameEnd": instance.data.get("frameEnd"),
"handleStart": instance.data.get("handleStart"),
"handleEnd": instance.data.get("handleEnd"),
"frameStartHandle": instance.data.get("frameStartHandle"),
"frameEndHandle": instance.data.get("frameEndHandle"),
"resolutionWidth": instance.data.get("resolutionWidth"),
"resolutionHeight": instance.data.get("resolutionHeight"),
"duration": duration
}
self.log.info("current_settings:: {}".format(current_settings))
invalid_settings = []
invalid_keys = set()
for key, value in expected_settings.items():
if value != current_settings[key]:
msg = "'{}' expected: '{}' found: '{}'".format(
key, value, current_settings[key])
if key == "duration" and expected_settings.get("handleStart"):
msg += "Handles included in calculation. Remove " \
"handles in DB or extend frame range in " \
"Composition Setting."
invalid_settings.append(msg)
invalid_keys.add(key)
if invalid_settings:
msg = "Found invalid settings:\n{}".format(
"\n".join(invalid_settings)
)
invalid_keys_str = ",".join(invalid_keys)
break_str = "<br/>"
invalid_setting_str = "<b>Found invalid settings:</b><br/>{}".\
format(break_str.join(invalid_settings))
formatting_data = {
"invalid_setting_str": invalid_setting_str,
"invalid_keys_str": invalid_keys_str
}
raise PublishXmlValidationError(self, msg,
formatting_data=formatting_data)
if not os.path.exists(instance.data.get("source")):
scene_url = instance.data.get("source")
msg = "Scene file {} not found (saved under wrong name)".format(
scene_url
)
formatting_data = {
"scene_url": scene_url
}
raise PublishXmlValidationError(self, msg, key="file_not_found",
formatting_data=formatting_data)

View file

@ -0,0 +1,51 @@
from ayon_core.pipeline.workfile.workfile_template_builder import (
CreatePlaceholderItem,
PlaceholderCreateMixin
)
from ayon_aftereffects.api import (
get_stub,
workfile_template_builder as wtb,
)
from ayon_aftereffects.api.lib import set_settings
class AEPlaceholderCreatePlugin(wtb.AEPlaceholderPlugin,
PlaceholderCreateMixin):
"""Adds Create placeholder.
This adds composition and runs Create
"""
identifier = "aftereffects.create"
label = "AfterEffects create"
def _create_placeholder_item(self, item_data) -> CreatePlaceholderItem:
return CreatePlaceholderItem(
scene_identifier=item_data["uuid"],
data=item_data["data"],
plugin=self
)
def create_placeholder(self, placeholder_data):
stub = get_stub()
name = "CREATEPLACEHOLDER"
item_id = stub.add_item(name, "COMP")
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Replace 'placeholder' with publishable instance.
Renames prepared composition name, creates publishable instance, sets
frame/duration settings according to DB.
"""
pre_create_data = {"use_selection": True}
item_id, item = self._get_item(placeholder)
get_stub().select_items([item_id])
self.populate_create_placeholder(placeholder, pre_create_data)
# apply settings for populated composition
item_id, metadata_item = self._get_item(placeholder)
set_settings(True, True, [item_id])
def get_placeholder_options(self, options=None):
return self.get_create_plugin_options(options)

View file

@ -0,0 +1,62 @@
from ayon_core.pipeline.workfile.workfile_template_builder import (
LoadPlaceholderItem,
PlaceholderLoadMixin
)
from ayon_aftereffects.api import (
get_stub,
workfile_template_builder as wtb,
)
class AEPlaceholderLoadPlugin(wtb.AEPlaceholderPlugin, PlaceholderLoadMixin):
identifier = "aftereffects.load"
label = "AfterEffects load"
def _create_placeholder_item(self, item_data) -> LoadPlaceholderItem:
return LoadPlaceholderItem(
scene_identifier=item_data["uuid"],
data=item_data["data"],
plugin=self
)
def create_placeholder(self, placeholder_data):
"""Creates AE's Placeholder item in Project items list.
Sets dummy resolution/duration/fps settings, will be replaced when
populated.
"""
stub = get_stub()
name = "LOADERPLACEHOLDER"
item_id = stub.add_placeholder(name, 1920, 1060, 25, 10)
self._imprint_item(item_id, name, placeholder_data, stub)
def populate_placeholder(self, placeholder):
"""Use Openpype Loader from `placeholder` to create new FootageItems
New FootageItems are created, files are imported.
"""
self.populate_load_placeholder(placeholder)
errors = placeholder.get_errors()
stub = get_stub()
if errors:
stub.print_msg("\n".join(errors))
else:
if not placeholder.data["keep_placeholder"]:
metadata = stub.get_metadata()
for item in metadata:
if not item.get("is_placeholder"):
continue
scene_identifier = item.get("uuid")
if (scene_identifier and
scene_identifier == placeholder.scene_identifier):
stub.delete_item(item["members"][0])
stub.remove_instance(placeholder.scene_identifier, metadata)
def get_placeholder_options(self, options=None):
return self.get_load_plugin_options(options)
def load_succeed(self, placeholder, container):
placeholder_item_id, _ = self._get_item(placeholder)
item_id = container.id
get_stub().add_item_instead_placeholder(placeholder_item_id, item_id)

View file

@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'aftereffects' version."""
__version__ = "0.2.1"

View file

@ -0,0 +1,6 @@
[project]
name="aftereffects"
description="AYON AfterEffects addon."
[ayon.runtimeDependencies]
wsrpc_aiohttp = "^3.1.1" # websocket server

View file

@ -1,3 +1,10 @@
name = "aftereffects"
title = "AfterEffects"
version = "0.1.4"
version = "0.2.1"
client_dir = "ayon_aftereffects"
ayon_required_addons = {
"core": ">0.3.2",
}
ayon_compatible_addons = {}

View file

@ -1,3 +1,4 @@
from .version import __version__
from .constants import (
APPLICATIONS_ADDON_ROOT,
DEFAULT_ENV_SUBGROUP,
@ -31,6 +32,8 @@ from .addon import ApplicationsAddon
__all__ = (
"__version__",
"APPLICATIONS_ADDON_ROOT",
"DEFAULT_ENV_SUBGROUP",
"PLATFORM_NAMES",

View file

@ -3,6 +3,7 @@ import json
from ayon_core.addon import AYONAddon, IPluginPaths, click_wrap
from .version import __version__
from .constants import APPLICATIONS_ADDON_ROOT
from .defs import LaunchTypes
from .manager import ApplicationManager
@ -10,6 +11,7 @@ from .manager import ApplicationManager
class ApplicationsAddon(AYONAddon, IPluginPaths):
name = "applications"
version = __version__
def initialize(self, settings):
# TODO remove when addon is removed from ayon-core

View file

@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'applications' version."""
__version__ = "0.2.4"

View file

@ -1,6 +1,8 @@
name = "applications"
title = "Applications"
version = "0.2.2"
version = "0.2.4"
client_dir = "ayon_applications"
ayon_server_version = ">=1.0.7"
ayon_launcher_version = ">=1.0.2"

View file

@ -1293,6 +1293,41 @@
}
]
},
"motionbuilder": {
"enabled": true,
"label": "Motion Builder",
"icon": "{}/app_icons/motionbuilder.png",
"host_name": "motionbuilder",
"environment": "{}",
"variants": [
{
"name": "2025",
"label": "2025",
"use_python_2": false,
"executables": {
"windows": [
"C:\\Program Files\\Autodesk\\MotionBuilder 2025\\bin\\x64\\motionbuilder.exe"
],
"darwin": [],
"linux": []
},
"environment": "{}"
},
{
"name": "2024",
"label": "2024",
"use_python_2": false,
"executables": {
"windows": [
"C:\\Program Files\\Autodesk\\MotionBuilder 2024\\bin\\x64\\motionbuilder.exe"
],
"darwin": [],
"linux": []
},
"environment": "{}"
}
]
},
"additional_apps": []
}
}

View file

@ -192,6 +192,8 @@ class ApplicationsSettings(BaseSettingsModel):
default_factory=AppGroupWithPython, title="Zbrush")
equalizer: AppGroup = SettingsField(
default_factory=AppGroupWithPython, title="3DEqualizer")
motionbuilder: AppGroup = SettingsField(
default_factory=AppGroupWithPython, title="Motion Builder")
additional_apps: list[AdditionalAppGroup] = SettingsField(
default_factory=list, title="Additional Applications")
@ -212,7 +214,13 @@ class ApplicationsAddonSettings(BaseSettingsModel):
scope=["studio"]
)
only_available: bool = SettingsField(
True, title="Show only available applications")
True,
title="Show only available applications",
description="Enable to show only applications in AYON Launcher"
" for which the executable paths are found on the running machine."
" This applies as an additional filter to the applications defined in a "
" project's anatomy settings to ignore unavailable applications."
)
@validator("tool_groups")
def validate_unique_name(cls, value):

View file

@ -0,0 +1,13 @@
from .version import __version__
from .addon import (
BlenderAddon,
BLENDER_ADDON_ROOT,
)
__all__ = (
"__version__",
"BlenderAddon",
"BLENDER_ADDON_ROOT",
)

View file

@ -0,0 +1,71 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon
from .version import __version__
BLENDER_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))
class BlenderAddon(AYONAddon, IHostAddon):
name = "blender"
version = __version__
host_name = "blender"
def add_implementation_envs(self, env, _app):
"""Modify environments to contain all required for implementation."""
# Prepare path to implementation script
implementation_user_script_path = os.path.join(
BLENDER_ADDON_ROOT,
"blender_addon"
)
# Add blender implementation script path to PYTHONPATH
python_path = env.get("PYTHONPATH") or ""
python_path_parts = [
path
for path in python_path.split(os.pathsep)
if path
]
python_path_parts.insert(0, implementation_user_script_path)
env["PYTHONPATH"] = os.pathsep.join(python_path_parts)
# Modify Blender user scripts path
previous_user_scripts = set()
# Implementation path is added to set for easier paths check inside
# loops - will be removed at the end
previous_user_scripts.add(implementation_user_script_path)
ayon_blender_user_scripts = (
env.get("AYON_BLENDER_USER_SCRIPTS") or ""
)
for path in ayon_blender_user_scripts.split(os.pathsep):
if path:
previous_user_scripts.add(os.path.normpath(path))
blender_user_scripts = env.get("BLENDER_USER_SCRIPTS") or ""
for path in blender_user_scripts.split(os.pathsep):
if path:
previous_user_scripts.add(os.path.normpath(path))
# Remove implementation path from user script paths as is set to
# `BLENDER_USER_SCRIPTS`
previous_user_scripts.remove(implementation_user_script_path)
env["BLENDER_USER_SCRIPTS"] = implementation_user_script_path
# Set custom user scripts env
env["AYON_BLENDER_USER_SCRIPTS"] = os.pathsep.join(
previous_user_scripts
)
# Remove any preferred Qt binding from the environment
env.pop("QT_PREFERRED_BINDING", None)
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(BLENDER_ADDON_ROOT, "hooks")
]
def get_workfile_extensions(self):
return [".blend"]

View file

@ -0,0 +1,72 @@
"""Public API
Anything that isn't defined here is INTERNAL and unreliable for external use.
"""
from .pipeline import (
install,
uninstall,
ls,
publish,
containerise,
BlenderHost,
)
from .plugin import (
Creator,
)
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root,
)
from .lib import (
lsattr,
lsattrs,
read,
maintained_selection,
maintained_time,
get_selection,
# unique_name,
)
from .capture import capture
from .render_lib import prepare_rendering
__all__ = [
"install",
"uninstall",
"ls",
"publish",
"containerise",
"BlenderHost",
"Creator",
# Workfiles API
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root",
# Utility functions
"maintained_selection",
"maintained_time",
"lsattr",
"lsattrs",
"read",
"get_selection",
"capture",
# "unique_name",
"prepare_rendering",
]

View file

@ -0,0 +1,47 @@
import bpy
import pyblish.api
from ayon_core.pipeline.publish import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid objects in Blender when a publish plug-in failed."""
label = "Select Invalid"
on = "failed"
icon = "search"
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(context,
plugin=plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes...")
invalid = list()
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning(
"Failed plug-in doesn't have any selectable objects."
)
bpy.ops.object.select_all(action='DESELECT')
# Make sure every node is only processed once
invalid = list(set(invalid))
if not invalid:
self.log.info("No invalid nodes found.")
return
invalid_names = [obj.name for obj in invalid]
self.log.info(
"Selecting invalid objects: %s", ", ".join(invalid_names)
)
# Select the objects and also make the last one the active object.
for obj in invalid:
obj.select_set(True)
bpy.context.view_layer.objects.active = invalid[-1]

View file

@ -0,0 +1,282 @@
"""Blender Capture
Playblasting with independent viewport, camera and display options
"""
import contextlib
import bpy
from .lib import maintained_time
from .plugin import deselect_all, create_blender_context
def capture(
camera=None,
width=None,
height=None,
filename=None,
start_frame=None,
end_frame=None,
step_frame=None,
sound=None,
isolate=None,
maintain_aspect_ratio=True,
overwrite=False,
image_settings=None,
display_options=None
):
"""Playblast in an independent windows
Arguments:
camera (str, optional): Name of camera, defaults to "Camera"
width (int, optional): Width of output in pixels
height (int, optional): Height of output in pixels
filename (str, optional): Name of output file path. Defaults to current
render output path.
start_frame (int, optional): Defaults to current start frame.
end_frame (int, optional): Defaults to current end frame.
step_frame (int, optional): Defaults to 1.
sound (str, optional): Specify the sound node to be used during
playblast. When None (default) no sound will be used.
isolate (list): List of nodes to isolate upon capturing
maintain_aspect_ratio (bool, optional): Modify height in order to
maintain aspect ratio.
overwrite (bool, optional): Whether or not to overwrite if the file
already exists. If disabled and the file exists, an error will be
raised.
image_settings (dict, optional): Supplied image settings for render,
using `ImageSettings`
display_options (dict, optional): Supplied display options for render
"""
scene = bpy.context.scene
camera = camera or "Camera"
# Ensure camera exists.
if camera not in scene.objects and camera != "AUTO":
raise RuntimeError("Camera does not exist: {0}".format(camera))
# Ensure resolution.
if width and height:
maintain_aspect_ratio = False
width = width or scene.render.resolution_x
height = height or scene.render.resolution_y
if maintain_aspect_ratio:
ratio = scene.render.resolution_x / scene.render.resolution_y
height = round(width / ratio)
# Get frame range.
if start_frame is None:
start_frame = scene.frame_start
if end_frame is None:
end_frame = scene.frame_end
if step_frame is None:
step_frame = 1
frame_range = (start_frame, end_frame, step_frame)
if filename is None:
filename = scene.render.filepath
render_options = {
"filepath": "{}.".format(filename.rstrip(".")),
"resolution_x": width,
"resolution_y": height,
"use_overwrite": overwrite,
}
with _independent_window() as window:
applied_view(window, camera, isolate, options=display_options)
with contextlib.ExitStack() as stack:
stack.enter_context(maintain_camera(window, camera))
stack.enter_context(applied_frame_range(window, *frame_range))
stack.enter_context(applied_render_options(window, render_options))
stack.enter_context(applied_image_settings(window, image_settings))
stack.enter_context(maintained_time())
bpy.ops.render.opengl(
animation=True,
render_keyed_only=False,
sequencer=False,
write_still=False,
view_context=True
)
return filename
ImageSettings = {
"file_format": "FFMPEG",
"color_mode": "RGB",
"ffmpeg": {
"format": "QUICKTIME",
"use_autosplit": False,
"codec": "H264",
"constant_rate_factor": "MEDIUM",
"gopsize": 18,
"use_max_b_frames": False,
},
}
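# A minimal usage sketch (assumed call, not executed by this module): override
# the defaults above to write a PNG sequence instead of a QuickTime movie.
#   capture(
#       camera="Camera",
#       filename="/tmp/playblast",
#       image_settings={"file_format": "PNG", "color_mode": "RGBA"},
#   )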
def isolate_objects(window, objects):
"""Isolate selection"""
deselect_all()
for obj in objects:
obj.select_set(True)
context = create_blender_context(selected=objects, window=window)
with bpy.context.temp_override(**context):
bpy.ops.view3d.view_axis(type="FRONT")
bpy.ops.view3d.localview()
deselect_all()
def _apply_options(entity, options):
for option, value in options.items():
if isinstance(value, dict):
_apply_options(getattr(entity, option), value)
else:
setattr(entity, option, value)
def applied_view(window, camera, isolate=None, options=None):
"""Apply view options to window."""
area = window.screen.areas[0]
space = area.spaces[0]
area.ui_type = "VIEW_3D"
types = {"MESH", "GPENCIL"}
objects = [obj for obj in window.scene.objects if obj.type in types]
if camera == "AUTO":
space.region_3d.view_perspective = "ORTHO"
isolate_objects(window, isolate or objects)
else:
isolate_objects(window, isolate or objects)
space.camera = window.scene.objects.get(camera)
space.region_3d.view_perspective = "CAMERA"
if isinstance(options, dict):
_apply_options(space, options)
else:
space.shading.type = "SOLID"
space.shading.color_type = "MATERIAL"
space.show_gizmo = False
space.overlay.show_overlays = False
@contextlib.contextmanager
def applied_frame_range(window, start, end, step):
"""Context manager for setting frame range."""
# Store current frame range
current_frame_start = window.scene.frame_start
current_frame_end = window.scene.frame_end
current_frame_step = window.scene.frame_step
# Apply frame range
window.scene.frame_start = start
window.scene.frame_end = end
window.scene.frame_step = step
try:
yield
finally:
# Restore frame range
window.scene.frame_start = current_frame_start
window.scene.frame_end = current_frame_end
window.scene.frame_step = current_frame_step
@contextlib.contextmanager
def applied_render_options(window, options):
"""Context manager for setting render options."""
render = window.scene.render
# Store current settings
original = {}
for opt in options.copy():
try:
original[opt] = getattr(render, opt)
except ValueError:
options.pop(opt)
# Apply settings
_apply_options(render, options)
try:
yield
finally:
# Restore previous settings
_apply_options(render, original)
@contextlib.contextmanager
def applied_image_settings(window, options):
"""Context manager to override image settings."""
options = options or ImageSettings.copy()
ffmpeg = options.pop("ffmpeg", {})
render = window.scene.render
# Store current image settings
original = {}
for opt in options.copy():
try:
original[opt] = getattr(render.image_settings, opt)
except ValueError:
options.pop(opt)
# Store current ffmpeg settings
original_ffmpeg = {}
for opt in ffmpeg.copy():
try:
original_ffmpeg[opt] = getattr(render.ffmpeg, opt)
except ValueError:
ffmpeg.pop(opt)
# Apply image settings
for opt, value in options.items():
setattr(render.image_settings, opt, value)
# Apply ffmpeg settings
for opt, value in ffmpeg.items():
setattr(render.ffmpeg, opt, value)
try:
yield
finally:
# Restore previous settings
for opt, value in original.items():
setattr(render.image_settings, opt, value)
for opt, value in original_ffmpeg.items():
setattr(render.ffmpeg, opt, value)
@contextlib.contextmanager
def maintain_camera(window, camera):
"""Context manager to override camera."""
current_camera = window.scene.camera
if camera in window.scene.objects:
window.scene.camera = window.scene.objects.get(camera)
try:
yield
finally:
window.scene.camera = current_camera
@contextlib.contextmanager
def _independent_window():
"""Create capture-window context."""
context = create_blender_context()
current_windows = set(bpy.context.window_manager.windows)
with bpy.context.temp_override(**context):
bpy.ops.wm.window_new()
window = list(
set(bpy.context.window_manager.windows) - current_windows)[0]
context["window"] = window
try:
yield window
finally:
bpy.ops.wm.window_close()

View file

@ -0,0 +1,51 @@
import attr
import bpy
@attr.s
class LayerMetadata(object):
"""Data class for Render Layer metadata."""
frameStart = attr.ib()
frameEnd = attr.ib()
@attr.s
class RenderProduct(object):
"""
Getting colorspace as a specific render product parameter for submitting
the publish job.
"""
colorspace = attr.ib() # colorspace
view = attr.ib() # OCIO view transform
productName = attr.ib(default=None)
class ARenderProduct(object):
def __init__(self):
"""Constructor."""
# Initialize
self.layer_data = self._get_layer_data()
self.layer_data.products = self.get_render_products()
def _get_layer_data(self):
scene = bpy.context.scene
return LayerMetadata(
frameStart=int(scene.frame_start),
frameEnd=int(scene.frame_end),
)
def get_render_products(self):
"""To be implemented by renderer class.
This should return a list of RenderProducts.
Returns:
list: List of RenderProduct
"""
return [
RenderProduct(
colorspace="sRGB",
view="ACES 1.0",
productName=""
)
]
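# A hypothetical subclass sketch showing how a renderer-specific class could
# override get_render_products() (names and values are illustrative only):
#   class CyclesRenderProduct(ARenderProduct):
#       def get_render_products(self):
#           return [
#               RenderProduct(
#                   colorspace="ACEScg",
#                   view="Filmic",
#                   productName="beauty",
#               )
#           ]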

Binary file not shown (added image, 632 B)

View file

@ -0,0 +1,426 @@
import os
import traceback
import importlib
import contextlib
from typing import Dict, List, Union
import bpy
import addon_utils
from ayon_core.lib import Logger
from . import pipeline
log = Logger.get_logger(__name__)
def load_scripts(paths):
"""Copy of `load_scripts` from Blender's implementation.
It is possible that this function will change in the future and usage will
be based on the Blender version.
"""
import bpy_types
loaded_modules = set()
previous_classes = [
cls
for cls in bpy.types.bpy_struct.__subclasses__()
]
def register_module_call(mod):
register = getattr(mod, "register", None)
if register:
try:
register()
except: # noqa E722
traceback.print_exc()
else:
print("\nWarning! '%s' has no register function, "
"this is now a requirement for registerable scripts" %
mod.__file__)
def unregister_module_call(mod):
unregister = getattr(mod, "unregister", None)
if unregister:
try:
unregister()
except: # noqa E722
traceback.print_exc()
def test_reload(mod):
# reloading this causes internal errors
# because the classes from this module are stored internally
# possibly to refresh internal references too but for now, best not to.
if mod == bpy_types:
return mod
try:
return importlib.reload(mod)
except: # noqa E722
traceback.print_exc()
def test_register(mod):
if mod:
register_module_call(mod)
bpy.utils._global_loaded_modules.append(mod.__name__)
from bpy_restrict_state import RestrictBlend
with RestrictBlend():
for base_path in paths:
for path_subdir in bpy.utils._script_module_dirs:
path = os.path.join(base_path, path_subdir)
if not os.path.isdir(path):
continue
bpy.utils._sys_path_ensure_prepend(path)
# Only add to 'sys.modules' unless this is 'startup'.
if path_subdir != "startup":
continue
for mod in bpy.utils.modules_from_path(path, loaded_modules):
test_register(mod)
addons_paths = []
for base_path in paths:
addons_path = os.path.join(base_path, "addons")
if not os.path.exists(addons_path):
continue
addons_paths.append(addons_path)
addons_module_path = os.path.join(addons_path, "modules")
if os.path.exists(addons_module_path):
bpy.utils._sys_path_ensure_prepend(addons_module_path)
if addons_paths:
# Fake addons
origin_paths = addon_utils.paths
def new_paths():
paths = origin_paths() + addons_paths
return paths
addon_utils.paths = new_paths
addon_utils.modules_refresh()
# load template (if set)
if any(bpy.utils.app_template_paths()):
import bl_app_template_utils
bl_app_template_utils.reset(reload_scripts=False)
del bl_app_template_utils
for cls in bpy.types.bpy_struct.__subclasses__():
if cls in previous_classes:
continue
if not getattr(cls, "is_registered", False):
continue
for subcls in cls.__subclasses__():
if not subcls.is_registered:
print(
"Warning, unregistered class: %s(%s)" %
(subcls.__name__, cls.__name__)
)
def append_user_scripts():
user_scripts = os.environ.get("AYON_BLENDER_USER_SCRIPTS")
if not user_scripts:
return
try:
load_scripts(user_scripts.split(os.pathsep))
except Exception:
print("Couldn't load user scripts \"{}\"".format(user_scripts))
traceback.print_exc()
def set_app_templates_path():
# Blender requires the app templates to be in `BLENDER_USER_SCRIPTS`.
# After running Blender, we set that variable to our custom path, so
# that the user can use their custom app templates.
# We look among the scripts paths for one of the paths that contains
# the app templates. The path must contain the subfolder
# `startup/bl_app_templates_user`.
paths = os.environ.get("AYON_BLENDER_USER_SCRIPTS").split(os.pathsep)
app_templates_path = None
for path in paths:
if os.path.isdir(
os.path.join(path, "startup", "bl_app_templates_user")):
app_templates_path = path
break
if app_templates_path and os.path.isdir(app_templates_path):
os.environ["BLENDER_USER_SCRIPTS"] = app_templates_path
def imprint(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
r"""Write `data` to `node` as userDefined attributes
Arguments:
node: Long name of node
data: Dictionary of key/value pairs
Example:
>>> import bpy
>>> def compute():
... return 6
...
>>> bpy.ops.mesh.primitive_cube_add()
>>> cube = bpy.context.view_layer.objects.active
>>> imprint(cube, {
... "regularString": "myFamily",
... "computedValue": lambda: compute()
... })
...
>>> cube['avalon']['computedValue']
6
"""
imprint_data = dict()
for key, value in data.items():
if value is None:
continue
if callable(value):
# Support values evaluated at imprint
value = value()
if not isinstance(value, (int, float, bool, str, list, dict)):
raise TypeError(f"Unsupported type: {type(value)}")
imprint_data[key] = value
pipeline.metadata_update(node, imprint_data)
def lsattr(attr: str,
value: Union[str, int, bool, List, Dict, None] = None) -> List:
r"""Return nodes matching `attr` and `value`
Arguments:
attr: Name of Blender property
value: Value of attribute. If none
is provided, return all nodes with this attribute.
Example:
>>> lsattr("id", "myId")
... [bpy.data.objects["myNode"]
>>> lsattr("id")
... [bpy.data.objects["myNode"], bpy.data.objects["myOtherNode"]]
Returns:
list
"""
return lsattrs({attr: value})
def lsattrs(attrs: Dict) -> List:
r"""Return nodes with the given attribute(s).
Arguments:
attrs: Name and value pairs of expected matches
Example:
>>> lsattrs({"age": 5}) # Return nodes with an `age` of 5
# Return nodes with both `age` and `color` of 5 and blue
>>> lsattrs({"age": 5, "color": "blue"})
Returns a list.
"""
# For now return all objects, not filtered by scene/collection/view_layer.
matches = set()
for coll in dir(bpy.data):
if not isinstance(
getattr(bpy.data, coll),
bpy.types.bpy_prop_collection,
):
continue
for node in getattr(bpy.data, coll):
for attr, value in attrs.items():
avalon_prop = node.get(pipeline.AVALON_PROPERTY)
if not avalon_prop:
continue
if (avalon_prop.get(attr)
and (value is None or avalon_prop.get(attr) == value)):
matches.add(node)
return list(matches)
def read(node: bpy.types.bpy_struct_meta_idprop):
"""Return user-defined attributes from `node`"""
data = dict(node.get(pipeline.AVALON_PROPERTY, {}))
# Ignore hidden/internal data
data = {
key: value
for key, value in data.items() if not key.startswith("_")
}
return data
def get_selected_collections():
"""
Returns a list of the currently selected collections in the outliner.
Raises:
RuntimeError: If the outliner cannot be found in the main Blender
window.
Returns:
list: A list of `bpy.types.Collection` objects that are currently
selected in the outliner.
"""
window = bpy.context.window or bpy.context.window_manager.windows[0]
try:
area = next(
area for area in window.screen.areas
if area.type == 'OUTLINER')
region = next(
region for region in area.regions
if region.type == 'WINDOW')
except StopIteration as e:
raise RuntimeError("Could not find outliner. An outliner space "
"must be in the main Blender window.") from e
with bpy.context.temp_override(
window=window,
area=area,
region=region,
screen=window.screen
):
ids = bpy.context.selected_ids
return [id for id in ids if isinstance(id, bpy.types.Collection)]
def get_selection(include_collections: bool = False) -> List[bpy.types.Object]:
"""
Returns a list of selected objects in the current Blender scene.
Args:
include_collections (bool, optional): Whether to include selected
collections in the result. Defaults to False.
Returns:
List[bpy.types.Object]: A list of selected objects.
"""
selection = [obj for obj in bpy.context.scene.objects if obj.select_get()]
if include_collections:
selection.extend(get_selected_collections())
return selection
@contextlib.contextmanager
def maintained_selection():
r"""Maintain selection during context
Example:
>>> with maintained_selection():
... # Modify selection
... bpy.ops.object.select_all(action='DESELECT')
>>> # Selection restored
"""
previous_selection = get_selection()
previous_active = bpy.context.view_layer.objects.active
try:
yield
finally:
# Clear the selection
for node in get_selection():
node.select_set(state=False)
if previous_selection:
for node in previous_selection:
try:
node.select_set(state=True)
except ReferenceError:
# This could happen if a selected node was deleted during
# the context.
log.exception("Failed to reselect")
continue
try:
bpy.context.view_layer.objects.active = previous_active
except ReferenceError:
# This could happen if the active node was deleted during the
# context.
log.exception("Failed to set active object.")
@contextlib.contextmanager
def maintained_time():
"""Maintain current frame during context."""
current_time = bpy.context.scene.frame_current
try:
yield
finally:
bpy.context.scene.frame_current = current_time
def get_all_parents(obj):
"""Get all recursive parents of object.
Arguments:
obj (bpy.types.Object): Object to get all parents for.
Returns:
List[bpy.types.Object]: All parents of object
"""
result = []
while True:
obj = obj.parent
if not obj:
break
result.append(obj)
return result
def get_highest_root(objects):
"""Get the highest object (the least parents) among the objects.
If multiple objects have the same amount of parents (or no parents) the
first object found in the input iterable will be returned.
Note that this will *not* return objects outside of the input list, as
such it will not return the root of node from a child node. It is purely
intended to find the highest object among a list of objects. To instead
get the root from one object use, e.g. `get_all_parents(obj)[-1]`
Arguments:
objects (List[bpy.types.Object]): Objects to find the highest root in.
Returns:
Optional[bpy.types.Object]: First highest root found or None if no
`bpy.types.Object` found in input list.
"""
included_objects = {obj.name_full for obj in objects}
num_parents_to_obj = {}
for obj in objects:
if isinstance(obj, bpy.types.Object):
parents = get_all_parents(obj)
# included parents
parents = [parent for parent in parents if
parent.name_full in included_objects]
if not parents:
# A node without parents must be a highest root
return obj
num_parents_to_obj.setdefault(len(parents), obj)
if not num_parents_to_obj:
return
minimum_parent = min(num_parents_to_obj)
return num_parents_to_obj[minimum_parent]

View file

@ -0,0 +1,456 @@
"""Blender operators and menus for use with Avalon."""
import os
import sys
import platform
import time
import traceback
import collections
from pathlib import Path
from types import ModuleType
from typing import Dict, List, Optional, Union
from qtpy import QtWidgets, QtCore
import bpy
import bpy.utils.previews
from ayon_core import style
from ayon_core.pipeline import get_current_folder_path, get_current_task_name
from ayon_core.tools.utils import host_tools
from .workio import OpenFileCacher
from . import pipeline
PREVIEW_COLLECTIONS: Dict = dict()
# This seems like a good value to keep the Qt app responsive without slowing
# down Blender. At least on macOS the interface of Blender gets very laggy if
# you make the interval smaller.
TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1
def execute_function_in_main_thread(f):
"""Decorator to move a function call into main thread items"""
def wrapper(*args, **kwargs):
mti = MainThreadItem(f, *args, **kwargs)
execute_in_main_thread(mti)
return wrapper
class BlenderApplication(QtWidgets.QApplication):
_instance = None
blender_windows = {}
def __init__(self, *args, **kwargs):
super(BlenderApplication, self).__init__(*args, **kwargs)
self.setQuitOnLastWindowClosed(False)
self.setStyleSheet(style.load_stylesheet())
self.lastWindowClosed.connect(self.__class__.reset)
@classmethod
def get_app(cls):
if cls._instance is None:
cls._instance = cls(sys.argv)
return cls._instance
@classmethod
def reset(cls):
cls._instance = None
@classmethod
def store_window(cls, identifier, window):
current_window = cls.get_window(identifier)
cls.blender_windows[identifier] = window
if current_window:
current_window.close()
# current_window.deleteLater()
@classmethod
def get_window(cls, identifier):
return cls.blender_windows.get(identifier)
class MainThreadItem:
"""Structure to store information about callback in main thread.
Item should be used to execute callback in main thread which may be needed
for execution of Qt objects.
Item store callback (callable variable), arguments and keyword arguments
for the callback. Item hold information about it's process.
"""
not_set = object()
sleep_time = 0.1
def __init__(self, callback, *args, **kwargs):
self.done = False
self.exception = self.not_set
self.result = self.not_set
self.callback = callback
self.args = args
self.kwargs = kwargs
def execute(self):
"""Execute callback and store its result.
Method must be called from main thread. Item is marked as `done`
when callback execution finished. Store output of callback of exception
information when callback raises one.
"""
print("Executing process in main thread")
if self.done:
print("- item is already processed")
return
callback = self.callback
args = self.args
kwargs = self.kwargs
print("Running callback: {}".format(str(callback)))
try:
result = callback(*args, **kwargs)
self.result = result
except Exception:
self.exception = sys.exc_info()
finally:
print("Done")
self.done = True
def wait(self):
"""Wait for result from main thread.
This method stops current thread until callback is executed.
Returns:
object: Output of callback. May be any type or object.
Raises:
Exception: Reraise any exception that happened during callback
execution.
"""
while not self.done:
print(self.done)
time.sleep(self.sleep_time)
if self.exception is self.not_set:
return self.result
raise self.exception
class GlobalClass:
app = None
main_thread_callbacks = collections.deque()
is_windows = platform.system().lower() == "windows"
def execute_in_main_thread(main_thead_item):
print("execute_in_main_thread")
GlobalClass.main_thread_callbacks.append(main_thead_item)
def _process_app_events() -> Optional[float]:
"""Process the events of the Qt app if the window is still visible.
If the app has any top level windows and at least one of them is visible
return the time after which this function should be run again. Else return
None, so the function is not run again and will be unregistered.
"""
while GlobalClass.main_thread_callbacks:
main_thread_item = GlobalClass.main_thread_callbacks.popleft()
main_thread_item.execute()
if main_thread_item.exception is not MainThreadItem.not_set:
_clc, val, tb = main_thread_item.exception
msg = str(val)
detail = "\n".join(traceback.format_exception(_clc, val, tb))
dialog = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
"Error",
msg)
dialog.setMinimumWidth(500)
dialog.setDetailedText(detail)
dialog.exec_()
# Refresh Manager
if GlobalClass.app:
manager = GlobalClass.app.get_window("WM_OT_avalon_manager")
if manager:
manager.refresh()
if not GlobalClass.is_windows:
if OpenFileCacher.opening_file:
return TIMER_INTERVAL
app = GlobalClass.app
if app._instance:
app.processEvents()
return TIMER_INTERVAL
return TIMER_INTERVAL
class LaunchQtApp(bpy.types.Operator):
"""A Base class for operators to launch a Qt app."""
_app: QtWidgets.QApplication
_window = Union[QtWidgets.QDialog, ModuleType]
_tool_name: str = None
_init_args: Optional[List] = list()
_init_kwargs: Optional[Dict] = dict()
bl_idname: str = None
def __init__(self):
if self.bl_idname is None:
raise NotImplementedError("Attribute `bl_idname` must be set!")
print(f"Initialising {self.bl_idname}...")
self._app = BlenderApplication.get_app()
GlobalClass.app = self._app
if not bpy.app.timers.is_registered(_process_app_events):
bpy.app.timers.register(
_process_app_events,
persistent=True
)
def execute(self, context):
"""Execute the operator.
The child class must implement `execute()` where it only has to set
`self._window` to the desired Qt window and then simply run
`return super().execute(context)`.
`self._window` is expected to have a `show` method.
If the `show` method requires arguments, you can set `self._show_args`
and `self._show_kwargs`. `args` should be a list, `kwargs` a
dictionary.
"""
if self._tool_name is None:
if self._window is None:
raise AttributeError("`self._window` is not set.")
else:
window = self._app.get_window(self.bl_idname)
if window is None:
window = host_tools.get_tool_by_name(self._tool_name)
self._app.store_window(self.bl_idname, window)
self._window = window
if not isinstance(self._window, (QtWidgets.QWidget, ModuleType)):
raise AttributeError(
"`window` should be a `QWidget or module`. Got: {}".format(
str(type(window))
)
)
self.before_window_show()
def pull_to_front(window):
"""Pull window forward to screen.
If Window is minimized this will un-minimize, then it can be raised
and activated to the front.
"""
window.setWindowState(
(window.windowState() & ~QtCore.Qt.WindowMinimized) |
QtCore.Qt.WindowActive
)
window.raise_()
window.activateWindow()
if isinstance(self._window, ModuleType):
self._window.show()
pull_to_front(self._window)
# Pull window to the front
window = None
if hasattr(self._window, "window"):
window = self._window.window
elif hasattr(self._window, "_window"):
window = self._window._window
if window:
self._app.store_window(self.bl_idname, window)
else:
origin_flags = self._window.windowFlags()
on_top_flags = origin_flags | QtCore.Qt.WindowStaysOnTopHint
self._window.setWindowFlags(on_top_flags)
self._window.show()
pull_to_front(self._window)
# if on_top_flags != origin_flags:
# self._window.setWindowFlags(origin_flags)
# self._window.show()
return {'FINISHED'}
def before_window_show(self):
return
class LaunchCreator(LaunchQtApp):
"""Launch Avalon Creator."""
bl_idname = "wm.avalon_creator"
bl_label = "Create..."
_tool_name = "creator"
def before_window_show(self):
self._window.refresh()
def execute(self, context):
host_tools.show_publisher(tab="create")
return {"FINISHED"}
class LaunchLoader(LaunchQtApp):
"""Launch AYON Loader."""
bl_idname = "wm.avalon_loader"
bl_label = "Load..."
_tool_name = "loader"
class LaunchPublisher(LaunchQtApp):
"""Launch Avalon Publisher."""
bl_idname = "wm.avalon_publisher"
bl_label = "Publish..."
def execute(self, context):
host_tools.show_publisher(tab="publish")
return {"FINISHED"}
class LaunchManager(LaunchQtApp):
"""Launch Avalon Manager."""
bl_idname = "wm.avalon_manager"
bl_label = "Manage..."
_tool_name = "sceneinventory"
class LaunchLibrary(LaunchQtApp):
"""Launch Library Loader."""
bl_idname = "wm.library_loader"
bl_label = "Library..."
_tool_name = "libraryloader"
class LaunchWorkFiles(LaunchQtApp):
"""Launch Avalon Work Files."""
bl_idname = "wm.avalon_workfiles"
bl_label = "Work Files..."
_tool_name = "workfiles"
def execute(self, context):
return super().execute(context)
class SetFrameRange(bpy.types.Operator):
bl_idname = "wm.ayon_set_frame_range"
bl_label = "Set Frame Range"
def execute(self, context):
data = pipeline.get_folder_attributes()
pipeline.set_frame_range(data)
return {"FINISHED"}
class SetResolution(bpy.types.Operator):
bl_idname = "wm.ayon_set_resolution"
bl_label = "Set Resolution"
def execute(self, context):
data = pipeline.get_folder_attributes()
pipeline.set_resolution(data)
return {"FINISHED"}
class TOPBAR_MT_avalon(bpy.types.Menu):
"""Avalon menu."""
bl_idname = "TOPBAR_MT_avalon"
bl_label = os.environ.get("AYON_MENU_LABEL")
def draw(self, context):
"""Draw the menu in the UI."""
layout = self.layout
pcoll = PREVIEW_COLLECTIONS.get("avalon")
if pcoll:
pyblish_menu_icon = pcoll["pyblish_menu_icon"]
pyblish_menu_icon_id = pyblish_menu_icon.icon_id
else:
pyblish_menu_icon_id = 0
folder_path = get_current_folder_path()
task_name = get_current_task_name()
context_label = f"{folder_path}, {task_name}"
context_label_item = layout.row()
context_label_item.operator(
LaunchWorkFiles.bl_idname, text=context_label
)
context_label_item.enabled = False
layout.separator()
layout.operator(LaunchCreator.bl_idname, text="Create...")
layout.operator(LaunchLoader.bl_idname, text="Load...")
layout.operator(
LaunchPublisher.bl_idname,
text="Publish...",
icon_value=pyblish_menu_icon_id,
)
layout.operator(LaunchManager.bl_idname, text="Manage...")
layout.operator(LaunchLibrary.bl_idname, text="Library...")
layout.separator()
layout.operator(SetFrameRange.bl_idname, text="Set Frame Range")
layout.operator(SetResolution.bl_idname, text="Set Resolution")
layout.separator()
layout.operator(LaunchWorkFiles.bl_idname, text="Work Files...")
def draw_avalon_menu(self, context):
"""Draw the Avalon menu in the top bar."""
self.layout.menu(TOPBAR_MT_avalon.bl_idname)
classes = [
LaunchCreator,
LaunchLoader,
LaunchPublisher,
LaunchManager,
LaunchLibrary,
LaunchWorkFiles,
SetFrameRange,
SetResolution,
TOPBAR_MT_avalon,
]
def register():
"Register the operators and menu."
pcoll = bpy.utils.previews.new()
pyblish_icon_file = Path(__file__).parent / "icons" / "pyblish-32x32.png"
pcoll.load("pyblish_menu_icon", str(pyblish_icon_file.absolute()), 'IMAGE')
PREVIEW_COLLECTIONS["avalon"] = pcoll
BlenderApplication.get_app()
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.TOPBAR_MT_editor_menus.append(draw_avalon_menu)
def unregister():
"""Unregister the operators and menu."""
pcoll = PREVIEW_COLLECTIONS.pop("avalon")
bpy.utils.previews.remove(pcoll)
bpy.types.TOPBAR_MT_editor_menus.remove(draw_avalon_menu)
for cls in reversed(classes):
bpy.utils.unregister_class(cls)

View file

@ -0,0 +1,574 @@
import os
import sys
import traceback
from typing import Callable, Dict, Iterator, List, Optional
import bpy
import pyblish.api
import ayon_api
from ayon_core.host import (
HostBase,
IWorkfileHost,
IPublishHost,
ILoadHost
)
from ayon_core.pipeline import (
schema,
get_current_project_name,
get_current_folder_path,
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
AYON_CONTAINER_ID,
)
from ayon_core.lib import (
Logger,
register_event_callback,
emit_event
)
from ayon_core.settings import get_project_settings
from ayon_blender import BLENDER_ADDON_ROOT
from . import lib
from . import ops
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root,
)
PLUGINS_DIR = os.path.join(BLENDER_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
ORIGINAL_EXCEPTHOOK = sys.excepthook
AVALON_INSTANCES = "AVALON_INSTANCES"
AVALON_CONTAINERS = "AVALON_CONTAINERS"
AVALON_PROPERTY = 'avalon'
IS_HEADLESS = bpy.app.background
log = Logger.get_logger(__name__)
class BlenderHost(HostBase, IWorkfileHost, IPublishHost, ILoadHost):
name = "blender"
def install(self):
"""Override install method from HostBase.
Install Blender host functionality."""
install()
def get_containers(self) -> Iterator:
"""List containers from active Blender scene."""
return ls()
def get_workfile_extensions(self) -> List[str]:
"""Override get_workfile_extensions method from IWorkfileHost.
Get workfile possible extensions.
Returns:
List[str]: Workfile extensions.
"""
return file_extensions()
def save_workfile(self, dst_path: str = None):
"""Override save_workfile method from IWorkfileHost.
Save currently opened workfile.
Args:
dst_path (str): Where the current scene should be saved. Or use
current path if `None` is passed.
"""
save_file(dst_path if dst_path else bpy.data.filepath)
def open_workfile(self, filepath: str):
"""Override open_workfile method from IWorkfileHost.
Open workfile at specified filepath in the host.
Args:
filepath (str): Path to workfile.
"""
open_file(filepath)
def get_current_workfile(self) -> str:
"""Override get_current_workfile method from IWorkfileHost.
Retrieve currently opened workfile path.
Returns:
str: Path to currently opened workfile.
"""
return current_file()
def workfile_has_unsaved_changes(self) -> bool:
"""Override wokfile_has_unsaved_changes method from IWorkfileHost.
Returns True if opened workfile has no unsaved changes.
Returns:
bool: True if scene is saved and False if it has unsaved
modifications.
"""
return has_unsaved_changes()
def work_root(self, session) -> str:
"""Override work_root method from IWorkfileHost.
Modify workdir per host.
Args:
session (dict): Session context data.
Returns:
str: Path to new workdir.
"""
return work_root(session)
def get_context_data(self) -> dict:
"""Override abstract method from IPublishHost.
Get global data related to creation-publishing from workfile.
Returns:
dict: Context data stored using 'update_context_data'.
"""
property = bpy.context.scene.get(AVALON_PROPERTY)
if property:
return property.to_dict()
return {}
def update_context_data(self, data: dict, changes: dict):
"""Override abstract method from IPublishHost.
Store global context data to workfile.
Args:
data (dict): New data as are.
changes (dict): Only data that has been changed. Each value has
tuple with '(<old>, <new>)' value.
"""
bpy.context.scene[AVALON_PROPERTY] = data
def pype_excepthook_handler(*args):
traceback.print_exception(*args)
def install():
"""Install Blender configuration for Avalon."""
sys.excepthook = pype_excepthook_handler
pyblish.api.register_host("blender")
pyblish.api.register_plugin_path(str(PUBLISH_PATH))
register_loader_plugin_path(str(LOAD_PATH))
register_creator_plugin_path(str(CREATE_PATH))
lib.append_user_scripts()
lib.set_app_templates_path()
register_event_callback("new", on_new)
register_event_callback("open", on_open)
_register_callbacks()
_register_events()
if not IS_HEADLESS:
ops.register()
def uninstall():
"""Uninstall Blender configuration for Avalon."""
sys.excepthook = ORIGINAL_EXCEPTHOOK
pyblish.api.deregister_host("blender")
pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
deregister_loader_plugin_path(str(LOAD_PATH))
deregister_creator_plugin_path(str(CREATE_PATH))
if not IS_HEADLESS:
ops.unregister()
def show_message(title, message):
from ayon_core.tools.utils import show_message_dialog
from .ops import BlenderApplication
BlenderApplication.get_app()
show_message_dialog(
title=title,
message=message,
level="warning")
def message_window(title, message):
from .ops import (
MainThreadItem,
execute_in_main_thread,
_process_app_events
)
mti = MainThreadItem(show_message, title, message)
execute_in_main_thread(mti)
_process_app_events()
def get_folder_attributes():
project_name = get_current_project_name()
folder_path = get_current_folder_path()
folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
return folder_entity["attrib"]
def set_frame_range(data):
scene = bpy.context.scene
# Default scene settings
frameStart = scene.frame_start
frameEnd = scene.frame_end
fps = scene.render.fps / scene.render.fps_base
if not data:
return
if data.get("frameStart"):
frameStart = data.get("frameStart")
if data.get("frameEnd"):
frameEnd = data.get("frameEnd")
if data.get("fps"):
fps = data.get("fps")
scene.frame_start = frameStart
scene.frame_end = frameEnd
scene.render.fps = round(fps)
scene.render.fps_base = round(fps) / fps
def set_resolution(data):
scene = bpy.context.scene
# Default scene settings
resolution_x = scene.render.resolution_x
resolution_y = scene.render.resolution_y
if not data:
return
if data.get("resolutionWidth"):
resolution_x = data.get("resolutionWidth")
if data.get("resolutionHeight"):
resolution_y = data.get("resolutionHeight")
scene.render.resolution_x = resolution_x
scene.render.resolution_y = resolution_y
def on_new():
project = os.environ.get("AYON_PROJECT_NAME")
settings = get_project_settings(project).get("blender")
set_resolution_startup = settings.get("set_resolution_startup")
set_frames_startup = settings.get("set_frames_startup")
data = get_folder_attributes()
if set_resolution_startup:
set_resolution(data)
if set_frames_startup:
set_frame_range(data)
unit_scale_settings = settings.get("unit_scale_settings")
unit_scale_enabled = unit_scale_settings.get("enabled")
if unit_scale_enabled:
unit_scale = unit_scale_settings.get("base_file_unit_scale")
bpy.context.scene.unit_settings.scale_length = unit_scale
def on_open():
project = os.environ.get("AYON_PROJECT_NAME")
settings = get_project_settings(project).get("blender")
set_resolution_startup = settings.get("set_resolution_startup")
set_frames_startup = settings.get("set_frames_startup")
data = get_folder_attributes()
if set_resolution_startup:
set_resolution(data)
if set_frames_startup:
set_frame_range(data)
unit_scale_settings = settings.get("unit_scale_settings")
unit_scale_enabled = unit_scale_settings.get("enabled")
apply_on_opening = unit_scale_settings.get("apply_on_opening")
if unit_scale_enabled and apply_on_opening:
unit_scale = unit_scale_settings.get("base_file_unit_scale")
prev_unit_scale = bpy.context.scene.unit_settings.scale_length
if unit_scale != prev_unit_scale:
bpy.context.scene.unit_settings.scale_length = unit_scale
message_window(
"Base file unit scale changed",
"Base file unit scale changed to match the project settings.")
@bpy.app.handlers.persistent
def _on_save_pre(*args):
emit_event("before.save")
@bpy.app.handlers.persistent
def _on_save_post(*args):
emit_event("save")
@bpy.app.handlers.persistent
def _on_load_post(*args):
# Detect new file or opening an existing file
if bpy.data.filepath:
# Likely this was an open operation since it has a filepath
emit_event("open")
else:
emit_event("new")
ops.OpenFileCacher.post_load()
def _register_callbacks():
"""Register callbacks for certain events."""
def _remove_handler(handlers: List, callback: Callable):
"""Remove the callback from the given handler list."""
try:
handlers.remove(callback)
except ValueError:
pass
# TODO (jasper): implement on_init callback?
# Be sure to remove existing ones first.
_remove_handler(bpy.app.handlers.save_pre, _on_save_pre)
_remove_handler(bpy.app.handlers.save_post, _on_save_post)
_remove_handler(bpy.app.handlers.load_post, _on_load_post)
bpy.app.handlers.save_pre.append(_on_save_pre)
bpy.app.handlers.save_post.append(_on_save_post)
bpy.app.handlers.load_post.append(_on_load_post)
log.info("Installed event handler _on_save_pre...")
log.info("Installed event handler _on_save_post...")
log.info("Installed event handler _on_load_post...")
def _on_task_changed():
"""Callback for when the task in the context is changed."""
# TODO (jasper): Blender has no concept of projects or workspace.
# It would be nice to override 'bpy.ops.wm.open_mainfile' so it takes the
# workdir as starting directory. But I don't know if that is possible.
# Another option would be to create a custom 'File Selector' and add the
# `directory` attribute, so it opens in that directory (does it?).
# https://docs.blender.org/api/blender2.8/bpy.types.Operator.html#calling-a-file-selector
# https://docs.blender.org/api/blender2.8/bpy.types.WindowManager.html#bpy.types.WindowManager.fileselect_add
workdir = os.getenv("AYON_WORKDIR")
log.debug("New working directory: %s", workdir)
def _register_events():
"""Install callbacks for specific events."""
register_event_callback("taskChanged", _on_task_changed)
log.info("Installed event callback for 'taskChanged'...")
def _discover_gui() -> Optional[Callable]:
"""Return the most desirable of the currently registered GUIs"""
# Prefer last registered
guis = reversed(pyblish.api.registered_guis())
for gui in guis:
try:
gui = __import__(gui).show
except (ImportError, AttributeError):
continue
else:
return gui
return None
def add_to_avalon_container(container: bpy.types.Collection):
"""Add the container to the Avalon container."""
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS)
# Link the container to the scene so it's easily visible to the artist
# and can be managed easily. Otherwise it's only found in "Blender
# File" view and it will be removed by Blenders garbage collection,
# unless you set a 'fake user'.
bpy.context.scene.collection.children.link(avalon_container)
avalon_container.children.link(container)
# Disable Avalon containers for the view layers.
for view_layer in bpy.context.scene.view_layers:
for child in view_layer.layer_collection.children:
if child.collection == avalon_container:
child.exclude = True
def metadata_update(node: bpy.types.bpy_struct_meta_idprop, data: Dict):
"""Imprint the node with metadata.
Existing metadata will be updated.
"""
if not node.get(AVALON_PROPERTY):
node[AVALON_PROPERTY] = dict()
for key, value in data.items():
if value is None:
continue
node[AVALON_PROPERTY][key] = value
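# Usage sketch (illustrative names, assumes a running Blender session):
#     coll = bpy.data.collections.new("example_CON")
#     metadata_update(coll, {"loader": "ExampleLoader", "namespace": "char01"})
#     # A second call only overwrites the keys it receives:
#     metadata_update(coll, {"namespace": "char02"})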
def containerise(name: str,
namespace: str,
nodes: List,
context: Dict,
loader: Optional[str] = None,
suffix: Optional[str] = "CON") -> bpy.types.Collection:
"""Bundle `nodes` into an assembly and imprint it with metadata
Containerisation enables tracking of version, author and origin
for loaded assets.
Arguments:
name: Name of resulting assembly
namespace: Namespace under which to host container
nodes: Long names of nodes to containerise
context: Asset information
loader: Name of loader used to produce this container.
suffix: Suffix of container, defaults to `_CON`.
Returns:
The container assembly
"""
node_name = f"{context['folder']['name']}_{name}"
if namespace:
node_name = f"{namespace}:{node_name}"
if suffix:
node_name = f"{node_name}_{suffix}"
container = bpy.data.collections.new(name=node_name)
# Link the children nodes
for obj in nodes:
container.objects.link(obj)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(loader),
"representation": context["representation"]["id"],
}
metadata_update(container, data)
add_to_avalon_container(container)
return container
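# Usage sketch (illustrative values): a loader can bundle its freshly loaded
# objects like this:
#     container = containerise(
#         name="modelMain",
#         namespace="char01_01",
#         nodes=loaded_objects,
#         context=context,
#         loader="BlendModelLoader",
#     )
# The result is a collection named "char01_01:<folder>_modelMain_CON" that is
# linked under the AVALON_CONTAINERS collection and imprinted with metadata.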
def containerise_existing(
container: bpy.types.Collection,
name: str,
namespace: str,
context: Dict,
loader: Optional[str] = None,
suffix: Optional[str] = "CON") -> bpy.types.Collection:
"""Imprint or update container with metadata.
Arguments:
name: Name of resulting assembly
namespace: Namespace under which to host container
context: Asset information
loader: Name of loader used to produce this container.
suffix: Suffix of container, defaults to `_CON`.
Returns:
The container assembly
"""
node_name = container.name
if suffix:
node_name = f"{node_name}_{suffix}"
container.name = node_name
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": name,
"namespace": namespace or '',
"loader": str(loader),
"representation": context["representation"]["id"],
}
metadata_update(container, data)
add_to_avalon_container(container)
return container
def parse_container(container: bpy.types.Collection,
validate: bool = True) -> Dict:
"""Return the container node's full container data.
Args:
container: A container node name.
validate: turn the validation for the container on or off
Returns:
The container schema data for this container node.
"""
data = lib.read(container)
# Append transient data
data["objectName"] = container.name
if validate:
schema.validate(data)
return data
def ls() -> Iterator:
"""List containers from active Blender scene.
This is the host-equivalent of api.ls(), but instead of listing assets on
disk, it lists assets already loaded in Blender; once loaded they are
called containers.
"""
for id_type in {AYON_CONTAINER_ID, AVALON_CONTAINER_ID}:
for container in lib.lsattr("id", id_type):
yield parse_container(container)
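# Iteration sketch: `ls()` yields plain dictionaries, one per loaded container:
#     for container in ls():
#         print(container["objectName"], container["representation"])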
def publish():
"""Shorthand to publish from within host."""
return pyblish.util.publish()

View file

@ -0,0 +1,542 @@
"""Shared functionality for pipeline plugins for Blender."""
import itertools
from pathlib import Path
from typing import Dict, List, Optional
import pyblish.api
import bpy
from ayon_core.pipeline import (
Creator,
CreatedInstance,
LoaderPlugin,
AVALON_INSTANCE_ID,
AYON_INSTANCE_ID,
)
from ayon_core.pipeline.publish import Extractor
from ayon_core.lib import BoolDef
from .pipeline import (
AVALON_CONTAINERS,
AVALON_INSTANCES,
AVALON_PROPERTY,
)
from .ops import (
MainThreadItem,
execute_in_main_thread
)
from .lib import imprint
VALID_EXTENSIONS = [".blend", ".json", ".abc", ".fbx",
".usd", ".usdc", ".usda"]
def prepare_scene_name(
folder_name: str, product_name: str, namespace: Optional[str] = None
) -> str:
"""Return a consistent name for an asset."""
name = f"{folder_name}"
if namespace:
name = f"{name}_{namespace}"
name = f"{name}_{product_name}"
# Blender name for a collection or object cannot be longer than 63
# characters. If the name is longer, it will raise an error.
if len(name) > 63:
raise ValueError(f"Scene name '{name}' would be too long.")
return name
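# Naming sketch (illustrative values):
#     prepare_scene_name("char01", "modelMain")        # -> "char01_modelMain"
#     prepare_scene_name("char01", "modelMain", "01")  # -> "char01_01_modelMain"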
def get_unique_number(
folder_name: str, product_name: str
) -> str:
"""Return a unique number based on the folder name."""
avalon_container = bpy.data.collections.get(AVALON_CONTAINERS)
if not avalon_container:
return "01"
# Check the names of both object and collection containers
obj_asset_groups = avalon_container.objects
obj_group_names = {
c.name for c in obj_asset_groups
if c.type == 'EMPTY' and c.get(AVALON_PROPERTY)}
coll_asset_groups = avalon_container.children
coll_group_names = {
c.name for c in coll_asset_groups
if c.get(AVALON_PROPERTY)}
container_names = obj_group_names.union(coll_group_names)
count = 1
name = f"{folder_name}_{count:0>2}_{product_name}"
while name in container_names:
count += 1
name = f"{folder_name}_{count:0>2}_{product_name}"
return f"{count:0>2}"
def prepare_data(data, container_name=None):
name = data.name
local_data = data.make_local()
if container_name:
local_data.name = f"{container_name}:{name}"
else:
local_data.name = f"{name}"
return local_data
def create_blender_context(active: Optional[bpy.types.Object] = None,
selected: Optional[bpy.types.Object] = None,
window: Optional[bpy.types.Window] = None):
"""Create a new Blender context. If an object is passed as
a parameter, it is set as selected and active.
"""
if not isinstance(selected, list):
selected = [selected]
override_context = bpy.context.copy()
windows = [window] if window else bpy.context.window_manager.windows
for win in windows:
for area in win.screen.areas:
if area.type == 'VIEW_3D':
for region in area.regions:
if region.type == 'WINDOW':
override_context['window'] = win
override_context['screen'] = win.screen
override_context['area'] = area
override_context['region'] = region
override_context['scene'] = bpy.context.scene
override_context['active_object'] = active
override_context['selected_objects'] = selected
return override_context
raise Exception("Could not create a custom Blender context.")
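# Usage sketch: the returned dictionary is meant for `bpy.context.temp_override`,
# as used in `deselect_all()` below:
#     override = create_blender_context(active=obj, selected=[obj])
#     with bpy.context.temp_override(**override):
#         bpy.ops.object.mode_set(mode='OBJECT')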
def get_parent_collection(collection):
"""Get the parent of the input collection"""
check_list = [bpy.context.scene.collection]
for c in check_list:
if collection.name in c.children.keys():
return c
check_list.extend(c.children)
return None
def get_local_collection_with_name(name):
for collection in bpy.data.collections:
if collection.name == name and collection.library is None:
return collection
return None
def deselect_all():
"""Deselect all objects in the scene.
Blender raises a context error when trying to deselect an object that is
not in object mode.
"""
modes = []
active = bpy.context.view_layer.objects.active
for obj in bpy.data.objects:
if obj.mode != 'OBJECT':
modes.append((obj, obj.mode))
bpy.context.view_layer.objects.active = obj
context_override = create_blender_context(active=obj)
with bpy.context.temp_override(**context_override):
bpy.ops.object.mode_set(mode='OBJECT')
context_override = create_blender_context()
with bpy.context.temp_override(**context_override):
bpy.ops.object.select_all(action='DESELECT')
for p in modes:
bpy.context.view_layer.objects.active = p[0]
context_override = create_blender_context(active=p[0])
with bpy.context.temp_override(**context_override):
bpy.ops.object.mode_set(mode=p[1])
bpy.context.view_layer.objects.active = active
class BlenderInstancePlugin(pyblish.api.InstancePlugin):
settings_category = "blender"
class BlenderContextPlugin(pyblish.api.ContextPlugin):
settings_category = "blender"
class BlenderExtractor(Extractor):
settings_category = "blender"
class BlenderCreator(Creator):
"""Base class for Blender Creator plug-ins."""
defaults = ['Main']
settings_category = "blender"
create_as_asset_group = False
@staticmethod
def cache_instance_data(shared_data):
"""Cache instances for Creators shared data.
Create `blender_cached_instances` key when needed in shared data and
fill it with all collected instances from the scene under its
respective creator identifiers.
If legacy instances are detected in the scene, create
`blender_cached_legacy_instances` key and fill it with
all legacy products from this family as a value. # key or value?
Args:
shared_data(Dict[str, Any]): Shared data.
"""
if not shared_data.get('blender_cached_instances'):
cache = {}
cache_legacy = {}
avalon_instances = bpy.data.collections.get(AVALON_INSTANCES)
avalon_instance_objs = (
avalon_instances.objects if avalon_instances else []
)
for obj_or_col in itertools.chain(
avalon_instance_objs,
bpy.data.collections
):
avalon_prop = obj_or_col.get(AVALON_PROPERTY, {})
if not avalon_prop:
continue
if avalon_prop.get('id') not in {
AYON_INSTANCE_ID, AVALON_INSTANCE_ID
}:
continue
creator_id = avalon_prop.get('creator_identifier')
if creator_id:
# Creator instance
cache.setdefault(creator_id, []).append(obj_or_col)
else:
family = avalon_prop.get('family')
if family:
# Legacy creator instance
cache_legacy.setdefault(family, []).append(obj_or_col)
shared_data["blender_cached_instances"] = cache
shared_data["blender_cached_legacy_instances"] = cache_legacy
return shared_data
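# Shape of the cached shared data (illustrative):
#     shared_data["blender_cached_instances"] = {
#         "io.openpype.creators.blender.model": [<collection or empty>, ...],
#     }
#     shared_data["blender_cached_legacy_instances"] = {
#         "model": [<node without creator_identifier>, ...],
#     }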
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
"""Override abstract method from Creator.
Create new instance and store it.
Args:
product_name (str): Product name of created instance.
instance_data (dict): Instance base data.
pre_create_data (dict): Data based on pre creation attributes.
Those may affect how creator works.
"""
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create asset group
folder_name = instance_data["folderPath"].split("/")[-1]
name = prepare_scene_name(folder_name, product_name)
if self.create_as_asset_group:
# Create instance as empty
instance_node = bpy.data.objects.new(name=name, object_data=None)
instance_node.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(instance_node)
else:
# Create instance collection
instance_node = bpy.data.collections.new(name=name)
instances.children.link(instance_node)
self.set_instance_data(product_name, instance_data)
instance = CreatedInstance(
self.product_type, product_name, instance_data, self
)
instance.transient_data["instance_node"] = instance_node
self._add_instance_to_context(instance)
imprint(instance_node, instance_data)
return instance_node
def collect_instances(self):
"""Override abstract method from BlenderCreator.
Collect existing instances related to this creator plugin."""
# Cache instances in shared data
self.cache_instance_data(self.collection_shared_data)
# Get cached instances
cached_instances = self.collection_shared_data.get(
"blender_cached_instances"
)
if not cached_instances:
return
# Process only instances that were created by this creator
for instance_node in cached_instances.get(self.identifier, []):
property = instance_node.get(AVALON_PROPERTY)
# Create instance object from existing data
instance = CreatedInstance.from_existing(
instance_data=property.to_dict(),
creator=self
)
instance.transient_data["instance_node"] = instance_node
# Add instance to create context
self._add_instance_to_context(instance)
def update_instances(self, update_list):
"""Override abstract method from BlenderCreator.
Store changes of existing instances so they can be recollected.
Args:
update_list(List[UpdateData]): Changed instances
and their changes, as a list of tuples.
"""
for created_instance, changes in update_list:
data = created_instance.data_to_store()
node = created_instance.transient_data["instance_node"]
if not node:
# We can't update if we don't know the node
self.log.error(
f"Unable to update instance {created_instance} "
f"without instance node."
)
return
# Rename the instance node in the scene if product
# or folder changed.
# Do not rename the instance if the family is workfile, as the
# workfile instance is included in the AVALON_CONTAINER collection.
if (
"productName" in changes.changed_keys
or "folderPath" in changes.changed_keys
) and created_instance.product_type != "workfile":
folder_name = data["folderPath"].split("/")[-1]
name = prepare_scene_name(
folder_name, data["productName"]
)
node.name = name
imprint(node, data)
def remove_instances(self, instances: List[CreatedInstance]):
for instance in instances:
node = instance.transient_data["instance_node"]
if isinstance(node, bpy.types.Collection):
for children in node.children_recursive:
if isinstance(children, bpy.types.Collection):
bpy.data.collections.remove(children)
else:
bpy.data.objects.remove(children)
bpy.data.collections.remove(node)
elif isinstance(node, bpy.types.Object):
bpy.data.objects.remove(node)
self._remove_instance_from_context(instance)
def set_instance_data(
self,
product_name: str,
instance_data: dict
):
"""Fill instance data with required items.
Args:
product_name(str): Product name of created instance.
instance_data(dict): Instance base data.
instance_node(bpy.types.ID): Instance node in blender scene.
"""
if not instance_data:
instance_data = {}
instance_data.update(
{
"id": AVALON_INSTANCE_ID,
"creator_identifier": self.identifier,
"productName": product_name,
}
)
def get_pre_create_attr_defs(self):
return [
BoolDef("use_selection",
label="Use selection",
default=True)
]
class BlenderLoader(LoaderPlugin):
"""A basic AssetLoader for Blender
This will implement the basic logic for linking/appending assets
into another Blender scene.
The `update` method should be implemented by a sub-class, because
it's different for different types (e.g. model, rig, animation,
etc.).
"""
settings_category = "blender"
@staticmethod
def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
"""Get the 'instance empty' that holds the collection instance."""
for node in nodes:
if not isinstance(node, bpy.types.Object):
continue
if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
and node.instance_collection and node.name == instance_name):
return node
return None
@staticmethod
def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
"""Get the 'instance collection' (container) for this asset."""
for node in nodes:
if not isinstance(node, bpy.types.Collection):
continue
if node.name == instance_name:
return node
return None
@staticmethod
def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
"""Find the library file from the container.
It traverses the objects in this collection, checks that they all come
from a single library and returns that library.
Warning:
No nested collections are supported at the moment!
"""
assert not container.children, "Nested collections are not supported."
assert container.objects, "The collection doesn't contain any objects."
libraries = set()
for obj in container.objects:
assert obj.library, f"'{obj.name}' is not linked."
libraries.add(obj.library)
assert len(libraries) == 1, (
    f"'{container.name}' contains objects from more than one library.")
return list(libraries)[0]
def process_asset(self,
context: dict,
name: str,
namespace: Optional[str] = None,
options: Optional[Dict] = None):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def load(self,
context: dict,
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
""" Run the loader on Blender main thread"""
mti = MainThreadItem(self._load, context, name, namespace, options)
execute_in_main_thread(mti)
def _load(self,
context: dict,
name: Optional[str] = None,
namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[bpy.types.Collection]:
"""Load asset via database
Arguments:
context: Full parenthood of representation to load
name: Use pre-defined name
namespace: Use pre-defined namespace
options: Additional settings dictionary
"""
# TODO: make it possible to add the asset several times by
# just re-using the collection
filepath = self.filepath_from_context(context)
assert Path(filepath).exists(), f"{filepath} doesn't exist."
folder_name = context["folder"]["name"]
product_name = context["product"]["name"]
unique_number = get_unique_number(
folder_name, product_name
)
namespace = namespace or f"{folder_name}_{unique_number}"
name = name or prepare_scene_name(
folder_name, product_name, unique_number
)
nodes = self.process_asset(
context=context,
name=name,
namespace=namespace,
options=options,
)
# Only containerise if anything was loaded by the Loader.
if not nodes:
return None
# Only containerise if it's not already a collection from a .blend file.
# representation = context["representation"]["name"]
# if representation != "blend":
# from ayon_blender.api.pipeline import containerise
# return containerise(
# name=name,
# namespace=namespace,
# nodes=nodes,
# context=context,
# loader=self.__class__.__name__,
# )
# folder_name = context["folder"]["name"]
# product_name = context["product"]["name"]
# instance_name = prepare_scene_name(
# folder_name, product_name, unique_number
# ) + '_CON'
# return self._get_instance_collection(instance_name, nodes)
def exec_update(self, container: Dict, context: Dict):
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def update(self, container: Dict, context: Dict):
""" Run the update on Blender main thread"""
mti = MainThreadItem(self.exec_update, container, context)
execute_in_main_thread(mti)
def exec_remove(self, container: Dict) -> bool:
"""Must be implemented by a sub-class"""
raise NotImplementedError("Must be implemented by a sub-class")
def remove(self, container: Dict) -> bool:
""" Run the remove on Blender main thread"""
mti = MainThreadItem(self.exec_remove, container)
execute_in_main_thread(mti)

View file

@ -0,0 +1,364 @@
from pathlib import Path
import bpy
from ayon_core.settings import get_project_settings
from ayon_core.pipeline import get_current_project_name
def get_default_render_folder(settings):
"""Get default render folder from blender settings."""
return (settings["blender"]
["RenderSettings"]
["default_render_image_folder"])
def get_aov_separator(settings):
"""Get aov separator from blender settings."""
aov_sep = (settings["blender"]
["RenderSettings"]
["aov_separator"])
if aov_sep == "dash":
return "-"
elif aov_sep == "underscore":
return "_"
elif aov_sep == "dot":
return "."
else:
raise ValueError(f"Invalid aov separator: {aov_sep}")
def get_image_format(settings):
"""Get image format from blender settings."""
return (settings["blender"]
["RenderSettings"]
["image_format"])
def get_multilayer(settings):
"""Get multilayer from blender settings."""
return (settings["blender"]
["RenderSettings"]
["multilayer_exr"])
def get_renderer(settings):
"""Get renderer from blender settings."""
return (settings["blender"]
["RenderSettings"]
["renderer"])
def get_compositing(settings):
"""Get compositing from blender settings."""
return (settings["blender"]
["RenderSettings"]
["compositing"])
def get_render_product(output_path, name, aov_sep):
"""
Generate the path to the render product. Blender interprets the `#`
as the frame number when it renders.
Args:
output_path (Path): Directory the renders will be written to.
name (str): Name of the render instance (asset group).
aov_sep (str): Separator between the instance name and the pass name.
"""
filepath = output_path / name.lstrip("/")
render_product = f"{filepath}{aov_sep}beauty.####"
render_product = render_product.replace("\\", "/")
return render_product
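# Path sketch (illustrative values): with an output_path of
# ".../renders/blender/sh010", a name of "renderMain" and an underscore
# separator, the returned product is
# ".../renders/blender/sh010/renderMain_beauty.####", where Blender replaces
# "####" with the zero-padded frame number at render time.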
def set_render_format(ext, multilayer):
# Set Blender to save the file with the right extension
bpy.context.scene.render.use_file_extension = True
image_settings = bpy.context.scene.render.image_settings
if ext == "exr":
image_settings.file_format = (
"OPEN_EXR_MULTILAYER" if multilayer else "OPEN_EXR")
elif ext == "bmp":
image_settings.file_format = "BMP"
elif ext == "rgb":
image_settings.file_format = "IRIS"
elif ext == "png":
image_settings.file_format = "PNG"
elif ext == "jpeg":
image_settings.file_format = "JPEG"
elif ext == "jp2":
image_settings.file_format = "JPEG2000"
elif ext == "tga":
image_settings.file_format = "TARGA"
elif ext == "tif":
image_settings.file_format = "TIFF"
def set_render_passes(settings, renderer):
aov_list = set(settings["blender"]["RenderSettings"]["aov_list"])
custom_passes = settings["blender"]["RenderSettings"]["custom_passes"]
# Common passes for both renderers
vl = bpy.context.view_layer
# Data Passes
vl.use_pass_combined = "combined" in aov_list
vl.use_pass_z = "z" in aov_list
vl.use_pass_mist = "mist" in aov_list
vl.use_pass_normal = "normal" in aov_list
# Light Passes
vl.use_pass_diffuse_direct = "diffuse_light" in aov_list
vl.use_pass_diffuse_color = "diffuse_color" in aov_list
vl.use_pass_glossy_direct = "specular_light" in aov_list
vl.use_pass_glossy_color = "specular_color" in aov_list
vl.use_pass_emit = "emission" in aov_list
vl.use_pass_environment = "environment" in aov_list
vl.use_pass_ambient_occlusion = "ao" in aov_list
# Cryptomatte Passes
vl.use_pass_cryptomatte_object = "cryptomatte_object" in aov_list
vl.use_pass_cryptomatte_material = "cryptomatte_material" in aov_list
vl.use_pass_cryptomatte_asset = "cryptomatte_asset" in aov_list
if renderer == "BLENDER_EEVEE":
# Eevee exclusive passes
eevee = vl.eevee
# Light Passes
vl.use_pass_shadow = "shadow" in aov_list
eevee.use_pass_volume_direct = "volume_light" in aov_list
# Effects Passes
eevee.use_pass_bloom = "bloom" in aov_list
eevee.use_pass_transparent = "transparent" in aov_list
# Cryptomatte Passes
vl.use_pass_cryptomatte_accurate = "cryptomatte_accurate" in aov_list
elif renderer == "CYCLES":
# Cycles exclusive passes
cycles = vl.cycles
# Data Passes
vl.use_pass_position = "position" in aov_list
vl.use_pass_vector = "vector" in aov_list
vl.use_pass_uv = "uv" in aov_list
cycles.denoising_store_passes = "denoising" in aov_list
vl.use_pass_object_index = "object_index" in aov_list
vl.use_pass_material_index = "material_index" in aov_list
cycles.pass_debug_sample_count = "sample_count" in aov_list
# Light Passes
vl.use_pass_diffuse_indirect = "diffuse_indirect" in aov_list
vl.use_pass_glossy_indirect = "specular_indirect" in aov_list
vl.use_pass_transmission_direct = "transmission_direct" in aov_list
vl.use_pass_transmission_indirect = "transmission_indirect" in aov_list
vl.use_pass_transmission_color = "transmission_color" in aov_list
cycles.use_pass_volume_direct = "volume_light" in aov_list
cycles.use_pass_volume_indirect = "volume_indirect" in aov_list
cycles.use_pass_shadow_catcher = "shadow" in aov_list
aovs_names = [aov.name for aov in vl.aovs]
for cp in custom_passes:
cp_name = cp["attribute"]
if cp_name not in aovs_names:
aov = vl.aovs.add()
aov.name = cp_name
else:
aov = vl.aovs[cp_name]
aov.type = cp["value"]
return list(aov_list), custom_passes
def _create_aov_slot(name, aov_sep, slots, rpass_name, multi_exr, output_path):
filename = f"{name}{aov_sep}{rpass_name}.####"
slot = slots.new(rpass_name if multi_exr else filename)
filepath = str(output_path / filename.lstrip("/"))
return slot, filepath
def set_node_tree(
output_path, render_product, name, aov_sep, ext, multilayer, compositing
):
# Set the scene to use the compositor node tree to render
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
comp_layer_type = "CompositorNodeRLayers"
output_type = "CompositorNodeOutputFile"
compositor_type = "CompositorNodeComposite"
# Get the Render Layer, Composite and the previous output nodes
render_layer_node = None
composite_node = None
old_output_node = None
for node in tree.nodes:
if node.bl_idname == comp_layer_type:
render_layer_node = node
elif node.bl_idname == compositor_type:
composite_node = node
elif node.bl_idname == output_type and "AYON" in node.name:
old_output_node = node
if render_layer_node and composite_node and old_output_node:
break
# If there's not a Render Layers node, we create it
if not render_layer_node:
render_layer_node = tree.nodes.new(comp_layer_type)
# Get the enabled output sockets, that are the active passes for the
# render.
# We also exclude some layers.
exclude_sockets = ["Image", "Alpha", "Noisy Image"]
passes = [
socket
for socket in render_layer_node.outputs
if socket.enabled and socket.name not in exclude_sockets
]
# Create a new output node
output = tree.nodes.new(output_type)
image_settings = bpy.context.scene.render.image_settings
output.format.file_format = image_settings.file_format
slots = None
# In case of a multilayer exr, we don't need to use the output node,
# because the blender render already outputs a multilayer exr.
multi_exr = ext == "exr" and multilayer
slots = output.layer_slots if multi_exr else output.file_slots
output.base_path = render_product if multi_exr else str(output_path)
slots.clear()
aov_file_products = []
old_links = {
link.from_socket.name: link for link in tree.links
if link.to_node == old_output_node}
# Create a new socket for the beauty output
pass_name = "rgba" if multi_exr else "beauty"
slot, _ = _create_aov_slot(
name, aov_sep, slots, pass_name, multi_exr, output_path)
tree.links.new(render_layer_node.outputs["Image"], slot)
if compositing:
# Create a new socket for the composite output
pass_name = "composite"
comp_socket, filepath = _create_aov_slot(
name, aov_sep, slots, pass_name, multi_exr, output_path)
aov_file_products.append(("Composite", filepath))
# For each active render pass, we add a new socket to the output node
# and link it
for rpass in passes:
slot, filepath = _create_aov_slot(
name, aov_sep, slots, rpass.name, multi_exr, output_path)
aov_file_products.append((rpass.name, filepath))
# If the rpass was not connected with the old output node, we connect
# it with the new one.
if not old_links.get(rpass.name):
tree.links.new(rpass, slot)
for link in list(old_links.values()):
# Check if the socket is still available in the new output node.
socket = output.inputs.get(link.to_socket.name)
# If it is, we connect it with the new output node.
if socket:
tree.links.new(link.from_socket, socket)
# Then, we remove the old link.
tree.links.remove(link)
# If there's a composite node, we connect its input with the new output
if compositing and composite_node:
for link in tree.links:
if link.to_node == composite_node:
tree.links.new(link.from_socket, comp_socket)
break
if old_output_node:
output.location = old_output_node.location
tree.nodes.remove(old_output_node)
output.name = "AYON File Output"
output.label = "AYON File Output"
return [] if multi_exr else aov_file_products
def imprint_render_settings(node, data):
RENDER_DATA = "render_data"
if not node.get(RENDER_DATA):
node[RENDER_DATA] = {}
for key, value in data.items():
if value is None:
continue
node[RENDER_DATA][key] = value
def prepare_rendering(asset_group):
name = asset_group.name
assert bpy.data.filepath, "Workfile not saved. Please save the file first."
filepath = Path(bpy.data.filepath)
dirpath = filepath.parent
file_name = Path(filepath.name).stem
project = get_current_project_name()
settings = get_project_settings(project)
render_folder = get_default_render_folder(settings)
aov_sep = get_aov_separator(settings)
ext = get_image_format(settings)
multilayer = get_multilayer(settings)
renderer = get_renderer(settings)
compositing = get_compositing(settings)
set_render_format(ext, multilayer)
bpy.context.scene.render.engine = renderer
aov_list, custom_passes = set_render_passes(settings, renderer)
output_path = Path.joinpath(dirpath, render_folder, file_name)
render_product = get_render_product(output_path, name, aov_sep)
aov_file_product = set_node_tree(
output_path, render_product, name, aov_sep,
ext, multilayer, compositing)
# Clear the render filepath, so that the output is handled only by the
# output node in the compositor.
bpy.context.scene.render.filepath = ""
render_settings = {
"render_folder": render_folder,
"aov_separator": aov_sep,
"image_format": ext,
"multilayer_exr": multilayer,
"aov_list": aov_list,
"custom_passes": custom_passes,
"render_product": render_product,
"aov_file_product": aov_file_product,
"review": True,
}
imprint_render_settings(asset_group, render_settings)

View file

@ -0,0 +1,89 @@
"""Host API required for Work Files."""
from pathlib import Path
from typing import List, Optional
import bpy
class OpenFileCacher:
"""Store information about opening file.
When a file is opening, QApplication events should not be processed.
"""
opening_file = False
@classmethod
def post_load(cls):
cls.opening_file = False
@classmethod
def set_opening(cls):
cls.opening_file = True
def open_file(filepath: str) -> Optional[str]:
"""Open the scene file in Blender."""
OpenFileCacher.set_opening()
preferences = bpy.context.preferences
load_ui = preferences.filepaths.use_load_ui
use_scripts = preferences.filepaths.use_scripts_auto_execute
result = bpy.ops.wm.open_mainfile(
filepath=filepath,
load_ui=load_ui,
use_scripts=use_scripts,
)
if result == {'FINISHED'}:
return filepath
return None
def save_file(filepath: str, copy: bool = False) -> Optional[str]:
"""Save the open scene file."""
preferences = bpy.context.preferences
compress = preferences.filepaths.use_file_compression
relative_remap = preferences.filepaths.use_relative_paths
result = bpy.ops.wm.save_as_mainfile(
filepath=filepath,
compress=compress,
relative_remap=relative_remap,
copy=copy,
)
if result == {'FINISHED'}:
return filepath
return None
def current_file() -> Optional[str]:
"""Return the path of the open scene file."""
current_filepath = bpy.data.filepath
if Path(current_filepath).is_file():
return current_filepath
return None
def has_unsaved_changes() -> bool:
"""Does the open scene file have unsaved changes?"""
return bpy.data.is_dirty
def file_extensions() -> List[str]:
"""Return the supported file extensions for Blender scene files."""
return [".blend"]
def work_root(session: dict) -> str:
"""Return the default root to browse for work files."""
work_dir = session["AYON_WORKDIR"]
scene_dir = session.get("AVALON_SCENEDIR")
if scene_dir:
return str(Path(work_dir, scene_dir))
return work_dir

View file

@ -0,0 +1,10 @@
from ayon_core.pipeline import install_host
from ayon_blender.api import BlenderHost
def register():
install_host(BlenderHost())
def unregister():
pass

View file

@ -0,0 +1,54 @@
from pathlib import Path
from ayon_applications import PreLaunchHook, LaunchTypes
class AddPythonScriptToLaunchArgs(PreLaunchHook):
"""Add python script to be executed before Blender launch."""
# Append after file argument
order = 15
app_groups = {"blender"}
launch_types = {LaunchTypes.local}
def execute(self):
if not self.launch_context.data.get("python_scripts"):
return
# Add python script paths to the launch arguments
for python_script_path in self.launch_context.data["python_scripts"]:
self.log.info(
f"Adding python script {python_script_path} to launch"
)
# Test script path exists
python_script_path = Path(python_script_path)
if not python_script_path.exists():
self.log.warning(
f"Python script {python_script_path} doesn't exist. "
"Skipped..."
)
continue
if "--" in self.launch_context.launch_args:
# Insert before separator
separator_index = self.launch_context.launch_args.index("--")
self.launch_context.launch_args.insert(
separator_index,
"-P",
)
self.launch_context.launch_args.insert(
separator_index + 1,
python_script_path.as_posix(),
)
else:
self.launch_context.launch_args.extend(
["-P", python_script_path.as_posix()]
)
# Ensure separator
if "--" not in self.launch_context.launch_args:
self.launch_context.launch_args.append("--")
self.launch_context.launch_args.extend(
[*self.launch_context.data.get("script_args", [])]
)
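# Resulting launch command sketch (illustrative): with one script and extra
# script arguments, the launch arguments end up shaped like
#     blender ... -P /path/to/script.py -- <script_args...>
# where Blender stops parsing options at "--", leaving the remaining
# arguments for the script to read from sys.argv.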

View file

@ -0,0 +1,295 @@
import os
import re
import subprocess
from platform import system
from ayon_applications import PreLaunchHook, LaunchTypes
class InstallPySideToBlender(PreLaunchHook):
"""Install Qt binding to blender's python packages.
Prelaunch hook does 2 things:
1.) Blender's python packages are pushed to the beginning of PYTHONPATH.
2.) Check if blender has PySide2 installed and try to install it if not.
The pipeline implementation requires a Qt binding to be installed in
blender's python packages.
"""
app_groups = {"blender"}
launch_types = {LaunchTypes.local}
def execute(self):
# Prelaunch hook is not crucial
try:
self.inner_execute()
except Exception:
self.log.warning(
"Processing of {} crashed.".format(self.__class__.__name__),
exc_info=True
)
def inner_execute(self):
# Get blender's python directory
version_regex = re.compile(r"^([2-4])\.[0-9]+$")
platform = system().lower()
executable = self.launch_context.executable.executable_path
expected_executable = "blender"
if platform == "windows":
expected_executable += ".exe"
if os.path.basename(executable).lower() != expected_executable:
self.log.info((
f"Executable does not lead to {expected_executable} file."
"Can't determine blender's python to check/install"
" Qt binding."
))
return
versions_dir = os.path.dirname(executable)
if platform == "darwin":
versions_dir = os.path.join(
os.path.dirname(versions_dir), "Resources"
)
version_subfolders = []
for dir_entry in os.scandir(versions_dir):
if dir_entry.is_dir() and version_regex.match(dir_entry.name):
version_subfolders.append(dir_entry.name)
if not version_subfolders:
self.log.info(
"Didn't find version subfolder next to Blender executable"
)
return
if len(version_subfolders) > 1:
self.log.info((
"Found more than one version subfolder next"
" to blender executable. {}"
).format(", ".join([
'"./{}"'.format(name)
for name in version_subfolders
])))
return
version_subfolder = version_subfolders[0]
before_blender_4 = False
if int(version_regex.match(version_subfolder).group(1)) < 4:
before_blender_4 = True
# Blender 4 has Python 3.11 which does not support 'PySide2'
# QUESTION could we always install PySide6?
qt_binding = "PySide2" if before_blender_4 else "PySide6"
# Use PySide6 6.6.3 because 6.7.0 had a bug
# - 'QTextEdit' can't be added to 'QBoxLayout'
qt_binding_version = None if before_blender_4 else "6.6.3"
python_dir = os.path.join(versions_dir, version_subfolder, "python")
python_lib = os.path.join(python_dir, "lib")
python_version = "python"
if platform != "windows":
for dir_entry in os.scandir(python_lib):
if dir_entry.is_dir() and dir_entry.name.startswith("python"):
python_lib = dir_entry.path
python_version = dir_entry.name
break
# Change PYTHONPATH to contain blender's packages as first
python_paths = [
python_lib,
os.path.join(python_lib, "site-packages"),
]
python_path = self.launch_context.env.get("PYTHONPATH") or ""
for path in python_path.split(os.pathsep):
if path:
python_paths.append(path)
self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths)
# Get blender's python executable
python_bin = os.path.join(python_dir, "bin")
if platform == "windows":
python_executable = os.path.join(python_bin, "python.exe")
else:
python_executable = os.path.join(python_bin, python_version)
# Check for python with enabled 'pymalloc'
if not os.path.exists(python_executable):
python_executable += "m"
if not os.path.exists(python_executable):
self.log.warning(
"Couldn't find python executable for blender. {}".format(
executable
)
)
return
# Check if PySide2 is installed and skip if yes
if self.is_pyside_installed(python_executable, qt_binding):
self.log.debug("Blender has already installed PySide2.")
return
# Install PySide2 in blender's python
if platform == "windows":
result = self.install_pyside_windows(
python_executable,
qt_binding,
qt_binding_version,
before_blender_4,
)
else:
result = self.install_pyside(
python_executable,
qt_binding,
qt_binding_version,
)
if result:
self.log.info(
f"Successfully installed {qt_binding} module to blender."
)
else:
self.log.warning(
f"Failed to install {qt_binding} module to blender."
)
def install_pyside_windows(
self,
python_executable,
qt_binding,
qt_binding_version,
before_blender_4,
):
"""Install PySide2 python module to blender's python.
Installation requires administration rights, which is why the "pywin32"
module is used: it can execute commands and request administrator
rights.
"""
try:
import win32con
import win32process
import win32event
import pywintypes
from win32comext.shell.shell import ShellExecuteEx
from win32comext.shell import shellcon
except Exception:
self.log.warning("Couldn't import \"pywin32\" modules")
return
if qt_binding_version:
qt_binding = f"{qt_binding}=={qt_binding_version}"
try:
# Parameters
# - use "-m pip" as module pip to install PySide2 and argument
# "--ignore-installed" is to force install module to blender's
# site-packages and make sure it is binary compatible
fake_exe = "fake.exe"
site_packages_prefix = os.path.dirname(
os.path.dirname(python_executable)
)
args = [
fake_exe,
"-m",
"pip",
"install",
"--ignore-installed",
qt_binding,
]
if not before_blender_4:
# Define prefix for site package
# Python in blender 4.x is installing packages in AppData and
# not in blender's directory.
args.extend(["--prefix", site_packages_prefix])
parameters = (
subprocess.list2cmdline(args)
.lstrip(fake_exe)
.lstrip(" ")
)
# Execute command and ask for administrator's rights
process_info = ShellExecuteEx(
nShow=win32con.SW_SHOWNORMAL,
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
lpVerb="runas",
lpFile=python_executable,
lpParameters=parameters,
lpDirectory=os.path.dirname(python_executable)
)
process_handle = process_info["hProcess"]
win32event.WaitForSingleObject(process_handle, win32event.INFINITE)
returncode = win32process.GetExitCodeProcess(process_handle)
return returncode == 0
except pywintypes.error:
pass
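# Effective elevated command sketch (illustrative paths and versions): the
# ShellExecuteEx call above ends up running something like
#     <blender>/4.1/python/bin/python.exe -m pip install --ignore-installed
#         PySide6==6.6.3 --prefix <blender>/4.1/python
# with administrator rights requested through the "runas" verb.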
def install_pyside(
self,
python_executable,
qt_binding,
qt_binding_version,
):
"""Install Qt binding python module to blender's python."""
if qt_binding_version:
qt_binding = f"{qt_binding}=={qt_binding_version}"
try:
# Parameters
# - use "-m pip" as module pip to install qt binding and argument
# "--ignore-installed" is to force install module to blender's
# site-packages and make sure it is binary compatible
# TODO find out if blender 4.x on linux/darwin does install
# qt binding to correct place.
args = [
python_executable,
"-m",
"pip",
"install",
"--ignore-installed",
qt_binding,
]
process = subprocess.Popen(
args, stdout=subprocess.PIPE, universal_newlines=True
)
process.communicate()
return process.returncode == 0
except PermissionError:
self.log.warning(
"Permission denied with command:"
"\"{}\".".format(" ".join(args))
)
except OSError as error:
self.log.warning(f"OS error has occurred: \"{error}\".")
except subprocess.SubprocessError:
pass
def is_pyside_installed(self, python_executable, qt_binding):
"""Check if PySide2 module is in blender's pip list.
Check that PySide2 is installed directly in blender's site-packages.
It is possible that it is installed in user's site-packages but that
may be incompatible with blender's python.
"""
qt_binding_low = qt_binding.lower()
# Get pip list from blender's python executable
args = [python_executable, "-m", "pip", "list"]
process = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
lines = stdout.decode().split(os.linesep)
# The second line contains dashes that define the maximum length of the
# module name. The second column of dashes defines the maximum length of
# the module version.
package_dashes, *_ = lines[1].split(" ")
package_len = len(package_dashes)
# Go through the printed lines, starting at line 3
for idx in range(2, len(lines)):
line = lines[idx]
if not line:
continue
package_name = line[0:package_len].strip()
if package_name.lower() == qt_binding_low:
return True
return False
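# The `pip list` output parsed above looks like this (illustrative):
#     Package    Version
#     ---------- -------
#     pip        23.2.1
#     PySide6    6.6.3
# The dashes on the second line give the width of the "Package" column, which
# is used to slice the package name out of every following line.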

View file

@ -0,0 +1,29 @@
import subprocess
from ayon_applications import PreLaunchHook, LaunchTypes
class BlenderConsoleWindows(PreLaunchHook):
"""Foundry applications have specific way how to launch them.
Blender is executed "like" python process so it is required to pass
`CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console.
At the same time the newly created console won't create it's own stdout
and stderr handlers so they should not be redirected to DEVNULL.
"""
# Should be the last hook because it must change launch arguments to string
order = 1000
app_groups = {"blender"}
platforms = {"windows"}
launch_types = {LaunchTypes.local}
def execute(self):
# Change `creationflags` to CREATE_NEW_CONSOLE
# - on Windows blender will create a new window using its console
# Set `stdout` and `stderr` to None so the newly created console does not
# have its output redirected to DEVNULL in the build
self.launch_context.kwargs.update({
"creationflags": subprocess.CREATE_NEW_CONSOLE,
"stdout": None,
"stderr": None
})

View file

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
"""Converter for legacy Houdini products."""
from ayon_core.pipeline.create.creator_plugins import ProductConvertorPlugin
from ayon_blender.api.lib import imprint
class BlenderLegacyConvertor(ProductConvertorPlugin):
"""Find and convert any legacy products in the scene.
This Converter will find all legacy products in the scene and will
transform them to the current system. Since the old products don't
retain any information about their original creators, the only mapping
we can do is based on their product types.
Its limitation is that multiple creators can create products of the same
product type and there is no way to tell them apart. This code should
nevertheless cover all creators that came with OpenPype.
"""
identifier = "io.openpype.creators.blender.legacy"
product_type_to_id = {
"action": "io.openpype.creators.blender.action",
"camera": "io.openpype.creators.blender.camera",
"animation": "io.openpype.creators.blender.animation",
"blendScene": "io.openpype.creators.blender.blendscene",
"layout": "io.openpype.creators.blender.layout",
"model": "io.openpype.creators.blender.model",
"pointcache": "io.openpype.creators.blender.pointcache",
"render": "io.openpype.creators.blender.render",
"review": "io.openpype.creators.blender.review",
"rig": "io.openpype.creators.blender.rig",
}
def __init__(self, *args, **kwargs):
super(BlenderLegacyConvertor, self).__init__(*args, **kwargs)
self.legacy_instances = {}
def find_instances(self):
"""Find legacy products in the scene.
Legacy products are the ones that don't have the `creator_identifier`
parameter on them.
This is using cached entries done in
:py:meth:`~BlenderCreator.cache_instance_data()`
"""
self.legacy_instances = self.collection_shared_data.get(
"blender_cached_legacy_instances")
if not self.legacy_instances:
return
self.add_convertor_item(
"Found {} incompatible product{}".format(
len(self.legacy_instances),
"s" if len(self.legacy_instances) > 1 else ""
)
)
def convert(self):
"""Convert all legacy products to current.
It is enough to add `creator_identifier` and `instance_node`.
"""
if not self.legacy_instances:
return
for product_type, instance_nodes in self.legacy_instances.items():
if product_type in self.product_type_to_id:
for instance_node in instance_nodes:
creator_identifier = self.product_type_to_id[product_type]
self.log.info(
"Converting {} to {}".format(instance_node.name,
creator_identifier)
)
imprint(instance_node, data={
"creator_identifier": creator_identifier
})

View file

@ -0,0 +1,41 @@
"""Create an animation asset."""
import bpy
from ayon_blender.api import lib, plugin
class CreateAction(plugin.BlenderCreator):
"""Action output for character rigs."""
identifier = "io.openpype.creators.blender.action"
label = "Action"
product_type = "action"
icon = "male"
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
product_name, instance_data, pre_create_data
)
# Get instance name
name = plugin.prepare_scene_name(
instance_data["folderPath"], product_name
)
if pre_create_data.get("use_selection"):
for obj in lib.get_selection():
if (obj.animation_data is not None
and obj.animation_data.action is not None):
empty_obj = bpy.data.objects.new(name=name,
object_data=None)
empty_obj.animation_data_create()
empty_obj.animation_data.action = obj.animation_data.action
empty_obj.animation_data.action.name = name
collection.objects.link(empty_obj)
return collection

View file

@ -0,0 +1,32 @@
"""Create an animation asset."""
from ayon_blender.api import plugin, lib
class CreateAnimation(plugin.BlenderCreator):
"""Animation output for character rigs."""
identifier = "io.openpype.creators.blender.animation"
label = "Animation"
product_type = "animation"
icon = "male"
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
product_name, instance_data, pre_create_data
)
if pre_create_data.get("use_selection"):
selected = lib.get_selection()
for obj in selected:
collection.objects.link(obj)
elif pre_create_data.get("asset_group"):
# Use for Load Blend automated creation of animation instances
# upon loading rig files
obj = pre_create_data.get("asset_group")
collection.objects.link(obj)
return collection

View file

@ -0,0 +1,34 @@
"""Create a Blender scene asset."""
import bpy
from ayon_blender.api import plugin, lib
class CreateBlendScene(plugin.BlenderCreator):
"""Generic group of assets."""
identifier = "io.openpype.creators.blender.blendscene"
label = "Blender Scene"
product_type = "blendScene"
icon = "cubes"
maintain_selection = False
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
instance_node = super().create(product_name,
instance_data,
pre_create_data)
if pre_create_data.get("use_selection"):
selection = lib.get_selection(include_collections=True)
for data in selection:
if isinstance(data, bpy.types.Collection):
instance_node.children.link(data)
elif isinstance(data, bpy.types.Object):
instance_node.objects.link(data)
return instance_node

View file

@ -0,0 +1,42 @@
"""Create a camera asset."""
import bpy
from ayon_blender.api import plugin, lib
from ayon_blender.api.pipeline import AVALON_INSTANCES
class CreateCamera(plugin.BlenderCreator):
"""Polygonal static geometry."""
identifier = "io.openpype.creators.blender.camera"
label = "Camera"
product_type = "camera"
icon = "video-camera"
create_as_asset_group = True
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
asset_group = super().create(product_name,
instance_data,
pre_create_data)
bpy.context.view_layer.objects.active = asset_group
if pre_create_data.get("use_selection"):
for obj in lib.get_selection():
obj.parent = asset_group
else:
plugin.deselect_all()
camera = bpy.data.cameras.new(product_name)
camera_obj = bpy.data.objects.new(product_name, camera)
instances = bpy.data.collections.get(AVALON_INSTANCES)
instances.objects.link(camera_obj)
bpy.context.view_layer.objects.active = asset_group
camera_obj.parent = asset_group
return asset_group

View file

@ -0,0 +1,32 @@
"""Create a layout asset."""
import bpy
from ayon_blender.api import plugin, lib
class CreateLayout(plugin.BlenderCreator):
"""Layout output for character rigs."""
identifier = "io.openpype.creators.blender.layout"
label = "Layout"
product_type = "layout"
icon = "cubes"
create_as_asset_group = True
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
asset_group = super().create(product_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
for obj in lib.get_selection():
obj.parent = asset_group
return asset_group

View file

@ -0,0 +1,31 @@
"""Create a model asset."""
import bpy
from ayon_blender.api import plugin, lib
class CreateModel(plugin.BlenderCreator):
"""Polygonal static geometry."""
identifier = "io.openpype.creators.blender.model"
label = "Model"
product_type = "model"
icon = "cube"
create_as_asset_group = True
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
asset_group = super().create(product_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
for obj in lib.get_selection():
obj.parent = asset_group
return asset_group

View file

@ -0,0 +1,29 @@
"""Create a pointcache asset."""
from ayon_blender.api import plugin, lib
class CreatePointcache(plugin.BlenderCreator):
"""Polygonal static geometry."""
identifier = "io.openpype.creators.blender.pointcache"
label = "Point Cache"
product_type = "pointcache"
icon = "gears"
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
product_name, instance_data, pre_create_data
)
if pre_create_data.get("use_selection"):
objects = lib.get_selection()
for obj in objects:
collection.objects.link(obj)
if obj.type == 'EMPTY':
objects.extend(obj.children)
return collection

View file

@ -0,0 +1,45 @@
"""Create render."""
import bpy
from ayon_core.lib import version_up
from ayon_blender.api import plugin
from ayon_blender.api.render_lib import prepare_rendering
from ayon_blender.api.workio import save_file
class CreateRenderlayer(plugin.BlenderCreator):
"""Single baked camera."""
identifier = "io.openpype.creators.blender.render"
label = "Render"
product_type = "render"
icon = "eye"
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
try:
# Run parent create method
collection = super().create(
product_name, instance_data, pre_create_data
)
prepare_rendering(collection)
except Exception:
# Remove the instance if there was an error
bpy.data.collections.remove(collection)
raise
# TODO: this is undesirable, but it's the only way to be sure that
# the file is saved before the render starts.
# Blender, by design, doesn't set the file as dirty if modifications
# happen by script. So, when creating the instance and setting the
# render settings, the file is not marked as dirty. This means that
# there is the risk of sending to deadline a file without the right
# settings. Even the validator to check that the file is saved will
# detect the file as saved, even if it isn't. The only solution for
# now is to force the file to be saved.
filepath = version_up(bpy.data.filepath)
save_file(filepath, copy=False)
return collection

View file

@ -0,0 +1,27 @@
"""Create review."""
from ayon_blender.api import plugin, lib
class CreateReview(plugin.BlenderCreator):
"""Single baked camera."""
identifier = "io.openpype.creators.blender.review"
label = "Review"
product_type = "review"
icon = "video-camera"
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
# Run parent create method
collection = super().create(
product_name, instance_data, pre_create_data
)
if pre_create_data.get("use_selection"):
selected = lib.get_selection()
for obj in selected:
collection.objects.link(obj)
return collection

View file

@ -0,0 +1,31 @@
"""Create a rig asset."""
import bpy
from ayon_blender.api import plugin, lib
class CreateRig(plugin.BlenderCreator):
"""Artist-friendly rig with controls to direct motion."""
identifier = "io.openpype.creators.blender.rig"
label = "Rig"
product_type = "rig"
icon = "wheelchair"
create_as_asset_group = True
def create(
self, product_name: str, instance_data: dict, pre_create_data: dict
):
asset_group = super().create(product_name,
instance_data,
pre_create_data)
# Add selected objects to instance
if pre_create_data.get("use_selection"):
bpy.context.view_layer.objects.active = asset_group
for obj in lib.get_selection():
obj.parent = asset_group
return asset_group

Some files were not shown because too many files have changed in this diff.