Mirror of https://github.com/ynput/ayon-core.git

Commit fac0961696: Merge branch 'develop' into enhancement/AY-4913_Move-Fusion-client-code
101 changed files with 362 additions and 82 deletions
server_addon/photoshop/client/ayon_photoshop/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from .version import __version__
from .addon import (
    PHOTOSHOP_ADDON_ROOT,
    PhotoshopAddon,
    get_launch_script_path,
)


__all__ = (
    "__version__",

    "PHOTOSHOP_ADDON_ROOT",
    "PhotoshopAddon",
    "get_launch_script_path",
)
server_addon/photoshop/client/ayon_photoshop/addon.py (new file, 38 lines)
@@ -0,0 +1,38 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__

PHOTOSHOP_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


class PhotoshopAddon(AYONAddon, IHostAddon):
    name = "photoshop"
    version = __version__
    host_name = "photoshop"

    def add_implementation_envs(self, env, _app):
        """Modify environments to contain all required for implementation."""
        defaults = {
            "AYON_LOG_NO_COLORS": "1",
            "WEBSOCKET_URL": "ws://localhost:8099/ws/"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

    def get_workfile_extensions(self):
        return [".psd", ".psb"]

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(PHOTOSHOP_ADDON_ROOT, "hooks")
        ]


def get_launch_script_path():
    return os.path.join(
        PHOTOSHOP_ADDON_ROOT, "api", "launch_script.py"
    )
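A minimal standalone sketch of the defaulting behavior in `add_implementation_envs` above: existing non-empty values win, while missing or empty keys receive the defaults (the pre-populated `env` contents here are illustrative, not from the commit).

```python
defaults = {
    "AYON_LOG_NO_COLORS": "1",
    "WEBSOCKET_URL": "ws://localhost:8099/ws/"
}
# Hypothetical env that already pins its own websocket URL.
env = {"WEBSOCKET_URL": "ws://localhost:9000/ws/", "AYON_LOG_NO_COLORS": ""}
for key, value in defaults.items():
    if not env.get(key):  # missing or empty -> apply the default
        env[key] = value

assert env["WEBSOCKET_URL"] == "ws://localhost:9000/ws/"  # preserved
assert env["AYON_LOG_NO_COLORS"] == "1"  # empty value replaced by default
```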
server_addon/photoshop/client/ayon_photoshop/api/README.md (new file, 257 lines)
@@ -0,0 +1,257 @@
# Photoshop Integration

## Setup

The Photoshop integration requires two components to work: `extension` and `server`.

### Extension

To install the extension, download the [Extension Manager Command Line tool (ExManCmd)](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#option-2---exmancmd) and run:

```
ExManCmd /install {path to addon}/api/extension.zxp
```

### Server

The easiest way to get the server running and Photoshop launched is:

```
python -c ^"import ayon_photoshop;ayon_photoshop.launch(""C:\Program Files\Adobe\Adobe Photoshop 2020\Photoshop.exe"")^"
```

`ayon_photoshop.launch` launches the application and the server, and also closes the server when Photoshop exits.
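The same entry point can be called from your own Python code; a minimal sketch assuming the `ayon_photoshop.launch` function referenced above:

```python
import ayon_photoshop

# Starts the WebSocket server and launches Photoshop; the server is closed
# again when Photoshop exits. The executable path is only an example and
# depends on the installed Photoshop version.
ayon_photoshop.launch(
    "C:\\Program Files\\Adobe\\Adobe Photoshop 2020\\Photoshop.exe"
)
```
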
## Usage

The Photoshop extension can be found under `Window > Extensions > Ayon`. Once launched you should be presented with a panel like this:



## Developing

### Extension
When developing the extension you can load it [unsigned](https://github.com/Adobe-CEP/CEP-Resources/blob/master/CEP_9.x/Documentation/CEP%209.0%20HTML%20Extension%20Cookbook.md#debugging-unsigned-extensions).

When signing the extension you can use this [guide](https://github.com/Adobe-CEP/Getting-Started-guides/tree/master/Package%20Distribute%20Install#package-distribute-install-guide).

```
ZXPSignCmd -selfSignedCert NA NA Ayon Ayon-Photoshop Ayon extension.p12
ZXPSignCmd -sign {path to avalon-core}\avalon\photoshop\extension {path to avalon-core}\avalon\photoshop\extension.zxp extension.p12 avalon
```

### Plugin Examples

These plugins were made with the [polly config](https://github.com/mindbender-studio/config). To fully integrate and load them, you will have to use this config and add `image` to the [integration plugin](https://github.com/mindbender-studio/config/blob/master/polly/plugins/publish/integrate_asset.py).

#### Creator Plugin
```python
from avalon import photoshop


class CreateImage(photoshop.Creator):
    """Image folder for publish."""

    name = "imageDefault"
    label = "Image"
    product_type = "image"

    def __init__(self, *args, **kwargs):
        super(CreateImage, self).__init__(*args, **kwargs)
```

#### Collector Plugin
```python
import pythoncom

import pyblish.api

# NOTE: the exact import path of PhotoshopClientStub may differ per addon
# layout; adjust to where the stub class lives in your installation.
from ayon_photoshop.api.ws_stub import PhotoshopClientStub


class CollectInstances(pyblish.api.ContextPlugin):
    """Gather instances by LayerSet and file metadata

    This collector takes into account assets that are associated with
    a LayerSet and marked with a unique identifier;

    Identifier:
        id (str): "ayon.create.instance"
    """

    label = "Instances"
    order = pyblish.api.CollectorOrder
    hosts = ["photoshop"]
    families_mapping = {
        "image": []
    }

    def process(self, context):
        # Necessary call when running in a different thread which pyblish-qml
        # can be.
        pythoncom.CoInitialize()

        photoshop_client = PhotoshopClientStub()
        layers = photoshop_client.get_layers()
        layers_meta = photoshop_client.get_layers_metadata()
        for layer in layers:
            layer_data = photoshop_client.read(layer, layers_meta)

            # Skip layers without metadata.
            if layer_data is None:
                continue

            # Skip containers.
            if "container" in layer_data["id"]:
                continue

            # child_layers = [*layer.Layers]
            # self.log.debug("child_layers {}".format(child_layers))
            # if not child_layers:
            #     self.log.info("%s skipped, it was empty." % layer.Name)
            #     continue

            instance = context.create_instance(layer.name)
            instance.append(layer)
            instance.data.update(layer_data)
            instance.data["families"] = self.families_mapping[
                layer_data["productType"]
            ]
            instance.data["publish"] = layer.visible

            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
            self.log.info("Found: \"%s\" " % instance.data["name"])
```

#### Extractor Plugin
```python
import os

from ayon_core.pipeline import publish
from ayon_photoshop import api as photoshop


class ExtractImage(publish.Extractor):
    """Produce a flattened image file from instance

    This plug-in takes into account only the layers in the group.
    """

    label = "Extract Image"
    hosts = ["photoshop"]
    families = ["image"]
    formats = ["png", "jpg"]

    def process(self, instance):
        staging_dir = self.staging_dir(instance)
        self.log.info("Outputting image to {}".format(staging_dir))

        # Perform extraction
        stub = photoshop.stub()
        files = {}
        with photoshop.maintained_selection():
            self.log.info("Extracting %s" % str(list(instance)))
            with photoshop.maintained_visibility():
                # Hide all other layers.
                extract_ids = set(
                    ll.id
                    for ll in stub.get_layers_in_layers([instance[0]])
                )

                for layer in stub.get_layers():
                    # limit unnecessary calls to client
                    if layer.visible and layer.id not in extract_ids:
                        stub.set_visible(layer.id, False)

                save_options = []
                if "png" in self.formats:
                    save_options.append('png')
                if "jpg" in self.formats:
                    save_options.append('jpg')

                file_basename = os.path.splitext(
                    stub.get_active_document_name()
                )[0]
                for extension in save_options:
                    _filename = "{}.{}".format(file_basename, extension)
                    files[extension] = _filename

                    full_filename = os.path.join(staging_dir, _filename)
                    stub.saveAs(full_filename, extension, True)

        representations = []
        for extension, filename in files.items():
            representations.append({
                "name": extension,
                "ext": extension,
                "files": filename,
                "stagingDir": staging_dir
            })
        instance.data["representations"] = representations
        instance.data["stagingDir"] = staging_dir

        self.log.info(f"Extracted {instance} to {staging_dir}")
```

#### Loader Plugin
```python
from avalon import api, photoshop
from ayon_core.pipeline import load, get_representation_path

stub = photoshop.stub()


class ImageLoader(load.LoaderPlugin):
    """Load images

    Stores the imported asset in a container named after the asset.
    """

    families = ["image"]
    representations = {"*"}

    def load(self, context, name=None, namespace=None, data=None):
        path = self.filepath_from_context(context)
        with photoshop.maintained_selection():
            layer = stub.import_smart_object(path)

        self[:] = [layer]

        return photoshop.containerise(
            name,
            namespace,
            layer,
            context,
            self.__class__.__name__
        )

    def update(self, container, context):
        layer = container.pop("layer")
        repre_entity = context["representation"]
        with photoshop.maintained_selection():
            stub.replace_smart_object(
                layer, get_representation_path(repre_entity)
            )

        stub.imprint(
            layer, {"representation": repre_entity["id"]}
        )

    def remove(self, container):
        container["layer"].Delete()

    def switch(self, container, context):
        self.update(container, context)
```

For easier debugging of the JavaScript side, see
https://community.adobe.com/t5/download-install/adobe-extension-debuger-problem/td-p/10911704?page=1 :
add `--enable-blink-features=ShadowDOMV0,CustomElementsV0` when starting Chrome,
then open localhost:8078 (the port is set in `photoshop\extension\.debug`).

Or use Visual Studio Code: https://medium.com/adobetech/extendscript-debugger-for-visual-studio-code-public-release-a2ff6161fa01

Or install the CEF client from https://github.com/Adobe-CEP/CEP-Resources/tree/master/CEP_9.x

## Resources
  - https://github.com/lohriialo/photoshop-scripting-python
  - https://www.adobe.com/devnet/photoshop/scripting.html
  - https://github.com/Adobe-CEP/Getting-Started-guides
  - https://github.com/Adobe-CEP/CEP-Resources
server_addon/photoshop/client/ayon_photoshop/api/__init__.py (new file, 41 lines)
@@ -0,0 +1,41 @@
"""Public API
|
||||
|
||||
Anything that isn't defined here is INTERNAL and unreliable for external use.
|
||||
|
||||
"""
|
||||
|
||||
from .launch_logic import stub
|
||||
|
||||
from .pipeline import (
|
||||
PhotoshopHost,
|
||||
ls,
|
||||
containerise
|
||||
)
|
||||
from .plugin import (
|
||||
PhotoshopLoader,
|
||||
get_unique_layer_name
|
||||
)
|
||||
|
||||
|
||||
from .lib import (
|
||||
maintained_selection,
|
||||
maintained_visibility
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# launch_logic
|
||||
"stub",
|
||||
|
||||
# pipeline
|
||||
"PhotoshopHost",
|
||||
"ls",
|
||||
"containerise",
|
||||
|
||||
# Plugin
|
||||
"PhotoshopLoader",
|
||||
"get_unique_layer_name",
|
||||
|
||||
# lib
|
||||
"maintained_selection",
|
||||
"maintained_visibility",
|
||||
]
|
||||
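A short sketch of how this public API is typically consumed by plugins, following the same pattern as the extractor example in the README above (the layer name "Hero" is illustrative only):

```python
from ayon_photoshop import api as photoshop

# Communication stub to the running Photoshop extension
# (assumes the WebSocket server and extension are up).
stub = photoshop.stub()
with photoshop.maintained_selection():
    with photoshop.maintained_visibility():
        # Temporarily toggle visibility; restored on context exit.
        for layer in stub.get_layers():
            stub.set_visible(layer.id, layer.name == "Hero")
```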
server_addon/photoshop/client/ayon_photoshop/api/extension.zxp (new binary file)
Binary file not shown.
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<ExtensionList>
    <Extension Id="io.ynput.PS.panel">
        <HostList>
            <Host Name="PHXS" Port="8078"/>
            <Host Name="FLPR" Port="8078"/>
        </HostList>
    </Extension>
</ExtensionList>
@@ -0,0 +1,53 @@
<?xml version='1.0' encoding='UTF-8'?>
<ExtensionManifest ExtensionBundleId="io.ynput.PS.panel" ExtensionBundleVersion="1.1.0" Version="7.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <ExtensionList>
        <Extension Id="io.ynput.PS.panel" Version="1.0.1" />
    </ExtensionList>
    <ExecutionEnvironment>
        <HostList>
            <Host Name="PHSP" Version="19" />
            <Host Name="PHXS" Version="19" />
        </HostList>
        <LocaleList>
            <Locale Code="All" />
        </LocaleList>
        <RequiredRuntimeList>
            <RequiredRuntime Name="CSXS" Version="7.0" />
        </RequiredRuntimeList>
    </ExecutionEnvironment>
    <DispatchInfoList>
        <Extension Id="io.ynput.PS.panel">
            <DispatchInfo>
                <Resources>
                    <MainPath>./index.html</MainPath>
                    <CEFCommandLine />
                </Resources>
                <Lifecycle>
                    <AutoVisible>true</AutoVisible>
                    <StartOn>
                        <!-- Photoshop dispatches this event on startup -->
                        <Event>applicationActivate</Event>
                        <Event>com.adobe.csxs.events.ApplicationInitialized</Event>
                    </StartOn>
                </Lifecycle>
                <UI>
                    <Type>Panel</Type>
                    <Menu>AYON</Menu>
                    <Geometry>
                        <Size>
                            <Width>300</Width>
                            <Height>140</Height>
                        </Size>
                        <MaxSize>
                            <Width>400</Width>
                            <Height>200</Height>
                        </MaxSize>
                    </Geometry>
                    <Icons>
                        <Icon Type="Normal">./icons/ayon_logo.png</Icon>
                    </Icons>
                </UI>
            </DispatchInfo>
        </Extension>
    </DispatchInfoList>
</ExtensionManifest>
File diff suppressed because it is too large
@@ -0,0 +1,300 @@
// client facing part of extension, creates WSRPC client (jsx cannot
// do that)
// consumes RPC calls from server (OpenPype), calls ./host/index.jsx and
// returns values back (in json format)

var logReturn = function(result){ log.warn('Result: ' + result);};

var csInterface = new CSInterface();

log.warn("script start");

WSRPC.DEBUG = false;
WSRPC.TRACE = false;

function myCallBack(){
    log.warn("Triggered index.jsx");
}
// importing through manifest.xml isn't working because relative paths
// possibly TODO
jsx.evalFile('./host/index.jsx', myCallBack);

function runEvalScript(script) {
    // because of asynchronous nature of functions in jsx
    // this waits for response
    return new Promise(function(resolve, reject){
        csInterface.evalScript(script, resolve);
    });
}

/** main entry point **/
startUp("WEBSOCKET_URL");

// get websocket server url from environment value
async function startUp(url){
    log.warn("url", url);
    var promise = runEvalScript("getEnv('" + url + "')");

    var res = await promise;
    // run rest only after resolved promise
    main(res);
}

function get_extension_version(){
    /** Returns version number from extension manifest.xml **/
    log.debug("get_extension_version");
    var path = csInterface.getSystemPath(SystemPath.EXTENSION);
    log.debug("extension path " + path);

    var result = window.cep.fs.readFile(path + "/CSXS/manifest.xml");
    var version = undefined;
    if(result.err === 0){
        if (window.DOMParser) {
            const parser = new DOMParser();
            const xmlDoc = parser.parseFromString(result.data.toString(), 'text/xml');
            const children = xmlDoc.children;

            for (let i = 0; i < children.length; i++) {
                if (children[i] && children[i].getAttribute('ExtensionBundleVersion')) {
                    version = children[i].getAttribute('ExtensionBundleVersion');
                }
            }
        }
    }
    return version;
}

function main(websocket_url){
    // creates connection to 'websocket_url', registers routes
    log.warn("websocket_url", websocket_url);
    var default_url = 'ws://localhost:8099/ws/';

    if (websocket_url == ''){
        websocket_url = default_url;
    }
    log.warn("connecting to:", websocket_url);
    var RPC = new WSRPC(websocket_url, 5000); // spin connection

    RPC.connect();

    log.warn("connected");

    function EscapeStringForJSX(str){
        // Replaces:
        //  \ with \\
        //  ' with \'
        //  " with \"
        // See: https://stackoverflow.com/a/3967927/5285364
        return str.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"');
    }

    RPC.addRoute('Photoshop.open', function (data) {
        log.warn('Server called client route "open":', data);
        var escapedPath = EscapeStringForJSX(data.path);
        return runEvalScript("fileOpen('" + escapedPath + "')")
            .then(function(result){
                log.warn("open: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.read', function (data) {
        log.warn('Server called client route "read":', data);
        return runEvalScript("getHeadline()")
            .then(function(result){
                log.warn("getHeadline: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_layers', function (data) {
        log.warn('Server called client route "get_layers":', data);
        return runEvalScript("getLayers()")
            .then(function(result){
                log.warn("getLayers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.set_visible', function (data) {
        log.warn('Server called client route "set_visible":', data);
        return runEvalScript("setVisible(" + data.layer_id + ", " +
                             data.visibility + ")")
            .then(function(result){
                log.warn("setVisible: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_active_document_name', function (data) {
        log.warn('Server called client route "get_active_document_name":',
                 data);
        return runEvalScript("getActiveDocumentName()")
            .then(function(result){
                log.warn("save: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_active_document_full_name', function (data) {
        log.warn('Server called client route ' +
                 '"get_active_document_full_name":', data);
        return runEvalScript("getActiveDocumentFullName()")
            .then(function(result){
                log.warn("save: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.save', function (data) {
        log.warn('Server called client route "save":', data);

        return runEvalScript("save()")
            .then(function(result){
                log.warn("save: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_selected_layers', function (data) {
        log.warn('Server called client route "get_selected_layers":', data);

        return runEvalScript("getSelectedLayers()")
            .then(function(result){
                log.warn("get_selected_layers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.create_group', function (data) {
        log.warn('Server called client route "create_group":', data);

        return runEvalScript("createGroup('" + data.name + "')")
            .then(function(result){
                log.warn("createGroup: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.group_selected_layers', function (data) {
        log.warn('Server called client route "group_selected_layers":',
                 data);

        return runEvalScript("groupSelectedLayers(null, " +
                             "'" + data.name + "')")
            .then(function(result){
                log.warn("group_selected_layers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.import_smart_object', function (data) {
        log.warn('Server called client "import_smart_object":', data);
        var escapedPath = EscapeStringForJSX(data.path);
        return runEvalScript("importSmartObject('" + escapedPath + "', " +
                             "'" + data.name + "'," +
                             data.as_reference + ")")
            .then(function(result){
                log.warn("import_smart_object: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.replace_smart_object', function (data) {
        log.warn('Server called route "replace_smart_object":', data);
        var escapedPath = EscapeStringForJSX(data.path);
        return runEvalScript("replaceSmartObjects(" + data.layer_id + "," +
                             "'" + escapedPath + "'," +
                             "'" + data.name + "')")
            .then(function(result){
                log.warn("replaceSmartObjects: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.delete_layer', function (data) {
        log.warn('Server called route "delete_layer":', data);
        return runEvalScript("deleteLayer(" + data.layer_id + ")")
            .then(function(result){
                log.warn("delete_layer: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.rename_layer', function (data) {
        log.warn('Server called route "rename_layer":', data);
        return runEvalScript("renameLayer(" + data.layer_id + ", " +
                             "'" + data.name + "')")
            .then(function(result){
                log.warn("rename_layer: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.select_layers', function (data) {
        log.warn('Server called client route "select_layers":', data);

        return runEvalScript("selectLayers('" + data.layers + "')")
            .then(function(result){
                log.warn("select_layers: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.is_saved', function (data) {
        log.warn('Server called client route "is_saved":', data);

        return runEvalScript("isSaved()")
            .then(function(result){
                log.warn("is_saved: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.saveAs', function (data) {
        log.warn('Server called client route "saveAsJPEG":', data);
        var escapedPath = EscapeStringForJSX(data.image_path);
        return runEvalScript("saveAs('" + escapedPath + "', " +
                             "'" + data.ext + "', " +
                             data.as_copy + ")")
            .then(function(result){
                log.warn("save: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.imprint', function (data) {
        log.warn('Server called client route "imprint":', data);
        var escaped = data.payload.replace(/\n/g, "\\n");
        return runEvalScript("imprint('" + escaped + "')")
            .then(function(result){
                log.warn("imprint: " + result);
                return result;
            });
    });

    RPC.addRoute('Photoshop.get_extension_version', function (data) {
        log.warn('Server called client route "get_extension_version":', data);
        return get_extension_version();
    });

    RPC.addRoute('Photoshop.close', function (data) {
        log.warn('Server called client route "close":', data);
        return runEvalScript("close()");
    });

    RPC.call('Photoshop.ping').then(function (data) {
        log.warn('Result for calling server route "ping": ', data);
        return runEvalScript("ping()")
            .then(function(result){
                log.warn("ping: " + result);
                return result;
            });

    }, function (error) {
        log.warn(error);
    });

}

log.warn("end script");
server_addon/photoshop/client/ayon_photoshop/api/extension/client/loglevel.min.js (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
/*! loglevel - v1.6.8 - https://github.com/pimterry/loglevel - (c) 2020 Tim Perry - licensed MIT */
!function(a,b){"use strict";"function"==typeof define&&define.amd?define(b):"object"==typeof module&&module.exports?module.exports=b():a.log=b()}(this,function(){"use strict";function a(a,b){var c=a[b];if("function"==typeof c.bind)return c.bind(a);try{return Function.prototype.bind.call(c,a)}catch(b){return function(){return Function.prototype.apply.apply(c,[a,arguments])}}}function b(){console.log&&(console.log.apply?console.log.apply(console,arguments):Function.prototype.apply.apply(console.log,[console,arguments])),console.trace&&console.trace()}function c(c){return"debug"===c&&(c="log"),typeof console!==i&&("trace"===c&&j?b:void 0!==console[c]?a(console,c):void 0!==console.log?a(console,"log"):h)}function d(a,b){for(var c=0;c<k.length;c++){var d=k[c];this[d]=c<a?h:this.methodFactory(d,a,b)}this.log=this.debug}function e(a,b,c){return function(){typeof console!==i&&(d.call(this,b,c),this[a].apply(this,arguments))}}function f(a,b,d){return c(a)||e.apply(this,arguments)}function g(a,b,c){function e(a){var b=(k[a]||"silent").toUpperCase();if(typeof window!==i){try{return void(window.localStorage[l]=b)}catch(a){}try{window.document.cookie=encodeURIComponent(l)+"="+b+";"}catch(a){}}}function g(){var a;if(typeof window!==i){try{a=window.localStorage[l]}catch(a){}if(typeof a===i)try{var b=window.document.cookie,c=b.indexOf(encodeURIComponent(l)+"=");-1!==c&&(a=/^([^;]+)/.exec(b.slice(c))[1])}catch(a){}return void 0===j.levels[a]&&(a=void 0),a}}var h,j=this,l="loglevel";a&&(l+=":"+a),j.name=a,j.levels={TRACE:0,DEBUG:1,INFO:2,WARN:3,ERROR:4,SILENT:5},j.methodFactory=c||f,j.getLevel=function(){return h},j.setLevel=function(b,c){if("string"==typeof b&&void 0!==j.levels[b.toUpperCase()]&&(b=j.levels[b.toUpperCase()]),!("number"==typeof b&&b>=0&&b<=j.levels.SILENT))throw"log.setLevel() called with invalid level: "+b;if(h=b,!1!==c&&e(b),d.call(j,b,a),typeof console===i&&b<j.levels.SILENT)return"No console available for logging"},j.setDefaultLevel=function(a){g()||j.setLevel(a,!1)},j.enableAll=function(a){j.setLevel(j.levels.TRACE,a)},j.disableAll=function(a){j.setLevel(j.levels.SILENT,a)};var m=g();null==m&&(m=null==b?"WARN":b),j.setLevel(m,!1)}var h=function(){},i="undefined",j=typeof window!==i&&typeof window.navigator!==i&&/Trident\/|MSIE /.test(window.navigator.userAgent),k=["trace","debug","info","warn","error"],l=new g,m={};l.getLogger=function(a){if("string"!=typeof a||""===a)throw new TypeError("You must supply a name when creating a logger.");var b=m[a];return b||(b=m[a]=new g(a,l.getLevel(),l.methodFactory)),b};var n=typeof window!==i?window.log:void 0;return l.noConflict=function(){return typeof window!==i&&window.log===l&&(window.log=n),l},l.getLoggers=function(){return m},l});
@@ -0,0 +1,393 @@
(function (global, factory) {
    typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
    typeof define === 'function' && define.amd ? define(factory) :
    (global = global || self, global.WSRPC = factory());
}(this, function () { 'use strict';

    function _classCallCheck(instance, Constructor) {
        if (!(instance instanceof Constructor)) {
            throw new TypeError("Cannot call a class as a function");
        }
    }

    var Deferred = function Deferred() {
        _classCallCheck(this, Deferred);

        var self = this;
        self.resolve = null;
        self.reject = null;
        self.done = false;

        function wrapper(func) {
            return function () {
                if (self.done) throw new Error('Promise already done');
                self.done = true;
                return func.apply(this, arguments);
            };
        }

        self.promise = new Promise(function (resolve, reject) {
            self.resolve = wrapper(resolve);
            self.reject = wrapper(reject);
        });

        self.promise.isPending = function () {
            return !self.done;
        };

        return self;
    };

    function logGroup(group, level, args) {
        console.group(group);
        console[level].apply(this, args);
        console.groupEnd();
    }

    function log() {
        if (!WSRPC.DEBUG) return;
        logGroup('WSRPC.DEBUG', 'trace', arguments);
    }

    function trace(msg) {
        if (!WSRPC.TRACE) return;
        var payload = msg;
        if ('data' in msg) payload = JSON.parse(msg.data);
        logGroup("WSRPC.TRACE", 'trace', [payload]);
    }

    function getAbsoluteWsUrl(url) {
        if (/^\w+:\/\//.test(url)) return url;
        if (typeof window == 'undefined' && window.location.host.length < 1) throw new Error("Can not construct absolute URL from ".concat(window.location));
        var scheme = window.location.protocol === "https:" ? "wss:" : "ws:";
        var port = window.location.port === '' ? ":".concat(window.location.port) : '';
        var host = window.location.host;
        var path = url.replace(/^\/+/gm, '');
        return "".concat(scheme, "//").concat(host).concat(port, "/").concat(path);
    }

    var readyState = Object.freeze({
        0: 'CONNECTING',
        1: 'OPEN',
        2: 'CLOSING',
        3: 'CLOSED'
    });

    var WSRPC = function WSRPC(URL) {
        var reconnectTimeout = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 1000;

        _classCallCheck(this, WSRPC);

        var self = this;
        URL = getAbsoluteWsUrl(URL);
        self.id = 1;
        self.eventId = 0;
        self.socketStarted = false;
        self.eventStore = {
            onconnect: {},
            onerror: {},
            onclose: {},
            onchange: {}
        };
        self.connectionNumber = 0;
        self.oneTimeEventStore = {
            onconnect: [],
            onerror: [],
            onclose: [],
            onchange: []
        };
        self.callQueue = [];

        function createSocket() {
            var ws = new WebSocket(URL);

            var rejectQueue = function rejectQueue() {
                self.connectionNumber++; // rejects incoming calls

                var deferred; //reject all pending calls

                while (0 < self.callQueue.length) {
                    var callObj = self.callQueue.shift();
                    deferred = self.store[callObj.id];
                    delete self.store[callObj.id];

                    if (deferred && deferred.promise.isPending()) {
                        deferred.reject('WebSocket error occurred');
                    }
                } // reject all from the store


                for (var key in self.store) {
                    if (!self.store.hasOwnProperty(key)) continue;
                    deferred = self.store[key];

                    if (deferred && deferred.promise.isPending()) {
                        deferred.reject('WebSocket error occurred');
                    }
                }
            };

            function reconnect(callEvents) {
                setTimeout(function () {
                    try {
                        self.socket = createSocket();
                        self.id = 1;
                    } catch (exc) {
                        callEvents('onerror', exc);
                        delete self.socket;
                        console.error(exc);
                    }
                }, reconnectTimeout);
            }

            ws.onclose = function (err) {
                log('ONCLOSE CALLED', 'STATE', self.public.state());
                trace(err);

                for (var serial in self.store) {
                    if (!self.store.hasOwnProperty(serial)) continue;

                    if (self.store[serial].hasOwnProperty('reject')) {
                        self.store[serial].reject('Connection closed');
                    }
                }

                rejectQueue();
                callEvents('onclose', err);
                callEvents('onchange', err);
                reconnect(callEvents);
            };

            ws.onerror = function (err) {
                log('ONERROR CALLED', 'STATE', self.public.state());
                trace(err);
                rejectQueue();
                callEvents('onerror', err);
                callEvents('onchange', err);
                log('WebSocket has been closed by error: ', err);
            };

            function tryCallEvent(func, event) {
                try {
                    return func(event);
                } catch (e) {
                    if (e.hasOwnProperty('stack')) {
                        log(e.stack);
                    } else {
                        log('Event function', func, 'raised unknown error:', e);
                    }

                    console.error(e);
                }
            }

            function callEvents(evName, event) {
                while (0 < self.oneTimeEventStore[evName].length) {
                    var deferred = self.oneTimeEventStore[evName].shift();
                    if (deferred.hasOwnProperty('resolve') && deferred.promise.isPending()) deferred.resolve();
                }

                for (var i in self.eventStore[evName]) {
                    if (!self.eventStore[evName].hasOwnProperty(i)) continue;
                    var cur = self.eventStore[evName][i];
                    tryCallEvent(cur, event);
                }
            }

            ws.onopen = function (ev) {
                log('ONOPEN CALLED', 'STATE', self.public.state());
                trace(ev);

                while (0 < self.callQueue.length) {
                    // noinspection JSUnresolvedFunction
                    self.socket.send(JSON.stringify(self.callQueue.shift(), 0, 1));
                }

                callEvents('onconnect', ev);
                callEvents('onchange', ev);
            };

            function handleCall(self, data) {
                if (!self.routes.hasOwnProperty(data.method)) throw new Error('Route not found');
                var connectionNumber = self.connectionNumber;
                var deferred = new Deferred();
                deferred.promise.then(function (result) {
                    if (connectionNumber !== self.connectionNumber) return;
                    self.socket.send(JSON.stringify({
                        id: data.id,
                        result: result
                    }));
                }, function (error) {
                    if (connectionNumber !== self.connectionNumber) return;
                    self.socket.send(JSON.stringify({
                        id: data.id,
                        error: error
                    }));
                });
                var func = self.routes[data.method];
                if (self.asyncRoutes[data.method]) return func.apply(deferred, [data.params]);

                function badPromise() {
                    throw new Error("You should register route with async flag.");
                }

                var promiseMock = {
                    resolve: badPromise,
                    reject: badPromise
                };

                try {
                    deferred.resolve(func.apply(promiseMock, [data.params]));
                } catch (e) {
                    deferred.reject(e);
                    console.error(e);
                }
            }

            function handleError(self, data) {
                if (!self.store.hasOwnProperty(data.id)) return log('Unknown callback');
                var deferred = self.store[data.id];
                if (typeof deferred === 'undefined') return log('Confirmation without handler');
                delete self.store[data.id];
                log('REJECTING', data.error);
                deferred.reject(data.error);
            }

            function handleResult(self, data) {
                var deferred = self.store[data.id];
                if (typeof deferred === 'undefined') return log('Confirmation without handler');
                delete self.store[data.id];

                if (data.hasOwnProperty('result')) {
                    return deferred.resolve(data.result);
                }

                return deferred.reject(data.error);
            }

            ws.onmessage = function (message) {
                log('ONMESSAGE CALLED', 'STATE', self.public.state());
                trace(message);
                if (message.type !== 'message') return;
                var data;

                try {
                    data = JSON.parse(message.data);
                    log(data);

                    if (data.hasOwnProperty('method')) {
                        return handleCall(self, data);
                    } else if (data.hasOwnProperty('error') && data.error === null) {
                        return handleError(self, data);
                    } else {
                        return handleResult(self, data);
                    }
                } catch (exception) {
                    var err = {
                        error: exception.message,
                        result: null,
                        id: data ? data.id : null
                    };
                    self.socket.send(JSON.stringify(err));
                    console.error(exception);
                }
            };

            return ws;
        }

        function makeCall(func, args, params) {
            self.id += 2;
            var deferred = new Deferred();
            var callObj = Object.freeze({
                id: self.id,
                method: func,
                params: args
            });
            var state = self.public.state();

            if (state === 'OPEN') {
                self.store[self.id] = deferred;
                self.socket.send(JSON.stringify(callObj));
            } else if (state === 'CONNECTING') {
                log('SOCKET IS', state);
                self.store[self.id] = deferred;
                self.callQueue.push(callObj);
            } else {
                log('SOCKET IS', state);

                if (params && params['noWait']) {
                    deferred.reject("Socket is: ".concat(state));
                } else {
                    self.store[self.id] = deferred;
                    self.callQueue.push(callObj);
                }
            }

            return deferred.promise;
        }

        self.asyncRoutes = {};
        self.routes = {};
        self.store = {};
        self.public = Object.freeze({
            call: function call(func, args, params) {
                return makeCall(func, args, params);
            },
            addRoute: function addRoute(route, callback, isAsync) {
                self.asyncRoutes[route] = isAsync || false;
                self.routes[route] = callback;
            },
            deleteRoute: function deleteRoute(route) {
                delete self.asyncRoutes[route];
                return delete self.routes[route];
            },
            addEventListener: function addEventListener(event, func) {
                var eventId = self.eventId++;
                self.eventStore[event][eventId] = func;
                return eventId;
            },
            removeEventListener: function removeEventListener(event, index) {
                if (self.eventStore[event].hasOwnProperty(index)) {
                    delete self.eventStore[event][index];
                    return true;
                } else {
                    return false;
                }
            },
            onEvent: function onEvent(event) {
                var deferred = new Deferred();
                self.oneTimeEventStore[event].push(deferred);
                return deferred.promise;
            },
            destroy: function destroy() {
                return self.socket.close();
            },
            state: function state() {
                return readyState[this.stateCode()];
            },
            stateCode: function stateCode() {
                if (self.socketStarted && self.socket) return self.socket.readyState;
                return 3;
            },
            connect: function connect() {
                self.socketStarted = true;
                self.socket = createSocket();
            }
        });
        self.public.addRoute('log', function (argsObj) {
            //console.info("Websocket sent: ".concat(argsObj));
        });
        self.public.addRoute('ping', function (data) {
            return data;
        });
        return self.public;
    };

    WSRPC.DEBUG = false;
    WSRPC.TRACE = false;

    return WSRPC;

}));
//# sourceMappingURL=wsrpc.js.map
server_addon/photoshop/client/ayon_photoshop/api/extension/client/wsrpc.min.js (vendored, new file, 1 line)
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,774 @@
|
|||
/*
|
||||
_ ______ __ _
|
||||
| / ___\ \/ / (_)___
|
||||
_ | \___ \\ / | / __|
|
||||
| |_| |___) / \ _ | \__ \
|
||||
\___/|____/_/\_(_)/ |___/
|
||||
|__/
|
||||
_ ____
|
||||
/\ /\___ _ __ ___(_) ___ _ __ |___ \
|
||||
\ \ / / _ \ '__/ __| |/ _ \| '_ \ __) |
|
||||
\ V / __/ | \__ \ | (_) | | | | / __/
|
||||
\_/ \___|_| |___/_|\___/|_| |_| |_____|
|
||||
*/
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
// JSX.js © and writtent by Trevor https://creative-scripts.com/jsx-js //
|
||||
// If you turn over is less the $50,000,000 then you don't have to pay anything //
|
||||
// License MIT, don't complain, don't sue NO MATTER WHAT //
|
||||
// If you turn over is more the $50,000,000 then you DO have to pay //
|
||||
// Contact me https://creative-scripts.com/contact for pricing and licensing //
|
||||
// Don't remove these commented lines //
|
||||
// For simple and effective calling of jsx from the js engine //
|
||||
// Version 2 last modified April 18 2018 //
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Change log: //
|
||||
// JSX.js V2 is now independent of NodeJS and CSInterface.js <span class="wp-font-emots-emo-happy"></span> //
|
||||
// forceEval is now by default true //
|
||||
// It wraps the scripts in a try catch and an eval providing useful error handling //
|
||||
// One can set in the jsx engine $.includeStack = true to return the call stack in the event of an error //
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// JSX.js for calling jsx code from the js engine //
|
||||
// 2 methods included //
|
||||
// 1) jsx.evalScript AKA jsx.eval //
|
||||
// 2) jsx.evalFile AKA jsx.file //
|
||||
// Special features //
|
||||
// 1) Allows all changes in your jsx code to be reloaded into your extension at the click of a button //
|
||||
// 2) Can enable the $.fileName property to work and provides a $.__fileName() method as an alternative //
|
||||
// 3) Can force a callBack result from InDesign //
|
||||
// 4) No more csInterface.evalScript('alert("hello "' + title + " " + name + '");') //
|
||||
// use jsx.evalScript('alert("hello __title__ __name__");', {title: title, name: name}); //
|
||||
// 5) execute jsx files from your jsx folder like this jsx.evalFile('myFabJsxScript.jsx'); //
|
||||
// or from a relative path jsx.evalFile('../myFabScripts/myFabJsxScript.jsx'); //
|
||||
// or from an absolute url jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac) //
|
||||
// or from an absolute url jsx.evalFile('C:Path/to/my/FabJsxScript.jsx'); (windows) //
|
||||
// 6) Parameter can be entered in the from of a parameter list which can be in any order or as an object //
|
||||
// 7) Not camelCase sensitive (very useful for the illiterate) //
|
||||
// <span class="wp-font-emots-emo-sunglasses"></span> Dead easy to use BUT SPEND THE 3 TO 5 MINUTES IT SHOULD TAKE TO READ THE INSTRUCTIONS //
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/* jshint undef:true, unused:true, esversion:6 */
|
||||
|
||||
//////////////////////////////////////
|
||||
// jsx is the interface for the API //
|
||||
//////////////////////////////////////
|
||||
|
||||
var jsx;
|
||||
|
||||
// Wrap everything in an anonymous function to prevent leeks
|
||||
(function() {
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// Substitute some CSInterface functions to avoid dependency on it //
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
var __dirname = (function() {
|
||||
var path, isMac;
|
||||
path = decodeURI(window.__adobe_cep__.getSystemPath('extension'));
|
||||
isMac = navigator.platform[0] === 'M'; // [M]ac
|
||||
path = path.replace('file://' + (isMac ? '' : '/'), '');
|
||||
return path;
|
||||
})();
|
||||
|
||||
var evalScript = function(script, callback) {
|
||||
callback = callback || function() {};
|
||||
window.__adobe_cep__.evalScript(script, callback);
|
||||
};
|
||||
|
||||
|
||||
////////////////////////////////////////////
|
||||
// In place of using the node path module //
|
||||
////////////////////////////////////////////
|
||||
|
||||
// jshint undef: true, unused: true
|
||||
|
||||
// A very minified version of the NodeJs Path module!!
|
||||
// For use outside of NodeJs
|
||||
// Majorly nicked by Trevor from Joyent
|
||||
var path = (function() {
|
||||
|
||||
var isString = function(arg) {
|
||||
return typeof arg === 'string';
|
||||
};
|
||||
|
||||
// var isObject = function(arg) {
|
||||
// return typeof arg === 'object' && arg !== null;
|
||||
// };
|
||||
|
||||
var basename = function(path) {
|
||||
if (!isString(path)) {
|
||||
throw new TypeError('Argument to path.basename must be a string');
|
||||
}
|
||||
var bits = path.split(/[\/\\]/g);
|
||||
return bits[bits.length - 1];
|
||||
};
|
||||
|
||||
// jshint undef: true
|
||||
// Regex to split a windows path into three parts: [*, device, slash,
|
||||
// tail] windows-only
|
||||
var splitDeviceRe =
|
||||
/^([a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/]+[^\\\/]+)?([\\\/])?([\s\S]*?)$/;
|
||||
|
||||
// Regex to split the tail part of the above into [*, dir, basename, ext]
|
||||
// var splitTailRe =
|
||||
// /^([\s\S]*?)((?:\.{1,2}|[^\\\/]+?|)(\.[^.\/\\]*|))(?:[\\\/]*)$/;
|
||||
|
||||
var win32 = {};
|
||||
// Function to split a filename into [root, dir, basename, ext]
|
||||
// var win32SplitPath = function(filename) {
|
||||
// // Separate device+slash from tail
|
||||
// var result = splitDeviceRe.exec(filename),
|
||||
// device = (result[1] || '') + (result[2] || ''),
|
||||
// tail = result[3] || '';
|
||||
// // Split the tail into dir, basename and extension
|
||||
// var result2 = splitTailRe.exec(tail),
|
||||
// dir = result2[1],
|
||||
// basename = result2[2],
|
||||
// ext = result2[3];
|
||||
// return [device, dir, basename, ext];
|
||||
// };
|
||||
|
||||
var win32StatPath = function(path) {
|
||||
var result = splitDeviceRe.exec(path),
|
||||
device = result[1] || '',
|
||||
isUnc = !!device && device[1] !== ':';
|
||||
return {
|
||||
device: device,
|
||||
isUnc: isUnc,
|
||||
isAbsolute: isUnc || !!result[2], // UNC paths are always absolute
|
||||
tail: result[3]
|
||||
};
|
||||
};
|
||||
|
||||
var normalizeUNCRoot = function(device) {
|
||||
return '\\\\' + device.replace(/^[\\\/]+/, '').replace(/[\\\/]+/g, '\\');
|
||||
};
|
||||
|
||||
var normalizeArray = function(parts, allowAboveRoot) {
|
||||
var res = [];
|
||||
for (var i = 0; i < parts.length; i++) {
|
||||
var p = parts[i];
|
||||
|
||||
// ignore empty parts
|
||||
if (!p || p === '.')
|
||||
continue;
|
||||
|
||||
if (p === '..') {
|
||||
if (res.length && res[res.length - 1] !== '..') {
|
||||
res.pop();
|
||||
} else if (allowAboveRoot) {
|
||||
res.push('..');
|
||||
}
|
||||
} else {
|
||||
res.push(p);
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
};
|
||||
|
||||
win32.normalize = function(path) {
|
||||
var result = win32StatPath(path),
|
||||
device = result.device,
|
||||
isUnc = result.isUnc,
|
||||
isAbsolute = result.isAbsolute,
|
||||
tail = result.tail,
|
||||
trailingSlash = /[\\\/]$/.test(tail);
|
||||
|
||||
// Normalize the tail path
|
||||
tail = normalizeArray(tail.split(/[\\\/]+/), !isAbsolute).join('\\');
|
||||
|
||||
if (!tail && !isAbsolute) {
|
||||
tail = '.';
|
||||
}
|
||||
if (tail && trailingSlash) {
|
||||
tail += '\\';
|
||||
}
|
||||
|
||||
// Convert slashes to backslashes when `device` points to an UNC root.
|
||||
// Also squash multiple slashes into a single one where appropriate.
|
||||
if (isUnc) {
|
||||
device = normalizeUNCRoot(device);
|
||||
}
|
||||
|
||||
return device + (isAbsolute ? '\\' : '') + tail;
|
||||
};
|
||||
win32.join = function() {
|
||||
var paths = [];
|
||||
for (var i = 0; i < arguments.length; i++) {
|
||||
var arg = arguments[i];
|
||||
if (!isString(arg)) {
|
||||
throw new TypeError('Arguments to path.join must be strings');
|
||||
}
|
||||
if (arg) {
|
||||
paths.push(arg);
|
||||
}
|
||||
}
|
||||
|
||||
var joined = paths.join('\\');
|
||||
|
||||
// Make sure that the joined path doesn't start with two slashes, because
|
||||
// normalize() will mistake it for an UNC path then.
|
||||
//
|
||||
// This step is skipped when it is very clear that the user actually
|
||||
// intended to point at an UNC path. This is assumed when the first
|
||||
// non-empty string arguments starts with exactly two slashes followed by
|
||||
// at least one more non-slash character.
|
||||
//
|
||||
// Note that for normalize() to treat a path as an UNC path it needs to
|
||||
// have at least 2 components, so we don't filter for that here.
|
||||
// This means that the user can use join to construct UNC paths from
|
||||
// a server name and a share name; for example:
|
||||
// path.join('//server', 'share') -> '\\\\server\\share\')
|
||||
if (!/^[\\\/]{2}[^\\\/]/.test(paths[0])) {
|
||||
joined = joined.replace(/^[\\\/]{2,}/, '\\');
|
||||
}
|
||||
return win32.normalize(joined);
|
||||
};
|
||||
|
||||
var posix = {};
|
||||
|
||||
// posix version
|
||||
posix.join = function() {
|
||||
var path = '';
|
||||
for (var i = 0; i < arguments.length; i++) {
|
||||
var segment = arguments[i];
|
||||
if (!isString(segment)) {
|
||||
throw new TypeError('Arguments to path.join must be strings');
|
||||
}
|
||||
if (segment) {
|
||||
if (!path) {
|
||||
path += segment;
|
||||
} else {
|
||||
path += '/' + segment;
|
||||
}
|
||||
}
|
||||
}
|
||||
return posix.normalize(path);
|
||||
};
|
||||
|
||||
// path.normalize(path)
|
||||
// posix version
|
||||
posix.normalize = function(path) {
|
||||
var isAbsolute = path.charAt(0) === '/',
|
||||
trailingSlash = path && path[path.length - 1] === '/';
|
||||
|
||||
// Normalize the path
|
||||
path = normalizeArray(path.split('/'), !isAbsolute).join('/');
|
||||
|
||||
if (!path && !isAbsolute) {
|
||||
path = '.';
|
||||
}
|
||||
if (path && trailingSlash) {
|
||||
path += '/';
|
||||
}
|
||||
|
||||
return (isAbsolute ? '/' : '') + path;
|
||||
};
|
||||
|
||||
win32.basename = posix.basename = basename;
|
||||
|
||||
this.win32 = win32;
|
||||
this.posix = posix;
|
||||
return (navigator.platform[0] === 'M') ? posix : win32;
|
||||
})();
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// The is the "main" function which is to be prototyped //
|
||||
// It run a small snippet in the jsx engine that //
|
||||
// 1) Assigns $.__dirname with the value of the extensions __dirname base path //
|
||||
// 2) Sets up a method $.__fileName() for retrieving from within the jsx script it's $.fileName value //
|
||||
// more on that method later //
|
||||
// At the end of the script the global declaration jsx = new Jsx(); has been made. //
|
||||
// If you like you can remove that and include in your relevant functions //
|
||||
// var jsx = new Jsx(); You would never call the Jsx function without the "new" declaration //
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
var Jsx = function() {
|
||||
var jsxScript;
|
||||
// Setup jsx function to enable the jsx scripts to easily retrieve their file location
|
||||
jsxScript = [
|
||||
'$.level = 0;',
|
||||
'if(!$.__fileNames){',
|
||||
' $.__fileNames = {};',
|
||||
' $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
|
||||
' $.__fileName = function(name){',
|
||||
' name = name || $.fileName;',
|
||||
' return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
|
||||
' };',
|
||||
'}'
|
||||
].join('');
|
||||
evalScript(jsxScript);
|
||||
return this;
|
||||
};
|
||||
|
||||
/**
|
||||
* [evalScript] For calling jsx scripts from the js engine
|
||||
*
|
||||
* The jsx.evalScript method is used for calling jsx scripts directly from the js engine
|
||||
* Allows for easy replacement i.e. variable insertions and for forcing eval.
|
||||
* For convenience jsx.eval or jsx.script or jsx.evalscript can be used instead of calling jsx.evalScript
|
||||
*
|
||||
* @param {String} jsxScript
|
||||
* The string that makes up the jsx script
|
||||
* it can contain a simple template like syntax for replacements
|
||||
* 'alert("__foo__");'
|
||||
* the __foo__ will be replaced as per the replacements parameter
|
||||
*
|
||||
* @param {Function} callback
|
||||
* The callback function you want the jsx script to trigger on completion
|
||||
* The result of the jsx script is passed as the argument to that function
|
||||
* The function can exist in some other file.
|
||||
* Note that InDesign does not automatically pass the callBack as a string.
|
||||
* Either write your InDesign in a way that it returns a sting the form of
|
||||
* return 'this is my result surrounded by quotes'
|
||||
* or use the force eval option
|
||||
* [Optional DEFAULT no callBack]
|
||||
*
|
||||
* @param {Object} replacements
|
||||
* The replacements to make on the jsx script
|
||||
* given the following script (template)
|
||||
* 'alert("__message__: " + __val__);'
|
||||
* and we want to change the script to
|
||||
* 'alert("I was born in the year: " + 1234);'
|
||||
* we would pass the following object
|
||||
* {"message": 'I was born in the year', "val": 1234}
|
||||
* or if not using reserved words like do we can leave out the key quotes
|
||||
* {message: 'I was born in the year', val: 1234}
|
||||
* [Optional DEFAULT no replacements]
|
||||
*
|
||||
* @param {Bolean} forceEval
|
||||
* If the script should be wrapped in an eval and try catch
|
||||
* This will 1) provide useful error feedback if heaven forbid it is needed
|
||||
* 2) The result will be a string which is required for callback results in InDesign
|
||||
* [Optional DEFAULT true]
|
||||
*
|
||||
* Note 1) The order of the parameters is irrelevant
|
||||
* Note 2) One can pass the arguments as an object if desired
|
||||
* jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', true);
|
||||
* is the same as
|
||||
* jsx.evalScript({
|
||||
* script: 'alert("__myMessage__");',
|
||||
* replacements: {myMessage: 'Hi there'},
|
||||
* callBack: myCallBackFunction,
|
||||
* eval: true
|
||||
* });
|
||||
* note that either lower or camelCase key names are valid
|
||||
* i.e. both callback or callBack will work
|
||||
*
|
||||
* The following keys are the same jsx || script || jsxScript || jsxscript || file
|
||||
* The following keys are the same callBack || callback
|
||||
* The following keys are the same replacements || replace
|
||||
* The following keys are the same eval || forceEval || forceeval
|
||||
* The following keys are the same forceEvalScript || forceevalscript || evalScript || evalscript;
|
||||
*
|
||||
* @return {Boolean} if the jsxScript was executed or not
|
||||
*/
|
||||
|
||||
Jsx.prototype.evalScript = function() {
|
||||
var arg, i, key, replaceThis, withThis, args, callback, forceEval, replacements, jsxScript, isBin;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
// sort out order which arguments into jsxScript, callback, replacements, forceEval //
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
args = arguments;
|
||||
|
||||
// Detect if the parameters were passed as an object and if so allow for various keys
|
||||
if (args.length === 1 && (arg = args[0]) instanceof Object) {
|
||||
jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
|
||||
callback = arg.callBack || arg.callback;
|
||||
replacements = arg.replacements || arg.replace;
|
||||
forceEval = arg.eval || arg.forceEval || arg.forceeval;
|
||||
} else {
|
||||
for (i = 0; i < 4; i++) {
|
||||
arg = args[i];
|
||||
if (arg === undefined) {
|
||||
continue;
|
||||
}
|
||||
if (arg.constructor === String) {
|
||||
jsxScript = arg;
|
||||
continue;
|
||||
}
|
||||
if (arg.constructor === Object) {
|
||||
replacements = arg;
|
||||
continue;
|
||||
}
|
||||
if (arg.constructor === Function) {
|
||||
callback = arg;
|
||||
continue;
|
||||
}
|
||||
if (arg === false) {
|
||||
forceEval = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no script provide then not too much to do!
|
||||
if (!jsxScript) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Have changed the forceEval default to be true as I prefer the error handling
|
||||
if (forceEval !== false) {
|
||||
forceEval = true;
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// On Illustrator and other apps the result of the jsx script is automatically passed as a string //
|
||||
// if you have a "script" containing the single number 1 and nothing else then the callBack will register as "1" //
|
||||
// On InDesign that same script will provide a blank callBack //
|
||||
// Let's say we have a callBack function var callBack = function(result){alert(result);} //
|
||||
// On Ai you'll see the 1 in the alert //
|
||||
// On ID you'll just see a blank alert //
|
||||
// To see the 1 in the alert you need to convert the result to a string and then it will show //
|
||||
// So if we rewrite our 1 byte script to '1' i.e. surround the 1 in quotes then the callBack alert will show 1 //
|
||||
// If the script's planned carefully one can make sure that the result's always passed as a string (including errors) //
|
||||
// otherwise one can wrap the script in an eval and then have the result passed as a string //
|
||||
// I have not gone through all the apps but can say //
|
||||
// for Ai you never need to set the forceEval to true //
|
||||
// for ID, if you have not coded your script appropriately and you want to send a result to the callBack then set forceEval to true //
|
||||
// I changed this that even on Illustrator it applies the try catch, Note the try catch will fail if $.level is set to 1 //
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
if (forceEval) {
|
||||
|
||||
isBin = (jsxScript.substring(0, 10) === '@JSXBIN@ES') ? '' : '\n';
|
||||
jsxScript = (
|
||||
// "\n''') + '';} catch(e){(function(e){var n, a=[]; for (n in e){a.push(n + ': ' + e[n])}; return a.join('\n')})(e)}");
|
||||
// "\n''') + '';} catch(e){e + (e.line ? ('\\nLine ' + (+e.line - 1)) : '')}");
|
||||
[
|
||||
"$.level = 0;",
|
||||
"try{eval('''" + isBin, // need to add an extra line otherwise #targetengine doesn't work ;-]
|
||||
jsxScript.replace(/\\/g, '\\\\').replace(/'/g, "\\'").replace(/"/g, '\\"') + "\n''') + '';",
|
||||
"} catch (e) {",
|
||||
" (function(e) {",
|
||||
" var line, sourceLine, name, description, ErrorMessage, fileName, start, end, bug;",
|
||||
" line = +e.line" + (isBin === '' ? ';' : ' - 1;'), // To take into account the extra line added
|
||||
" fileName = File(e.fileName).fsName;",
|
||||
" sourceLine = line && e.source.split(/[\\r\\n]/)[line];",
|
||||
" name = e.name;",
|
||||
" description = e.description;",
|
||||
" ErrorMessage = name + ' ' + e.number + ': ' + description;",
|
||||
" if (fileName.length && !(/[\\/\\\\]\\d+$/.test(fileName))) {",
|
||||
" ErrorMessage += '\\nFile: ' + fileName;",
|
||||
" line++;",
|
||||
" }",
|
||||
" if (line){",
|
||||
" ErrorMessage += '\\nLine: ' + line +",
|
||||
" '-> ' + ((sourceLine.length < 300) ? sourceLine : sourceLine.substring(0,300) + '...');",
|
||||
" }",
|
||||
" if (e.start) {ErrorMessage += '\\nBug: ' + e.source.substring(e.start - 1, e.end)}",
|
||||
" if ($.includeStack) {ErrorMessage += '\\nStack:' + $.stack;}",
|
||||
" return ErrorMessage;",
|
||||
" })(e);",
|
||||
"}"
|
||||
].join('')
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// deal with the replacements //
|
||||
// Note it's probably better to use ${template} `literals` //
|
||||
/////////////////////////////////////////////////////////////
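// Illustrative example of the mechanism below (values are hypothetical):
// with replacements {name: 'Fred'}, the script 'alert("Hello __name__");'
// becomes 'alert("Hello Fred");' — each key is matched globally as /__key__/
// and swapped for its value before the script is evaluated.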
|
||||
|
||||
if (replacements) {
|
||||
for (key in replacements) {
|
||||
if (replacements.hasOwnProperty(key)) {
|
||||
replaceThis = new RegExp('__' + key + '__', 'g');
|
||||
withThis = replacements[key];
|
||||
jsxScript = jsxScript.replace(replaceThis, withThis + '');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
try {
|
||||
evalScript(jsxScript, callback);
|
||||
return true;
|
||||
} catch (err) {
|
||||
////////////////////////////////////////////////
|
||||
// Do whatever error handling you want here ! //
|
||||
////////////////////////////////////////////////
|
||||
var newErr;
|
||||
newErr = new Error(err);
|
||||
alert('Error Eek: ' + newErr.stack);
|
||||
return false;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* [evalFile] For calling jsx scripts from the js engine
|
||||
*
|
||||
* The jsx.evalFile method is used for executing saved jsx scripts
|
||||
* where the jsxScript parameter is a string of the jsx scripts file location.
|
||||
* For convenience jsx.file or jsx.evalfile can be used instead of jsx.evalFile
|
||||
*
|
||||
* @param {String} file
|
||||
* The path to jsx script
|
||||
* If only the base name is provided then the path will be presumed to be the jsx folder
|
||||
* To execute files stored in the jsx folder located in the __dirname folder use
|
||||
* jsx.evalFile('myFabJsxScript.jsx');
|
||||
* To execute files stored in a folder myFabScripts located in the __dirname folder use
|
||||
* jsx.evalFile('./myFabScripts/myFabJsxScript.jsx');
|
||||
* To execute files stored in a folder myFabScripts located at an absolute path use
|
||||
* jsx.evalFile('/Path/to/my/FabJsxScript.jsx'); (mac)
|
||||
* or jsx.evalFile('C:/Path/to/my/FabJsxScript.jsx'); (windows)
|
||||
*
|
||||
* @param {Function} callback
|
||||
* The callback function you want the jsx script to trigger on completion
|
||||
* The result of the jsx script is passed as the argument to that function
|
||||
* The function can exist in some other file.
|
||||
* Note that InDesign does not automatically pass the callBack as a string.
|
||||
* Either write your InDesign script in a way that it returns a string in the form of
|
||||
* return 'this is my result surrounded by quotes'
|
||||
* or use the force eval option
|
||||
* [Optional DEFAULT no callBack]
|
||||
*
|
||||
* @param {Object} replacements
|
||||
* The replacements to make on the jsx script
|
||||
* give the following script (template)
|
||||
* 'alert("__message__: " + __val__);'
|
||||
* and we want to change the script to
|
||||
* 'alert("I was born in the year: " + 1234);'
|
||||
* we would pass the following object
|
||||
* {"message": 'I was born in the year', "val": 1234}
|
||||
* or, if not using reserved words like "do", we can leave out the key quotes
|
||||
* {message: 'I was born in the year', val: 1234}
|
||||
* By default when possible the forceEvalScript will be set to true
|
||||
* The forceEvalScript option cannot be true when there are replacements
|
||||
* To force the forceEvalScript to be false you can send a blank set of replacements
|
||||
* jsx.evalFile('myFabScript.jsx', {}); will NOT be executed using the $.evalFile method
|
||||
* jsx.evalFile('myFabScript.jsx'); WILL be executed using the $.evalFile method
|
||||
* see the forceEvalScript parameter for details on this
|
||||
* [Optional DEFAULT no replacements]
|
||||
*
|
||||
* @param {Bolean} forceEval
|
||||
* If the script should be wrapped in an eval and try catch
|
||||
* This will 1) provide useful error feedback if heaven forbid it is needed
|
||||
* 2) The result will be a string which is required for callback results in InDesign
|
||||
* [Optional DEFAULT true]
|
||||
*
|
||||
* If no replacements are needed then the jsx script will be executed using the $.evalFile method
|
||||
* This exposes the true value of the $.fileName property
|
||||
* In such a case it's best to avoid using the $.__fileName() with no base name as it won't work
|
||||
* BUT one can still use the $.__fileName('baseName') method which is more accurate than the standard $.fileName property
|
||||
* Let's say you have a Drive called "Graphics" AND YOU HAVE a root folder on your "main" drive called "Graphics"
|
||||
* You call a script jsx.evalFile('/Volumes/Graphics/myFabScript.jsx');
|
||||
* $.fileName will give you '/Graphics/myFabScript.jsx' which is wrong
|
||||
* $.__fileName('myFabScript.jsx') will give you '/Volumes/Graphics/myFabScript.jsx' which is correct
|
||||
* $.__fileName() will not give you a reliable result
|
||||
* Note that if you're calling multiple versions of myFabScript.jsx stored in multiple folders then you can get stuffed!
|
||||
* i.e. if the fileName is important to you then don't do that.
|
||||
* It also will force the result of the jsx file as a string which is particularly useful for InDesign callBacks
|
||||
*
|
||||
* Note 1) The order of the parameters is irrelevant
|
||||
* Note 2) One can pass the arguments as an object if desired
|
||||
* jsx.evalScript(myCallBackFunction, 'alert("__myMessage__");', true);
|
||||
* is the same as
|
||||
* jsx.evalScript({
|
||||
* script: 'alert("__myMessage__");',
|
||||
* replacements: {myMessage: 'Hi there'},
|
||||
* callBack: myCallBackFunction,
|
||||
* eval: true
|
||||
* });
|
||||
* note that either lower or camelCase key names are valid
|
||||
* i.e. both callback or callBack will work
|
||||
*
|
||||
* The following keys are the same file || jsx || script || jsxScript || jsxscript
|
||||
* The following keys are the same callBack || callback
|
||||
* The following keys are the same replacements || replace
|
||||
* The following keys are the same eval || forceEval || forceeval
|
||||
*
|
||||
* @return {Boolean} if the jsxScript was executed or not
|
||||
*/
|
||||
|
||||
Jsx.prototype.evalFile = function() {
|
||||
var arg, args, callback, fileName, fileNameScript, forceEval, forceEvalScript,
|
||||
i, jsxFolder, jsxScript, newLine, replacements, success;
|
||||
|
||||
success = true; // optimistic
|
||||
args = arguments;
|
||||
|
||||
jsxFolder = path.join(__dirname, 'jsx');
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// $.fileName does not return its correct path in the jsx engine for files called from the js engine //
|
||||
// In Illustrator it returns an integer in InDesign it returns an empty string //
|
||||
// This script injection allows for the script to know it's path by calling //
|
||||
// $.__fileName(); //
|
||||
// on Illustrator this works pretty well //
|
||||
// on InDesign it's best to use with a bit of care //
|
||||
// If a second script has been called then InDesign will "forget" the path to the first script //
|
||||
// 2 work-arounds for this //
|
||||
// 1) at the beginning of your script add var thePathToMeIs = $.fileName(); //
|
||||
// thePathToMeIs will not be forgotten after running the second script //
|
||||
// 2) $.__fileName('myBaseName.jsx'); //
|
||||
// for example you have file with the following path //
|
||||
// /path/to/me.jsx //
|
||||
// Call $.__fileName('me.jsx') and you will get /path/to/me.jsx even after executing a second script //
|
||||
// Note When the forceEvalScript option is used then you just use the regular $.fileName property //
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
fileNameScript = [
|
||||
// The if statement should not normally be executed
|
||||
'if(!$.__fileNames){',
|
||||
' $.__fileNames = {};',
|
||||
' $.__dirname = "__dirname__";'.replace('__dirname__', __dirname),
|
||||
' $.__fileName = function(name){',
|
||||
' name = name || $.fileName;',
|
||||
' return ($.__fileNames && $.__fileNames[name]) || $.fileName;',
|
||||
' };',
|
||||
'}',
|
||||
'$.__fileNames["__basename__"] = $.__fileNames["" + $.fileName] = "__fileName__";'
|
||||
].join('');
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
// sort out order which arguments into jsxScript, callback, replacements, forceEval //
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
// Detect if the parameters were passed as an object and if so allow for various keys
|
||||
if (args.length === 1 && (arg = args[0]) instanceof Object) {
|
||||
jsxScript = arg.jsxScript || arg.jsx || arg.script || arg.file || arg.jsxscript;
|
||||
callback = arg.callBack || arg.callback;
|
||||
replacements = arg.replacements || arg.replace;
|
||||
forceEval = arg.eval || arg.forceEval || arg.forceeval;
|
||||
} else {
|
||||
for (i = 0; i < 5; i++) {
|
||||
arg = args[i];
|
||||
if (arg === undefined) {
|
||||
continue;
|
||||
}
|
||||
if (arg.constructor.name === 'String') {
|
||||
jsxScript = arg;
|
||||
continue;
|
||||
}
|
||||
if (arg.constructor.name === 'Object') {
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// If no replacements are provided then the $.evalScript method will be used //
|
||||
// This will allow directly for the $.fileName property to be used //
|
||||
// If one does not want the $.evalScript method to be used then //
|
||||
// either send a blank object as the replacements {} //
|
||||
// or explicitly set the forceEvalScript option to false //
|
||||
// This can only be done if the parameters are passed as an object //
|
||||
// i.e. jsx.evalFile({file:'myFabScript.jsx', forceEvalScript: false}); //
|
||||
// if the file was called using //
|
||||
// i.e. jsx.evalFile('myFabScript.jsx'); //
|
||||
// then the following jsx code is called $.evalFile(new File('Path/to/myFabScript.jsx', 10000000000)) + ''; //
|
||||
// forceEval is never needed if the forceEvalScript is triggered //
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
replacements = arg;
|
||||
continue;
|
||||
}
|
||||
if (arg.constructor === Function) {
|
||||
callback = arg;
|
||||
continue;
|
||||
}
|
||||
if (arg === false) {
|
||||
forceEval = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no script provided then there's not too much to do!
|
||||
if (!jsxScript) {
|
||||
return false;
|
||||
}
|
||||
|
||||
forceEvalScript = !replacements;
|
||||
|
||||
|
||||
//////////////////////////////////////////////////////
|
||||
// Get path of script //
|
||||
// Check if it's literal, relative or in jsx folder //
|
||||
//////////////////////////////////////////////////////
|
||||
|
||||
if (/^\/|[a-zA-Z]+:/.test(jsxScript)) { // absolute path Mac | Windows
|
||||
jsxScript = path.normalize(jsxScript);
|
||||
} else if (/^\.+\//.test(jsxScript)) {
|
||||
jsxScript = path.join(__dirname, jsxScript); // relative path
|
||||
} else {
|
||||
jsxScript = path.join(jsxFolder, jsxScript); // files in the jsxFolder
|
||||
}
|
||||
|
||||
if (forceEvalScript) {
|
||||
jsxScript = jsxScript.replace(/"/g, '\\"');
|
||||
// Check that the path exists; should change this to asynchronous at some point
|
||||
if (!window.cep.fs.stat(jsxScript).err) {
|
||||
jsxScript = fileNameScript.replace(/__fileName__/, jsxScript).replace(/__basename__/, path.basename(jsxScript)) +
|
||||
'$.evalFile(new File("' + jsxScript.replace(/\\/g, '\\\\') + '")) + "";';
|
||||
return this.evalScript(jsxScript, callback, forceEval);
|
||||
} else {
|
||||
throw new Error(`The file: ${jsxScript} could not be found / read`);
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Replacements made so we can't use $.evalFile and need to read the jsx script for ourselves //
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
fileName = jsxScript.replace(/\\/g, '\\\\').replace(/"/g, '\\"');
|
||||
try {
|
||||
jsxScript = window.cep.fs.readFile(jsxScript).data;
|
||||
} catch (er) {
|
||||
throw new Error(`The file: ${fileName} could not be read`);
|
||||
}
|
||||
// It is desirable that the injected fileNameScript is on the same line as the 1st line of the script
|
||||
// This is so that the $.line or error.line returns the same value as the actual file
|
||||
// However if the 1st line contains a # directive then we need to insert a new line and stuff the above problem
|
||||
// When possible i.e. when there's no replacements then $.evalFile will be used and then the whole issue is avoided
|
||||
newLine = /^\s*#/.test(jsxScript) ? '\n' : '';
|
||||
jsxScript = fileNameScript.replace(/__fileName__/, fileName).replace(/__basename__/, path.basename(fileName)) + newLine + jsxScript;
|
||||
|
||||
try {
|
||||
// evalScript(jsxScript, callback);
|
||||
return this.evalScript(jsxScript, callback, replacements, forceEval);
|
||||
} catch (err) {
|
||||
////////////////////////////////////////////////
|
||||
// Do whatever error handling you want here ! //
|
||||
////////////////////////////////////////////////
|
||||
var newErr;
|
||||
newErr = new Error(err);
|
||||
alert('Error Eek: ' + newErr.stack);
|
||||
return false;
|
||||
}
|
||||
|
||||
return success; // success should be an array but for now it's a Boolean
|
||||
};
|
||||
|
||||
|
||||
////////////////////////////////////
|
||||
// Setup alternative method names //
|
||||
////////////////////////////////////
|
||||
Jsx.prototype.eval = Jsx.prototype.script = Jsx.prototype.evalscript = Jsx.prototype.evalScript;
|
||||
Jsx.prototype.file = Jsx.prototype.evalfile = Jsx.prototype.evalFile;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Examples //
|
||||
// jsx.evalScript('alert("foo");'); //
|
||||
// jsx.evalFile('foo.jsx'); // where foo.jsx is stored in the jsx folder at the base of the extensions directory //
|
||||
// jsx.evalFile('../myFolder/foo.jsx'); // where a relative or absolute file path is given //
|
||||
// //
|
||||
// using conventional methods one would use in the case where the values to swap were supplied by variables //
|
||||
// csInterface.evalScript('var q = "' + name + '"; alert("' + myString + '" ' + myOp + ' q);q;', callback); //
|
||||
// Using all the '' + foo + '' is very error prone //
|
||||
// jsx.evalScript('var q = "__name__"; alert(__string__ __opp__ q);q;',{'name':'Fred', 'string':'Hello ', 'opp':'+'}, callBack); //
|
||||
// is much simpler and less error prone //
|
||||
// //
|
||||
// more readable to use object //
|
||||
// jsx.evalFile({ //
|
||||
// file: 'yetAnotherFabScript.jsx', //
|
||||
// replacements: {"this": foo, That: bar, and: "&&", the: foo2, other: bar2}, //
|
||||
// eval: true //
|
||||
// }) //
|
||||
// Enjoy //
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
jsx = new Jsx();
|
||||
})();
|
||||
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,530 @@
|
|||
// json2.js
|
||||
// 2017-06-12
|
||||
// Public Domain.
|
||||
// NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
|
||||
|
||||
// USE YOUR OWN COPY. IT IS EXTREMELY UNWISE TO LOAD CODE FROM SERVERS YOU DO
|
||||
// NOT CONTROL.
|
||||
|
||||
// This file creates a global JSON object containing two methods: stringify
|
||||
// and parse. This file provides the ES5 JSON capability to ES3 systems.
|
||||
// If a project might run on IE8 or earlier, then this file should be included.
|
||||
// This file does nothing on ES5 systems.
|
||||
|
||||
// JSON.stringify(value, replacer, space)
|
||||
// value any JavaScript value, usually an object or array.
|
||||
// replacer an optional parameter that determines how object
|
||||
// values are stringified for objects. It can be a
|
||||
// function or an array of strings.
|
||||
// space an optional parameter that specifies the indentation
|
||||
// of nested structures. If it is omitted, the text will
|
||||
// be packed without extra whitespace. If it is a number,
|
||||
// it will specify the number of spaces to indent at each
|
||||
// level. If it is a string (such as "\t" or " "),
|
||||
// it contains the characters used to indent at each level.
|
||||
// This method produces a JSON text from a JavaScript value.
|
||||
// When an object value is found, if the object contains a toJSON
|
||||
// method, its toJSON method will be called and the result will be
|
||||
// stringified. A toJSON method does not serialize: it returns the
|
||||
// value represented by the name/value pair that should be serialized,
|
||||
// or undefined if nothing should be serialized. The toJSON method
|
||||
// will be passed the key associated with the value, and this will be
|
||||
// bound to the value.
|
||||
|
||||
// For example, this would serialize Dates as ISO strings.
|
||||
|
||||
// Date.prototype.toJSON = function (key) {
|
||||
// function f(n) {
|
||||
// // Format integers to have at least two digits.
|
||||
// return (n < 10)
|
||||
// ? "0" + n
|
||||
// : n;
|
||||
// }
|
||||
// return this.getUTCFullYear() + "-" +
|
||||
// f(this.getUTCMonth() + 1) + "-" +
|
||||
// f(this.getUTCDate()) + "T" +
|
||||
// f(this.getUTCHours()) + ":" +
|
||||
// f(this.getUTCMinutes()) + ":" +
|
||||
// f(this.getUTCSeconds()) + "Z";
|
||||
// };
|
||||
|
||||
// You can provide an optional replacer method. It will be passed the
|
||||
// key and value of each member, with this bound to the containing
|
||||
// object. The value that is returned from your method will be
|
||||
// serialized. If your method returns undefined, then the member will
|
||||
// be excluded from the serialization.
|
||||
|
||||
// If the replacer parameter is an array of strings, then it will be
|
||||
// used to select the members to be serialized. It filters the results
|
||||
// such that only members with keys listed in the replacer array are
|
||||
// stringified.
|
||||
|
||||
// Values that do not have JSON representations, such as undefined or
|
||||
// functions, will not be serialized. Such values in objects will be
|
||||
// dropped; in arrays they will be replaced with null. You can use
|
||||
// a replacer function to replace those with JSON values.
|
||||
|
||||
// JSON.stringify(undefined) returns undefined.
|
||||
|
||||
// The optional space parameter produces a stringification of the
|
||||
// value that is filled with line breaks and indentation to make it
|
||||
// easier to read.
|
||||
|
||||
// If the space parameter is a non-empty string, then that string will
|
||||
// be used for indentation. If the space parameter is a number, then
|
||||
// the indentation will be that many spaces.
|
||||
|
||||
// Example:
|
||||
|
||||
// text = JSON.stringify(["e", {pluribus: "unum"}]);
|
||||
// // text is '["e",{"pluribus":"unum"}]'
|
||||
|
||||
// text = JSON.stringify(["e", {pluribus: "unum"}], null, "\t");
|
||||
// // text is '[\n\t"e",\n\t{\n\t\t"pluribus": "unum"\n\t}\n]'
|
||||
|
||||
// text = JSON.stringify([new Date()], function (key, value) {
|
||||
// return this[key] instanceof Date
|
||||
// ? "Date(" + this[key] + ")"
|
||||
// : value;
|
||||
// });
|
||||
// // text is '["Date(---current time---)"]'
|
||||
|
||||
// JSON.parse(text, reviver)
|
||||
// This method parses a JSON text to produce an object or array.
|
||||
// It can throw a SyntaxError exception.
|
||||
|
||||
// The optional reviver parameter is a function that can filter and
|
||||
// transform the results. It receives each of the keys and values,
|
||||
// and its return value is used instead of the original value.
|
||||
// If it returns what it received, then the structure is not modified.
|
||||
// If it returns undefined then the member is deleted.
|
||||
|
||||
// Example:
|
||||
|
||||
// // Parse the text. Values that look like ISO date strings will
|
||||
// // be converted to Date objects.
|
||||
|
||||
// myData = JSON.parse(text, function (key, value) {
|
||||
// var a;
|
||||
// if (typeof value === "string") {
|
||||
// a =
|
||||
// /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}(?:\.\d*)?)Z$/.exec(value);
|
||||
// if (a) {
|
||||
// return new Date(Date.UTC(
|
||||
// +a[1], +a[2] - 1, +a[3], +a[4], +a[5], +a[6]
|
||||
// ));
|
||||
// }
|
||||
// return value;
|
||||
// }
|
||||
// });
|
||||
|
||||
// myData = JSON.parse(
|
||||
// "[\"Date(09/09/2001)\"]",
|
||||
// function (key, value) {
|
||||
// var d;
|
||||
// if (
|
||||
// typeof value === "string"
|
||||
// && value.slice(0, 5) === "Date("
|
||||
// && value.slice(-1) === ")"
|
||||
// ) {
|
||||
// d = new Date(value.slice(5, -1));
|
||||
// if (d) {
|
||||
// return d;
|
||||
// }
|
||||
// }
|
||||
// return value;
|
||||
// }
|
||||
// );
|
||||
|
||||
// This is a reference implementation. You are free to copy, modify, or
|
||||
// redistribute.
|
||||
|
||||
/*jslint
|
||||
eval, for, this
|
||||
*/
|
||||
|
||||
/*property
|
||||
JSON, apply, call, charCodeAt, getUTCDate, getUTCFullYear, getUTCHours,
|
||||
getUTCMinutes, getUTCMonth, getUTCSeconds, hasOwnProperty, join,
|
||||
lastIndex, length, parse, prototype, push, replace, slice, stringify,
|
||||
test, toJSON, toString, valueOf
|
||||
*/
|
||||
|
||||
|
||||
// Create a JSON object only if one does not already exist. We create the
|
||||
// methods in a closure to avoid creating global variables.
|
||||
|
||||
if (typeof JSON !== "object") {
|
||||
JSON = {};
|
||||
}
|
||||
|
||||
(function () {
|
||||
"use strict";
|
||||
|
||||
var rx_one = /^[\],:{}\s]*$/;
|
||||
var rx_two = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g;
|
||||
var rx_three = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g;
|
||||
var rx_four = /(?:^|:|,)(?:\s*\[)+/g;
|
||||
var rx_escapable = /[\\"\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
|
||||
var rx_dangerous = /[\u0000\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g;
|
||||
|
||||
function f(n) {
|
||||
// Format integers to have at least two digits.
|
||||
return (n < 10)
|
||||
? "0" + n
|
||||
: n;
|
||||
}
|
||||
|
||||
function this_value() {
|
||||
return this.valueOf();
|
||||
}
|
||||
|
||||
if (typeof Date.prototype.toJSON !== "function") {
|
||||
|
||||
Date.prototype.toJSON = function () {
|
||||
|
||||
return isFinite(this.valueOf())
|
||||
? (
|
||||
this.getUTCFullYear()
|
||||
+ "-"
|
||||
+ f(this.getUTCMonth() + 1)
|
||||
+ "-"
|
||||
+ f(this.getUTCDate())
|
||||
+ "T"
|
||||
+ f(this.getUTCHours())
|
||||
+ ":"
|
||||
+ f(this.getUTCMinutes())
|
||||
+ ":"
|
||||
+ f(this.getUTCSeconds())
|
||||
+ "Z"
|
||||
)
|
||||
: null;
|
||||
};
|
||||
|
||||
Boolean.prototype.toJSON = this_value;
|
||||
Number.prototype.toJSON = this_value;
|
||||
String.prototype.toJSON = this_value;
|
||||
}
|
||||
|
||||
var gap;
|
||||
var indent;
|
||||
var meta;
|
||||
var rep;
|
||||
|
||||
|
||||
function quote(string) {
|
||||
|
||||
// If the string contains no control characters, no quote characters, and no
|
||||
// backslash characters, then we can safely slap some quotes around it.
|
||||
// Otherwise we must also replace the offending characters with safe escape
|
||||
// sequences.
|
||||
|
||||
rx_escapable.lastIndex = 0;
|
||||
return rx_escapable.test(string)
|
||||
? "\"" + string.replace(rx_escapable, function (a) {
|
||||
var c = meta[a];
|
||||
return typeof c === "string"
|
||||
? c
|
||||
: "\\u" + ("0000" + a.charCodeAt(0).toString(16)).slice(-4);
|
||||
}) + "\""
|
||||
: "\"" + string + "\"";
|
||||
}
|
||||
|
||||
|
||||
function str(key, holder) {
|
||||
|
||||
// Produce a string from holder[key].
|
||||
|
||||
var i; // The loop counter.
|
||||
var k; // The member key.
|
||||
var v; // The member value.
|
||||
var length;
|
||||
var mind = gap;
|
||||
var partial;
|
||||
var value = holder[key];
|
||||
|
||||
// If the value has a toJSON method, call it to obtain a replacement value.
|
||||
|
||||
if (
|
||||
value
|
||||
&& typeof value === "object"
|
||||
&& typeof value.toJSON === "function"
|
||||
) {
|
||||
value = value.toJSON(key);
|
||||
}
|
||||
|
||||
// If we were called with a replacer function, then call the replacer to
|
||||
// obtain a replacement value.
|
||||
|
||||
if (typeof rep === "function") {
|
||||
value = rep.call(holder, key, value);
|
||||
}
|
||||
|
||||
// What happens next depends on the value's type.
|
||||
|
||||
switch (typeof value) {
|
||||
case "string":
|
||||
return quote(value);
|
||||
|
||||
case "number":
|
||||
|
||||
// JSON numbers must be finite. Encode non-finite numbers as null.
|
||||
|
||||
return (isFinite(value))
|
||||
? String(value)
|
||||
: "null";
|
||||
|
||||
case "boolean":
|
||||
case "null":
|
||||
|
||||
// If the value is a boolean or null, convert it to a string. Note:
|
||||
// typeof null does not produce "null". The case is included here in
|
||||
// the remote chance that this gets fixed someday.
|
||||
|
||||
return String(value);
|
||||
|
||||
// If the type is "object", we might be dealing with an object or an array or
|
||||
// null.
|
||||
|
||||
case "object":
|
||||
|
||||
// Due to a specification blunder in ECMAScript, typeof null is "object",
|
||||
// so watch out for that case.
|
||||
|
||||
if (!value) {
|
||||
return "null";
|
||||
}
|
||||
|
||||
// Make an array to hold the partial results of stringifying this object value.
|
||||
|
||||
gap += indent;
|
||||
partial = [];
|
||||
|
||||
// Is the value an array?
|
||||
|
||||
if (Object.prototype.toString.apply(value) === "[object Array]") {
|
||||
|
||||
// The value is an array. Stringify every element. Use null as a placeholder
|
||||
// for non-JSON values.
|
||||
|
||||
length = value.length;
|
||||
for (i = 0; i < length; i += 1) {
|
||||
partial[i] = str(i, value) || "null";
|
||||
}
|
||||
|
||||
// Join all of the elements together, separated with commas, and wrap them in
|
||||
// brackets.
|
||||
|
||||
v = partial.length === 0
|
||||
? "[]"
|
||||
: gap
|
||||
? (
|
||||
"[\n"
|
||||
+ gap
|
||||
+ partial.join(",\n" + gap)
|
||||
+ "\n"
|
||||
+ mind
|
||||
+ "]"
|
||||
)
|
||||
: "[" + partial.join(",") + "]";
|
||||
gap = mind;
|
||||
return v;
|
||||
}
|
||||
|
||||
// If the replacer is an array, use it to select the members to be stringified.
|
||||
|
||||
if (rep && typeof rep === "object") {
|
||||
length = rep.length;
|
||||
for (i = 0; i < length; i += 1) {
|
||||
if (typeof rep[i] === "string") {
|
||||
k = rep[i];
|
||||
v = str(k, value);
|
||||
if (v) {
|
||||
partial.push(quote(k) + (
|
||||
(gap)
|
||||
? ": "
|
||||
: ":"
|
||||
) + v);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
// Otherwise, iterate through all of the keys in the object.
|
||||
|
||||
for (k in value) {
|
||||
if (Object.prototype.hasOwnProperty.call(value, k)) {
|
||||
v = str(k, value);
|
||||
if (v) {
|
||||
partial.push(quote(k) + (
|
||||
(gap)
|
||||
? ": "
|
||||
: ":"
|
||||
) + v);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Join all of the member texts together, separated with commas,
|
||||
// and wrap them in braces.
|
||||
|
||||
v = partial.length === 0
|
||||
? "{}"
|
||||
: gap
|
||||
? "{\n" + gap + partial.join(",\n" + gap) + "\n" + mind + "}"
|
||||
: "{" + partial.join(",") + "}";
|
||||
gap = mind;
|
||||
return v;
|
||||
}
|
||||
}
|
||||
|
||||
// If the JSON object does not yet have a stringify method, give it one.
|
||||
|
||||
if (typeof JSON.stringify !== "function") {
|
||||
meta = { // table of character substitutions
|
||||
"\b": "\\b",
|
||||
"\t": "\\t",
|
||||
"\n": "\\n",
|
||||
"\f": "\\f",
|
||||
"\r": "\\r",
|
||||
"\"": "\\\"",
|
||||
"\\": "\\\\"
|
||||
};
|
||||
JSON.stringify = function (value, replacer, space) {
|
||||
|
||||
// The stringify method takes a value and an optional replacer, and an optional
|
||||
// space parameter, and returns a JSON text. The replacer can be a function
|
||||
// that can replace values, or an array of strings that will select the keys.
|
||||
// A default replacer method can be provided. Use of the space parameter can
|
||||
// produce text that is more easily readable.
|
||||
|
||||
var i;
|
||||
gap = "";
|
||||
indent = "";
|
||||
|
||||
// If the space parameter is a number, make an indent string containing that
|
||||
// many spaces.
|
||||
|
||||
if (typeof space === "number") {
|
||||
for (i = 0; i < space; i += 1) {
|
||||
indent += " ";
|
||||
}
|
||||
|
||||
// If the space parameter is a string, it will be used as the indent string.
|
||||
|
||||
} else if (typeof space === "string") {
|
||||
indent = space;
|
||||
}
|
||||
|
||||
// If there is a replacer, it must be a function or an array.
|
||||
// Otherwise, throw an error.
|
||||
|
||||
rep = replacer;
|
||||
if (replacer && typeof replacer !== "function" && (
|
||||
typeof replacer !== "object"
|
||||
|| typeof replacer.length !== "number"
|
||||
)) {
|
||||
throw new Error("JSON.stringify");
|
||||
}
|
||||
|
||||
// Make a fake root object containing our value under the key of "".
|
||||
// Return the result of stringifying the value.
|
||||
|
||||
return str("", {"": value});
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
// If the JSON object does not yet have a parse method, give it one.
|
||||
|
||||
if (typeof JSON.parse !== "function") {
|
||||
JSON.parse = function (text, reviver) {
|
||||
|
||||
// The parse method takes a text and an optional reviver function, and returns
|
||||
// a JavaScript value if the text is a valid JSON text.
|
||||
|
||||
var j;
|
||||
|
||||
function walk(holder, key) {
|
||||
|
||||
// The walk method is used to recursively walk the resulting structure so
|
||||
// that modifications can be made.
|
||||
|
||||
var k;
|
||||
var v;
|
||||
var value = holder[key];
|
||||
if (value && typeof value === "object") {
|
||||
for (k in value) {
|
||||
if (Object.prototype.hasOwnProperty.call(value, k)) {
|
||||
v = walk(value, k);
|
||||
if (v !== undefined) {
|
||||
value[k] = v;
|
||||
} else {
|
||||
delete value[k];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return reviver.call(holder, key, value);
|
||||
}
|
||||
|
||||
|
||||
// Parsing happens in four stages. In the first stage, we replace certain
|
||||
// Unicode characters with escape sequences. JavaScript handles many characters
|
||||
// incorrectly, either silently deleting them, or treating them as line endings.
|
||||
|
||||
text = String(text);
|
||||
rx_dangerous.lastIndex = 0;
|
||||
if (rx_dangerous.test(text)) {
|
||||
text = text.replace(rx_dangerous, function (a) {
|
||||
return (
|
||||
"\\u"
|
||||
+ ("0000" + a.charCodeAt(0).toString(16)).slice(-4)
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
// In the second stage, we run the text against regular expressions that look
|
||||
// for non-JSON patterns. We are especially concerned with "()" and "new"
|
||||
// because they can cause invocation, and "=" because it can cause mutation.
|
||||
// But just to be safe, we want to reject all unexpected forms.
|
||||
|
||||
// We split the second stage into 4 regexp operations in order to work around
|
||||
// crippling inefficiencies in IE's and Safari's regexp engines. First we
|
||||
// replace the JSON backslash pairs with "@" (a non-JSON character). Second, we
|
||||
// replace all simple value tokens with "]" characters. Third, we delete all
|
||||
// open brackets that follow a colon or comma or that begin the text. Finally,
|
||||
// we look to see that the remaining characters are only whitespace or "]" or
|
||||
// "," or ":" or "{" or "}". If that is so, then the text is safe for eval.
|
||||
|
||||
if (
|
||||
rx_one.test(
|
||||
text
|
||||
.replace(rx_two, "@")
|
||||
.replace(rx_three, "]")
|
||||
.replace(rx_four, "")
|
||||
)
|
||||
) {
|
||||
|
||||
// In the third stage we use the eval function to compile the text into a
|
||||
// JavaScript structure. The "{" operator is subject to a syntactic ambiguity
|
||||
// in JavaScript: it can begin a block or an object literal. We wrap the text
|
||||
// in parens to eliminate the ambiguity.
|
||||
|
||||
j = eval("(" + text + ")");
|
||||
|
||||
// In the optional fourth stage, we recursively walk the new structure, passing
|
||||
// each name/value pair to a reviver function for possible transformation.
|
||||
|
||||
return (typeof reviver === "function")
|
||||
? walk({"": j}, "")
|
||||
: j;
|
||||
}
|
||||
|
||||
// If the text is not JSON parseable, then a SyntaxError is thrown.
|
||||
|
||||
throw new SyntaxError("JSON.parse");
|
||||
};
|
||||
}
|
||||
}());
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 3.5 KiB |
|
|
@ -0,0 +1,95 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<style type="text/css">
|
||||
html, body, iframe {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
border: 0px;
|
||||
margin: 0px;
|
||||
overflow: hidden;
|
||||
background-color: #424242;
|
||||
}
|
||||
button {width: 100%;}
|
||||
</style>
|
||||
|
||||
<style>
|
||||
button {width: 100%;}
|
||||
body {margin:0; padding:0; height: 100%;}
|
||||
html {height: 100%;}
|
||||
</style>
|
||||
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js">
|
||||
</script>
|
||||
|
||||
<script type=text/javascript>
|
||||
$(function() {
|
||||
$("a#workfiles-button").bind("click", function() {
|
||||
RPC.call('Photoshop.workfiles_route').then(function (data) {
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
|
||||
<script type=text/javascript>
|
||||
$(function() {
|
||||
$("a#loader-button").bind("click", function() {
|
||||
RPC.call('Photoshop.loader_route').then(function (data) {
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
|
||||
<script type=text/javascript>
|
||||
$(function() {
|
||||
$("a#publish-button").bind("click", function() {
|
||||
RPC.call('Photoshop.publish_route').then(function (data) {
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
|
||||
<script type=text/javascript>
|
||||
$(function() {
|
||||
$("a#sceneinventory-button").bind("click", function() {
|
||||
RPC.call('Photoshop.sceneinventory_route').then(function (data) {
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
|
||||
<script type=text/javascript>
|
||||
$(function() {
|
||||
$("a#experimental-button").bind("click", function() {
|
||||
RPC.call('Photoshop.experimental_tools_route').then(function (data) {
|
||||
}, function (error) {
|
||||
alert(error);
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
<script type="text/javascript" src="./client/wsrpc.js"></script>
|
||||
<script type="text/javascript" src="./client/CSInterface.js"></script>
|
||||
<script type="text/javascript" src="./client/loglevel.min.js"></script>
|
||||
|
||||
<!-- helper library for better debugging of .jsx check its license! -->
|
||||
<script type="text/javascript" src="./host/JSX.js"></script>
|
||||
|
||||
<script type="text/javascript" src="./client/client.js"></script>
|
||||
|
||||
<a href=# id=workfiles-button><button>Workfiles...</button></a>
|
||||
<a href=# id=loader-button><button>Load...</button></a>
|
||||
<a href=# id=publish-button><button>Publish...</button></a>
|
||||
<a href=# id=sceneinventory-button><button>Manage...</button></a>
|
||||
<a href=# id=experimental-button><button>Experimental Tools...</button></a>
|
||||
</body>
|
||||
</html>
|
||||
406
server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py
Normal file
406
server_addon/photoshop/client/ayon_photoshop/api/launch_logic.py
Normal file
|
|
@ -0,0 +1,406 @@
|
|||
import os
|
||||
import subprocess
|
||||
import collections
|
||||
import asyncio
|
||||
|
||||
from wsrpc_aiohttp import (
|
||||
WebSocketRoute,
|
||||
WebSocketAsync
|
||||
)
|
||||
|
||||
import ayon_api
|
||||
from qtpy import QtCore
|
||||
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.pipeline import (
|
||||
registered_host,
|
||||
Anatomy,
|
||||
)
|
||||
from ayon_core.pipeline.workfile import (
|
||||
get_workfile_template_key_from_context,
|
||||
get_last_workfile,
|
||||
)
|
||||
from ayon_core.pipeline.template_data import get_template_data_with_names
|
||||
from ayon_core.tools.utils import host_tools
|
||||
from ayon_core.pipeline.context_tools import change_current_context
|
||||
|
||||
from .webserver import WebServerTool
|
||||
from .ws_stub import PhotoshopServerStub
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
class ConnectionNotEstablishedYet(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class MainThreadItem:
|
||||
"""Structure to store information about callback in main thread.
|
||||
|
||||
Item should be used to execute callback in main thread which may be needed
|
||||
for execution of Qt objects.
|
||||
|
||||
Item stores callback (callable variable), arguments and keyword arguments
|
||||
for the callback. Item holds information about its process.
|
||||
"""
|
||||
not_set = object()
|
||||
|
||||
def __init__(self, callback, *args, **kwargs):
|
||||
self._done = False
|
||||
self._exception = self.not_set
|
||||
self._result = self.not_set
|
||||
self._callback = callback
|
||||
self._args = args
|
||||
self._kwargs = kwargs
|
||||
|
||||
@property
|
||||
def done(self):
|
||||
return self._done
|
||||
|
||||
@property
|
||||
def exception(self):
|
||||
return self._exception
|
||||
|
||||
@property
|
||||
def result(self):
|
||||
return self._result
|
||||
|
||||
def execute(self):
|
||||
"""Execute callback and store its result.
|
||||
|
||||
Method must be called from main thread. Item is marked as `done`
|
||||
when callback execution finishes. Stores output of the callback, or exception
|
||||
information when callback raises one.
|
||||
"""
|
||||
log.debug("Executing process in main thread")
|
||||
if self.done:
|
||||
log.warning("- item is already processed")
|
||||
return
|
||||
|
||||
log.info("Running callback: {}".format(str(self._callback)))
|
||||
try:
|
||||
result = self._callback(*self._args, **self._kwargs)
|
||||
self._result = result
|
||||
|
||||
except Exception as exc:
|
||||
self._exception = exc
|
||||
|
||||
finally:
|
||||
self._done = True
|
||||
|
||||
|
||||
def stub():
|
||||
"""
|
||||
Convenience function to get server RPC stub to call methods directed
|
||||
at the host (Photoshop).
|
||||
It expects an already created connection, started from the client.
|
||||
Currently created when panel is opened (PS: Window > Extensions > Ayon)
|
||||
:return: <PhotoshopClientStub> where functions could be called from
|
||||
"""
|
||||
ps_stub = PhotoshopServerStub()
|
||||
if not ps_stub.client:
|
||||
raise ConnectionNotEstablishedYet("Connection is not created yet")
|
||||
|
||||
return ps_stub
|
||||
|
||||
|
||||
def show_tool_by_name(tool_name):
|
||||
kwargs = {}
|
||||
if tool_name == "loader":
|
||||
kwargs["use_context"] = True
|
||||
|
||||
host_tools.show_tool_by_name(tool_name, **kwargs)
|
||||
|
||||
|
||||
class ProcessLauncher(QtCore.QObject):
|
||||
route_name = "Photoshop"
|
||||
_main_thread_callbacks = collections.deque()
|
||||
|
||||
def __init__(self, subprocess_args):
|
||||
self._subprocess_args = subprocess_args
|
||||
self._log = None
|
||||
|
||||
super(ProcessLauncher, self).__init__()
|
||||
|
||||
# Keep track if launcher was already started
|
||||
self._started = False
|
||||
|
||||
self._process = None
|
||||
self._websocket_server = None
|
||||
|
||||
start_process_timer = QtCore.QTimer()
|
||||
start_process_timer.setInterval(100)
|
||||
|
||||
loop_timer = QtCore.QTimer()
|
||||
loop_timer.setInterval(200)
|
||||
|
||||
start_process_timer.timeout.connect(self._on_start_process_timer)
|
||||
loop_timer.timeout.connect(self._on_loop_timer)
|
||||
|
||||
self._start_process_timer = start_process_timer
|
||||
self._loop_timer = loop_timer
|
||||
|
||||
@property
|
||||
def log(self):
|
||||
if self._log is None:
|
||||
self._log = Logger.get_logger(
|
||||
"{}-launcher".format(self.route_name)
|
||||
)
|
||||
return self._log
|
||||
|
||||
@property
|
||||
def websocket_server_is_running(self):
|
||||
if self._websocket_server is not None:
|
||||
return self._websocket_server.is_running
|
||||
return False
|
||||
|
||||
@property
|
||||
def is_process_running(self):
|
||||
if self._process is not None:
|
||||
return self._process.poll() is None
|
||||
return False
|
||||
|
||||
@property
|
||||
def is_host_connected(self):
|
||||
"""Returns True if connected, False if app is not running at all."""
|
||||
if not self.is_process_running:
|
||||
return False
|
||||
|
||||
try:
|
||||
_stub = stub()
|
||||
if _stub:
|
||||
return True
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def execute_in_main_thread(cls, callback, *args, **kwargs):
|
||||
item = MainThreadItem(callback, *args, **kwargs)
|
||||
cls._main_thread_callbacks.append(item)
|
||||
return item
|
||||
|
||||
def start(self):
|
||||
if self._started:
|
||||
return
|
||||
self.log.info("Started launch logic of Photoshop")
|
||||
self._started = True
|
||||
self._start_process_timer.start()
|
||||
|
||||
def exit(self):
|
||||
""" Exit whole application. """
|
||||
if self._start_process_timer.isActive():
|
||||
self._start_process_timer.stop()
|
||||
if self._loop_timer.isActive():
|
||||
self._loop_timer.stop()
|
||||
|
||||
if self._websocket_server is not None:
|
||||
self._websocket_server.stop()
|
||||
|
||||
if self._process:
|
||||
self._process.kill()
|
||||
self._process.wait()
|
||||
|
||||
QtCore.QCoreApplication.exit()
|
||||
|
||||
def _on_loop_timer(self):
|
||||
# TODO find better way and catch errors
|
||||
# Run only callbacks that are in queue at the moment
|
||||
cls = self.__class__
|
||||
for _ in range(len(cls._main_thread_callbacks)):
|
||||
if cls._main_thread_callbacks:
|
||||
item = cls._main_thread_callbacks.popleft()
|
||||
item.execute()
|
||||
|
||||
if not self.is_process_running:
|
||||
self.log.info("Host process is not running. Closing")
|
||||
self.exit()
|
||||
|
||||
elif not self.websocket_server_is_running:
|
||||
self.log.info("Websocket server is not running. Closing")
|
||||
self.exit()
|
||||
|
||||
def _on_start_process_timer(self):
|
||||
# TODO add try except validations for each part in this method
|
||||
# Start server as first thing
|
||||
if self._websocket_server is None:
|
||||
self._init_server()
|
||||
return
|
||||
|
||||
# TODO add waiting time
|
||||
# Wait for webserver
|
||||
if not self.websocket_server_is_running:
|
||||
return
|
||||
|
||||
# Start application process
|
||||
if self._process is None:
|
||||
self._start_process()
|
||||
self.log.info("Waiting for host to connect")
|
||||
return
|
||||
|
||||
# TODO add waiting time
|
||||
# Wait until host is connected
|
||||
if self.is_host_connected:
|
||||
self._start_process_timer.stop()
|
||||
self._loop_timer.start()
|
||||
elif (
|
||||
not self.is_process_running
|
||||
or not self.websocket_server_is_running
|
||||
):
|
||||
self.exit()
|
||||
|
||||
def _init_server(self):
|
||||
if self._websocket_server is not None:
|
||||
return
|
||||
|
||||
self.log.debug(
|
||||
"Initialization of websocket server for host communication"
|
||||
)
|
||||
|
||||
self._websocket_server = websocket_server = WebServerTool()
|
||||
if websocket_server.port_occupied(
|
||||
websocket_server.host_name,
|
||||
websocket_server.port
|
||||
):
|
||||
self.log.info(
|
||||
"Server already running, sending actual context and exit."
|
||||
)
|
||||
asyncio.run(websocket_server.send_context_change(self.route_name))
|
||||
self.exit()
|
||||
return
|
||||
|
||||
# Add Websocket route
|
||||
websocket_server.add_route("*", "/ws/", WebSocketAsync)
|
||||
# Add Photoshop route to websocket handler
|
||||
|
||||
print("Adding {} route".format(self.route_name))
|
||||
WebSocketAsync.add_route(
|
||||
self.route_name, PhotoshopRoute
|
||||
)
|
||||
self.log.info("Starting websocket server for host communication")
|
||||
websocket_server.start_server()
|
||||
|
||||
def _start_process(self):
|
||||
if self._process is not None:
|
||||
return
|
||||
self.log.info("Starting host process")
|
||||
try:
|
||||
self._process = subprocess.Popen(
|
||||
self._subprocess_args,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL
|
||||
)
|
||||
except Exception:
|
||||
self.log.info("exce", exc_info=True)
|
||||
self.exit()
|
||||
|
||||
|
||||
class PhotoshopRoute(WebSocketRoute):
|
||||
"""
|
||||
One route, mimicking external application (like Harmony, etc).
|
||||
All functions could be called from client.
|
||||
'do_notify' function calls function on the client - mimicking
|
||||
notification after long running job on the server or similar
|
||||
"""
|
||||
instance = None
|
||||
|
||||
def init(self, **kwargs):
|
||||
# Python __init__ must return "self".
|
||||
# This method might return anything.
|
||||
log.debug("someone called Photoshop route")
|
||||
self.instance = self
|
||||
return kwargs
|
||||
|
||||
# server functions
|
||||
async def ping(self):
|
||||
log.debug("someone called Photoshop route ping")
|
||||
|
||||
# This method calls function on the client side
|
||||
# client functions
|
||||
async def set_context(self, project, folder, task):
|
||||
"""
|
||||
Sets 'project' and 'folder' to envs, e.g. setting context.
|
||||
|
||||
Opens last workfile from that context if it exists.
|
||||
|
||||
Args:
|
||||
project (str)
|
||||
folder (str)
|
||||
task (str)
|
||||
"""
|
||||
log.info("Setting context change")
|
||||
log.info(f"project {project} folder {folder} task {task}")
|
||||
|
||||
folder_entity = ayon_api.get_folder_by_path(project, folder)
|
||||
task_entity = ayon_api.get_task_by_name(
|
||||
project, folder_entity["id"], task
|
||||
)
|
||||
change_current_context(folder_entity, task_entity)
|
||||
|
||||
last_workfile_path = self._get_last_workfile_path(project,
|
||||
folder,
|
||||
task)
|
||||
if last_workfile_path and os.path.exists(last_workfile_path):
|
||||
ProcessLauncher.execute_in_main_thread(
|
||||
lambda: stub().open(last_workfile_path))
|
||||
|
||||
|
||||
async def read(self):
|
||||
log.debug("photoshop.read client calls server server calls "
|
||||
"photoshop client")
|
||||
return await self.socket.call('photoshop.read')
|
||||
|
||||
# panel routes for tools
|
||||
async def workfiles_route(self):
|
||||
self._tool_route("workfiles")
|
||||
|
||||
async def loader_route(self):
|
||||
self._tool_route("loader")
|
||||
|
||||
async def publish_route(self):
|
||||
self._tool_route("publisher")
|
||||
|
||||
async def sceneinventory_route(self):
|
||||
self._tool_route("sceneinventory")
|
||||
|
||||
async def experimental_tools_route(self):
|
||||
self._tool_route("experimental_tools")
|
||||
|
||||
def _tool_route(self, _tool_name):
|
||||
"""The address accessed when clicking on the buttons."""
|
||||
|
||||
ProcessLauncher.execute_in_main_thread(show_tool_by_name, _tool_name)
|
||||
|
||||
# Required return statement.
|
||||
return "nothing"
|
||||
|
||||
def _get_last_workfile_path(self, project_name, folder_path, task_name):
|
||||
"""Returns last workfile path if exists"""
|
||||
host = registered_host()
|
||||
host_name = "photoshop"
|
||||
template_key = get_workfile_template_key_from_context(
|
||||
project_name,
|
||||
folder_path,
|
||||
task_name,
|
||||
host_name,
|
||||
)
|
||||
anatomy = Anatomy(project_name)
|
||||
|
||||
data = get_template_data_with_names(
|
||||
project_name, folder_path, task_name, host_name
|
||||
)
|
||||
data["root"] = anatomy.roots
|
||||
|
||||
work_template = anatomy.get_template_item("work", template_key)
|
||||
|
||||
# Define saving file extension
|
||||
extensions = host.get_workfile_extensions()
|
||||
|
||||
work_root = work_template["directory"].format_strict(data)
|
||||
file_template = work_template["file"].template
|
||||
last_workfile_path = get_last_workfile(
|
||||
work_root, file_template, data, extensions, True
|
||||
)
|
||||
|
||||
return last_workfile_path
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
"""Script wraps launch mechanism of Photoshop implementations.
|
||||
|
||||
Arguments passed to the script are passed to launch function in host
|
||||
implementation. In all cases requires host app executable and may contain
|
||||
workfile or others.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from ayon_photoshop.api.lib import main as host_main
|
||||
|
||||
# Get current file to locate start point of sys.argv
|
||||
CURRENT_FILE = os.path.abspath(__file__)
|
||||
|
||||
|
||||
def show_error_messagebox(title, message, detail_message=None):
|
||||
"""Function will show message and process ends after closing it."""
|
||||
from qtpy import QtWidgets, QtCore
|
||||
from ayon_core import style
|
||||
|
||||
app = QtWidgets.QApplication([])
|
||||
app.setStyleSheet(style.load_stylesheet())
|
||||
|
||||
msgbox = QtWidgets.QMessageBox()
|
||||
msgbox.setWindowTitle(title)
|
||||
msgbox.setText(message)
|
||||
|
||||
if detail_message:
|
||||
msgbox.setDetailedText(detail_message)
|
||||
|
||||
msgbox.setWindowModality(QtCore.Qt.ApplicationModal)
|
||||
msgbox.show()
|
||||
|
||||
sys.exit(app.exec_())
|
||||
|
||||
|
||||
def on_invalid_args(script_not_found):
|
||||
"""Show to user message box saying that something went wrong.
|
||||
|
||||
Tell user that arguments to launch implementation are invalid with
|
||||
arguments details.
|
||||
|
||||
Args:
|
||||
script_not_found (bool): Use different message based on this value.
|
||||
"""
|
||||
|
||||
title = "Invalid arguments"
|
||||
joined_args = ", ".join("\"{}\"".format(arg) for arg in sys.argv)
|
||||
if script_not_found:
|
||||
submsg = "Where couldn't find script path:\n\"{}\""
|
||||
else:
|
||||
submsg = "Expected Host executable after script path:\n\"{}\""
|
||||
|
||||
message = "BUG: Got invalid arguments so can't launch Host application."
|
||||
detail_message = "Process was launched with arguments:\n{}\n\n{}".format(
|
||||
joined_args,
|
||||
submsg.format(CURRENT_FILE)
|
||||
)
|
||||
|
||||
show_error_messagebox(title, message, detail_message)
|
||||
|
||||
|
||||
def main(argv):
|
||||
# Modify current file path to find match in sys.argv which may be different
|
||||
# on windows (different letter cases and slashes).
|
||||
modified_current_file = CURRENT_FILE.replace("\\", "/").lower()
|
||||
|
||||
# Create a copy of sys argv
|
||||
sys_args = list(argv)
|
||||
after_script_idx = None
|
||||
# Find script path in sys.argv to know index of argv where host
|
||||
# executable should be.
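# Illustrative argv shape (paths are hypothetical):
#   [..., ".../launch_script.py", "C:/.../Photoshop.exe", "optional_workfile.psd"]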
|
||||
for idx, item in enumerate(sys_args):
|
||||
if item.replace("\\", "/").lower() == modified_current_file:
|
||||
after_script_idx = idx + 1
|
||||
break
|
||||
|
||||
# Validate that there is at least one argument after script path
|
||||
launch_args = None
|
||||
if after_script_idx is not None:
|
||||
launch_args = sys_args[after_script_idx:]
|
||||
|
||||
if launch_args:
|
||||
# Launch host implementation
|
||||
host_main(*launch_args)
|
||||
else:
|
||||
# Show message box
|
||||
on_invalid_args(after_script_idx is None)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
||||
84
server_addon/photoshop/client/ayon_photoshop/api/lib.py
Normal file
84
server_addon/photoshop/client/ayon_photoshop/api/lib.py
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
import os
|
||||
import sys
|
||||
import contextlib
|
||||
import traceback
|
||||
|
||||
from ayon_core.lib import env_value_to_bool, Logger, is_in_tests
|
||||
from ayon_core.addon import AddonsManager
|
||||
from ayon_core.pipeline import install_host
|
||||
from ayon_core.tools.utils import host_tools
|
||||
from ayon_core.tools.utils import get_ayon_qt_app
|
||||
|
||||
from .launch_logic import ProcessLauncher, stub
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def safe_excepthook(*args):
|
||||
traceback.print_exception(*args)
|
||||
|
||||
|
||||
def main(*subprocess_args):
|
||||
from ayon_photoshop.api import PhotoshopHost
|
||||
|
||||
host = PhotoshopHost()
|
||||
install_host(host)
|
||||
|
||||
sys.excepthook = safe_excepthook
|
||||
|
||||
# coloring in StdOutBroker
|
||||
os.environ["AYON_LOG_NO_COLORS"] = "0"
|
||||
app = get_ayon_qt_app()
|
||||
app.setQuitOnLastWindowClosed(False)
|
||||
|
||||
launcher = ProcessLauncher(subprocess_args)
|
||||
launcher.start()
|
||||
|
||||
if env_value_to_bool("HEADLESS_PUBLISH"):
|
||||
manager = AddonsManager()
|
||||
webpublisher_addon = manager["webpublisher"]
|
||||
launcher.execute_in_main_thread(
|
||||
webpublisher_addon.headless_publish,
|
||||
log,
|
||||
"ClosePS",
|
||||
is_in_tests()
|
||||
)
|
||||
elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH",
|
||||
default=True):
|
||||
|
||||
launcher.execute_in_main_thread(
|
||||
host_tools.show_workfiles,
|
||||
save=env_value_to_bool("WORKFILES_SAVE_AS")
|
||||
)
|
||||
|
||||
sys.exit(app.exec_())
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def maintained_selection():
|
||||
"""Maintain selection during context."""
|
||||
selection = stub().get_selected_layers()
|
||||
try:
|
||||
yield selection
|
||||
finally:
|
||||
stub().select_layers(selection)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def maintained_visibility(layers=None):
|
||||
"""Maintain visibility during context.
|
||||
|
||||
Args:
|
||||
layers (list of PSItem): used for caching
|
||||
"""
|
||||
visibility = {}
|
||||
if not layers:
|
||||
layers = stub().get_layers()
|
||||
for layer in layers:
|
||||
visibility[layer.id] = layer.visible
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
for layer in layers:
|
||||
stub().set_visible(layer.id, visibility[layer.id])
|
||||
pass
|
||||
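A minimal usage sketch of the two context managers above, assuming Photoshop is running and the stub connection is established:

```python
from ayon_photoshop.api.lib import maintained_selection, maintained_visibility

with maintained_selection() as selection:
    # Operate on other layers here; the original selection is restored
    # when the block exits.
    print("Previously selected:", [layer.name for layer in selection])

with maintained_visibility():
    # Toggle layer visibility freely; the cached per-layer flags are
    # restored afterwards.
    pass
```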
BIN
server_addon/photoshop/client/ayon_photoshop/api/panel.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 8.6 KiB |
Binary file not shown.
After Width: | Height: | Size: 13 KiB |
285
server_addon/photoshop/client/ayon_photoshop/api/pipeline.py
Normal file
@@ -0,0 +1,285 @@
import os

from qtpy import QtWidgets

import pyblish.api

from ayon_core.lib import register_event_callback, Logger
from ayon_core.pipeline import (
    register_loader_plugin_path,
    register_creator_plugin_path,
    AVALON_CONTAINER_ID,
    AYON_INSTANCE_ID,
    AVALON_INSTANCE_ID,
)

from ayon_core.host import (
    HostBase,
    IWorkfileHost,
    ILoadHost,
    IPublishHost
)

from ayon_core.pipeline.load import any_outdated_containers
from ayon_core.tools.utils import get_ayon_qt_app
from ayon_photoshop import PHOTOSHOP_ADDON_ROOT

from . import lib

log = Logger.get_logger(__name__)

PLUGINS_DIR = os.path.join(PHOTOSHOP_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")


class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
    name = "photoshop"

    def install(self):
        """Install Photoshop-specific functionality needed for integration.

        This function is called automatically on calling
        `api.install(photoshop)`.
        """
        log.info("Installing OpenPype Photoshop...")
        pyblish.api.register_host("photoshop")

        pyblish.api.register_plugin_path(PUBLISH_PATH)
        register_loader_plugin_path(LOAD_PATH)
        register_creator_plugin_path(CREATE_PATH)

        register_event_callback("application.launched", on_application_launch)

    def current_file(self):
        try:
            full_name = lib.stub().get_active_document_full_name()
            if full_name and full_name != "null":
                return os.path.normpath(full_name).replace("\\", "/")
        except Exception:
            pass

        return None

    def work_root(self, session):
        return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")

    def open_workfile(self, filepath):
        lib.stub().open(filepath)

        return True

    def save_workfile(self, filepath=None):
        _, ext = os.path.splitext(filepath)
        lib.stub().saveAs(filepath, ext[1:], True)

    def get_current_workfile(self):
        return self.current_file()

    def workfile_has_unsaved_changes(self):
        if self.current_file():
            return not lib.stub().is_saved()

        return False

    def get_workfile_extensions(self):
        return [".psd", ".psb"]

    def get_containers(self):
        return ls()

    def get_context_data(self):
        """Get stored values for context (validation enable/disable etc.)"""
        meta = _get_stub().get_layers_metadata()
        for item in meta:
            if item.get("id") == "publish_context":
                item.pop("id")
                return item

        return {}

    def update_context_data(self, data, changes):
        """Store value needed for context"""
        item = data
        item["id"] = "publish_context"
        _get_stub().imprint(item["id"], item)

    def list_instances(self):
        """List all created instances to publish from current workfile.

        Pulls from File > File Info

        Returns:
            (list) of dictionaries matching instances format
        """
        stub = _get_stub()

        if not stub:
            return []

        instances = []
        layers_meta = stub.get_layers_metadata()
        if layers_meta:
            for instance in layers_meta:
                if instance.get("id") in {
                    AYON_INSTANCE_ID, AVALON_INSTANCE_ID
                }:
                    instances.append(instance)

        return instances

    def remove_instance(self, instance):
        """Remove instance from current workfile metadata.

        Updates metadata of current file in File > File Info and removes
        icon highlight on group layer.

        Args:
            instance (dict): instance representation from subsetmanager model
        """
        stub = _get_stub()

        if not stub:
            return

        inst_id = instance.get("instance_id") or instance.get("uuid")  # legacy
        if not inst_id:
            log.warning("No instance identifier for {}".format(instance))
            return

        stub.remove_instance(inst_id)

        if instance.get("members"):
            item = stub.get_layer(instance["members"][0])
            if item:
                stub.rename_layer(item.id,
                                  item.name.replace(stub.PUBLISH_ICON, ''))


def check_inventory():
    if not any_outdated_containers():
        return

    # Warn about outdated containers.
    _app = get_ayon_qt_app()

    message_box = QtWidgets.QMessageBox()
    message_box.setIcon(QtWidgets.QMessageBox.Warning)
    msg = "There are outdated containers in the scene."
    message_box.setText(msg)
    message_box.exec_()


def on_application_launch():
    check_inventory()


def ls():
    """Yields containers from active Photoshop document.

    This is the host-equivalent of api.ls(), but instead of listing
    assets on disk, it lists assets already loaded in Photoshop; once loaded
    they are called 'containers'.

    Yields:
        dict: container

    """
    try:
        stub = lib.stub()  # only after Photoshop is up
    except lib.ConnectionNotEstablishedYet:
        print("Not connected yet, ignoring")
        return

    if not stub.get_active_document_name():
        return

    layers_meta = stub.get_layers_metadata()  # minimize calls to PS
    for layer in stub.get_layers():
        data = stub.read(layer, layers_meta)

        # Skip non-tagged layers.
        if not data:
            continue

        # Filter to only containers.
        if "container" not in data["id"]:
            continue

        # Append transient data
        data["objectName"] = layer.name.replace(stub.LOADED_ICON, '')
        data["layer"] = layer

        yield data


def _get_stub():
    """Handle pulling stub from PS to run operations on host.

    Returns:
        (PhotoshopServerStub) or None
    """
    try:
        stub = lib.stub()  # only after Photoshop is up
    except lib.ConnectionNotEstablishedYet:
        print("Not connected yet, ignoring")
        return

    if not stub.get_active_document_name():
        return

    return stub


def containerise(
    name, namespace, layer, context, loader=None, suffix="_CON"
):
    """Imprint layer with metadata.

    Containerisation enables tracking of version, author and origin
    for loaded assets.

    Arguments:
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        layer (PSItem): Layer to containerise
        context (dict): Asset information
        loader (str, optional): Name of loader used to produce this container.
        suffix (str, optional): Suffix of container, defaults to `_CON`.

    Returns:
        container (str): Name of container assembly
    """
    layer.name = name + suffix

    data = {
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": name,
        "namespace": namespace,
        "loader": str(loader),
        "representation": context["representation"]["id"],
        "members": [str(layer.id)]
    }
    stub = lib.stub()
    stub.imprint(layer.id, data)

    return layer
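# For orientation, the imprinted container metadata looks like this
# (values mirror the example in the stub's read() docstring; illustrative):
#     {
#         "schema": "openpype:container-2.0",
#         "id": "pyblish.avalon.container",   # AVALON_CONTAINER_ID
#         "name": "imageMainMiddle",
#         "namespace": "Hero_imageMainMiddle_001",
#         "loader": "ImageLoader",
#         "representation": "6203dc91e80934d9f6ee7d96",
#         "members": ["64"]
#     }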


def cache_and_get_instances(creator):
    """Cache instances in shared data.

    Stores all instances as a list, as legacy instances might still be
    present.
    Args:
        creator (Creator): Plugin which would like to get instances from host.
    Returns:
        list: all instances stored in metadata
    """
    shared_key = "openpype.photoshop.instances"
    if shared_key not in creator.collection_shared_data:
        creator.collection_shared_data[shared_key] = \
            creator.host.list_instances()
    return creator.collection_shared_data[shared_key]
37
server_addon/photoshop/client/ayon_photoshop/api/plugin.py
Normal file
@@ -0,0 +1,37 @@
import re

from ayon_core.pipeline import LoaderPlugin
from .launch_logic import stub


def get_unique_layer_name(layers, container_name, product_name):
    """Prepare unique layer name.

    Gets all layer names; if '<container_name>_<product_name>' is already
    present, the numeric suffix is increased by 1, otherwise '_001' is used.

    Args:
        layers (list): list of layer items with layer info (name, id, etc.)
        container_name (str):
        product_name (str):

    Returns:
        str: name_00X (without version)
    """
    name = "{}_{}".format(container_name, product_name)
    names = {}
    for layer in layers:
        layer_name = re.sub(r'_\d{3}$', '', layer.name)
        if layer_name in names:
            names[layer_name] = names[layer_name] + 1
        else:
            names[layer_name] = 1
    occurrences = names.get(name, 0)

    return "{}_{:0>3d}".format(name, occurrences + 1)
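# Worked example (layer names invented): with existing layers
# "Town_imageBG" and "Town_imageBG_001", both normalize to "Town_imageBG"
# after stripping the trailing "_\d{3}", so the occurrence count is 2 and
#     get_unique_layer_name(layers, "Town", "imageBG")
# returns "Town_imageBG_003".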


class PhotoshopLoader(LoaderPlugin):
    @staticmethod
    def get_stub():
        return stub()
241
server_addon/photoshop/client/ayon_photoshop/api/webserver.py
Normal file
@@ -0,0 +1,241 @@
"""Webserver for communication with photoshop.
|
||||
|
||||
Aiohttp (Asyncio) based websocket server used for communication with host
|
||||
application.
|
||||
|
||||
This webserver is started in spawned Python process that opens DCC during
|
||||
its launch, waits for connection from DCC and handles communication going
|
||||
forward. Server is closed before Python process is killed.
|
||||
"""
|
||||
import os
|
||||
import logging
|
||||
import urllib
|
||||
import threading
|
||||
import asyncio
|
||||
import socket
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from wsrpc_aiohttp import WSRPCClient
|
||||
|
||||
from ayon_core.pipeline import get_global_context
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WebServerTool:
|
||||
"""
|
||||
Basic POC implementation of asychronic websocket RPC server.
|
||||
Uses class in external_app_1.py to mimic implementation for single
|
||||
external application.
|
||||
'test_client' folder contains two test implementations of client
|
||||
"""
|
||||
_instance = None
|
||||
|
||||
def __init__(self):
|
||||
WebServerTool._instance = self
|
||||
|
||||
self.client = None
|
||||
self.handlers = {}
|
||||
self.on_stop_callbacks = []
|
||||
|
||||
port = None
|
||||
host_name = "localhost"
|
||||
websocket_url = os.getenv("WEBSOCKET_URL")
|
||||
if websocket_url:
|
||||
parsed = urllib.parse.urlparse(websocket_url)
|
||||
port = parsed.port
|
||||
host_name = parsed.netloc.split(":")[0]
|
||||
if not port:
|
||||
port = 8098 # fallback
|
||||
|
||||
self.port = port
|
||||
self.host_name = host_name
|
||||
|
||||
self.app = web.Application()
|
||||
|
||||
# add route with multiple methods for single "external app"
|
||||
self.webserver_thread = WebServerThread(self, self.port)
|
||||
|
||||
def add_route(self, *args, **kwargs):
|
||||
self.app.router.add_route(*args, **kwargs)
|
||||
|
||||
def add_static(self, *args, **kwargs):
|
||||
self.app.router.add_static(*args, **kwargs)
|
||||
|
||||
def start_server(self):
|
||||
if self.webserver_thread and not self.webserver_thread.is_alive():
|
||||
self.webserver_thread.start()
|
||||
|
||||
def stop_server(self):
|
||||
self.stop()
|
||||
|
||||
async def send_context_change(self, host):
|
||||
"""
|
||||
Calls running webserver to inform about context change
|
||||
|
||||
Used when new PS/AE should be triggered,
|
||||
but one already running, without
|
||||
this publish would point to old context.
|
||||
"""
|
||||
client = WSRPCClient(os.getenv("WEBSOCKET_URL"),
|
||||
loop=asyncio.get_event_loop())
|
||||
await client.connect()
|
||||
|
||||
context = get_global_context()
|
||||
project_name = context["project_name"]
|
||||
folder_path = context["folder_path"]
|
||||
task_name = context["task_name"]
|
||||
log.info("Sending context change to {}{}/{}".format(
|
||||
project_name, folder_path, task_name
|
||||
))
|
||||
|
||||
await client.call(
|
||||
'{}.set_context'.format(host),
|
||||
project=project_name,
|
||||
folder=folder_path,
|
||||
task=task_name
|
||||
)
|
||||
await client.close()
|
||||
|
||||
def port_occupied(self, host_name, port):
|
||||
"""
|
||||
Check if 'url' is already occupied.
|
||||
|
||||
This could mean, that app is already running and we are trying open it
|
||||
again. In that case, use existing running webserver.
|
||||
Check here is easier than capturing exception from thread.
|
||||
"""
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
|
||||
result = con.connect_ex((host_name, port)) == 0
|
||||
|
||||
if result:
|
||||
print(f"Port {port} is already in use")
|
||||
return result
|
||||
|
||||
def call(self, func):
|
||||
log.debug("websocket.call {}".format(func))
|
||||
future = asyncio.run_coroutine_threadsafe(
|
||||
func,
|
||||
self.webserver_thread.loop
|
||||
)
|
||||
result = future.result()
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def get_instance():
|
||||
if WebServerTool._instance is None:
|
||||
WebServerTool()
|
||||
return WebServerTool._instance
|
||||
|
||||
@property
|
||||
def is_running(self):
|
||||
if not self.webserver_thread:
|
||||
return False
|
||||
return self.webserver_thread.is_running
|
||||
|
||||
def stop(self):
|
||||
if not self.is_running:
|
||||
return
|
||||
try:
|
||||
log.debug("Stopping websocket server")
|
||||
self.webserver_thread.is_running = False
|
||||
self.webserver_thread.stop()
|
||||
except Exception:
|
||||
log.warning(
|
||||
"Error has happened during Killing websocket server",
|
||||
exc_info=True
|
||||
)
|
||||
|
||||
def thread_stopped(self):
|
||||
for callback in self.on_stop_callbacks:
|
||||
callback()
|
||||
|
||||
|
||||
class WebServerThread(threading.Thread):
|
||||
""" Listener for websocket rpc requests.
|
||||
|
||||
It would be probably better to "attach" this to main thread (as for
|
||||
example Harmony needs to run something on main thread), but currently
|
||||
it creates separate thread and separate asyncio event loop
|
||||
"""
|
||||
def __init__(self, module, port):
|
||||
super(WebServerThread, self).__init__()
|
||||
|
||||
self.is_running = False
|
||||
self.port = port
|
||||
self.module = module
|
||||
self.loop = None
|
||||
self.runner = None
|
||||
self.site = None
|
||||
self.tasks = []
|
||||
|
||||
def run(self):
|
||||
self.is_running = True
|
||||
|
||||
try:
|
||||
log.info("Starting web server")
|
||||
self.loop = asyncio.new_event_loop() # create new loop for thread
|
||||
asyncio.set_event_loop(self.loop)
|
||||
|
||||
self.loop.run_until_complete(self.start_server())
|
||||
|
||||
websocket_url = "ws://localhost:{}/ws".format(self.port)
|
||||
|
||||
log.debug(
|
||||
"Running Websocket server on URL: \"{}\"".format(websocket_url)
|
||||
)
|
||||
|
||||
asyncio.ensure_future(self.check_shutdown(), loop=self.loop)
|
||||
self.loop.run_forever()
|
||||
except Exception:
|
||||
self.is_running = False
|
||||
log.warning(
|
||||
"Websocket Server service has failed", exc_info=True
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
self.loop.close() # optional
|
||||
|
||||
self.is_running = False
|
||||
self.module.thread_stopped()
|
||||
log.info("Websocket server stopped")
|
||||
|
||||
async def start_server(self):
|
||||
""" Starts runner and TCPsite """
|
||||
self.runner = web.AppRunner(self.module.app)
|
||||
await self.runner.setup()
|
||||
self.site = web.TCPSite(self.runner, 'localhost', self.port)
|
||||
await self.site.start()
|
||||
|
||||
def stop(self):
|
||||
"""Sets is_running flag to false, 'check_shutdown' shuts server down"""
|
||||
self.is_running = False
|
||||
|
||||
async def check_shutdown(self):
|
||||
""" Future that is running and checks if server should be running
|
||||
periodically.
|
||||
"""
|
||||
while self.is_running:
|
||||
while self.tasks:
|
||||
task = self.tasks.pop(0)
|
||||
log.debug("waiting for task {}".format(task))
|
||||
await task
|
||||
log.debug("returned value {}".format(task.result))
|
||||
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
log.debug("Starting shutdown")
|
||||
await self.site.stop()
|
||||
log.debug("Site stopped")
|
||||
await self.runner.cleanup()
|
||||
log.debug("Runner stopped")
|
||||
tasks = [task for task in asyncio.all_tasks() if
|
||||
task is not asyncio.current_task()]
|
||||
list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
log.debug(f'Finished awaiting cancelled tasks, results: {results}...')
|
||||
await self.loop.shutdown_asyncgens()
|
||||
# to really make sure everything else has time to stop
|
||||
await asyncio.sleep(0.07)
|
||||
self.loop.stop()
|
||||
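A hedged sketch of the server lifecycle; in production `launch_logic.ProcessLauncher` owns these calls, so the direct usage below is illustrative:

```python
import time

server = WebServerTool.get_instance()
if not server.port_occupied(server.host_name, server.port):
    server.start_server()  # spins up WebServerThread with its own loop

time.sleep(1)  # stand-in for the application's lifetime
server.stop_server()  # flips is_running; check_shutdown() finishes cleanup
```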
571
server_addon/photoshop/client/ayon_photoshop/api/ws_stub.py
Normal file
@@ -0,0 +1,571 @@
"""
|
||||
Stub handling connection from server to client.
|
||||
Used anywhere solution is calling client methods.
|
||||
"""
|
||||
import json
|
||||
import attr
|
||||
from wsrpc_aiohttp import WebSocketAsync
|
||||
|
||||
from .webserver import WebServerTool
|
||||
|
||||
|
||||
@attr.s
|
||||
class PSItem(object):
|
||||
"""
|
||||
Object denoting layer or group item in PS. Each item is created in
|
||||
PS by any Loader, but contains same fields, which are being used
|
||||
in later processing.
|
||||
"""
|
||||
# metadata
|
||||
id = attr.ib() # id created by AE, could be used for querying
|
||||
name = attr.ib() # name of item
|
||||
group = attr.ib(default=None) # item type (footage, folder, comp)
|
||||
parents = attr.ib(factory=list)
|
||||
visible = attr.ib(default=True)
|
||||
type = attr.ib(default=None)
|
||||
# all imported elements, single for
|
||||
members = attr.ib(factory=list)
|
||||
long_name = attr.ib(default=None)
|
||||
color_code = attr.ib(default=None) # color code of layer
|
||||
instance_id = attr.ib(default=None)
|
||||
|
||||
@property
|
||||
def clean_name(self):
|
||||
"""Returns layer name without publish icon highlight
|
||||
|
||||
Returns:
|
||||
(str)
|
||||
"""
|
||||
return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '')
|
||||
.replace(PhotoshopServerStub.LOADED_ICON, ''))
|
||||
|
||||
|
||||
class PhotoshopServerStub:
|
||||
"""
|
||||
Stub for calling function on client (Photoshop js) side.
|
||||
Expects that client is already connected (started when avalon menu
|
||||
is opened).
|
||||
'self.websocketserver.call' is used as async wrapper
|
||||
"""
|
||||
PUBLISH_ICON = '\u2117 '
|
||||
LOADED_ICON = '\u25bc'
|
||||
|
||||
def __init__(self):
|
||||
self.websocketserver = WebServerTool.get_instance()
|
||||
self.client = self.get_client()
|
||||
|
||||
@staticmethod
|
||||
def get_client():
|
||||
"""
|
||||
Return first connected client to WebSocket
|
||||
TODO implement selection by Route
|
||||
:return: <WebSocketAsync> client
|
||||
"""
|
||||
clients = WebSocketAsync.get_clients()
|
||||
client = None
|
||||
if len(clients) > 0:
|
||||
key = list(clients.keys())[0]
|
||||
client = clients.get(key)
|
||||
|
||||
return client
|
||||
|
||||
def open(self, path):
|
||||
"""Open file located at 'path' (local).
|
||||
|
||||
Args:
|
||||
path(string): file path locally
|
||||
Returns: None
|
||||
"""
|
||||
self.websocketserver.call(
|
||||
self.client.call('Photoshop.open', path=path)
|
||||
)
|
||||
|
||||
def read(self, layer, layers_meta=None):
|
||||
"""Parses layer metadata from Headline field of active document.
|
||||
|
||||
Args:
|
||||
layer: (PSItem)
|
||||
layers_meta: full list from Headline (for performance in loops)
|
||||
Returns:
|
||||
(dict) of layer metadata stored in PS file
|
||||
|
||||
Example:
|
||||
{
|
||||
'id': 'pyblish.avalon.container',
|
||||
'loader': 'ImageLoader',
|
||||
'members': ['64'],
|
||||
'name': 'imageMainMiddle',
|
||||
'namespace': 'Hero_imageMainMiddle_001',
|
||||
'representation': '6203dc91e80934d9f6ee7d96',
|
||||
'schema': 'openpype:container-2.0'
|
||||
}
|
||||
"""
|
||||
if layers_meta is None:
|
||||
layers_meta = self.get_layers_metadata()
|
||||
|
||||
for layer_meta in layers_meta:
|
||||
layer_id = layer_meta.get("uuid") # legacy
|
||||
if layer_meta.get("members"):
|
||||
layer_id = layer_meta["members"][0]
|
||||
if str(layer.id) == str(layer_id):
|
||||
return layer_meta
|
||||
print("Unable to find layer metadata for {}".format(layer.id))
|
||||
|
||||
def imprint(self, item_id, data, all_layers=None, items_meta=None):
|
||||
"""Save layer metadata to Headline field of active document
|
||||
|
||||
Stores metadata in format:
|
||||
[{
|
||||
"active":true,
|
||||
"productName":"imageBG",
|
||||
"productType":"image",
|
||||
"id":"ayon.create.instance",
|
||||
"folderPath":"Town",
|
||||
"uuid": "8"
|
||||
}] - for created instances
|
||||
OR
|
||||
[{
|
||||
"schema": "openpype:container-2.0",
|
||||
"id": "ayon.create.instance",
|
||||
"name": "imageMG",
|
||||
"namespace": "Jungle_imageMG_001",
|
||||
"loader": "ImageLoader",
|
||||
"representation": "5fbfc0ee30a946093c6ff18a",
|
||||
"members": [
|
||||
"40"
|
||||
]
|
||||
}] - for loaded instances
|
||||
|
||||
Args:
|
||||
item_id (str):
|
||||
data(string): json representation for single layer
|
||||
all_layers (list of PSItem): for performance, could be
|
||||
injected for usage in loop, if not, single call will be
|
||||
triggered
|
||||
items_meta(string): json representation from Headline
|
||||
(for performance - provide only if imprint is in
|
||||
loop - value should be same)
|
||||
Returns: None
|
||||
"""
|
||||
if not items_meta:
|
||||
items_meta = self.get_layers_metadata()
|
||||
|
||||
# json.dumps writes integer values in a dictionary to string, so
|
||||
# anticipating it here.
|
||||
item_id = str(item_id)
|
||||
is_new = True
|
||||
result_meta = []
|
||||
for item_meta in items_meta:
|
||||
if ((item_meta.get('members') and
|
||||
item_id == str(item_meta.get('members')[0])) or
|
||||
item_meta.get("instance_id") == item_id):
|
||||
is_new = False
|
||||
if data:
|
||||
item_meta.update(data)
|
||||
result_meta.append(item_meta)
|
||||
else:
|
||||
result_meta.append(item_meta)
|
||||
|
||||
if is_new:
|
||||
result_meta.append(data)
|
||||
|
||||
# Ensure only valid ids are stored.
|
||||
if not all_layers:
|
||||
all_layers = self.get_layers()
|
||||
layer_ids = [layer.id for layer in all_layers]
|
||||
cleaned_data = []
|
||||
|
||||
for item in result_meta:
|
||||
if item.get("members"):
|
||||
if int(item["members"][0]) not in layer_ids:
|
||||
continue
|
||||
|
||||
cleaned_data.append(item)
|
||||
|
||||
payload = json.dumps(cleaned_data, indent=4)
|
||||
self.websocketserver.call(
|
||||
self.client.call('Photoshop.imprint', payload=payload)
|
||||
)
|
||||
|
||||
def get_layers(self):
|
||||
"""Returns JSON document with all(?) layers in active document.
|
||||
|
||||
Returns: <list of PSItem>
|
||||
Format of tuple: { 'id':'123',
|
||||
'name': 'My Layer 1',
|
||||
'type': 'GUIDE'|'FG'|'BG'|'OBJ'
|
||||
'visible': 'true'|'false'
|
||||
"""
|
||||
res = self.websocketserver.call(
|
||||
self.client.call('Photoshop.get_layers')
|
||||
)
|
||||
|
||||
return self._to_records(res)
|
||||
|
||||
def get_layer(self, layer_id):
|
||||
"""
|
||||
Returns PSItem for specific 'layer_id' or None if not found
|
||||
Args:
|
||||
layer_id (string): unique layer id, stored in 'uuid' field
|
||||
|
||||
Returns:
|
||||
(PSItem) or None
|
||||
"""
|
||||
layers = self.get_layers()
|
||||
for layer in layers:
|
||||
if str(layer.id) == str(layer_id):
|
||||
return layer
|
||||
|
||||
def get_layers_in_layers(self, layers):
|
||||
"""Return all layers that belong to layers (might be groups).
|
||||
|
||||
Args:
|
||||
layers <list of PSItem>:
|
||||
|
||||
Returns:
|
||||
<list of PSItem>
|
||||
"""
|
||||
parent_ids = set([lay.id for lay in layers])
|
||||
|
||||
return self._get_layers_in_layers(parent_ids)
|
||||
|
||||
def get_layers_in_layers_ids(self, layers_ids, layers=None):
|
||||
"""Return all layers that belong to layers (might be groups).
|
||||
|
||||
Args:
|
||||
layers_ids <list of Int>
|
||||
layers <list of PSItem>:
|
||||
|
||||
Returns:
|
||||
<list of PSItem>
|
||||
"""
|
||||
parent_ids = set(layers_ids)
|
||||
|
||||
return self._get_layers_in_layers(parent_ids, layers)
|
||||
|
||||
def _get_layers_in_layers(self, parent_ids, layers=None):
|
||||
if not layers:
|
||||
layers = self.get_layers()
|
||||
|
||||
all_layers = layers
|
||||
ret = []
|
||||
|
||||
for layer in all_layers:
|
||||
parents = set(layer.parents)
|
||||
if len(parent_ids & parents) > 0:
|
||||
ret.append(layer)
|
||||
if layer.id in parent_ids:
|
||||
ret.append(layer)
|
||||
|
||||
return ret
|
||||
|
||||
def create_group(self, name):
|
||||
"""Create new group (eg. LayerSet)
|
||||
|
||||
Returns:
|
||||
<PSItem>
|
||||
"""
|
||||
enhanced_name = self.PUBLISH_ICON + name
|
||||
ret = self.websocketserver.call(
|
||||
self.client.call('Photoshop.create_group', name=enhanced_name)
|
||||
)
|
||||
# create group on PS is asynchronous, returns only id
|
||||
return PSItem(id=ret, name=name, group=True)
|
||||
|
||||
def group_selected_layers(self, name):
|
||||
"""Group selected layers into new LayerSet (eg. group)
|
||||
|
||||
Returns:
|
||||
(Layer)
|
||||
"""
|
||||
enhanced_name = self.PUBLISH_ICON + name
|
||||
res = self.websocketserver.call(
|
||||
self.client.call(
|
||||
'Photoshop.group_selected_layers', name=enhanced_name
|
||||
)
|
||||
)
|
||||
res = self._to_records(res)
|
||||
if res:
|
||||
rec = res.pop()
|
||||
rec.name = rec.name.replace(self.PUBLISH_ICON, '')
|
||||
return rec
|
||||
raise ValueError("No group record returned")
|
||||
|
||||
def get_selected_layers(self):
|
||||
"""Get a list of actually selected layers.
|
||||
|
||||
Returns: <list of Layer('id':XX, 'name':"YYY")>
|
||||
"""
|
||||
res = self.websocketserver.call(
|
||||
self.client.call('Photoshop.get_selected_layers')
|
||||
)
|
||||
return self._to_records(res)
|
||||
|
||||
def select_layers(self, layers):
|
||||
"""Selects specified layers in Photoshop by its ids.
|
||||
|
||||
Args:
|
||||
layers: <list of Layer('id':XX, 'name':"YYY")>
|
||||
"""
|
||||
layers_id = [str(lay.id) for lay in layers]
|
||||
self.websocketserver.call(
|
||||
self.client.call(
|
||||
'Photoshop.select_layers',
|
||||
layers=json.dumps(layers_id)
|
||||
)
|
||||
)
|
||||
|
||||
def get_active_document_full_name(self):
|
||||
"""Returns full name with path of active document via ws call
|
||||
|
||||
Returns(string):
|
||||
full path with name
|
||||
"""
|
||||
res = self.websocketserver.call(
|
||||
self.client.call('Photoshop.get_active_document_full_name')
|
||||
)
|
||||
|
||||
return res
|
||||
|
||||
def get_active_document_name(self):
|
||||
"""Returns just a name of active document via ws call
|
||||
|
||||
Returns(string):
|
||||
file name
|
||||
"""
|
||||
return self.websocketserver.call(
|
||||
self.client.call('Photoshop.get_active_document_name')
|
||||
)
|
||||
|
||||
def is_saved(self):
|
||||
"""Returns true if no changes in active document
|
||||
|
||||
Returns:
|
||||
<boolean>
|
||||
"""
|
||||
return self.websocketserver.call(
|
||||
self.client.call('Photoshop.is_saved')
|
||||
)
|
||||
|
||||
def save(self):
|
||||
"""Saves active document"""
|
||||
self.websocketserver.call(
|
||||
self.client.call('Photoshop.save')
|
||||
)
|
||||
|
||||
def saveAs(self, image_path, ext, as_copy):
|
||||
"""Saves active document to psd (copy) or png or jpg
|
||||
|
||||
Args:
|
||||
image_path(string): full local path
|
||||
ext: <string psd|jpg|png>
|
||||
as_copy: <boolean>
|
||||
Returns: None
|
||||
"""
|
||||
self.websocketserver.call(
|
||||
self.client.call(
|
||||
'Photoshop.saveAs',
|
||||
image_path=image_path,
|
||||
ext=ext,
|
||||
as_copy=as_copy
|
||||
)
|
||||
)
|
||||
|
||||
def set_visible(self, layer_id, visibility):
|
||||
"""Set layer with 'layer_id' to 'visibility'
|
||||
|
||||
Args:
|
||||
layer_id: <int>
|
||||
visibility: <true - set visible, false - hide>
|
||||
Returns: None
|
||||
"""
|
||||
self.websocketserver.call(
|
||||
self.client.call(
|
||||
'Photoshop.set_visible',
|
||||
layer_id=layer_id,
|
||||
visibility=visibility
|
||||
)
|
||||
)
|
||||
|
||||
def hide_all_others_layers(self, layers):
|
||||
"""hides all layers that are not part of the list or that are not
|
||||
children of this list
|
||||
|
||||
Args:
|
||||
layers (list): list of PSItem - highest hierarchy
|
||||
"""
|
||||
extract_ids = set([ll.id for ll in self.get_layers_in_layers(layers)])
|
||||
|
||||
self.hide_all_others_layers_ids(extract_ids)
|
||||
|
||||
def hide_all_others_layers_ids(self, extract_ids, layers=None):
|
||||
"""hides all layers that are not part of the list or that are not
|
||||
children of this list
|
||||
|
||||
Args:
|
||||
extract_ids (list): list of integer that should be visible
|
||||
layers (list) of PSItem (used for caching)
|
||||
"""
|
||||
if not layers:
|
||||
layers = self.get_layers()
|
||||
for layer in layers:
|
||||
if layer.visible and layer.id not in extract_ids:
|
||||
self.set_visible(layer.id, False)
|
||||
|
||||
def get_layers_metadata(self):
|
||||
"""Reads layers metadata from Headline from active document in PS.
|
||||
(Headline accessible by File > File Info)
|
||||
|
||||
Returns:
|
||||
(list)
|
||||
example:
|
||||
{"8":{"active":true,"productName":"imageBG",
|
||||
"productType":"image","id":"ayon.create.instance",
|
||||
"folderPath":"/Town"}}
|
||||
8 is layer(group) id - used for deletion, update etc.
|
||||
"""
|
||||
res = self.websocketserver.call(self.client.call('Photoshop.read'))
|
||||
layers_data = []
|
||||
try:
|
||||
if res:
|
||||
layers_data = json.loads(res)
|
||||
except json.decoder.JSONDecodeError:
|
||||
raise ValueError("{} cannot be parsed, recreate meta".format(res))
|
||||
# format of metadata changed from {} to [] because of standardization
|
||||
# keep current implementation logic as its working
|
||||
if isinstance(layers_data, dict):
|
||||
for layer_id, layer_meta in layers_data.items():
|
||||
if layer_meta.get("schema") != "openpype:container-2.0":
|
||||
layer_meta["members"] = [str(layer_id)]
|
||||
layers_data = list(layers_data.values())
|
||||
return layers_data
|
||||
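    # For example (values from the docstring above), a legacy dict payload
    #     {"8": {"active": true, "productName": "imageBG",
    #            "productType": "image", "id": "ayon.create.instance",
    #            "folderPath": "/Town"}}
    # is normalized to a list, with non-container entries gaining "members":
    #     [{"active": true, "productName": "imageBG", "productType": "image",
    #       "id": "ayon.create.instance", "folderPath": "/Town",
    #       "members": ["8"]}]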
    def import_smart_object(self, path, layer_name, as_reference=False):
        """Import the file at `path` as a smart object to active document.

        Args:
            path (str): File path to import.
            layer_name (str): Unique layer name to differentiate how many
                times the same smart object was loaded
            as_reference (bool): pull in content or reference
        """
        enhanced_name = self.LOADED_ICON + layer_name
        res = self.websocketserver.call(
            self.client.call(
                'Photoshop.import_smart_object',
                path=path,
                name=enhanced_name,
                as_reference=as_reference
            )
        )
        rec = self._to_records(res).pop()
        if rec:
            rec.name = rec.name.replace(self.LOADED_ICON, '')
        return rec

    def replace_smart_object(self, layer, path, layer_name):
        """Replace the smart object `layer` with file at `path`.

        Args:
            layer (PSItem):
            path (str): File to import.
            layer_name (str): Unique layer name to differentiate how many
                times the same smart object was loaded
        """
        enhanced_name = self.LOADED_ICON + layer_name
        self.websocketserver.call(
            self.client.call(
                'Photoshop.replace_smart_object',
                layer_id=layer.id,
                path=path,
                name=enhanced_name
            )
        )

    def delete_layer(self, layer_id):
        """Deletes specific layer by its id.

        Args:
            layer_id (int): id of layer to delete
        """
        self.websocketserver.call(
            self.client.call('Photoshop.delete_layer', layer_id=layer_id)
        )

    def rename_layer(self, layer_id, name):
        """Renames specific layer by its id.

        Args:
            layer_id (int): id of layer to rename
            name (str): new name
        """
        self.websocketserver.call(
            self.client.call(
                'Photoshop.rename_layer',
                layer_id=layer_id,
                name=name
            )
        )

    def remove_instance(self, instance_id):
        cleaned_data = []

        for item in self.get_layers_metadata():
            inst_id = item.get("instance_id") or item.get("uuid")
            if inst_id != instance_id:
                cleaned_data.append(item)

        payload = json.dumps(cleaned_data, indent=4)

        self.websocketserver.call(
            self.client.call('Photoshop.imprint', payload=payload)
        )

    def get_extension_version(self):
        """Returns version number of installed extension."""
        return self.websocketserver.call(
            self.client.call('Photoshop.get_extension_version')
        )

    def close(self):
        """Shuts down PS and its process too.

        For webpublishing only.
        """
        # TODO change client.call to method with checks for client
        self.websocketserver.call(self.client.call('Photoshop.close'))

    def _to_records(self, res):
        """Converts string json representation into list of PSItem for
        dot notation access to work.

        Args:
            res (string): valid json

        Returns:
            <list of PSItem>
        """
        try:
            layers_data = json.loads(res)
        except json.decoder.JSONDecodeError:
            raise ValueError("Received broken JSON {}".format(res))
        ret = []

        # convert to PSItem to use dot notation
        if isinstance(layers_data, dict):
            layers_data = [layers_data]
        for d in layers_data:
            # currently implemented and expected fields
            ret.append(PSItem(
                d.get('id'),
                d.get('name'),
                d.get('group'),
                d.get('parents'),
                d.get('visible'),
                d.get('type'),
                d.get('members'),
                d.get('long_name'),
                d.get("color_code"),
                d.get("instance_id")
            ))
        return ret
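A minimal usage sketch of the stub, assuming Photoshop is up and the extension has connected (otherwise `get_client()` returns None and the calls fail):

```python
stub = PhotoshopServerStub()
if stub.client and stub.get_active_document_name():
    for layer in stub.get_layers():
        # clean_name strips the publish/loaded icon highlights
        print(layer.clean_name, layer.visible)
```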
@@ -0,0 +1,88 @@
import os
import platform
import subprocess

from ayon_core.lib import (
    get_ayon_launcher_args,
    is_using_ayon_console,
)
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_photoshop import get_launch_script_path


def get_launch_kwargs(kwargs):
    """Explicit setting of kwargs for Popen for Photoshop.

    Expected behavior
    - ayon_console opens window with logs
    - ayon has stdout/stderr available for capturing

    Args:
        kwargs (Union[dict, None]): Current kwargs or None.

    """
    if kwargs is None:
        kwargs = {}

    if platform.system().lower() != "windows":
        return kwargs

    if not is_using_ayon_console():
        kwargs.update({
            "creationflags": subprocess.CREATE_NEW_CONSOLE
        })
    else:
        kwargs.update({
            "creationflags": subprocess.CREATE_NO_WINDOW,
            "stdout": subprocess.DEVNULL,
            "stderr": subprocess.DEVNULL
        })
    return kwargs
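# A hedged sketch of how these kwargs feed subprocess.Popen (in production
# the ayon-applications launch context performs the spawn; the argument
# list below is illustrative, not the real command):
#     kwargs = get_launch_kwargs(None)
#     subprocess.Popen(
#         ["ayon", "run", "launch_script.py", "Photoshop.exe"], **kwargs
#     )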


class PhotoshopPrelaunchHook(PreLaunchHook):
    """Launch arguments preparation.

    The hook adds the Python executable and the Photoshop implementation
    script path before the Photoshop executable, and appends the last
    workfile path to launch arguments.

    Existence of the last workfile is checked. If the workfile does not
    exist, it tries to copy a templated workfile from a predefined path.
    """
    app_groups = {"photoshop"}

    order = 20
    launch_types = {LaunchTypes.local}

    def execute(self):
        # Pop executable
        executable_path = self.launch_context.launch_args.pop(0)

        # Pop rest of launch arguments - There should not be other arguments!
        remainders = []
        while self.launch_context.launch_args:
            remainders.append(self.launch_context.launch_args.pop(0))

        script_path = get_launch_script_path()

        new_launch_args = get_ayon_launcher_args(
            "run", script_path, executable_path
        )
        # Add workfile path if exists
        workfile_path = self.data["last_workfile_path"]
        if (
            self.data.get("start_last_workfile")
            and workfile_path
            and os.path.exists(workfile_path)
        ):
            new_launch_args.append(workfile_path)

        # Append as whole list as these arguments should not be separated
        self.launch_context.launch_args.append(new_launch_args)

        if remainders:
            self.launch_context.launch_args.extend(remainders)

        self.launch_context.kwargs = get_launch_kwargs(
            self.launch_context.kwargs
        )
127
server_addon/photoshop/client/ayon_photoshop/lib.py
Normal file
@@ -0,0 +1,127 @@
import re

import ayon_api

from ayon_core.lib import prepare_template_data
from ayon_core.pipeline import (
    AutoCreator,
    CreatedInstance
)
from ayon_photoshop import api
from ayon_photoshop.api.pipeline import cache_and_get_instances


class PSAutoCreator(AutoCreator):
    """Generic autocreator to extend."""
    def get_instance_attr_defs(self):
        return []

    def collect_instances(self):
        for instance_data in cache_and_get_instances(self):
            creator_id = instance_data.get("creator_identifier")

            if creator_id == self.identifier:
                instance = CreatedInstance.from_existing(
                    instance_data, self
                )
                self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        self.log.debug("update_list:: {}".format(update_list))
        for created_inst, _changes in update_list:
            api.stub().imprint(created_inst.get("instance_id"),
                               created_inst.data_to_store())

    def create(self, options=None):
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.product_type == self.product_type:
                existing_instance = instance
                break

        context = self.create_context
        project_name = context.get_current_project_name()
        folder_path = context.get_current_folder_path()
        task_name = context.get_current_task_name()
        host_name = context.host_name

        if existing_instance is None:
            existing_instance_folder = None
        else:
            existing_instance_folder = existing_instance["folderPath"]

        if existing_instance is None:
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name,
            )
            data = {
                "folderPath": folder_path,
                "task": task_name,
                "variant": self.default_variant
            }
            data.update(self.get_dynamic_data(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name,
                None
            ))

            if not self.active_on_create:
                data["active"] = False

            new_instance = CreatedInstance(
                self.product_type, product_name, data, self
            )
            self._add_instance_to_context(new_instance)
            api.stub().imprint(new_instance.get("instance_id"),
                               new_instance.data_to_store())

        elif (
            existing_instance_folder != folder_path
            or existing_instance["task"] != task_name
        ):
            folder_entity = ayon_api.get_folder_by_path(
                project_name, folder_path
            )
            task_entity = ayon_api.get_task_by_name(
                project_name, folder_entity["id"], task_name
            )
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name,
            )
            existing_instance["folderPath"] = folder_path
            existing_instance["task"] = task_name
            existing_instance["productName"] = product_name


def clean_product_name(product_name):
    """Clean leftover '{layer}' placeholder variants from product name."""
    dynamic_data = prepare_template_data({"layer": "{layer}"})
    for value in dynamic_data.values():
        if value in product_name:
            product_name = (
                product_name
                .replace(value, "")
                .replace("__", "_")
                .replace("..", ".")
            )
    # strip trailing separators (e.g. "Main_" -> "Main")
    pattern = r'[\W_]+$'
    replacement = ''
    return re.sub(pattern, replacement, product_name)
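A worked example of `clean_product_name`, assuming `prepare_template_data` yields the capitalization variants of the `{layer}` placeholder:

```python
# "{layer}" itself is always among the placeholder variants:
clean_product_name("imageMain_{layer}")  # -> "imageMain" (trailing "_" stripped)
# If the capitalized variant "{Layer}" is produced as well (as the image
# creator's usage suggests), it is removed the same way:
clean_product_name("image{Layer}Main")   # hypothetically -> "imageMain"
```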
@@ -0,0 +1,156 @@
import ayon_api

from ayon_photoshop import api
from ayon_photoshop.lib import PSAutoCreator, clean_product_name
from ayon_core.lib import BoolDef, prepare_template_data
from ayon_core.pipeline.create import get_product_name, CreatedInstance


class AutoImageCreator(PSAutoCreator):
    """Creates a flattened image from all visible layers.

    Used in simplified publishing as an auto-created instance.
    Must be enabled in Settings and a template for the product name provided.
    """
    identifier = "auto_image"
    product_type = "image"

    # Settings
    default_variant = ""
    # - Mark instance for review by default
    mark_for_review = True
    active_on_create = True

    def create(self, options=None):
        existing_instance = None
        for instance in self.create_context.instances:
            if instance.creator_identifier == self.identifier:
                existing_instance = instance
                break

        context = self.create_context
        project_name = context.get_current_project_name()
        folder_path = context.get_current_folder_path()
        task_name = context.get_current_task_name()
        host_name = context.host_name
        folder_entity = ayon_api.get_folder_by_path(project_name, folder_path)
        task_entity = ayon_api.get_task_by_name(
            project_name, folder_entity["id"], task_name
        )

        existing_folder_path = None
        if existing_instance is not None:
            existing_folder_path = existing_instance["folderPath"]

        if existing_instance is None:
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name,
            )

            data = {
                "folderPath": folder_path,
                "task": task_name,
            }

            if not self.active_on_create:
                data["active"] = False

            creator_attributes = {"mark_for_review": self.mark_for_review}
            data.update({"creator_attributes": creator_attributes})

            new_instance = CreatedInstance(
                self.product_type, product_name, data, self
            )
            self._add_instance_to_context(new_instance)
            api.stub().imprint(new_instance.get("instance_id"),
                               new_instance.data_to_store())

        elif (  # existing instance from different context
            existing_folder_path != folder_path
            or existing_instance["task"] != task_name
        ):
            product_name = self.get_product_name(
                project_name,
                folder_entity,
                task_entity,
                self.default_variant,
                host_name,
            )
            existing_instance["folderPath"] = folder_path
            existing_instance["task"] = task_name
            existing_instance["productName"] = product_name

            api.stub().imprint(existing_instance.get("instance_id"),
                               existing_instance.data_to_store())

    def get_pre_create_attr_defs(self):
        return [
            BoolDef(
                "mark_for_review",
                label="Review",
                default=self.mark_for_review
            )
        ]

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "mark_for_review",
                label="Review"
            )
        ]

    def apply_settings(self, project_settings):
        plugin_settings = (
            project_settings["photoshop"]["create"]["AutoImageCreator"]
        )

        self.active_on_create = plugin_settings["active_on_create"]
        self.default_variant = plugin_settings["default_variant"]
        self.mark_for_review = plugin_settings["mark_for_review"]
        self.enabled = plugin_settings["enabled"]

    def get_detail_description(self):
        return """Creator for flattened image.

        A studio might configure a simplified publishing workflow. In that
        case an `image` instance is automatically created which will publish
        a flat image from all visible layers.

        An artist might still disable this instance from publishing or from
        creating a review for it.
        """

    def get_product_name(
        self,
        project_name,
        folder_entity,
        task_entity,
        variant,
        host_name=None,
        instance=None
    ):
        if host_name is None:
            host_name = self.create_context.host_name

        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]

        dynamic_data = prepare_template_data({"layer": "{layer}"})

        product_name = get_product_name(
            project_name,
            task_name,
            task_type,
            host_name,
            self.product_type,
            variant,
            dynamic_data=dynamic_data
        )
        return clean_product_name(product_name)
@@ -0,0 +1,265 @@
import re

from ayon_core.lib import BoolDef
from ayon_core.pipeline import (
    Creator,
    CreatedInstance,
    CreatorError
)
from ayon_core.lib import prepare_template_data
from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS
from ayon_photoshop import api
from ayon_photoshop.api.pipeline import cache_and_get_instances
from ayon_photoshop.lib import clean_product_name


class ImageCreator(Creator):
    """Creates image instance for publishing.

    The result of an 'image' instance is an image of all visible layers,
    or image(s) of selected layers.
    """
    identifier = "image"
    label = "Image"
    product_type = "image"
    description = "Image creator"

    # Settings
    default_variants = ""
    mark_for_review = False
    active_on_create = True

    def create(self, product_name_from_ui, data, pre_create_data):
        groups_to_create = []
        top_layers_to_wrap = []
        create_empty_group = False

        stub = api.stub()  # only after PS is up
        if pre_create_data.get("use_selection"):
            try:
                top_level_selected_items = stub.get_selected_layers()
            except ValueError:
                raise CreatorError("Cannot group locked Background layer!")

            only_single_item_selected = len(top_level_selected_items) == 1
            if (
                    only_single_item_selected or
                    pre_create_data.get("create_multiple")):
                for selected_item in top_level_selected_items:
                    if selected_item.group:
                        groups_to_create.append(selected_item)
                    else:
                        top_layers_to_wrap.append(selected_item)
            else:
                group = stub.group_selected_layers(product_name_from_ui)
                groups_to_create.append(group)
        else:
            try:
                stub.select_layers(stub.get_layers())
                group = stub.group_selected_layers(product_name_from_ui)
            except ValueError:
                raise CreatorError("Cannot group locked Background layer!")

            groups_to_create.append(group)

        # create empty group if nothing selected
        if not groups_to_create and not top_layers_to_wrap:
            group = stub.create_group(product_name_from_ui)
            groups_to_create.append(group)

        # wrap each top level layer into separate new group
        for layer in top_layers_to_wrap:
            stub.select_layers([layer])
            group = stub.group_selected_layers(layer.name)
            groups_to_create.append(group)

        layer_name = ''
        # use artist chosen option OR force layer if more products are created
        # to differentiate them
        use_layer_name = (pre_create_data.get("use_layer_name") or
                          len(groups_to_create) > 1)
        for group in groups_to_create:
            product_name = product_name_from_ui  # reset to name from creator UI
            layer_names_in_hierarchy = []
            created_group_name = self._clean_highlights(stub, group.name)

            if use_layer_name:
                layer_name = re.sub(
                    "[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS),
                    "",
                    group.name
                )
                if "{layer}" not in product_name.lower():
                    product_name += "{Layer}"

            layer_fill = prepare_template_data({"layer": layer_name})
            product_name = product_name.format(**layer_fill)
            product_name = clean_product_name(product_name)

            if group.long_name:
                for directory in group.long_name[::-1]:
                    name = self._clean_highlights(stub, directory)
                    layer_names_in_hierarchy.append(name)

            data_update = {
                "productName": product_name,
                "members": [str(group.id)],
                "layer_name": layer_name,
                "long_name": "_".join(layer_names_in_hierarchy)
            }
            data.update(data_update)

            mark_for_review = (pre_create_data.get("mark_for_review") or
                               self.mark_for_review)
            creator_attributes = {"mark_for_review": mark_for_review}
            data.update({"creator_attributes": creator_attributes})

            if not self.active_on_create:
                data["active"] = False

            new_instance = CreatedInstance(
                self.product_type, product_name, data, self
            )

            stub.imprint(new_instance.get("instance_id"),
                         new_instance.data_to_store())
            self._add_instance_to_context(new_instance)
            # reusing existing group, need to rename afterwards
            if not create_empty_group:
                stub.rename_layer(group.id,
                                  stub.PUBLISH_ICON + created_group_name)

    def collect_instances(self):
        for instance_data in cache_and_get_instances(self):
            # legacy instances have family=='image'
            creator_id = (instance_data.get("creator_identifier") or
                          instance_data.get("family"))

            if creator_id == self.identifier:
                instance_data = self._handle_legacy(instance_data)
                instance = CreatedInstance.from_existing(
                    instance_data, self
                )
                self._add_instance_to_context(instance)

    def update_instances(self, update_list):
        self.log.debug("update_list:: {}".format(update_list))
        for created_inst, _changes in update_list:
            if created_inst.get("layer"):
                # not storing PSItem layer to metadata
                created_inst.pop("layer")
            api.stub().imprint(created_inst.get("instance_id"),
                               created_inst.data_to_store())

    def remove_instances(self, instances):
        for instance in instances:
            self.host.remove_instance(instance)
            self._remove_instance_from_context(instance)

    def get_pre_create_attr_defs(self):
        output = [
            BoolDef("use_selection", default=True,
                    label="Create only for selected"),
            BoolDef("create_multiple",
                    default=True,
                    label="Create separate instance for each selected"),
            BoolDef("use_layer_name",
                    default=False,
                    label="Use layer name in product"),
            BoolDef(
                "mark_for_review",
                label="Create separate review",
                default=False
            )
        ]
        return output

    def get_instance_attr_defs(self):
        return [
            BoolDef(
                "mark_for_review",
                label="Review"
            )
        ]

    def apply_settings(self, project_settings):
        plugin_settings = (
            project_settings["photoshop"]["create"]["ImageCreator"]
        )

        self.active_on_create = plugin_settings["active_on_create"]
        self.default_variants = plugin_settings["default_variants"]
        self.mark_for_review = plugin_settings["mark_for_review"]
        self.enabled = plugin_settings["enabled"]

    def get_detail_description(self):
        return """Creator for Image instances

        The main publishable item in Photoshop will be of `image` product
        type. The result of this item (instance) is a picture that could be
        loaded and used in other DCCs (for example as a single layer in an
        AfterEffects composition, a reference in Maya, etc.).

        There are a couple of options for what to publish:
        - separate image per selected layer (or group of layers)
        - one image for all selected layers
        - all visible layers (groups) flattened into a single image

        In most cases you would like to keep `Create only for selected`
        toggled on and select what you would like to publish.
        Toggling this option off will allow you to create an instance for
        all visible layers without a need to select them explicitly.

        Use 'Create separate instance for each selected' to create separate
        images per selected layer (group of layers).

        'Use layer name in product' will explicitly add the layer name into
        the product name. The position of this name is configurable in
        `project_settings/global/tools/creator/product_name_profiles`.
        If the layer placeholder ({layer}) is not used in
        `product_name_profiles` but the layer name should be used (set
        explicitly in UI or implicitly if multiple images should be
        created), it is added in capitalized form as a suffix to the
        product name.

        Each image could have its separate review created if necessary via
        the `Create separate review` toggle.
        But the more common use case is a separate `review` instance that
        creates a review from all published items.
        """

    def _handle_legacy(self, instance_data):
        """Converts old instances to new format."""
        if not instance_data.get("members"):
            instance_data["members"] = [instance_data.get("uuid")]

        if instance_data.get("uuid"):
            # uuid not needed, replaced with unique instance_id
            api.stub().remove_instance(instance_data.get("uuid"))
            instance_data.pop("uuid")

        if not instance_data.get("task"):
            instance_data["task"] = self.create_context.get_current_task_name()

        if not instance_data.get("variant"):
            instance_data["variant"] = ''

        return instance_data

    def _clean_highlights(self, stub, item):
        return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON,
                                                           '')

    def get_dynamic_data(
        self,
        project_name,
        folder_entity,
        task_entity,
        variant,
        host_name,
        instance
    ):
        if instance is not None:
            layer_name = instance.get("layer_name")
            if layer_name:
                return {"layer": layer_name}
        return {"layer": "{layer}"}
|
|
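The `{layer}` placeholder handling above boils down to plain `str.format` on the configured product name template. A minimal sketch (the template and names below are hypothetical; only the `{"layer": ...}` dict mirrors what `get_dynamic_data` returns, and real casing is handled by `prepare_template_data` in ayon_core):

```
# Hypothetical template mimicking a 'product_name_profiles' entry.
template = "{product[type]}{variant}{layer}"

fill_data = {
    "product": {"type": "image"},
    "variant": "Main",
    "layer": "Background",  # what get_dynamic_data returns for an instance
}
print(template.format(**fill_data))  # imageMainBackground
```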
@ -0,0 +1,28 @@
from ayon_photoshop.lib import PSAutoCreator


class ReviewCreator(PSAutoCreator):
    """Creates review instance which might be disabled from publishing."""
    identifier = "review"
    product_type = "review"

    default_variant = "Main"

    def get_detail_description(self):
        return """Auto creator for review.

        The Photoshop review is created from all published images, or from
        all visible layers if no `image` instances got created.

        The review might be disabled by an artist. (The instance shouldn't
        be deleted though, as it will get recreated in the next publish
        either way.)
        """

    def apply_settings(self, project_settings):
        plugin_settings = (
            project_settings["photoshop"]["create"]["ReviewCreator"]
        )

        self.default_variant = plugin_settings["default_variant"]
        self.active_on_create = plugin_settings["active_on_create"]
        self.enabled = plugin_settings["enabled"]
@ -0,0 +1,28 @@
from ayon_photoshop.lib import PSAutoCreator


class WorkfileCreator(PSAutoCreator):
    identifier = "workfile"
    product_type = "workfile"

    default_variant = "Main"

    def get_detail_description(self):
        return """Auto creator for workfile.

        It is expected that each publish will also publish its source
        workfile for safekeeping. This creator triggers automatically,
        without the artist having to remember to trigger it explicitly.

        The workfile instance can be disabled if publishing the workfile is
        not required. (The instance shouldn't be deleted though, as it will
        be recreated in the next publish automatically.)
        """

    def apply_settings(self, project_settings):
        plugin_settings = (
            project_settings["photoshop"]["create"]["WorkfileCreator"]
        )

        self.active_on_create = plugin_settings["active_on_create"]
        self.enabled = plugin_settings["enabled"]
@ -0,0 +1,86 @@
import re

from ayon_core.pipeline import get_representation_path
from ayon_photoshop import api as photoshop
from ayon_photoshop.api import get_unique_layer_name


class ImageLoader(photoshop.PhotoshopLoader):
    """Load images.

    Stores the imported asset in a container named after the asset.
    """

    product_types = {"image", "render"}
    representations = {"*"}

    def load(self, context, name=None, namespace=None, data=None):
        stub = self.get_stub()
        layer_name = get_unique_layer_name(
            stub.get_layers(),
            context["folder"]["name"],
            name
        )
        with photoshop.maintained_selection():
            path = self.filepath_from_context(context)
            layer = self.import_layer(path, layer_name, stub)

        self[:] = [layer]
        namespace = namespace or layer_name

        return photoshop.containerise(
            name,
            namespace,
            layer,
            context,
            self.__class__.__name__
        )

    def update(self, container, context):
        """Switch asset or change version."""
        stub = self.get_stub()

        layer = container.pop("layer")

        repre_entity = context["representation"]
        folder_name = context["folder"]["name"]
        product_name = context["product"]["name"]

        namespace_from_container = re.sub(
            r'_\d{3}$', '', container["namespace"]
        )
        layer_name = "{}_{}".format(folder_name, product_name)
        # switching assets
        if namespace_from_container != layer_name:
            layer_name = get_unique_layer_name(
                stub.get_layers(), folder_name, product_name
            )
        else:  # switching version - keep the same name
            layer_name = container["namespace"]

        path = get_representation_path(repre_entity)
        with photoshop.maintained_selection():
            stub.replace_smart_object(
                layer, path, layer_name
            )

        stub.imprint(
            layer.id, {"representation": repre_entity["id"]}
        )

    def remove(self, container):
        """Removes element from scene: deletes layer + removes from Headline.

        Args:
            container (dict): container to be removed - used to get layer_id
        """
        stub = self.get_stub()

        layer = container.pop("layer")
        stub.imprint(layer.id, {})
        stub.delete_layer(layer.id)

    def switch(self, container, context):
        self.update(container, context)

    def import_layer(self, file_name, layer_name, stub):
        return stub.import_smart_object(file_name, layer_name)
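The `re.sub(r'_\d{3}$', ...)` in `update` only strips a trailing three-digit uniqueness counter, which is how the loader tells "switch asset" apart from "switch version". A standalone sketch with made-up names:

```
import re

def strip_counter(namespace):
    # Drop a trailing "_001"-style suffix added by get_unique_layer_name.
    return re.sub(r'_\d{3}$', '', namespace)

print(strip_counter("hero_imageMain_002"))  # hero_imageMain
print(strip_counter("hero_imageMain"))      # unchanged: hero_imageMain
```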
@ -0,0 +1,95 @@
import os

import qargparse

from ayon_photoshop import api as photoshop
from ayon_photoshop.api import get_unique_layer_name


class ImageFromSequenceLoader(photoshop.PhotoshopLoader):
    """Load a specific image from a sequence.

    Used only as a quick load of a reference file from a sequence.

    The plain ImageLoader picks the first frame from a sequence.

    Loads only existing files - it is currently not possible to limit
    loaders to single selection instead of multiselect. If the user selects
    multiple representations, a list for all of them is provided, but the
    selection is only a single file.
    This loader will be triggered multiple times, but the selected name will
    match only the proper path.

    The loader doesn't do containerization as there is currently no data
    model for a 'frame of rendered files' (only a rendered sequence), so an
    update would be difficult.
    """

    product_types = {"render"}
    representations = {"*"}
    options = []

    def load(self, context, name=None, namespace=None, data=None):
        path = self.filepath_from_context(context)
        if data.get("frame"):
            path = os.path.join(
                os.path.dirname(path), data["frame"]
            )

        if not os.path.exists(path):
            return

        stub = self.get_stub()
        layer_name = get_unique_layer_name(
            stub.get_layers(), context["folder"]["name"], name
        )

        with photoshop.maintained_selection():
            layer = stub.import_smart_object(path, layer_name)

        self[:] = [layer]
        namespace = namespace or layer_name

        return namespace

    @classmethod
    def get_options(cls, repre_contexts):
        """Returns a list of files for selected 'repre_contexts'.

        It returns only files with the same extension as in the context, as
        it is expected that the context points to a sequence of frames.

        Returns:
            (list) of qargparse.Choice
        """
        files = []
        for context in repre_contexts:
            fname = cls.filepath_from_context(context)
            _, file_extension = os.path.splitext(fname)

            for file_name in os.listdir(os.path.dirname(fname)):
                if not file_name.endswith(file_extension):
                    continue
                files.append(file_name)

        # return selection only if there is something to select from
        if not files or len(files) <= 1:
            return []

        return [
            qargparse.Choice(
                "frame",
                label="Select specific file",
                items=files,
                default=0,
                help="Which frame should be loaded?"
            )
        ]

    def update(self, container, context):
        """No update possible, not containerized."""
        pass

    def remove(self, container):
        """No remove possible, not containerized."""
        pass
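When the artist picks a file in the "Select specific file" option, the chosen file name arrives in `data["frame"]` and `load` simply swaps the basename of the context path. A sketch with hypothetical paths:

```
import os

path = "/renders/sh010/beauty.0001.jpg"   # first frame from the context
data = {"frame": "beauty.0042.jpg"}       # artist's choice from get_options

if data.get("frame"):
    path = os.path.join(os.path.dirname(path), data["frame"])
print(path)  # /renders/sh010/beauty.0042.jpg
```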
@ -0,0 +1,87 @@
import re

from ayon_core.pipeline import get_representation_path
from ayon_photoshop import api as photoshop
from ayon_photoshop.api import get_unique_layer_name


class ReferenceLoader(photoshop.PhotoshopLoader):
    """Load reference images.

    Stores the imported asset in a container named after the asset.

    Inheriting from 'load_image' didn't work because of
    "Cannot write to closing transport" - a possible refactor.
    """

    product_types = {"image", "render"}
    representations = {"*"}

    def load(self, context, name=None, namespace=None, data=None):
        stub = self.get_stub()
        layer_name = get_unique_layer_name(
            stub.get_layers(), context["folder"]["name"], name
        )
        with photoshop.maintained_selection():
            path = self.filepath_from_context(context)
            layer = self.import_layer(path, layer_name, stub)

        self[:] = [layer]
        namespace = namespace or layer_name

        return photoshop.containerise(
            name,
            namespace,
            layer,
            context,
            self.__class__.__name__
        )

    def update(self, container, context):
        """Switch asset or change version."""
        stub = self.get_stub()
        layer = container.pop("layer")

        folder_name = context["folder"]["name"]
        product_name = context["product"]["name"]
        repre_entity = context["representation"]

        namespace_from_container = re.sub(
            r'_\d{3}$', '', container["namespace"]
        )
        layer_name = "{}_{}".format(folder_name, product_name)
        # switching assets
        if namespace_from_container != layer_name:
            layer_name = get_unique_layer_name(
                stub.get_layers(), folder_name, product_name
            )
        else:  # switching version - keep the same name
            layer_name = container["namespace"]

        path = get_representation_path(repre_entity)
        with photoshop.maintained_selection():
            stub.replace_smart_object(
                layer, path, layer_name
            )

        stub.imprint(
            layer.id, {"representation": repre_entity["id"]}
        )

    def remove(self, container):
        """Removes element from scene: deletes layer + removes from Headline.

        Args:
            container (dict): container to be removed - used to get layer_id
        """
        stub = self.get_stub()
        layer = container.pop("layer")
        stub.imprint(layer.id, {})
        stub.delete_layer(layer.id)

    def switch(self, container, context):
        self.update(container, context)

    def import_layer(self, file_name, layer_name, stub):
        return stub.import_smart_object(
            file_name, layer_name, as_reference=True
        )
@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""Close PS after publish. For Webpublishing only."""
import pyblish.api

from ayon_photoshop import api as photoshop


class ClosePS(pyblish.api.ContextPlugin):
    """Close PS after publish. For Webpublishing only."""

    order = pyblish.api.IntegratorOrder + 14
    label = "Close PS"
    optional = True
    active = True

    hosts = ["photoshop"]
    targets = ["automated"]

    def process(self, context):
        self.log.info("ClosePS")

        stub = photoshop.stub()
        self.log.info("Shutting down PS")
        stub.save()
        stub.close()
        self.log.info("PS closed")
@ -0,0 +1,106 @@
import pyblish.api

from ayon_photoshop import api as photoshop
from ayon_core.pipeline.create import get_product_name


class CollectAutoImage(pyblish.api.ContextPlugin):
    """Creates auto image in non artist based publishes (Webpublisher)."""

    label = "Collect Auto Image"
    hosts = ["photoshop"]
    order = pyblish.api.CollectorOrder + 0.2

    targets = ["automated"]

    def process(self, context):
        for instance in context:
            creator_identifier = instance.data.get("creator_identifier")
            if creator_identifier and creator_identifier == "auto_image":
                self.log.debug("Auto image instance found, won't create new")
                return

        project_name = context.data["projectName"]
        proj_settings = context.data["project_settings"]
        host_name = context.data["hostName"]
        folder_entity = context.data["folderEntity"]
        task_entity = context.data["taskEntity"]
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]

        auto_creator = proj_settings.get(
            "photoshop", {}).get(
            "create", {}).get(
            "AutoImageCreator", {})

        if not auto_creator or not auto_creator["enabled"]:
            self.log.debug("Auto image creator disabled, won't create new")
            return

        stub = photoshop.stub()
        stored_items = stub.get_layers_metadata()
        for item in stored_items:
            if item.get("creator_identifier") == "auto_image":
                if not item.get("active"):
                    self.log.debug("Auto_image instance disabled")
                    return

        layer_items = stub.get_layers()

        publishable_ids = [layer.id for layer in layer_items
                           if layer.visible]

        # collect stored image instances
        instance_names = []
        for layer_item in layer_items:
            layer_meta_data = stub.read(layer_item, stored_items)

            # Skip layers without metadata.
            if layer_meta_data is None:
                continue

            # Skip containers.
            if "container" in layer_meta_data["id"]:
                continue

            # active might not be in legacy meta
            if layer_meta_data.get("active", True) and layer_item.visible:
                instance_names.append(layer_meta_data["productName"])

        if len(instance_names) == 0:
            variants = proj_settings.get(
                "photoshop", {}).get(
                "create", {}).get(
                "CreateImage", {}).get(
                "default_variants", [''])
            product_type = "image"

            variant = context.data.get("variant") or variants[0]

            product_name = get_product_name(
                project_name,
                task_name,
                task_type,
                host_name,
                product_type,
                variant,
            )

            instance = context.create_instance(product_name)
            instance.data["folderPath"] = folder_entity["path"]
            instance.data["productType"] = product_type
            instance.data["productName"] = product_name
            instance.data["ids"] = publishable_ids
            instance.data["publish"] = True
            instance.data["creator_identifier"] = "auto_image"
            instance.data["family"] = product_type
            instance.data["families"] = [product_type]

            if auto_creator["mark_for_review"]:
                instance.data["creator_attributes"] = {
                    "mark_for_review": True
                }
                instance.data["families"].append("review")

            self.log.info("auto image instance: {} ".format(instance.data))
@ -0,0 +1,23 @@
import pyblish.api

from ayon_photoshop import api as photoshop


class CollectAutoImageRefresh(pyblish.api.ContextPlugin):
    """Refreshes auto_image instance with currently visible layers."""

    label = "Collect Auto Image Refresh"
    hosts = ["photoshop"]
    order = pyblish.api.CollectorOrder + 0.2

    def process(self, context):
        for instance in context:
            creator_identifier = instance.data.get("creator_identifier")
            if creator_identifier and creator_identifier == "auto_image":
                self.log.debug("Auto image instance found, won't create new")
                # refresh existing auto image instance with current visible
                publishable_ids = [
                    layer.id
                    for layer in photoshop.stub().get_layers()
                    if layer.visible
                ]
                instance.data["ids"] = publishable_ids
                return
@ -0,0 +1,96 @@
"""
Requires:
    None

Provides:
    instance -> productType ("review")
"""
import pyblish.api

from ayon_photoshop import api as photoshop
from ayon_core.pipeline.create import get_product_name


class CollectAutoReview(pyblish.api.ContextPlugin):
    """Create review instance in non artist based workflow.

    Called only if PS is triggered in Webpublisher or in tests.
    """

    label = "Collect Auto Review"
    hosts = ["photoshop"]
    order = pyblish.api.CollectorOrder + 0.2
    targets = ["automated"]

    publish = True

    def process(self, context):
        product_type = "review"
        has_review = False
        for instance in context:
            if instance.data["productType"] == product_type:
                self.log.debug("Review instance found, won't create new")
                has_review = True

            creator_attributes = instance.data.get("creator_attributes", {})
            if (creator_attributes.get("mark_for_review") and
                    "review" not in instance.data["families"]):
                instance.data["families"].append("review")

        if has_review:
            return

        stub = photoshop.stub()
        stored_items = stub.get_layers_metadata()
        for item in stored_items:
            if item.get("creator_identifier") == product_type:
                if not item.get("active"):
                    self.log.debug("Review instance disabled")
                    return

        auto_creator = context.data["project_settings"].get(
            "photoshop", {}).get(
            "create", {}).get(
            "ReviewCreator", {})

        if not auto_creator or not auto_creator["enabled"]:
            self.log.debug("Review creator disabled, won't create new")
            return

        variant = (context.data.get("variant") or
                   auto_creator["default_variant"])

        project_name = context.data["projectName"]
        proj_settings = context.data["project_settings"]
        host_name = context.data["hostName"]
        folder_entity = context.data["folderEntity"]
        task_entity = context.data["taskEntity"]
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]

        product_name = get_product_name(
            project_name,
            task_name,
            task_type,
            host_name,
            product_type,
            variant,
            project_settings=proj_settings
        )

        instance = context.create_instance(product_name)
        instance.data.update({
            "label": product_name,
            "name": product_name,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "representations": [],
            "folderPath": folder_entity["path"],
            "publish": self.publish
        })

        self.log.debug("auto review created: {}".format(instance.data))
@ -0,0 +1,104 @@
import os
import pyblish.api

from ayon_photoshop import api as photoshop
from ayon_core.pipeline.create import get_product_name


class CollectAutoWorkfile(pyblish.api.ContextPlugin):
    """Collect current script for publish."""

    order = pyblish.api.CollectorOrder + 0.2
    label = "Collect Workfile"
    hosts = ["photoshop"]

    targets = ["automated"]

    def process(self, context):
        product_type = "workfile"
        file_path = context.data["currentFile"]
        _, ext = os.path.splitext(file_path)
        staging_dir = os.path.dirname(file_path)
        base_name = os.path.basename(file_path)
        workfile_representation = {
            "name": ext[1:],
            "ext": ext[1:],
            "files": base_name,
            "stagingDir": staging_dir,
        }

        for instance in context:
            if instance.data["productType"] == product_type:
                self.log.debug("Workfile instance found, won't create new")
                instance.data.update({
                    "label": base_name,
                    "name": base_name,
                    "representations": [],
                })

                # creating representation
                instance.data["representations"].append(
                    workfile_representation)

                return

        stub = photoshop.stub()
        stored_items = stub.get_layers_metadata()
        for item in stored_items:
            if item.get("creator_identifier") == product_type:
                if not item.get("active"):
                    self.log.debug("Workfile instance disabled")
                    return

        project_name = context.data["projectName"]
        proj_settings = context.data["project_settings"]
        auto_creator = proj_settings.get(
            "photoshop", {}).get(
            "create", {}).get(
            "WorkfileCreator", {})

        if not auto_creator or not auto_creator["enabled"]:
            self.log.debug("Workfile creator disabled, won't create new")
            return

        # context.data["variant"] might come only from collect_batch_data
        variant = (context.data.get("variant") or
                   auto_creator["default_variant"])

        host_name = context.data["hostName"]
        folder_entity = context.data["folderEntity"]
        task_entity = context.data["taskEntity"]
        task_name = task_type = None
        if task_entity:
            task_name = task_entity["name"]
            task_type = task_entity["taskType"]

        product_name = get_product_name(
            project_name,
            task_name,
            task_type,
            host_name,
            product_type,
            variant,
            project_settings=proj_settings
        )

        # Create instance
        instance = context.create_instance(product_name)
        instance.data.update({
            "label": base_name,
            "name": base_name,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "representations": [],
            "folderPath": folder_entity["path"]
        })

        # creating representation
        instance.data["representations"].append(workfile_representation)

        self.log.debug(
            "auto workfile instance created: {}".format(instance.data))
@ -0,0 +1,78 @@
"""Parses batch context from json and continues in publish process.

Provides:
    context -> Loaded batch file.
        - folderPath
        - task (task name)
        - taskType
        - project_name
        - variant

The code is practically a copy of
`openpype/hosts/webpublisher/collect_batch_data`, as the webpublisher should
eventually be ejected as an addon, i.e. the mentioned plugin shouldn't be
pushed into general publish plugins.
"""

import os

import pyblish.api

from openpype_modules.webpublisher.lib import (
    get_batch_context_info,
    parse_json
)
from ayon_core.lib import is_in_tests


class CollectBatchData(pyblish.api.ContextPlugin):
    """Collect batch data from json stored in 'AYON_PUBLISH_DATA' env dir.

    The directory must contain 'manifest.json' file where batch data should
    be stored.
    """
    # must run really early; context values are only in the json file
    order = pyblish.api.CollectorOrder - 0.495
    label = "Collect batch data"
    hosts = ["photoshop"]
    targets = ["webpublish"]

    def process(self, context):
        self.log.info("CollectBatchData")
        batch_dir = (
            os.environ.get("AYON_PUBLISH_DATA")
            or os.environ.get("OPENPYPE_PUBLISH_DATA")
        )
        if is_in_tests():
            self.log.debug("Automatic testing, no batch data, skipping")
            return

        assert batch_dir, (
            "Missing `AYON_PUBLISH_DATA`")

        assert os.path.exists(batch_dir), \
            "Folder {} doesn't exist".format(batch_dir)

        project_name = os.environ.get("AYON_PROJECT_NAME")
        if project_name is None:
            raise AssertionError(
                "Environment `AYON_PROJECT_NAME` was not found. "
                "Could not set project `root` which may cause issues."
            )

        batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))

        context.data["batchDir"] = batch_dir
        context.data["batchData"] = batch_data

        folder_path, task_name, task_type = get_batch_context_info(
            batch_data["context"]
        )

        os.environ["AYON_FOLDER_PATH"] = folder_path
        os.environ["AYON_TASK_NAME"] = task_name

        context.data["folderPath"] = folder_path
        context.data["task"] = task_name
        context.data["taskType"] = task_type
        context.data["project_name"] = project_name
        context.data["variant"] = batch_data["variant"]
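For orientation, the plugin only relies on two keys of `manifest.json`; a hedged sketch of reading them (the file's shape beyond these two keys is not defined here):

```
import json
import os

batch_dir = os.environ["AYON_PUBLISH_DATA"]  # set by the webpublisher
with open(os.path.join(batch_dir, "manifest.json")) as fp:
    batch_data = json.load(fp)  # what parse_json effectively provides

# Only the keys the plugin reads:
variant = batch_data["variant"]
context_info = batch_data["context"]  # consumed by get_batch_context_info
```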
@ -0,0 +1,268 @@
import os
import re

import pyblish.api

from ayon_core.lib import prepare_template_data, is_in_tests
from ayon_core.settings import get_project_settings
from ayon_photoshop import api as photoshop


class CollectColorCodedInstances(pyblish.api.ContextPlugin):
    """Creates instances for layers marked by configurable color.

    Used in remote publishing when artists mark publishable layers by
    color-coding. Top level layers (groups) must be marked by a specific
    color to be published as an instance of the 'image' product type.

    Can add a group for all publishable layers to allow creation of a
    flattened image. (It cannot contain the special background layer, as
    that cannot be grouped!)

    Based on the value of `create_flatten_image` from Settings:
    - "yes": create a flattened 'image' product of all publishable layers
      AND an 'image' product per publishable layer
    - "only": create ONLY the flattened 'image' product of all publishable
      layers
    - "no": do not create the flattened 'image' product at all, only
      separate products per marked layer.

    Identifier:
        id (str): "ayon.create.instance"
    """

    label = "Collect Color-coded Instances"
    order = pyblish.api.CollectorOrder
    hosts = ["photoshop"]
    targets = ["automated"]

    # configurable by Settings
    color_code_mapping = []
    create_flatten_image = "no"
    flatten_product_name_template = ""

    def process(self, context):
        self.log.info("CollectColorCodedInstances")
        batch_dir = (
            os.environ.get("AYON_PUBLISH_DATA")
            or os.environ.get("OPENPYPE_PUBLISH_DATA")
        )
        if (
            is_in_tests()
            and (
                not batch_dir or not os.path.exists(batch_dir)
            )
        ):
            self.log.debug("Automatic testing, no batch data, skipping")
            return

        existing_product_names = self._get_existing_product_names(context)

        # from CollectBatchData
        folder_path = context.data["folderPath"]
        task_name = context.data["task"]
        variant = context.data["variant"]
        project_name = context.data["projectEntity"]["name"]

        naming_conventions = get_project_settings(project_name).get(
            "photoshop", {}).get(
            "publish", {}).get(
            "ValidateNaming", {})

        stub = photoshop.stub()
        layers = stub.get_layers()

        publishable_layers = []
        created_instances = []
        product_type_from_settings = None
        for layer in layers:
            self.log.debug("Layer:: {}".format(layer))
            if layer.parents:
                self.log.debug("Not a top layer, skip")
                continue

            if not layer.visible:
                self.log.debug("Not visible, skip")
                continue

            resolved_product_type, resolved_product_template = (
                self._resolve_mapping(layer)
            )

            if not resolved_product_template or not resolved_product_type:
                self.log.debug("Product type or template not found, skip")
                continue

            if not product_type_from_settings:
                product_type_from_settings = resolved_product_type

            fill_pairs = {
                "variant": variant,
                "family": resolved_product_type,
                "product": {"type": resolved_product_type},
                "task": task_name,
                "layer": layer.clean_name
            }

            product_name = resolved_product_template.format(
                **prepare_template_data(fill_pairs))

            product_name = self._clean_product_name(
                stub, naming_conventions, product_name, layer
            )

            if product_name in existing_product_names:
                self.log.info((
                    "Product {} already created, skipping."
                ).format(product_name))
                continue

            if self.create_flatten_image != "flatten_only":
                instance = self._create_instance(
                    context,
                    layer,
                    resolved_product_type,
                    folder_path,
                    product_name,
                    task_name
                )
                created_instances.append(instance)

            existing_product_names.append(product_name)
            publishable_layers.append(layer)

        if self.create_flatten_image != "no" and publishable_layers:
            self.log.debug("create_flatten_image")
            if not self.flatten_product_name_template:
                self.log.warning("No template for flatten image")
                return

            fill_pairs.pop("layer")
            product_name = self.flatten_product_name_template.format(
                **prepare_template_data(fill_pairs))

            first_layer = publishable_layers[0]  # dummy layer
            first_layer.name = product_name
            product_type = product_type_from_settings  # inherit product type
            instance = self._create_instance(
                context,
                first_layer,
                product_type,
                folder_path,
                product_name,
                task_name
            )
            instance.data["ids"] = [layer.id for layer in publishable_layers]
            created_instances.append(instance)

        for instance in created_instances:
            # Produce diagnostic message for any graphical
            # user interface interested in visualising it.
            self.log.info("Found: \"%s\" " % instance.data["name"])
            self.log.info("instance: {} ".format(instance.data))

    def _get_existing_product_names(self, context):
        """Collect manually created instances from the workfile.

        There shouldn't be any, as the Webpublisher bypasses publishing via
        OpenPype, but there might be some if a workfile published through OP
        is reused.
        """
        existing_product_names = []
        for instance in context:
            if instance.data.get("publish") is not False:
                existing_product_names.append(
                    instance.data.get("productName"))

        return existing_product_names

    def _create_instance(
        self,
        context,
        layer,
        product_type,
        folder_path,
        product_name,
        task_name
    ):
        instance = context.create_instance(layer.name)
        instance.data["publish"] = True
        instance.data["productType"] = product_type
        instance.data["productName"] = product_name
        instance.data["folderPath"] = folder_path
        instance.data["task"] = task_name
        instance.data["layer"] = layer
        instance.data["family"] = product_type
        instance.data["families"] = [product_type]

        return instance

    def _resolve_mapping(self, layer):
        """Matches 'layer' color code and name to mapping.

        If both a color code AND a name regex are configured, BOTH must
        match.
        If a layer matches multiple mappings, only the first one is used!
        """
        product_type_list = []
        product_name_list = []
        for mapping in self.color_code_mapping:
            if (
                mapping["color_code"]
                and layer.color_code not in mapping["color_code"]
            ):
                continue

            if (
                mapping["layer_name_regex"]
                and not any(
                    re.search(pattern, layer.name)
                    for pattern in mapping["layer_name_regex"]
                )
            ):
                continue

            product_type_list.append(mapping["product_type"])
            product_name_list.append(mapping["product_name_template"])

        if len(product_name_list) > 1:
            self.log.warning(
                "Multiple mappings found for '{}'".format(layer.name)
            )
            self.log.warning("Only first product name template used!")
            # keep only the first item (assigning the string itself to the
            # slice would explode it into single characters)
            product_name_list[:] = product_name_list[:1]

        if len(product_type_list) > 1:
            self.log.warning(
                "Multiple mappings found for '{}'".format(layer.name)
            )
            self.log.warning("Only first product type used!")
            product_type_list[:] = product_type_list[:1]

        resolved_product_template = None
        if product_name_list:
            resolved_product_template = product_name_list.pop()

        product_type = None
        if product_type_list:
            product_type = product_type_list.pop()

        self.log.debug("resolved_product_type {}".format(product_type))
        self.log.debug("resolved_product_template {}".format(
            resolved_product_template))
        return product_type, resolved_product_template

    def _clean_product_name(
        self, stub, naming_conventions, product_name, layer
    ):
        """Cleans invalid characters from product name and layer name."""
        if re.search(naming_conventions["invalid_chars"], product_name):
            product_name = re.sub(
                naming_conventions["invalid_chars"],
                naming_conventions["replace_char"],
                product_name
            )
            layer_name = re.sub(
                naming_conventions["invalid_chars"],
                naming_conventions["replace_char"],
                layer.clean_name
            )
            layer.name = layer_name
            stub.rename_layer(layer.id, layer_name)

        return product_name
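The matching rule in `_resolve_mapping` (both the color code and the name regex must pass when both are configured) can be shown in isolation; the mapping below is a hypothetical settings entry using the same field names:

```
import re

mapping = {
    "color_code": ["red"],
    "layer_name_regex": ["^fg_.*"],
    "product_type": "image",
    "product_name_template": "{product[type]}{variant}{layer}",
}

def matches(mapping, color_code, layer_name):
    # A configured filter that doesn't match rejects the layer.
    if mapping["color_code"] and color_code not in mapping["color_code"]:
        return False
    if mapping["layer_name_regex"] and not any(
        re.search(pattern, layer_name)
        for pattern in mapping["layer_name_regex"]
    ):
        return False
    return True

print(matches(mapping, "red", "fg_hero"))  # True: both filters pass
print(matches(mapping, "red", "bg_sky"))   # False: name regex fails
```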
@ -0,0 +1,18 @@
import os

import pyblish.api

from ayon_photoshop import api as photoshop


class CollectCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context."""

    order = pyblish.api.CollectorOrder - 0.49
    label = "Current File"
    hosts = ["photoshop"]

    def process(self, context):
        context.data["currentFile"] = os.path.normpath(
            photoshop.stub().get_active_document_full_name()
        ).replace("\\", "/")
@ -0,0 +1,57 @@
import os
import re
import pyblish.api

from ayon_photoshop import api as photoshop


class CollectExtensionVersion(pyblish.api.ContextPlugin):
    """Pulls and compares version of installed extension.

    It is recommended to use the same extension version as provided with
    the OpenPype code.

    Please use Anastasiy’s Extension Manager or ZXPInstaller to update the
    extension in case of an error.

    You can locate extension.zxp in your installed OpenPype code in
    `repos/avalon-core/avalon/photoshop`
    """
    # This technically should be a validator, but other collectors might be
    # impacted by usage of an obsolete extension, so a collector that runs
    # first was chosen
    order = pyblish.api.CollectorOrder - 0.5
    label = "Collect extension version"
    hosts = ["photoshop"]

    optional = True
    active = True

    def process(self, context):
        installed_version = photoshop.stub().get_extension_version()

        if not installed_version:
            raise ValueError("Unknown version, probably old extension")

        manifest_url = os.path.join(os.path.dirname(photoshop.__file__),
                                    "extension", "CSXS", "manifest.xml")

        if not os.path.exists(manifest_url):
            self.log.debug(
                "Unable to locate extension manifest, not checking")
            return

        expected_version = None
        with open(manifest_url) as fp:
            content = fp.read()

        found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
                           content)
        if found:
            expected_version = found[0][1]

        if expected_version != installed_version:
            msg = "Expected version '{}' found '{}'\n".format(
                expected_version, installed_version)
            msg += "Please update your installed extension, it might not "
            msg += "work properly."

            raise ValueError(msg)
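The expected version is pulled from the CEP manifest with the regex above; a short standalone run against a hypothetical manifest snippet:

```
import re

content = '<ExtensionManifest ExtensionBundleVersion="1.0.5">'  # made up
found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")', content)
expected_version = found[0][1] if found else None
print(expected_version)  # 1.0.5
```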
@ -0,0 +1,20 @@
import pyblish.api

from ayon_photoshop import api


class CollectImage(pyblish.api.InstancePlugin):
    """Collect layer metadata into an instance.

    Used later in validation.
    """
    order = pyblish.api.CollectorOrder + 0.200
    label = 'Collect Image'

    hosts = ["photoshop"]
    families = ["image"]

    def process(self, instance):
        if instance.data.get("members"):
            layer = api.stub().get_layer(instance.data["members"][0])
            instance.data["layer"] = layer
@ -0,0 +1,63 @@
"""Collects published version of workfile and increments it.

For synchronization of the published image and workfile versions it is
required to store the workfile version from the workfile file name in
context.data["version"].
In remote publishing this name is unreliable (an artist might not follow
the naming convention etc.), so the last published workfile version for the
particular workfile product is used instead.

This plugin runs only in remote publishing (eg. Webpublisher).

Requires:
    context.data["folderEntity"]

Provides:
    context["version"] - incremented latest published workfile version
"""

import pyblish.api
import ayon_api

from ayon_core.pipeline.version_start import get_versioning_start


class CollectPublishedVersion(pyblish.api.ContextPlugin):
    """Collects published version of workfile and increments it."""

    order = pyblish.api.CollectorOrder + 0.190
    label = "Collect published version"
    hosts = ["photoshop"]
    targets = ["automated"]

    def process(self, context):
        workfile_product_name = None
        for instance in context:
            if instance.data["productType"] == "workfile":
                workfile_product_name = instance.data["productName"]
                break

        if not workfile_product_name:
            self.log.warning("No workfile instance found, "
                             "synchronization of version will not work.")
            return

        project_name = context.data["projectName"]
        folder_id = context.data["folderEntity"]["id"]

        version_entity = ayon_api.get_last_version_by_product_name(
            project_name, workfile_product_name, folder_id
        )

        if version_entity:
            version_int = int(version_entity["version"]) + 1
        else:
            version_int = get_versioning_start(
                project_name,
                "photoshop",
                task_name=context.data["task"],
                task_type=context.data["taskType"],
                project_settings=context.data["project_settings"]
            )

        self.log.debug(f"Setting {version_int} to context.")
        context.data["version"] = version_int
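The version choice reduces to "last published + 1, else the configured start". A minimal sketch (the entity shape is assumed to carry an integer under "version", matching the usage above):

```
def next_workfile_version(last_version_entity, versioning_start=1):
    # Mirrors the branch above: bump the last published version,
    # otherwise fall back to the project's starting version.
    if last_version_entity:
        return int(last_version_entity["version"]) + 1
    return versioning_start

print(next_workfile_version({"version": 4}))  # 5
print(next_workfile_version(None))            # 1
```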
@ -0,0 +1,25 @@
"""
Requires:
    None

Provides:
    instance -> family ("review")
"""

import pyblish.api


class CollectReview(pyblish.api.ContextPlugin):
    """Adds review to families for instances marked to be reviewable."""

    label = "Collect Review"
    hosts = ["photoshop"]
    order = pyblish.api.CollectorOrder + 0.1

    def process(self, context):
        for instance in context:
            creator_attributes = instance.data["creator_attributes"]
            if (creator_attributes.get("mark_for_review") and
                    "review" not in instance.data["families"]):
                instance.data["families"].append("review")
@ -0,0 +1,29 @@
import pyblish.api


class CollectVersion(pyblish.api.InstancePlugin):
    """Collect version for publishable instances.

    Used to synchronize the version from the workfile to all publishable
    instances:
    - image (manually created or color coded)
    - review
    - workfile

    Dev comment:
    An explicit collector was created to control this from a single place
    instead of from 3 different ones.

    The workfile version is set here explicitly, as the version might need
    to be forced to latest + 1 because of the Webpublisher.
    (This plugin must run after CollectPublishedVersion!)
    """
    order = pyblish.api.CollectorOrder + 0.200
    label = 'Collect Version'

    hosts = ["photoshop"]
    families = ["image", "review", "workfile"]

    def process(self, instance):
        workfile_version = instance.context.data["version"]
        self.log.debug(f"Applying version {workfile_version}")
        instance.data["version"] = workfile_version
@ -0,0 +1,30 @@
import os
import pyblish.api


class CollectWorkfile(pyblish.api.ContextPlugin):
    """Collect current script for publish."""

    order = pyblish.api.CollectorOrder + 0.1
    label = "Collect Workfile"
    hosts = ["photoshop"]

    default_variant = "Main"

    def process(self, context):
        for instance in context:
            if instance.data["productType"] == "workfile":
                file_path = context.data["currentFile"]
                _, ext = os.path.splitext(file_path)
                staging_dir = os.path.dirname(file_path)
                base_name = os.path.basename(file_path)

                # creating representation
                instance.data["representations"].append({
                    "name": ext[1:],
                    "ext": ext[1:],
                    "files": base_name,
                    "stagingDir": staging_dir,
                })
                return
@ -0,0 +1,101 @@
import os

import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import get_instance_staging_dir
from ayon_photoshop import api as photoshop


class ExtractImage(pyblish.api.ContextPlugin):
    """Extract all layers (groups) marked for publish.

    Usually a publishable instance is created as a wrapper of layer(s). For
    each publishable instance, as many images as there are 'formats' are
    created.

    The logic tries to hide/unhide layers a minimum number of times.

    Called once for all publishable instances.
    """

    order = publish.Extractor.order - 0.48
    label = "Extract Image"
    hosts = ["photoshop"]

    families = ["image", "background"]
    formats = ["png", "jpg"]

    def process(self, context):
        stub = photoshop.stub()
        hidden_layer_ids = set()

        all_layers = stub.get_layers()
        for layer in all_layers:
            if not layer.visible:
                hidden_layer_ids.add(layer.id)
        stub.hide_all_others_layers_ids([], layers=all_layers)

        with photoshop.maintained_selection():
            with photoshop.maintained_visibility(layers=all_layers):
                for instance in context:
                    if instance.data["productType"] not in self.families:
                        continue

                    staging_dir = self.staging_dir(instance)
                    self.log.info(
                        "Outputting image to {}".format(staging_dir))

                    # Perform extraction
                    files = {}
                    ids = set()
                    # real layers and groups
                    members = instance.data.get("members")
                    if members:
                        ids.update({int(member) for member in members})
                    # virtual groups collected by color coding or auto_image
                    add_ids = instance.data.pop("ids", None)
                    if add_ids:
                        ids.update(set(add_ids))
                    extract_ids = {
                        ll.id
                        for ll in stub.get_layers_in_layers_ids(
                            ids, all_layers)
                        if ll.id not in hidden_layer_ids
                    }

                    for extracted_id in extract_ids:
                        stub.set_visible(extracted_id, True)

                    file_basename = os.path.splitext(
                        stub.get_active_document_name()
                    )[0]
                    for extension in self.formats:
                        _filename = "{}.{}".format(file_basename, extension)
                        files[extension] = _filename

                        full_filename = os.path.join(staging_dir, _filename)
                        stub.saveAs(full_filename, extension, True)
                        self.log.info(f"Extracted: {extension}")

                    representations = []
                    for extension, filename in files.items():
                        representations.append({
                            "name": extension,
                            "ext": extension,
                            "files": filename,
                            "stagingDir": staging_dir
                        })
                    instance.data["representations"] = representations
                    instance.data["stagingDir"] = staging_dir

                    self.log.info(f"Extracted {instance} to {staging_dir}")

                    for extracted_id in extract_ids:
                        stub.set_visible(extracted_id, False)

    def staging_dir(self, instance):
        """Provide a temporary directory in which to store extracted files.

        Upon calling this method the staging directory is stored inside
        instance.data['stagingDir'].
        """
        return get_instance_staging_dir(instance)
@ -0,0 +1,328 @@
import os
import shutil
from PIL import Image

from ayon_core.lib import (
    run_subprocess,
    get_ffmpeg_tool_args,
)
from ayon_core.pipeline import publish
from ayon_photoshop import api as photoshop


class ExtractReview(publish.Extractor):
    """Produce flattened or sequence image files from all 'image' instances.

    If no 'image' instance is created, it produces a flattened image from
    all visible layers.

    It creates review, thumbnail and mov representations.

    The 'review' family could be used in other steps as a reference, as it
    contains a flattened image by default. (Eg. an artist could load this
    review as a single item and see the full image. In most cases the
    'image' product type is separated by layers for better usage in
    animation or comp.)
    """

    label = "Extract Review"
    hosts = ["photoshop"]
    families = ["review"]

    # Extract Options
    jpg_options = None
    mov_options = None
    make_image_sequence = None
    max_downscale_size = 8192

    def process(self, instance):
        staging_dir = self.staging_dir(instance)
        self.log.info("Outputting image to {}".format(staging_dir))

        fps = instance.data.get("fps", 25)
        stub = photoshop.stub()
        self.output_seq_filename = os.path.splitext(
            stub.get_active_document_name())[0] + ".%04d.jpg"

        layers = self._get_layers_from_image_instances(instance)
        self.log.info("Layers image instance found: {}".format(layers))

        repre_name = "jpg"
        repre_skeleton = {
            "name": repre_name,
            "ext": "jpg",
            "stagingDir": staging_dir,
            "tags": self.jpg_options['tags'],
        }

        if instance.data["productType"] != "review":
            self.log.debug(
                "Existing extracted file from image product type used."
            )
            # enable creation of review; without this the jpg review would
            # clash with the jpg of the image product type
            output_name = repre_name
            repre_name = "{}_{}".format(repre_name, output_name)
            repre_skeleton.update({"name": repre_name,
                                   "outputName": output_name})

            img_file = self.output_seq_filename % 0
            self._prepare_file_for_image_product_type(
                img_file, instance, staging_dir
            )
            repre_skeleton.update({
                "files": img_file,
            })
            processed_img_names = [img_file]
        elif self.make_image_sequence and len(layers) > 1:
            self.log.debug("Extract layers to image sequence.")
            img_list = self._save_sequence_images(staging_dir, layers)

            repre_skeleton.update({
                "frameStart": 0,
                "frameEnd": len(img_list),
                "fps": fps,
                "files": img_list,
            })
            processed_img_names = img_list
        else:
            self.log.debug("Extract layers to flattened image.")
            img_file = self._save_flatten_image(staging_dir, layers)

            repre_skeleton.update({
                "files": img_file,
            })
            processed_img_names = [img_file]

        instance.data["representations"].append(repre_skeleton)

        ffmpeg_args = get_ffmpeg_tool_args("ffmpeg")

        instance.data["stagingDir"] = staging_dir

        source_files_pattern = os.path.join(staging_dir,
                                            self.output_seq_filename)
        source_files_pattern = self._check_and_resize(processed_img_names,
                                                      source_files_pattern,
                                                      staging_dir)
        self._generate_thumbnail(
            list(ffmpeg_args),
            instance,
            source_files_pattern,
            staging_dir)

        no_of_frames = len(processed_img_names)
        if no_of_frames > 1:
            self._generate_mov(
                list(ffmpeg_args),
                instance,
                fps,
                no_of_frames,
                source_files_pattern,
                staging_dir)

        self.log.info(f"Extracted {instance} to {staging_dir}")

    def _prepare_file_for_image_product_type(
        self, img_file, instance, staging_dir
    ):
        """Converts an existing file for the image product type to .jpg.

        An image instance could have its own separate review (an instance
        per layer, for example). This uses the already extracted file
        instead of extracting again.

        Args:
            img_file (str): name of output file (with 0000 value for ffmpeg
                later)
            instance (pyblish.api.Instance)
            staging_dir (str): temporary folder where the extracted file is
                located
        """
        repre_file = instance.data["representations"][0]
        source_file_path = os.path.join(repre_file["stagingDir"],
                                        repre_file["files"])
        if not os.path.exists(source_file_path):
            raise RuntimeError(f"{source_file_path} doesn't exist for "
                               "review to create from")
        _, ext = os.path.splitext(repre_file["files"])
        if ext != ".jpg":
            im = Image.open(source_file_path)
            if (im.mode in ('RGBA', 'LA') or (
                    im.mode == 'P' and 'transparency' in im.info)):
                # without this it produces a messy low quality jpg
                rgb_im = Image.new("RGBA", (im.width, im.height), "#ffffff")
                rgb_im.alpha_composite(im)
                rgb_im.convert("RGB").save(
                    os.path.join(staging_dir, img_file))
            else:
                im.save(os.path.join(staging_dir, img_file))
        else:
            # handles an already existing .jpg
            shutil.copy(source_file_path,
                        os.path.join(staging_dir, img_file))

    def _generate_mov(self, ffmpeg_args, instance, fps, no_of_frames,
                      source_files_pattern, staging_dir):
        """Generates .mov to upload to Ftrack.

        Args:
            ffmpeg_args (list): ffmpeg executable and arguments
            instance (pyblish.api.Instance)
            fps (str)
            no_of_frames (int)
            source_files_pattern (str): name of source file
            staging_dir (str): temporary location to store the mov
        Updates:
            instance - adds representation portion
        """
        # Generate mov.
        mov_path = os.path.join(staging_dir, "review.mov")
        self.log.info(f"Generate mov review: {mov_path}")
        args = ffmpeg_args + [
            "-y",
            "-i", source_files_pattern,
            "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
            "-vframes", str(no_of_frames),
            mov_path
        ]
        self.log.debug("mov args:: {}".format(args))
        _output = run_subprocess(args)
        instance.data["representations"].append({
            "name": "mov",
            "ext": "mov",
            "files": os.path.basename(mov_path),
            "stagingDir": staging_dir,
            "frameStart": 1,
            "frameEnd": no_of_frames,
            "fps": fps,
            "tags": self.mov_options['tags']
        })

    def _generate_thumbnail(
        self, ffmpeg_args, instance, source_files_pattern, staging_dir
    ):
        """Generates a scaled down thumbnail and adds it as representation.

        Args:
            ffmpeg_args (list): ffmpeg executable and arguments
            instance (pyblish.api.Instance)
            source_files_pattern (str): name of source file
            staging_dir (str): temporary location to store the thumbnail
        Updates:
            instance - adds representation portion
        """
        # Generate thumbnail
        thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
        self.log.info(f"Generate thumbnail {thumbnail_path}")
        args = ffmpeg_args + [
            "-y",
            "-i", source_files_pattern,
            "-vf", "scale=300:-1",
            "-vframes", "1",
            thumbnail_path
        ]
        self.log.debug("thumbnail args:: {}".format(args))
        _output = run_subprocess(args)
        instance.data["representations"].append({
            "name": "thumbnail",
            "ext": "jpg",
            "outputName": "thumb",
            "files": os.path.basename(thumbnail_path),
            "stagingDir": staging_dir,
            "tags": ["thumbnail", "delete"]
        })
        instance.data["thumbnailPath"] = thumbnail_path

    def _check_and_resize(self, processed_img_names, source_files_pattern,
                          staging_dir):
        """Check if the saved image can be used in ffmpeg.

        Ffmpeg has a max size of 16384x16384. Saved image(s) must be
        resized to be used as a source for the thumbnail or the review mov.
        """
        Image.MAX_IMAGE_PIXELS = None
        first_url = os.path.join(staging_dir, processed_img_names[0])
        with Image.open(first_url) as im:
            width, height = im.size

        if width > self.max_downscale_size or height > self.max_downscale_size:
            resized_dir = os.path.join(staging_dir, "resized")
            os.mkdir(resized_dir)
            source_files_pattern = os.path.join(resized_dir,
                                                self.output_seq_filename)
            for file_name in processed_img_names:
                source_url = os.path.join(staging_dir, file_name)
                with Image.open(source_url) as res_img:
                    # 'thumbnail' automatically keeps aspect ratio
                    # (LANCZOS replaces ANTIALIAS, removed in Pillow 10)
                    res_img.thumbnail((self.max_downscale_size,
                                       self.max_downscale_size),
                                      Image.LANCZOS)
                    res_img.save(os.path.join(resized_dir, file_name))

        return source_files_pattern

    def _get_layers_from_image_instances(self, instance):
        """Collect all layers from 'instance'.

        Returns:
            (list) of PSItem
        """
        layers = []
        # creating review for an existing 'image' instance
        if (
            instance.data["productType"] == "image"
            and instance.data.get("layer")
        ):
            layers.append(instance.data["layer"])
            return layers

        for image_instance in instance.context:
            if image_instance.data["productType"] != "image":
                continue
            if not image_instance.data.get("layer"):
                # dummy instance for flattened image
                continue
            layers.append(image_instance.data.get("layer"))

        return sorted(layers)

    def _save_flatten_image(self, staging_dir, layers):
        """Creates a flat image from 'layers' into 'staging_dir'.

        Returns:
            (str): path to the new image
        """
        img_filename = self.output_seq_filename % 0
        output_image_path = os.path.join(staging_dir, img_filename)
        stub = photoshop.stub()

        with photoshop.maintained_visibility():
            self.log.info("Extracting {}".format(layers))
            if layers:
                stub.hide_all_others_layers(layers)

            stub.saveAs(output_image_path, 'jpg', True)

        return img_filename

    def _save_sequence_images(self, staging_dir, layers):
        """Creates separate flat images from 'layers' into 'staging_dir'.

        Used as source for a multi frame .mov to review all at once.
        Returns:
            (list): paths to the new images
        """
        stub = photoshop.stub()

        list_img_filename = []
        with photoshop.maintained_visibility():
            for i, layer in enumerate(layers):
                self.log.info("Extracting {}".format(layer))

                img_filename = self.output_seq_filename % i
                output_image_path = os.path.join(staging_dir, img_filename)
                list_img_filename.append(img_filename)

                with photoshop.maintained_visibility():
                    stub.hide_all_others_layers([layer])
                    stub.saveAs(output_image_path, 'jpg', True)

        return list_img_filename
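The `pad=ceil(iw/2)*2:ceil(ih/2)*2` video filter in the mov args exists because common codecs require even frame dimensions; the rounding it performs is simply:

```
import math

def padded(size):
    # Round an odd dimension up to the next even pixel count.
    return math.ceil(size / 2) * 2

print(padded(1919), padded(1080))  # 1920 1080
```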
@ -0,0 +1,14 @@
from ayon_core.pipeline import publish
from ayon_photoshop import api as photoshop


class ExtractSaveScene(publish.Extractor):
    """Save scene before extraction."""

    order = publish.Extractor.order - 0.49
    label = "Extract Save Scene"
    hosts = ["photoshop"]
    families = ["workfile"]

    def process(self, instance):
        photoshop.stub().save()
@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
    <error id="main">
        <title>Folder does not match</title>
        <description>
## Collected folder path is not the same as in the context

{msg}
### How to repair?
{repair_msg}
Refresh Publish afterwards (circle arrow at the bottom right).

If that's not the correct value, close the workfile and reopen it via
Workfiles to get the proper context folder path, OR disable this validator
and publish again if you are deliberately publishing to a different context.

(Context means the combination of project, folder path and task name.)
        </description>
    </error>
</root>
@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<root>
    <error id="main">
        <title>Product name</title>
        <description>
## Invalid product or layer name

Product or layer names cannot contain specific characters (spaces etc.)
which could cause issues when the product name is used in a published
file name.
{msg}

### How to repair?

Use the "Repair" button on the right, then press the Refresh publishing
button at the bottom right.
        </description>
        <detail>
### __Detailed Info__ (optional)

Not all characters are allowed in file names on every OS. The set of
invalid characters can be configured in Settings.
        </detail>
    </error>
</root>
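These two XML files are help templates for the publisher UI: the `{msg}` and `{repair_msg}` tokens are str.format-style placeholders filled from the `formatting_data` dict passed to `PublishXmlValidationError`, as the validators later in this diff do. A minimal sketch (values illustrative):

```
raise PublishXmlValidationError(
    self,
    "Folder does not match",
    formatting_data={
        "msg": "Instance folder /proj/shots/sh010 is not the same"
               " as current context /proj/shots/sh020.",
        "repair_msg": "Repair with 'Repair' button to use '/proj/shots/sh020'.\n",
    },
)
```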
@ -0,0 +1,32 @@
import os

import pyblish.api

from ayon_core.pipeline.publish import get_errored_plugins_from_context
from ayon_core.lib import version_up

from ayon_photoshop import api as photoshop


class IncrementWorkfile(pyblish.api.InstancePlugin):
    """Increment the current workfile.

    Saves the current scene with an increased version number.
    """

    label = "Increment Workfile"
    order = pyblish.api.IntegratorOrder + 9.0
    hosts = ["photoshop"]
    families = ["workfile"]
    optional = True

    def process(self, instance):
        errored_plugins = get_errored_plugins_from_context(instance.context)
        if errored_plugins:
            raise RuntimeError(
                "Skipping incrementing current file because publishing failed."
            )

        scene_path = version_up(instance.context.data["currentFile"])
        _, ext = os.path.splitext(scene_path)
        photoshop.stub().saveAs(scene_path, ext[1:], True)

        self.log.info("Incremented workfile to: {}".format(scene_path))
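`version_up` comes from `ayon_core.lib`; the sketch below shows the assumed behaviour (bumping the trailing version token of the workfile path) with purely illustrative values:

```
from ayon_core.lib import version_up

# Assumed convention: the trailing v### token is incremented.
path = "C:/work/sh010_compositing_v001.psd"
print(version_up(path))  # -> "C:/work/sh010_compositing_v002.psd" (assumed)
```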
@ -0,0 +1,76 @@
import pyblish.api

from ayon_core.pipeline import get_current_folder_path
from ayon_core.pipeline.publish import (
    ValidateContentsOrder,
    PublishXmlValidationError,
    OptionalPyblishPluginMixin
)
from ayon_photoshop import api as photoshop


class ValidateInstanceFolderRepair(pyblish.api.Action):
    """Repair the instance folder."""

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (
                result["error"] is not None
                and result["instance"] is not None
                and result["instance"] not in failed
            ):
                failed.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)
        stub = photoshop.stub()
        current_folder_path = get_current_folder_path()
        for instance in instances:
            data = stub.read(instance[0])
            data["folderPath"] = current_folder_path
            stub.imprint(instance[0], data)


class ValidateInstanceAsset(OptionalPyblishPluginMixin,
                            pyblish.api.InstancePlugin):
    """Validate that the instance folder is the currently selected
    context folder.

    When multiple workfiles are opened, switching between them can get
    out of sync with the selected context. In that case outputs might be
    published under the wrong folder!

    The repair action will use the context folder value (from Workfiles
    or the Launcher). Closing and reopening via Workfiles will refresh
    the context value.
    """

    label = "Validate Instance Folder"
    hosts = ["photoshop"]
    optional = True
    actions = [ValidateInstanceFolderRepair]
    order = ValidateContentsOrder

    def process(self, instance):
        instance_folder_path = instance.data["folderPath"]
        current_folder_path = get_current_folder_path()

        if instance_folder_path != current_folder_path:
            msg = (
                f"Instance folder {instance_folder_path} is not the same"
                f" as current context {current_folder_path}."
            )
            repair_msg = (
                "Repair with 'Repair' button"
                f" to use '{current_folder_path}'.\n"
            )
            formatting_data = {"msg": msg,
                               "repair_msg": repair_msg}
            raise PublishXmlValidationError(self, msg,
                                            formatting_data=formatting_data)
@ -0,0 +1,116 @@
import re

import pyblish.api

from ayon_photoshop import api as photoshop
from ayon_core.pipeline.create import PRODUCT_NAME_ALLOWED_SYMBOLS
from ayon_core.pipeline.publish import (
    ValidateContentsOrder,
    PublishXmlValidationError,
)


class ValidateNamingRepair(pyblish.api.Action):
    """Repair invalid product and layer names."""

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (
                result["error"] is not None
                and result["instance"] is not None
                and result["instance"] not in failed
            ):
                failed.append(result["instance"])

        invalid_chars, replace_char = plugin.get_replace_chars()
        self.log.debug("{} --- {}".format(invalid_chars, replace_char))

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)
        stub = photoshop.stub()
        for instance in instances:
            self.log.debug("validate_naming instance {}".format(instance))
            current_layer_state = stub.get_layer(instance.data["layer"].id)
            self.log.debug("current_layer {}".format(current_layer_state))

            layer_meta = stub.read(current_layer_state)
            instance_id = (layer_meta.get("instance_id") or
                           layer_meta.get("uuid"))
            if not instance_id:
                self.log.warning("Unable to repair, cannot find layer")
                continue

            layer_name = re.sub(invalid_chars,
                                replace_char,
                                current_layer_state.clean_name)
            layer_name = stub.PUBLISH_ICON + layer_name

            stub.rename_layer(current_layer_state.id, layer_name)

            product_name = re.sub(invalid_chars, replace_char,
                                  instance.data["productName"])

            # format from Tool Creator
            product_name = re.sub(
                "[^{}]+".format(PRODUCT_NAME_ALLOWED_SYMBOLS),
                "",
                product_name
            )

            layer_meta["productName"] = product_name
            stub.imprint(instance_id, layer_meta)

        return True


class ValidateNaming(pyblish.api.InstancePlugin):
    """Validate the instance name.

    Spaces in names are not allowed and will be replaced with underscores.
    """

    label = "Validate Naming"
    hosts = ["photoshop"]
    order = ValidateContentsOrder
    families = ["image"]
    actions = [ValidateNamingRepair]

    # configured by Settings
    invalid_chars = ''
    replace_char = ''

    def process(self, instance):
        help_msg = ' Use Repair button to fix it and then refresh publish.'

        layer = instance.data.get("layer")
        if layer:
            msg = "Name \"{}\" is not allowed.{}".format(
                layer.clean_name, help_msg
            )
            formatting_data = {"msg": msg}
            if re.search(self.invalid_chars, layer.clean_name):
                raise PublishXmlValidationError(
                    self, msg, formatting_data=formatting_data
                )

        product_name = instance.data["productName"]
        msg = "Product \"{}\" is not allowed.{}".format(
            product_name, help_msg
        )
        formatting_data = {"msg": msg}
        if re.search(self.invalid_chars, product_name):
            raise PublishXmlValidationError(
                self, msg, formatting_data=formatting_data
            )

    @classmethod
    def get_replace_chars(cls):
        """Pass values configured in Settings for Repair."""
        return cls.invalid_chars, cls.replace_char
Binary file not shown.
3
server_addon/photoshop/client/ayon_photoshop/version.py
Normal file
@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'photoshop' version."""
__version__ = "0.2.0"
6
server_addon/photoshop/client/pyproject.toml
Normal file
@ -0,0 +1,6 @@
[project]
name="photoshop"
description="AYON Photoshop addon."

[ayon.runtimeDependencies]
wsrpc_aiohttp = "^3.1.1" # websocket server
@ -1,3 +1,10 @@
name = "photoshop"
title = "Photoshop"
version = "0.1.3"
version = "0.2.0"

client_dir = "ayon_photoshop"

ayon_required_addons = {
    "core": ">0.3.2",
}
ayon_compatible_addons = {}
28
server_addon/resolve/client/ayon_resolve/README.markdown
Normal file
@ -0,0 +1,28 @@
## Basic setup

- Currently supported versions are up to v18
- install Python 3.6.2 (latest tested on v17) or up to 3.9.13 (latest tested on v18)
- pip install PySide2:
    - Python 3.9.*: open a terminal, go to the python.exe directory, then `python -m pip install PySide2`
- pip install OpenTimelineIO:
    - Python 3.9.*: open a terminal, go to the python.exe directory, then `python -m pip install OpenTimelineIO`
    - Python 3.6: open a terminal, go to the python.exe directory, then `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move the built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I built it on a Win10 machine with Visual Studio Community 2019 and
 with CMake installed and available on PATH.
- make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6

- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the Python3 path to `RESOLVE_PYTHON3_HOME` for the relevant platform.

## Editorial setup

This is how it looks on my testing project timeline

Notice I renamed the tracks: `main` holds the metadata markers, and `review` is used for generating review data with ffmpeg conversion to a jpg sequence.

1. start the AYON menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__**
2. then select any clips in the `main` track and change their color to `Chocolate`
3. in the OpenPype Menu select `Create`
4. in the Creator select `Create Publishable Clip [New]` (temporary name)
5. set `Rename clips` to True, Master Track to `main` and Use review track to `review` as in the picture

6. after you hit `ok`, all clips are colored `Pink` and marked with an openpype metadata tag
7. hit `Publish` in the openpype menu and check that everything has been collected correctly. That is the last step for now, as the rest is work in progress. Next steps will follow.
@ -0,0 +1,838 @@
Last Updated: 1 April 2024
----------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import
modules for scripting access (DaVinciResolveScript.py) and some representative examples.

From v16.2.0 onwards, the nodeIndex parameters accepted by SetLUT() and SetCDL() are 1-based instead of 0-based, i.e. 1 <= nodeIndex <= total number of nodes.

Overview
--------
As with Blackmagic Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page,
or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when
allowing scripting access from outside of the Resolve application.

Prerequisites
-------------
DaVinci Resolve scripting requires one of the following to be installed (for all users):

    Lua 5.1
    Python >= 3.6 64-bit
    Python 2.7 64-bit

Using a script
--------------
DaVinci Resolve needs to be running for a script to be invoked.

For a Resolve script to be executed from an external folder, the script needs to know of the API location.
You may need to set these environment variables to allow your Python installation to pick up the appropriate dependencies as shown below:

    Mac OS X:
    RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting"
    RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so"
    PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"

    Windows:
    RESOLVE_SCRIPT_API="%PROGRAMDATA%\Blackmagic Design\DaVinci Resolve\Support\Developer\Scripting"
    RESOLVE_SCRIPT_LIB="C:\Program Files\Blackmagic Design\DaVinci Resolve\fusionscript.dll"
    PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\Modules\"

    Linux:
    RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting"
    RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so"
    PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
    (Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve)
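An external Python script can locate the module from these variables at runtime; a minimal sketch, assuming the variables above are set (the fallback shown is the Linux default path listed above):

    import os
    import sys

    # Make the scripting module importable from an external Python.
    script_api = os.getenv(
        "RESOLVE_SCRIPT_API",
        "/opt/resolve/Developer/Scripting",  # Linux default from above
    )
    sys.path.append(os.path.join(script_api, "Modules"))

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    print(resolve.GetVersionString())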
As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console.

On startup, DaVinci Resolve scans the subfolders in the directories shown below and enumerates the scripts found in the Workspace application menu under Scripts.
Place your script under Utility to be listed in all pages, under Comp or Tool to be available in the Fusion page or under folders for individual pages (Edit, Color or Deliver). Scripts under Deliver are additionally listed under render jobs.
Placing your script here and invoking it from the menu is the easiest way to use scripts.
    Mac OS X:
      - All users: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts
      - Specific user: /Users/<UserName>/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts
    Windows:
      - All users: %PROGRAMDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts
      - Specific user: %APPDATA%\Roaming\Blackmagic Design\DaVinci Resolve\Support\Fusion\Scripts
    Linux:
      - All users: /opt/resolve/Fusion/Scripts (or /home/resolve/Fusion/Scripts/ depending on installation)
      - Specific user: $HOME/.local/share/DaVinciResolve/Fusion/Scripts

The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6
and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual.

This example Python script creates a simple project:

    #!/usr/bin/env python
    import DaVinciResolveScript as dvr_script
    resolve = dvr_script.scriptapp("Resolve")
    fusion = resolve.Fusion()
    projectManager = resolve.GetProjectManager()
    projectManager.CreateProject("Hello World")

The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and "getmetatable"
in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality.


Running DaVinci Resolve in headless mode
----------------------------------------
DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled.
However, the various scripting APIs will continue to work as expected.
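A minimal sketch of driving a headless session from Python; the binary path below is a hypothetical Linux default and must be adjusted per platform and installation, and the fixed sleep is a crude stand-in for a proper readiness check:

    import subprocess
    import time

    import DaVinciResolveScript as dvr_script

    # Hypothetical binary path; adjust per platform/installation.
    subprocess.Popen(["/opt/resolve/bin/resolve", "-nogui"])
    time.sleep(20)  # crude wait for the scripting service to come up

    resolve = dvr_script.scriptapp("Resolve")
    print(resolve.GetProductName())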
DaVinci Resolve API
-------------------
Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions.

Resolve
  Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
  GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations.
  GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database.
  OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
  GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None).
  GetProductName() --> string # Returns product name.
  GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format.
  GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format.
  LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'.
  UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout.
  ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'.
  DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'.
  SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'.
  ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename.
  Quit() --> None # Quits the Resolve App.
  ImportRenderPreset(presetPath) --> Bool # Import a preset from presetPath (string) and set it as current preset for rendering.
  ExportRenderPreset(presetName, exportPath) --> Bool # Export a preset to a given path (string) if presetName (string) exists.
  ImportBurnInPreset(presetPath) --> Bool # Import a data burn in preset from a given presetPath (string)
  ExportBurnInPreset(presetName, exportPath) --> Bool # Export a data burn in preset to a given path (string) if presetName (string) exists.
  GetKeyframeMode() --> keyframeMode # Returns the currently set keyframe mode (int). Refer to section 'Keyframe Mode information' below for details.
  SetKeyframeMode(keyframeMode) --> Bool # Returns True when 'keyframeMode' (enum) is successfully set. Refer to section 'Keyframe Mode information' below for details.
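A quick sketch exercising a few of the Resolve object calls above (assumes Resolve is running and the scripting module is importable as shown earlier):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    print(resolve.GetProductName(), resolve.GetVersionString())

    # Jump to the Edit page and confirm it took effect.
    if resolve.OpenPage("edit"):
        assert resolve.GetCurrentPage() == "edit"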
ProjectManager
  ArchiveProject(projectName, filePath, isArchiveSrcMedia=True, isArchiveRenderCache=True, isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments
  CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not.
  DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded
  LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project.
  GetCurrentProject() --> Project # Returns the currently loaded Resolve project.
  SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful.
  CloseProject(project) --> Bool # Closes the specified project without saving.
  CreateFolder(folderName) --> Bool # Creates a folder if folderName (string) is unique.
  DeleteFolder(folderName) --> Bool # Deletes the specified folder if it exists. Returns True in case of success.
  GetProjectListInCurrentFolder() --> [project names...] # Returns a list of project names in current folder.
  GetFolderListInCurrentFolder() --> [folder names...] # Returns a list of folder names in current folder.
  GotoRootFolder() --> Bool # Opens root folder in database.
  GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
  GetCurrentFolder() --> string # Returns the current folder name.
  OpenFolder(folderName) --> Bool # Opens folder under given name.
  ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful.
  ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success.
  RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful.
  GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection
  GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve
  SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project.
      # 'DbType': 'Disk' or 'PostgreSQL' (string)
      # 'DbName': database name (string)
      # 'IpAddress': IP address of the PostgreSQL server (string, optional key - defaults to '127.0.0.1')
  CreateCloudProject({cloudSettings}) --> Project # Creates and returns a cloud project.
      # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
  ImportCloudProject(filePath, {cloudSettings}) --> Bool # Returns True if import cloud project is successful; False otherwise
      # 'filePath': String; filePath of file to import
      # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.
  RestoreCloudProject(folderPath, {cloudSettings}) --> Bool # Returns True if restore cloud project is successful; False otherwise
      # 'folderPath': String; path of folder to restore
      # '{cloudSettings}': Check 'Cloud Projects Settings' subsection below for more information.

Project
  GetMediaPool() --> MediaPool # Returns the Media Pool object.
  GetTimelineCount() --> int # Returns the number of timelines currently present in the project.
  GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
  GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
  SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
  GetGallery() --> Gallery # Returns the Gallery object.
  GetName() --> string # Returns project name.
  SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique.
  GetPresetList() --> [presets...] # Returns a list of presets and their information.
  SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
  AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job.
  DeleteRenderJob(jobId) --> Bool # Deletes render job for input job id (string).
  DeleteAllRenderJobs() --> Bool # Deletes all render jobs in the queue.
  GetRenderJobList() --> [render jobs...] # Returns a list of render jobs and their information.
  GetRenderPresetList() --> [presets...] # Returns a list of render presets and their information.
  StartRendering(jobId1, jobId2, ...) --> Bool # Starts rendering jobs indicated by the input job ids.
  StartRendering([jobIds...], isInteractiveMode=False) --> Bool # Starts rendering jobs indicated by the input job ids.
      # The optional "isInteractiveMode", when set, enables error feedback in the UI during rendering.
  StartRendering(isInteractiveMode=False) --> Bool # Starts rendering all queued render jobs.
      # The optional "isInteractiveMode", when set, enables error feedback in the UI during rendering.
  StopRendering() --> None # Stops any current render processes.
  IsRenderingInProgress() --> Bool # Returns True if rendering is in progress.
  LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists.
  SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName (string) is unique.
  SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys:
      # Refer to "Looking up render settings" section for information for supported settings
  GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string).
  GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information.
  SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information.
  GetRenderFormats() --> {render formats..} # Returns a dict (format -> file extension) of available render formats.
  GetRenderCodecs(renderFormat) --> {render codecs...} # Returns a dict (codec description -> codec name) of available codecs for given render format (string).
  GetCurrentRenderFormatAndCodec() --> {format, codec} # Returns a dict with currently selected format 'format' and render codec 'codec'.
  SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering.
  GetCurrentRenderMode() --> int # Returns the render mode: 0 - Individual clips, 1 - Single clip.
  SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip.
  GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height".
  RefreshLUTList() --> Bool # Refreshes LUT List
  GetUniqueId() --> string # Returns a unique ID for the project item
  InsertAudioToCurrentTrackAtPlayhead(mediaPath, startOffsetInSamples, durationInSamples) --> Bool # Inserts the media specified by mediaPath (string) with startOffsetInSamples (int) and durationInSamples (int) at the playhead on a selected track on the Fairlight page. Returns True if successful, otherwise False.
  LoadBurnInPreset(presetName) --> Bool # Loads user defined data burn in preset for project when supplied presetName (string). Returns true if successful.
  ExportCurrentFrameAsStill(filePath) --> Bool # Exports current frame as still to supplied filePath. filePath must end in valid export file format. Returns True if successful, False otherwise.
  GetColorGroupsList() --> [ColorGroups...] # Returns a list of all group objects in the timeline.
  AddColorGroup(groupName) --> ColorGroup # Creates a new ColorGroup. groupName must be a unique string.
  DeleteColorGroup(colorGroup) --> Bool # Deletes the given color group and sets clips to ungrouped.
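A sketch of the render-queue flow using the Project calls above; the "TargetDir"/"CustomName" keys come from the render-settings lookup section of the full README (not shown here), and all values are illustrative:

    import time
    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()

    # Queue a render of the current timeline into an illustrative target dir.
    project.SetRenderSettings({"TargetDir": "/tmp/renders", "CustomName": "review"})
    job_id = project.AddRenderJob()
    project.StartRendering(job_id)

    while project.IsRenderingInProgress():
        print(project.GetRenderJobStatus(job_id))
        time.sleep(1)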
MediaStorage
  GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage.
  GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path.
  GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
  RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve’s Media Storage.
  AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created.
  AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
  AddItemListToMediaPool([{itemInfo}, ...]) --> [clips...] # Adds list of itemInfos specified as dict of "media", "startFrame" (int), "endFrame" (int) from Media Storage into current Media Pool folder. Returns a list of the MediaPoolItems created.
  AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful.
  AddTimelineMattesToMediaPool([paths]) --> [MediaPoolItems] # Adds specified media files as timeline mattes in current media pool folder. Returns a list of created MediaPoolItems.

MediaPool
  GetRootFolder() --> Folder # Returns root Folder of Media Pool
  AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name.
  RefreshFolders() --> Bool # Updates the folders in collaboration mode
  CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name.
  AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
  AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
  AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only), "trackIndex" (int) and "recordFrame" (int). Returns the list of appended timelineItems.
  CreateTimelineFromClips(name, clip1, clip2, ...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
  CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
  CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), "recordFrame" (int).
  ImportTimelineFromFile(filePath, {importOptions}) --> Timeline # Creates timeline based on parameters within given file (AAF/EDL/XML/FCPXML/DRT/ADL/OTIO) and optional importOptions dict, with support for the keys:
      # "timelineName": string, specifies the name of the timeline to be created. Not valid for DRT import
      # "importSourceClips": Bool, specifies whether source clips should be imported, True by default. Not valid for DRT import
      # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True
      # "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False. Not valid for DRT import
      # "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. Valid only for AAF import
  DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool.
  GetCurrentFolder() --> Folder # Returns currently selected Folder.
  SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
  DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool
  ImportFolderFromFile(filePath, sourceClipsPath="") --> Bool # Returns true if import from given DRB filePath is successful, false otherwise
      # sourceClipsPath is a string that specifies a filesystem path to search for source clips if the media is inaccessible in their original path, empty by default
  DeleteFolders([subfolders]) --> Bool # Deletes specified subfolders in the media pool
  MoveClips([clips], targetFolder) --> Bool # Moves specified clips to target folder.
  MoveFolders([folders], targetFolder) --> Bool # Moves specified folders to target folder.
  GetClipMatteList(MediaPoolItem) --> [paths] # Get mattes for specified MediaPoolItem, as a list of paths to the matte files.
  GetTimelineMatteList(Folder) --> [MediaPoolItems] # Get mattes in specified Folder, as list of MediaPoolItems.
  DeleteClipMattes(MediaPoolItem, [paths]) --> Bool # Delete mattes based on their file paths, for specified MediaPoolItem. Returns True on success.
  RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path.
  UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips.
  ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
  ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created.
      # Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on.
      # Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx".
  ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format.
      # If no clips are specified, all clips from media pool will be used.
  GetUniqueId() --> string # Returns a unique ID for the media pool
  CreateStereoClip(LeftMediaPoolItem, RightMediaPoolItem) --> MediaPoolItem # Takes in two existing media pool items and creates a new 3D stereoscopic media pool entry replacing the input media in the media pool.
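A short sketch combining the MediaPool calls above (clip paths are illustrative):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    media_pool = resolve.GetProjectManager().GetCurrentProject().GetMediaPool()

    # Import two illustrative clips and cut them into a new timeline.
    clips = media_pool.ImportMedia(["/media/shot010.mov", "/media/shot020.mov"])
    timeline = media_pool.CreateTimelineFromClips("My Timeline", clips)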
Folder
  GetClipList() --> [clips...] # Returns a list of clips (items) within the folder.
  GetName() --> string # Returns the media folder name.
  GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder.
  GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise
  GetUniqueId() --> string # Returns a unique ID for the media pool folder
  Export(filePath) --> bool # Returns true if export of DRB folder to filePath is successful, false otherwise
  TranscribeAudio() --> Bool # Transcribes audio of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise
  ClearTranscription() --> Bool # Clears audio transcription of the MediaPoolItems within the folder and nested folders. Returns True if successful; False otherwise.

MediaPoolItem
  GetName() --> string # Returns the clip name.
  GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'.
      # If no argument is specified, a dict of all set metadata properties is returned.
  SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful.
  SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful.
  GetMediaId() --> string # Returns the unique ID for the MediaPoolItem.
  AddMarker(frameId, color, name, note, duration, customData) --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
  GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
      # Example of output format: {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...}
      # In the above example - there is one 'Green' marker at offset 96 (position of the marker)
  GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData.
  UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers.
  GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position.
  DeleteMarkersByColor(color) --> Bool # Delete all markers of the specified color from the media pool item. "All" as argument deletes all color markers.
  DeleteMarkerAtFrame(frameNum) --> Bool # Delete marker at frame number from the media pool item.
  DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
  AddFlag(color) --> Bool # Adds a flag with given color (string).
  GetFlagList() --> [colors...] # Returns a list of flag colors assigned to the item.
  ClearFlags(color) --> Bool # Clears the flag of the given color if one exists. An "All" argument is supported and clears all flags.
  GetClipColor() --> string # Returns the item color as a string.
  SetClipColor(colorName) --> Bool # Sets the item color based on the colorName (string).
  ClearClipColor() --> Bool # Clears the item color.
  GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'.
      # If no argument is specified, a dict of all clip properties is returned. Check the section below for more information.
  SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information.
  LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path.
  UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip.
  ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path.
  GetUniqueId() --> string # Returns a unique ID for the media pool item
  TranscribeAudio() --> Bool # Transcribes audio of the MediaPoolItem. Returns True if successful; False otherwise
  ClearTranscription() --> Bool # Clears audio transcription of the MediaPoolItem. Returns True if successful; False otherwise.
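Continuing the media_pool sketch above, markers round-trip in the documented {frameId: {...}} format:

    # Tag the first clip in the media pool with a review marker, then
    # read the markers back.
    root_folder = media_pool.GetRootFolder()
    clip = root_folder.GetClipList()[0]

    clip.AddMarker(96.0, "Green", "Marker 1", "needs grade", 1.0, "my-custom-id")
    for frame_id, info in clip.GetMarkers().items():
        print(frame_id, info["color"], info["name"], info["customData"])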
Timeline
  GetName() --> string # Returns the timeline name.
  SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful.
  GetStartFrame() --> int # Returns the frame number at the start of timeline.
  GetEndFrame() --> int # Returns the frame number at the end of timeline.
  SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise.
  GetStartTimecode() --> string # Returns the start timecode for the timeline.
  GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle").
  AddTrack(trackType, optionalSubTrackType) --> Bool # Adds track of trackType ("video", "subtitle", "audio"). Second argument optionalSubTrackType is required for "audio"
      # optionalSubTrackType can be one of {"mono", "stereo", "5.1", "5.1film", "7.1", "7.1film", "adaptive1", ... , "adaptive24"}
  DeleteTrack(trackType, trackIndex) --> Bool # Deletes track of trackType ("video", "subtitle", "audio") and given trackIndex. 1 <= trackIndex <= GetTrackCount(trackType).
  SetTrackEnable(trackType, trackIndex, Bool) --> Bool # Enables/Disables track with given trackType and trackIndex
      # trackType is one of {"audio", "video", "subtitle"}
      # 1 <= trackIndex <= GetTrackCount(trackType).
  GetIsTrackEnabled(trackType, trackIndex) --> Bool # Returns True if track with given trackType and trackIndex is enabled and False otherwise.
      # trackType is one of {"audio", "video", "subtitle"}
      # 1 <= trackIndex <= GetTrackCount(trackType).
  SetTrackLock(trackType, trackIndex, Bool) --> Bool # Locks/Unlocks track with given trackType and trackIndex
      # trackType is one of {"audio", "video", "subtitle"}
      # 1 <= trackIndex <= GetTrackCount(trackType).
  GetIsTrackLocked(trackType, trackIndex) --> Bool # Returns True if track with given trackType and trackIndex is locked and False otherwise.
      # trackType is one of {"audio", "video", "subtitle"}
      # 1 <= trackIndex <= GetTrackCount(trackType).
  DeleteClips([timelineItems], Bool) --> Bool # Deletes specified TimelineItems from the timeline, performing ripple delete if the second argument is True. Second argument is optional (The default for this is False)
  SetClipsLinked([timelineItems], Bool) --> Bool # Links or unlinks the specified TimelineItems depending on second argument.
  GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType).
  AddMarker(frameId, color, name, note, duration, customData) --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
  GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
      # Example: a value of {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} indicates a single green marker at timeline offset 96
  GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData.
  UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers.
  GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position.
  DeleteMarkersByColor(color) --> Bool # Deletes all timeline markers of the specified color. An "All" argument is supported and deletes all timeline markers.
  DeleteMarkerAtFrame(frameNum) --> Bool # Deletes the timeline marker at the given frame number.
  DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
  ApplyGradeFromDRX(path, gradeMode, item1, item2, ...) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
  ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
  GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages.
  SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages.
  GetCurrentVideoItem() --> item # Returns the current video timeline item.
  GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page.
      # An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder.
  GetTrackName(trackType, trackIndex) --> string # Returns the track name for track indicated by trackType ("audio", "video" or "subtitle") and index. 1 <= trackIndex <= GetTrackCount(trackType).
  SetTrackName(trackType, trackIndex, name) --> Bool # Sets the track name (string) for track indicated by trackType ("audio", "video" or "subtitle") and index. 1 <= trackIndex <= GetTrackCount(trackType).
  DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success.
  CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item.
  CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item.
  ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys:
      # "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default
      # "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default
      # "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default
      # "useSizingInfo": Bool, specifies if sizing information should be used, False by default
      # "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default
      # "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default
      # "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False
      # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True
      # "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder
  Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format.
      # Refer to section "Looking up timeline export properties" for information on the parameters.
  GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information.
  SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information.
  InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline.
  InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline.
  InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline.
  InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline.
  InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline.
  InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline.
  GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object.
  GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects.
  GetUniqueId() --> string # Returns a unique ID for the timeline
  CreateSubtitlesFromAudio({autoCaptionSettings}) --> Bool # Creates subtitles from audio for the timeline.
      # Takes in optional dictionary {autoCaptionSettings}. Check 'Auto Caption Settings' subsection below for more information.
      # Returns True on success, False otherwise.
  DetectSceneCuts() --> Bool # Detects and makes scene cuts along the timeline. Returns True if successful, False otherwise.
  ConvertTimelineToStereo() --> Bool # Converts timeline to stereo. Returns True if successful; False otherwise.
  GetNodeGraph() --> Graph # Returns the timeline's node graph object.
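A short sketch walking the current timeline with GetItemListInTrack (the per-item getters are documented under TimelineItem below):

    # Walk every clip on the first video track of the current timeline.
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    for item in timeline.GetItemListInTrack("video", 1):
        print(item.GetName(), item.GetStart(), item.GetEnd())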
|
||||
|
||||
TimelineItem
|
||||
GetName() --> string # Returns the item name.
|
||||
GetDuration() --> int # Returns the item duration.
|
||||
GetEnd() --> int # Returns the end frame position on the timeline.
|
||||
GetFusionCompCount() --> int # Returns number of Fusion compositions associated with the timeline item.
|
||||
GetFusionCompByIndex(compIndex) --> fusionComp # Returns the Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount()
|
||||
GetFusionCompNameList() --> [names...] # Returns a list of Fusion composition names associated with the timeline item.
|
||||
GetFusionCompByName(compName) --> fusionComp # Returns the Fusion composition object based on given name.
|
||||
GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side.
|
||||
GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side.
|
||||
GetStart() --> int # Returns the start frame position on the timeline.
|
||||
SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue"
|
||||
# Refer to "Looking up Timeline item properties" for more information
|
||||
GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key
|
||||
# if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys
|
||||
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
|
||||
customData)
|
||||
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
|
||||
# Example: a value of {96.0: {'color': 'Green', 'duration': 1.0, 'note': '', 'name': 'Marker 1', 'customData': ''}, ...} indicates a single green marker at clip offset 96
|
||||
GetMarkerByCustomData(customData) --> {markers...} # Returns marker {information} for the first matching marker with specified customData.
|
||||
UpdateMarkerCustomData(frameId, customData) --> Bool # Updates customData (string) for the marker at given frameId position. CustomData is not exposed via UI and is useful for scripting developer to attach any user specific data to markers.
|
||||
GetMarkerCustomData(frameId) --> string # Returns customData string for the marker at given frameId position.
|
||||
DeleteMarkersByColor(color) --> Bool # Delete all markers of the specified color from the timeline item. "All" as argument deletes all color markers.
|
||||
DeleteMarkerAtFrame(frameNum) --> Bool # Delete marker at frame number from the timeline item.
|
||||
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
|
||||
AddFlag(color) --> Bool  # Adds a flag with given color (string).
GetFlagList() --> [colors...]  # Returns a list of flag colors assigned to the item.
ClearFlags(color) --> Bool  # Clears flags of the specified color. An "All" argument is supported to clear all flags.
GetClipColor() --> string  # Returns the item color as a string.
SetClipColor(colorName) --> Bool  # Sets the item color based on the colorName (string).
ClearClipColor() --> Bool  # Clears the item color.
AddFusionComp() --> fusionComp  # Adds a new Fusion composition associated with the timeline item.
ImportFusionComp(path) --> fusionComp  # Imports a Fusion composition from the given file path by creating and adding a new composition for the item.
ExportFusionComp(path, compIndex) --> Bool  # Exports the Fusion composition based on the given index to the path provided.
DeleteFusionCompByName(compName) --> Bool  # Deletes the named Fusion composition.
LoadFusionCompByName(compName) --> fusionComp  # Loads the named Fusion composition as the active composition.
RenameFusionCompByName(oldName, newName) --> Bool  # Renames the Fusion composition identified by oldName.
AddVersion(versionName, versionType) --> Bool  # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote).
GetCurrentVersion() --> {versionName...}  # Returns the current version of the video clip. The returned value will have the keys versionName and versionType (0 - local, 1 - remote).
DeleteVersionByName(versionName, versionType) --> Bool  # Deletes a color version by name and versionType (0 - local, 1 - remote).
LoadVersionByName(versionName, versionType) --> Bool  # Loads a named color version as the active version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType) --> Bool  # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
GetVersionNameList(versionType) --> [names...]  # Returns a list of all color versions for the given versionType (0 - local, 1 - remote).
GetMediaPoolItem() --> MediaPoolItem  # Returns the media pool item corresponding to the timeline item if one exists.
GetStereoConvergenceValues() --> {keyframes...}  # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
GetStereoLeftFloatingWindowParams() --> {keyframes...}  # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. The value at a particular offset includes the left, right, top and bottom floating window values.
GetStereoRightFloatingWindowParams() --> {keyframes...}  # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. The value at a particular offset includes the left, right, top and bottom floating window values.
ApplyArriCdlLut() --> Bool  # Applies ARRI CDL and LUT. Returns True if successful, False otherwise.
SetCDL([CDL map]) --> Bool  # Keys of the map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
  # Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
AddTake(mediaPoolItem, startFrame, endFrame) --> Bool  # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents are added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
GetSelectedTakeIndex() --> int  # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
GetTakesCount() --> int  # Returns the number of takes in the take selector, or 0 if the clip is not a take selector.
GetTakeByIndex(idx) --> {takeInfo...}  # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for the specified index.
DeleteTakeByIndex(idx) --> Bool  # Deletes a take by index, 1 <= idx <= number of takes.
SelectTakeByIndex(idx) --> Bool  # Selects a take by index, 1 <= idx <= number of takes.
FinalizeTake() --> Bool  # Finalizes take selection.
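# Example (a sketch of the take workflow; `timeline_item` (TimelineItem) and `alt_media` (MediaPoolItem) are assumed to be obtained elsewhere):
    if timeline_item.AddTake(alt_media):               # full clip extents by default
        last = timeline_item.GetTakesCount()
        timeline_item.SelectTakeByIndex(last)          # take indices are 1-based
        info = timeline_item.GetTakeByIndex(last)      # {"startFrame", "endFrame", "mediaPoolItem"}
        timeline_item.FinalizeTake()                   # bake the selected take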
CopyGrades([tgtTimelineItems]) --> Bool  # Copies the current grade to all the items in the tgtTimelineItems list. Returns True on success and False if any error occurred.
SetClipEnabled(Bool) --> Bool  # Sets clip enabled based on argument.
GetClipEnabled() --> Bool  # Gets the clip enabled status.
UpdateSidecar() --> Bool  # Updates the sidecar file for BRAW clips or the RMD file for R3D clips.
GetUniqueId() --> string  # Returns a unique ID for the timeline item.
LoadBurnInPreset(presetName) --> Bool  # Loads a user defined data burn-in preset for the clip when supplied presetName (string). Returns True if successful.
CreateMagicMask(mode) --> Bool  # Returns True if magic mask was created successfully, False otherwise. mode can be "F" (forward), "B" (backward), or "BI" (bidirectional).
RegenerateMagicMask() --> Bool  # Returns True if magic mask was regenerated successfully, False otherwise.
Stabilize() --> Bool  # Returns True if stabilization was successful, False otherwise.
SmartReframe() --> Bool  # Performs Smart Reframe. Returns True if successful, False otherwise.
GetNodeGraph() --> Graph  # Returns the clip's node graph object.
GetColorGroup() --> ColorGroup  # Returns the clip's color group if one exists.
AssignToColorGroup(ColorGroup) --> Bool  # Returns True if the TiItem is successfully assigned to the given ColorGroup. ColorGroup must be an existing group in the current project.
RemoveFromColorGroup() --> Bool  # Returns True if the TiItem is successfully removed from the ColorGroup it is in.
ExportLUT(exportType, path) --> Bool  # Exports LUTs from the tiItem referring to the value passed in 'exportType' (enum) for LUT size. Refer to the 'ExportLUT notes' section for possible values.
  # Saves the generated LUT in the provided 'path' (string). 'path' should include the intended file name.
  # If an empty or incorrect extension is provided, the appropriate extension (.cube/.vlt) will be appended at the end of the path.

Gallery
  GetAlbumName(galleryStillAlbum) --> string  # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
  SetAlbumName(galleryStillAlbum, albumName) --> Bool  # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
  GetCurrentStillAlbum() --> galleryStillAlbum  # Returns the current album as a GalleryStillAlbum object.
  SetCurrentStillAlbum(galleryStillAlbum) --> Bool  # Sets the current album to the GalleryStillAlbum object 'galleryStillAlbum'.
  GetGalleryStillAlbums() --> [galleryStillAlbum]  # Returns the gallery albums as a list of GalleryStillAlbum objects.

GalleryStillAlbum
  GetStills() --> [galleryStill]  # Returns the list of GalleryStill objects in the album.
  GetLabel(galleryStill) --> string  # Returns the label of the galleryStill.
  SetLabel(galleryStill, label) --> Bool  # Sets the new 'label' on the GalleryStill object 'galleryStill'.
  ImportStills([filePaths]) --> Bool  # Imports a GalleryStill from each filePath in the [filePaths] list. Returns True if at least one still is imported successfully, False otherwise.
  ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool  # Exports the list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm, drx).
  DeleteStills([galleryStill]) --> Bool  # Deletes the specified list of GalleryStill objects '[galleryStill]'.

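# Example (a sketch tying the Gallery classes together; `project` is assumed to be the current Project, and Project.GetGallery() is assumed to return the Gallery object):
    gallery = project.GetGallery()
    album = gallery.GetCurrentStillAlbum()
    stills = album.GetStills()
    for still in stills:
        print(album.GetLabel(still))
    album.ExportStills(stills, "/tmp/stills", "shot", "png")   # illustrative path and prefix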
GalleryStill  # This class does not provide any API functions but the object type is used by functions in other classes.

Graph
  GetNumNodes() --> int  # Returns the number of nodes in the graph.
  SetLUT(nodeIndex, lutPath) --> Bool  # Sets the LUT on the node mapping the node index provided, 1 <= nodeIndex <= self.GetNumNodes().
    # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
    # The operation is successful for valid LUT paths that Resolve has already discovered (see Project.RefreshLUTList).
  GetLUT(nodeIndex) --> String  # Gets the relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
  GetNodeLabel(nodeIndex) --> string  # Returns the label of the node at nodeIndex.
  GetToolsInNode(nodeIndex) --> [toolsList]  # Returns toolsList (list of strings) of the tools used in the node indicated by the given nodeIndex (int).

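# Example (a sketch; `timeline_item` is an assumed TimelineItem, and the LUT path is illustrative - it must already be known to Resolve, see Project.RefreshLUTList):
    graph = timeline_item.GetNodeGraph()
    for node_index in range(1, graph.GetNumNodes() + 1):
        print(node_index, graph.GetNodeLabel(node_index), graph.GetToolsInNode(node_index))
    graph.SetLUT(1, "Film Looks/Rec709 Kodak 2383 D60.cube")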
ColorGroup
  GetName() --> String  # Returns the name (string) of the ColorGroup.
  SetName(groupName) --> Bool  # Renames the ColorGroup to groupName (string).
  GetClipsInTimeline(Timeline=CurrTimeline) --> [TimelineItem]  # Returns a list of TimelineItems that are in the color group in the given Timeline. Timeline is the current timeline by default.
  GetPreClipNodeGraph() --> Graph  # Returns the ColorGroup Pre-clip graph.
  GetPostClipNodeGraph() --> Graph  # Returns the ColorGroup Post-clip graph.

List and Dict Data Structures
-----------------------------
Besides primitive data types, Resolve's Python API mainly uses list and dict data structures. Lists are denoted by [ ... ] and dicts are denoted by { ... } above.
As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
Similarly the Lua API implements "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.

Keyframe Mode information
-------------------------
This section covers additional notes for the functions Resolve.GetKeyframeMode() and Resolve.SetKeyframeMode(keyframeMode).

'keyframeMode' can be one of the following enums:
- resolve.KEYFRAME_MODE_ALL == 0
- resolve.KEYFRAME_MODE_COLOR == 1
- resolve.KEYFRAME_MODE_SIZING == 2

Integer values returned by Resolve.GetKeyframeMode() will correspond to the enums above.

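# Example (a sketch; `resolve` is the scripting entry object):
    mode = resolve.GetKeyframeMode()                   # 0, 1 or 2, per the enums above
    if mode != resolve.KEYFRAME_MODE_COLOR:
        resolve.SetKeyframeMode(resolve.KEYFRAME_MODE_COLOR)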
Cloud Projects Settings
--------------------------------------
This section covers additional notes for the functions "ProjectManager:CreateCloudProject", "ProjectManager:ImportCloudProject" and "ProjectManager:RestoreCloudProject".

All three functions take in a {cloudSettings} dict that has the following keys:
* resolve.CLOUD_SETTING_PROJECT_NAME: String, ["" by default]
* resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: String, ["" by default]
* resolve.CLOUD_SETTING_IS_COLLAB: Bool, [False by default]
* resolve.CLOUD_SETTING_SYNC_MODE: syncMode (see below), [resolve.CLOUD_SYNC_PROXY_ONLY by default]
* resolve.CLOUD_SETTING_IS_CAMERA_ACCESS: Bool, [False by default]

Where syncMode is one of the following values:
* resolve.CLOUD_SYNC_NONE,
* resolve.CLOUD_SYNC_PROXY_ONLY,
* resolve.CLOUD_SYNC_PROXY_AND_ORIG

All three of "ProjectManager:CreateCloudProject", "ProjectManager:ImportCloudProject" and "ProjectManager:RestoreCloudProject" require resolve.PROJECT_MEDIA_PATH to be defined. "ProjectManager:CreateCloudProject" also requires resolve.PROJECT_NAME to be defined.

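# Example (a sketch; `projectManager` is assumed to come from resolve.GetProjectManager(), and the name/path values are illustrative):
    cloud_settings = {
        resolve.CLOUD_SETTING_PROJECT_NAME: "MyCloudProject",
        resolve.CLOUD_SETTING_PROJECT_MEDIA_PATH: "/Volumes/media/cloud",
        resolve.CLOUD_SETTING_IS_COLLAB: True,
        resolve.CLOUD_SETTING_SYNC_MODE: resolve.CLOUD_SYNC_PROXY_ONLY,
    }
    project = projectManager.CreateCloudProject(cloud_settings)  # return value assumed to be the new Project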
Looking up Project and Clip properties
--------------------------------------
This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and "MediaPoolItem:SetClipProperty". These functions are used to get and set properties otherwise available to the user through the Project Settings and the Clip Attributes dialogs.

The functions follow a key-value pair format, where each property is identified by a key (the settingName or propertyName parameter) and possesses a value (typically a text value). Keys and values are designed to be easily correlated with parameter names and values in the Resolve UI. Explicitly enumerated values for some parameters are listed below.

Some properties may be read only - these include intrinsic clip properties like date created or sample rate, and properties that can be disabled in specific application contexts (e.g. custom colorspaces in an ACES workflow, or output sizing parameters when behavior is set to match timeline).

Getting values:
Invoke "Project:GetSetting", "Timeline:GetSetting" or "MediaPoolItem:GetClipProperty" with the appropriate property key. To get a snapshot of all queryable properties (keys and values), you can call "Project:GetSetting", "Timeline:GetSetting" or "MediaPoolItem:GetClipProperty" without parameters (or with a NoneType or a blank property key). Using specific keys to query individual properties will be faster. Note that getting a property using an invalid key will return a trivial result.

Setting values:
Invoke "Project:SetSetting", "Timeline:SetSetting" or "MediaPoolItem:SetClipProperty" with the appropriate property key and a valid value. When setting a parameter, please check the return value to ensure the success of the operation. You can troubleshoot the validity of keys and values by setting the desired result from the UI and checking property snapshots before and after the change.

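# Example (a sketch; `project` is assumed to be the current Project object):
    snapshot = project.GetSetting()                    # all queryable keys and values
    fps = project.GetSetting("timelineFrameRate")      # one specific key, faster
    if not project.SetSetting("timelineFrameRate", "24"):
        print("setting rejected - key or value may be invalid in this context")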
The following Project properties have specifically enumerated values:
"superScale" - the property value is an enumerated integer between 0 and 4 with these meanings: 0=Auto, 1=no scaling, and 2, 3 and 4 represent the Super Scale multipliers 2x, 3x and 4x.
    For the Super Scale multiplier '2x Enhanced', exactly 4 arguments must be passed as outlined below. If fewer than 4 arguments are passed, it will default to 2x.
    Affects:
    • x = Project:GetSetting('superScale') and Project:SetSetting('superScale', x)
    • for '2x Enhanced' --> Project:SetSetting('superScale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue is a float in the range [0.0, 1.0] and noiseReductionValue is a float in the range [0.0, 1.0]

"timelineFrameRate" - the property value is one of the frame rates available to the user in project settings under the "Timeline frame rate" option. Drop Frame can be configured for supported frame rates by appending the frame rate with "DF", e.g. "29.97 DF" will enable drop frame and "29.97" will disable drop frame.
    Affects:
    • x = Project:GetSetting('timelineFrameRate') and Project:SetSetting('timelineFrameRate', x)

The following Clip properties have specifically enumerated values:
"Super Scale" - the property value is an enumerated integer between 1 and 4 with these meanings: 1=no scaling, and 2, 3 and 4 represent the Super Scale multipliers 2x, 3x and 4x.
    For the Super Scale multiplier '2x Enhanced', exactly 4 arguments must be passed as outlined below. If fewer than 4 arguments are passed, it will default to 2x.
    Affects:
    • x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x)
    • for '2x Enhanced' --> MediaPoolItem:SetClipProperty('Super Scale', 2, sharpnessValue, noiseReductionValue), where sharpnessValue is a float in the range [0.0, 1.0] and noiseReductionValue is a float in the range [0.0, 1.0]

Auto Caption Settings
----------------------
This section covers the supported settings for the method Timeline.CreateSubtitlesFromAudio({autoCaptionSettings}).

The parameter is a dictionary containing the following keys:
* resolve.SUBTITLE_LANGUAGE: languageID (see below), [resolve.AUTO_CAPTION_AUTO by default]
* resolve.SUBTITLE_CAPTION_PRESET: presetType (see below), [resolve.AUTO_CAPTION_SUBTITLE_DEFAULT by default]
* resolve.SUBTITLE_CHARS_PER_LINE: Number between 1 and 60 inclusive, [42 by default]
* resolve.SUBTITLE_LINE_BREAK: lineBreakType (see below), [resolve.AUTO_CAPTION_LINE_SINGLE by default]
* resolve.SUBTITLE_GAP: Number between 0 and 10 inclusive, [0 by default]

Note that the default values for some keys may change based on values defined for other keys, as per the UI.
For example, if the following dictionary is supplied,
    CreateSubtitlesFromAudio({resolve.SUBTITLE_LANGUAGE: resolve.AUTO_CAPTION_KOREAN,
                              resolve.SUBTITLE_CAPTION_PRESET: resolve.AUTO_CAPTION_NETFLIX})
the default value for resolve.SUBTITLE_CHARS_PER_LINE will be 16 instead of 42.

languageIDs:
* resolve.AUTO_CAPTION_AUTO
* resolve.AUTO_CAPTION_DANISH
* resolve.AUTO_CAPTION_DUTCH
* resolve.AUTO_CAPTION_ENGLISH
* resolve.AUTO_CAPTION_FRENCH
* resolve.AUTO_CAPTION_GERMAN
* resolve.AUTO_CAPTION_ITALIAN
* resolve.AUTO_CAPTION_JAPANESE
* resolve.AUTO_CAPTION_KOREAN
* resolve.AUTO_CAPTION_MANDARIN_SIMPLIFIED
* resolve.AUTO_CAPTION_MANDARIN_TRADITIONAL
* resolve.AUTO_CAPTION_NORWEGIAN
* resolve.AUTO_CAPTION_PORTUGUESE
* resolve.AUTO_CAPTION_RUSSIAN
* resolve.AUTO_CAPTION_SPANISH
* resolve.AUTO_CAPTION_SWEDISH

presetTypes:
* resolve.AUTO_CAPTION_SUBTITLE_DEFAULT
* resolve.AUTO_CAPTION_TELETEXT
* resolve.AUTO_CAPTION_NETFLIX

lineBreakTypes:
* resolve.AUTO_CAPTION_LINE_SINGLE
* resolve.AUTO_CAPTION_LINE_DOUBLE

Looking up Render Settings
--------------------------
This section covers the supported settings for the method SetRenderSettings({settings}).

The parameter is a dictionary containing the following keys:
- "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored)
- "MarkIn": int
- "MarkOut": int
- "TargetDir": string
- "CustomName": string
- "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
- "ExportVideo": Bool
- "ExportAudio": Bool
- "FormatWidth": int
- "FormatHeight": int
- "FrameRate": float (examples: 23.976, 24)
- "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
- "VideoQuality": possible values for the current codec (if applicable):
    - 0 (int) - will set quality to automatic
    - [1 -> MAX] (int) - will set input bit rate
    - ["Least", "Low", "Medium", "High", "Best"] (string) - will set input quality level
- "AudioCodec": string (example: "aac")
- "AudioBitDepth": int
- "AudioSampleRate": int
- "ColorSpaceTag": string (examples: "Same as Project", "AstroDesign")
- "GammaTag": string (examples: "Same as Project", "ACEScct")
- "ExportAlpha": Bool
- "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265.
- "MultiPassEncode": Bool. Can only be set for H.264.
- "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true.
- "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats.

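# Example (a sketch; `project` is assumed to be the current Project, and the values are illustrative):
    project.SetRenderSettings({
        "SelectAllFrames": False,
        "MarkIn": 86400,
        "MarkOut": 86500,
        "TargetDir": "/tmp/renders",
        "CustomName": "shot010_preview",
        "ExportVideo": True,
        "ExportAudio": False,
    })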
Looking up timeline export properties
-------------------------------------
This section covers the parameters for Export(fileName, exportType, exportSubtype).

exportType can be one of the following constants:
- resolve.EXPORT_AAF
- resolve.EXPORT_DRT
- resolve.EXPORT_EDL
- resolve.EXPORT_FCP_7_XML
- resolve.EXPORT_FCPXML_1_8
- resolve.EXPORT_FCPXML_1_9
- resolve.EXPORT_FCPXML_1_10
- resolve.EXPORT_HDR_10_PROFILE_A
- resolve.EXPORT_HDR_10_PROFILE_B
- resolve.EXPORT_TEXT_CSV
- resolve.EXPORT_TEXT_TAB
- resolve.EXPORT_DOLBY_VISION_VER_2_9
- resolve.EXPORT_DOLBY_VISION_VER_4_0
- resolve.EXPORT_DOLBY_VISION_VER_5_1
- resolve.EXPORT_OTIO
- resolve.EXPORT_ALE
- resolve.EXPORT_ALE_CDL
exportSubtype can be one of the following enums:
- resolve.EXPORT_NONE
- resolve.EXPORT_AAF_NEW
- resolve.EXPORT_AAF_EXISTING
- resolve.EXPORT_CDL
- resolve.EXPORT_SDL
- resolve.EXPORT_MISSING_CLIPS
Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the remaining exportTypes, exportSubtype is ignored.
When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
Note: Replace 'resolve.' in the constants above if a different Resolve class instance name is used.

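# Example (a sketch; `timeline` is assumed to be the current Timeline object, paths are illustrative):
    timeline.Export("/tmp/cut01.aaf", resolve.EXPORT_AAF, resolve.EXPORT_AAF_NEW)
    timeline.Export("/tmp/cut01.otio", resolve.EXPORT_OTIO, resolve.EXPORT_NONE)   # exportSubtype ignored here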
Unsupported exportType types
---------------------------------
Starting with DaVinci Resolve 18.1, the following export types are not supported:
- resolve.EXPORT_FCPXML_1_3
- resolve.EXPORT_FCPXML_1_4
- resolve.EXPORT_FCPXML_1_5
- resolve.EXPORT_FCPXML_1_6
- resolve.EXPORT_FCPXML_1_7


Looking up Timeline item properties
-----------------------------------
This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set the properties listed below.

The supported keys with their accepted values are:
"Pan": floating point values from -4.0*width to 4.0*width
"Tilt": floating point values from -4.0*height to 4.0*height
"ZoomX": floating point values from 0.0 to 100.0
"ZoomY": floating point values from 0.0 to 100.0
"ZoomGang": a boolean value
"RotationAngle": floating point values from -360.0 to 360.0
"AnchorPointX": floating point values from -4.0*width to 4.0*width
"AnchorPointY": floating point values from -4.0*height to 4.0*height
"Pitch": floating point values from -1.5 to 1.5
"Yaw": floating point values from -1.5 to 1.5
"FlipX": boolean value for flipping horizontally
"FlipY": boolean value for flipping vertically
"CropLeft": floating point values from 0.0 to width
"CropRight": floating point values from 0.0 to width
"CropTop": floating point values from 0.0 to height
"CropBottom": floating point values from 0.0 to height
"CropSoftness": floating point values from -100.0 to 100.0
"CropRetain": boolean value for the "Retain Image Position" checkbox
"DynamicZoomEase": a value from the following constants
    - DYNAMIC_ZOOM_EASE_LINEAR = 0
    - DYNAMIC_ZOOM_EASE_IN
    - DYNAMIC_ZOOM_EASE_OUT
    - DYNAMIC_ZOOM_EASE_IN_AND_OUT
"CompositeMode": a value from the following constants
    - COMPOSITE_NORMAL = 0
    - COMPOSITE_ADD
    - COMPOSITE_SUBTRACT
    - COMPOSITE_DIFF
    - COMPOSITE_MULTIPLY
    - COMPOSITE_SCREEN
    - COMPOSITE_OVERLAY
    - COMPOSITE_HARDLIGHT
    - COMPOSITE_SOFTLIGHT
    - COMPOSITE_DARKEN
    - COMPOSITE_LIGHTEN
    - COMPOSITE_COLOR_DODGE
    - COMPOSITE_COLOR_BURN
    - COMPOSITE_EXCLUSION
    - COMPOSITE_HUE
    - COMPOSITE_SATURATE
    - COMPOSITE_COLORIZE
    - COMPOSITE_LUMA_MASK
    - COMPOSITE_DIVIDE
    - COMPOSITE_LINEAR_DODGE
    - COMPOSITE_LINEAR_BURN
    - COMPOSITE_LINEAR_LIGHT
    - COMPOSITE_VIVID_LIGHT
    - COMPOSITE_PIN_LIGHT
    - COMPOSITE_HARD_MIX
    - COMPOSITE_LIGHTER_COLOR
    - COMPOSITE_DARKER_COLOR
    - COMPOSITE_FOREGROUND
    - COMPOSITE_ALPHA
    - COMPOSITE_INVERTED_ALPHA
    - COMPOSITE_LUM
    - COMPOSITE_INVERTED_LUM
"Opacity": floating point value from 0.0 to 100.0
"Distortion": floating point value from -1.0 to 1.0
"RetimeProcess": a value from the following constants
    - RETIME_USE_PROJECT = 0
    - RETIME_NEAREST
    - RETIME_FRAME_BLEND
    - RETIME_OPTICAL_FLOW
"MotionEstimation": a value from the following constants
    - MOTION_EST_USE_PROJECT = 0
    - MOTION_EST_STANDARD_FASTER
    - MOTION_EST_STANDARD_BETTER
    - MOTION_EST_ENHANCED_FASTER
    - MOTION_EST_ENHANCED_BETTER
    - MOTION_EST_SPEED_WARP_BETTER
    - MOTION_EST_SPEED_WARP_FASTER
"Scaling": a value from the following constants
    - SCALE_USE_PROJECT = 0
    - SCALE_CROP
    - SCALE_FIT
    - SCALE_FILL
    - SCALE_STRETCH
"ResizeFilter": a value from the following constants
    - RESIZE_FILTER_USE_PROJECT = 0
    - RESIZE_FILTER_SHARPER
    - RESIZE_FILTER_SMOOTHER
    - RESIZE_FILTER_BICUBIC
    - RESIZE_FILTER_BILINEAR
    - RESIZE_FILTER_BESSEL
    - RESIZE_FILTER_BOX
    - RESIZE_FILTER_CATMULL_ROM
    - RESIZE_FILTER_CUBIC
    - RESIZE_FILTER_GAUSSIAN
    - RESIZE_FILTER_LANCZOS
    - RESIZE_FILTER_MITCHELL
    - RESIZE_FILTER_NEAREST_NEIGHBOR
    - RESIZE_FILTER_QUADRATIC
    - RESIZE_FILTER_SINC
    - RESIZE_FILTER_LINEAR
Values beyond the range will be clipped. width and height are the same as the UI max limits.

The arguments can be passed as a key and value pair, or they can be grouped together into a dictionary (for Python) or table (for Lua) and passed as a single argument.

Getting the value of a key that uses constants will return the number held by the constant.

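# Example (a sketch; `timeline_item` is an assumed TimelineItem):
    timeline_item.SetProperty("ZoomX", 2.0)
    timeline_item.SetProperty({"Pan": 10.0, "Tilt": -5.0})   # grouped dictionary form, single argument
    print(timeline_item.GetProperty("ZoomX"))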
ExportLUT notes
---------------
The following section covers additional notes for TimelineItem.ExportLUT(exportType, path).

Supported values for 'exportType' (enum) are:
- resolve.EXPORT_LUT_17PTCUBE
- resolve.EXPORT_LUT_33PTCUBE
- resolve.EXPORT_LUT_65PTCUBE
- resolve.EXPORT_LUT_PANASONICVLUT

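# Example (a sketch; `timeline_item` is an assumed, graded TimelineItem; path is illustrative):
    timeline_item.ExportLUT(resolve.EXPORT_LUT_33PTCUBE, "/tmp/shot010_grade.cube")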
Deprecated Resolve API Functions
--------------------------------
The following API functions are deprecated.

ProjectManager
  GetProjectsInCurrentFolder() --> {project names...}  # Returns a dict of project names in the current folder.
  GetFoldersInCurrentFolder() --> {folder names...}  # Returns a dict of folder names in the current folder.

Project
  GetPresets() --> {presets...}  # Returns a dict of presets and their information.
  GetRenderJobs() --> {render jobs...}  # Returns a dict of render jobs and their information.
  GetRenderPresets() --> {presets...}  # Returns a dict of render presets and their information.

MediaStorage
  GetMountedVolumes() --> {paths...}  # Returns a dict of folder paths corresponding to mounted volumes displayed in Resolve's Media Storage.
  GetSubFolders(folderPath) --> {paths...}  # Returns a dict of folder paths in the given absolute folder path.
  GetFiles(folderPath) --> {paths...}  # Returns a dict of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
  AddItemsToMediaPool(item1, item2, ...) --> {clips...}  # Adds specified file/folder paths from Media Storage into the current Media Pool folder. Input is one or more file/folder paths. Returns a dict of the MediaPoolItems created.
  AddItemsToMediaPool([items...]) --> {clips...}  # Adds specified file/folder paths from Media Storage into the current Media Pool folder. Input is an array of file/folder paths. Returns a dict of the MediaPoolItems created.

Folder
  GetClips() --> {clips...}  # Returns a dict of clips (items) within the folder.
  GetSubFolders() --> {folders...}  # Returns a dict of subfolders in the folder.

MediaPoolItem
  GetFlags() --> {colors...}  # Returns a dict of flag colors assigned to the item.

Timeline
  GetItemsInTrack(trackType, index) --> {items...}  # Returns a dict of Timeline items on the video or audio track (based on trackType) at the specified index.

TimelineItem
  GetFusionCompNames() --> {names...}  # Returns a dict of Fusion composition names associated with the timeline item.
  GetFlags() --> {colors...}  # Returns a dict of flag colors assigned to the item.
  GetVersionNames(versionType) --> {names...}  # Returns a dict of version names by provided versionType: 0 - local, 1 - remote.
  GetNumNodes() --> int  # Returns the number of nodes in the current graph for the timeline item.
  SetLUT(nodeIndex, lutPath) --> Bool  # Sets the LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
    # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
    # The operation is successful for valid LUT paths that Resolve has already discovered (see Project.RefreshLUTList).
  GetLUT(nodeIndex) --> String  # Gets the relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
  GetNodeLabel(nodeIndex) --> string  # Returns the label of the node at nodeIndex.

Unsupported Resolve API Functions
---------------------------------
The following API (functions and parameters) are no longer supported. Use job IDs instead of indices.

Project
  StartRendering(index1, index2, ...) --> Bool  # Please use unique job ids (string) instead of indices.
  StartRendering([idxs...]) --> Bool  # Please use unique job ids (string) instead of indices.
  DeleteRenderJobByIndex(idx) --> Bool  # Please use unique job ids (string) instead of indices.
  GetRenderJobStatus(idx) --> {status info}  # Please use unique job ids (string) instead of indices.
  GetSetting and SetSetting --> {}  # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI.
                                    # settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available.
13
server_addon/resolve/client/ayon_resolve/__init__.py
Normal file
@@ -0,0 +1,13 @@
from .version import __version__
from .addon import (
    RESOLVE_ADDON_ROOT,
    ResolveAddon,
)


__all__ = (
    "__version__",

    "RESOLVE_ADDON_ROOT",
    "ResolveAddon",
)
22
server_addon/resolve/client/ayon_resolve/addon.py
Normal file
@@ -0,0 +1,22 @@
import os

from ayon_core.addon import AYONAddon, IHostAddon

from .version import __version__
from .utils import RESOLVE_ADDON_ROOT


class ResolveAddon(AYONAddon, IHostAddon):
    name = "resolve"
    version = __version__
    host_name = "resolve"

    def get_launch_hook_paths(self, app):
        if app.host_name != self.host_name:
            return []
        return [
            os.path.join(RESOLVE_ADDON_ROOT, "hooks")
        ]

    def get_workfile_extensions(self):
        return [".drp"]
133
server_addon/resolve/client/ayon_resolve/api/__init__.py
Normal file
@@ -0,0 +1,133 @@
"""
|
||||
resolve api
|
||||
"""
|
||||
from .utils import (
|
||||
get_resolve_module
|
||||
)
|
||||
|
||||
from .pipeline import (
|
||||
ResolveHost,
|
||||
ls,
|
||||
containerise,
|
||||
update_container,
|
||||
maintained_selection,
|
||||
remove_instance,
|
||||
list_instances
|
||||
)
|
||||
|
||||
from .lib import (
|
||||
maintain_current_timeline,
|
||||
publish_clip_color,
|
||||
get_project_manager,
|
||||
get_current_project,
|
||||
get_current_timeline,
|
||||
get_any_timeline,
|
||||
get_new_timeline,
|
||||
create_bin,
|
||||
get_media_pool_item,
|
||||
create_media_pool_item,
|
||||
create_timeline_item,
|
||||
get_timeline_item,
|
||||
get_video_track_names,
|
||||
get_current_timeline_items,
|
||||
get_pype_timeline_item_by_name,
|
||||
get_timeline_item_pype_tag,
|
||||
set_timeline_item_pype_tag,
|
||||
imprint,
|
||||
set_publish_attribute,
|
||||
get_publish_attribute,
|
||||
create_compound_clip,
|
||||
swap_clips,
|
||||
get_pype_clip_metadata,
|
||||
set_project_manager_to_folder_name,
|
||||
get_otio_clip_instance_data,
|
||||
get_reformated_path
|
||||
)
|
||||
|
||||
from .menu import launch_ayon_menu
|
||||
|
||||
from .plugin import (
|
||||
ClipLoader,
|
||||
TimelineItemLoader,
|
||||
Creator,
|
||||
PublishClip
|
||||
)
|
||||
|
||||
from .workio import (
|
||||
open_file,
|
||||
save_file,
|
||||
current_file,
|
||||
has_unsaved_changes,
|
||||
file_extensions,
|
||||
work_root
|
||||
)
|
||||
|
||||
from .testing_utils import TestGUI
|
||||
|
||||
|
||||
bmdvr = None
|
||||
bmdvf = None
|
||||
|
||||
__all__ = [
|
||||
"bmdvr",
|
||||
"bmdvf",
|
||||
|
||||
# pipeline
|
||||
"ResolveHost",
|
||||
"ls",
|
||||
"containerise",
|
||||
"update_container",
|
||||
"maintained_selection",
|
||||
"remove_instance",
|
||||
"list_instances",
|
||||
|
||||
# utils
|
||||
"get_resolve_module",
|
||||
|
||||
# lib
|
||||
"maintain_current_timeline",
|
||||
"publish_clip_color",
|
||||
"get_project_manager",
|
||||
"get_current_project",
|
||||
"get_current_timeline",
|
||||
"get_any_timeline",
|
||||
"get_new_timeline",
|
||||
"create_bin",
|
||||
"get_media_pool_item",
|
||||
"create_media_pool_item",
|
||||
"create_timeline_item",
|
||||
"get_timeline_item",
|
||||
"get_video_track_names",
|
||||
"get_current_timeline_items",
|
||||
"get_pype_timeline_item_by_name",
|
||||
"get_timeline_item_pype_tag",
|
||||
"set_timeline_item_pype_tag",
|
||||
"imprint",
|
||||
"set_publish_attribute",
|
||||
"get_publish_attribute",
|
||||
"create_compound_clip",
|
||||
"swap_clips",
|
||||
"get_pype_clip_metadata",
|
||||
"set_project_manager_to_folder_name",
|
||||
"get_otio_clip_instance_data",
|
||||
"get_reformated_path",
|
||||
|
||||
# menu
|
||||
"launch_ayon_menu",
|
||||
|
||||
# plugin
|
||||
"ClipLoader",
|
||||
"TimelineItemLoader",
|
||||
"Creator",
|
||||
"PublishClip",
|
||||
|
||||
# workio
|
||||
"open_file",
|
||||
"save_file",
|
||||
"current_file",
|
||||
"has_unsaved_changes",
|
||||
"file_extensions",
|
||||
"work_root",
|
||||
|
||||
"TestGUI"
|
||||
]
|
||||
52
server_addon/resolve/client/ayon_resolve/api/action.py
Normal file
@@ -0,0 +1,52 @@
# absolute_import is needed to counter the `module has no cmds error` in Maya
from __future__ import absolute_import

import pyblish.api


from ayon_core.pipeline.publish import get_errored_instances_from_context


class SelectInvalidAction(pyblish.api.Action):
    """Select invalid clips in Resolve timeline when plug-in failed.

    To retrieve the invalid nodes this assumes a static `get_invalid()`
    method is available on the plugin.

    """
    label = "Select invalid"
    on = "failed"  # This action is only available on a failed plug-in
    icon = "search"  # Icon from Awesome Icon

    def process(self, context, plugin):

        try:
            from .lib import get_project_manager
            pm = get_project_manager()
            self.log.debug(pm)
        except ImportError:
            raise ImportError("Current host is not Resolve")

        errored_instances = get_errored_instances_from_context(context,
                                                               plugin=plugin)

        # Get the invalid nodes for the plug-ins
        self.log.info("Finding invalid clips..")
        invalid = list()
        for instance in errored_instances:
            invalid_nodes = plugin.get_invalid(instance)
            if invalid_nodes:
                if isinstance(invalid_nodes, (list, tuple)):
                    invalid.extend(invalid_nodes)
                else:
                    self.log.warning("Plug-in returned to be invalid, "
                                     "but has no selectable nodes.")

        # Ensure unique (process each node only once)
        invalid = list(set(invalid))

        if invalid:
            self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
            # TODO: select resolve timeline track items in current timeline
        else:
            self.log.info("No invalid nodes found.")
949
server_addon/resolve/client/ayon_resolve/api/lib.py
Normal file
@@ -0,0 +1,949 @@
import sys
import json
import re
import os
import contextlib
from opentimelineio import opentime

from ayon_core.lib import Logger
from ayon_core.pipeline.editorial import (
    is_overlapping_otio_ranges,
    frames_to_timecode
)

from ..otio import davinci_export as otio_export

log = Logger.get_logger(__name__)

self = sys.modules[__name__]
self.project_manager = None
self.media_storage = None

# OpenPype sequential rename variables
self.rename_index = 0
self.rename_add = 0

self.publish_clip_color = "Pink"
self.pype_marker_workflow = True

# OpenPype compound clip workflow variable
self.pype_tag_name = "VFX Notes"

# OpenPype marker workflow variables
self.pype_marker_name = "OpenPypeData"
self.pype_marker_duration = 1
self.pype_marker_color = "Mint"
self.temp_marker_frame = None

# OpenPype default timeline
self.pype_timeline_name = "OpenPypeTimeline"


@contextlib.contextmanager
def maintain_current_timeline(to_timeline: object,
                              from_timeline: object = None):
    """Maintain current timeline selection during context

    Attributes:
        from_timeline (resolve.Timeline)[optional]:
    Example:
        >>> print(from_timeline.GetName())
        timeline1
        >>> print(to_timeline.GetName())
        timeline2

        >>> with maintain_current_timeline(to_timeline):
        ...     print(get_current_timeline().GetName())
        timeline2

        >>> print(get_current_timeline().GetName())
        timeline1
    """
    project = get_current_project()
    working_timeline = from_timeline or project.GetCurrentTimeline()

    # switch to the input timeline
    project.SetCurrentTimeline(to_timeline)

    try:
        # do the work
        yield
    finally:
        # put the original working timeline back into context
        project.SetCurrentTimeline(working_timeline)


def get_project_manager():
    from . import bmdvr
    if not self.project_manager:
        self.project_manager = bmdvr.GetProjectManager()
    return self.project_manager


def get_media_storage():
    from . import bmdvr
    if not self.media_storage:
        self.media_storage = bmdvr.GetMediaStorage()
    return self.media_storage


def get_current_project():
    """Get current project object.
    """
    return get_project_manager().GetCurrentProject()


def get_current_timeline(new=False):
    """Get current timeline object.

    Args:
        new (bool)[optional]: [DEPRECATED] if True it will create
            a new timeline if none exists

    Returns:
        TODO: will need to reflect future `None`
        object: resolve.Timeline
    """
    project = get_current_project()
    timeline = project.GetCurrentTimeline()

    # return current timeline if any
    if timeline:
        return timeline

    # TODO: [deprecated] and will be removed in future
    if new:
        return get_new_timeline()


def get_any_timeline():
    """Get any timeline object.

    Returns:
        object | None: resolve.Timeline
    """
    project = get_current_project()
    timeline_count = project.GetTimelineCount()
    if timeline_count > 0:
        return project.GetTimelineByIndex(1)


def get_new_timeline(timeline_name: str = None):
    """Get new timeline object.

    Arguments:
        timeline_name (str): New timeline name.

    Returns:
        object: resolve.Timeline
    """
    project = get_current_project()
    media_pool = project.GetMediaPool()
    new_timeline = media_pool.CreateEmptyTimeline(
        timeline_name or self.pype_timeline_name)
    project.SetCurrentTimeline(new_timeline)
    return new_timeline


def create_bin(name: str, root: object = None) -> object:
    """
    Create media pool folder.

    Returns the folder object; if the name does not exist it will create a
    new one. If the input name contains forward or backward slashes, it will
    create all parents and return the last child bin object.

    Args:
        name (str): name of folder / bin, or hierarchical name "parent/name"
        root (resolve.Folder)[optional]: root folder / bin object

    Returns:
        object: resolve.Folder
    """
    # get all variables
    media_pool = get_current_project().GetMediaPool()
    root_bin = root or media_pool.GetRootFolder()

    # create hierarchy of bins in case there is a slash in the name
    if "/" in name.replace("\\", "/"):
        child_bin = None
        for bname in name.split("/"):
            child_bin = create_bin(bname, child_bin or root_bin)
        if child_bin:
            return child_bin
    else:
        created_bin = None
        for subfolder in root_bin.GetSubFolderList():
            if subfolder.GetName() in name:
                created_bin = subfolder

        if not created_bin:
            new_folder = media_pool.AddSubFolder(root_bin, name)
            media_pool.SetCurrentFolder(new_folder)
        else:
            media_pool.SetCurrentFolder(created_bin)

        return media_pool.GetCurrentFolder()


def remove_media_pool_item(media_pool_item: object) -> bool:
    media_pool = get_current_project().GetMediaPool()
    return media_pool.DeleteClips([media_pool_item])


def create_media_pool_item(
        files: list,
        root: object = None,
) -> object:
    """
    Create media pool item.

    Args:
        files (list[str]): list of absolute paths to files
        root (resolve.Folder)[optional]: root folder / bin object

    Returns:
        object: resolve.MediaPoolItem
    """
    # get all variables
    media_pool = get_current_project().GetMediaPool()
    root_bin = root or media_pool.GetRootFolder()

    # make sure the files list is not empty and the first available file exists
    filepath = next((f for f in files if os.path.isfile(f)), None)
    if not filepath:
        raise FileNotFoundError("No file found in input files list")

    # try to search in bin in case the clip already exists
    existing_mpi = get_media_pool_item(filepath, root_bin)

    if existing_mpi:
        return existing_mpi

    # add all data in folder to media pool
    media_pool_items = media_pool.ImportMedia(files)

    return media_pool_items.pop() if media_pool_items else False


def get_media_pool_item(filepath, root: object = None) -> object:
    """
    Return clip if found in folder with use of input file path.

    Args:
        filepath (str): absolute path to a file
        root (resolve.Folder)[optional]: root folder / bin object

    Returns:
        object: resolve.MediaPoolItem
    """
    media_pool = get_current_project().GetMediaPool()
    root = root or media_pool.GetRootFolder()
    fname = os.path.basename(filepath)

    for _mpi in root.GetClipList():
        _mpi_name = _mpi.GetClipProperty("File Name")
        _mpi_name = get_reformated_path(_mpi_name, first=True)
        if fname in _mpi_name:
            return _mpi
    return None


def create_timeline_item(
        media_pool_item: object,
        timeline: object = None,
        timeline_in: int = None,
        source_start: int = None,
        source_end: int = None,
) -> object:
    """
    Add media pool item to current or defined timeline.

    Args:
        media_pool_item (resolve.MediaPoolItem): resolve's object
        timeline (Optional[resolve.Timeline]): resolve's object
        timeline_in (Optional[int]): timeline input frame (sequence frame)
        source_start (Optional[int]): media source input frame (sequence frame)
        source_end (Optional[int]): media source output frame (sequence frame)

    Returns:
        object: resolve.TimelineItem
    """
    # get all variables
    project = get_current_project()
    media_pool = project.GetMediaPool()
    _clip_property = media_pool_item.GetClipProperty
    clip_name = _clip_property("File Name")
    timeline = timeline or get_current_timeline()

    # timing variables
    if all([timeline_in, source_start, source_end]):
        fps = timeline.GetSetting("timelineFrameRate")
        duration = source_end - source_start
        timecode_in = frames_to_timecode(timeline_in, fps)
        timecode_out = frames_to_timecode(timeline_in + duration, fps)
    else:
        timecode_in = None
        timecode_out = None

    # if timeline was used then switch it to current timeline
    with maintain_current_timeline(timeline):
        # Add input mediaPoolItem to clip data
        clip_data = {
            "mediaPoolItem": media_pool_item,
        }

        if source_start:
            clip_data["startFrame"] = source_start
        if source_end:
            clip_data["endFrame"] = source_end
        if timecode_in:
            clip_data["recordFrame"] = timeline_in

        # add to timeline
        media_pool.AppendToTimeline([clip_data])

        output_timeline_item = get_timeline_item(
            media_pool_item, timeline)

    assert output_timeline_item, AssertionError((
        "Clip name '{}' wasn't created on the timeline: '{}' \n\n"
        "Please check if the correct track position is activated, \n"
        "or if a clip is not already on the timeline at \n"
        "position: '{}' out: '{}'. \n\n"
        "Clip data: {}"
    ).format(
        clip_name, timeline.GetName(), timecode_in, timecode_out, clip_data
    ))
    return output_timeline_item


def get_timeline_item(media_pool_item: object,
                      timeline: object = None) -> object:
    """
    Returns clips related to input mediaPoolItem.

    Args:
        media_pool_item (resolve.MediaPoolItem): resolve's object
        timeline (resolve.Timeline)[optional]: resolve's object

    Returns:
        object: resolve.TimelineItem
    """
    clip_name = media_pool_item.GetClipProperty("File Name")
    output_timeline_item = None
    timeline = timeline or get_current_timeline()

    with maintain_current_timeline(timeline):
        # search the timeline for the added clip

        for ti_data in get_current_timeline_items():
            ti_clip_item = ti_data["clip"]["item"]
            ti_media_pool_item = ti_clip_item.GetMediaPoolItem()

            # Skip items that do not have a media pool item, like for example
            # an "Adjustment Clip" or a "Fusion Composition" from the effects
            # toolbox
            if not ti_media_pool_item:
                continue

            if clip_name in ti_media_pool_item.GetClipProperty("File Name"):
                output_timeline_item = ti_clip_item

    return output_timeline_item


def get_video_track_names() -> list:
    tracks = list()
    track_type = "video"
    timeline = get_current_timeline()

    # get all tracks count filtered by track type
    selected_track_count = timeline.GetTrackCount(track_type)

    # loop all tracks and get items
    track_index: int
    for track_index in range(1, (int(selected_track_count) + 1)):
        track_name = timeline.GetTrackName("video", track_index)
        tracks.append(track_name)

    return tracks


def get_current_timeline_items(
        filter: bool = False,
        track_type: str = None,
        track_name: str = None,
        selecting_color: str = None) -> list:
    """Gets all available current timeline track items
    """
    track_type = track_type or "video"
    selecting_color = selecting_color or "Chocolate"
    project = get_current_project()

    # get timeline anyhow
    timeline = (
        get_current_timeline() or
        get_any_timeline() or
        get_new_timeline()
    )
    selected_clips = []

    # get all tracks count filtered by track type
    selected_track_count = timeline.GetTrackCount(track_type)

    # loop all tracks and get items
    _clips = {}
    for track_index in range(1, (int(selected_track_count) + 1)):
        _track_name = timeline.GetTrackName(track_type, track_index)

        # filter out all unmatched track names
        if track_name and _track_name not in track_name:
            continue

        timeline_items = timeline.GetItemListInTrack(
            track_type, track_index)
        _clips[track_index] = timeline_items

        _data = {
            "project": project,
            "timeline": timeline,
            "track": {
                "name": _track_name,
                "index": track_index,
                "type": track_type}
        }
        # get track item object and its color
        for clip_index, ti in enumerate(_clips[track_index]):
            data = _data.copy()
            data["clip"] = {
                "item": ti,
                "index": clip_index
            }
            ti_color = ti.GetClipColor()
            if (filter and selecting_color in ti_color) or not filter:
                selected_clips.append(data)
    return selected_clips


def get_pype_timeline_item_by_name(name: str) -> object:
    """Get timeline item by name.

    Args:
        name (str): name of timeline item

    Returns:
        object: resolve.TimelineItem
    """
    for _ti_data in get_current_timeline_items():
        _ti_clip = _ti_data["clip"]["item"]
        tag_data = get_timeline_item_pype_tag(_ti_clip)
        tag_name = tag_data.get("namespace")
        if not tag_name:
            continue
        if tag_name in name:
            return _ti_clip
    return None


def get_timeline_item_pype_tag(timeline_item):
    """
    Get openpype track item tag created by creator or loader plugin.

    Attributes:
        timeline_item (resolve.TimelineItem): resolve object

    Returns:
        dict: openpype tag data
    """
    return_tag = None

    if self.pype_marker_workflow:
        return_tag = get_pype_marker(timeline_item)
    else:
        media_pool_item = timeline_item.GetMediaPoolItem()

        # get all tags from track item
        _tags = media_pool_item.GetMetadata()
        if not _tags:
            return None
        for key, data in _tags.items():
            # return only the correct tag defined by the global name
            if key in self.pype_tag_name:
                return_tag = json.loads(data)

    return return_tag


def set_timeline_item_pype_tag(timeline_item, data=None):
    """
    Set openpype track item tag to input timeline_item.

    Attributes:
        timeline_item (resolve.TimelineItem): resolve api object

    Returns:
        dict: json loaded data
    """
    data = data or dict()

    # get available openpype tag if any
    tag_data = get_timeline_item_pype_tag(timeline_item)

    if self.pype_marker_workflow:
        # delete tag as it is not updatable
        if tag_data:
            delete_pype_marker(timeline_item)

        tag_data.update(data)
        set_pype_marker(timeline_item, tag_data)
    else:
        if tag_data:
            media_pool_item = timeline_item.GetMediaPoolItem()
            # if the tag exists, update it with the input data
            tag_data.update(data)
            media_pool_item.SetMetadata(
                self.pype_tag_name, json.dumps(tag_data))
        else:
            tag_data = data
            # if no openpype tag is available, create one
            # and add it to the input track item
            timeline_item.SetMetadata(self.pype_tag_name, json.dumps(tag_data))

    return tag_data


def imprint(timeline_item, data=None):
    """
    Adding `Avalon data` into a resolve timeline item tag.

    Also including publish attribute into tag.

    Arguments:
        timeline_item (resolve.TimelineItem): resolve timeline item object
        data (dict): Any data which needs to be imprinted

    Examples:
        data = {
            'folderPath': 'sq020sh0280',
            'productType': 'render',
            'productName': 'productMain'
        }
    """
    data = data or {}

    set_timeline_item_pype_tag(timeline_item, data)

    # add publish attribute
    set_publish_attribute(timeline_item, True)


def set_publish_attribute(timeline_item, value):
    """Set Publish attribute on the input timeline item's tag

    Attribute:
        timeline_item (resolve.TimelineItem): resolve timeline item object
        value (bool): True or False
    """
    tag_data = get_timeline_item_pype_tag(timeline_item)
    tag_data["publish"] = value
    # set data to the publish attribute
    set_timeline_item_pype_tag(timeline_item, tag_data)


def get_publish_attribute(timeline_item):
    """Get Publish attribute from the input timeline item's tag

    Attribute:
        timeline_item (resolve.TimelineItem): resolve timeline item object
    """
    tag_data = get_timeline_item_pype_tag(timeline_item)
    return tag_data["publish"]


def set_pype_marker(timeline_item, tag_data):
    source_start = timeline_item.GetLeftOffset()
    item_duration = timeline_item.GetDuration()
    frame = int(source_start + (item_duration / 2))

    # marker attributes
    frameId = (frame / 10) * 10
    color = self.pype_marker_color
    name = self.pype_marker_name
    note = json.dumps(tag_data)
    duration = (self.pype_marker_duration / 10) * 10

    timeline_item.AddMarker(
        frameId,
        color,
        name,
        note,
        duration
    )


def get_pype_marker(timeline_item):
    timeline_item_markers = timeline_item.GetMarkers()
    for marker_frame, marker in timeline_item_markers.items():
        color = marker["color"]
        name = marker["name"]
        if name == self.pype_marker_name and color == self.pype_marker_color:
            note = marker["note"]
            self.temp_marker_frame = marker_frame
            return json.loads(note)

    return dict()


def delete_pype_marker(timeline_item):
    timeline_item.DeleteMarkerAtFrame(self.temp_marker_frame)
    self.temp_marker_frame = None


def create_compound_clip(clip_data, name, folder):
    """
    Convert timeline object into nested timeline object

    Args:
        clip_data (dict): timeline item object packed into dict
            with project, timeline (sequence)
        name (str): name for compound clip
        folder (resolve.MediaPool.Folder): media pool folder object

    Returns:
        resolve.MediaPoolItem: media pool item with compound clip timeline (cct)
    """
    # get basic objects from data
    project = clip_data["project"]
    timeline = clip_data["timeline"]
    clip = clip_data["clip"]

    # get details of objects
    clip_item = clip["item"]

    mp = project.GetMediaPool()

    # get clip attributes
    clip_attributes = get_clip_attributes(clip_item)

    mp_item = clip_item.GetMediaPoolItem()
    _mp_props = mp_item.GetClipProperty

    mp_first_frame = int(_mp_props("Start"))
    mp_last_frame = int(_mp_props("End"))

    # initialize basic source timing for otio
    ci_l_offset = clip_item.GetLeftOffset()
    ci_duration = clip_item.GetDuration()
    rate = float(_mp_props("FPS"))

    # source rational times
    mp_in_rc = opentime.RationalTime((ci_l_offset), rate)
    mp_out_rc = opentime.RationalTime((ci_l_offset + ci_duration - 1), rate)

    # get frame in and out for clip swapping
    in_frame = opentime.to_frames(mp_in_rc)
    out_frame = opentime.to_frames(mp_out_rc)

    # keep original sequence
    tl_origin = timeline

    # Set current folder to input media_pool_folder:
    mp.SetCurrentFolder(folder)

    # check if clip doesn't exist already:
    clips = folder.GetClipList()
    cct = next((c for c in clips
                if c.GetName() in name), None)

    if cct:
        print(f"Compound clip exists: {cct}")
    else:
        # Create empty timeline in current folder and give name:
        cct = mp.CreateEmptyTimeline(name)

        # check if clip doesn't exist already:
        clips = folder.GetClipList()
        cct = next((c for c in clips
                    if c.GetName() in name), None)
        print(f"Compound clip created: {cct}")

        with maintain_current_timeline(cct, tl_origin):
            # Add input clip to the current timeline:
            mp.AppendToTimeline([{
                "mediaPoolItem": mp_item,
                "startFrame": mp_first_frame,
                "endFrame": mp_last_frame
            }])

    # Add collected metadata and attributes to the compound clip:
    if mp_item.GetMetadata(self.pype_tag_name):
        clip_attributes[self.pype_tag_name] = mp_item.GetMetadata(
            self.pype_tag_name)[self.pype_tag_name]

    # stringify
    clip_attributes = json.dumps(clip_attributes)

    # add attributes to metadata
    for k, v in mp_item.GetMetadata().items():
        cct.SetMetadata(k, v)

    # add metadata to cct
    cct.SetMetadata(self.pype_tag_name, clip_attributes)

    # reset start timecode of the compound clip
    cct.SetClipProperty("Start TC", _mp_props("Start TC"))

    # swap clips on timeline
    swap_clips(clip_item, cct, in_frame, out_frame)

    cct.SetClipColor("Pink")
    return cct

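
# A minimal sketch of driving `create_compound_clip` above. The
# `clip_data` keys mirror what `get_current_timeline_items()` returns;
# the target folder and clip name below are hypothetical.
project = get_current_project()
timeline = get_current_timeline()
media_pool = project.GetMediaPool()
target_folder = media_pool.AddSubFolder(
    media_pool.GetRootFolder(), "compound_clips")

clip_data = {
    "project": project,
    "timeline": timeline,
    "clip": {"item": timeline.GetItemListInTrack("video", 1)[0]},
}
cct = create_compound_clip(clip_data, "sh010_compound", target_folder)
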
def swap_clips(from_clip, to_clip, to_in_frame, to_out_frame):
    """
    Swap clips on the timeline in the given timeline item.

    It adds `to_clip` as a take on the input frame range and activates it.

    Args:
        from_clip (resolve.TimelineItem)
        to_clip (resolve.mediaPoolItem)
        to_in_frame (float): cut in frame, usually `GetLeftOffset()`
        to_out_frame (float): cut out frame, usually left offset plus duration

    Returns:
        bool: True if successfully replaced

    """
    # copy ACES input transform from timeline clip to new media item
    mediapool_item_from_timeline = from_clip.GetMediaPoolItem()
    _idt = mediapool_item_from_timeline.GetClipProperty('IDT')
    to_clip.SetClipProperty('IDT', _idt)

    _clip_prop = to_clip.GetClipProperty
    to_clip_name = _clip_prop("File Name")
    # add clip item as take to timeline
    take = from_clip.AddTake(
        to_clip,
        float(to_in_frame),
        float(to_out_frame)
    )

    if not take:
        return False

    for take_index in range(1, (int(from_clip.GetTakesCount()) + 1)):
        take_item = from_clip.GetTakeByIndex(take_index)
        take_mp_item = take_item["mediaPoolItem"]
        if to_clip_name in take_mp_item.GetName():
            from_clip.SelectTakeByIndex(take_index)
            from_clip.FinalizeTake()
            return True
    return False

def _validate_tc(x):
    # Validate and reformat timecode string
    if len(x) != 11:
        print('Invalid timecode. Try again.')

    c = ':'
    colonized = x[:2] + c + x[3:5] + c + x[6:8] + c + x[9:]

    if colonized.replace(':', '').isdigit():
        print(f"_ colonized: {colonized}")
        return colonized
    else:
        print('Invalid timecode. Try again.')

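
# A quick sanity check of the `_validate_tc` helper above: an
# 11-character timecode with arbitrary separators is normalized to the
# colon-separated form (invalid input prints a warning and yields None).
assert _validate_tc("01.00.05.12") == "01:00:05:12"
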
def get_pype_clip_metadata(clip):
    """
    Get openpype metadata created by creator plugin

    Args:
        clip (resolve.TimelineItem): resolve's object

    Returns:
        dict: hierarchy, orig clip attributes
    """
    mp_item = clip.GetMediaPoolItem()
    metadata = mp_item.GetMetadata()

    return metadata.get(self.pype_tag_name)

def get_clip_attributes(clip):
    """
    Collect basic attributes from resolve timeline item

    Args:
        clip (resolve.TimelineItem): timeline item object

    Returns:
        dict: all collected attributes as key: values
    """
    mp_item = clip.GetMediaPoolItem()

    return {
        "clipIn": clip.GetStart(),
        "clipOut": clip.GetEnd(),
        "clipLeftOffset": clip.GetLeftOffset(),
        "clipRightOffset": clip.GetRightOffset(),
        "clipMarkers": clip.GetMarkers(),
        "clipFlags": clip.GetFlagList(),
        "sourceId": mp_item.GetMediaId(),
        "sourceProperties": mp_item.GetClipProperty()
    }

def set_project_manager_to_folder_name(folder_name):
    """
    Sets context of Project manager to the given folder by name.

    Searches for the folder by the given name from the root folder down.
    If no folder of that name exists, it is created in the root folder.

    Args:
        folder_name (str): name of the searched folder

    Returns:
        bool: True if success

    Raises:
        Exception: Cannot create folder in root

    """
    # initialize project manager
    get_project_manager()

    set_folder = False

    # go back to root folder
    if self.project_manager.GotoRootFolder():
        log.info(f"Testing existing folder: {folder_name}")
        folders = _convert_resolve_list_type(
            self.project_manager.GetFoldersInCurrentFolder())
        log.info(f"Testing existing folders: {folders}")
        # get the first available folder object
        # with the same name as in `folder_name` else return False
        if next((f for f in folders if f in folder_name), False):
            log.info(f"Found existing folder: {folder_name}")
            set_folder = self.project_manager.OpenFolder(folder_name)

    if set_folder:
        return True

    # if no folder with that name exists then create one
    # go back to root folder
    log.info(f"Folder `{folder_name}` not found and will be created")
    if self.project_manager.GotoRootFolder():
        try:
            # create folder by given name
            self.project_manager.CreateFolder(folder_name)
            self.project_manager.OpenFolder(folder_name)
            return True
        except NameError as e:
            log.error((f"Folder with name `{folder_name}` cannot be created!"
                       f"Error: {e}"))
            return False

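
# Minimal usage sketch for `set_project_manager_to_folder_name` above;
# the folder name here is hypothetical. The call opens the folder when
# it already exists, otherwise it is created in the root folder.
if set_project_manager_to_folder_name("ayon_projects"):
    log.info("Project manager folder is ready")
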
def _convert_resolve_list_type(resolve_list):
    """ Resolve is using an indexed dictionary as its list type:
    `{1.0: 'value'}`
    This converts it to a normal list.
    """
    assert isinstance(resolve_list, dict), (
        "Input argument should be dict() type")

    return [resolve_list[i] for i in sorted(resolve_list.keys())]

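
# Example of the Resolve indexed-dict convention handled above:
assert _convert_resolve_list_type(
    {2.0: "clipB", 1.0: "clipA"}) == ["clipA", "clipB"]
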
def create_otio_time_range_from_timeline_item_data(timeline_item_data):
    timeline_item = timeline_item_data["clip"]["item"]
    project = timeline_item_data["project"]
    timeline = timeline_item_data["timeline"]
    timeline_start = timeline.GetStartFrame()

    frame_start = int(timeline_item.GetStart() - timeline_start)
    frame_duration = int(timeline_item.GetDuration())
    fps = project.GetSetting("timelineFrameRate")

    return otio_export.create_otio_time_range(
        frame_start, frame_duration, fps)

def get_otio_clip_instance_data(otio_timeline, timeline_item_data):
    """
    Return otio objects for timeline, track and clip

    Args:
        timeline_item_data (dict): timeline_item_data from list returned by
            resolve.get_current_timeline_items()
        otio_timeline (otio.schema.Timeline): otio object

    Returns:
        dict: otio clip object

    """

    timeline_item = timeline_item_data["clip"]["item"]
    track_name = timeline_item_data["track"]["name"]
    timeline_range = create_otio_time_range_from_timeline_item_data(
        timeline_item_data)

    for otio_clip in otio_timeline.each_clip():
        # keep the otio clip's track name in its own variable so it does
        # not shadow the timeline item's track name compared below
        parent_track_name = otio_clip.parent().name
        parent_range = otio_clip.range_in_parent()
        if track_name not in parent_track_name:
            continue
        if otio_clip.name not in timeline_item.GetName():
            continue
        if is_overlapping_otio_ranges(
                parent_range, timeline_range, strict=True):

            # add pypedata marker to otio_clip metadata
            for marker in otio_clip.markers:
                if self.pype_marker_name in marker.name:
                    otio_clip.metadata.update(marker.metadata)
            return {"otioClip": otio_clip}

    return None

def get_reformated_path(path, padded=False, first=False):
    """
    Return path with the frame token replaced by a python expression

    Args:
        path (str): path url or simple file name

    Returns:
        str: string with reformatted path

    Example:
        get_reformated_path("plate.[0001-1008].exr", padded=True)
        > plate.%04d.exr

    """
    first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]")

    if "[" in path:
        padding_pattern = r"(\d+)(?=-)"
        padding = len(re.findall(padding_pattern, path).pop())
        num_pattern = r"(\[\d+\-\d+\])"
        if padded:
            path = re.sub(num_pattern, f"%0{padding}d", path)
        elif first:
            first_frame = re.findall(first_frame_pattern, path, flags=0)
            if len(first_frame) >= 1:
                first_frame = first_frame[0]
                path = re.sub(num_pattern, first_frame, path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path

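
# The three modes of `get_reformated_path` above on the same input:
path = "plate.[0001-1008].exr"
assert get_reformated_path(path, padded=True) == "plate.%04d.exr"
assert get_reformated_path(path, first=True) == "plate.0001.exr"
assert get_reformated_path(path) == "plate.%d.exr"
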
184
server_addon/resolve/client/ayon_resolve/api/menu.py
Normal file
@@ -0,0 +1,184 @@
import os
import sys

from qtpy import QtWidgets, QtCore, QtGui

from ayon_core.tools.utils import host_tools
from ayon_core.pipeline import registered_host


MENU_LABEL = os.environ["AYON_MENU_LABEL"]


def load_stylesheet():
    path = os.path.join(os.path.dirname(__file__), "menu_style.qss")
    if not os.path.exists(path):
        print("Unable to load stylesheet, file not found in resources")
        return ""

    with open(path, "r") as file_stream:
        stylesheet = file_stream.read()
    return stylesheet


class Spacer(QtWidgets.QWidget):
    def __init__(self, height, *args, **kwargs):
        super(Spacer, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)


class AYONMenu(QtWidgets.QWidget):
    def __init__(self, *args, **kwargs):
        super(AYONMenu, self).__init__(*args, **kwargs)

        self.setObjectName(f"{MENU_LABEL}Menu")

        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowMinimizeButtonHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )

        self.setWindowTitle(f"{MENU_LABEL}")
        save_current_btn = QtWidgets.QPushButton("Save current file", self)
        workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
        create_btn = QtWidgets.QPushButton("Create ...", self)
        publish_btn = QtWidgets.QPushButton("Publish ...", self)
        load_btn = QtWidgets.QPushButton("Load ...", self)
        inventory_btn = QtWidgets.QPushButton("Manager ...", self)
        subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self)
        libload_btn = QtWidgets.QPushButton("Library ...", self)
        experimental_btn = QtWidgets.QPushButton(
            "Experimental tools ...", self
        )
        # rename_btn = QtWidgets.QPushButton("Rename", self)
        # set_colorspace_btn = QtWidgets.QPushButton(
        #     "Set colorspace from presets", self
        # )
        # reset_resolution_btn = QtWidgets.QPushButton(
        #     "Set Resolution from presets", self
        # )

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(10, 20, 10, 20)

        layout.addWidget(save_current_btn)

        layout.addWidget(Spacer(15, self))

        layout.addWidget(workfiles_btn)
        layout.addWidget(create_btn)
        layout.addWidget(publish_btn)
        layout.addWidget(load_btn)
        layout.addWidget(inventory_btn)
        layout.addWidget(subsetm_btn)

        layout.addWidget(Spacer(15, self))

        layout.addWidget(libload_btn)

        # layout.addWidget(Spacer(15, self))

        # layout.addWidget(rename_btn)

        # layout.addWidget(Spacer(15, self))

        # layout.addWidget(set_colorspace_btn)
        # layout.addWidget(reset_resolution_btn)
        layout.addWidget(Spacer(15, self))
        layout.addWidget(experimental_btn)

        self.setLayout(layout)

        save_current_btn.clicked.connect(self.on_save_current_clicked)
        save_current_btn.setShortcut(QtGui.QKeySequence.Save)
        workfiles_btn.clicked.connect(self.on_workfile_clicked)
        create_btn.clicked.connect(self.on_create_clicked)
        publish_btn.clicked.connect(self.on_publish_clicked)
        load_btn.clicked.connect(self.on_load_clicked)
        inventory_btn.clicked.connect(self.on_inventory_clicked)
        subsetm_btn.clicked.connect(self.on_subsetm_clicked)
        libload_btn.clicked.connect(self.on_libload_clicked)
        # rename_btn.clicked.connect(self.on_rename_clicked)
        # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked)
        # reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
        experimental_btn.clicked.connect(self.on_experimental_clicked)

    def on_save_current_clicked(self):
        host = registered_host()
        current_file = host.get_current_workfile()
        if not current_file:
            print("Current project is not saved. "
                  "Please save once first via workfiles tool.")
            host_tools.show_workfiles()
            return

        print(f"Saving current file to: {current_file}")
        host.save_workfile(current_file)

    def on_workfile_clicked(self):
        print("Clicked Workfile")
        host_tools.show_workfiles()

    def on_create_clicked(self):
        print("Clicked Create")
        host_tools.show_creator()

    def on_publish_clicked(self):
        print("Clicked Publish")
        host_tools.show_publish(parent=None)

    def on_load_clicked(self):
        print("Clicked Load")
        host_tools.show_loader(use_context=True)

    def on_inventory_clicked(self):
        print("Clicked Inventory")
        host_tools.show_scene_inventory()

    def on_subsetm_clicked(self):
        print("Clicked Subset Manager")
        host_tools.show_subset_manager()

    def on_libload_clicked(self):
        print("Clicked Library")
        host_tools.show_library_loader()

    def on_rename_clicked(self):
        print("Clicked Rename")

    def on_set_colorspace_clicked(self):
        print("Clicked Set Colorspace")

    def on_set_resolution_clicked(self):
        print("Clicked Set Resolution")

    def on_experimental_clicked(self):
        host_tools.show_experimental_tools_dialog()


def launch_ayon_menu():
    app = QtWidgets.QApplication(sys.argv)

    ayon_menu = AYONMenu()

    stylesheet = load_stylesheet()
    ayon_menu.setStyleSheet(stylesheet)

    ayon_menu.show()

    sys.exit(app.exec_())

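
# A minimal sketch of launching the menu standalone; assumes the
# AYON_MENU_LABEL environment variable was already set by the launcher.
if __name__ == "__main__":
    launch_ayon_menu()
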
71
server_addon/resolve/client/ayon_resolve/api/menu_style.qss
Normal file
@@ -0,0 +1,71 @@
QWidget {
    background-color: #282828;
    border-radius: 3;
    font-size: 13px;
}

QComboBox {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
}

QComboBox QAbstractItemView {
    color: white;
}

QPushButton {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
    padding: 5;
}

QPushButton:focus {
    background-color: "#171717";
    color: #d0d0d0;
}

QPushButton:hover {
    background-color: "#171717";
    color: #e64b3d;
}

QSpinBox {
    border: 1px solid #090909;
    background-color: #201f1f;
    color: #ffffff;
    padding: 2;
    max-width: 8em;
    qproperty-alignment: AlignCenter;
}

QLineEdit {
    border: 1px solid #090909;
    border-radius: 3px;
    background-color: #201f1f;
    color: #ffffff;
    padding: 2;
    min-width: 10em;
    qproperty-alignment: AlignCenter;
}

#AYONMenu {
    qproperty-alignment: AlignLeft;
    min-width: 10em;
    border: 1px solid #fef9ef;
}

QVBoxLayout {
    background-color: #282828;
}

#Divider {
    border: 1px solid #090909;
    background-color: #585858;
}

QLabel {
    color: #77776b;
}
301
server_addon/resolve/client/ayon_resolve/api/pipeline.py
Normal file
@@ -0,0 +1,301 @@
"""
|
||||
Basic avalon integration
|
||||
"""
|
||||
import os
|
||||
import contextlib
|
||||
from collections import OrderedDict
|
||||
|
||||
from pyblish import api as pyblish
|
||||
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.pipeline import (
|
||||
schema,
|
||||
register_loader_plugin_path,
|
||||
register_creator_plugin_path,
|
||||
AVALON_CONTAINER_ID,
|
||||
)
|
||||
from ayon_core.host import (
|
||||
HostBase,
|
||||
IWorkfileHost,
|
||||
ILoadHost
|
||||
)
|
||||
|
||||
from . import lib
|
||||
from .utils import get_resolve_module
|
||||
from .workio import (
|
||||
open_file,
|
||||
save_file,
|
||||
file_extensions,
|
||||
has_unsaved_changes,
|
||||
work_root,
|
||||
current_file
|
||||
)
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
|
||||
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
|
||||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
|
||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
|
||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
|
||||
|
||||
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
|
||||
|
||||
|
||||
class ResolveHost(HostBase, IWorkfileHost, ILoadHost):
|
||||
name = "resolve"
|
||||
|
||||
def install(self):
|
||||
"""Install resolve-specific functionality of avalon-core.
|
||||
|
||||
This is where you install menus and register families, data
|
||||
and loaders into resolve.
|
||||
|
||||
It is called automatically when installing via `api.install(resolve)`.
|
||||
|
||||
See the Maya equivalent for inspiration on how to implement this.
|
||||
|
||||
"""
|
||||
|
||||
log.info("ayon_resolve installed")
|
||||
|
||||
pyblish.register_host(self.name)
|
||||
pyblish.register_plugin_path(PUBLISH_PATH)
|
||||
print("Registering DaVinci Resolve plug-ins..")
|
||||
|
||||
register_loader_plugin_path(LOAD_PATH)
|
||||
register_creator_plugin_path(CREATE_PATH)
|
||||
|
||||
# register callback for switching publishable
|
||||
pyblish.register_callback("instanceToggled",
|
||||
on_pyblish_instance_toggled)
|
||||
|
||||
get_resolve_module()
|
||||
|
||||
def open_workfile(self, filepath):
|
||||
return open_file(filepath)
|
||||
|
||||
def save_workfile(self, filepath=None):
|
||||
return save_file(filepath)
|
||||
|
||||
def work_root(self, session):
|
||||
return work_root(session)
|
||||
|
||||
def get_current_workfile(self):
|
||||
return current_file()
|
||||
|
||||
def workfile_has_unsaved_changes(self):
|
||||
return has_unsaved_changes()
|
||||
|
||||
def get_workfile_extensions(self):
|
||||
return file_extensions()
|
||||
|
||||
def get_containers(self):
|
||||
return ls()
|
||||
|
||||
|
||||
def containerise(timeline_item,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):
    """Bundle Resolve's timeline item into an assembly and imprint it
    with metadata.

    Containerisation enables tracking of version, author and origin
    for loaded assets.

    Arguments:
        timeline_item (resolve.TimelineItem): object to imprint as container
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of node used to produce this container.

    Returns:
        timeline_item (resolve.TimelineItem): containerised object

    """

    data_imprint = OrderedDict({
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": str(name),
        "namespace": str(namespace),
        "loader": str(loader),
        "representation": context["representation"]["id"],
    })

    if data:
        data_imprint.update(data)

    lib.set_timeline_item_pype_tag(timeline_item, data_imprint)

    return timeline_item

def ls():
    """List available containers.

    This function is used by the host tooling (e.g. the scene inventory).
    It iterates all track items in the current timeline and *yields* one
    container at a time.

    See the `container.json` schema for details on how it should look,
    and the Maya equivalent, which is in `avalon.maya.pipeline`
    """

    # get all track items from current timeline
    all_timeline_items = lib.get_current_timeline_items(filter=False)

    for timeline_item_data in all_timeline_items:
        timeline_item = timeline_item_data["clip"]["item"]
        container = parse_container(timeline_item)
        if container:
            yield container

def parse_container(timeline_item, validate=True):
    """Return container data from timeline_item's openpype tag.

    Args:
        timeline_item (resolve.TimelineItem): A containerised track item.
        validate (bool, optional): validate data against the avalon schema

    Returns:
        dict: The container schema data for the input containerised
            track item.

    """
    # convert tag metadata to normal keys names
    data = lib.get_timeline_item_pype_tag(timeline_item)

    if validate and data and data.get("schema"):
        schema.validate(data)

    if not isinstance(data, dict):
        return

    # If not all required data, return the empty container
    required = ['schema', 'id', 'name',
                'namespace', 'loader', 'representation']

    if not all(key in data for key in required):
        return

    container = {key: data[key] for key in required}

    container["objectName"] = timeline_item.GetName()

    # Store reference to the node object
    container["_timeline_item"] = timeline_item

    return container

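
# Typical consumption of `ls()` / `parse_container()` above: iterate the
# generator and read the imprinted metadata back from each track item.
for container in ls():
    print(container["name"], container["representation"])
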
def update_container(timeline_item, data=None):
    """Update container data in the input timeline_item's openpype tag.

    Args:
        timeline_item (resolve.TimelineItem): A containerised track item.
        data (dict, optional): dictionary with data to be updated

    Returns:
        bool: True if container was updated correctly

    """
    data = data or dict()

    container = lib.get_timeline_item_pype_tag(timeline_item)

    for _key, _value in container.items():
        try:
            container[_key] = data[_key]
        except KeyError:
            pass

    log.info("Updating container: `{}`".format(timeline_item))
    return bool(lib.set_timeline_item_pype_tag(timeline_item, container))

@contextlib.contextmanager
def maintained_selection():
    """Maintain selection during context

    Example:
        >>> with maintained_selection():
        ...     node['selected'].setValue(True)
        >>> print(node['selected'].value())
        False
    """
    try:
        # do the operation
        yield
    finally:
        pass


def reset_selection():
    """Deselect all selected nodes
    """
    pass

def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles."""

    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    from ayon_resolve.api import set_publish_attribute

    # Whether instances should be passthrough based on new value
    timeline_item = instance.data["item"]
    set_publish_attribute(timeline_item, new_value)

def remove_instance(instance):
    """Remove instance marker from track item."""
    instance_id = instance.get("uuid")

    selected_timeline_items = lib.get_current_timeline_items(
        filter=True, selecting_color=lib.publish_clip_color)

    found_ti = None
    for timeline_item_data in selected_timeline_items:
        timeline_item = timeline_item_data["clip"]["item"]

        # get openpype tag data
        tag_data = lib.get_timeline_item_pype_tag(timeline_item)
        _ti_id = tag_data.get("uuid")
        if _ti_id == instance_id:
            found_ti = timeline_item
            break

    if found_ti is None:
        return

    # removing instance by marker color
    print(f"Removing instance: {found_ti.GetName()}")
    found_ti.DeleteMarkersByColor(lib.pype_marker_color)

def list_instances():
    """List all created instances from current workfile."""
    listed_instances = []
    selected_timeline_items = lib.get_current_timeline_items(
        filter=True, selecting_color=lib.publish_clip_color)

    for timeline_item_data in selected_timeline_items:
        timeline_item = timeline_item_data["clip"]["item"]
        ti_name = timeline_item.GetName().split(".")[0]

        # get openpype tag data
        tag_data = lib.get_timeline_item_pype_tag(timeline_item)

        if tag_data:
            asset = tag_data.get("asset")
            product_name = tag_data.get("productName")
            tag_data["label"] = f"{ti_name} [{asset}-{product_name}]"
            listed_instances.append(tag_data)

    return listed_instances
910
server_addon/resolve/client/ayon_resolve/api/plugin.py
Normal file
@@ -0,0 +1,910 @@
import re
import uuid
import copy

import qargparse
from qtpy import QtWidgets, QtCore

from ayon_core.settings import get_current_project_settings
from ayon_core.pipeline import (
    LegacyCreator,
    LoaderPlugin,
    Anatomy
)

from . import lib
from .menu import load_stylesheet

class CreatorWidget(QtWidgets.QDialog):

    # output items
    items = {}

    def __init__(self, name, info, ui_inputs, parent=None):
        super(CreatorWidget, self).__init__(parent)

        self.setObjectName(name)

        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )
        self.setWindowTitle(name or "OpenPype Creator Input")
        self.resize(500, 700)

        # Where inputs and labels are set
        self.content_widget = [QtWidgets.QWidget(self)]
        top_layout = QtWidgets.QFormLayout(self.content_widget[0])
        top_layout.setObjectName("ContentLayout")
        top_layout.addWidget(Spacer(5, self))

        # first add widget tag line
        top_layout.addWidget(QtWidgets.QLabel(info))

        # main dynamic layout
        self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAsNeeded)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.scroll_area.setWidgetResizable(True)

        self.content_widget.append(self.scroll_area)

        scroll_widget = QtWidgets.QWidget(self)
        in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
        self.content_layout = [in_scroll_area]

        # add preset data into input widget layout
        self.items = self.populate_widgets(ui_inputs)
        self.scroll_area.setWidget(scroll_widget)

        # Confirmation buttons
        btns_widget = QtWidgets.QWidget(self)
        btns_layout = QtWidgets.QHBoxLayout(btns_widget)

        cancel_btn = QtWidgets.QPushButton("Cancel")
        btns_layout.addWidget(cancel_btn)

        ok_btn = QtWidgets.QPushButton("Ok")
        btns_layout.addWidget(ok_btn)

        # Main layout of the dialog
        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.setSpacing(0)

        # adding content widget
        for w in self.content_widget:
            main_layout.addWidget(w)

        main_layout.addWidget(btns_widget)

        ok_btn.clicked.connect(self._on_ok_clicked)
        cancel_btn.clicked.connect(self._on_cancel_clicked)

        stylesheet = load_stylesheet()
        self.setStyleSheet(stylesheet)

    def _on_ok_clicked(self):
        self.result = self.value(self.items)
        self.close()

    def _on_cancel_clicked(self):
        self.result = None
        self.close()

    def value(self, data, new_data=None):
        new_data = new_data or {}
        for k, v in data.items():
            new_data[k] = {
                "target": None,
                "value": None
            }
            if v["type"] == "dict":
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = self.value(v["value"])
            if v["type"] == "section":
                new_data.pop(k)
                new_data = self.value(v["value"], new_data)
            elif getattr(v["value"], "currentText", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].currentText()
            elif getattr(v["value"], "isChecked", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].isChecked()
            elif getattr(v["value"], "value", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].value()
            elif getattr(v["value"], "text", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].text()

        return new_data

    def camel_case_split(self, text):
        matches = re.finditer(
            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
        return " ".join([str(m.group(0)).capitalize() for m in matches])

    def create_row(self, layout, type, text, **kwargs):
        # get type attribute from qwidgets
        attr = getattr(QtWidgets, type)

        # convert label text to normal capitalized text with spaces
        label_text = self.camel_case_split(text)

        # assign the new text to label widget
        label = QtWidgets.QLabel(label_text)
        label.setObjectName("LineLabel")

        # create attribute name with spaces stripped
        attr_name = text.replace(" ", "")

        # create attribute and assign default values
        setattr(
            self,
            attr_name,
            attr(parent=self))

        # assign the created attribute to variable
        item = getattr(self, attr_name)
        for func, val in kwargs.items():
            if getattr(item, func):
                func_attr = getattr(item, func)
                if isinstance(val, tuple):
                    func_attr(*val)
                else:
                    func_attr(val)

        # add to layout
        layout.addRow(label, item)

        return item

    def populate_widgets(self, data, content_layout=None):
        """
        Populate widget from input dict.

        Each plugin has its own set of widget rows defined in a dictionary;
        each row's values should have the following keys: `type`, `target`,
        `label`, `order`, `value` and optionally also `toolTip`.

        Args:
            data (dict): widget rows or organized groups defined
                by types `dict` or `section`
            content_layout (QtWidgets.QFormLayout, optional): used for nesting

        Returns:
            dict: redefined data dict updated with created widgets

        """

        content_layout = content_layout or self.content_layout[-1]
        # fix order of process by defined order value
        ordered_keys = list(data.keys())
        for k, v in data.items():
            try:
                # try removing a key from index which should
                # be filled with new
                ordered_keys.pop(v["order"])
            except IndexError:
                pass
            # add key into correct order
            ordered_keys.insert(v["order"], k)

        # process ordered
        for k in ordered_keys:
            v = data[k]
            tool_tip = v.get("toolTip", "")
            if v["type"] == "dict":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            if v["type"] == "section":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            elif v["type"] == "QLineEdit":
                data[k]["value"] = self.create_row(
                    content_layout, "QLineEdit", v["label"],
                    setText=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QComboBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QComboBox", v["label"],
                    addItems=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QCheckBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QCheckBox", v["label"],
                    setChecked=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QSpinBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QSpinBox", v["label"],
                    setRange=(0, 99999),
                    setValue=v["value"],
                    setToolTip=tool_tip)
        return data

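
# A hypothetical `ui_inputs` structure for CreatorWidget above; the key
# set (`type`, `target`, `label`, `order`, `value`, `toolTip`) comes
# from the populate_widgets docstring, the row names and values are
# made up for illustration.
ui_inputs = {
    "clipRename": {
        "type": "QCheckBox",
        "target": "ui",
        "label": "Rename clips",
        "order": 0,
        "value": False,
        "toolTip": "Rename selected clips",
    },
    "countFrom": {
        "type": "QSpinBox",
        "target": "ui",
        "label": "Count sequence from",
        "order": 1,
        "value": 10,
    },
}
widget = CreatorWidget("Create Shots", "Set shot attributes", ui_inputs)
widget.exec_()
print(widget.result)  # nested {"target": ..., "value": ...} dicts, or None
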
class Spacer(QtWidgets.QWidget):
    def __init__(self, height, *args, **kwargs):
        super(self.__class__, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)

class ClipLoader:

    active_bin = None
    data = {}

    def __init__(self, loader_obj, context, **options):
        """ Initialize object

        Arguments:
            loader_obj (ayon_core.pipeline.load.LoaderPlugin): plugin object
            context (dict): loader plugin context
            options (dict, optional): possible keys:
                projectBinPath: "path/to/binItem"

        """
        self.__dict__.update(loader_obj.__dict__)
        self.context = context
        self.active_project = lib.get_current_project()

        # try to get value from options or evaluate key value for `handles`
        self.with_handles = options.get("handles") is True

        # try to get value from options or evaluate key value for `load_to`
        self.new_timeline = (
            options.get("newTimeline") or
            options.get("load_to") == "New timeline"
        )
        # try to get value from options or evaluate key value for `load_how`
        self.sequential_load = (
            options.get("sequentially") or
            options.get("load_how") == "Sequentially in order"
        )

        assert self._populate_data(), str(
            "Cannot Load selected data, look into database "
            "or call your supervisor")

        # inject asset data to representation dict
        self._get_folder_attributes()

        # add active components to class
        if self.new_timeline:
            loader_cls = loader_obj.__class__
            if loader_cls.timeline:
                # if multiselection is set then use options sequence
                self.active_timeline = loader_cls.timeline
            else:
                # create new sequence
                self.active_timeline = lib.get_new_timeline(
                    "{}_{}".format(
                        self.data["timeline_basename"],
                        str(uuid.uuid4())[:8]
                    )
                )
                loader_cls.timeline = self.active_timeline

        else:
            self.active_timeline = lib.get_current_timeline()

    def _populate_data(self):
        """ Get context and convert it to self.data

        data structure:
            {
                "name": "assetName_productName_representationName"
                "binPath": "projectBinPath",
            }
        """
        # create name
        folder_entity = self.context["folder"]
        product_name = self.context["product"]["name"]
        repre_entity = self.context["representation"]

        folder_name = folder_entity["name"]
        folder_path = folder_entity["path"]
        representation_name = repre_entity["name"]

        self.data["clip_name"] = "_".join([
            folder_name,
            product_name,
            representation_name
        ])
        self.data["versionAttributes"] = self.context["version"]["attrib"]

        self.data["timeline_basename"] = "timeline_{}_{}".format(
            product_name, representation_name)

        # solve project bin structure path
        hierarchy = "Loader{}".format(folder_path)

        self.data["binPath"] = hierarchy

        return True

    def _get_folder_attributes(self):
        """ Get all available folder attributes

        Joins the `folderAttributes` key with the folder attribute dict
        from the loader context.

        """
        self.data["folderAttributes"] = copy.deepcopy(
            self.context["folder"]["attrib"]
        )

    def load(self, files):
        """Load clip into timeline

        Arguments:
            files (list[str]): list of files to load into timeline
        """
        # create project bin for the media to be imported into
        self.active_bin = lib.create_bin(self.data["binPath"])

        # create mediaItem in active project bin
        # create clip media
        media_pool_item = lib.create_media_pool_item(
            files,
            self.active_bin
        )
        _clip_property = media_pool_item.GetClipProperty
        source_in = int(_clip_property("Start"))
        source_out = int(_clip_property("End"))
        source_duration = int(_clip_property("Frames"))

        # Trim clip start if slate is present
        if "slate" in self.data["versionAttributes"]["families"]:
            source_in += 1
            source_duration = source_out - source_in + 1

        if not self.with_handles:
            # Load file without the handles of the source media
            # We remove the handles from the source in and source out
            # so that the handles are excluded in the timeline

            # get version data frame data from db
            version_attributes = self.data["versionAttributes"]
            frame_start = version_attributes.get("frameStart")
            frame_end = version_attributes.get("frameEnd")

            # The version data usually stores the frame range + handles of
            # the media, however certain representations may be shorter
            # because they exclude those handles intentionally. Unfortunately
            # the representation does not store that in the database
            # currently, so we should compensate for those cases. If the
            # media is shorter than the frame range specified in the database
            # we assume it is without handles and thus we do not need to
            # remove the handles from source in and out
            if frame_start is not None and frame_end is not None:
                # Version has frame range data, so we can compare media length
                handle_start = version_attributes.get("handleStart", 0)
                handle_end = version_attributes.get("handleEnd", 0)
                frame_start_handle = frame_start - handle_start
                frame_end_handle = frame_end + handle_end
                database_frame_duration = int(
                    frame_end_handle - frame_start_handle + 1
                )
                if source_duration >= database_frame_duration:
                    source_in += handle_start
                    source_out -= handle_end

        # get timeline in
        timeline_start = self.active_timeline.GetStartFrame()
        if self.sequential_load:
            # set timeline start frame
            timeline_in = int(timeline_start)
        else:
            # set timeline start frame + original clip in frame
            timeline_in = int(
                timeline_start + self.data["folderAttributes"]["clipIn"])

        # make track item from source in bin as item
        timeline_item = lib.create_timeline_item(
            media_pool_item,
            self.active_timeline,
            timeline_in,
            source_in,
            source_out,
        )

        print("Loading clips: `{}`".format(self.data["clip_name"]))
        return timeline_item

    def update(self, timeline_item, files):
        # create project bin for the media to be imported into
        self.active_bin = lib.create_bin(self.data["binPath"])

        # create mediaItem in active project bin
        # create clip media
        media_pool_item = lib.create_media_pool_item(
            files,
            self.active_bin
        )
        _clip_property = media_pool_item.GetClipProperty

        # Read trimming from timeline item
        timeline_item_in = timeline_item.GetLeftOffset()
        timeline_item_len = timeline_item.GetDuration()
        timeline_item_out = timeline_item_in + timeline_item_len

        lib.swap_clips(
            timeline_item,
            media_pool_item,
            timeline_item_in,
            timeline_item_out
        )

        print("Loading clips: `{}`".format(self.data["clip_name"]))
        return timeline_item

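
# A hedged sketch of driving `ClipLoader` above from a loader plugin;
# `context` is the standard loader context (folder/product/version/
# representation entities) and `files` a resolved file list, e.g. from
# `get_representation_files` below. Both are assumed to exist here.
clip_loader = ClipLoader(
    TimelineItemLoader(),  # in practice a concrete loader subclass
    context,
    load_to="Current timeline",
    load_how="Original timing",
)
timeline_item = clip_loader.load(files)
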
class TimelineItemLoader(LoaderPlugin):
    """A basic SequenceLoader for Resolve

    This will implement the basic behavior for a loader to inherit from that
    will containerize the reference and will implement the `remove` and
    `update` logic.

    """

    options = [
        qargparse.Boolean(
            "handles",
            label="Include handles",
            default=0,
            help="Load with handles or without?"
        ),
        qargparse.Choice(
            "load_to",
            label="Where to load clips",
            items=[
                "Current timeline",
                "New timeline"
            ],
            default=0,
            help="Where do you want clips to be loaded?"
        ),
        qargparse.Choice(
            "load_how",
            label="How to load clips",
            items=[
                "Original timing",
                "Sequentially in order"
            ],
            default="Original timing",
            help="Would you like to place it at original timing?"
        )
    ]

    def load(
        self,
        context,
        name=None,
        namespace=None,
        options=None
    ):
        pass

    def update(self, container, context):
        """Update an existing `container`
        """
        pass

    def remove(self, container):
        """Remove an existing `container`
        """
        pass

class Creator(LegacyCreator):
    """Creator class wrapper
    """
    marker_color = "Purple"

    def __init__(self, *args, **kwargs):
        super(Creator, self).__init__(*args, **kwargs)

        resolve_p_settings = get_current_project_settings().get("resolve")
        self.presets = {}
        if resolve_p_settings:
            self.presets = resolve_p_settings["create"].get(
                self.__class__.__name__, {})

        # adding basic current context resolve objects
        self.project = lib.get_current_project()
        self.timeline = lib.get_current_timeline()

        if (self.options or {}).get("useSelection"):
            self.selected = lib.get_current_timeline_items(filter=True)
        else:
            self.selected = lib.get_current_timeline_items(filter=False)

        self.widget = CreatorWidget

class PublishClip:
    """
    Convert a track item to a publishable instance

    Args:
        timeline_item (resolve.TimelineItem): resolve timeline item object
        kwargs (optional): additional data needed for rename=True (presets)

    Returns:
        resolve.TimelineItem: timeline item object with openpype tag
    """
    vertical_clip_match = {}
    tag_data = {}
    types = {
        "shot": "shot",
        "folder": "folder",
        "episode": "episode",
        "sequence": "sequence",
        "track": "sequence",
    }

    # parents search pattern
    parents_search_pattern = r"\{([a-z]*?)\}"

    # default templates for non-ui use
    rename_default = False
    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
    base_product_name_default = "<track_name>"
    review_track_default = "< none >"
    product_type_default = "plate"
    count_from_default = 10
    count_steps_default = 10
    vertical_sync_default = False
    driving_layer_default = ""

    def __init__(self, cls, timeline_item_data, **kwargs):
        # populate input cls attribute onto self.[attr]
        self.__dict__.update(cls.__dict__)

        # get main parent objects
        self.timeline_item_data = timeline_item_data
        self.timeline_item = timeline_item_data["clip"]["item"]
        timeline_name = timeline_item_data["timeline"].GetName()
        self.timeline_name = str(timeline_name).replace(" ", "_")

        # track item (clip) main attributes
        self.ti_name = self.timeline_item.GetName()
        self.ti_index = int(timeline_item_data["clip"]["index"])

        # get track name and index
        track_name = timeline_item_data["track"]["name"]
        self.track_name = str(track_name).replace(" ", "_")
        self.track_index = int(timeline_item_data["track"]["index"])

        if kwargs.get("avalon"):
            self.tag_data.update(kwargs["avalon"])

        # adding ui inputs if any
        self.ui_inputs = kwargs.get("ui_inputs", {})

        # adding media pool folder if any
        self.mp_folder = kwargs.get("mp_folder")

        # populate default data before we get other attributes
        self._populate_timeline_item_default_data()

        # use all populated default data to create all important attributes
        self._populate_attributes()

        # create parents with correct types
        self._create_parents()

    def convert(self):
        # solve track item data and add them to tag data
        self._convert_to_tag_data()

        # if track name is in review track name and also if driving track name
        # is not in review track name: skip tag creation
        if (self.track_name in self.review_layer) and (
                self.driving_layer not in self.review_layer):
            return

        # deal with clip name
        new_name = self.tag_data.pop("newClipName")

        if self.rename:
            self.tag_data["asset_name"] = new_name
        else:
            self.tag_data["asset_name"] = self.ti_name

        # AYON unique identifier
        folder_path = "/{}/{}".format(
            self.tag_data["hierarchy"],
            self.tag_data["asset_name"]
        )
        self.tag_data["folder_path"] = folder_path

        # create new name for track item
        if not lib.pype_marker_workflow:
            # create compound clip workflow
            lib.create_compound_clip(
                self.timeline_item_data,
                self.tag_data["asset_name"],
                self.mp_folder
            )

            # add timeline_item_data selection to tag
            self.tag_data.update({
                "track_data": self.timeline_item_data["track"]
            })

        # create openpype tag on timeline_item and add data
        lib.imprint(self.timeline_item, self.tag_data)

        return self.timeline_item

    def _populate_timeline_item_default_data(self):
        """ Populate default formatting data from track item. """

        self.timeline_item_default_data = {
            "_folder_": "shots",
            "_sequence_": self.timeline_name,
            "_track_": self.track_name,
            "_clip_": self.ti_name,
            "_trackIndex_": self.track_index,
            "_clipIndex_": self.ti_index
        }

    def _populate_attributes(self):
        """ Populate main object attributes. """
        # track item frame range and parent track name for vertical sync check
        self.clip_in = int(self.timeline_item.GetStart())
        self.clip_out = int(self.timeline_item.GetEnd())

        # define ui inputs if non gui mode was used
        self.shot_num = self.ti_index

        # ui_inputs data or default values if gui was not used
        self.rename = self.ui_inputs.get(
            "clipRename", {}).get("value") or self.rename_default
        self.clip_name = self.ui_inputs.get(
            "clipName", {}).get("value") or self.clip_name_default
        self.hierarchy = self.ui_inputs.get(
            "hierarchy", {}).get("value") or self.hierarchy_default
        self.hierarchy_data = self.ui_inputs.get(
            "hierarchyData", {}).get("value") or \
            self.timeline_item_default_data.copy()
        self.count_from = self.ui_inputs.get(
            "countFrom", {}).get("value") or self.count_from_default
        self.count_steps = self.ui_inputs.get(
            "countSteps", {}).get("value") or self.count_steps_default
        self.base_product_name = self.ui_inputs.get(
            "productName", {}).get("value") or self.base_product_name_default
        self.product_type = self.ui_inputs.get(
            "productType", {}).get("value") or self.product_type_default
        self.vertical_sync = self.ui_inputs.get(
            "vSyncOn", {}).get("value") or self.vertical_sync_default
        self.driving_layer = self.ui_inputs.get(
            "vSyncTrack", {}).get("value") or self.driving_layer_default
        self.review_track = self.ui_inputs.get(
            "reviewTrack", {}).get("value") or self.review_track_default

        # build product name from layer name
        if self.base_product_name == "<track_name>":
            self.base_product_name = self.track_name

        # create product name for publishing
        self.product_name = (
            self.product_type + self.base_product_name.capitalize()
        )

    def _replace_hash_to_expression(self, name, text):
        """ Replace hashes with a number in the correct padding. """
        _spl = text.split("#")
        _len = (len(_spl) - 1)
        _repl = "{{{0}:0>{1}}}".format(name, _len)
        new_text = text.replace(("#" * _len), _repl)
        return new_text

    def _convert_to_tag_data(self):
        """ Convert internal data to tag data.

        Populating the tag data into internal variable self.tag_data
        """
        # define vertical sync attributes
        hero_track = True
        self.review_layer = ""
        if self.vertical_sync:
            # check if track name is not in driving layer
            if self.track_name not in self.driving_layer:
                # if it is not then define vertical sync as None
                hero_track = False

        # increasing steps by index of rename iteration
        self.count_steps *= self.rename_index

        hierarchy_formatting_data = {}
        _data = self.timeline_item_default_data.copy()
        if self.ui_inputs:
            # adding tag metadata from ui
            for _k, _v in self.ui_inputs.items():
                if _v["target"] == "tag":
                    self.tag_data[_k] = _v["value"]

            # driving layer is set as positive match
            if hero_track or self.vertical_sync:
                # mark review layer
                if self.review_track and (
                        self.review_track not in self.review_track_default):
                    # if review layer is defined and not the same as default
                    self.review_layer = self.review_track
                # shot num calculate
                if self.rename_index == 0:
                    self.shot_num = self.count_from
                else:
                    self.shot_num = self.count_from + self.count_steps

            # clip name sequence number
            _data.update({"shot": self.shot_num})

            # solve # in text to pythonic expression
            for _k, _v in self.hierarchy_data.items():
                if "#" not in _v["value"]:
                    continue
                self.hierarchy_data[
                    _k]["value"] = self._replace_hash_to_expression(
                        _k, _v["value"])

            # fill up pythonic expressions in hierarchy data
            for k, _v in self.hierarchy_data.items():
                hierarchy_formatting_data[k] = _v["value"].format(**_data)
        else:
            # if no gui mode then just pass default data
            hierarchy_formatting_data = self.hierarchy_data

        tag_hierarchy_data = self._solve_tag_hierarchy_data(
            hierarchy_formatting_data
        )

        tag_hierarchy_data.update({"heroTrack": True})
        if hero_track and self.vertical_sync:
            self.vertical_clip_match.update({
                (self.clip_in, self.clip_out): tag_hierarchy_data
            })

        if not hero_track and self.vertical_sync:
            # driving layer is set as negative match
            for (_in, _out), hero_data in self.vertical_clip_match.items():
                hero_data.update({"heroTrack": False})
                if _in != self.clip_in or _out != self.clip_out:
                    continue

                data_product_name = hero_data["productName"]
                # add track index in case of duplicate names in hero data
                if self.product_name in data_product_name:
                    hero_data["productName"] = self.product_name + str(
                        self.track_index)
                # in case track name and product name are the same then add
                if self.base_product_name == self.track_name:
                    hero_data["productName"] = self.product_name
                # assign data to return hierarchy data to tag
                tag_hierarchy_data = hero_data

        # add data to return data dict
        self.tag_data.update(tag_hierarchy_data)

        # add uuid to tag data
        self.tag_data["uuid"] = str(uuid.uuid4())

        # add review track only to hero track
        if hero_track and self.review_layer:
            self.tag_data.update({"reviewTrack": self.review_layer})
        else:
            self.tag_data.update({"reviewTrack": None})

    def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
        """ Solve tag data from hierarchy data and templates. """
        # fill up clip name and hierarchy keys
        hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
        clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)

        return {
            "newClipName": clip_name_filled,
            "hierarchy": hierarchy_filled,
            "parents": self.parents,
            "hierarchyData": hierarchy_formatting_data,
            "productName": self.product_name,
            "productType": self.product_type
        }

    def _convert_to_entity(self, key):
        """ Convert input key to key with type. """
        # convert to entity type
        folder_type = self.types.get(key)

        assert folder_type, "Missing folder type for `{}`".format(
            key
        )

        return {
            "folder_type": folder_type,
            "entity_name": self.hierarchy_data[key]["value"].format(
                **self.timeline_item_default_data
            )
        }

    def _create_parents(self):
        """ Create parents and return them in a list. """
        self.parents = []

        pattern = re.compile(self.parents_search_pattern)
        par_split = [pattern.findall(t).pop()
                     for t in self.hierarchy.split("/")]

        for key in par_split:
            parent = self._convert_to_entity(key)
            self.parents.append(parent)

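
# Worked example of the `_replace_hash_to_expression` helper used by
# PublishClip above: hashes become a zero-padded format expression that
# is later filled from hierarchy data.
#   "sh###" -> "sh{shot:0>3}" -> "sh010" when formatted with shot=10
assert "sh{shot:0>3}".format(shot=10) == "sh010"
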
def get_representation_files(representation):
    anatomy = Anatomy()
    files = []
    for file_data in representation["files"]:
        path = anatomy.fill_root(file_data["path"])
        files.append(path)
    return files
|
|
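`get_representation_files` resolves each representation file against the project Anatomy roots. A minimal sketch of how it could be called; the representation payload and the `{root[work]}` token shown here are illustrative only, not a real AYON entity:

```
# Hypothetical representation payload; real entities come from ayon_api.
representation = {
    "files": [
        {"path": "{root[work]}/proj/shots/sh010/plate.0001.exr"},
        {"path": "{root[work]}/proj/shots/sh010/plate.0002.exr"},
    ]
}

# Anatomy.fill_root() expands the `{root[...]}` tokens to absolute paths.
for path in get_representation_files(representation):
    print(path)
```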
@ -0,0 +1,71 @@
#! python3


class TestGUI:
    def __init__(self):
        resolve = bmd.scriptapp("Resolve")  # noqa
        self.fu = resolve.Fusion()
        ui = self.fu.UIManager
        self.disp = bmd.UIDispatcher(self.fu.UIManager)  # noqa
        self.title_font = ui.Font({"PixelSize": 18})
        self._dialogue = self.disp.AddWindow(
            {
                "WindowTitle": "Get Testing folder",
                "ID": "TestingWin",
                "Geometry": [250, 250, 250, 100],
                "Spacing": 0,
                "Margin": 10
            },
            [
                ui.VGroup(
                    {
                        "Spacing": 2
                    },
                    [
                        ui.Button(
                            {
                                "ID": "inputTestSourcesFolder",
                                "Text": "Select folder with testing media",
                                "Weight": 1.25,
                                "ToolTip": (
                                    "Choose folder with videos, sequences, "
                                    "single images, nested folders with "
                                    "media"
                                ),
                                "Flat": False
                            }
                        ),
                        ui.VGap(),
                        ui.Button(
                            {
                                "ID": "openButton",
                                "Text": "Process Test",
                                "Weight": 2,
                                "ToolTip": "Run the test...",
                                "Flat": False
                            }
                        )
                    ]
                )
            ]
        )
        self._widgets = self._dialogue.GetItems()
        self._dialogue.On.TestingWin.Close = self._close_window
        self._dialogue.On.inputTestSourcesFolder.Clicked = self._open_dir_button_pressed  # noqa
        self._dialogue.On.openButton.Clicked = self.process

    def _close_window(self, event):
        self.disp.ExitLoop()

    def process(self, event):
        # placeholder method; supposed to be overridden in a child class
        pass

    def _open_dir_button_pressed(self, event):
        # placeholder method; supposed to be overridden in a child class
        pass

    def show_gui(self):
        self._dialogue.Show()
        self.disp.RunLoop()
        self._dialogue.Hide()
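`TestGUI` leaves `process` and `_open_dir_button_pressed` as placeholders. A sketch of a child class wiring them up; `RequestDir` is assumed here to be available on the Fusion scripting object and the class name is hypothetical:

```
class MediaTestGUI(TestGUI):
    """Hypothetical child class filling in the placeholder callbacks."""

    def _open_dir_button_pressed(self, event):
        # ask Fusion for a folder (assumed Fusion scripting call)
        self._selected_dir = self.fu.RequestDir()

    def process(self, event):
        print(f"Processing media in: {self._selected_dir}")
        self.disp.ExitLoop()


MediaTestGUI().show_gui()
```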
134
server_addon/resolve/client/ayon_resolve/api/todo-rendering.py
Normal file

@ -0,0 +1,134 @@
#!/usr/bin/env python
# TODO: convert this script to be usable with OpenPype
"""
Example DaVinci Resolve script:
Load a still from a DRX file, apply the still to all clips in all timelines.
Set render format and codec, add render jobs for all timelines, render
to a specified path and wait for rendering completion.
Once the render is complete, delete all jobs.
"""
# cloned from: https://github.com/survos/transcribe/blob/fe3cf51eb95b82dabcf21fbe5f89bfb3d8bb6ce2/python/3_grade_and_render_all_timelines.py  # noqa

from python_get_resolve import GetResolve
import sys
import time


def AddTimelineToRender(project, timeline, presetName,
                        targetDirectory, renderFormat, renderCodec):
    project.SetCurrentTimeline(timeline)
    project.LoadRenderPreset(presetName)

    if not project.SetCurrentRenderFormatAndCodec(renderFormat, renderCodec):
        return False

    project.SetRenderSettings(
        {"SelectAllFrames": 1, "TargetDir": targetDirectory})
    return project.AddRenderJob()


def RenderAllTimelines(resolve, presetName, targetDirectory,
                       renderFormat, renderCodec):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False

    resolve.OpenPage("Deliver")
    timelineCount = project.GetTimelineCount()

    for index in range(0, int(timelineCount)):
        if not AddTimelineToRender(
                project,
                project.GetTimelineByIndex(index + 1),
                presetName,
                targetDirectory,
                renderFormat,
                renderCodec):
            return False
    return project.StartRendering()


def IsRenderingInProgress(resolve):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False

    return project.IsRenderingInProgress()


def WaitForRenderingCompletion(resolve):
    while IsRenderingInProgress(resolve):
        time.sleep(1)
    return


def ApplyDRXToAllTimelineClips(timeline, path, gradeMode=0):
    trackCount = timeline.GetTrackCount("video")

    clips = {}
    for index in range(1, int(trackCount) + 1):
        clips.update(timeline.GetItemsInTrack("video", index))
    return timeline.ApplyGradeFromDRX(path, int(gradeMode), clips)


def ApplyDRXToAllTimelines(resolve, path, gradeMode=0):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    if not project:
        return False
    timelineCount = project.GetTimelineCount()

    for index in range(0, int(timelineCount)):
        timeline = project.GetTimelineByIndex(index + 1)
        project.SetCurrentTimeline(timeline)
        if not ApplyDRXToAllTimelineClips(timeline, path, gradeMode):
            return False
    return True


def DeleteAllRenderJobs(resolve):
    projectManager = resolve.GetProjectManager()
    project = projectManager.GetCurrentProject()
    project.DeleteAllRenderJobs()
    return


# Inputs:
# - DRX file to import the grade still from and apply it to clips
# - grade mode (0, 1 or 2)
# - preset name for rendering
# - render path
# - render format
# - render codec
if len(sys.argv) < 7:
    print(
        "input parameters for the script are [drx file path] [grade mode] "
        "[render preset name] [render path] [render format] [render codec]")
    sys.exit()

drxPath = sys.argv[1]
gradeMode = sys.argv[2]
renderPresetName = sys.argv[3]
renderPath = sys.argv[4]
renderFormat = sys.argv[5]
renderCodec = sys.argv[6]

# Get currently open project
resolve = GetResolve()

if not ApplyDRXToAllTimelines(resolve, drxPath, gradeMode):
    print("Unable to apply a still from drx file to all timelines")
    sys.exit()

if not RenderAllTimelines(resolve, renderPresetName, renderPath,
                          renderFormat, renderCodec):
    print("Unable to set all timelines for rendering")
    sys.exit()

WaitForRenderingCompletion(resolve)

DeleteAllRenderJobs(resolve)

print("Rendering is completed.")
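The argv parsing above implies the following invocation; the file paths, preset name, format and codec values are illustrative:

```
python todo-rendering.py /grades/shot.drx 0 MyRenderPreset /tmp/renders mp4 H264
```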
83
server_addon/resolve/client/ayon_resolve/api/utils.py
Normal file

@ -0,0 +1,83 @@
#! python3

"""
Resolve's tools for setting up the environment
"""

import os
import sys

from ayon_core.lib import Logger

log = Logger.get_logger(__name__)


def get_resolve_module():
    from ayon_resolve import api
    # don't run if already loaded
    if api.bmdvr:
        log.info(("resolve module is assigned to "
                  f"`ayon_resolve.api.bmdvr`: {api.bmdvr}"))
        return api.bmdvr
    try:
        """
        The PYTHONPATH needs to be set correctly for this import
        statement to work. An alternative is to import
        DaVinciResolveScript by specifying an absolute path
        (see the ImportError handler logic below).
        """
        import DaVinciResolveScript as bmd
    except ImportError:
        if sys.platform.startswith("darwin"):
            expected_path = ("/Library/Application Support/Blackmagic Design"
                             "/DaVinci Resolve/Developer/Scripting/Modules")
        elif sys.platform.startswith("win") \
                or sys.platform.startswith("cygwin"):
            expected_path = os.path.normpath(
                os.getenv('PROGRAMDATA') + (
                    "/Blackmagic Design/DaVinci Resolve/Support/Developer"
                    "/Scripting/Modules"
                )
            )
        elif sys.platform.startswith("linux"):
            expected_path = "/opt/resolve/libs/Fusion/Modules"
        else:
            raise NotImplementedError(
                "Unsupported platform: {}".format(sys.platform)
            )

        # check if the default path has it...
        print(("Unable to find module DaVinciResolveScript from "
               "$PYTHONPATH - trying default locations"))

        module_path = os.path.normpath(
            os.path.join(
                expected_path,
                "DaVinciResolveScript.py"
            )
        )

        try:
            import imp
            bmd = imp.load_source('DaVinciResolveScript', module_path)
        except ImportError:
            # No fallbacks ... report error:
            log.error(
                ("Unable to find module DaVinciResolveScript - please "
                 "ensure that the module DaVinciResolveScript is "
                 "discoverable by python")
            )
            log.error(
                ("For a default DaVinci Resolve installation, the "
                 f"module is expected to be located in: {expected_path}")
            )
            sys.exit()
    # assign global var and return
    bmdvr = bmd.scriptapp("Resolve")
    bmdvf = bmd.scriptapp("Fusion")
    api.bmdvr = bmdvr
    api.bmdvf = bmdvf
    log.info(("Assigning resolve module to "
              f"`ayon_resolve.api.bmdvr`: {api.bmdvr}"))
    log.info(("Assigning resolve module to "
              f"`ayon_resolve.api.bmdvf`: {api.bmdvf}"))
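`get_resolve_module()` caches the scripting apps on `ayon_resolve.api`; callers read them from there afterwards. A minimal sketch, assuming the Modules directory is not yet on `PYTHONPATH` (the default Windows install location is shown, mirroring the fallback above):

```
import os
import sys

# assumption: default Windows install location, as in the fallback logic
modules = os.path.join(
    os.getenv("PROGRAMDATA", ""),
    "Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting/Modules",
)
sys.path.append(modules)

get_resolve_module()  # populates ayon_resolve.api.bmdvr / bmdvf

from ayon_resolve import api
print(api.bmdvr.GetProductName())  # sanity check, e.g. "DaVinci Resolve"
```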
96
server_addon/resolve/client/ayon_resolve/api/workio.py
Normal file

@ -0,0 +1,96 @@
"""Host API required by the Work Files tool"""

import os
from ayon_core.lib import Logger
from .lib import (
    get_project_manager,
    get_current_project
)


log = Logger.get_logger(__name__)


def file_extensions():
    return [".drp"]


def has_unsaved_changes():
    get_project_manager().SaveProject()
    return False


def save_file(filepath):
    pm = get_project_manager()
    file = os.path.basename(filepath)
    fname, _ = os.path.splitext(file)
    project = get_current_project()
    name = project.GetName()

    response = False
    if name == "Untitled Project":
        response = pm.CreateProject(fname)
        log.info("New project created: {}".format(response))
        pm.SaveProject()
    elif name != fname:
        response = project.SetName(fname)
        log.info("Project renamed: {}".format(response))

    exported = pm.ExportProject(fname, filepath)
    log.info("Project exported: {}".format(exported))


def open_file(filepath):
    """
    Load a project.
    """

    from . import bmdvr

    pm = get_project_manager()
    page = bmdvr.GetCurrentPage()
    if page is not None:
        # Save current project only if Resolve has an active page, otherwise
        # we consider Resolve being in a pre-launch state (no open UI yet)
        project = pm.GetCurrentProject()
        print(f"Saving current project: {project}")
        pm.SaveProject()

    file = os.path.basename(filepath)
    fname, _ = os.path.splitext(file)

    try:
        # load project from input path
        project = pm.LoadProject(fname)
        log.info(f"Project {project.GetName()} opened...")

    except AttributeError:
        log.warning((f"Project with name `{fname}` does not exist! It will "
                     f"be imported from {filepath} and then loaded..."))
        if pm.ImportProject(filepath):
            # load project from input path
            project = pm.LoadProject(fname)
            log.info(f"Project imported/loaded {project.GetName()}...")
            return True
        return False
    return True


def current_file():
    pm = get_project_manager()
    file_ext = file_extensions()[0]
    workdir_path = os.getenv("AYON_WORKDIR")
    project = pm.GetCurrentProject()
    project_name = project.GetName()
    file_name = project_name + file_ext

    # create current file path
    current_file_path = os.path.join(workdir_path, file_name)

    # return current file path if it exists
    if os.path.exists(current_file_path):
        return os.path.normpath(current_file_path)


def work_root(session):
    return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")
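A sketch of the round-trip these Work Files functions provide, assuming a running Resolve session with the integration initialized; the paths are illustrative:

```
session = {"AYON_WORKDIR": "C:/work/sh010"}
root = work_root(session)

open_file(f"{root}/sh010_edit_v001.drp")  # imports the project if unknown
save_file(f"{root}/sh010_edit_v002.drp")  # renames and exports the project

print(current_file())  # saved .drp under AYON_WORKDIR, or None
```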
@ -0,0 +1,35 @@
import os
from ayon_applications import PreLaunchHook, LaunchTypes


class PreLaunchResolveLastWorkfile(PreLaunchHook):
    """Special hook to open the last workfile for Resolve.

    Checks 'start_last_workfile'; if set to False, the last workfile
    will not be opened. This property is set explicitly in the Launcher.
    """
    order = 10
    app_groups = {"resolve"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        if not self.data.get("start_last_workfile"):
            self.log.info("It is set to not start last workfile on start.")
            return

        last_workfile = self.data.get("last_workfile_path")
        if not last_workfile:
            self.log.warning("Last workfile was not collected.")
            return

        if not os.path.exists(last_workfile):
            self.log.info("Current context does not have any workfile yet.")
            return

        # Add path to launch environment for the startup script to pick up
        self.log.info(
            "Setting AYON_RESOLVE_OPEN_ON_LAUNCH to launch "
            f"last workfile: {last_workfile}"
        )
        key = "AYON_RESOLVE_OPEN_ON_LAUNCH"
        self.launch_context.env[key] = last_workfile
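The hook only exports `AYON_RESOLVE_OPEN_ON_LAUNCH`; something in the Resolve startup phase has to consume it. A hypothetical sketch of the consuming side, assuming it runs inside Resolve once the scripting API is available:

```
import os

from ayon_resolve.api import workio

workfile = os.getenv("AYON_RESOLVE_OPEN_ON_LAUNCH")
if workfile and os.path.exists(workfile):
    workio.open_file(workfile)
```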
@ -0,0 +1,138 @@
import os
from pathlib import Path
import platform

from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_resolve.utils import setup


class PreLaunchResolveSetup(PreLaunchHook):
    """
    This hook will set up the Resolve scripting environment as described in
    Resolve's documentation found with the installed application at
    {resolve}/Support/Developer/Scripting/README.txt

    Prepares the following environment variables:
    - `RESOLVE_SCRIPT_API`
    - `RESOLVE_SCRIPT_LIB`

    It adds $RESOLVE_SCRIPT_API/Modules to PYTHONPATH.

    Additionally it sets up the Python home for Python 3 based on
    RESOLVE_PYTHON3_HOME in the environment (usually defined in OpenPype's
    Application environment for Resolve by the admin). For this it sets
    the PYTHONHOME and PATH variables.

    It also defines:
    - `RESOLVE_UTILITY_SCRIPTS_DIR`: destination directory for OpenPype
      Fusion scripts to be copied to, for Resolve to pick them up.
    - `AYON_LOG_NO_COLORS`, set to ensure OP doesn't try to
      use logging with terminal colors, as that fails in Resolve.
    """

    app_groups = {"resolve"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        current_platform = platform.system().lower()

        programdata = self.launch_context.env.get("PROGRAMDATA", "")
        resolve_script_api_locations = {
            "windows": (
                f"{programdata}/Blackmagic Design/"
                "DaVinci Resolve/Support/Developer/Scripting"
            ),
            "darwin": (
                "/Library/Application Support/Blackmagic Design"
                "/DaVinci Resolve/Developer/Scripting"
            ),
            "linux": "/opt/resolve/Developer/Scripting",
        }
        resolve_script_api = Path(
            resolve_script_api_locations[current_platform]
        )
        self.log.info(
            f"setting RESOLVE_SCRIPT_API variable to {resolve_script_api}"
        )
        self.launch_context.env[
            "RESOLVE_SCRIPT_API"
        ] = resolve_script_api.as_posix()

        resolve_script_lib_dirs = {
            "windows": (
                "C:/Program Files/Blackmagic Design"
                "/DaVinci Resolve/fusionscript.dll"
            ),
            "darwin": (
                "/Applications/DaVinci Resolve/DaVinci Resolve.app"
                "/Contents/Libraries/Fusion/fusionscript.so"
            ),
            "linux": "/opt/resolve/libs/Fusion/fusionscript.so",
        }
        resolve_script_lib = Path(resolve_script_lib_dirs[current_platform])
        self.launch_context.env[
            "RESOLVE_SCRIPT_LIB"
        ] = resolve_script_lib.as_posix()
        self.log.info(
            f"setting RESOLVE_SCRIPT_LIB variable to {resolve_script_lib}"
        )

        # TODO: add OTIO installation from `openpype/requirements.py`
        # making sure python <3.9.* is installed at the provided path
        python3_home = Path(
            self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "")
        )

        assert python3_home.is_dir(), (
            "Python 3 is not installed at the provided folder path. Either "
            "make sure the `environments\\resolve.json` has a correctly "
            "set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed "
            f"in the given path. \nRESOLVE_PYTHON3_HOME: `{python3_home}`"
        )
        python3_home_str = python3_home.as_posix()
        self.launch_context.env["PYTHONHOME"] = python3_home_str
        self.log.info(f"Path to Resolve Python folder: `{python3_home_str}`")

        # add to the PYTHONPATH
        env_pythonpath = self.launch_context.env["PYTHONPATH"]
        modules_path = Path(resolve_script_api, "Modules").as_posix()
        self.launch_context.env[
            "PYTHONPATH"
        ] = f"{modules_path}{os.pathsep}{env_pythonpath}"

        self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}")

        # add the pythonhome folder to PATH because on Windows
        # this is needed for Py3 to be correctly detected within Resolve
        env_path = self.launch_context.env["PATH"]
        self.log.info(f"Adding `{python3_home_str}` to the PATH variable")
        self.launch_context.env[
            "PATH"
        ] = f"{python3_home_str}{os.pathsep}{env_path}"

        self.log.debug(f"PATH: {self.launch_context.env['PATH']}")

        resolve_utility_scripts_dirs = {
            "windows": (
                f"{programdata}/Blackmagic Design"
                "/DaVinci Resolve/Fusion/Scripts/Comp"
            ),
            "darwin": (
                "/Library/Application Support/Blackmagic Design"
                "/DaVinci Resolve/Fusion/Scripts/Comp"
            ),
            "linux": "/opt/resolve/Fusion/Scripts/Comp",
        }
        resolve_utility_scripts_dir = Path(
            resolve_utility_scripts_dirs[current_platform]
        )
        # set the utility scripts dir for scripts syncing
        self.launch_context.env[
            "RESOLVE_UTILITY_SCRIPTS_DIR"
        ] = resolve_utility_scripts_dir.as_posix()

        # remove terminal coloring tags
        self.launch_context.env["AYON_LOG_NO_COLORS"] = "1"

        # Resolve Setup integration
        setup(self.launch_context.env)
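To verify what this hook produced, the variables can be inspected from Resolve's Python console once the application is up; a minimal sketch:

```
import os

for key in (
    "RESOLVE_SCRIPT_API",
    "RESOLVE_SCRIPT_LIB",
    "RESOLVE_UTILITY_SCRIPTS_DIR",
    "PYTHONHOME",
    "AYON_LOG_NO_COLORS",
):
    print(key, "=", os.environ.get(key))
```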
@ -0,0 +1,24 @@
import os

from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_resolve import RESOLVE_ADDON_ROOT


class PreLaunchResolveStartup(PreLaunchHook):
    """Special hook to configure the startup script."""
    order = 11
    app_groups = {"resolve"}
    launch_types = {LaunchTypes.local}

    def execute(self):
        # Set the openpype prelaunch startup script path for easy access
        # in the LUA .scriptlib code
        script_path = os.path.join(RESOLVE_ADDON_ROOT, "startup.py")
        key = "AYON_RESOLVE_STARTUP_SCRIPT"
        self.launch_context.env[key] = script_path

        self.log.info(
            f"Setting AYON_RESOLVE_STARTUP_SCRIPT to: {script_path}"
        )
326
server_addon/resolve/client/ayon_resolve/otio/davinci_export.py
Normal file

@ -0,0 +1,326 @@
"""Compatibility with OpenTimelineIO 0.12.0 and older."""

import os
import re
import sys
import json
import opentimelineio as otio
from . import utils
import clique

self = sys.modules[__name__]
self.track_types = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
self.project_fps = None


def create_otio_rational_time(frame, fps):
    return otio.opentime.RationalTime(
        float(frame),
        float(fps)
    )


def create_otio_time_range(start_frame, frame_duration, fps):
    return otio.opentime.TimeRange(
        start_time=create_otio_rational_time(start_frame, fps),
        duration=create_otio_rational_time(frame_duration, fps)
    )


def create_otio_reference(media_pool_item):
    metadata = _get_metadata_media_pool_item(media_pool_item)
    print("media pool item: {}".format(media_pool_item.GetName()))

    _mp_clip_property = media_pool_item.GetClipProperty

    path = _mp_clip_property("File Path")
    reformat_path = utils.get_reformated_path(path, padded=True)
    padding = utils.get_padding_from_path(path)

    if padding:
        metadata.update({
            "isSequence": True,
            "padding": padding
        })

    # get clip property depending on its type
    fps = float(_mp_clip_property("FPS"))
    if _mp_clip_property("Type") == "Video":
        frame_start = int(_mp_clip_property("Start"))
        frame_duration = int(_mp_clip_property("Frames"))
    else:
        audio_duration = str(_mp_clip_property("Duration"))
        frame_start = 0
        frame_duration = int(utils.timecode_to_frames(
            audio_duration, float(fps)))

    otio_ex_ref_item = None

    if padding:
        # if it is a file sequence try to create `ImageSequenceReference`;
        # the OTIO version might not be compatible, so fall through and
        # do it the old way
        try:
            dirname, filename = os.path.split(path)
            collection = clique.parse(filename, '{head}[{ranges}]{tail}')
            padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
            otio_ex_ref_item = otio.schema.ImageSequenceReference(
                target_url_base=dirname + os.sep,
                name_prefix=collection.format("{head}"),
                name_suffix=collection.format("{tail}"),
                start_frame=frame_start,
                frame_zero_padding=padding_num,
                rate=fps,
                available_range=create_otio_time_range(
                    frame_start,
                    frame_duration,
                    fps
                )
            )
        except AttributeError:
            pass

    if not otio_ex_ref_item:
        # in case of an old OTIO or a video file create `ExternalReference`
        otio_ex_ref_item = otio.schema.ExternalReference(
            target_url=reformat_path,
            available_range=create_otio_time_range(
                frame_start,
                frame_duration,
                fps
            )
        )

    # add metadata to otio item
    add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)

    return otio_ex_ref_item


def create_otio_markers(track_item, fps):
    track_item_markers = track_item.GetMarkers()
    markers = []
    for marker_frame in track_item_markers:
        note = track_item_markers[marker_frame]["note"]
        if "{" in note and "}" in note:
            metadata = json.loads(note)
        else:
            metadata = {"note": note}
        markers.append(
            otio.schema.Marker(
                name=track_item_markers[marker_frame]["name"],
                marked_range=create_otio_time_range(
                    marker_frame,
                    track_item_markers[marker_frame]["duration"],
                    fps
                ),
                color=track_item_markers[marker_frame]["color"].upper(),
                metadata=metadata
            )
        )
    return markers


def create_otio_clip(track_item):
    media_pool_item = track_item.GetMediaPoolItem()
    _mp_clip_property = media_pool_item.GetClipProperty

    if not self.project_fps:
        fps = float(_mp_clip_property("FPS"))
    else:
        fps = self.project_fps

    name = track_item.GetName()

    media_reference = create_otio_reference(media_pool_item)
    source_range = create_otio_time_range(
        int(track_item.GetLeftOffset()),
        int(track_item.GetDuration()),
        fps
    )

    if _mp_clip_property("Type") == "Audio":
        return_clips = list()
        audio_chanels = _mp_clip_property("Audio Ch")
        for channel in range(0, int(audio_chanels)):
            clip = otio.schema.Clip(
                name=f"{name}_{channel}",
                source_range=source_range,
                media_reference=media_reference
            )
            for marker in create_otio_markers(track_item, fps):
                clip.markers.append(marker)
            return_clips.append(clip)
        return return_clips
    else:
        clip = otio.schema.Clip(
            name=name,
            source_range=source_range,
            media_reference=media_reference
        )
        for marker in create_otio_markers(track_item, fps):
            clip.markers.append(marker)

        return clip


def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    return otio.schema.Gap(
        source_range=create_otio_time_range(
            gap_start,
            (clip_start - tl_start_frame) - gap_start,
            fps
        )
    )


def _create_otio_timeline(project, timeline, fps):
    metadata = _get_timeline_metadata(project, timeline)
    start_time = create_otio_rational_time(
        timeline.GetStartFrame(), fps)
    otio_timeline = otio.schema.Timeline(
        name=timeline.GetName(),
        global_start_time=start_time,
        metadata=metadata
    )
    return otio_timeline


def _get_timeline_metadata(project, timeline):
    media_pool = project.GetMediaPool()
    root_folder = media_pool.GetRootFolder()
    ls_folder = root_folder.GetClipList()
    timeline = project.GetCurrentTimeline()
    timeline_name = timeline.GetName()
    for tl in ls_folder:
        if tl.GetName() not in timeline_name:
            continue
        return _get_metadata_media_pool_item(tl)


def _get_metadata_media_pool_item(media_pool_item):
    data = dict()
    data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
    property = media_pool_item.GetClipProperty() or {}
    for name, value in property.items():
        if "Resolution" in name and "" != value:
            width, height = value.split("x")
            data.update({
                "width": int(width),
                "height": int(height)
            })
        if "PAR" in name and "" != value:
            try:
                data.update({"pixelAspect": float(value)})
            except ValueError:
                if "Square" in value:
                    data.update({"pixelAspect": float(1)})
                else:
                    data.update({"pixelAspect": float(1)})

    return data


def create_otio_track(track_type, track_name):
    return otio.schema.Track(
        name=track_name,
        kind=self.track_types[track_type]
    )


def add_otio_gap(clip_start, otio_track, track_item, timeline):
    # if there is a gap between the track start and the clip start
    if clip_start > otio_track.available_range().duration.value:
        # create a gap and add it to the track
        otio_track.append(
            create_otio_gap(
                otio_track.available_range().duration.value,
                track_item.GetStart(),
                timeline.GetStartFrame(),
                self.project_fps
            )
        )


def add_otio_metadata(otio_item, media_pool_item, **kwargs):
    mp_metadata = media_pool_item.GetMetadata()
    # add additional metadata from kwargs
    if kwargs:
        mp_metadata.update(kwargs)

    # add metadata to the otio item metadata
    for key, value in mp_metadata.items():
        otio_item.metadata.update({key: value})


def create_otio_timeline(resolve_project):

    # get current timeline
    self.project_fps = resolve_project.GetSetting("timelineFrameRate")
    timeline = resolve_project.GetCurrentTimeline()

    # convert timeline to otio
    otio_timeline = _create_otio_timeline(
        resolve_project, timeline, self.project_fps)

    # loop all defined track types
    for track_type in list(self.track_types.keys()):
        # get total track count
        track_count = timeline.GetTrackCount(track_type)

        # loop all tracks by track indexes
        for track_index in range(1, int(track_count) + 1):
            # get current track name
            track_name = timeline.GetTrackName(track_type, track_index)

            # convert track to otio
            otio_track = create_otio_track(
                track_type, track_name)

            # get all track items in current track
            current_track_items = timeline.GetItemListInTrack(
                track_type, track_index)

            # loop available track items in current track items
            for track_item in current_track_items:
                # skip offline track items
                if track_item.GetMediaPoolItem() is None:
                    continue

                # calculate real clip start
                clip_start = track_item.GetStart() - timeline.GetStartFrame()

                add_otio_gap(
                    clip_start, otio_track, track_item, timeline)

                # create otio clip and add it to the track
                otio_clip = create_otio_clip(track_item)

                if not isinstance(otio_clip, list):
                    otio_track.append(otio_clip)
                else:
                    for index, clip in enumerate(otio_clip):
                        if index == 0:
                            otio_track.append(clip)
                        else:
                            # add previous otio track to the timeline
                            otio_timeline.tracks.append(otio_track)
                            # convert track to otio
                            otio_track = create_otio_track(
                                track_type, track_name)
                            add_otio_gap(
                                clip_start, otio_track,
                                track_item, timeline)
                            otio_track.append(clip)

            # add track to otio timeline
            otio_timeline.tracks.append(otio_track)

    return otio_timeline


def write_to_file(otio_timeline, path):
    otio.adapters.write_to_file(otio_timeline, path)
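A minimal export sketch built on the module above, assuming an initialized `ayon_resolve` session where `api.bmdvr` is set; the output path is illustrative:

```
from ayon_resolve.api import bmdvr
from ayon_resolve.otio import davinci_export

project = bmdvr.GetProjectManager().GetCurrentProject()
otio_timeline = davinci_export.create_otio_timeline(project)
davinci_export.write_to_file(otio_timeline, "/tmp/current_timeline.otio")
```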
108
server_addon/resolve/client/ayon_resolve/otio/davinci_import.py
Normal file

@ -0,0 +1,108 @@
import sys
import json
import DaVinciResolveScript
import opentimelineio as otio


self = sys.modules[__name__]
self.resolve = DaVinciResolveScript.scriptapp('Resolve')
self.fusion = DaVinciResolveScript.scriptapp('Fusion')
self.project_manager = self.resolve.GetProjectManager()
self.current_project = self.project_manager.GetCurrentProject()
self.media_pool = self.current_project.GetMediaPool()
self.track_types = {
    "video": otio.schema.TrackKind.Video,
    "audio": otio.schema.TrackKind.Audio
}
self.project_fps = None


def build_timeline(otio_timeline):
    # TODO: build timeline in mediapool `otioImport` folder
    # TODO: loop otio tracks and build them in the new timeline
    for clip in otio_timeline.each_clip():
        # TODO: create track item
        print(clip.name)
        print(clip.parent().name)
        print(clip.range_in_parent())


def _build_track(otio_track):
    # TODO: _build_track
    pass


def _build_media_pool_item(otio_media_reference):
    # TODO: _build_media_pool_item
    pass


def _build_track_item(otio_clip):
    # TODO: _build_track_item
    pass


def _build_gap(otio_clip):
    # TODO: _build_gap
    pass


def _build_marker(track_item, otio_marker):
    frame_start = otio_marker.marked_range.start_time.value
    frame_duration = otio_marker.marked_range.duration.value

    # marker attributes
    frameId = (frame_start / 10) * 10
    color = otio_marker.color
    name = otio_marker.name
    note = otio_marker.metadata.get("note") or json.dumps(otio_marker.metadata)
    duration = (frame_duration / 10) * 10

    track_item.AddMarker(
        frameId,
        color,
        name,
        note,
        duration
    )


def _build_media_pool_folder(name):
    """
    Return the folder with the input name and set it as the current folder.

    A new media bin is created if none is found in the root media bin.

    Args:
        name (str): name of bin

    Returns:
        resolve.api.MediaPool.Folder: the current media pool folder

    """

    root_folder = self.media_pool.GetRootFolder()
    sub_folders = root_folder.GetSubFolderList()
    testing_names = list()

    for subfolder in sub_folders:
        subf_name = subfolder.GetName()
        if name in subf_name:
            testing_names.append(subfolder)
        else:
            testing_names.append(False)

    matching = next((f for f in testing_names if f is not False), None)

    if not matching:
        new_folder = self.media_pool.AddSubFolder(root_folder, name)
        self.media_pool.SetCurrentFolder(new_folder)
    else:
        self.media_pool.SetCurrentFolder(matching)

    return self.media_pool.GetCurrentFolder()


def read_from_file(otio_file):
    otio_timeline = otio.adapters.read_from_file(otio_file)
    build_timeline(otio_timeline)
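`build_timeline` above is still a TODO skeleton, so reading a file currently only prints clip information; a sketch with an illustrative path:

```
from ayon_resolve.otio import davinci_import

# only prints clip name / parent / range until the TODOs are implemented
davinci_import.read_from_file("/tmp/current_timeline.otio")
```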
70
server_addon/resolve/client/ayon_resolve/otio/utils.py
Normal file

@ -0,0 +1,70 @@
import re
import opentimelineio as otio


def timecode_to_frames(timecode, framerate):
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))


def frames_to_timecode(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_timecode(rt)


def frames_to_secons(frames, framerate):
    rt = otio.opentime.from_frames(frames, framerate)
    return otio.opentime.to_seconds(rt)


def get_reformated_path(path, padded=True, first=False):
    """
    Return a path with the frame-range token replaced by a python
    string-formatting expression.

    Args:
        path (str): path url or simple file name

    Returns:
        str: reformatted path

    Example:
        get_reformated_path("plate.[0001-1008].exr") > plate.%04d.exr

    """
    num_pattern = r"(\[\d+\-\d+\])"
    padding_pattern = r"(\d+)(?=-)"
    first_frame_pattern = re.compile(r"\[(\d+)\-\d+\]")

    if "[" in path:
        padding = len(re.findall(padding_pattern, path).pop())
        if padded:
            path = re.sub(num_pattern, f"%0{padding}d", path)
        elif first:
            first_frame = re.findall(first_frame_pattern, path, flags=0)
            if len(first_frame) >= 1:
                first_frame = first_frame[0]
            path = re.sub(num_pattern, first_frame, path)
        else:
            path = re.sub(num_pattern, "%d", path)
    return path


def get_padding_from_path(path):
    """
    Return the padding number from a DaVinci Resolve style sequence path.

    Args:
        path (str): path url or simple file name

    Returns:
        int: padding number

    Example:
        get_padding_from_path("plate.[0001-1008].exr") > 4

    """
    padding_pattern = "(\\d+)(?=-)"
    if "[" in path:
        return len(re.findall(padding_pattern, path).pop())

    return None
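Expected behaviour of the two path helpers, shown doctest-style:

```
>>> get_reformated_path("plate.[0001-1008].exr")
'plate.%04d.exr'
>>> get_reformated_path("plate.[0001-1008].exr", padded=False, first=True)
'plate.0001.exr'
>>> get_padding_from_path("plate.[0001-1008].exr")
4
>>> get_padding_from_path("plate.mov") is None
True
```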
@ -0,0 +1,272 @@
# from pprint import pformat
from ayon_resolve.api import plugin, lib
from ayon_resolve.api.lib import (
    get_video_track_names,
    create_bin,
)


class CreateShotClip(plugin.Creator):
    """Publishable clip"""

    label = "Create Publishable Clip"
    product_type = "clip"
    icon = "film"
    defaults = ["Main"]

    gui_tracks = get_video_track_names()
    gui_name = "AYON publish attributes creator"
    gui_info = "Define sequential rename and fill hierarchy data."
    gui_inputs = {
        "renameHierarchy": {
            "type": "section",
            "label": "Shot Hierarchy And Rename Settings",
            "target": "ui",
            "order": 0,
            "value": {
                "hierarchy": {
                    "value": "{folder}/{sequence}",
                    "type": "QLineEdit",
                    "label": "Shot Parent Hierarchy",
                    "target": "tag",
                    "toolTip": "Parent folder for the shot root folder. Template filled from the `Hierarchy Data` section",  # noqa
                    "order": 0},
                "clipRename": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Rename clips",
                    "target": "ui",
                    "toolTip": "Rename selected clips on the fly",  # noqa
                    "order": 1},
                "clipName": {
                    "value": "{sequence}{shot}",
                    "type": "QLineEdit",
                    "label": "Clip Name Template",
                    "target": "ui",
                    "toolTip": "Template for creating shot names, used for renaming (use Rename clips: on)",  # noqa
                    "order": 2},
                "countFrom": {
                    "value": 10,
                    "type": "QSpinBox",
                    "label": "Count sequence from",
                    "target": "ui",
                    "toolTip": "Set the number the sequence counting starts from",  # noqa
                    "order": 3},
                "countSteps": {
                    "value": 10,
                    "type": "QSpinBox",
                    "label": "Stepping number",
                    "target": "ui",
                    "toolTip": "Number added with every new step",  # noqa
                    "order": 4},
            }
        },
        "hierarchyData": {
            "type": "dict",
            "label": "Shot Template Keywords",
            "target": "tag",
            "order": 1,
            "value": {
                "folder": {
                    "value": "shots",
                    "type": "QLineEdit",
                    "label": "{folder}",
                    "target": "tag",
                    "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 0},
                "episode": {
                    "value": "ep01",
                    "type": "QLineEdit",
                    "label": "{episode}",
                    "target": "tag",
                    "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 1},
                "sequence": {
                    "value": "sq01",
                    "type": "QLineEdit",
                    "label": "{sequence}",
                    "target": "tag",
                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 2},
                "track": {
                    "value": "{_track_}",
                    "type": "QLineEdit",
                    "label": "{track}",
                    "target": "tag",
                    "toolTip": "Name of timeline track.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 3},
                "shot": {
                    "value": "sh###",
                    "type": "QLineEdit",
                    "label": "{shot}",
                    "target": "tag",
                    "toolTip": "Name of shot. `#` is converted to a padded number.\nCan also be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 4}
            }
        },
        "verticalSync": {
            "type": "section",
            "label": "Vertical Synchronization Of Attributes",
            "target": "ui",
            "order": 2,
            "value": {
                "vSyncOn": {
                    "value": True,
                    "type": "QCheckBox",
                    "label": "Enable Vertical Sync",
                    "target": "ui",
                    "toolTip": "Switch on if you want clips above each other to share their attributes",  # noqa
                    "order": 0},
                "vSyncTrack": {
                    "value": gui_tracks,  # noqa
                    "type": "QComboBox",
                    "label": "Hero track",
                    "target": "ui",
                    "toolTip": "Select the driving track name which should master all others",  # noqa
                    "order": 1
                }
            }
        },
        "publishSettings": {
            "type": "section",
            "label": "Publish Settings",
            "target": "ui",
            "order": 3,
            "value": {
                "productName": {
                    "value": ["<track_name>", "main", "bg", "fg", "bg",
                              "animatic"],
                    "type": "QComboBox",
                    "label": "Product Name",
                    "target": "ui",
                    "toolTip": "Choose the product name pattern; if <track_name> is selected, the name of the track layer will be used",  # noqa
                    "order": 0},
                "productType": {
                    "value": ["plate", "take"],
                    "type": "QComboBox",
                    "label": "Product type",
                    "target": "ui",
                    "toolTip": "What this product is used for",  # noqa
                    "order": 1},
                "reviewTrack": {
                    "value": ["< none >"] + gui_tracks,
                    "type": "QComboBox",
                    "label": "Use Review Track",
                    "target": "ui",
                    "toolTip": "Generate preview videos on the fly; if `< none >` is defined, nothing will be generated.",  # noqa
                    "order": 2},
                "audio": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Include audio",
                    "target": "tag",
                    "toolTip": "Process products with corresponding audio",  # noqa
                    "order": 3},
                "sourceResolution": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Source resolution",
                    "target": "tag",
                    "toolTip": "Is the resolution taken from the timeline or the source?",  # noqa
                    "order": 4},
            }
        },
        "shotAttr": {
            "type": "section",
            "label": "Shot Attributes",
            "target": "ui",
            "order": 4,
            "value": {
                "workfileFrameStart": {
                    "value": 1001,
                    "type": "QSpinBox",
                    "label": "Workfiles Start Frame",
                    "target": "tag",
                    "toolTip": "Set the workfile starting frame number",  # noqa
                    "order": 0
                },
                "handleStart": {
                    "value": 0,
                    "type": "QSpinBox",
                    "label": "Handle start (head)",
                    "target": "tag",
                    "toolTip": "Handle at start of clip",  # noqa
                    "order": 1
                },
                "handleEnd": {
                    "value": 0,
                    "type": "QSpinBox",
                    "label": "Handle end (tail)",
                    "target": "tag",
                    "toolTip": "Handle at end of clip",  # noqa
                    "order": 2
                }
            }
        }
    }

    presets = None

    def process(self):
        # get key pairs from presets and match them to ui inputs
        for k, v in self.gui_inputs.items():
            if v["type"] in ("dict", "section"):
                # nested dictionary (only one level allowed
                # for sections and dict)
                for _k, _v in v["value"].items():
                    if self.presets.get(_k) is not None:
                        self.gui_inputs[k][
                            "value"][_k]["value"] = self.presets[_k]
            if self.presets.get(k):
                self.gui_inputs[k]["value"] = self.presets[k]

        # open widget for plugin inputs
        widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
        widget.exec_()

        if len(self.selected) < 1:
            return

        if not widget.result:
            print("Operation aborted")
            return

        self.rename_add = 0

        # get ui output for track name for vertical sync
        v_sync_track = widget.result["vSyncTrack"]["value"]

        # sort selected track items so the hero (vSync) track comes first
        sorted_selected_track_items = []
        unsorted_selected_track_items = []
        print("_____ selected ______")
        print(self.selected)
        for track_item_data in self.selected:
            if track_item_data["track"]["name"] in v_sync_track:
                sorted_selected_track_items.append(track_item_data)
            else:
                unsorted_selected_track_items.append(track_item_data)

        sorted_selected_track_items.extend(unsorted_selected_track_items)

        # sequence attrs
        sq_frame_start = self.timeline.GetStartFrame()
        sq_markers = self.timeline.GetMarkers()

        # create media bin for compound clips (trackItems)
        mp_folder = create_bin(self.timeline.GetName())

        kwargs = {
            "ui_inputs": widget.result,
            "avalon": self.data,
            "mp_folder": mp_folder,
            "sq_frame_start": sq_frame_start,
            "sq_markers": sq_markers
        }
        print(kwargs)
        for i, track_item_data in enumerate(sorted_selected_track_items):
            self.rename_index = i
            self.log.info(track_item_data)
            # convert track item to timeline media pool item
            track_item = plugin.PublishClip(
                self, track_item_data, **kwargs).convert()
            track_item.SetClipColor(lib.publish_clip_color)
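`process()` pre-fills the dialog from `self.presets` by matching the leaf key names of `gui_inputs`. A hypothetical presets mapping (key names taken from the inputs above, values purely illustrative):

```
CreateShotClip.presets = {
    "hierarchy": "{folder}/{sequence}",
    "countFrom": 10,
    "countSteps": 10,
    "vSyncOn": True,
    "workfileFrameStart": 1001,
    "handleStart": 10,
    "handleEnd": 10,
}
```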
@ -0,0 +1,168 @@
import ayon_api

from ayon_resolve.api import lib, plugin
from ayon_resolve.api.pipeline import (
    containerise,
    update_container,
)
from ayon_core.lib.transcoding import (
    VIDEO_EXTENSIONS,
    IMAGE_EXTENSIONS
)


class LoadClip(plugin.TimelineItemLoader):
    """Load a product to the timeline as a clip

    Places the clip on the timeline at its original asset timings
    collected during conform to the project.
    """

    product_types = {"render2d", "source", "plate", "render", "review"}

    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
    icon = "code-fork"
    color = "orange"

    # for loader multiselection
    timeline = None

    # presets
    clip_color_last = "Olive"
    clip_color = "Orange"

    def load(self, context, name, namespace, options):

        # load clip to timeline and get main variables
        files = plugin.get_representation_files(context["representation"])

        timeline_item = plugin.ClipLoader(
            self, context, **options).load(files)
        namespace = namespace or timeline_item.GetName()

        # update color of clip regarding the version order
        self.set_item_color(
            context["project"]["name"],
            timeline_item,
            context["version"]
        )

        data_imprint = self.get_tag_data(context, name, namespace)
        return containerise(
            timeline_item,
            name, namespace, context,
            self.__class__.__name__,
            data_imprint)

    def switch(self, container, context):
        self.update(container, context)

    def update(self, container, context):
        """Update previously loaded clips"""

        repre_entity = context["representation"]
        name = container['name']
        namespace = container['namespace']
        timeline_item = container["_timeline_item"]

        media_pool_item = timeline_item.GetMediaPoolItem()

        files = plugin.get_representation_files(repre_entity)

        loader = plugin.ClipLoader(self, context)
        timeline_item = loader.update(timeline_item, files)

        # update color of clip regarding the version order
        self.set_item_color(
            context["project"]["name"],
            timeline_item,
            context["version"]
        )

        # if the original media pool item has no remaining usages left,
        # remove it from the media pool
        if int(media_pool_item.GetClipProperty("Usage")) == 0:
            lib.remove_media_pool_item(media_pool_item)

        data_imprint = self.get_tag_data(context, name, namespace)
        return update_container(timeline_item, data_imprint)

    def get_tag_data(self, context, name, namespace):
        """Return data to be imprinted on the timeline item marker"""

        repre_entity = context["representation"]
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        colorspace = version_attributes.get("colorSpace", None)
        object_name = "{}_{}".format(name, namespace)

        # add additional metadata from the version to imprint Avalon knob
        # move all version data keys to tag data
        add_version_data_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]
        data = {
            key: version_attributes.get(key, "None")
            for key in add_version_data_keys
        }

        # add variables related to version context
        data.update({
            "representation": repre_entity["id"],
            "version": version_entity["version"],
            "colorspace": colorspace,
            "objectName": object_name
        })
        return data

    @classmethod
    def set_item_color(cls, project_name, timeline_item, version_entity):
        """Color the timeline item based on whether it is outdated or latest"""
        # get the last version entity
        last_version_entity = ayon_api.get_last_version_by_product_id(
            project_name,
            version_entity["productId"],
            fields=["name"]
        )
        last_version_id = None
        if last_version_entity:
            last_version_id = last_version_entity["id"]

        # set clip colour
        if version_entity["id"] == last_version_id:
            timeline_item.SetClipColor(cls.clip_color_last)
        else:
            timeline_item.SetClipColor(cls.clip_color)

    def remove(self, container):
        timeline_item = container["_timeline_item"]
        media_pool_item = timeline_item.GetMediaPoolItem()
        timeline = lib.get_current_timeline()

        # The DeleteClips function was added in Resolve 18.5+;
        # by checking for None we can detect whether the
        # function exists in this Resolve version
        if timeline.DeleteClips is not None:
            timeline.DeleteClips([timeline_item])
        else:
            # Resolve versions older than 18.5 can't delete clips via the API
            # so all we can do is remove the pype marker to 'untag' it
            if lib.get_pype_marker(timeline_item):
                # Note: We must call `get_pype_marker` because
                # `delete_pype_marker` uses a global variable set by
                # `get_pype_marker` to delete the right marker
                # TODO: Improve code to avoid the global `temp_marker_frame`
                lib.delete_pype_marker(timeline_item)

        # if the media pool item has no remaining usages left,
        # remove it from the media pool
        if int(media_pool_item.GetClipProperty("Usage")) == 0:
            lib.remove_media_pool_item(media_pool_item)
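The `extensions` filter above strips the leading dot from every known image and video extension; a quick illustration (the printed values depend on ayon_core's extension lists and are not asserted here):

```
from ayon_core.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS

exts = {ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)}
print(sorted(exts)[:5])  # first few entries, e.g. common image formats
```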
@ -0,0 +1,52 @@
from pathlib import Path

from ayon_core.pipeline import (
    load,
    get_representation_path,
)

from ayon_resolve.api import lib


class LoadEditorialPackage(load.LoaderPlugin):
    """Load an editorial package to the timeline.

    Loads the timeline structure from an OTIO file together with
    its included media sources.
    """

    product_types = {"editorial_pkg"}

    representations = {"*"}
    extensions = {"otio"}

    label = "Load as Timeline"
    order = -10
    icon = "ei.align-left"
    color = "orange"

    def load(self, context, name, namespace, data):
        files = get_representation_path(context["representation"])

        search_folder_path = Path(files).parent / "resources"

        project = lib.get_current_project()
        media_pool = project.GetMediaPool()

        # create versioned bin for editorial package
        version_name = context["version"]["name"]
        bin_name = f"{name}_{version_name}"
        lib.create_bin(bin_name)

        import_options = {
            "timelineName": "Editorial Package Timeline",
            "importSourceClips": True,
            "sourceClipsPath": search_folder_path.as_posix(),
        }

        timeline = media_pool.ImportTimelineFromFile(files, import_options)
        print("Timeline imported: ", timeline)

    def update(self, container, context):
        # TODO: implement update method in future
        pass
@ -0,0 +1,52 @@
import os
import pyblish.api

from ayon_core.pipeline import publish
from ayon_resolve.api.lib import get_project_manager


class ExtractWorkfile(publish.Extractor):
    """
    Extractor exporting the DRP workfile representation.
    """

    label = "Extract Workfile"
    order = pyblish.api.ExtractorOrder
    families = ["workfile"]
    hosts = ["resolve"]

    def process(self, instance):
        # create representation data
        if "representations" not in instance.data:
            instance.data["representations"] = []

        name = instance.data["name"]
        project = instance.context.data["activeProject"]
        staging_dir = self.staging_dir(instance)

        resolve_workfile_ext = ".drp"
        drp_file_name = name + resolve_workfile_ext

        drp_file_path = os.path.normpath(
            os.path.join(staging_dir, drp_file_name))

        # write out the drp workfile
        get_project_manager().ExportProject(
            project.GetName(), drp_file_path)

        # create drp workfile representation
        representation_drp = {
            'name': resolve_workfile_ext[1:],
            'ext': resolve_workfile_ext[1:],
            'files': drp_file_name,
            "stagingDir": staging_dir,
        }

        instance.data["representations"].append(representation_drp)

        # add sourcePath attribute to instance
        if not instance.data.get("sourcePath"):
            instance.data["sourcePath"] = drp_file_path

        self.log.info("Added Resolve file representation: {}".format(
            representation_drp))
@ -0,0 +1,176 @@
from pprint import pformat

import pyblish.api

from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID
from ayon_resolve.api.lib import (
    get_current_timeline_items,
    get_timeline_item_pype_tag,
    publish_clip_color,
    get_publish_attribute,
    get_otio_clip_instance_data,
)


class PrecollectInstances(pyblish.api.ContextPlugin):
    """Collect all selected track items."""

    order = pyblish.api.CollectorOrder - 0.49
    label = "Precollect Instances"
    hosts = ["resolve"]

    def process(self, context):
        otio_timeline = context.data["otioTimeline"]
        selected_timeline_items = get_current_timeline_items(
            filter=True, selecting_color=publish_clip_color)

        self.log.info(
            "Processing enabled track items: {}".format(
                len(selected_timeline_items)))

        for timeline_item_data in selected_timeline_items:

            data = {}
            timeline_item = timeline_item_data["clip"]["item"]

            # get pype tag data
            tag_data = get_timeline_item_pype_tag(timeline_item)
            self.log.debug(f"__ tag_data: {pformat(tag_data)}")

            if not tag_data:
                continue

            if tag_data.get("id") not in {
                AYON_INSTANCE_ID, AVALON_INSTANCE_ID
            }:
                continue

            media_pool_item = timeline_item.GetMediaPoolItem()
            source_duration = int(media_pool_item.GetClipProperty("Frames"))

            # clamp handle lengths to what the source media can provide
            handle_start = min(
                tag_data["handleStart"], int(timeline_item.GetLeftOffset()))
            handle_end = min(
                tag_data["handleEnd"], int(
                    source_duration - timeline_item.GetRightOffset()))

            self.log.debug(
                "Handles: <{}, {}>".format(handle_start, handle_end))

            # add tag data to instance data
            data.update({
                k: v for k, v in tag_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            folder_path = tag_data["folder_path"]
            # Backward compatibility fix of 'entity_type' > 'folder_type'
            if "parents" in data:
                for parent in data["parents"]:
                    if "entity_type" in parent:
                        parent["folder_type"] = parent.pop("entity_type")

            # TODO: remove backward compatibility
            product_name = tag_data.get("productName")
            if product_name is None:
                # backward compatibility: subset -> productName
                product_name = tag_data.get("subset")

            # backward compatibility: product_name should not be missing
            if not product_name:
                self.log.error(
                    "Product name is not defined for: {}".format(folder_path))

            # TODO: remove backward compatibility
            product_type = tag_data.get("productType")
            if product_type is None:
                # backward compatibility: family -> productType
                product_type = tag_data.get("family")

            # backward compatibility: product_type should not be missing
            if not product_type:
                self.log.error(
                    "Product type is not defined for: {}".format(folder_path))

            data.update({
                "name": "{}_{}".format(folder_path, product_name),
                "label": "{} {}".format(folder_path, product_name),
                "folderPath": folder_path,
                "item": timeline_item,
                "publish": get_publish_attribute(timeline_item),
                "fps": context.data["fps"],
                "handleStart": handle_start,
                "handleEnd": handle_end,
                "newAssetPublishing": True,
                "families": ["clip"],
                "productType": product_type,
                "productName": product_name,
                "family": product_type
            })

            # otio clip data
            otio_data = get_otio_clip_instance_data(
                otio_timeline, timeline_item_data) or {}
            data.update(otio_data)

            # add resolution
            self.get_resolution_to_data(data, context)

            # create instance
            instance = context.create_instance(**data)

            # create shot instance for shot attributes create/update
            self.create_shot_instance(context, timeline_item, **data)

            self.log.info("Creating instance: {}".format(instance))
            self.log.debug(
                "_ instance.data: {}".format(pformat(instance.data)))

    def get_resolution_to_data(self, data, context):
        assert data.get("otioClip"), "Missing `otioClip` data"

        # solve source resolution option
        if data.get("sourceResolution"):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata["width"],
                "resolutionHeight": otio_clip_metadata["height"],
                "pixelAspect": otio_clip_metadata["pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["width"],
                "resolutionHeight": otio_tl_metadata["height"],
                "pixelAspect": otio_tl_metadata["pixelAspect"]
            })

    def create_shot_instance(self, context, timeline_item, **data):
        hero_track = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")

        if not hero_track:
            return

        if not hierarchy_data:
            return

        folder_path = data["folderPath"]
        product_name = "shotMain"

        # insert product type into families
        product_type = "shot"

        data.update({
            "name": "{}_{}".format(folder_path, product_name),
            "label": "{} {}".format(folder_path, product_name),
            "folderPath": folder_path,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "publish": get_publish_attribute(timeline_item)
        })

        context.create_instance(**data)

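To make the handle clamping in `process()` concrete, a tiny worked example with made-up numbers; the offsets are interpreted the way the plugin uses them, i.e. as head room before the cut and tail room after it:

```
# Hypothetical numbers illustrating the min() clamping above.
requested_handle_start = 10   # tag_data["handleStart"]
requested_handle_end = 10     # tag_data["handleEnd"]
left_offset = 12              # frames available before the clip's in point
source_duration = 100         # media pool item "Frames" property
right_offset = 96             # timeline_item.GetRightOffset()

handle_start = min(requested_handle_start, left_offset)                 # 10
handle_end = min(requested_handle_end, source_duration - right_offset)  # 4
# The tail only has 4 spare frames, so the requested 10 is clamped to 4.
```
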
@ -0,0 +1,54 @@
import pyblish.api
from pprint import pformat

from ayon_core.pipeline import get_current_folder_path

from ayon_resolve import api as rapi
from ayon_resolve.otio import davinci_export


class PrecollectWorkfile(pyblish.api.ContextPlugin):
    """Precollect the current working file into context."""

    label = "Precollect Workfile"
    order = pyblish.api.CollectorOrder - 0.5

    def process(self, context):
        current_folder_path = get_current_folder_path()
        folder_name = current_folder_path.split("/")[-1]

        product_name = "workfileMain"
        project = rapi.get_current_project()
        fps = project.GetSetting("timelineFrameRate")
        video_tracks = rapi.get_video_track_names()

        # adding otio timeline to context
        otio_timeline = davinci_export.create_otio_timeline(project)

        instance_data = {
            "name": "{}_{}".format(folder_name, product_name),
            "label": "{} {}".format(current_folder_path, product_name),
            "item": project,
            "folderPath": current_folder_path,
            "productName": product_name,
            "productType": "workfile",
            "family": "workfile",
            "families": []
        }

        # create instance with workfile
        instance = context.create_instance(**instance_data)

        # update context with main project attributes
        context_data = {
            "activeProject": project,
            "otioTimeline": otio_timeline,
            "videoTracks": video_tracks,
            "currentFile": project.GetName(),
            "fps": fps,
        }
        context.data.update(context_data)

        self.log.info("Creating instance: {}".format(instance))
        self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
        self.log.debug("__ context_data: {}".format(pformat(context_data)))

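The two collectors above cooperate through their `order` values: pyblish sorts plugins by `order`, so this workfile collector (`CollectorOrder - 0.5`) always runs before `PrecollectInstances` (`CollectorOrder - 0.49`), which is why `context.data["otioTimeline"]` is guaranteed to exist there. A minimal, self-contained sketch of that sequencing (plugin names are hypothetical):

```
import pyblish.api


class First(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder - 0.5    # runs first


class Second(pyblish.api.ContextPlugin):
    order = pyblish.api.CollectorOrder - 0.49   # runs after First


# pyblish processes plugins in ascending `order`:
plugins = sorted([Second, First], key=lambda plugin: plugin.order)
print([plugin.__name__ for plugin in plugins])  # ['First', 'Second']
```
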
70
server_addon/resolve/client/ayon_resolve/startup.py
Normal file

@ -0,0 +1,70 @@
"""This script is used as a startup script in Resolve through a .scriptlib file.

It triggers directly after the launch of Resolve. Keep it optimized for fast
performance, because the Resolve UI is already interactive while this runs;
nothing prevents the user from continuing manually before any of the logic
here has finished. For the same reason we delay imports as much as possible.

This code runs in a separate process to the main Resolve process.

"""
import os

from ayon_core.lib import Logger

import ayon_resolve.api

log = Logger.get_logger(__name__)


def ensure_installed_host():
    """Install the Resolve host if needed and return the registered host.

    This function can be called multiple times without triggering an
    additional install.
    """
    from ayon_core.pipeline import install_host, registered_host
    host = registered_host()
    if host:
        return host

    host = ayon_resolve.api.ResolveHost()
    install_host(host)
    return registered_host()


def launch_menu():
    print("Launching Resolve AYON menu..")
    ensure_installed_host()
    ayon_resolve.api.launch_ayon_menu()


def open_workfile(path):
    # Ensure the host is installed (no-op when it already is)
    host = ensure_installed_host()
    host.open_workfile(path)


def main():
    # Open last workfile
    workfile_path = os.environ.get("AYON_RESOLVE_OPEN_ON_LAUNCH")

    if workfile_path and os.path.exists(workfile_path):
        log.info(f"Opening last workfile: {workfile_path}")
        open_workfile(workfile_path)
    else:
        log.info("No last workfile set to open. Skipping..")

    # Launch AYON menu
    from ayon_core.settings import get_project_settings
    from ayon_core.pipeline.context_tools import get_current_project_name
    project_name = get_current_project_name()
    log.info(f"Current project name in context: {project_name}")

    settings = get_project_settings(project_name)
    if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True):
        log.info("Launching AYON menu..")
        launch_menu()


if __name__ == "__main__":
    main()

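The startup script's whole environment contract is the two variables it reads: `AYON_RESOLVE_OPEN_ON_LAUNCH` here, and `AYON_RESOLVE_STARTUP_SCRIPT` consumed by the `.scriptlib` shown further below. A hedged sketch of launching Resolve with them set manually; both paths and the executable location are assumptions, since in practice the addon's launch hooks populate these:

```
import os
import subprocess

env = dict(os.environ)
# Assumed paths, for illustration only:
env["AYON_RESOLVE_STARTUP_SCRIPT"] = "/path/to/ayon_resolve/startup.py"
env["AYON_RESOLVE_OPEN_ON_LAUNCH"] = "/projects/demo/work/sh010_v001.drp"

# Resolve executable location varies per platform/install.
subprocess.Popen(["/opt/resolve/bin/resolve"], env=env)
```
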
@ -0,0 +1,22 @@
import os
import sys

from ayon_core.pipeline import install_host
from ayon_core.lib import Logger

log = Logger.get_logger(__name__)


def main(env):
    from ayon_resolve.api import ResolveHost, launch_ayon_menu

    # activate the Resolve host
    host = ResolveHost()
    install_host(host)

    launch_ayon_menu()
    return True  # signal success so the exit code below is 0


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))

@ -0,0 +1,21 @@
-- Run the AYON Python launch script for Resolve
function file_exists(name)
    local f = io.open(name, "r")
    return f ~= nil and io.close(f)
end


ayon_startup_script = os.getenv("AYON_RESOLVE_STARTUP_SCRIPT")
if ayon_startup_script ~= nil then
    script = fusion:MapPath(ayon_startup_script)

    if file_exists(script) then
        -- We must use RunScript to ensure it runs in a separate
        -- process to Resolve itself, to avoid a deadlock for
        -- certain imports of OpenPype libraries or Qt
        print("Running launch script: " .. script)
        fusion:RunScript(script)
    else
        print("Launch script not found at: " .. script)
    end
end

@ -0,0 +1,83 @@
#!/usr/bin/env python
import os

from ayon_resolve.otio import davinci_export as otio_export

resolve = bmd.scriptapp("Resolve")  # noqa
fu = resolve.Fusion()

ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)  # noqa


title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
    {
        "WindowTitle": "Export OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "exportfilebttn",
                        "Text": "Select Destination",
                        "Weight": 1.25,
                        "ToolTip": "Choose where to save the otio file",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "exportbttn",
                        "Text": "Export",
                        "Weight": 2,
                        "ToolTip": "Export the current timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()


def _close_window(event):
    disp.ExitLoop()


def _export_button(event):
    pm = resolve.GetProjectManager()
    project = pm.GetCurrentProject()
    timeline = project.GetCurrentTimeline()
    otio_timeline = otio_export.create_otio_timeline(project)
    otio_path = os.path.join(
        itm["exportfilebttn"].Text,
        timeline.GetName() + ".otio")
    print("Exporting timeline to: {}".format(otio_path))
    otio_export.write_to_file(
        otio_timeline,
        otio_path)
    _close_window(None)


def _export_file_pressed(event):
    # store the chosen destination directory on the button label
    selected_path = fu.RequestDir(os.path.expanduser("~/Documents"))
    itm["exportfilebttn"].Text = selected_path


dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

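The dialog above is only a front-end for two calls; the same export works headless from Resolve's script console, assuming `bmd` is injected by Resolve's scripting host just as in the script itself (the output directory is illustrative):

```
import os
from ayon_resolve.otio import davinci_export as otio_export

resolve = bmd.scriptapp("Resolve")  # noqa  # injected by Resolve
project = resolve.GetProjectManager().GetCurrentProject()
timeline = project.GetCurrentTimeline()

otio_timeline = otio_export.create_otio_timeline(project)
otio_path = os.path.join(
    os.path.expanduser("~/Documents"), timeline.GetName() + ".otio")
otio_export.write_to_file(otio_timeline, otio_path)
```
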
@ -0,0 +1,72 @@
#!/usr/bin/env python
import os

from ayon_resolve.otio import davinci_import as otio_import

resolve = bmd.scriptapp("Resolve")  # noqa
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)  # noqa


title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
    {
        "WindowTitle": "Import OTIO",
        "ID": "OTIOwin",
        "Geometry": [250, 250, 250, 100],
        "Spacing": 0,
        "Margin": 10
    },
    [
        ui.VGroup(
            {
                "Spacing": 2
            },
            [
                ui.Button(
                    {
                        "ID": "importOTIOfileButton",
                        "Text": "Select OTIO File Path",
                        "Weight": 1.25,
                        "ToolTip": "Choose otio file to import from",
                        "Flat": False
                    }
                ),
                ui.VGap(),
                ui.Button(
                    {
                        "ID": "importButton",
                        "Text": "Import",
                        "Weight": 2,
                        "ToolTip": "Import otio to new timeline",
                        "Flat": False
                    }
                )
            ]
        )
    ]
)

itm = dlg.GetItems()


def _close_window(event):
    disp.ExitLoop()


def _import_button(event):
    otio_import.read_from_file(itm["importOTIOfileButton"].Text)
    _close_window(None)


def _import_file_pressed(event):
    selected_path = fu.RequestFile(os.path.expanduser("~/Documents"))
    itm["importOTIOfileButton"].Text = selected_path


dlg.On.OTIOwin.Close = _close_window
dlg.On.importOTIOfileButton.Clicked = _import_file_pressed
dlg.On.importButton.Clicked = _import_button
dlg.Show()
disp.RunLoop()
dlg.Hide()

@ -0,0 +1,18 @@
#!/usr/bin/env python
import os
import sys

from ayon_core.pipeline import install_host


def main(env):
    from ayon_resolve.utils import setup
    import ayon_resolve.api as bmdvr
    # Register AYON's global pyblish plugins
    install_host(bmdvr)
    setup(env)
    return True  # signal success so the exit code below is 0


if __name__ == "__main__":
    result = main(os.environ)
    sys.exit(not bool(result))

71
server_addon/resolve/client/ayon_resolve/utils.py
Normal file

@ -0,0 +1,71 @@
import os
import shutil

from ayon_core.lib import Logger, is_running_from_build

RESOLVE_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))


def setup(env):
    log = Logger.get_logger("ResolveSetup")
    scripts = {}
    util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR")
    util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"]

    util_scripts_paths = [os.path.join(
        RESOLVE_ADDON_ROOT,
        "utility_scripts"
    )]

    # collect script dirs
    if util_scripts_env:
        log.info("Utility Scripts Env: `{}`".format(util_scripts_env))
        util_scripts_paths = util_scripts_env.split(
            os.pathsep) + util_scripts_paths

    # collect scripts from dirs
    for path in util_scripts_paths:
        scripts.update({path: os.listdir(path)})

    log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths))
    log.info("Utility Scripts: `{}`".format(scripts))

    # make sure the scripts dir exists
    os.makedirs(util_scripts_dir, exist_ok=True)

    # make sure no stale script files are left in the folder
    for script in os.listdir(util_scripts_dir):
        path = os.path.join(util_scripts_dir, script)
        log.info("Removing `{}`...".format(path))
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)

    # copy scripts into Resolve's utility scripts dir
    # (the inner loop variable is named to avoid shadowing `scripts`)
    for directory, dir_scripts in scripts.items():
        for script in dir_scripts:
            if (
                is_running_from_build()
                and script in ["tests", "develop"]
            ):
                # skip dev-only scripts when running from a build
                continue

            src = os.path.join(directory, script)
            dst = os.path.join(util_scripts_dir, script)

            # TODO: Make this a less hacky workaround
            if script == "ayon_startup.scriptlib":
                # Handle special case for scriptlib that needs to be a folder
                # up from the Comp folder in the Fusion scripts
                dst = os.path.join(os.path.dirname(util_scripts_dir),
                                   script)

            log.info("Copying `{}` to `{}`...".format(src, dst))
            if os.path.isdir(src):
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)

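A usage sketch for `setup()`: the destination variable is required (the function indexes `env` directly for it), the source override is optional; the destination path below is illustrative of Resolve's Fusion `Scripts/Comp` folder:

```
import os
from ayon_resolve.utils import setup

env = dict(os.environ)
# Required -- where Resolve looks for utility scripts (illustrative path):
env["RESOLVE_UTILITY_SCRIPTS_DIR"] = os.path.expanduser(
    "~/Documents/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp")
# Optional -- extra source dirs searched before the addon's bundled scripts:
# env["RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR"] = "/studio/resolve_scripts"

setup(env)  # clears the destination dir, then copies the scripts in
```
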
3
server_addon/resolve/client/ayon_resolve/version.py
Normal file

@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'resolve' version."""
__version__ = "0.2.0"

@ -1,3 +1,10 @@
 name = "resolve"
 title = "DaVinci Resolve"
-version = "0.1.0"
+version = "0.2.0"
+
+client_dir = "ayon_resolve"
+
+ayon_required_addons = {
+    "core": ">0.3.2",
+}
+ayon_compatible_addons = {}