Merge remote-tracking branch 'origin/develop' into enhancement/OP-3075_houdini-new-publisher

Ondřej Samohel 2022-10-20 14:41:35 +02:00
commit 00d03df6ef
No known key found for this signature in database
GPG key ID: 02376E18990A97C6
60 changed files with 5513 additions and 947 deletions


@@ -37,27 +37,27 @@ jobs:
echo ::set-output name=next_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version_type.outputs.type != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
unreleasedLabel: ${{ steps.version.outputs.next_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
@@ -85,11 +85,11 @@ jobs:
tags: true
unprotect_reviews: true
- name: 🔨 Merge main back to develop
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'


@@ -2,7 +2,7 @@ name: Stable Release
on:
release:
types:
- prereleased
jobs:
@@ -13,7 +13,7 @@ jobs:
steps:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Python
@@ -33,27 +33,27 @@ jobs:
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: 💾 Commit and Tag
id: git_commit
@@ -73,8 +73,8 @@ jobs:
token: ${{ secrets.ADMIN_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
@@ -114,11 +114,11 @@ jobs:
with:
tag: "${{ steps.version.outputs.current_version }}"
- name: 🔁 Merge main back to develop
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

.gitignore (vendored): 2 lines changed

@@ -110,3 +110,5 @@ tools/run_eventserver.*
# Developer tools
tools/dev_*
.github_changelog_generator

File diff suppressed because it is too large

HISTORY.md: 1818 lines changed

File diff suppressed because it is too large


@@ -277,6 +277,13 @@ def projectmanager():
PypeCommands().launch_project_manager()
@main.command(context_settings={"ignore_unknown_options": True})
def publish_report_viewer():
from openpype.tools.publisher.publish_report_viewer import main
sys.exit(main())
@main.command()
@click.argument("output_path")
@click.option("--project", help="Define project context")

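For context, a minimal self-contained `click` sketch (not OpenPype's actual `cli.py`, whose surrounding commands are elided above) showing how a sub-command such as `publish_report_viewer` hooks into the `main` group:

```python
import sys

import click


@click.group()
def main():
    """Top-level command group (stand-in for OpenPype's real CLI entry point)."""


@main.command(context_settings={"ignore_unknown_options": True})
def publish_report_viewer():
    # Import lazily so the Qt-based tool is only loaded when the command runs.
    from openpype.tools.publisher.publish_report_viewer import main
    sys.exit(main())


if __name__ == "__main__":
    main()
```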

@@ -44,11 +44,26 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class FusionLogHandler(logging.Handler):
# Keep a reference to fusion's Print function (Remote Object)
_print = getattr(sys.modules["__main__"], "fusion").Print
_print = None
@property
def print(self):
if self._print is not None:
# Use cached
return self._print
_print = getattr(sys.modules["__main__"], "fusion").Print
if _print is None:
# Backwards compatibility: Print method on Fusion instance was
# added around Fusion 17.4 and wasn't available on PyRemote Object
# before
_print = get_current_comp().Print
self._print = _print
return _print
def emit(self, record):
entry = self.format(record)
self._print(entry)
self.print(entry)
def install():

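The change above stops resolving Fusion's `Print` remote object at import time and instead looks it up lazily, falling back to `get_current_comp().Print` for Fusion versions older than roughly 17.4. A minimal sketch of that lazy-handler pattern on its own (assuming, as in the diff, a `fusion` object living in `__main__`; the fallback branch is omitted):

```python
import logging
import sys


class LazyFusionLogHandler(logging.Handler):
    """Sketch of a handler that resolves its print callable on first use."""

    _print = None

    @property
    def print(self):
        if self._print is None:
            # Resolve only when the first record arrives, not at import time,
            # so the handler can be created before Fusion is fully available.
            self._print = getattr(sys.modules["__main__"], "fusion").Print
        return self._print

    def emit(self, record):
        self.print(self.format(record))
```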

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
import openpype.api
import pyblish.api
import hou
from openpype.pipeline import (
@@ -8,6 +8,7 @@ from openpype.pipeline import (
)
from openpype.pipeline.publish import RepairAction
class ValidateWorkfilePaths(
pyblish.api.InstancePlugin, OptionalPyblishPluginMixin):

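The imports above imply the usual shape of such a plugin: an optional Pyblish instance plugin exposing a `RepairAction`. A hedged sketch of that shape (class name, attributes and method bodies below are illustrative, not the actual `ValidateWorkfilePaths` code):

```python
import pyblish.api
from openpype.pipeline import OptionalPyblishPluginMixin
from openpype.pipeline.publish import RepairAction


class ValidateSomething(pyblish.api.InstancePlugin, OptionalPyblishPluginMixin):
    """Illustrative validator skeleton with a repair action."""

    order = pyblish.api.ValidatorOrder
    hosts = ["houdini"]
    label = "Validate Something"
    optional = True
    actions = [RepairAction]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise RuntimeError("Found invalid values: {}".format(invalid))

    @classmethod
    def get_invalid(cls, instance):
        # Collect offending parameters/paths here.
        return []

    @classmethod
    def repair(cls, instance):
        # RepairAction is expected to call back into this classmethod.
        pass
```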

@@ -260,20 +260,20 @@ class ARenderProducts:
"""
try:
file_prefix_attr = IMAGE_PREFIXES[self.renderer]
prefix_attr = IMAGE_PREFIXES[self.renderer]
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer)
)
file_prefix = self._get_attr(file_prefix_attr)
prefix = self._get_attr(prefix_attr)
if not file_prefix:
if not prefix:
# Fall back to scene name by default
log.debug("Image prefix not set, using <Scene>")
file_prefix = "<Scene>"
return file_prefix
return prefix
def get_render_attribute(self, attribute):
"""Get attribute from render options.
@@ -730,13 +730,16 @@ class RenderProductsVray(ARenderProducts):
"""Get image prefix for V-Ray.
This overrides :func:`ARenderProducts.get_renderer_prefix()` as
we must add `<aov>` token manually.
we must add `<aov>` token manually. This is done only for
non-multipart outputs, where `<aov>` token doesn't make sense.
See also:
:func:`ARenderProducts.get_renderer_prefix()`
"""
prefix = super(RenderProductsVray, self).get_renderer_prefix()
if self.multipart:
return prefix
aov_separator = self._get_aov_separator()
prefix = "{}{}<aov>".format(prefix, aov_separator)
return prefix
@@ -974,15 +977,18 @@ class RenderProductsRedshift(ARenderProducts):
"""Get image prefix for Redshift.
This overrides :func:`ARenderProducts.get_renderer_prefix()` as
we must add `<aov>` token manually.
we must add `<aov>` token manually. This is done only for
non-multipart outputs, where `<aov>` token doesn't make sense.
See also:
:func:`ARenderProducts.get_renderer_prefix()`
"""
file_prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
separator = self.extract_separator(file_prefix)
prefix = "{}{}<aov>".format(file_prefix, separator or "_")
prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
if self.multipart:
return prefix
separator = self.extract_separator(prefix)
prefix = "{}{}<aov>".format(prefix, separator or "_")
return prefix
def get_render_products(self):

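Both the V-Ray and Redshift overrides above now apply the same rule: append the `<aov>` token only when the output is not multipart. A standalone sketch of that rule (a hypothetical helper, not OpenPype code):

```python
def build_image_prefix(base_prefix, multipart, aov_separator="_"):
    """Return the render image prefix, adding an <aov> token for split AOVs."""
    if multipart:
        # Multipart files carry every AOV in one output, so the token is useless.
        return base_prefix
    return "{}{}<aov>".format(base_prefix, aov_separator)


# e.g. build_image_prefix("<Scene>/<Scene>", multipart=False) -> "<Scene>/<Scene>_<aov>"
```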

@@ -12,6 +12,7 @@ class CreateAnimation(plugin.Creator):
family = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)
@@ -24,7 +25,7 @@ class CreateAnimation(plugin.Creator):
# Write vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False
self.data["writeFaceSets"] = self.write_face_sets
# Include only renderable visible shapes.
# Skips locators and empty transforms


@@ -9,13 +9,14 @@ class CreateModel(plugin.Creator):
family = "model"
icon = "cube"
defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# Vertex colors with the geometry
self.data["writeColorSets"] = False
self.data["writeFaceSets"] = False
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = self.write_face_sets
# Include attributes by attribute name or prefix
self.data["attr"] = ""


@@ -12,6 +12,7 @@ class CreatePointCache(plugin.Creator):
family = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
@@ -21,7 +22,8 @@ class CreatePointCache(plugin.Creator):
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False # Vertex colors with the geometry.
# Vertex colors with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups

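The animation, model and pointcache creators above all follow the same pattern: class-level `write_color_sets` / `write_face_sets` defaults are copied into the instance data, so they can be overridden per creator class instead of being hard-coded in `__init__`. A condensed sketch of the pattern, without the real `plugin.Creator` base:

```python
class ExampleGeometryCreator(object):
    # Class-level defaults; subclasses (or settings-driven overrides) change these.
    write_color_sets = False
    write_face_sets = False

    def __init__(self):
        self.data = {}
        # Write vertex color sets / face sets with the geometry only when enabled.
        self.data["writeColorSets"] = self.write_color_sets
        self.data["writeFaceSets"] = self.write_face_sets
```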

@@ -13,22 +13,14 @@ from openpype.settings import (
get_system_settings,
get_project_settings,
)
from openpype.lib import requests_get
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api import (
lib,
lib_rendersettings,
plugin
)
from openpype.lib import requests_get
from openpype.api import (
get_system_settings,
get_project_settings)
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.pipeline import (
CreatorError,
legacy_io,
)
from openpype.pipeline.context_tools import get_current_project_asset
class CreateRender(plugin.Creator):


@@ -1,22 +1,24 @@
#### Basic setup
## Basic setup
- Install [latest DaVinci Resolve](https://sw.blackmagicdesign.com/DaVinciResolve/v16.2.8/DaVinci_Resolve_Studio_16.2.8_Windows.zip?Key-Pair-Id=APKAJTKA3ZJMJRQITVEA&Signature=EcFuwQFKHZIBu2zDj5LTCQaQDXcKOjhZY7Fs07WGw24xdDqfwuALOyKu+EVzDX2Tik0cWDunYyV0r7hzp+mHmczp9XP4YaQXHdyhD/2BGWDgiMsiTQbNkBgbfy5MsAMFY8FHCl724Rxm8ke1foWeUVyt/Cdkil+ay+9sL72yFhaSV16sncko1jCIlCZeMkHhbzqPwyRuqLGmxmp8ey9KgBhI3wGFFPN201VMaV+RHrpX+KAfaR6p6dwo3FrPbRHK9TvMI1RA/1lJ3fVtrkDW69LImIKAWmIxgcStUxR9/taqLOD66FNiflHd1tufHv3FBa9iYQsjb3VLMPx7OCwLyg==&Expires=1608308139)
- add absolute path to ffmpeg into openpype settings
![image](https://user-images.githubusercontent.com/40640033/102630786-43294f00-414d-11eb-98de-f0ae51f62077.png)
- install Python 3.6 into `%LOCALAPPDATA%/Programs/Python/Python36` (only respected path by Resolve)
- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. I was building it on Win10 machine with Visual Studio Community 2019 and
- Currently supported versions go up to v18
- install Python 3.6.2 (latest tested with v17) or up to 3.9.13 (latest tested with v18)
- pip install PySide2:
  - Python 3.9.*: open a terminal, go to the python.exe directory, then run `python -m pip install PySide2`
- pip install OpenTimelineIO:
  - Python 3.9.*: open a terminal, go to the python.exe directory, then run `python -m pip install OpenTimelineIO`
  - Python 3.6: open a terminal, go to the python.exe directory, then run `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move the built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I built it on a Win10 machine with Visual Studio Community 2019 and
![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with CMake installed and available in PATH.
- install PySide2 for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2`
- make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6
![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png)
- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the Python 3 path to `RESOLVE_PYTHON3_HOME` for each platform (a quick sanity check of that interpreter is sketched below).
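A hedged sketch of that sanity check (adjust the interpreter path to your own install; the module names match the packages installed above):

```python
# Run this with the interpreter that RESOLVE_PYTHON3_HOME will point to, e.g.
#   %LOCALAPPDATA%\Programs\Python\Python39\python.exe resolve_env_check.py
import sys

import PySide2             # Qt bindings used by the OpenPype menu
import opentimelineio      # OTIO, used for editorial publishing

print("Python:", sys.version)
print("PySide2:", PySide2.__version__)
print("OpenTimelineIO loaded from:", opentimelineio.__file__)
```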
#### Editorial setup
## Editorial setup
This is how it looks on my testing project timeline
![image](https://user-images.githubusercontent.com/40640033/102637638-96ec6600-4156-11eb-9656-6e8e3ce4baf8.png)
Notice I renamed the tracks to `main` (holding metadata markers) and `review`, which is used for generating review data via ffmpeg conversion to a jpg sequence.
1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/**__OpenPype_Menu__**
1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__**
2. then select any clips in `main` track and change their color to `Chocolate`
3. in OpenPype Menu select `Create`
4. in Creator select `Create Publishable Clip [New]` (temporary name)


@@ -1,189 +0,0 @@
Updated as of 08 March 2019
--------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples.
Overview
--------
As with Blackmagic Design Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page, or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when allowing scripting access from outside of the Resolve application.
Using a script
--------------
DaVinci Resolve needs to be running for a script to be invoked.
For a Resolve script to be executed from an external folder, the script needs to know of the API location.
You may need to set these environment variables to allow your Python installation to pick up the appropriate dependencies, as shown below:
Mac OS X:
RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting/"
RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so"
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
Windows:
RESOLVE_SCRIPT_API="%PROGRAMDATA%\\Blackmagic Design\\DaVinci Resolve\\Support\\Developer\\Scripting\\"
RESOLVE_SCRIPT_LIB="C:\\Program Files\\Blackmagic Design\\DaVinci Resolve\\fusionscript.dll"
PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\\Modules\\"
Linux:
RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting/"
RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so"
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
(Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve)
As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console.
On startup, DaVinci Resolve scans the Utility Scripts directory and enumerates the scripts found in the Script application menu. Placing your script in this folder and invoking it from this menu is the easiest way to use scripts. The Utility Scripts folder is located in:
Mac OS X: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp/
Windows: %APPDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts\Comp\
Linux: /opt/resolve/Fusion/Scripts/Comp/ (or /home/resolve/Fusion/Scripts/Comp/ depending on installation)
The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6 and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual.
This example Python script creates a simple project:
#!/usr/bin/env python
import DaVinciResolveScript as dvr_script
resolve = dvr_script.scriptapp("Resolve")
fusion = resolve.Fusion()
projectManager = resolve.GetProjectManager()
projectManager.CreateProject("Hello World")
The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and `getmetatable` in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality.
Running DaVinci Resolve in headless mode
----------------------------------------
DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled. However, the various scripting APIs will continue to work as expected.
Basic Resolve API
-----------------
Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions.
Resolve
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
GetMediaStorage() --> MediaStorage # Returns media storage object to query and act on media locations.
GetProjectManager() --> ProjectManager # Returns project manager object for currently open database.
OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "edit", "fusion", "color", "fairlight", "deliver").
ProjectManager
CreateProject(projectName) --> Project # Creates and returns a project if projectName (text) is unique, and None if it is not.
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (text) if there is a match found, and None if there is no matching Project.
GetCurrentProject() --> Project # Returns the currently loaded Resolve project.
SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful.
CreateFolder(folderName) --> Bool # Creates a folder if folderName (text) is unique.
GetProjectsInCurrentFolder() --> [project names...] # Returns an array of project names in current folder.
GetFoldersInCurrentFolder() --> [folder names...] # Returns an array of folder names in current folder.
GotoRootFolder() --> Bool # Opens root folder in database.
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
OpenFolder(folderName) --> Bool # Opens folder under given name.
ImportProject(filePath) --> Bool # Imports a project under given file path. Returns true in case of success.
ExportProject(projectName, filePath) --> Bool # Exports a project based on given name into provided file path. Returns true in case of success.
RestoreProject(filePath) --> Bool # Restores a project under given backup file path. Returns true in case of success.
Project
GetMediaPool() --> MediaPool # Returns the Media Pool object.
GetTimelineCount() --> int # Returns the number of timelines currently present in the project.
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
GetName() --> string # Returns project name.
SetName(projectName) --> Bool # Sets project name if given projectname (text) is unique.
GetPresets() --> [presets...] # Returns a table of presets and their information.
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
GetRenderJobs() --> [render jobs...] # Returns a table of render jobs and their information.
GetRenderPresets() --> [presets...] # Returns a table of render presets and their information.
StartRendering(index1, index2, ...) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs.
StartRendering([idxs...]) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs.
StopRendering() --> None # Stops rendering for all render jobs.
IsRenderingInProgress() --> Bool # Returns true if rendering is in progress.
AddRenderJob() --> Bool # Adds render job to render queue.
DeleteRenderJobByIndex(idx) --> Bool # Deletes render job based on given job index (int).
DeleteAllRenderJobs() --> Bool # Deletes all render jobs.
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (text) exists.
SaveAsNewRenderPreset(presetName) --> Bool # Creates a new render preset by given name if presetName(text) is unique.
SetRenderSettings([settings map]) --> Bool # Sets given settings for rendering. Settings map is a map, keys of map are: "SelectAllFrames", "MarkIn", "MarkOut", "TargetDir", "CustomName".
GetRenderJobStatus(idx) --> [status info] # Returns job status and completion rendering percentage of the job by given job index (int).
GetSetting(settingName) --> string # Returns setting value by given settingName (string) if the setting exist. With empty settingName the function returns a full list of settings.
SetSetting(settingName, settingValue) --> Bool # Sets project setting base on given name (string) and value (string).
GetRenderFormats() --> [render formats...]# Returns a list of available render formats.
GetRenderCodecs(renderFormat) --> [render codecs...] # Returns a list of available codecs for given render format (string).
GetCurrentRenderFormatAndCodec() --> [format, codec] # Returns currently selected render format and render codec.
SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering.
MediaStorage
GetMountedVolumes() --> [paths...] # Returns an array of folder paths corresponding to mounted volumes displayed in Resolves Media Storage.
GetSubFolders(folderPath) --> [paths...] # Returns an array of folder paths in the given absolute folder path.
GetFiles(folderPath) --> [paths...] # Returns an array of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
RevealInStorage(path) --> None # Expands and displays a given file/folder path in Resolves Media Storage.
AddItemsToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is one or more file/folder paths.
AddItemsToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is an array of file/folder paths.
MediaPool
GetRootFolder() --> Folder # Returns the root Folder of Media Pool
AddSubFolder(folder, name) --> Folder # Adds a new subfolder under specified Folder object with the given name.
CreateEmptyTimeline(name) --> Timeline # Adds a new timeline with given name.
AppendToTimeline(clip1, clip2...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
CreateTimelineFromClips(name, clip1, clip2, ...)--> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects.
ImportTimelineFromFile(filePath) --> Timeline # Creates timeline based on parameters within given file.
GetCurrentFolder() --> Folder # Returns currently selected Folder.
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
Folder
GetClips() --> [clips...] # Returns a list of clips (items) within the folder.
GetName() --> string # Returns user-defined name of the folder.
GetSubFolders() --> [folders...] # Returns a list of subfolders in the folder.
MediaPoolItem
GetMetadata(metadataType) --> [[types],[values]] # Returns a value of metadataType. If parameter is not specified returns all set metadata parameters.
SetMetadata(metadataType, metadataValue) --> Bool # Sets metadata by given type and value. Returns True if successful.
GetMediaId() --> string # Returns a unique ID name related to MediaPoolItem.
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
AddFlag(color) --> Bool # Adds a flag with given color (text).
GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item.
GetClipColor() --> string # Returns an item color as a string.
GetClipProperty(propertyName) --> [[types],[values]] # Returns property value related to the item based on given propertyName (string). if propertyName is empty then it returns a full list of properties.
SetClipProperty(propertyName, propertyValue) --> Bool # Sets into given propertyName (string) propertyValue (string).
Timeline
GetName() --> string # Returns user-defined name of the timeline.
SetName(timelineName) --> Bool # Sets timeline name if timelineName (text) is unique.
GetStartFrame() --> int # Returns frame number at the start of timeline.
GetEndFrame() --> int # Returns frame number at the end of timeline.
GetTrackCount(trackType) --> int # Returns a number of track based on specified track type ("audio", "video" or "subtitle").
GetItemsInTrack(trackType, index) --> [items...] # Returns an array of Timeline items on the video or audio track (based on trackType) at specified index. 1 <= index <= GetTrackCount(trackType).
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
GetCurrentTimecode() --> string # Returns a string representing a timecode for current position of the timeline, while on Cut, Edit, Color and Deliver page.
GetCurrentVideoItem() --> item # Returns current video timeline item.
GetCurrentClipThumbnailImage() --> [width, height, format, data] # Returns raw thumbnail image data (This image data is encoded in base 64 format and the image format is RGB 8 bit) for the current media in the Color Page in the format of dictionary (in Python) and table (in Lua). Information return are "width", "height", "format" and "data". Example is provided in 6_get_current_media_thumbnail.py in Example folder.
TimelineItem
GetName() --> string # Returns a name of the item.
GetDuration() --> int # Returns a duration of item.
GetEnd() --> int # Returns a position of end frame.
GetFusionCompCount() --> int # Returns the number of Fusion compositions associated with the timeline item.
GetFusionCompByIndex(compIndex) --> fusionComp # Returns Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount()
GetFusionCompNames() --> [names...] # Returns a list of Fusion composition names associated with the timeline item.
GetFusionCompByName(compName) --> fusionComp # Returns Fusion composition object based on given name.
GetLeftOffset() --> int # Returns a maximum extension by frame for clip from left side.
GetRightOffset() --> int # Returns a maximum extension by frame for clip from right side.
GetStart() --> int # Returns a position of first frame.
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item.
GetClipColor() --> string # Returns an item color as a string.
AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item.
ImportFusionComp(path) --> fusionComp # Imports Fusion composition from given file path by creating and adding a new composition for the item.
ExportFusionComp(path, compIndex) --> Bool # Exports Fusion composition based on given index into provided file name path.
DeleteFusionCompByName(compName) --> Bool # Deletes Fusion composition by provided name.
LoadFusionCompByName(compName) --> fusionComp # Loads Fusion composition by provided name and sets it as active composition.
RenameFusionCompByName(oldName, newName) --> Bool # Renames Fusion composition by provided name with new given name.
AddVersion(versionName, versionType) --> Bool # Adds a new Version associated with the timeline item. versionType: 0 - local, 1 - remote.
DeleteVersionByName(versionName, versionType) --> Bool # Deletes Version by provided name. versionType: 0 - local, 1 - remote.
LoadVersionByName(versionName, versionType) --> Bool # Loads Version by provided name and sets it as active Version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames Version by provided name with new given name. versionType: 0 - local, 1 - remote.
GetMediaPoolItem() --> MediaPoolItem # Returns a corresponding to the timeline item media pool item if it exists.
GetVersionNames(versionType) --> [strings...] # Returns a list of version names by provided versionType: 0 - local, 1 - remote.
GetStereoConvergenceValues() --> [offset, value] # Returns a table of keyframe offsets and respective convergence values
GetStereoLeftFloatingWindowParams() --> [offset, value] # For the LEFT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values
GetStereoRightFloatingWindowParams() --> [offset, value] # For the RIGHT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values


@@ -1,5 +1,5 @@
Updated as of 20 October 2020
-----------------------------
Updated as of 9 May 2022
----------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import
modules for scripting access (DaVinciResolve.py) and some representative examples.
@@ -89,12 +89,25 @@ Resolve
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations.
GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database.
OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None).
GetProductName() --> string # Returns product name.
GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format.
GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format.
LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'.
UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout.
ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'.
DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'.
SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'.
ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename.
Quit() --> None # Quits the Resolve App.
ProjectManager
ArchiveProject(projectName,
filePath,
isArchiveSrcMedia=True,
isArchiveRenderCache=True,
isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments
CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not.
DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project.
@@ -109,9 +122,9 @@ ProjectManager
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
GetCurrentFolder() --> string # Returns the current folder name.
OpenFolder(folderName) --> Bool # Opens folder under given name.
ImportProject(filePath) --> Bool # Imports a project from the file path provided. Returns True if successful.
ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful.
ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success.
RestoreProject(filePath) --> Bool # Restores a project from the file path provided. Returns True if successful.
RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful.
GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection
GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve
SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project.
@@ -125,8 +138,9 @@ Project
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
GetGallery() --> Gallery # Returns the Gallery object.
GetName() --> string # Returns project name.
SetName(projectName) --> Bool # Sets project name if given projectname (string) is unique.
SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique.
GetPresetList() --> [presets...] # Returns a list of presets and their information.
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job.
@@ -144,27 +158,7 @@ Project
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists.
SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique.
SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys:
# "SelectAllFrames": Bool
# "MarkIn": int
# "MarkOut": int
# "TargetDir": string
# "CustomName": string
# "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
# "ExportVideo": Bool
# "ExportAudio": Bool
# "FormatWidth": int
# "FormatHeight": int
# "FrameRate": float (examples: 23.976, 24)
# "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
# "VideoQuality" possible values for current codec (if applicable):
# 0 (int) - will set quality to automatic
# [1 -> MAX] (int) - will set input bit rate
# ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level
# "AudioCodec": string (example: "aac")
# "AudioBitDepth": int
# "AudioSampleRate": int
# "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign")
# "GammaTag" : string (example: "Same as Project", "ACEScct")
# Refer to "Looking up render settings" section for information for supported settings
GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string).
GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information.
SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information.
@@ -176,12 +170,13 @@ Project
SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip.
GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height".
RefreshLUTList() --> Bool # Refreshes LUT List
GetUniqueId() --> string # Returns a unique ID for the project item
MediaStorage
GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolves Media Storage.
GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path.
GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
RevealInStorage(path) --> None # Expands and displays given file/folder path in Resolves Media Storage.
RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolves Media Storage.
AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created.
AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful.
@@ -190,10 +185,11 @@ MediaStorage
MediaPool
GetRootFolder() --> Folder # Returns root Folder of Media Pool
AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name.
RefreshFolders() --> Bool # Updates the folders in collaboration mode
CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name.
AppendToTimeline(clip1, clip2, ...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([{clipInfo}, ...]) --> Bool # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int).
AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only). Returns the list of appended timelineItems.
CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int).
@@ -202,6 +198,8 @@ MediaPool
# "importSourceClips": Bool, specifies whether source clips should be imported, True by default
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True
# "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False
# "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import
DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool.
GetCurrentFolder() --> Folder # Returns currently selected Folder.
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool
@@ -214,19 +212,26 @@ MediaPool
RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path.
UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips.
ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created.
# Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on.
# Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx".
ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format.
# If no clips are specified, all clips from media pool will be used.
GetUniqueId() --> string # Returns a unique ID for the media pool
Folder
GetClipList() --> [clips...] # Returns a list of clips (items) within the folder.
GetName() --> string # Returns the media folder name.
GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder.
GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise
GetUniqueId() --> string # Returns a unique ID for the media pool folder
MediaPoolItem
GetName() --> string # Returns the clip name.
GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'.
# If no argument is specified, a dict of all set metadata properties is returned.
SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful.
SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful.
GetMediaId() --> string # Returns the unique ID for the MediaPoolItem.
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
customData)
@@ -248,15 +253,18 @@ MediaPoolItem
GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'.
# If no argument is specified, a dict of all clip properties is returned. Check the section below for more information.
SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information.
LinkProxyMedia(propertyName) --> Bool # Links proxy media (absolute path) with the current clip.
LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path.
UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip.
ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path.
GetUniqueId() --> string # Returns a unique ID for the media pool item
Timeline
GetName() --> string # Returns the timeline name.
SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful.
GetStartFrame() --> int # Returns the frame number at the start of timeline.
GetEndFrame() --> int # Returns the frame number at the end of timeline.
SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise.
GetStartTimecode() --> string # Returns the start timecode for the timeline.
GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle").
GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType).
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
@@ -271,7 +279,8 @@ Timeline
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color and Deliver pages.
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages.
SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages.
GetCurrentVideoItem() --> item # Returns the current video timeline item.
GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page.
# An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder.
@@ -280,37 +289,30 @@ Timeline
DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success.
CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item.
CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item.
ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys:
# "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default
# "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default
# "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default
# "useSizingInfo": Bool, specifies if sizing information should be used, False by default
# "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default
# "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default
# "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True
# "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder
Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format.
# exportType can be one of the following constants:
# resolve.EXPORT_AAF
# resolve.EXPORT_DRT
# resolve.EXPORT_EDL
# resolve.EXPORT_FCP_7_XML
# resolve.EXPORT_FCPXML_1_3
# resolve.EXPORT_FCPXML_1_4
# resolve.EXPORT_FCPXML_1_5
# resolve.EXPORT_FCPXML_1_6
# resolve.EXPORT_FCPXML_1_7
# resolve.EXPORT_FCPXML_1_8
# resolve.EXPORT_HDR_10_PROFILE_A
# resolve.EXPORT_HDR_10_PROFILE_B
# resolve.EXPORT_TEXT_CSV
# resolve.EXPORT_TEXT_TAB
# resolve.EXPORT_DOLBY_VISION_VER_2_9
# resolve.EXPORT_DOLBY_VISION_VER_4_0
# exportSubtype can be one of the following enums:
# resolve.EXPORT_NONE
# resolve.EXPORT_AAF_NEW
# resolve.EXPORT_AAF_EXISTING
# resolve.EXPORT_CDL
# resolve.EXPORT_SDL
# resolve.EXPORT_MISSING_CLIPS
# Please note that exportSubType is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For rest of the exportType, exportSubtype is ignored.
# When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
# When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
# Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.
# Refer to section "Looking up timeline exports properties" for information on the parameters.
GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information.
SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information.
InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline.
InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline.
InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline.
InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline.
InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline.
InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline.
GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object.
GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects.
GetUniqueId() --> string # Returns a unique ID for the timeline
TimelineItem
GetName() --> string # Returns the item name.
@@ -323,6 +325,10 @@ TimelineItem
GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side.
GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side.
GetStart() --> int # Returns the start frame position on the timeline.
SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue"
# Refer to "Looking up Timeline item properties" for more information
GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key
# if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
customData)
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
@ -345,7 +351,8 @@ TimelineItem
DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition.
LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition.
RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName.
AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clipbased on versionType (0 - local, 1 - remote).
AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote).
GetCurrentVersion() --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType(0 - local, 1 - remote).
DeleteVersionByName(versionName, versionType) --> Bool # Deletes a color version by name and versionType (0 - local, 1 - remote).
LoadVersionByName(versionName, versionType) --> Bool # Loads a named color version as the active version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
@ -354,12 +361,14 @@ TimelineItem
GetStereoConvergenceValues() --> {keyframes...} # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
GetStereoLeftFloatingWindowParams() --> {keyframes...} # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
GetStereoRightFloatingWindowParams() --> {keyframes...} # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
GetNumNodes() --> int # Returns the number of nodes in the current graph for the timeline item
SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
# The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
# The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
SetCDL([CDL map]) --> Bool # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
# Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
AddTake(mediaPoolItem, startFrame=0, endFrame=0) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the whole clip is added. startFrame and endFrame can be specified as extents.
AddTake(mediaPoolItem, startFrame, endFrame) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents is added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
GetSelectedTakeIndex() --> int # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
GetTakesCount() --> int # Returns the number of takes in take selector, or 0 if the clip is not a take selector.
GetTakeByIndex(idx) --> {takeInfo...} # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index.
@ -367,7 +376,24 @@ TimelineItem
SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes.
FinalizeTake() --> Bool # Finalizes take selection.
CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred.
UpdateSidecar() --> Bool # Updates sidecar file for BRAW clips or RMD file for R3D clips.
GetUniqueId() --> string # Returns a unique ID for the timeline item
Gallery
GetAlbumName(galleryStillAlbum) --> string # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
SetAlbumName(galleryStillAlbum, albumName) --> Bool # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
GetCurrentStillAlbum() --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object.
SetCurrentStillAlbum(galleryStillAlbum) --> Bool # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'.
GetGalleryStillAlbums() --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects.
GalleryStillAlbum
GetStills() --> [galleryStill] # Returns the list of GalleryStill objects in the album.
GetLabel(galleryStill) --> string # Returns the label of the galleryStill.
SetLabel(galleryStill, label) --> Bool # Sets the new 'label' to GalleryStill object 'galleryStill'.
ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm).
DeleteStills([galleryStill]) --> Bool # Deletes specified list of GalleryStill objects '[galleryStill]'.
GalleryStill # This class does not provide any API functions but the object type is used by functions in other classes.
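A minimal sketch of grabbing a still and exporting it through the Gallery classes above (assuming a project with a current timeline is open; the folder path and file prefix are placeholder values):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()
    timeline = project.GetCurrentTimeline()
    gallery = project.GetGallery()

    still = timeline.GrabStill()                                   # GalleryStill from the current video clip
    album = gallery.GetCurrentStillAlbum()                         # GalleryStillAlbum object
    album.ExportStills([still], "/tmp/stills", "shot", "png")      # placeholder folder and prefix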
List and Dict Data Structures
-----------------------------
@ -375,7 +401,6 @@ Beside primitive data types, Resolve's Python API mainly uses list and dict data
As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
Similarly the Lua API implements "dict" as a table with the dictionary key as first element, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.
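For example, the markers dict returned by Timeline.GetMarkers() can be consumed like this in Python (a sketch assuming the scripting module is importable and a timeline with markers is open); in Lua the same data arrives as a table keyed by frameId:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    markers = timeline.GetMarkers()          # dict: {frameId: {"color": ..., "name": ..., "note": ..., "duration": ..., "customData": ...}}
    for frame_id, info in markers.items():
        print(frame_id, info["name"], info["color"])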
Looking up Project and Clip properties
--------------------------------------
This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and
@ -412,6 +437,179 @@ Affects:
• x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x)
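A sketch of the round trip mentioned above (assuming the scripting module is importable and the media pool root folder holds at least one clip):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    media_pool = resolve.GetProjectManager().GetCurrentProject().GetMediaPool()
    clip = media_pool.GetRootFolder().GetClipList()[0]             # first clip in the root folder

    x = clip.GetClipProperty("Super Scale")                        # read the property
    clip.SetClipProperty("Super Scale", x)                         # write the same value back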
Looking up Render Settings
--------------------------
This section covers the supported settings for the method SetRenderSettings({settings})
The parameter setting is a dictionary containing the following keys:
- "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored)
- "MarkIn": int
- "MarkOut": int
- "TargetDir": string
- "CustomName": string
- "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
- "ExportVideo": Bool
- "ExportAudio": Bool
- "FormatWidth": int
- "FormatHeight": int
- "FrameRate": float (examples: 23.976, 24)
- "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
- "VideoQuality" possible values for current codec (if applicable):
- 0 (int) - will set quality to automatic
- [1 -> MAX] (int) - will set input bit rate
- ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level
- "AudioCodec": string (example: "aac")
- "AudioBitDepth": int
- "AudioSampleRate": int
- "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign")
- "GammaTag" : string (example: "Same as Project", "ACEScct")
- "ExportAlpha": Bool
- "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265.
- "MultiPassEncode": Bool. Can only be set for H.264.
- "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true.
- "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats.
Looking up timeline export properties
-------------------------------------
This section covers the parameters for the method Timeline.Export(fileName, exportType, exportSubtype).
exportType can be one of the following constants:
- resolve.EXPORT_AAF
- resolve.EXPORT_DRT
- resolve.EXPORT_EDL
- resolve.EXPORT_FCP_7_XML
- resolve.EXPORT_FCPXML_1_3
- resolve.EXPORT_FCPXML_1_4
- resolve.EXPORT_FCPXML_1_5
- resolve.EXPORT_FCPXML_1_6
- resolve.EXPORT_FCPXML_1_7
- resolve.EXPORT_FCPXML_1_8
- resolve.EXPORT_FCPXML_1_9
- resolve.EXPORT_FCPXML_1_10
- resolve.EXPORT_HDR_10_PROFILE_A
- resolve.EXPORT_HDR_10_PROFILE_B
- resolve.EXPORT_TEXT_CSV
- resolve.EXPORT_TEXT_TAB
- resolve.EXPORT_DOLBY_VISION_VER_2_9
- resolve.EXPORT_DOLBY_VISION_VER_4_0
exportSubtype can be one of the following enums:
- resolve.EXPORT_NONE
- resolve.EXPORT_AAF_NEW
- resolve.EXPORT_AAF_EXISTING
- resolve.EXPORT_CDL
- resolve.EXPORT_SDL
- resolve.EXPORT_MISSING_CLIPS
Please note that exportSubType is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the remaining exportType values, exportSubtype is ignored.
When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.
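A sketch of exporting the current timeline as an EDL with a CDL subtype, using the constants above (the output path is a placeholder):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    # exportSubtype is required for EXPORT_AAF and EXPORT_EDL, ignored otherwise
    ok = timeline.Export("/tmp/timeline.edl", resolve.EXPORT_EDL, resolve.EXPORT_CDL)
    print(ok)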
Looking up Timeline item properties
-----------------------------------
This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set the properties listed below.
The supported keys with their accepted values are:
"Pan" : floating point values from -4.0*width to 4.0*width
"Tilt" : floating point values from -4.0*height to 4.0*height
"ZoomX" : floating point values from 0.0 to 100.0
"ZoomY" : floating point values from 0.0 to 100.0
"ZoomGang" : a boolean value
"RotationAngle" : floating point values from -360.0 to 360.0
"AnchorPointX" : floating point values from -4.0*width to 4.0*width
"AnchorPointY" : floating point values from -4.0*height to 4.0*height
"Pitch" : floating point values from -1.5 to 1.5
"Yaw" : floating point values from -1.5 to 1.5
"FlipX" : boolean value for flipping horizontally
"FlipY" : boolean value for flipping vertically
"CropLeft" : floating point values from 0.0 to width
"CropRight" : floating point values from 0.0 to width
"CropTop" : floating point values from 0.0 to height
"CropBottom" : floating point values from 0.0 to height
"CropSoftness" : floating point values from -100.0 to 100.0
"CropRetain" : boolean value for "Retain Image Position" checkbox
"DynamicZoomEase" : A value from the following constants
- DYNAMIC_ZOOM_EASE_LINEAR = 0
- DYNAMIC_ZOOM_EASE_IN
- DYNAMIC_ZOOM_EASE_OUT
- DYNAMIC_ZOOM_EASE_IN_AND_OUT
"CompositeMode" : A value from the following constants
- COMPOSITE_NORMAL = 0
- COMPOSITE_ADD
- COMPOSITE_SUBTRACT
- COMPOSITE_DIFF
- COMPOSITE_MULTIPLY
- COMPOSITE_SCREEN
- COMPOSITE_OVERLAY
- COMPOSITE_HARDLIGHT
- COMPOSITE_SOFTLIGHT
- COMPOSITE_DARKEN
- COMPOSITE_LIGHTEN
- COMPOSITE_COLOR_DODGE
- COMPOSITE_COLOR_BURN
- COMPOSITE_EXCLUSION
- COMPOSITE_HUE
- COMPOSITE_SATURATE
- COMPOSITE_COLORIZE
- COMPOSITE_LUMA_MASK
- COMPOSITE_DIVIDE
- COMPOSITE_LINEAR_DODGE
- COMPOSITE_LINEAR_BURN
- COMPOSITE_LINEAR_LIGHT
- COMPOSITE_VIVID_LIGHT
- COMPOSITE_PIN_LIGHT
- COMPOSITE_HARD_MIX
- COMPOSITE_LIGHTER_COLOR
- COMPOSITE_DARKER_COLOR
- COMPOSITE_FOREGROUND
- COMPOSITE_ALPHA
- COMPOSITE_INVERTED_ALPHA
- COMPOSITE_LUM
- COMPOSITE_INVERTED_LUM
"Opacity" : floating point value from 0.0 to 100.0
"Distortion" : floating point value from -1.0 to 1.0
"RetimeProcess" : A value from the following constants
- RETIME_USE_PROJECT = 0
- RETIME_NEAREST
- RETIME_FRAME_BLEND
- RETIME_OPTICAL_FLOW
"MotionEstimation" : A value from the following constants
- MOTION_EST_USE_PROJECT = 0
- MOTION_EST_STANDARD_FASTER
- MOTION_EST_STANDARD_BETTER
- MOTION_EST_ENHANCED_FASTER
- MOTION_EST_ENHANCED_BETTER
- MOTION_EST_SPEED_WRAP
"Scaling" : A value from the following constants
- SCALE_USE_PROJECT = 0
- SCALE_CROP
- SCALE_FIT
- SCALE_FILL
- SCALE_STRETCH
"ResizeFilter" : A value from the following constants
- RESIZE_FILTER_USE_PROJECT = 0
- RESIZE_FILTER_SHARPER
- RESIZE_FILTER_SMOOTHER
- RESIZE_FILTER_BICUBIC
- RESIZE_FILTER_BILINEAR
- RESIZE_FILTER_BESSEL
- RESIZE_FILTER_BOX
- RESIZE_FILTER_CATMULL_ROM
- RESIZE_FILTER_CUBIC
- RESIZE_FILTER_GAUSSIAN
- RESIZE_FILTER_LANCZOS
- RESIZE_FILTER_MITCHELL
- RESIZE_FILTER_NEAREST_NEIGHBOR
- RESIZE_FILTER_QUADRATIC
- RESIZE_FILTER_SINC
- RESIZE_FILTER_LINEAR
Values beyond the range will be clipped.
width and height are the same as the UI max limits.
The arguments can be passed as a key and value pair or they can be grouped together into a dictionary (for python) or table (for lua) and passed
as a single argument.
Getting the value for a key that uses constants will return the number stored in the constant.
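A sketch of setting a few of the keys above on the first item of video track 1 of the current timeline (values stay within the documented ranges; assumes at least one clip on that track):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()
    item = timeline.GetItemListInTrack("video", 1)[0]              # first item on video track 1

    item.SetProperty("ZoomX", 2.0)
    item.SetProperty("ZoomY", 2.0)
    item.SetProperty("CompositeMode", 0)                           # COMPOSITE_NORMAL; constant-based keys return their number
    print(item.GetProperty())                                      # no key: dict of all supported keys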
Deprecated Resolve API Functions
--------------------------------
The following API functions are deprecated.
@ -450,12 +648,12 @@ TimelineItem
Unsupported Resolve API Functions
---------------------------------
The following API (functions and paraameters) are no longer supported.
The following API (functions and parameters) are no longer supported. Use job IDs instead of indices.
Project
StartRendering(index1, index2, ...) --> Bool # Please use unique job ids (string) instead of indices.
StartRendering([idxs...]) --> Bool # Please use unique job ids (string) instead of indices.
DeleteRenderJobByIndex(idx) --> Bool # Please use unique job ids (string) instead of indices.
GetRenderJobStatus(idx) --> {status info} # Please use unique job ids (string) instead of indices.
GetSetting and SetSetting --> {} # settingName "videoMonitorUseRec601For422SDI" is no longer supported.
# Please use "videoMonitorUseMatrixOverrideFor422SDI" and "videoMonitorMatrixOverrideFor422SDI" instead.
GetSetting and SetSetting --> {} # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI.
# settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available.
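As an illustration of the job-id based workflow that replaces the deprecated index-based calls above (a sketch; it assumes the open project has valid render settings):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()

    job_id = project.AddRenderJob()                                # returns a unique job id (string)
    project.StartRendering([job_id])                               # pass job ids, not indices
    print(project.GetRenderJobStatus(job_id))                      # query status by job id as well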

View file

@ -1,10 +1,6 @@
"""
resolve api
"""
bmdvr = None
bmdvf = None
from .utils import (
get_resolve_module
)
@ -70,6 +66,9 @@ from .workio import (
from .testing_utils import TestGUI
bmdvr = None
bmdvf = None
__all__ = [
"bmdvr",
"bmdvf",

View file

@ -54,15 +54,15 @@ class OpenPypeMenu(QtWidgets.QWidget):
)
self.setWindowTitle("OpenPype")
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
inventory_btn = QtWidgets.QPushButton("Inventory...", self)
subsetm_btn = QtWidgets.QPushButton("Subset Manager...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)
publish_btn = QtWidgets.QPushButton("Publish ...", self)
load_btn = QtWidgets.QPushButton("Load ...", self)
inventory_btn = QtWidgets.QPushButton("Manager ...", self)
subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self)
libload_btn = QtWidgets.QPushButton("Library ...", self)
experimental_btn = QtWidgets.QPushButton(
"Experimental tools...", self
"Experimental tools ...", self
)
# rename_btn = QtWidgets.QPushButton("Rename", self)
# set_colorspace_btn = QtWidgets.QPushButton(

View file

@ -1,31 +0,0 @@
#!/usr/bin/env python
import time
from openpype.hosts.resolve.utils import get_resolve_module
from openpype.lib import Logger
log = Logger.get_logger(__name__)
wait_delay = 2.5
wait = 0.00
ready = None
while True:
try:
# Create project and set parameters:
resolve = get_resolve_module()
pm = resolve.GetProjectManager()
if pm:
ready = None
else:
ready = True
except AttributeError:
pass
if ready is None:
time.sleep(wait_delay)
log.info(f"Waiting {wait}s for Resolve to have opened Project Manager")
wait += wait_delay
else:
print(f"Preloaded variables: \n\n\tResolve module: "
f"`resolve` > {type(resolve)} \n\tProject manager: "
f"`pm` > {type(pm)}")
break

View file

@ -1,5 +1,5 @@
import os
import platform
from openpype.lib import PreLaunchHook
from openpype.hosts.resolve.utils import setup
@ -14,35 +14,91 @@ class ResolvePrelaunch(PreLaunchHook):
app_groups = ["resolve"]
def execute(self):
current_platform = platform.system().lower()
PROGRAMDATA = self.launch_context.env.get("PROGRAMDATA", "")
RESOLVE_SCRIPT_API_ = {
"windows": (
f"{PROGRAMDATA}/Blackmagic Design/"
"DaVinci Resolve/Support/Developer/Scripting"
),
"darwin": (
"/Library/Application Support/Blackmagic Design"
"/DaVinci Resolve/Developer/Scripting"
),
"linux": "/opt/resolve/Developer/Scripting"
}
RESOLVE_SCRIPT_API = os.path.normpath(
RESOLVE_SCRIPT_API_[current_platform])
self.launch_context.env["RESOLVE_SCRIPT_API"] = RESOLVE_SCRIPT_API
RESOLVE_SCRIPT_LIB_ = {
"windows": (
"C:/Program Files/Blackmagic Design"
"/DaVinci Resolve/fusionscript.dll"
),
"darwin": (
"/Applications/DaVinci Resolve/DaVinci Resolve.app"
"/Contents/Libraries/Fusion/fusionscript.so"
),
"linux": "/opt/resolve/libs/Fusion/fusionscript.so"
}
RESOLVE_SCRIPT_LIB = os.path.normpath(
RESOLVE_SCRIPT_LIB_[current_platform])
self.launch_context.env["RESOLVE_SCRIPT_LIB"] = RESOLVE_SCRIPT_LIB
# TODO: add OTIO installation from `openpype/requirements.py`
# making sure python 3.6 is installed at provided path
py36_dir = os.path.normpath(
self.launch_context.env.get("PYTHON36_RESOLVE", ""))
assert os.path.isdir(py36_dir), (
"Python 3.6 is not installed at the provided folder path. Either "
# making sure python <3.9.* is installed at provided path
python3_home = os.path.normpath(
self.launch_context.env.get("RESOLVE_PYTHON3_HOME", ""))
assert os.path.isdir(python3_home), (
"Python 3 is not installed at the provided folder path. Either "
"make sure the `environments\resolve.json` is having correctly "
"set `PYTHON36_RESOLVE` or make sure Python 3.6 is installed "
f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`"
"set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed "
f"in given path. \nRESOLVE_PYTHON3_HOME: `{python3_home}`"
)
self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...")
self.launch_context.env["PYTHONHOME"] = python3_home
self.log.info(f"Path to Resolve Python folder: `{python3_home}`...")
# add to the python path to path
env_path = self.launch_context.env["PATH"]
self.launch_context.env["PATH"] = os.pathsep.join([
python3_home,
os.path.join(python3_home, "Scripts")
] + env_path.split(os.pathsep))
self.log.debug(f"PATH: {self.launch_context.env['PATH']}")
# add to the PYTHONPATH
env_pythonpath = self.launch_context.env["PYTHONPATH"]
self.launch_context.env["PYTHONPATH"] = os.pathsep.join([
os.path.join(python3_home, "Lib", "site-packages"),
os.path.join(RESOLVE_SCRIPT_API, "Modules"),
] + env_pythonpath.split(os.pathsep))
self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}")
RESOLVE_UTILITY_SCRIPTS_DIR_ = {
"windows": (
f"{PROGRAMDATA}/Blackmagic Design"
"/DaVinci Resolve/Fusion/Scripts/Comp"
),
"darwin": (
"/Library/Application Support/Blackmagic Design"
"/DaVinci Resolve/Fusion/Scripts/Comp"
),
"linux": "/opt/resolve/Fusion/Scripts/Comp"
}
RESOLVE_UTILITY_SCRIPTS_DIR = os.path.normpath(
RESOLVE_UTILITY_SCRIPTS_DIR_[current_platform]
)
# setting utility scripts dir for scripts syncing
us_dir = os.path.normpath(
self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
)
assert os.path.isdir(us_dir), (
"Resolve utility script dir does not exists. Either make sure "
"the `environments\resolve.json` is having correctly set "
"`RESOLVE_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n"
f"RESOLVE_UTILITY_SCRIPTS_DIR: `{us_dir}`"
)
self.log.debug(f"-- us_dir: `{us_dir}`")
self.launch_context.env["RESOLVE_UTILITY_SCRIPTS_DIR"] = (
RESOLVE_UTILITY_SCRIPTS_DIR)
# correctly format path for pre python script
pre_py_sc = os.path.normpath(
self.launch_context.env.get("PRE_PYTHON_SCRIPT", ""))
self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc
self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...")
# remove terminal coloring tags
self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True"
# Resolve Setup integration
setup(self.launch_context.env)

View file

@ -9,7 +9,8 @@ def setup(env):
log = Logger.get_logger("ResolveSetup")
scripts = {}
us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR")
us_dir = env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "")
us_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"]
us_paths = [os.path.join(
RESOLVE_ROOT_DIR,
"utility_scripts"

View file

@ -188,6 +188,7 @@ class BatchMovieCreator(TrayPublishCreator):
folders=False,
single_item=False,
extensions=self.extensions,
allow_sequences=False,
label="Filepath"
),
BoolDef(

View file

@ -1,47 +1,11 @@
from .communication_server import CommunicationWrapper
from . import lib
from . import launch_script
from . import workio
from . import pipeline
from . import plugin
from .pipeline import (
install,
maintained_selection,
remove_instance,
list_instances,
ls
)
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root,
TVPaintHost,
)
__all__ = (
"CommunicationWrapper",
"lib",
"launch_script",
"workio",
"pipeline",
"plugin",
"install",
"maintained_selection",
"remove_instance",
"list_instances",
"ls",
# Workfiles API
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root"
"TVPaintHost",
)

View file

@ -10,10 +10,10 @@ from Qt import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.pipeline import install_host
from openpype.hosts.tvpaint.api.communication_server import (
CommunicationWrapper
from openpype.hosts.tvpaint.api import (
TVPaintHost,
CommunicationWrapper,
)
from openpype.hosts.tvpaint import api as tvpaint_host
log = logging.getLogger(__name__)
@ -30,6 +30,7 @@ def main(launch_args):
# - QApplicaiton is also main thread/event loop of the server
qt_app = QtWidgets.QApplication([])
tvpaint_host = TVPaintHost()
# Execute pipeline installation
install_host(tvpaint_host)

View file

@ -2,7 +2,7 @@ import os
import logging
import tempfile
from . import CommunicationWrapper
from .communication_server import CommunicationWrapper
log = logging.getLogger(__name__)

View file

@ -1,6 +1,5 @@
import os
import json
import contextlib
import tempfile
import logging
@ -9,7 +8,8 @@ import requests
import pyblish.api
from openpype.client import get_project, get_asset_by_name
from openpype.hosts import tvpaint
from openpype.host import HostBase, IWorkfileHost, ILoadHost
from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback
from openpype.pipeline import (
@ -26,11 +26,6 @@ from .lib import (
log = logging.getLogger(__name__)
HOST_DIR = os.path.dirname(os.path.abspath(tvpaint.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
METADATA_SECTION = "avalon"
SECTION_NAME_CONTEXT = "context"
@ -63,30 +58,152 @@ instances=2
"""
def install():
"""Install TVPaint-specific functionality."""
class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
name = "tvpaint"
log.info("OpenPype - Installing TVPaint integration")
legacy_io.install()
def install(self):
"""Install TVPaint-specific functionality."""
# Create workdir folder if does not exist yet
workdir = legacy_io.Session["AVALON_WORKDIR"]
if not os.path.exists(workdir):
os.makedirs(workdir)
log.info("OpenPype - Installing TVPaint integration")
legacy_io.install()
pyblish.api.register_host("tvpaint")
pyblish.api.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
# Create workdir folder if does not exist yet
workdir = legacy_io.Session["AVALON_WORKDIR"]
if not os.path.exists(workdir):
os.makedirs(workdir)
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
)
if on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback("instanceToggled", on_instance_toggle)
plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins")
publish_dir = os.path.join(plugins_dir, "publish")
load_dir = os.path.join(plugins_dir, "load")
create_dir = os.path.join(plugins_dir, "create")
register_event_callback("application.launched", initial_launch)
register_event_callback("application.exit", application_exit)
pyblish.api.register_host("tvpaint")
pyblish.api.register_plugin_path(publish_dir)
register_loader_plugin_path(load_dir)
register_creator_plugin_path(create_dir)
registered_callbacks = (
pyblish.api.registered_callbacks().get("instanceToggled") or []
)
if self.on_instance_toggle not in registered_callbacks:
pyblish.api.register_callback(
"instanceToggled", self.on_instance_toggle
)
register_event_callback("application.launched", self.initial_launch)
register_event_callback("application.exit", self.application_exit)
def open_workfile(self, filepath):
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
)
return execute_george_through_file(george_script)
def save_workfile(self, filepath=None):
if not filepath:
filepath = self.get_current_workfile()
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
save_current_workfile_context(context)
# Execute george script to save workfile.
george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
return execute_george(george_script)
def work_root(self, session):
return session["AVALON_WORKDIR"]
def get_current_workfile(self):
return execute_george("tv_GetProjectName")
def workfile_has_unsaved_changes(self):
return None
def get_workfile_extensions(self):
return [".tvpp"]
def get_containers(self):
return get_containers()
def initial_launch(self):
# Setup project settings if its the template that's launched.
# TODO also check for template creation when it's possible to define
# templates
last_workfile = os.environ.get("AVALON_LAST_WORKFILE")
if not last_workfile or os.path.exists(last_workfile):
return
log.info("Setting up project...")
set_context_settings()
def remove_instance(self, instance):
"""Remove instance from current workfile metadata.
Implementation for Subset manager tool.
"""
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
instance_id = instance.get("uuid")
found_idx = None
if instance_id:
for idx, _inst in enumerate(current_instances):
if _inst["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
current_instances.pop(found_idx)
write_instances(current_instances)
def application_exit(self):
"""Logic related to TimerManager.
Todo:
This should be handled out of TVPaint integration logic.
"""
data = get_current_project_settings()
stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
if not stop_timer:
return
# Stop application timer.
webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
requests.post(rest_api_url)
def on_instance_toggle(self, instance, old_value, new_value):
"""Update instance data in workfile on publish toggle."""
# Review may not have real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = list_instances()
for idx, workfile_instance in enumerate(current_instances):
if workfile_instance["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
if "active" in current_instances[found_idx]:
current_instances[found_idx]["active"] = new_value
self.write_instances(current_instances)
def list_instances(self):
"""List all created instances from current workfile."""
return list_instances()
def write_instances(self, data):
return write_instances(data)
def containerise(
@ -116,7 +233,7 @@ def containerise(
"representation": str(context["representation"]["_id"])
}
if current_containers is None:
current_containers = ls()
current_containers = get_containers()
# Add container to containers list
current_containers.append(container_data)
@ -127,15 +244,6 @@ def containerise(
return container_data
@contextlib.contextmanager
def maintained_selection():
# TODO implement logic
try:
yield
finally:
pass
def split_metadata_string(text, chunk_length=None):
"""Split string by length.
@ -333,23 +441,6 @@ def save_current_workfile_context(context):
return write_workfile_metadata(SECTION_NAME_CONTEXT, context)
def remove_instance(instance):
"""Remove instance from current workfile metadata."""
current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
instance_id = instance.get("uuid")
found_idx = None
if instance_id:
for idx, _inst in enumerate(current_instances):
if _inst["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
current_instances.pop(found_idx)
write_instances(current_instances)
def list_instances():
"""List all created instances from current workfile."""
return get_workfile_metadata(SECTION_NAME_INSTANCES)
@ -359,12 +450,7 @@ def write_instances(data):
return write_workfile_metadata(SECTION_NAME_INSTANCES, data)
# Backwards compatibility
def _write_instances(*args, **kwargs):
return write_instances(*args, **kwargs)
def ls():
def get_containers():
output = get_workfile_metadata(SECTION_NAME_CONTAINERS)
if output:
for item in output:
@ -376,53 +462,6 @@ def ls():
return output
def on_instance_toggle(instance, old_value, new_value):
"""Update instance data in workfile on publish toggle."""
# Review may not have real instance in workfile metadata
if not instance.data.get("uuid"):
return
instance_id = instance.data["uuid"]
found_idx = None
current_instances = list_instances()
for idx, workfile_instance in enumerate(current_instances):
if workfile_instance["uuid"] == instance_id:
found_idx = idx
break
if found_idx is None:
return
if "active" in current_instances[found_idx]:
current_instances[found_idx]["active"] = new_value
write_instances(current_instances)
def initial_launch():
# Setup project settings if its the template that's launched.
# TODO also check for template creation when it's possible to define
# templates
last_workfile = os.environ.get("AVALON_LAST_WORKFILE")
if not last_workfile or os.path.exists(last_workfile):
return
log.info("Setting up project...")
set_context_settings()
def application_exit():
data = get_current_project_settings()
stop_timer = data["tvpaint"]["stop_timer_on_application_exit"]
if not stop_timer:
return
# Stop application timer.
webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL")
rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
requests.post(rest_api_url)
def set_context_settings(asset_doc=None):
"""Set workfile settings by asset document data.

View file

@ -4,11 +4,11 @@ import uuid
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
registered_host,
)
from openpype.hosts.tvpaint.api import (
pipeline,
lib
)
from .lib import get_layers_data
from .pipeline import get_current_workfile_context
class Creator(LegacyCreator):
@ -22,7 +22,7 @@ class Creator(LegacyCreator):
dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs)
# Change asset and name by current workfile context
workfile_context = pipeline.get_current_workfile_context()
workfile_context = get_current_workfile_context()
asset_name = workfile_context.get("asset")
task_name = workfile_context.get("task")
if "asset" not in dynamic_data and asset_name:
@ -67,10 +67,12 @@ class Creator(LegacyCreator):
self.log.debug(
"Storing instance data to workfile. {}".format(str(data))
)
return pipeline.write_instances(data)
host = registered_host()
return host.write_instances(data)
def process(self):
data = pipeline.list_instances()
host = registered_host()
data = host.list_instances()
data.append(self.data)
self.write_instances(data)
@ -108,7 +110,7 @@ class Loader(LoaderPlugin):
counter_regex = re.compile(r"_(\d{3})$")
higher_counter = 0
for layer in lib.get_layers_data():
for layer in get_layers_data():
layer_name = layer["name"]
if not layer_name.startswith(layer_name_base):
continue

View file

@ -1,58 +0,0 @@
"""Host API required for Work Files.
# TODO @iLLiCiT implement functions:
has_unsaved_changes
"""
from openpype.pipeline import (
HOST_WORKFILE_EXTENSIONS,
legacy_io,
)
from .lib import (
execute_george,
execute_george_through_file
)
from .pipeline import save_current_workfile_context
def open_file(filepath):
"""Open the scene file in Blender."""
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
)
return execute_george_through_file(george_script)
def save_file(filepath):
"""Save the open scene file."""
# Store context to workfile before save
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
save_current_workfile_context(context)
# Execute george script to save workfile.
george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
return execute_george(george_script)
def current_file():
"""Return the path of the open scene file."""
george_script = "tv_GetProjectName"
return execute_george(george_script)
def has_unsaved_changes():
"""Does the open scene file have unsaved changes?"""
return False
def file_extensions():
"""Return the supported file extensions for Blender scene files."""
return HOST_WORKFILE_EXTENSIONS["tvpaint"]
def work_root(session):
"""Return the default root to browse for work files."""
return session["AVALON_WORKDIR"]

View file

@ -1,11 +1,15 @@
from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.pipeline import CreatorError
from openpype.hosts.tvpaint.api import (
plugin,
pipeline,
lib,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
get_groups_data,
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderlayer(plugin.Creator):
@ -63,7 +67,7 @@ class CreateRenderlayer(plugin.Creator):
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = lib.get_layers_data()
layers_data = get_layers_data()
selected_layers = [
layer
@ -81,8 +85,8 @@ class CreateRenderlayer(plugin.Creator):
def process(self):
self.log.debug("Query data from workfile.")
instances = pipeline.list_instances()
layers_data = lib.get_layers_data()
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking for selection groups.")
# Collect group ids from selection
@ -109,7 +113,7 @@ class CreateRenderlayer(plugin.Creator):
self.log.debug(f"Selected group id is \"{group_id}\".")
self.data["group_id"] = group_id
group_data = lib.get_groups_data()
group_data = get_groups_data()
group_name = None
for group in group_data:
if group["group_id"] == group_id:
@ -176,7 +180,7 @@ class CreateRenderlayer(plugin.Creator):
return
self.log.debug("Querying groups data from workfile.")
groups_data = lib.get_groups_data()
groups_data = get_groups_data()
self.log.debug("Changing name of the group.")
selected_group = None
@ -195,7 +199,7 @@ class CreateRenderlayer(plugin.Creator):
b=selected_group["blue"],
name=new_group_name
)
lib.execute_george_through_file(rename_script)
execute_george_through_file(rename_script)
self.log.info(
f"Name of group with index {group_id}"

View file

@ -2,10 +2,10 @@ from openpype.pipeline import CreatorError
from openpype.lib import prepare_template_data
from openpype.hosts.tvpaint.api import (
plugin,
pipeline,
lib,
CommunicationWrapper
)
from openpype.hosts.tvpaint.api.lib import get_layers_data
from openpype.hosts.tvpaint.api.pipeline import list_instances
class CreateRenderPass(plugin.Creator):
@ -54,7 +54,7 @@ class CreateRenderPass(plugin.Creator):
# Validate that communication is initialized
if CommunicationWrapper.communicator:
# Get currently selected layers
layers_data = lib.layers_data()
layers_data = get_layers_data()
selected_layers = [
layer
@ -72,8 +72,8 @@ class CreateRenderPass(plugin.Creator):
def process(self):
self.log.debug("Query data from workfile.")
instances = pipeline.list_instances()
layers_data = lib.layers_data()
instances = list_instances()
layers_data = get_layers_data()
self.log.debug("Checking selection.")
# Get all selected layers and their group ids

View file

@ -1,5 +1,6 @@
import qargparse
from openpype.hosts.tvpaint.api import lib, plugin
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import execute_george_through_file
class ImportImage(plugin.Loader):
@ -79,4 +80,4 @@ class ImportImage(plugin.Loader):
layer_name,
load_options_str
)
return lib.execute_george_through_file(george_script)
return execute_george_through_file(george_script)

View file

@ -1,7 +1,21 @@
import collections
import qargparse
from openpype.pipeline import get_representation_context
from openpype.hosts.tvpaint.api import lib, pipeline, plugin
from openpype.pipeline import (
get_representation_context,
register_host,
)
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import (
get_layers_data,
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import (
write_workfile_metadata,
SECTION_NAME_CONTAINERS,
containerise,
)
class LoadImage(plugin.Loader):
@ -79,10 +93,10 @@ class LoadImage(plugin.Loader):
load_options_str
)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
loaded_layer = None
layers = lib.layers_data()
layers = get_layers_data()
for layer in layers:
if layer["name"] == layer_name:
loaded_layer = layer
@ -95,7 +109,7 @@ class LoadImage(plugin.Loader):
layer_names = [loaded_layer["name"]]
namespace = namespace or layer_name
return pipeline.containerise(
return containerise(
name=name,
namespace=namespace,
members=layer_names,
@ -109,7 +123,7 @@ class LoadImage(plugin.Loader):
return
if layers is None:
layers = lib.layers_data()
layers = get_layers_data()
available_ids = set(layer["layer_id"] for layer in layers)
@ -152,14 +166,15 @@ class LoadImage(plugin.Loader):
line = "tv_layerkill {}".format(layer_id)
george_script_lines.append(line)
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
def _remove_container(self, container, members=None):
if not container:
return
representation = container["representation"]
members = self.get_members_from_container(container)
current_containers = pipeline.ls()
host = register_host()
current_containers = host.get_containers()
pop_idx = None
for idx, cur_con in enumerate(current_containers):
cur_members = self.get_members_from_container(cur_con)
@ -179,8 +194,8 @@ class LoadImage(plugin.Loader):
return
current_containers.pop(pop_idx)
pipeline.write_workfile_metadata(
pipeline.SECTION_NAME_CONTAINERS, current_containers
write_workfile_metadata(
SECTION_NAME_CONTAINERS, current_containers
)
def remove(self, container):
@ -214,7 +229,7 @@ class LoadImage(plugin.Loader):
break
old_layers = []
layers = lib.layers_data()
layers = get_layers_data()
previous_layer_ids = set(layer["layer_id"] for layer in layers)
if old_layers_are_ids:
for layer in layers:
@ -263,7 +278,7 @@ class LoadImage(plugin.Loader):
new_container = self.load(context, name, namespace, {})
new_layer_names = self.get_members_from_container(new_container)
layers = lib.layers_data()
layers = get_layers_data()
new_layers = []
for layer in layers:
@ -304,4 +319,4 @@ class LoadImage(plugin.Loader):
# Execute george scripts if there are any
if george_script_lines:
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)

View file

@ -1,6 +1,9 @@
import os
import tempfile
from openpype.hosts.tvpaint.api import lib, plugin
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import (
execute_george_through_file,
)
class ImportSound(plugin.Loader):
@ -64,7 +67,7 @@ class ImportSound(plugin.Loader):
)
self.log.info("*** George script:\n{}\n***".format(george_script))
# Execute geoge script
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
# Read output file
lines = []

View file

@ -11,7 +11,13 @@ from openpype.pipeline.workfile import (
get_last_workfile_with_version,
)
from openpype.pipeline.template_data import get_template_data_with_names
from openpype.hosts.tvpaint.api import lib, pipeline, plugin
from openpype.hosts.tvpaint.api import plugin
from openpype.hosts.tvpaint.api.lib import (
execute_george_through_file,
)
from openpype.hosts.tvpaint.api.pipeline import (
get_current_workfile_context,
)
class LoadWorkfile(plugin.Loader):
@ -26,9 +32,9 @@ class LoadWorkfile(plugin.Loader):
# Load context of current workfile as first thing
# - which context and extension has
host = registered_host()
current_file = host.current_file()
current_file = host.get_current_workfile()
context = pipeline.get_current_workfile_context()
context = get_current_workfile_context()
filepath = self.fname.replace("\\", "/")
@ -40,7 +46,7 @@ class LoadWorkfile(plugin.Loader):
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath
)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
# Save workfile.
host_name = "tvpaint"
@ -69,12 +75,13 @@ class LoadWorkfile(plugin.Loader):
file_template = anatomy.templates[template_key]["file"]
# Define saving file extension
extensions = host.get_workfile_extensions()
if current_file:
# Match the extension of current file
_, extension = os.path.splitext(current_file)
else:
# Fall back to the first extension supported for this host.
extension = host.file_extensions()[0]
extension = extensions[0]
data["ext"] = extension
@ -83,7 +90,7 @@ class LoadWorkfile(plugin.Loader):
folder_template, data
)
version = get_last_workfile_with_version(
work_root, file_template, data, host.file_extensions()
work_root, file_template, data, extensions
)[1]
if version is None:
@ -97,4 +104,4 @@ class LoadWorkfile(plugin.Loader):
file_template, data
)
path = os.path.join(work_root, filename)
host.save_file(path)
host.save_workfile(path)

View file

@ -5,7 +5,22 @@ import tempfile
import pyblish.api
from openpype.pipeline import legacy_io
from openpype.hosts.tvpaint.api import pipeline, lib
from openpype.hosts.tvpaint.api.lib import (
execute_george,
execute_george_through_file,
get_layers_data,
get_groups_data,
)
from openpype.hosts.tvpaint.api.pipeline import (
SECTION_NAME_CONTEXT,
SECTION_NAME_INSTANCES,
SECTION_NAME_CONTAINERS,
get_workfile_metadata_string,
write_workfile_metadata,
get_current_workfile_context,
list_instances,
)
class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
@ -15,12 +30,12 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
def process(self, context, plugin):
metadata_keys = {
pipeline.SECTION_NAME_CONTEXT: {},
pipeline.SECTION_NAME_INSTANCES: [],
pipeline.SECTION_NAME_CONTAINERS: []
SECTION_NAME_CONTEXT: {},
SECTION_NAME_INSTANCES: [],
SECTION_NAME_CONTAINERS: []
}
for metadata_key, default in metadata_keys.items():
json_string = pipeline.get_workfile_metadata_string(metadata_key)
json_string = get_workfile_metadata_string(metadata_key)
if not json_string:
continue
@ -35,7 +50,7 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action):
).format(metadata_key, default, json_string),
exc_info=True
)
pipeline.write_workfile_metadata(metadata_key, default)
write_workfile_metadata(metadata_key, default)
class CollectWorkfileData(pyblish.api.ContextPlugin):
@ -45,8 +60,8 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
actions = [ResetTVPaintWorkfileMetadata]
def process(self, context):
current_project_id = lib.execute_george("tv_projectcurrentid")
lib.execute_george("tv_projectselect {}".format(current_project_id))
current_project_id = execute_george("tv_projectcurrentid")
execute_george("tv_projectselect {}".format(current_project_id))
# Collect and store current context to have reference
current_context = {
@ -60,7 +75,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect context from workfile metadata
self.log.info("Collecting workfile context")
workfile_context = pipeline.get_current_workfile_context()
workfile_context = get_current_workfile_context()
# Store workfile context to pyblish context
context.data["workfile_context"] = workfile_context
if workfile_context:
@ -96,7 +111,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect instances
self.log.info("Collecting instance data from workfile")
instance_data = pipeline.list_instances()
instance_data = list_instances()
context.data["workfileInstances"] = instance_data
self.log.debug(
"Instance data:\"{}".format(json.dumps(instance_data, indent=4))
@ -104,7 +119,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect information about layers
self.log.info("Collecting layers data from workfile")
layers_data = lib.layers_data()
layers_data = get_layers_data()
layers_by_name = {}
for layer in layers_data:
layer_name = layer["name"]
@ -120,14 +135,14 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
# Collect information about groups
self.log.info("Collecting groups data from workfile")
group_data = lib.groups_data()
group_data = get_groups_data()
context.data["groupsData"] = group_data
self.log.debug(
"Group data:\"{}".format(json.dumps(group_data, indent=4))
)
self.log.info("Collecting scene data from workfile")
workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ")
workfile_info_parts = execute_george("tv_projectinfo").split(" ")
# Project frame start - not used
workfile_info_parts.pop(-1)
@ -139,10 +154,10 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
workfile_path = " ".join(workfile_info_parts).replace("\"", "")
# Marks return as "{frame - 1} {state} ", example "0 set".
result = lib.execute_george("tv_markin")
result = execute_george("tv_markin")
mark_in_frame, mark_in_state, _ = result.split(" ")
result = lib.execute_george("tv_markout")
result = execute_george("tv_markout")
mark_out_frame, mark_out_state, _ = result.split(" ")
scene_data = {
@ -156,7 +171,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
"sceneMarkInState": mark_in_state == "set",
"sceneMarkOut": int(mark_out_frame),
"sceneMarkOutState": mark_out_state == "set",
"sceneStartFrame": int(lib.execute_george("tv_startframe")),
"sceneStartFrame": int(execute_george("tv_startframe")),
"sceneBgColor": self._get_bg_color()
}
self.log.debug(
@ -188,7 +203,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin):
]
george_script = "\n".join(george_script_lines)
lib.execute_george_through_file(george_script)
execute_george_through_file(george_script)
with open(output_filepath, "r") as stream:
data = stream.read()

View file

@ -5,7 +5,13 @@ import tempfile
from PIL import Image
import pyblish.api
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.api.lib import (
execute_george,
execute_george_through_file,
get_layers_pre_post_behavior,
get_layers_exposure_frames,
)
from openpype.hosts.tvpaint.lib import (
calculate_layers_extraction_data,
get_frame_filename_template,
@ -61,7 +67,7 @@ class ExtractSequence(pyblish.api.Extractor):
# different way when Start Frame is not `0`
# NOTE It will be set back after rendering
scene_start_frame = instance.context.data["sceneStartFrame"]
lib.execute_george("tv_startframe 0")
execute_george("tv_startframe 0")
# Frame start/end may be stored as float
frame_start = int(instance.data["frameStart"])
@ -113,7 +119,7 @@ class ExtractSequence(pyblish.api.Extractor):
output_filepaths_by_frame_idx, thumbnail_fullpath = result
# Change scene frame Start back to previous value
lib.execute_george("tv_startframe {}".format(scene_start_frame))
execute_george("tv_startframe {}".format(scene_start_frame))
# Sequence of one frame
if not output_filepaths_by_frame_idx:
@ -241,7 +247,7 @@ class ExtractSequence(pyblish.api.Extractor):
george_script_lines.append(" ".join(orig_color_command))
lib.execute_george_through_file("\n".join(george_script_lines))
execute_george_through_file("\n".join(george_script_lines))
first_frame_filepath = None
output_filepaths_by_frame_idx = {}
@ -304,8 +310,8 @@ class ExtractSequence(pyblish.api.Extractor):
return [], None
self.log.debug("Collecting pre/post behavior of individual layers.")
behavior_by_layer_id = lib.get_layers_pre_post_behavior(layer_ids)
exposure_frames_by_layer_id = lib.get_layers_exposure_frames(
behavior_by_layer_id = get_layers_pre_post_behavior(layer_ids)
exposure_frames_by_layer_id = get_layers_exposure_frames(
layer_ids, layers
)
extraction_data_by_layer_id = calculate_layers_extraction_data(
@ -410,7 +416,7 @@ class ExtractSequence(pyblish.api.Extractor):
",".join(frames_to_render), layer_id, layer["name"]
))
# Let TVPaint render layer's image
lib.execute_george_through_file("\n".join(george_script_lines))
execute_george_through_file("\n".join(george_script_lines))
# Fill frames between `frame_start_index` and `frame_end_index`
self.log.debug("Filling frames not rendered frames.")

View file

@ -1,7 +1,7 @@
import pyblish.api
from openpype.api import version_up
from openpype.hosts.tvpaint.api import workio
from openpype.lib import version_up
from openpype.pipeline import registered_host
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
@ -17,6 +17,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
assert all(result["success"] for result in context.data["results"]), (
"Publishing not successful so version is not increased.")
host = registered_host()
path = context.data["currentFile"]
workio.save_file(version_up(path))
host.save_workfile(version_up(path))
self.log.info('Incrementing workfile version')

View file

@ -1,6 +1,9 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import pipeline
from openpype.hosts.tvpaint.api.pipeline import (
list_instances,
write_instances,
)
class FixAssetNames(pyblish.api.Action):
@ -15,7 +18,7 @@ class FixAssetNames(pyblish.api.Action):
def process(self, context, plugin):
context_asset_name = context.data["asset"]
old_instance_items = pipeline.list_instances()
old_instance_items = list_instances()
new_instance_items = []
for instance_item in old_instance_items:
instance_asset_name = instance_item.get("asset")
@ -25,7 +28,7 @@ class FixAssetNames(pyblish.api.Action):
):
instance_item["asset"] = context_asset_name
new_instance_items.append(instance_item)
pipeline._write_instances(new_instance_items)
write_instances(new_instance_items)
class ValidateAssetNames(pyblish.api.ContextPlugin):

View file

@ -2,7 +2,7 @@ import json
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.api.lib import execute_george
class ValidateMarksRepair(pyblish.api.Action):
@ -15,10 +15,10 @@ class ValidateMarksRepair(pyblish.api.Action):
def process(self, context, plugin):
expected_data = ValidateMarks.get_expected_data(context)
lib.execute_george(
execute_george(
"tv_markin {} set".format(expected_data["markIn"])
)
lib.execute_george(
execute_george(
"tv_markout {} set".format(expected_data["markOut"])
)

View file

@ -1,6 +1,6 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
from openpype.hosts.tvpaint.api.lib import execute_george
class RepairStartFrame(pyblish.api.Action):
@ -11,7 +11,7 @@ class RepairStartFrame(pyblish.api.Action):
on = "failed"
def process(self, context, plugin):
lib.execute_george("tv_startframe 0")
execute_george("tv_startframe 0")
class ValidateStartFrame(pyblish.api.ContextPlugin):
@ -24,7 +24,7 @@ class ValidateStartFrame(pyblish.api.ContextPlugin):
optional = True
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
start_frame = execute_george("tv_startframe")
if start_frame == 0:
return

View file

@ -1,6 +1,5 @@
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import save_file
from openpype.pipeline import PublishXmlValidationError, registered_host
class ValidateWorkfileMetadataRepair(pyblish.api.Action):
@ -13,8 +12,9 @@ class ValidateWorkfileMetadataRepair(pyblish.api.Action):
def process(self, context, _plugin):
"""Save current workfile which should trigger storing of metadata."""
current_file = context.data["currentFile"]
host = registered_host()
# Save file should trigger
save_file(current_file)
host.save_workfile(current_file)
class ValidateWorkfileMetadata(pyblish.api.ContextPlugin):

View file

@ -8,8 +8,8 @@ from openpype.lib import (
PreLaunchHook,
ApplicationLaunchFailed,
ApplicationNotFound,
get_workfile_template_key
)
from openpype.pipeline.workfile import get_workfile_template_key
import openpype.hosts.unreal.lib as unreal_lib

View file

@ -1,5 +1,5 @@
#include "OpenPype.h"
#include "OpenPypeStyle.h"
#include "OpenPype.h"
#include "Framework/Application/SlateApplication.h"
#include "Styling/SlateStyleRegistry.h"
#include "Slate/SlateGameResources.h"

View file

@ -1,10 +1,33 @@
import os
import logging
import platform
import subprocess
log = logging.getLogger("Vendor utils")
class CachedToolPaths:
"""Cache already used and discovered tools and their executables.
Discovering path can take some time and can trigger subprocesses so it's
better to cache the paths on first get.
"""
_cached_paths = {}
@classmethod
def is_tool_cached(cls, tool):
return tool in cls._cached_paths
@classmethod
def get_executable_path(cls, tool):
return cls._cached_paths.get(tool)
@classmethod
def cache_executable_path(cls, tool, path):
cls._cached_paths[tool] = path
def is_file_executable(filepath):
"""Filepath lead to executable file.
@ -98,6 +121,7 @@ def get_vendor_bin_path(bin_app):
Returns:
str: Path to vendorized binaries folder.
"""
return os.path.join(
os.environ["OPENPYPE_ROOT"],
"vendor",
@ -107,6 +131,123 @@ def get_vendor_bin_path(bin_app):
)
def find_tool_in_custom_paths(paths, tool, validation_func=None):
"""Find a tool executable in custom paths.
Args:
paths (Iterable[str]): Iterable of paths where to look for tool.
tool (str): Name of tool (binary file) to find in passed paths.
validation_func (Function): Custom validation function of path.
Function must expect one argument which is path to executable.
If not passed, only 'find_executable' is used to identify
whether the path is valid.
Returns:
Union[str, None]: Path to validated executable or None if it was not
found.
"""
for path in paths:
# Skip empty strings
if not path:
continue
# Handle cases when path is just an executable
# - it allows to use executable from PATH
# - basename must match 'tool' value (without extension)
extless_path, ext = os.path.splitext(path)
if extless_path == tool:
executable_path = find_executable(tool)
if executable_path and (
validation_func is None
or validation_func(executable_path)
):
return executable_path
continue
# Normalize path because it should be a path and check if exists
normalized = os.path.normpath(path)
if not os.path.exists(normalized):
continue
# Note: Path can be both file and directory
# If path is a file validate it
if os.path.isfile(normalized):
basename, ext = os.path.splitext(os.path.basename(path))
# Check if the filename actually has the same name as 'tool'
if basename == tool:
executable_path = find_executable(normalized)
if executable_path and (
validation_func is None
or validation_func(executable_path)
):
return executable_path
# Check if path is a directory and look for tool inside the dir
if os.path.isdir(normalized):
executable_path = find_executable(os.path.join(normalized, tool))
if executable_path and (
validation_func is None
or validation_func(executable_path)
):
return executable_path
return None
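A minimal usage sketch mirroring the getters further below (the environment variable content and the validation function are whatever the caller provides):

custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or ""
executable = find_tool_in_custom_paths(
    custom_paths_str.split(os.pathsep),
    "oiiotool",
    validation_func=_oiio_executable_validation
)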
def _check_args_returncode(args):
try:
# Python 2 compatibility where DEVNULL is not available
if hasattr(subprocess, "DEVNULL"):
proc = subprocess.Popen(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
proc.wait()
else:
with open(os.devnull, "w") as devnull:
proc = subprocess.Popen(
args, stdout=devnull, stderr=devnull,
)
proc.wait()
except Exception:
return False
return proc.returncode == 0
def _oiio_executable_validation(filepath):
"""Validate oiio tool executable if can be executed.
Validation has 2 steps. First is using 'find_executable' to fill possible
missing extension or fill directory then launch executable and validate
that it can be executed. For that is used '--help' argument which is fast
and does not need any other inputs.
Any possible crash of missing libraries or invalid build should be catched.
Main reason is to validate if executable can be executed on OS just running
which can be issue ob linux machines.
Note:
It does not validate if the executable is really a oiio tool which
should be used.
Args:
filepath (str): Path to executable.
Returns:
bool: Filepath is valid executable.
"""
filepath = find_executable(filepath)
if not filepath:
return False
return _check_args_returncode([filepath, "--help"])
def get_oiio_tools_path(tool="oiiotool"):
"""Path to vendorized OpenImageIO tool executables.
@ -117,10 +258,62 @@ def get_oiio_tools_path(tool="oiiotool"):
Default is "oiiotool".
"""
oiio_dir = get_vendor_bin_path("oiio")
if platform.system().lower() == "linux":
oiio_dir = os.path.join(oiio_dir, "bin")
return find_executable(os.path.join(oiio_dir, tool))
if CachedToolPaths.is_tool_cached(tool):
return CachedToolPaths.get_executable_path(tool)
custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or ""
tool_executable_path = find_tool_in_custom_paths(
custom_paths_str.split(os.pathsep),
tool,
_oiio_executable_validation
)
if not tool_executable_path:
oiio_dir = get_vendor_bin_path("oiio")
if platform.system().lower() == "linux":
oiio_dir = os.path.join(oiio_dir, "bin")
default_path = os.path.join(oiio_dir, tool)
if _oiio_executable_validation(default_path):
tool_executable_path = default_path
# Look to PATH for the tool
if not tool_executable_path:
from_path = find_executable(tool)
if from_path and _oiio_executable_validation(from_path):
tool_executable_path = from_path
CachedToolPaths.cache_executable_path(tool, tool_executable_path)
return tool_executable_path
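A small usage sketch ("maketx" is just another OpenImageIO tool name used for illustration; whether it is available depends on the bundled or configured build):

oiiotool_path = get_oiio_tools_path()        # defaults to "oiiotool"
maketx_path = get_oiio_tools_path("maketx")
# Repeated calls return the cached paths without re-running the validation.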
def _ffmpeg_executable_validation(filepath):
"""Validate ffmpeg tool executable if can be executed.
Validation has 2 steps. First is using 'find_executable' to fill possible
missing extension or fill directory then launch executable and validate
that it can be executed. For that is used '-version' argument which is fast
and does not need any other inputs.
Any possible crash of missing libraries or invalid build should be catched.
Main reason is to validate if executable can be executed on OS just running
which can be issue ob linux machines.
Note:
It does not validate if the executable is really a ffmpeg tool.
Args:
filepath (str): Path to executable.
Returns:
bool: Filepath is valid executable.
"""
filepath = find_executable(filepath)
if not filepath:
return False
return _check_args_returncode([filepath, "-version"])
def get_ffmpeg_tool_path(tool="ffmpeg"):
@ -133,10 +326,33 @@ def get_ffmpeg_tool_path(tool="ffmpeg"):
Returns:
str: Full path to ffmpeg executable.
"""
ffmpeg_dir = get_vendor_bin_path("ffmpeg")
if platform.system().lower() == "windows":
ffmpeg_dir = os.path.join(ffmpeg_dir, "bin")
return find_executable(os.path.join(ffmpeg_dir, tool))
if CachedToolPaths.is_tool_cached(tool):
return CachedToolPaths.get_executable_path(tool)
custom_paths_str = os.environ.get("OPENPYPE_FFMPEG_PATHS") or ""
tool_executable_path = find_tool_in_custom_paths(
custom_paths_str.split(os.pathsep),
tool,
_ffmpeg_executable_validation
)
if not tool_executable_path:
ffmpeg_dir = get_vendor_bin_path("ffmpeg")
if platform.system().lower() == "windows":
ffmpeg_dir = os.path.join(ffmpeg_dir, "bin")
tool_path = find_executable(os.path.join(ffmpeg_dir, tool))
if tool_path and _ffmpeg_executable_validation(tool_path):
tool_executable_path = tool_path
# Look to PATH for the tool
if not tool_executable_path:
from_path = find_executable(tool)
if from_path and _ffmpeg_executable_validation(from_path):
tool_executable_path = from_path
CachedToolPaths.cache_executable_path(tool, tool_executable_path)
return tool_executable_path
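A small sketch of a call site (the subprocess invocation is illustrative only):

ffmpeg_path = get_ffmpeg_tool_path()
ffprobe_path = get_ffmpeg_tool_path("ffprobe")
if ffmpeg_path:
    # Quick sanity call; both paths are cached after the first resolution
    subprocess.call([ffmpeg_path, "-version"])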
def is_oiio_supported():

View file

@ -32,6 +32,9 @@ from maya import cmds
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api.lib_rendersettings import RenderSettings
from openpype.hosts.maya.api.lib import get_attr_in_layer
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@ -498,9 +501,10 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
job_info.AssetDependency += self.scene_path
# Get layer prefix
render_products = self._instance.data["renderProducts"]
layer_metadata = render_products.layer_data
layer_prefix = layer_metadata.filePrefix
renderlayer = self._instance.data["setMembers"]
renderer = self._instance.data["renderer"]
layer_prefix_attr = RenderSettings.get_image_prefix_attr(renderer)
layer_prefix = get_attr_in_layer(layer_prefix_attr, layer=renderlayer)
plugin_info = copy.deepcopy(self.plugin_info)
plugin_info.update({

View file

@ -0,0 +1,125 @@
import webbrowser
from openpype.pipeline import LauncherAction
from openpype.modules import ModulesManager
from openpype.client import get_project, get_asset_by_name
class ShowInKitsu(LauncherAction):
name = "showinkitsu"
label = "Show in Kitsu"
icon = "external-link-square"
color = "#e0e1e1"
order = 10
@staticmethod
def get_kitsu_module():
return ModulesManager().modules_by_name.get("kitsu")
def is_compatible(self, session):
if not session.get("AVALON_PROJECT"):
return False
return True
def process(self, session, **kwargs):
# Context inputs
project_name = session["AVALON_PROJECT"]
asset_name = session.get("AVALON_ASSET", None)
task_name = session.get("AVALON_TASK", None)
project = get_project(project_name=project_name,
fields=["data.zou_id"])
if not project:
raise RuntimeError(f"Project {project_name} not found.")
project_zou_id = project["data"].get("zou_id")
if not project_zou_id:
raise RuntimeError(f"Project {project_name} has no "
f"connected kitsu id.")
asset_zou_name = None
asset_zou_id = None
asset_zou_type = 'Assets'
task_zou_id = None
zou_sub_type = ['AssetType', 'Sequence']
if asset_name:
asset_zou_name = asset_name
asset_fields = ["data.zou.id", "data.zou.type"]
if task_name:
asset_fields.append(f"data.tasks.{task_name}.zou.id")
asset = get_asset_by_name(project_name,
asset_name=asset_name,
fields=asset_fields)
asset_zou_data = asset["data"].get("zou")
if asset_zou_data:
asset_zou_type = asset_zou_data["type"]
if asset_zou_type not in zou_sub_type:
asset_zou_id = asset_zou_data["id"]
else:
asset_zou_type = asset_name
if task_name:
task_data = asset["data"]["tasks"][task_name]
task_zou_data = task_data.get("zou", {})
if not task_zou_data:
self.log.debug(f"No zou task data for task: {task_name}")
task_zou_id = task_zou_data["id"]
# Define URL
url = self.get_url(project_id=project_zou_id,
asset_name=asset_zou_name,
asset_id=asset_zou_id,
asset_type=asset_zou_type,
task_id=task_zou_id)
# Open URL in webbrowser
self.log.info(f"Opening URL: {url}")
webbrowser.open(url,
# Try in new tab
new=2)
def get_url(self,
project_id,
asset_name=None,
asset_id=None,
asset_type=None,
task_id=None):
shots_url = {'Shots', 'Sequence', 'Shot'}
sub_type = {'AssetType', 'Sequence'}
kitsu_module = self.get_kitsu_module()
# Get kitsu url with /api stripped
kitsu_url = kitsu_module.server_url
if kitsu_url.endswith("/api"):
kitsu_url = kitsu_url[:-len("/api")]
sub_url = f"/productions/{project_id}"
asset_type_url = "Shots" if asset_type in shots_url else "Assets"
if task_id:
# Go to task page
# /productions/{project-id}/{asset_type}/tasks/{task_id}
sub_url += f"/{asset_type_url}/tasks/{task_id}"
elif asset_id:
# Go to asset or shot page
# /productions/{project-id}/assets/{entity_id}
# /productions/{project-id}/shots/{entity_id}
sub_url += f"/{asset_type_url}/{asset_id}"
else:
# Go to project page
# Project page must end with a view
# /productions/{project-id}/assets/
# Add a search query if it is a sub_type
sub_url += f"/{asset_type_url}"
if asset_type in sub_type:
sub_url += f'?search={asset_name}'
return f"{kitsu_url}{sub_url}"

View file

@ -89,7 +89,10 @@ class KitsuModule(OpenPypeModule, IPluginPaths, ITrayAction):
"""Implementation of abstract method for `IPluginPaths`."""
current_dir = os.path.dirname(os.path.abspath(__file__))
return {"publish": [os.path.join(current_dir, "plugins", "publish")]}
return {
"publish": [os.path.join(current_dir, "plugins", "publish")],
"actions": [os.path.join(current_dir, "actions")]
}
def cli(self, click_group):
click_group.add_command(cli_main)

View file

@ -6,8 +6,17 @@ import collections
import numbers
import six
import time
from openpype.settings.lib import get_anatomy_settings
from openpype.settings.lib import (
get_project_settings,
get_local_settings,
)
from openpype.settings.constants import (
DEFAULT_PROJECT_KEY
)
from openpype.client import get_project
from openpype.lib.path_templates import (
TemplateUnsolved,
TemplateResult,
@ -39,34 +48,23 @@ class RootCombinationError(Exception):
super(RootCombinationError, self).__init__(msg)
class Anatomy:
class BaseAnatomy(object):
"""Anatomy module helps to keep project settings.
Wraps key project specifications, AnatomyTemplates and Roots.
Args:
project_name (str): Project name to look on overrides.
"""
root_key_regex = re.compile(r"{(root?[^}]+)}")
root_name_regex = re.compile(r"root\[([^]]+)\]")
def __init__(self, project_name=None, site_name=None):
if not project_name:
project_name = os.environ.get("AVALON_PROJECT")
if not project_name:
raise ProjectNotSet((
"Implementation bug: Project name is not set. Anatomy requires"
" to load data for specific project."
))
def __init__(self, project_doc, local_settings, site_name):
project_name = project_doc["name"]
self.project_name = project_name
self._data = self._prepare_anatomy_data(
get_anatomy_settings(project_name, site_name)
)
self._site_name = site_name
self._data = self._prepare_anatomy_data(
project_doc, local_settings, site_name
)
self._templates_obj = AnatomyTemplates(self)
self._roots_obj = Roots(self)
@ -87,12 +85,14 @@ class Anatomy:
def items(self):
return copy.deepcopy(self._data).items()
@staticmethod
def _prepare_anatomy_data(anatomy_data):
def _prepare_anatomy_data(self, project_doc, local_settings, site_name):
"""Prepare anatomy data for further processing.
Method added to replace `{task}` with `{task[name]}` in templates.
"""
project_name = project_doc["name"]
anatomy_data = self._project_doc_to_anatomy_data(project_doc)
templates_data = anatomy_data.get("templates")
if templates_data:
# Replace `{task}` with `{task[name]}` in templates
@ -103,23 +103,13 @@ class Anatomy:
if not isinstance(item, dict):
continue
for key in tuple(item.keys()):
value = item[key]
if isinstance(value, dict):
value_queue.append(value)
self._apply_local_settings_on_anatomy_data(anatomy_data,
local_settings,
project_name,
site_name)
elif isinstance(value, six.string_types):
item[key] = value.replace("{task}", "{task[name]}")
return anatomy_data
def reset(self):
"""Reset values of cached data in templates and roots objects."""
self._data = self._prepare_anatomy_data(
get_anatomy_settings(self.project_name, self._site_name)
)
self.templates_obj.reset()
self.roots_obj.reset()
@property
def templates(self):
"""Wrap property `templates` of Anatomy's AnatomyTemplates instance."""
@ -338,6 +328,161 @@ class Anatomy:
data = self.root_environmets_fill_data(template)
return rootless_path.format(**data)
def _project_doc_to_anatomy_data(self, project_doc):
"""Convert project document to anatomy data.
Probably should fill missing keys and values.
"""
output = copy.deepcopy(project_doc["config"])
output["attributes"] = copy.deepcopy(project_doc["data"])
return output
def _apply_local_settings_on_anatomy_data(
self, anatomy_data, local_settings, project_name, site_name
):
"""Apply local settings on anatomy data.
At the moment local settings can modify project roots. Project name is
required as local settings store data by project name.
Local settings override root values in this order:
1.) Check if local settings contain overrides for the default project and
apply its values on roots if there are any.
2.) If passed `project_name` is not None then check project specific
overrides in local settings for the project and apply its values on
roots if there are any.
NOTE: Root values of the default project from local settings are always
applied if they are set.
Args:
anatomy_data (dict): Data for anatomy.
local_settings (dict): Data of local settings.
project_name (str): Name of project for which anatomy data are.
"""
if not local_settings:
return
local_project_settings = local_settings.get("projects") or {}
# Check for roots existence in local settings first
roots_project_locals = (
local_project_settings
.get(project_name, {})
)
roots_default_locals = (
local_project_settings
.get(DEFAULT_PROJECT_KEY, {})
)
# Skip rest of processing if roots are not set
if not roots_project_locals and not roots_default_locals:
return
# Combine roots from local settings
roots_locals = roots_default_locals.get(site_name) or {}
roots_locals.update(roots_project_locals.get(site_name) or {})
# Skip processing if roots for current active site are not available in
# local settings
if not roots_locals:
return
current_platform = platform.system().lower()
root_data = anatomy_data["roots"]
for root_name, path in roots_locals.items():
if root_name not in root_data:
continue
anatomy_data["roots"][root_name][current_platform] = (
path
)
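A sketch of the data shapes consumed above, derived from the lookups in this method (project, site and path values are illustrative):

local_settings = {
    "projects": {
        "MyProject": {
            "studio": {"work": "P:/projects"}
        },
        DEFAULT_PROJECT_KEY: {
            "studio": {"work": "D:/fallback_projects"}
        }
    }
}
anatomy_data = {
    "roots": {
        "work": {
            "windows": "C:/projects",
            "linux": "/mnt/projects",
            "darwin": "/Volumes/projects"
        }
    }
}
# With site_name "studio" on Windows the "work" root ends up as "P:/projects"
# because project-specific values override the default-project values.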
class Anatomy(BaseAnatomy):
_project_cache = {}
_site_cache = {}
def __init__(self, project_name=None, site_name=None):
if not project_name:
project_name = os.environ.get("AVALON_PROJECT")
if not project_name:
raise ProjectNotSet((
"Implementation bug: Project name is not set. Anatomy requires"
" to load data for specific project."
))
project_doc = self.get_project_doc_from_cache(project_name)
local_settings = get_local_settings()
if not site_name:
site_name = self.get_site_name_from_cache(
project_name, local_settings
)
super(Anatomy, self).__init__(
project_doc,
local_settings,
site_name
)
@classmethod
def get_project_doc_from_cache(cls, project_name):
project_cache = cls._project_cache.get(project_name)
if project_cache is not None:
if time.time() - project_cache["start"] > 10:
cls._project_cache.pop(project_name)
project_cache = None
if project_cache is None:
project_cache = {
"project_doc": get_project(project_name),
"start": time.time()
}
cls._project_cache[project_name] = project_cache
return copy.deepcopy(
cls._project_cache[project_name]["project_doc"]
)
@classmethod
def get_site_name_from_cache(cls, project_name, local_settings):
site_cache = cls._site_cache.get(project_name)
if site_cache is not None:
if time.time() - site_cache["start"] > 10:
cls._site_cache.pop(project_name)
site_cache = None
if site_cache:
return site_cache["site_name"]
local_project_settings = local_settings.get("projects")
if not local_project_settings:
return
project_locals = local_project_settings.get(project_name) or {}
default_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) or {}
active_site = (
project_locals.get("active_site")
or default_locals.get("active_site")
)
if not active_site:
project_settings = get_project_settings(project_name)
active_site = (
project_settings
["global"]
["sync_server"]
["config"]
["active_site"]
)
cls._site_cache[project_name] = {
"site_name": active_site,
"start": time.time()
}
return active_site
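A minimal usage sketch of the caching behaviour above (the project name is illustrative):

anatomy = Anatomy("MyProject")
templates = anatomy.templates
# A second Anatomy created within ~10 seconds reuses the cached project
# document and resolved site name instead of querying them again.
anatomy_again = Anatomy("MyProject")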
class AnatomyTemplateUnsolved(TemplateUnsolved):
"""Exception for unsolved template when strict is set to True."""

View file

@ -29,7 +29,7 @@
"delivery": {},
"unreal": {
"folder": "{root[work]}/{project[name]}/unreal/{task[name]}",
"file": "{project[code]}_{asset}",
"file": "{project[code]}_{asset}.{ext}",
"path": "{@folder}/{@file}"
},
"others": {

View file

@ -126,6 +126,7 @@
"CreateAnimation": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main"
]
@ -133,6 +134,7 @@
"CreatePointCache": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main"
]
@ -187,6 +189,8 @@
},
"CreateModel": {
"enabled": true,
"write_color_sets": false,
"write_face_sets": false,
"defaults": [
"Main",
"Proxy",

View file

@ -782,41 +782,11 @@
"host_name": "resolve",
"environment": {
"RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [],
"RESOLVE_SCRIPT_API": {
"windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting",
"darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting",
"linux": "/opt/resolve/Developer/Scripting"
},
"RESOLVE_SCRIPT_LIB": {
"windows": "C:/Program Files/Blackmagic Design/DaVinci Resolve/fusionscript.dll",
"darwin": "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so",
"linux": "/opt/resolve/libs/Fusion/fusionscript.so"
},
"RESOLVE_UTILITY_SCRIPTS_DIR": {
"windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp",
"darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp",
"linux": "/opt/resolve/Fusion/Scripts/Comp"
},
"PYTHON36_RESOLVE": {
"RESOLVE_PYTHON3_HOME": {
"windows": "{LOCALAPPDATA}/Programs/Python/Python36",
"darwin": "~/Library/Python/3.6/bin",
"linux": "/opt/Python/3.6/bin"
},
"PYTHONPATH": [
"{PYTHON36_RESOLVE}/Lib/site-packages",
"{VIRTUAL_ENV}/Lib/site-packages",
"{PYTHONPATH}",
"{RESOLVE_SCRIPT_API}/Modules",
"{PYTHONPATH}"
],
"PATH": [
"{PYTHON36_RESOLVE}",
"{PYTHON36_RESOLVE}/Scripts",
"{PATH}"
],
"PRE_PYTHON_SCRIPT": "{OPENPYPE_REPOS_ROOT}/openpype/resolve/preload_console.py",
"OPENPYPE_LOG_NO_COLORS": "True",
"RESOLVE_DEV": "True"
}
},
"variants": {
"stable": {

View file

@ -127,6 +127,41 @@
"key": "write_color_sets",
"label": "Write Color Sets"
},
{
"type": "boolean",
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "list",
"key": "defaults",
"label": "Default Subsets",
"object_type": "text"
}
]
},
{
"type": "dict",
"collapsible": true,
"key": "CreateModel",
"label": "Create Model",
"checkbox_key": "enabled",
"children": [
{
"type": "boolean",
"key": "enabled",
"label": "Enabled"
},
{
"type": "boolean",
"key": "write_color_sets",
"label": "Write Color Sets"
},
{
"type": "boolean",
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "list",
"key": "defaults",
@ -152,6 +187,11 @@
"key": "write_color_sets",
"label": "Write Color Sets"
},
{
"type": "boolean",
"key": "write_face_sets",
"label": "Write Face Sets"
},
{
"type": "list",
"key": "defaults",
@ -160,7 +200,7 @@
}
]
},
{
"type": "schema_template",
"name": "template_create_plugin",
@ -197,10 +237,6 @@
"key": "CreateMayaScene",
"label": "Create Maya Scene"
},
{
"key": "CreateModel",
"label": "Create Model"
},
{
"key": "CreateRenderSetup",
"label": "Create Render Setup"

View file

@ -1,3 +1,5 @@
from Qt import QtWidgets
from .report_items import (
PublishReport
)
@ -16,4 +18,13 @@ __all__ = (
"PublishReportViewerWidget",
"PublishReportViewerWindow",
"main",
)
def main():
app = QtWidgets.QApplication([])
window = PublishReportViewerWindow()
window.show()
return app.exec_()

View file

@ -41,9 +41,26 @@ from ..constants import (
)
class SelectionType:
def __init__(self, name):
self.name = name
def __eq__(self, other):
if isinstance(other, SelectionType):
other = other.name
return self.name == other
class SelectionTypes:
clear = SelectionType("clear")
extend = SelectionType("extend")
extend_to = SelectionType("extend_to")
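A short illustration of the equality behaviour defined by `SelectionType.__eq__` above:

SelectionTypes.extend == SelectionType("extend")    # True
SelectionTypes.extend == "extend"                   # True, falls back to comparing names
SelectionTypes.extend == SelectionTypes.extend_to   # False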
class GroupWidget(QtWidgets.QWidget):
"""Widget wrapping instances under group."""
selected = QtCore.Signal(str, str)
selected = QtCore.Signal(str, str, SelectionType)
active_changed = QtCore.Signal()
removed_selected = QtCore.Signal()
@ -72,21 +89,73 @@ class GroupWidget(QtWidgets.QWidget):
self._group_icons = group_icons
self._widgets_by_id = {}
self._ordered_instance_ids = []
self._label_widget = label_widget
self._content_layout = layout
@property
def group_name(self):
"""Group which widget represent.
Returns:
str: Name of group.
"""
return self._group
def get_selected_instance_ids(self):
"""Selected instance ids.
Returns:
Set[str]: Instance ids that are selected.
"""
return {
instance_id
for instance_id, widget in self._widgets_by_id.items()
if widget.is_selected
}
def get_selected_widgets(self):
"""Access to widgets marked as selected.
Returns:
List[InstanceCardWidget]: Instance widgets that are selected.
"""
return [
widget
for instance_id, widget in self._widgets_by_id.items()
if widget.is_selected
]
def get_ordered_widgets(self):
"""Get instance ids in order as are shown in ui.
Returns:
List[str]: Instance ids.
"""
return [
self._widgets_by_id[instance_id]
for instance_id in self._ordered_instance_ids
]
def get_widget_by_instance_id(self, instance_id):
"""Get instance widget by it's id."""
return self._widgets_by_id.get(instance_id)
def update_instance_values(self):
"""Trigger update on instance widgets."""
for widget in self._widgets_by_id.values():
widget.update_instance_values()
def confirm_remove_instance_id(self, instance_id):
"""Delete widget by instance id."""
widget = self._widgets_by_id.pop(instance_id)
widget.setVisible(False)
self._content_layout.removeWidget(widget)
@ -123,6 +192,7 @@ class GroupWidget(QtWidgets.QWidget):
# Sort instances by subset name
sorted_subset_names = list(sorted(instances_by_subset_name.keys()))
# Add new instances to widget
widget_idx = 1
for subset_names in sorted_subset_names:
@ -135,17 +205,30 @@ class GroupWidget(QtWidgets.QWidget):
widget = InstanceCardWidget(
instance, group_icon, self
)
widget.selected.connect(self.selected)
widget.selected.connect(self._on_widget_selection)
widget.active_changed.connect(self.active_changed)
self._widgets_by_id[instance.id] = widget
self._content_layout.insertWidget(widget_idx, widget)
widget_idx += 1
ordered_instance_ids = []
for idx in range(self._content_layout.count()):
if idx > 0:
item = self._content_layout.itemAt(idx)
widget = item.widget()
if widget is not None:
ordered_instance_ids.append(widget.id)
self._ordered_instance_ids = ordered_instance_ids
def _on_widget_selection(self, instance_id, group_id, selection_type):
self.selected.emit(instance_id, group_id, selection_type)
class CardWidget(BaseClickableFrame):
"""Clickable card used as bigger button."""
selected = QtCore.Signal(str, str)
selected = QtCore.Signal(str, str, SelectionType)
# Group identifier of card
# - this must be set because it is sent along with the card id when the
# mouse is released
_group_identifier = None
@ -157,6 +240,12 @@ class CardWidget(BaseClickableFrame):
self._selected = False
self._id = None
@property
def id(self):
"""Id of card."""
return self._id
@property
def is_selected(self):
"""Is card selected."""
@ -173,7 +262,16 @@ class CardWidget(BaseClickableFrame):
def _mouse_release_callback(self):
"""Trigger selected signal."""
self.selected.emit(self._id, self._group_identifier)
modifiers = QtWidgets.QApplication.keyboardModifiers()
selection_type = SelectionTypes.clear
if bool(modifiers & QtCore.Qt.ShiftModifier):
selection_type = SelectionTypes.extend_to
elif bool(modifiers & QtCore.Qt.ControlModifier):
selection_type = SelectionTypes.extend
self.selected.emit(self._id, self._group_identifier, selection_type)
class ContextCardWidget(CardWidget):
@ -382,11 +480,12 @@ class InstanceCardView(AbstractInstanceView):
self._content_layout = content_layout
self._content_widget = content_widget
self._widgets_by_group = {}
self._context_widget = None
self._widgets_by_group = {}
self._ordered_groups = []
self._selected_group = None
self._selected_instance_id = None
self._explicitly_selected_instance_ids = []
self._explicitly_selected_groups = []
self.setSizePolicy(
QtWidgets.QSizePolicy.Minimum,
@ -406,21 +505,30 @@ class InstanceCardView(AbstractInstanceView):
result.setWidth(width)
return result
def _get_selected_widget(self):
if self._selected_instance_id == CONTEXT_ID:
return self._context_widget
def _get_selected_widgets(self):
output = []
if (
self._context_widget is not None
and self._context_widget.is_selected
):
output.append(self._context_widget)
group_widget = self._widgets_by_group.get(
self._selected_group
)
if group_widget is not None:
widget = group_widget.get_widget_by_instance_id(
self._selected_instance_id
)
if widget is not None:
return widget
for group_widget in self._widgets_by_group.values():
for widget in group_widget.get_selected_widgets():
output.append(widget)
return output
return None
def _get_selected_instance_ids(self):
output = []
if (
self._context_widget is not None
and self._context_widget.is_selected
):
output.append(CONTEXT_ID)
for group_widget in self._widgets_by_group.values():
output.extend(group_widget.get_selected_instance_ids())
return output
def refresh(self):
"""Refresh instances in view based on CreatedContext."""
@ -436,8 +544,6 @@ class InstanceCardView(AbstractInstanceView):
self.selection_changed.emit()
self._content_layout.insertWidget(0, widget)
self.select_item(CONTEXT_ID, None)
# Prepare instances by group and identifiers by group
instances_by_group = collections.defaultdict(list)
identifiers_by_group = collections.defaultdict(set)
@ -453,15 +559,17 @@ class InstanceCardView(AbstractInstanceView):
if group_name in instances_by_group:
continue
if group_name == self._selected_group:
self._on_remove_selected()
widget = self._widgets_by_group.pop(group_name)
widget.setVisible(False)
self._content_layout.removeWidget(widget)
widget.deleteLater()
if group_name in self._explicitly_selected_groups:
self._explicitly_selected_groups.remove(group_name)
# Sort groups
sorted_group_names = list(sorted(instances_by_group.keys()))
# Keep track of widget indexes
# - we start with 1 because the Context item is at the top
widget_idx = 1
@ -479,9 +587,6 @@ class InstanceCardView(AbstractInstanceView):
)
group_widget.active_changed.connect(self._on_active_changed)
group_widget.selected.connect(self._on_widget_selection)
group_widget.removed_selected.connect(
self._on_remove_selected
)
self._content_layout.insertWidget(widget_idx, group_widget)
self._widgets_by_group[group_name] = group_widget
@ -490,6 +595,16 @@ class InstanceCardView(AbstractInstanceView):
instances_by_group[group_name]
)
ordered_group_names = [""]
for idx in range(self._content_layout.count()):
if idx > 0:
item = self._content_layout.itemAt(idx)
group_widget = item.widget()
if group_widget is not None:
ordered_group_names.append(group_widget.group_name)
self._ordered_groups = ordered_group_names
def refresh_instance_states(self):
"""Trigger update of instances on group widgets."""
for widget in self._widgets_by_group.values():
@ -498,10 +613,7 @@ class InstanceCardView(AbstractInstanceView):
def _on_active_changed(self):
self.active_changed.emit()
def _on_widget_selection(self, instance_id, group_name):
self.select_item(instance_id, group_name)
def select_item(self, instance_id, group_name):
def _on_widget_selection(self, instance_id, group_name, selection_type):
"""Select specific item by instance id.
Pass `CONTEXT_ID` as instance id and empty string as group to select
@ -513,34 +625,318 @@ class InstanceCardView(AbstractInstanceView):
group_widget = self._widgets_by_group[group_name]
new_widget = group_widget.get_widget_by_instance_id(instance_id)
selected_widget = self._get_selected_widget()
if new_widget is selected_widget:
return
if selected_widget is not None:
selected_widget.set_selected(False)
self._selected_instance_id = instance_id
self._selected_group = group_name
if new_widget is not None:
new_widget.set_selected(True)
if selection_type is SelectionTypes.clear:
self._select_item_clear(instance_id, group_name, new_widget)
elif selection_type is SelectionTypes.extend:
self._select_item_extend(instance_id, group_name, new_widget)
elif selection_type is SelectionTypes.extend_to:
self._select_item_extend_to(instance_id, group_name, new_widget)
self.selection_changed.emit()
def _on_remove_selected(self):
selected_widget = self._get_selected_widget()
if selected_widget is None:
self._on_widget_selection(CONTEXT_ID, None)
def _select_item_clear(self, instance_id, group_name, new_widget):
"""Select specific item by instance id and clear previous selection.
Pass `CONTEXT_ID` as instance id and empty string as group to select
global context item.
"""
selected_widgets = self._get_selected_widgets()
for widget in selected_widgets:
if widget.id != instance_id:
widget.set_selected(False)
self._explicitly_selected_groups = [group_name]
self._explicitly_selected_instance_ids = [instance_id]
if new_widget is not None:
new_widget.set_selected(True)
def _select_item_extend(self, instance_id, group_name, new_widget):
"""Add/Remove single item to/from current selection.
If item is already selected the selection is removed.
"""
self._explicitly_selected_instance_ids = (
self._get_selected_instance_ids()
)
if new_widget.is_selected:
self._explicitly_selected_instance_ids.remove(instance_id)
new_widget.set_selected(False)
remove_group = False
if instance_id == CONTEXT_ID:
remove_group = True
else:
group_widget = self._widgets_by_group[group_name]
if not group_widget.get_selected_widgets():
remove_group = True
if remove_group:
self._explicitly_selected_groups.remove(group_name)
return
self._explicitly_selected_instance_ids.append(instance_id)
if group_name in self._explicitly_selected_groups:
self._explicitly_selected_groups.remove(group_name)
self._explicitly_selected_groups.append(group_name)
new_widget.set_selected(True)
def _select_item_extend_to(self, instance_id, group_name, new_widget):
"""Extend selected items to specific instance id.
This method handles Shift+click selection of widgets. The selection
is not stored to the explicitly selected items. That's because the user
can shift-select again and the last explicitly selected item should be
used as the source item for the selection.
Items selected via this function get into the explicit selection only if
the selection is extended by one specific item ('_select_item_extend').
From that moment the selection is locked to the new last explicitly
selected item.
It's required to traverse through group widgets in their UI order and
through their instances in UI order. All explicitly selected items
must not change their selection state during this function. The passed
instance id can be above or under the last selected item, so a start item
and an end item must be found to know in which direction the selection
is happening.
"""
# Start group name (in '_ordered_groups')
start_group = None
# End group name (in '_ordered_groups')
end_group = None
# Instance id of first selected item
start_instance_id = None
# Instance id of last selected item
end_instance_id = None
# Get previously selected group by explicit selected groups
previous_group = None
if self._explicitly_selected_groups:
previous_group = self._explicitly_selected_groups[-1]
# Find last explicitly selected instance id
previous_last_selected_id = None
if self._explicitly_selected_instance_ids:
previous_last_selected_id = (
self._explicitly_selected_instance_ids[-1]
)
# If the last instance id was not found or is not available then the
# last selected group is also invalid.
# NOTE: This probably never happens?
if previous_last_selected_id is None:
previous_group = None
# Check if previously selected group is available and find out if
# new instance group is above or under previous selection
# - based on these information are start/end group/instance filled
if previous_group in self._ordered_groups:
new_idx = self._ordered_groups.index(group_name)
prev_idx = self._ordered_groups.index(previous_group)
if new_idx < prev_idx:
start_group = group_name
end_group = previous_group
start_instance_id = instance_id
end_instance_id = previous_last_selected_id
else:
start_group = previous_group
end_group = group_name
start_instance_id = previous_last_selected_id
end_instance_id = instance_id
# If start group is not set then use context item group name
if start_group is None:
start_group = ""
# If start instance id is not filled then use context id (similar to
# group)
if start_instance_id is None:
start_instance_id = CONTEXT_ID
# If end group is not defined then use passed group name
# - this can happen when the previous group was not selected
# - when this happens the selection will probably happen from context
# item to item selected by user
if end_group is None:
end_group = group_name
# If end instance is not filled then use instance selected by user
if end_instance_id is None:
end_instance_id = instance_id
# Start and end group are the same
# - a different logic is needed in that case
same_group = start_group == end_group
# Process known information and change selection of items
passed_start_group = False
passed_end_group = False
# Go through ordered groups (from top to bottom) and change selection
for name in self._ordered_groups:
# Prepare sorted instance widgets
if name == "":
sorted_widgets = [self._context_widget]
else:
group_widget = self._widgets_by_group[name]
sorted_widgets = group_widget.get_ordered_widgets()
# Change selection based on explicit selection if start group
# was not passed yet
if not passed_start_group:
if name != start_group:
for widget in sorted_widgets:
widget.set_selected(
widget.id in self._explicitly_selected_instance_ids
)
continue
# Change selection based on explicit selection if end group
# already passed
if passed_end_group:
for widget in sorted_widgets:
widget.set_selected(
widget.id in self._explicitly_selected_instance_ids
)
continue
# Start group is already passed and end group was not yet hit
if same_group:
passed_start_group = True
passed_end_group = True
passed_start_instance = False
passed_end_instance = False
for widget in sorted_widgets:
if not passed_start_instance:
if widget.id in (start_instance_id, end_instance_id):
if widget.id != start_instance_id:
# Swap start/end instance if start instance is
# after end
# - fix 'passed_end_instance' check
start_instance_id, end_instance_id = (
end_instance_id, start_instance_id
)
passed_start_instance = True
# Find out if widget should be selected
select = False
if passed_end_instance:
select = False
elif passed_start_instance:
select = True
# Keep explicitly selected instances selected even when they fall
# outside the start/end range
if (
not select
and widget.id in self._explicitly_selected_instance_ids
):
select = True
widget.set_selected(select)
if (
not passed_end_instance
and widget.id == end_instance_id
):
passed_end_instance = True
elif name == start_group:
# First group from which selection should start
# - look for start instance first from which the selection
# should happen
passed_start_group = True
passed_start_instance = False
for widget in sorted_widgets:
if widget.id == start_instance_id:
passed_start_instance = True
select = False
# Select if the start instance was already passed or the instance
# is in the explicitly selected items
if (
passed_start_instance
or widget.id in self._explicitly_selected_instance_ids
):
select = True
widget.set_selected(select)
elif name == end_group:
# Last group where selection should happen
# - look for end instance first after which the selection
# should stop
passed_end_group = True
passed_end_instance = False
for widget in sorted_widgets:
select = False
# Select if the end instance was not passed yet or the instance
# is in the explicitly selected items
if (
not passed_end_instance
or widget.id in self._explicitly_selected_instance_ids
):
select = True
widget.set_selected(select)
if widget.id == end_instance_id:
passed_end_instance = True
else:
# Just select everything between start and end group
for widget in sorted_widgets:
widget.set_selected(True)
def get_selected_items(self):
"""Get selected instance ids and context."""
instances = []
context_selected = False
selected_widget = self._get_selected_widget()
if selected_widget is self._context_widget:
context_selected = True
selected_widgets = self._get_selected_widgets()
elif selected_widget is not None:
instances.append(selected_widget.instance)
context_selected = False
for widget in selected_widgets:
if widget is self._context_widget:
context_selected = True
else:
instances.append(widget.id)
return instances, context_selected
def set_selected_items(self, instance_ids, context_selected):
s_instance_ids = set(instance_ids)
cur_ids, cur_context = self.get_selected_items()
if (
set(cur_ids) == s_instance_ids
and cur_context == context_selected
):
return
selected_groups = []
selected_instances = []
if context_selected:
selected_groups.append("")
selected_instances.append(CONTEXT_ID)
self._context_widget.set_selected(context_selected)
for group_name in self._ordered_groups:
if group_name == "":
continue
group_widget = self._widgets_by_group[group_name]
group_selected = False
for widget in group_widget.get_ordered_widgets():
select = False
if widget.id in s_instance_ids:
selected_instances.append(widget.id)
group_selected = True
select = True
widget.set_selected(select)
if group_selected:
selected_groups.append(group_name)
self._explicitly_selected_groups = selected_groups
self._explicitly_selected_instance_ids = selected_instances

View file

@ -723,13 +723,13 @@ class InstanceListView(AbstractInstanceView):
widget.update_instance_values()
def _on_active_changed(self, changed_instance_id, new_value):
selected_instances, _ = self.get_selected_items()
selected_instance_ids, _ = self.get_selected_items()
selected_ids = set()
found = False
for instance in selected_instances:
selected_ids.add(instance.id)
if not found and instance.id == changed_instance_id:
for instance_id in selected_instance_ids:
selected_ids.add(instance_id)
if not found and instance_id == changed_instance_id:
found = True
if not found:
@ -760,29 +760,6 @@ class InstanceListView(AbstractInstanceView):
if changed_ids:
self.active_changed.emit()
def get_selected_items(self):
"""Get selected instance ids and context selection.
Returns:
tuple<list, bool>: Selected instance ids and boolean if context
is selected.
"""
instances = []
context_selected = False
instances_by_id = self._controller.instances
for index in self._instance_view.selectionModel().selectedIndexes():
instance_id = index.data(INSTANCE_ID_ROLE)
if not context_selected and instance_id == CONTEXT_ID:
context_selected = True
elif instance_id is not None:
instance = instances_by_id.get(instance_id)
if instance:
instances.append(instance)
return instances, context_selected
def _on_selection_change(self, *_args):
self.selection_changed.emit()
@ -822,3 +799,102 @@ class InstanceListView(AbstractInstanceView):
proxy_index = self._proxy_model.mapFromSource(group_item.index())
if not self._instance_view.isExpanded(proxy_index):
self._instance_view.expand(proxy_index)
def get_selected_items(self):
"""Get selected instance ids and context selection.
Returns:
tuple<list, bool>: Selected instance ids and boolean if context
is selected.
"""
instance_ids = []
context_selected = False
for index in self._instance_view.selectionModel().selectedIndexes():
instance_id = index.data(INSTANCE_ID_ROLE)
if not context_selected and instance_id == CONTEXT_ID:
context_selected = True
elif instance_id is not None:
instance_ids.append(instance_id)
return instance_ids, context_selected
def set_selected_items(self, instance_ids, context_selected):
s_instance_ids = set(instance_ids)
cur_ids, cur_context = self.get_selected_items()
if (
set(cur_ids) == s_instance_ids
and cur_context == context_selected
):
return
view = self._instance_view
src_model = self._instance_model
proxy_model = self._proxy_model
select_indexes = []
select_queue = collections.deque()
select_queue.append(
(src_model.invisibleRootItem(), [])
)
while select_queue:
queue_item = select_queue.popleft()
item, parent_items = queue_item
if item.hasChildren():
new_parent_items = list(parent_items)
new_parent_items.append(item)
for row in range(item.rowCount()):
select_queue.append(
(item.child(row), list(new_parent_items))
)
instance_id = item.data(INSTANCE_ID_ROLE)
if not instance_id:
continue
if instance_id in s_instance_ids:
select_indexes.append(item.index())
for parent_item in parent_items:
index = parent_item.index()
proxy_index = proxy_model.mapFromSource(index)
if not view.isExpanded(proxy_index):
view.expand(proxy_index)
elif context_selected and instance_id == CONTEXT_ID:
select_indexes.append(item.index())
selection_model = view.selectionModel()
if not select_indexes:
selection_model.clear()
return
if len(select_indexes) == 1:
proxy_index = proxy_model.mapFromSource(select_indexes[0])
selection_model.setCurrentIndex(
proxy_index,
selection_model.ClearAndSelect | selection_model.Rows
)
return
first_index = proxy_model.mapFromSource(select_indexes.pop(0))
last_index = proxy_model.mapFromSource(select_indexes.pop(-1))
selection_model.setCurrentIndex(
first_index,
selection_model.ClearAndSelect | selection_model.Rows
)
for index in select_indexes:
proxy_index = proxy_model.mapFromSource(index)
selection_model.select(
proxy_index,
selection_model.Select | selection_model.Rows
)
selection_model.setCurrentIndex(
last_index,
selection_model.Select | selection_model.Rows
)

View file

@ -201,16 +201,16 @@ class OverviewWidget(QtWidgets.QFrame):
self.create_requested.emit()
def _on_delete_clicked(self):
instances, _ = self.get_selected_items()
instance_ids, _ = self.get_selected_items()
# Ask user if he really wants to remove instances
dialog = QtWidgets.QMessageBox(self)
dialog.setIcon(QtWidgets.QMessageBox.Question)
dialog.setWindowTitle("Are you sure?")
if len(instances) > 1:
if len(instance_ids) > 1:
msg = (
"Do you really want to remove {} instances?"
).format(len(instances))
).format(len(instance_ids))
else:
msg = (
"Do you really want to remove the instance?"
@ -224,10 +224,7 @@ class OverviewWidget(QtWidgets.QFrame):
dialog.exec_()
# Skip if OK was not clicked
if dialog.result() == QtWidgets.QMessageBox.Ok:
instance_ids = {
instance.id
for instance in instances
}
instance_ids = set(instance_ids)
self._controller.remove_instances(instance_ids)
def _on_change_view_clicked(self):
@ -238,11 +235,16 @@ class OverviewWidget(QtWidgets.QFrame):
if self._refreshing_instances:
return
instances, context_selected = self.get_selected_items()
instance_ids, context_selected = self.get_selected_items()
# Disable delete button if nothing is selected
self._delete_btn.setEnabled(len(instances) > 0)
self._delete_btn.setEnabled(len(instance_ids) > 0)
instances_by_id = self._controller.instances
instances = [
instances_by_id[instance_id]
for instance_id in instance_ids
]
self._subset_attributes_widget.set_current_instances(
instances, context_selected
)
@ -319,15 +321,21 @@ class OverviewWidget(QtWidgets.QFrame):
def _change_view_type(self):
idx = self._subset_views_layout.currentIndex()
new_idx = (idx + 1) % self._subset_views_layout.count()
self._subset_views_layout.setCurrentIndex(new_idx)
new_view = self._subset_views_layout.currentWidget()
old_view = self._subset_views_layout.currentWidget()
new_view = self._subset_views_layout.widget(new_idx)
if not new_view.refreshed:
new_view.refresh()
new_view.set_refreshed(True)
else:
new_view.refresh_instance_states()
instance_ids, context_selected = old_view.get_selected_items()
new_view.set_selected_items(instance_ids, context_selected)
self._subset_views_layout.setCurrentIndex(new_idx)
self._on_subset_change()
def _refresh_instances(self):

View file

@ -306,10 +306,25 @@ class AbstractInstanceView(QtWidgets.QWidget):
Example: When delete button is clicked to know what should be deleted.
"""
raise NotImplementedError((
"{} Method 'get_selected_items' is not implemented."
).format(self.__class__.__name__))
def set_selected_items(self, instance_ids, context_selected):
"""Change selection for instances and context.
Used to apply selection from one view to the other.
Args:
instance_ids (List[str]): Selected instance ids.
context_selected (bool): Context is selected.
"""
raise NotImplementedError((
"{} Method 'set_selected_items' is not implemented."
).format(self.__class__.__name__))
class ClickableLineEdit(QtWidgets.QLineEdit):
"""QLineEdit capturing left mouse click.

View file

@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring Pype version."""
__version__ = "3.14.4-nightly.3"
__version__ = "3.14.4"

View file

@ -0,0 +1,40 @@
<#
.SYNOPSIS
Helper script for the OpenPype Publish Report Viewer.
.DESCRIPTION
.EXAMPLE
PS> .\run_tray.ps1
#>
$current_dir = Get-Location
$script_dir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
$openpype_root = (Get-Item $script_dir).parent.FullName
# Install PSWriteColor to support colorized output to terminal
$env:PSModulePath = $env:PSModulePath + ";$($openpype_root)\tools\modules\powershell"
$env:_INSIDE_OPENPYPE_TOOL = "1"
# make sure Poetry is in PATH
if (-not (Test-Path 'env:POETRY_HOME')) {
$env:POETRY_HOME = "$openpype_root\.poetry"
}
$env:PATH = "$($env:PATH);$($env:POETRY_HOME)\bin"
Set-Location -Path $openpype_root
Write-Color -Text ">>> ", "Reading Poetry ... " -Color Green, Gray -NoNewline
if (-not (Test-Path -PathType Container -Path "$($env:POETRY_HOME)\bin")) {
Write-Color -Text "NOT FOUND" -Color Yellow
Write-Color -Text "*** ", "We need to install Poetry create virtual env first ..." -Color Yellow, Gray
& "$openpype_root\tools\create_env.ps1"
} else {
Write-Color -Text "OK" -Color Green
}
& "$($env:POETRY_HOME)\bin\poetry" run python "$($openpype_root)\start.py" publish-report-viewer --debug
Set-Location -Path $current_dir

View file

@ -26,6 +26,9 @@ as a naive barrier to prevent artists from accidental setting changes.
**`Disk mapping`** - Platform-dependent configuration for mapping virtual disk(s) on an artist's OpenPype machine before OP starts up.
Uses the `subst` command; if the volume character configured in the `Destination` field already exists, no re-mapping is done for that character (volume).
### FFmpeg and OpenImageIO tools
We bundle FFmpeg tools for all platforms and OpenImageIO tools for Windows and Linux. The bundled tools are used by default, but it is possible to set the environment variables `OPENPYPE_FFMPEG_PATHS` and `OPENPYPE_OIIO_PATHS` in system settings environments to look for the tools in different directories, e.g. for different linux distributions or to add oiio support for MacOS. The value of each environment variable should point to directories where the tool executables are located (multiple paths are supported).
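For example, on Linux the variables could point to custom builds like this (illustrative paths; multiple directories are joined with the platform path separator, `:` on Linux/macOS and `;` on Windows):

    OPENPYPE_OIIO_PATHS: /opt/oiio/bin:/usr/local/oiio/bin
    OPENPYPE_FFMPEG_PATHS: /opt/ffmpeg/bin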
### OpenPype deployment control
**`Versions Repository`** - Location where automatic update mechanism searches for zip files with
OpenPype update packages. To read more about preparing OpenPype for automatic updates go to [Admin Distribute docs](admin_distribute.md#2-openpype-codebase)