Merge remote-tracking branch 'origin/develop' into feature/OP-2524_Maya-looks-support-for-native-Redshift-texture-format

This commit is contained in:
Ondřej Samohel 2022-10-27 15:18:23 +02:00
commit c9c26358bf
No known key found for this signature in database
GPG key ID: 02376E18990A97C6
275 changed files with 15269 additions and 4948 deletions

.github/workflows/milestone_assign.yml (new vendored file, 28 lines added)

@ -0,0 +1,28 @@
name: Milestone - assign to PRs
on:
pull_request_target:
types: [opened, reopened, edited, synchronize]
jobs:
run_if_release:
if: startsWith(github.base_ref, 'release/')
runs-on: ubuntu-latest
steps:
- name: 'Assign Milestone [next-minor]'
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-minor'
run_if_develop:
if: ${{ github.base_ref == 'develop' }}
runs-on: ubuntu-latest
steps:
- name: 'Assign Milestone [next-patch]'
if: github.event.pull_request.milestone == null
uses: zoispag/action-assign-milestone@v1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
milestone: 'next-patch'

.github/workflows/milestone_create.yml (new vendored file, 62 lines added)

@ -0,0 +1,62 @@
name: Milestone - create default
on:
milestone:
types: [closed, edited]
jobs:
generate-next-patch:
runs-on: ubuntu-latest
steps:
- name: 'Get Milestones'
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
env:
MILESTONES: ${{ steps.milestones.outputs.milestones }}
MILESTONE: "next-patch"
- name: Read output
run: |
echo "${{ steps.querymilestone.outputs.number }}"
- name: 'Create `next-patch` milestone'
if: steps.querymilestone.outputs.number == ''
id: createmilestone
uses: "WyriHaximus/github-action-create-milestone@v1"
with:
title: 'next-patch'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
generate-next-minor:
runs-on: ubuntu-latest
steps:
- name: 'Get Milestones'
uses: "WyriHaximus/github-action-get-milestones@master"
id: milestones
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
id: querymilestone
env:
MILESTONES: ${{ steps.milestones.outputs.milestones }}
MILESTONE: "next-minor"
- name: Read output
run: |
echo "${{ steps.querymilestone.outputs.number }}"
- name: 'Create `next-minor` milestone'
if: steps.querymilestone.outputs.number == ''
id: createmilestone
uses: "WyriHaximus/github-action-create-milestone@v1"
with:
title: 'next-minor'
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
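
Both jobs above implement the same lookup-or-create logic: read the existing milestones and only create the default one when no milestone with that title exists. A minimal Python sketch of the same idea against the GitHub REST API follows; the repository name, token handling and use of requests are illustrative assumptions, not part of the workflow.

import os
import requests


def ensure_milestone(repo, title, token):
    """Return the number of the milestone named `title`, creating it if missing."""
    url = "https://api.github.com/repos/{}/milestones".format(repo)
    headers = {"Authorization": "token {}".format(token)}

    # Same lookup the jq step performs: match open milestones on their title
    for milestone in requests.get(url, headers=headers).json():
        if milestone["title"] == title:
            return milestone["number"]

    # Not found, so create it (this is what the create-milestone action does)
    response = requests.post(url, headers=headers, json={"title": title})
    response.raise_for_status()
    return response.json()["number"]


# ensure_milestone("your-org/your-repo", "next-patch", os.environ["GITHUB_TOKEN"])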


@ -37,27 +37,27 @@ jobs:
echo ::set-output name=next_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version_type.outputs.type != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
unreleasedLabel: ${{ steps.version.outputs.next_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version_type.outputs.type != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# unreleasedLabel: ${{ steps.version.outputs.next_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: "🖨️ Print changelog to console"
if: steps.version_type.outputs.type != 'skip'
@ -85,11 +85,11 @@ jobs:
tags: true
unprotect_reviews: true
- name: 🔨 Merge main back to develop
- name: 🔨 Merge main back to develop
uses: everlytic/branch-merge@1.1.0
if: steps.version_type.outputs.type != 'skip'
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'


@ -2,7 +2,7 @@ name: Stable Release
on:
release:
types:
types:
- prereleased
jobs:
@ -13,7 +13,7 @@ jobs:
steps:
- name: 🚛 Checkout Code
uses: actions/checkout@v2
with:
with:
fetch-depth: 0
- name: Set up Python
@ -33,27 +33,27 @@ jobs:
echo ::set-output name=last_release::$LASTRELEASE
echo ::set-output name=release_tag::$RESULT
- name: "✏️ Generate full changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-full-changelog
uses: heinrichreimer/github-changelog-generator-action@v2.2
with:
token: ${{ secrets.ADMIN_TOKEN }}
addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
issues: false
issuesWoLabels: false
sinceTag: "3.0.0"
maxIssues: 100
pullRequests: true
prWoLabels: false
author: false
unreleased: true
compareLink: true
stripGeneratorNotice: true
verbose: true
futureRelease: ${{ steps.version.outputs.release_tag }}
excludeTagsRegex: "CI/.+"
releaseBranch: "main"
# - name: "✏️ Generate full changelog"
# if: steps.version.outputs.release_tag != 'skip'
# id: generate-full-changelog
# uses: heinrichreimer/github-changelog-generator-action@v2.3
# with:
# token: ${{ secrets.ADMIN_TOKEN }}
# addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
# issues: false
# issuesWoLabels: false
# sinceTag: "3.12.0"
# maxIssues: 100
# pullRequests: true
# prWoLabels: false
# author: false
# unreleased: true
# compareLink: true
# stripGeneratorNotice: true
# verbose: true
# futureRelease: ${{ steps.version.outputs.release_tag }}
# excludeTagsRegex: "CI/.+"
# releaseBranch: "main"
- name: 💾 Commit and Tag
id: git_commit
@ -73,8 +73,8 @@ jobs:
token: ${{ secrets.ADMIN_TOKEN }}
branch: main
tags: true
unprotect_reviews: true
unprotect_reviews: true
- name: "✏️ Generate last changelog"
if: steps.version.outputs.release_tag != 'skip'
id: generate-last-changelog
@ -114,11 +114,11 @@ jobs:
with:
tag: "${{ steps.version.outputs.current_version }}"
- name: 🔁 Merge main back to develop
- name: 🔁 Merge main back to develop
if: steps.version.outputs.release_tag != 'skip'
uses: everlytic/branch-merge@1.1.0
with:
github_token: ${{ secrets.ADMIN_TOKEN }}
source_ref: 'main'
target_branch: 'develop'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'
commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'

.gitignore (vendored, 2 lines added)

@ -110,3 +110,5 @@ tools/run_eventserver.*
# Developer tools
tools/dev_*
.github_changelog_generator

File diff suppressed because it is too large.

HISTORY.md (1818 lines changed)
File diff suppressed because it is too large.


@ -815,6 +815,13 @@ class BootstrapRepos:
except Exception as e:
self._print(str(e), LOG_ERROR, exc_info=True)
return None
if not destination_dir.exists():
destination_dir.mkdir(parents=True)
elif not destination_dir.is_dir():
self._print(
"Destination exists but is not directory.", LOG_ERROR)
return None
try:
shutil.move(zip_file.as_posix(), destination_dir.as_posix())
except shutil.Error as e:
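
The directory check added above matters because shutil.move only moves a file into a path when that path is an existing directory; otherwise the source is simply renamed to that path. A standalone sketch of the guarded move, with hypothetical paths:

import shutil
from pathlib import Path


def move_into_dir(zip_file, destination_dir):
    """Move zip_file into destination_dir, creating the directory first."""
    destination_dir = Path(destination_dir)
    if not destination_dir.exists():
        destination_dir.mkdir(parents=True)
    elif not destination_dir.is_dir():
        raise RuntimeError("Destination exists but is not a directory.")
    # destination_dir is now guaranteed to be a directory, so the zip ends
    # up inside it instead of being renamed to that path.
    shutil.move(Path(zip_file).as_posix(), destination_dir.as_posix())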


@ -11,7 +11,6 @@ from .lib import (
PypeLogger,
Logger,
Anatomy,
config,
execute,
run_subprocess,
version_up,
@ -72,7 +71,6 @@ __all__ = [
"PypeLogger",
"Logger",
"Anatomy",
"config",
"execute",
"get_default_components",
"ApplicationManager",


@ -277,6 +277,13 @@ def projectmanager():
PypeCommands().launch_project_manager()
@main.command(context_settings={"ignore_unknown_options": True})
def publish_report_viewer():
from openpype.tools.publisher.publish_report_viewer import main
sys.exit(main())
@main.command()
@click.argument("output_path")
@click.option("--project", help="Define project context")


@ -2,6 +2,7 @@ from .mongo import get_project_connection
from .entities import (
get_assets,
get_asset_by_id,
get_version_by_id,
get_representation_by_id,
convert_id,
)
@ -127,12 +128,20 @@ def get_linked_representation_id(
if not version_id:
return []
version_doc = get_version_by_id(
project_name, version_id, fields=["type", "version_id"]
)
if version_doc["type"] == "hero_version":
version_id = version_doc["version_id"]
if max_depth is None:
max_depth = 0
match = {
"_id": version_id,
"type": {"$in": ["version", "hero_version"]}
# Links are not stored to hero versions at this moment so filter
# is limited to just versions
"type": "version"
}
graph_lookup = {
@ -187,7 +196,7 @@ def _process_referenced_pipeline_result(result, link_type):
referenced_version_ids = set()
correctly_linked_ids = set()
for item in result:
input_links = item["data"].get("inputLinks")
input_links = item.get("data", {}).get("inputLinks")
if not input_links:
continue
@ -203,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type):
continue
for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
output_links = output["data"].get("inputLinks")
output_links = output.get("data", {}).get("inputLinks")
if not output_links:
continue
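
The new lookup above exists because input links are only stored on regular version documents, so a hero version has to be resolved to its source version before the graph lookup. A small sketch of just that resolution step, assuming get_version_by_id is importable from openpype.client as elsewhere in this changeset:

from openpype.client import get_version_by_id


def resolve_regular_version_id(project_name, version_id):
    """Follow a hero version's pointer to the version that actually owns links."""
    version_doc = get_version_by_id(
        project_name, version_id, fields=["type", "version_id"]
    )
    if version_doc and version_doc["type"] == "hero_version":
        # Hero versions carry no "inputLinks", so use the source version id
        return version_doc["version_id"]
    return version_id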


@ -23,6 +23,7 @@ CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0"
CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0"
CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0"
CURRENT_VERSION_SCHEMA = "openpype:version-3.0"
CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0"
CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0"
CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0"
CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0"
@ -162,6 +163,34 @@ def new_version_doc(version, subset_id, data=None, entity_id=None):
}
def new_hero_version_doc(version_id, subset_id, data=None, entity_id=None):
"""Create skeleton data of hero version document.
Args:
version_id (ObjectId): Is considered as unique identifier of version
under subset.
subset_id (Union[str, ObjectId]): Id of parent subset.
data (Dict[str, Any]): Version document data.
entity_id (Union[str, ObjectId]): Predefined id of document. New id is
created if not passed.
Returns:
Dict[str, Any]: Skeleton of version document.
"""
if data is None:
data = {}
return {
"_id": _create_or_convert_to_mongo_id(entity_id),
"schema": CURRENT_HERO_VERSION_SCHEMA,
"type": "hero_version",
"version_id": version_id,
"parent": subset_id,
"data": data
}
def new_representation_doc(
name, version_id, context, data=None, entity_id=None
):
@ -293,6 +322,20 @@ def prepare_version_update_data(old_doc, new_doc, replace=True):
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_hero_version_update_data(old_doc, new_doc, replace=True):
"""Compare two hero version documents and prepare update data.
Based on compared values will create update data for 'UpdateOperation'.
Empty output means that documents are identical.
Returns:
Dict[str, Any]: Changes between old and new document.
"""
return _prepare_update_data(old_doc, new_doc, replace)
def prepare_representation_update_data(old_doc, new_doc, replace=True):
"""Compare two representation documents and prepare update data.


@ -312,6 +312,8 @@ class IPublishHost:
required = [
"get_context_data",
"update_context_data",
"get_context_title",
"get_current_context",
]
missing = []
for name in required:


@ -12,6 +12,7 @@ from wsrpc_aiohttp import (
from Qt import QtCore
from openpype.lib import Logger
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from openpype.tools.adobe_webserver.app import WebServerTool
@ -84,8 +85,6 @@ class ProcessLauncher(QtCore.QObject):
@property
def log(self):
if self._log is None:
from openpype.api import Logger
self._log = Logger.get_logger("{}-launcher".format(
self.route_name))
return self._log


@ -4,8 +4,7 @@ from Qt import QtWidgets
import pyblish.api
from openpype import lib
from openpype.api import Logger
from openpype.lib import Logger, register_event_callback
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
@ -16,9 +15,8 @@ from openpype.pipeline import (
)
from openpype.pipeline.load import any_outdated_containers
import openpype.hosts.aftereffects
from openpype.lib import register_event_callback
from .launch_logic import get_stub
from .launch_logic import get_stub, ConnectionNotEstablishedYet
log = Logger.get_logger(__name__)
@ -111,7 +109,7 @@ def ls():
"""
try:
stub = get_stub() # only after AfterEffects is up
except lib.ConnectionNotEstablishedYet:
except ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return
@ -284,7 +282,7 @@ def _get_stub():
"""
try:
stub = get_stub() # only after Photoshop is up
except lib.ConnectionNotEstablishedYet:
except ConnectionNotEstablishedYet:
print("Not connected yet, ignoring")
return


@ -6,7 +6,7 @@ from typing import Dict, List, Union
import bpy
import addon_utils
from openpype.api import Logger
from openpype.lib import Logger
from . import pipeline


@ -20,8 +20,8 @@ from openpype.pipeline import (
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from openpype.api import Logger
from openpype.lib import (
Logger,
register_event_callback,
emit_event
)


@ -3,7 +3,7 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@ -3,14 +3,15 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
"""Validate that the current mesh has UV's."""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"


@ -3,14 +3,15 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
from openpype.pipeline.publish import ValidateContentsOrder
import openpype.hosts.blender.api.action
class ValidateMeshNoNegativeScale(pyblish.api.Validator):
"""Ensure that meshes don't have a negative scale."""
order = openpype.api.ValidateContentsOrder
order = ValidateContentsOrder
hosts = ["blender"]
families = ["model"]
category = "geometry"


@ -3,7 +3,7 @@ from typing import List
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@ -4,7 +4,7 @@ import mathutils
import bpy
import pyblish.api
import openpype.api
import openpype.hosts.blender.api.action
from openpype.pipeline.publish import ValidateContentsOrder


@ -6,9 +6,8 @@ import argparse
import pyblish.api
import pyblish.util
from openpype.api import Logger
import openpype
import openpype.hosts.celaction
from openpype.lib import Logger
from openpype.hosts.celaction import api as celaction
from openpype.tools.utils import host_tools
from openpype.pipeline import install_openpype_plugins


@ -12,6 +12,9 @@ import xml.etree.cElementTree as cET
from copy import deepcopy, copy
from xml.etree import ElementTree as ET
from pprint import pformat
from openpype.lib import Logger, run_subprocess
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
@ -20,9 +23,7 @@ from .constants import (
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
log = Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
@ -1016,7 +1017,7 @@ class MediaInfoFile(object):
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))


@ -5,7 +5,7 @@ import os
import contextlib
from pyblish import api as pyblish
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,


@ -6,16 +6,17 @@ from xml.etree import ElementTree as ET
from Qt import QtCore, QtWidgets
import openpype.api as openpype
import qargparse
from openpype import style
from openpype.settings import get_current_project_settings
from openpype.lib import Logger
from openpype.pipeline import LegacyCreator, LoaderPlugin
from . import constants
from . import lib as flib
from . import pipeline as fpipeline
log = openpype.Logger.get_logger(__name__)
log = Logger.get_logger(__name__)
class CreatorWidget(QtWidgets.QDialog):
@ -305,7 +306,7 @@ class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
self.presets = openpype.get_current_project_settings()[
self.presets = get_current_project_settings()[
"flame"]["create"].get(self.__class__.__name__, {})
# adding basic current context flame objects


@ -1,6 +1,6 @@
import os
from xml.etree import ElementTree as ET
from openpype.api import Logger
from openpype.lib import Logger
log = Logger.get_logger(__name__)


@ -4,7 +4,7 @@ Flame utils for syncing scripts
import os
import shutil
from openpype.api import Logger
from openpype.lib import Logger
log = Logger.get_logger(__name__)


@ -1,7 +1,7 @@
"""Host API required Work Files tool"""
import os
from openpype.api import Logger
from openpype.lib import Logger
# from .. import (
# get_project_manager,
# get_current_project


@ -3,16 +3,17 @@ import json
import tempfile
import contextlib
import socket
from pprint import pformat
from openpype.lib import (
PreLaunchHook,
get_openpype_username
get_openpype_username,
run_subprocess,
)
from openpype.lib.applications import (
ApplicationLaunchFailed
)
from openpype.hosts import flame as opflame
import openpype
from pprint import pformat
class FlamePrelaunch(PreLaunchHook):
@ -42,17 +43,9 @@ class FlamePrelaunch(PreLaunchHook):
volume_name = _env.get("FLAME_WIRETAP_VOLUME")
# get image io
project_anatomy = self.data["anatomy"]
project_settings = self.data["project_settings"]
# make sure anatomy settings are having flame key
if not project_anatomy["imageio"].get("flame"):
raise ApplicationLaunchFailed((
"Anatomy project settings are missing `flame` key. "
"Please make sure you remove project overides on "
"Anatomy Image io")
)
imageio_flame = project_anatomy["imageio"]["flame"]
imageio_flame = project_settings["flame"]["imageio"]
# get user name and host name
user_name = get_openpype_username()
@ -135,7 +128,6 @@ class FlamePrelaunch(PreLaunchHook):
except OSError as exc:
self.log.warning("Not able to open files: {}".format(exc))
def _get_flame_fps(self, fps_num):
fps_table = {
float(23.976): "23.976 fps",
@ -187,7 +179,7 @@ class FlamePrelaunch(PreLaunchHook):
"env": self.launch_context.env
}
openpype.api.run_subprocess(args, **process_kwargs)
run_subprocess(args, **process_kwargs)
# process returned json file to pass launch args
return_json_data = open(tmp_json_path).read()


@ -3,8 +3,6 @@ import sys
import re
import contextlib
from Qt import QtGui
from openpype.lib import Logger
from openpype.client import (
get_asset_by_name,
@ -92,7 +90,7 @@ def set_asset_resolution():
})
def validate_comp_prefs(comp=None):
def validate_comp_prefs(comp=None, force_repair=False):
"""Validate current comp defaults with asset settings.
Validates fps, resolutionWidth, resolutionHeight, aspectRatio.
@ -135,21 +133,22 @@ def validate_comp_prefs(comp=None):
asset_value = asset_data[key]
comp_value = comp_frame_format_prefs.get(comp_key)
if asset_value != comp_value:
# todo: Actually show dialog to user instead of just logging
log.warning(
"Comp {pref} {value} does not match asset "
"'{asset_name}' {pref} {asset_value}".format(
pref=label,
value=comp_value,
asset_name=asset_doc["name"],
asset_value=asset_value)
)
invalid_msg = "{} {} should be {}".format(label,
comp_value,
asset_value)
invalid.append(invalid_msg)
if not force_repair:
# Do not log warning if we force repair anyway
log.warning(
"Comp {pref} {value} does not match asset "
"'{asset_name}' {pref} {asset_value}".format(
pref=label,
value=comp_value,
asset_name=asset_doc["name"],
asset_value=asset_value)
)
if invalid:
def _on_repair():
@ -160,6 +159,11 @@ def validate_comp_prefs(comp=None):
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
if force_repair:
log.info("Applying default Comp preferences..")
_on_repair()
return
from . import menu
from openpype.widgets import popup
from openpype.style import load_stylesheet


@ -16,6 +16,7 @@ from openpype.hosts.fusion.api.lib import (
from openpype.pipeline import legacy_io
from openpype.resources import get_openpype_icon_filepath
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
self = sys.modules[__name__]
@ -119,6 +120,10 @@ class OpenPypeMenu(QtWidgets.QWidget):
self._pulse = FusionPulse(parent=self)
self._pulse.start()
# Detect Fusion events as OpenPype events
self._event_handler = FusionEventHandler(parent=self)
self._event_handler.start()
def on_task_changed(self):
# Update current context label
label = legacy_io.Session["AVALON_ASSET"]


@ -2,13 +2,16 @@
Basic avalon integration
"""
import os
import sys
import logging
import pyblish.api
from Qt import QtCore
from openpype.lib import (
Logger,
register_event_callback
register_event_callback,
emit_event
)
from openpype.pipeline import (
register_loader_plugin_path,
@ -39,12 +42,28 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class CompLogHandler(logging.Handler):
class FusionLogHandler(logging.Handler):
# Keep a reference to fusion's Print function (Remote Object)
_print = None
@property
def print(self):
if self._print is not None:
# Use cached
return self._print
_print = getattr(sys.modules["__main__"], "fusion").Print
if _print is None:
# Backwards compatibility: Print method on Fusion instance was
# added around Fusion 17.4 and wasn't available on PyRemote Object
# before
_print = get_current_comp().Print
self._print = _print
return _print
def emit(self, record):
entry = self.format(record)
comp = get_current_comp()
if comp:
comp.Print(entry)
self.print(entry)
def install():
@ -67,7 +86,7 @@ def install():
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = CompLogHandler()
handler = FusionLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
@ -84,10 +103,10 @@ def install():
"instanceToggled", on_pyblish_instance_toggled
)
# Fusion integration currently does not attach to direct callbacks of
# the application. So we use workfile callbacks to allow similar behavior
# on save and open
register_event_callback("workfile.open.after", on_after_open)
# Register events
register_event_callback("open", on_after_open)
register_event_callback("save", on_save)
register_event_callback("new", on_new)
def uninstall():
@ -137,8 +156,18 @@ def on_pyblish_instance_toggled(instance, old_value, new_value):
tool.SetAttrs({"TOOLB_PassThrough": passthrough})
def on_after_open(_event):
comp = get_current_comp()
def on_new(event):
comp = event["Rets"]["comp"]
validate_comp_prefs(comp, force_repair=True)
def on_save(event):
comp = event["sender"]
validate_comp_prefs(comp)
def on_after_open(event):
comp = event["sender"]
validate_comp_prefs(comp)
if any_outdated_containers():
@ -182,7 +211,7 @@ def ls():
"""
comp = get_current_comp()
tools = comp.GetToolList(False, "Loader").values()
tools = comp.GetToolList(False).values()
for tool in tools:
container = parse_container(tool)
@ -254,3 +283,114 @@ def parse_container(tool):
return container
class FusionEventThread(QtCore.QThread):
"""QThread which will periodically ping Fusion app for any events.
The fusion.UIManager must be set up to be notified of events before they'll
be reported by this thread, for example:
fusion.UIManager.AddNotify("Comp_Save", None)
"""
on_event = QtCore.Signal(dict)
def run(self):
app = getattr(sys.modules["__main__"], "app", None)
if app is None:
# No Fusion app found
return
# As optimization store the GetEvent method directly because every
# getattr of UIManager.GetEvent tries to resolve the Remote Function
# through the PyRemoteObject
get_event = app.UIManager.GetEvent
delay = int(os.environ.get("OPENPYPE_FUSION_CALLBACK_INTERVAL", 1000))
while True:
if self.isInterruptionRequested():
return
# Process all events that have been queued up until now
while True:
event = get_event(False)
if not event:
break
self.on_event.emit(event)
# Wait some time before processing events again
# to not keep blocking the UI
self.msleep(delay)
class FusionEventHandler(QtCore.QObject):
"""Emits OpenPype events based on Fusion events captured in a QThread.
This will emit the following OpenPype events based on Fusion actions:
save: Comp_Save, Comp_SaveAs
open: Comp_Opened
new: Comp_New
To use this you can attach it to you Qt UI so it runs in the background.
E.g.
>>> handler = FusionEventHandler(parent=window)
>>> handler.start()
"""
ACTION_IDS = [
"Comp_Save",
"Comp_SaveAs",
"Comp_New",
"Comp_Opened"
]
def __init__(self, parent=None):
super(FusionEventHandler, self).__init__(parent=parent)
# Set up Fusion event callbacks
fusion = getattr(sys.modules["__main__"], "fusion", None)
ui = fusion.UIManager
# Add notifications for the ones we want to listen to
notifiers = []
for action_id in self.ACTION_IDS:
notifier = ui.AddNotify(action_id, None)
notifiers.append(notifier)
# TODO: Not entirely sure whether these must be kept to avoid
# garbage collection
self._notifiers = notifiers
self._event_thread = FusionEventThread(parent=self)
self._event_thread.on_event.connect(self._on_event)
def start(self):
self._event_thread.start()
def stop(self):
self._event_thread.stop()
def _on_event(self, event):
"""Handle Fusion events to emit OpenPype events"""
if not event:
return
what = event["what"]
# Comp Save
if what in {"Comp_Save", "Comp_SaveAs"}:
if not event["Rets"].get("success"):
# If the Save action is cancelled it will still emit an
# event but with "success": False so we ignore those cases
return
# Comp was saved
emit_event("save", data=event)
return
# Comp New
elif what in {"Comp_New"}:
emit_event("new", data=event)
# Comp Opened
elif what in {"Comp_Opened"}:
emit_event("open", data=event)
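
With the event bridge above in place, host code only subscribes through register_event_callback and reads the raw Fusion event from the payload, mirroring the on_save handler earlier in this file. A minimal sketch of that subscription pattern; the payload shape shown is an assumption based on the handlers above:

from openpype.lib import emit_event, register_event_callback


def on_comp_saved(event):
    # FusionEventHandler forwards the raw UIManager event as the payload,
    # so the comp that triggered the save is available under "sender".
    comp = event["sender"]
    print("Comp saved:", comp)


register_event_callback("save", on_comp_saved)

# The event thread effectively does this once Comp_Save reports success:
emit_event("save", data={"what": "Comp_Save", "sender": "<comp>", "Rets": {"success": True}})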


@ -19,9 +19,12 @@ class PulseThread(QtCore.QThread):
while True:
if self.isInterruptionRequested():
return
try:
app.Test()
except Exception:
# We don't need to call Test because PyRemoteObject of the app
# will actually fail to even resolve the Test function if it has
# gone down. So we can actually already just check by confirming
# the method is still getting resolved. (Optimization)
if app.Test is None:
self.no_response.emit()
self.msleep(interval)


@ -15,13 +15,7 @@ class FusionPreLaunchOCIO(PreLaunchHook):
project_settings = self.data["project_settings"]
# make sure anatomy settings are having flame key
imageio_fusion = project_settings.get("fusion", {}).get("imageio")
if not imageio_fusion:
raise ApplicationLaunchFailed((
"Anatomy project settings are missing `fusion` key. "
"Please make sure you remove project overrides on "
"Anatomy ImageIO")
)
imageio_fusion = project_settings["fusion"]["imageio"]
ocio = imageio_fusion.get("ocio")
enabled = ocio.get("enabled", False)


@ -0,0 +1,70 @@
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionLoadAlembicMesh(load.LoaderPlugin):
"""Load Alembic mesh into Fusion"""
families = ["pointcache", "model"]
representations = ["abc"]
label = "Load alembic mesh"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "SurfaceAlembicMesh"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.fname
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["Filename"] = path
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update Alembic path"""
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
path = get_representation_path(representation)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["Filename"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()


@ -0,0 +1,71 @@
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionLoadFBXMesh(load.LoaderPlugin):
"""Load FBX mesh into Fusion"""
families = ["*"]
representations = ["fbx"]
label = "Load FBX mesh"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "SurfaceFBXMesh"
def load(self, context, name, namespace, data):
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.fname
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["ImportFile"] = path
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, representation):
self.update(container, representation)
def update(self, container, representation):
"""Update path"""
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
path = get_representation_path(representation)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["ImportFile"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", str(representation["_id"]))
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()


@ -14,7 +14,7 @@ import hiero
from Qt import QtWidgets
from openpype.client import get_project
from openpype.settings import get_anatomy_settings
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io, Anatomy
from openpype.pipeline.load import filter_containers
from openpype.lib import Logger
@ -878,8 +878,7 @@ def apply_colorspace_project():
project.close()
# get presets for hiero
imageio = get_anatomy_settings(
project_name)["imageio"].get("hiero", None)
imageio = get_project_settings(project_name)["hiero"]["imageio"]
presets = imageio.get("workfile")
# save the workfile as subversion "comment:_colorspaceChange"
@ -932,8 +931,7 @@ def apply_colorspace_clips():
clips = project.clips()
# get presets for hiero
imageio = get_anatomy_settings(
project_name)["imageio"].get("hiero", None)
imageio = get_project_settings(project_name)["hiero"]["imageio"]
from pprint import pprint
presets = imageio.get("regexInputs", {}).get("inputs", {})


@ -4,7 +4,7 @@ import sys
import hiero.core
from hiero.ui import findMenuAction
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools


@ -251,7 +251,6 @@ def reload_config():
import importlib
for module in (
"openpype.api",
"openpype.hosts.hiero.lib",
"openpype.hosts.hiero.menu",
"openpype.hosts.hiero.tags"


@ -8,7 +8,7 @@ import hiero
from Qt import QtWidgets, QtCore
import qargparse
import openpype.api as openpype
from openpype.settings import get_current_project_settings
from openpype.lib import Logger
from openpype.pipeline import LoaderPlugin, LegacyCreator
from openpype.pipeline.context_tools import get_current_project_asset
@ -606,7 +606,7 @@ class Creator(LegacyCreator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
import openpype.hosts.hiero.api as phiero
self.presets = openpype.get_current_project_settings()[
self.presets = get_current_project_settings()[
"hiero"]["create"].get(self.__class__.__name__, {})
# adding basic current context resolve objects


@ -3,7 +3,7 @@ import os
import hiero
from openpype.client import get_project, get_assets
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import legacy_io
log = Logger.get_logger(__name__)


@ -1,7 +1,7 @@
import os
import hiero
from openpype.api import Logger
from openpype.lib import Logger
log = Logger.get_logger(__name__)


@ -1,5 +1,6 @@
from pyblish import api
import openpype.api as pype
from openpype.lib import version_up
class IntegrateVersionUpWorkfile(api.ContextPlugin):
@ -15,7 +16,7 @@ class IntegrateVersionUpWorkfile(api.ContextPlugin):
def process(self, context):
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
new_path = version_up(path)
if project:
project.saveAs(new_path)


@ -73,7 +73,7 @@ class ImageLoader(load.LoaderPlugin):
# Imprint it manually
data = {
"schema": "avalon-core:container-2.0",
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": node_name,
"namespace": namespace,


@ -43,7 +43,7 @@ class USDSublayerLoader(load.LoaderPlugin):
# Imprint it manually
data = {
"schema": "avalon-core:container-2.0",
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": node_name,
"namespace": namespace,


@ -43,7 +43,7 @@ class USDReferenceLoader(load.LoaderPlugin):
# Imprint it manually
data = {
"schema": "avalon-core:container-2.0",
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": node_name,
"namespace": namespace,


@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
import openpype.api
import pyblish.api
import hou
from openpype.pipeline.publish import RepairAction
class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
"""Validate workfile paths so they are absolute."""
@ -11,7 +12,7 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
families = ["workfile"]
hosts = ["houdini"]
label = "Validate Workfile Paths"
actions = [openpype.api.RepairAction]
actions = [RepairAction]
optional = True
node_types = ["file", "alembic"]
@ -35,6 +36,9 @@ class ValidateWorkfilePaths(pyblish.api.InstancePlugin):
def get_invalid(cls):
invalid = []
for param, _ in hou.fileReferences():
if param is None:
continue
# skip nodes we are not interested in
if param.node().type().name() not in cls.node_types:
continue


@ -28,13 +28,16 @@ class MayaAddon(OpenPypeModule, IHostAddon):
env["PYTHONPATH"] = os.pathsep.join(new_python_paths)
# Set default values if are not already set via settings
defaults = {
"OPENPYPE_LOG_NO_COLORS": "Yes"
# Set default environments
envs = {
"OPENPYPE_LOG_NO_COLORS": "Yes",
# For python module 'qtpy'
"QT_API": "PySide2",
# For python module 'Qt'
"QT_PREFERRED_BINDING": "PySide2"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
for key, value in envs.items():
env[key] = value
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:


@ -8,7 +8,7 @@ from functools import partial
import maya.cmds as cmds
import maya.mel as mel
from openpype.api import resources
from openpype import resources
from openpype.tools.utils import host_tools
from .lib import get_main_window


@ -23,7 +23,7 @@ from openpype.client import (
get_last_versions,
get_representation_by_name
)
from openpype.api import get_anatomy_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
legacy_io,
discover_loader_plugins,
@ -2459,182 +2459,120 @@ def bake_to_world_space(nodes,
def load_capture_preset(data=None):
"""Convert OpenPype Extract Playblast settings to `capture` arguments
Input data is the settings from:
`project_settings/maya/publish/ExtractPlayblast/capture_preset`
Args:
data (dict): Capture preset settings from OpenPype settings
Returns:
dict: `capture.capture` compatible keyword arguments
"""
import capture
preset = data
options = dict()
viewport_options = dict()
viewport2_options = dict()
camera_options = dict()
# CODEC
id = 'Codec'
for key in preset[id]:
options[str(key)] = preset[id][key]
# Straight key-value match from settings to capture arguments
options.update(data["Codec"])
options.update(data["Generic"])
options.update(data["Resolution"])
# GENERIC
id = 'Generic'
for key in preset[id]:
options[str(key)] = preset[id][key]
# RESOLUTION
id = 'Resolution'
options['height'] = preset[id]['height']
options['width'] = preset[id]['width']
camera_options.update(data['Camera Options'])
viewport_options.update(data["Renderer"])
# DISPLAY OPTIONS
id = 'Display Options'
disp_options = {}
for key in preset[id]:
for key, value in data['Display Options'].items():
if key.startswith('background'):
disp_options[key] = preset['Display Options'][key]
if len(disp_options[key]) == 4:
disp_options[key][0] = (float(disp_options[key][0])/255)
disp_options[key][1] = (float(disp_options[key][1])/255)
disp_options[key][2] = (float(disp_options[key][2])/255)
disp_options[key].pop()
# Convert background, backgroundTop, backgroundBottom colors
if len(value) == 4:
# Ignore alpha + convert RGB to float
value = [
float(value[0]) / 255,
float(value[1]) / 255,
float(value[2]) / 255
]
disp_options[key] = value
else:
disp_options['displayGradient'] = True
options['display_options'] = disp_options
# VIEWPORT OPTIONS
temp_options = {}
id = 'Renderer'
for key in preset[id]:
temp_options[str(key)] = preset[id][key]
# Viewport Options has a mixture of Viewport2 Options and Viewport Options
# to pass along to capture. So we'll need to differentiate between the two
VIEWPORT2_OPTIONS = {
"textureMaxResolution",
"renderDepthOfField",
"ssaoEnable",
"ssaoSamples",
"ssaoAmount",
"ssaoRadius",
"ssaoFilterRadius",
"hwFogStart",
"hwFogEnd",
"hwFogAlpha",
"hwFogFalloff",
"hwFogColorR",
"hwFogColorG",
"hwFogColorB",
"hwFogDensity",
"motionBlurEnable",
"motionBlurSampleCount",
"motionBlurShutterOpenFraction",
"lineAAEnable"
}
for key, value in data['Viewport Options'].items():
temp_options2 = {}
id = 'Viewport Options'
for key in preset[id]:
# There are some keys we want to ignore
if key in {"override_viewport_options", "high_quality"}:
continue
# First handle special cases where we do value conversion to
# separate option values
if key == 'textureMaxResolution':
if preset[id][key] > 0:
temp_options2['textureMaxResolution'] = preset[id][key]
temp_options2['enableTextureMaxRes'] = True
temp_options2['textureMaxResMode'] = 1
viewport2_options['textureMaxResolution'] = value
if value > 0:
viewport2_options['enableTextureMaxRes'] = True
viewport2_options['textureMaxResMode'] = 1
else:
temp_options2['textureMaxResolution'] = preset[id][key]
temp_options2['enableTextureMaxRes'] = False
temp_options2['textureMaxResMode'] = 0
viewport2_options['enableTextureMaxRes'] = False
viewport2_options['textureMaxResMode'] = 0
if key == 'multiSample':
if preset[id][key] > 0:
temp_options2['multiSampleEnable'] = True
temp_options2['multiSampleCount'] = preset[id][key]
else:
temp_options2['multiSampleEnable'] = False
temp_options2['multiSampleCount'] = preset[id][key]
elif key == 'multiSample':
viewport2_options['multiSampleEnable'] = value > 0
viewport2_options['multiSampleCount'] = value
if key == 'renderDepthOfField':
temp_options2['renderDepthOfField'] = preset[id][key]
elif key == 'alphaCut':
viewport2_options['transparencyAlgorithm'] = 5
viewport2_options['transparencyQuality'] = 1
if key == 'ssaoEnable':
if preset[id][key] is True:
temp_options2['ssaoEnable'] = True
else:
temp_options2['ssaoEnable'] = False
elif key == 'hwFogFalloff':
# Settings enum value string to integer
viewport2_options['hwFogFalloff'] = int(value)
if key == 'ssaoSamples':
temp_options2['ssaoSamples'] = preset[id][key]
if key == 'ssaoAmount':
temp_options2['ssaoAmount'] = preset[id][key]
if key == 'ssaoRadius':
temp_options2['ssaoRadius'] = preset[id][key]
if key == 'hwFogDensity':
temp_options2['hwFogDensity'] = preset[id][key]
if key == 'ssaoFilterRadius':
temp_options2['ssaoFilterRadius'] = preset[id][key]
if key == 'alphaCut':
temp_options2['transparencyAlgorithm'] = 5
temp_options2['transparencyQuality'] = 1
if key == 'headsUpDisplay':
temp_options['headsUpDisplay'] = True
if key == 'fogging':
temp_options['fogging'] = preset[id][key] or False
if key == 'hwFogStart':
temp_options2['hwFogStart'] = preset[id][key]
if key == 'hwFogEnd':
temp_options2['hwFogEnd'] = preset[id][key]
if key == 'hwFogAlpha':
temp_options2['hwFogAlpha'] = preset[id][key]
if key == 'hwFogFalloff':
temp_options2['hwFogFalloff'] = int(preset[id][key])
if key == 'hwFogColorR':
temp_options2['hwFogColorR'] = preset[id][key]
if key == 'hwFogColorG':
temp_options2['hwFogColorG'] = preset[id][key]
if key == 'hwFogColorB':
temp_options2['hwFogColorB'] = preset[id][key]
if key == 'motionBlurEnable':
if preset[id][key] is True:
temp_options2['motionBlurEnable'] = True
else:
temp_options2['motionBlurEnable'] = False
if key == 'motionBlurSampleCount':
temp_options2['motionBlurSampleCount'] = preset[id][key]
if key == 'motionBlurShutterOpenFraction':
temp_options2['motionBlurShutterOpenFraction'] = preset[id][key]
if key == 'lineAAEnable':
if preset[id][key] is True:
temp_options2['lineAAEnable'] = True
else:
temp_options2['lineAAEnable'] = False
# Then handle Viewport 2.0 Options
elif key in VIEWPORT2_OPTIONS:
viewport2_options[key] = value
# Then assume remainder is Viewport Options
else:
temp_options[str(key)] = preset[id][key]
viewport_options[key] = value
for key in ['override_viewport_options',
'high_quality',
'alphaCut',
'gpuCacheDisplayFilter',
'multiSample',
'ssaoEnable',
'ssaoSamples',
'ssaoAmount',
'ssaoFilterRadius',
'ssaoRadius',
'hwFogStart',
'hwFogEnd',
'hwFogAlpha',
'hwFogFalloff',
'hwFogColorR',
'hwFogColorG',
'hwFogColorB',
'hwFogDensity',
'textureMaxResolution',
'motionBlurEnable',
'motionBlurSampleCount',
'motionBlurShutterOpenFraction',
'lineAAEnable',
'renderDepthOfField'
]:
temp_options.pop(key, None)
options['viewport_options'] = temp_options
options['viewport2_options'] = temp_options2
options['viewport_options'] = viewport_options
options['viewport2_options'] = viewport2_options
options['camera_options'] = camera_options
# use active sound track
scene = capture.parse_active_scene()
options['sound'] = scene['sound']
# options['display_options'] = temp_options
return options
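
The returned dictionary is meant to be passed straight to capture.capture. A usage sketch run inside a Maya session is below; the nested preset keys are a hypothetical slice of the ExtractPlayblast capture_preset settings, not the full schema:

import capture  # the same maya-capture module used by ExtractPlayblast

from openpype.hosts.maya.api.lib import load_capture_preset

# Hypothetical slice of project_settings/maya/publish/ExtractPlayblast/capture_preset
preset_settings = {
    "Codec": {"format": "image", "compression": "png", "quality": 95},
    "Generic": {"off_screen": True},
    "Resolution": {"width": 1920, "height": 1080},
    "Camera Options": {"displayGateMask": False},
    "Renderer": {"rendererName": "vp2Renderer"},
    "Display Options": {
        "override_display": True,
        "background": [125, 125, 125, 255],
    },
    "Viewport Options": {
        "override_viewport_options": True,
        "high_quality": True,
        "multiSample": 8,
        "ssaoEnable": True,
        "textureMaxResolution": 1024,
    },
}

options = load_capture_preset(data=preset_settings)
options["camera"] = "persp"        # the extractor fills these in from the instance
options["start_frame"] = 1001
options["end_frame"] = 1100
path = capture.capture(**options)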
@ -3159,7 +3097,7 @@ def set_colorspace():
"""Set Colorspace from project configuration
"""
project_name = os.getenv("AVALON_PROJECT")
imageio = get_anatomy_settings(project_name)["imageio"]["maya"]
imageio = get_project_settings(project_name)["maya"]["imageio"]
# Maya 2022+ introduces new OCIO v2 color management settings that
# can override the old color managenement preferences. OpenPype has


@ -80,7 +80,7 @@ IMAGE_PREFIXES = {
"mayahardware2": "defaultRenderGlobals.imageFilePrefix"
}
RENDERMAN_IMAGE_DIR = "maya/<scene>/<layer>"
RENDERMAN_IMAGE_DIR = "<scene>/<layer>"
def has_tokens(string, tokens):
@ -260,20 +260,20 @@ class ARenderProducts:
"""
try:
file_prefix_attr = IMAGE_PREFIXES[self.renderer]
prefix_attr = IMAGE_PREFIXES[self.renderer]
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer)
)
file_prefix = self._get_attr(file_prefix_attr)
prefix = self._get_attr(prefix_attr)
if not file_prefix:
if not prefix:
# Fall back to scene name by default
log.debug("Image prefix not set, using <Scene>")
file_prefix = "<Scene>"
return file_prefix
return prefix
def get_render_attribute(self, attribute):
"""Get attribute from render options.
@ -730,13 +730,16 @@ class RenderProductsVray(ARenderProducts):
"""Get image prefix for V-Ray.
This overrides :func:`ARenderProducts.get_renderer_prefix()` as
we must add `<aov>` token manually.
we must add `<aov>` token manually. This is done only for
non-multipart outputs, where `<aov>` token doesn't make sense.
See also:
:func:`ARenderProducts.get_renderer_prefix()`
"""
prefix = super(RenderProductsVray, self).get_renderer_prefix()
if self.multipart:
return prefix
aov_separator = self._get_aov_separator()
prefix = "{}{}<aov>".format(prefix, aov_separator)
return prefix
@ -974,15 +977,18 @@ class RenderProductsRedshift(ARenderProducts):
"""Get image prefix for Redshift.
This overrides :func:`ARenderProducts.get_renderer_prefix()` as
we must add `<aov>` token manually.
we must add `<aov>` token manually. This is done only for
non-multipart outputs, where `<aov>` token doesn't make sense.
See also:
:func:`ARenderProducts.get_renderer_prefix()`
"""
file_prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
separator = self.extract_separator(file_prefix)
prefix = "{}{}<aov>".format(file_prefix, separator or "_")
prefix = super(RenderProductsRedshift, self).get_renderer_prefix()
if self.multipart:
return prefix
separator = self.extract_separator(prefix)
prefix = "{}{}<aov>".format(prefix, separator or "_")
return prefix
def get_render_products(self):


@ -6,7 +6,7 @@ import six
import sys
from openpype.lib import Logger
from openpype.api import (
from openpype.settings import (
get_project_settings,
get_current_project_settings
)
@ -29,7 +29,7 @@ class RenderSettings(object):
_image_prefixes = {
'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"], # noqa
'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"], # noqa
'renderman': 'maya/<Scene>/<layer>/<layer>{aov_separator}<aov>',
'renderman': '<Scene>/<layer>/<layer>{aov_separator}<aov>',
'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"] # noqa
}


@ -12,6 +12,7 @@ class CreateAnimation(plugin.Creator):
family = "animation"
icon = "male"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateAnimation, self).__init__(*args, **kwargs)
@ -24,7 +25,7 @@ class CreateAnimation(plugin.Creator):
# Write vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False
self.data["writeFaceSets"] = self.write_face_sets
# Include only renderable visible shapes.
# Skips locators and empty transforms


@ -9,13 +9,14 @@ class CreateModel(plugin.Creator):
family = "model"
icon = "cube"
defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# Vertex colors with the geometry
self.data["writeColorSets"] = False
self.data["writeFaceSets"] = False
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = self.write_face_sets
# Include attributes by attribute name or prefix
self.data["attr"] = ""


@ -12,6 +12,7 @@ class CreatePointCache(plugin.Creator):
family = "pointcache"
icon = "gears"
write_color_sets = False
write_face_sets = False
def __init__(self, *args, **kwargs):
super(CreatePointCache, self).__init__(*args, **kwargs)
@ -21,7 +22,8 @@ class CreatePointCache(plugin.Creator):
# Vertex colors with the geometry.
self.data["writeColorSets"] = self.write_color_sets
self.data["writeFaceSets"] = False # Vertex colors with the geometry.
# Vertex colors with the geometry.
self.data["writeFaceSets"] = self.write_face_sets
self.data["renderableOnly"] = False # Only renderable visible shapes
self.data["visibleOnly"] = False # only nodes that are visible
self.data["includeParentHierarchy"] = False # Include parent groups


@ -9,26 +9,18 @@ import requests
from maya import cmds
from maya.app.renderSetup.model import renderSetup
from openpype.api import (
from openpype.settings import (
get_system_settings,
get_project_settings,
)
from openpype.lib import requests_get
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.hosts.maya.api import (
lib,
lib_rendersettings,
plugin
)
from openpype.lib import requests_get
from openpype.api import (
get_system_settings,
get_project_settings)
from openpype.modules import ModulesManager
from openpype.pipeline import legacy_io
from openpype.pipeline import (
CreatorError,
legacy_io,
)
from openpype.pipeline.context_tools import get_current_project_asset
class CreateRender(plugin.Creator):


@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Creator for Unreal Static Meshes."""
from openpype.hosts.maya.api import plugin, lib
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from maya import cmds # noqa


@ -12,7 +12,7 @@ from openpype.hosts.maya.api import (
lib,
plugin
)
from openpype.api import (
from openpype.settings import (
get_system_settings,
get_project_settings
)


@ -90,7 +90,7 @@ class ImportMayaLoader(load.LoaderPlugin):
so you could also use it as a new base.
"""
representations = ["ma", "mb"]
representations = ["ma", "mb", "obj"]
families = ["*"]
label = "Import"


@ -1,7 +1,7 @@
import os
import clique
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path


@ -4,7 +4,7 @@ from openpype.pipeline import (
load,
get_representation_path
)
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
class GpuCacheLoader(load.LoaderPlugin):


@ -5,7 +5,7 @@ import clique
import maya.cmds as cmds
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path


@ -1,7 +1,7 @@
import os
from maya import cmds
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import legacy_io
from openpype.pipeline.create import (
legacy_create,


@ -1,6 +1,6 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path


@ -1,6 +1,6 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path


@ -1,6 +1,6 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path


@ -10,7 +10,7 @@ import os
import maya.cmds as cmds
from openpype.client import get_representation_by_name
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
legacy_io,
load,


@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import os
import maya.cmds as cmds # noqa
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path


@ -6,7 +6,7 @@ from collections import defaultdict
import clique
from maya import cmds
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import (
load,
get_representation_path
@ -250,7 +250,7 @@ class YetiCacheLoader(load.LoaderPlugin):
"""
name = node_name.replace(":", "_")
pattern = r"^({name})(\.[0-4]+)?(\.fur)$".format(name=re.escape(name))
pattern = r"^({name})(\.[0-9]+)?(\.fur)$".format(name=re.escape(name))
files = [fname for fname in os.listdir(root) if re.match(pattern,
fname)]
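
The character-class fix above ([0-4] to [0-9]) matters for any cache frame number containing digits 5-9. A quick illustration of the corrected pattern with made-up file names:

import re

name = re.escape("yetiNode_fxCache")  # hypothetical cache node name
pattern = r"^({name})(\.[0-9]+)?(\.fur)$".format(name=name)

# "0159" contains digits outside [0-4], so it only matches with the new class
for fname in ("yetiNode_fxCache.fur", "yetiNode_fxCache.0159.fur"):
    print(fname, bool(re.match(pattern, fname)))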


@ -1,7 +1,7 @@
import os
from collections import defaultdict
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
import openpype.hosts.maya.api.plugin
from openpype.hosts.maya.api import lib


@ -102,23 +102,26 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
}
for layer in collected_render_layers:
try:
if layer.startswith("LAYER_"):
# this is support for legacy mode where render layers
# started with `LAYER_` prefix.
expected_layer_name = re.search(
r"^LAYER_(.*)", layer).group(1)
else:
# new way is to prefix render layer name with instance
# namespace.
expected_layer_name = re.search(
r"^.+:(.*)", layer).group(1)
except IndexError:
if layer.startswith("LAYER_"):
# this is support for legacy mode where render layers
# started with `LAYER_` prefix.
layer_name_pattern = r"^LAYER_(.*)"
else:
# new way is to prefix render layer name with instance
# namespace.
layer_name_pattern = r"^.+:(.*)"
# todo: We should have a more explicit way to link the renderlayer
match = re.match(layer_name_pattern, layer)
if not match:
msg = "Invalid layer name in set [ {} ]".format(layer)
self.log.warning(msg)
continue
self.log.info("processing %s" % layer)
expected_layer_name = match.group(1)
self.log.info("Processing '{}' as layer [ {} ]"
"".format(layer, expected_layer_name))
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = "Render layer [ {} ] is not in " "Render Setup".format(


@ -34,14 +34,15 @@ class ExtractLayout(publish.Extractor):
for asset in cmds.sets(str(instance), query=True):
# Find the container
grp_name = asset.split(':')[0]
containers = cmds.ls(f"{grp_name}*_CON")
containers = cmds.ls("{}*_CON".format(grp_name))
assert len(containers) == 1, \
f"More than one container found for {asset}"
"More than one container found for {}".format(asset)
container = containers[0]
representation_id = cmds.getAttr(f"{container}.representation")
representation_id = cmds.getAttr(
"{}.representation".format(container))
representation = get_representation_by_id(
project_name,
@ -56,7 +57,8 @@ class ExtractLayout(publish.Extractor):
json_element = {
"family": family,
"instance_name": cmds.getAttr(f"{container}.name"),
"instance_name": cmds.getAttr(
"{}.namespace".format(container)),
"representation": str(representation_id),
"version": str(version_id)
}

View file

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
import os
from maya import cmds
# import maya.mel as mel
import pyblish.api
from openpype.pipeline import publish
from openpype.hosts.maya.api import lib
class ExtractObj(publish.Extractor):
"""Extract OBJ from Maya.
This extracts reproducible OBJ exports ignoring any of the settings
set on the local machine in the OBJ export options window.
"""
order = pyblish.api.ExtractorOrder
hosts = ["maya"]
label = "Extract OBJ"
families = ["model"]
def process(self, instance):
# Define output path
staging_dir = self.staging_dir(instance)
filename = "{0}.obj".format(instance.name)
path = os.path.join(staging_dir, filename)
# The export requires forward slashes because we need to
# format it into a string in a mel expression
self.log.info("Extracting OBJ to: {0}".format(path))
members = instance.data("setMembers")
members = cmds.ls(members,
dag=True,
shapes=True,
type=("mesh", "nurbsCurve"),
noIntermediate=True,
long=True)
self.log.info("Members: {0}".format(members))
self.log.info("Instance: {0}".format(instance[:]))
if not cmds.pluginInfo('objExport', query=True, loaded=True):
cmds.loadPlugin('objExport')
# Export
with lib.no_display_layers(instance):
with lib.displaySmoothness(members,
divisionsU=0,
divisionsV=0,
pointsWire=4,
pointsShaded=1,
polygonObject=1):
with lib.shader(members,
shadingEngine="initialShadingGroup"):
with lib.maintained_selection():
cmds.select(members, noExpand=True)
cmds.file(path,
exportSelected=True,
type='OBJexport',
preserveReferences=True,
force=True)
if "representation" not in instance.data:
instance.data["representation"] = []
representation = {
'name': 'obj',
'ext': 'obj',
'files': filename,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info("Extract OBJ successful to: {0}".format(path))

View file

@ -77,8 +77,10 @@ class ExtractPlayblast(publish.Extractor):
preset['height'] = asset_height
preset['start_frame'] = start
preset['end_frame'] = end
camera_option = preset.get("camera_option", {})
camera_option["depthOfField"] = cmds.getAttr(
# Enforce persisting camera depth of field
camera_options = preset.setdefault("camera_options", {})
camera_options["depthOfField"] = cmds.getAttr(
"{0}.depthOfField".format(camera))
stagingdir = self.staging_dir(instance)
@ -131,13 +133,15 @@ class ExtractPlayblast(publish.Extractor):
preset.update(panel_preset)
cmds.setFocus(panel)
path = capture.capture(**preset)
path = capture.capture(log=self.log, **preset)
self.log.debug("playblast path {}".format(path))
collected_files = os.listdir(stagingdir)
patterns = [clique.PATTERNS["frames"]]
collections, remainder = clique.assemble(collected_files,
minimum_items=1)
minimum_items=1,
patterns=patterns)
self.log.debug("filename {}".format(filename))
frame_collection = None
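To illustrate why passing the explicit frames pattern matters, a minimal sketch with hypothetical playblast output; restricting `clique.assemble` to `clique.PATTERNS["frames"]` keeps non-sequence files and other digit runs out of the frame collection:

```python
import clique

# Hypothetical staging directory listing
collected_files = [
    "reviewMain.0001.png",
    "reviewMain.0002.png",
    "reviewMain.0003.png",
    "thumbnail.jpg",
]

collections, remainder = clique.assemble(
    collected_files,
    minimum_items=1,
    patterns=[clique.PATTERNS["frames"]],
)
# One collection covering reviewMain.0001-0003.png; "thumbnail.jpg" stays in remainder
```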

View file

@ -1,5 +1,6 @@
import os
import glob
import tempfile
import capture
@ -81,9 +82,17 @@ class ExtractThumbnail(publish.Extractor):
elif asset_width and asset_height:
preset['width'] = asset_width
preset['height'] = asset_height
stagingDir = self.staging_dir(instance)
# Create temp directory for thumbnail
# - this is to avoid overwriting the source file
dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
self.log.debug(
"Create temp directory {} for thumbnail".format(dst_staging)
)
# Store new staging to cleanup paths
instance.context.data["cleanupFullPaths"].append(dst_staging)
filename = "{0}".format(instance.name)
path = os.path.join(stagingDir, filename)
path = os.path.join(dst_staging, filename)
self.log.info("Outputting images to %s" % path)
@ -137,7 +146,7 @@ class ExtractThumbnail(publish.Extractor):
'name': 'thumbnail',
'ext': 'jpg',
'files': thumbnail,
"stagingDir": stagingDir,
"stagingDir": dst_staging,
"thumbnail": True
}
instance.data["representations"].append(representation)
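A small sketch of the temp-staging approach, assuming a pyblish-like context dict as a stand-in for `instance.context.data`; the thumbnail gets its own directory so it cannot clash with the source frames, and the path is queued for later cleanup:

```python
import os
import tempfile

context_data = {"cleanupFullPaths": []}  # stand-in for instance.context.data

dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
context_data["cleanupFullPaths"].append(dst_staging)  # removed later by the cleanup plugin

path = os.path.join(dst_staging, "reviewMain")  # hypothetical instance name
print(path)
```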

View file

@ -11,7 +11,7 @@ import pyblish.api
from openpype.lib import requests_post
from openpype.hosts.maya.api import lib
from openpype.pipeline import legacy_io
from openpype.api import get_system_settings
from openpype.settings import get_system_settings
# mapping between Maya renderer names and Muster template ids
@ -118,7 +118,7 @@ def preview_fname(folder, scene, layer, padding, ext):
"""
# Following hardcoded "<Scene>/<Scene>_<Layer>/<Layer>"
output = "maya/{scene}/{layer}/{layer}.{number}.{ext}".format(
output = "{scene}/{layer}/{layer}.{number}.{ext}".format(
scene=scene,
layer=layer,
number="#" * padding,

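For illustration, the updated preview path template filled with hypothetical values; the leading `maya/` segment is no longer hardcoded, so the layer path is rooted at the scene name only:

```python
output = "{scene}/{layer}/{layer}.{number}.{ext}".format(
    scene="sh010_lighting_v001",   # hypothetical scene name
    layer="beauty",                # hypothetical render layer
    number="#" * 4,                # frame padding placeholder
    ext="exr",
)
print(output)  # -> "sh010_lighting_v001/beauty/beauty.####.exr"
```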
View file

@ -22,10 +22,10 @@ def get_redshift_image_format_labels():
class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must start with: `maya/<Scene>`
* File Name Prefix must start with: `<Scene>`
all other tokens are customizable, but sane values for Arnold are:
`maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
`<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`
<Camera> token is supported also, useful for multiple renderable
cameras per render layer.
@ -64,12 +64,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
}
ImagePrefixTokens = {
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'vray': 'maya/<Scene>/<Layer>/<Layer>',
'mentalray': '<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'arnold': '<Scene>/<RenderLayer>/<RenderLayer>{aov_separator}<RenderPass>', # noqa: E501
'redshift': '<Scene>/<RenderLayer>/<RenderLayer>',
'vray': '<Scene>/<Layer>/<Layer>',
'renderman': '<layer>{aov_separator}<aov>.<f4>.<ext>',
'mayahardware2': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
'mayahardware2': '<Scene>/<RenderLayer>/<RenderLayer>',
}
_aov_chars = {
@ -80,7 +80,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
redshift_AOV_prefix = "<BeautyPath>/<BeautyFile>{aov_separator}<RenderPass>" # noqa: E501
renderman_dir_prefix = "maya/<scene>/<layer>"
renderman_dir_prefix = "<scene>/<layer>"
R_AOV_TOKEN = re.compile(
r'%a|<aov>|<renderpass>', re.IGNORECASE)
@ -90,8 +90,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
DEFAULT_PADDING = 4
VRAY_PREFIX = "maya/<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
VRAY_PREFIX = "<Scene>/<Layer>/<Layer>"
DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
@ -123,7 +123,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
prefix = prefix.replace(
"{aov_separator}", instance.data.get("aovSeparator", "_"))
required_prefix = "maya/<scene>"
default_prefix = cls.ImagePrefixTokens[renderer]
if not anim_override:
@ -131,15 +130,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
cls.log.error("Animation needs to be enabled. Use the same "
"frame for start and end to render single frame")
if renderer != "renderman" and not prefix.lower().startswith(
required_prefix):
invalid = True
cls.log.error(
("Wrong image prefix [ {} ] "
" - doesn't start with: '{}'").format(
prefix, required_prefix)
)
if not re.search(cls.R_LAYER_TOKEN, prefix):
invalid = True
cls.log.error("Wrong image prefix [ {} ] - "
@ -268,14 +258,20 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
# go through definitions and test if such node.attribute exists.
# if so, compare its value from the one required.
for attr, value in OrderedDict(validation_settings).items():
# first get node of that type
cls.log.debug("{}: {}".format(attr, value))
node_type = attr.split(".")[0]
attribute_name = ".".join(attr.split(".")[1:])
if "." not in attr:
cls.log.warning("Skipping invalid attribute defined in "
"validation settings: '{}'".format(attr))
continue
node_type, attribute_name = attr.split(".", 1)
# first get node of that type
nodes = cmds.ls(type=node_type)
if not isinstance(nodes, list):
cls.log.warning("No nodes of '{}' found.".format(node_type))
if not nodes:
cls.log.warning(
"No nodes of type '{}' found.".format(node_type))
continue
for node in nodes:

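A minimal sketch of the new attribute parsing with hypothetical validation settings; entries without a dot are skipped with a warning instead of producing a bogus node type, and `split(".", 1)` keeps any further dots inside the attribute name:

```python
from collections import OrderedDict

# Hypothetical "node_type.attribute" validation settings
validation_settings = [
    ("redshiftOptions.imageFormat", 1),   # hypothetical node type and attribute
    ("brokenEntryWithoutDot", 0),         # invalid entry, gets skipped
]

for attr, value in OrderedDict(validation_settings).items():
    if "." not in attr:
        print("Skipping invalid attribute defined in settings: '{}'".format(attr))
        continue
    node_type, attribute_name = attr.split(".", 1)
    print(node_type, attribute_name, value)
```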
View file

@ -1,7 +1,6 @@
from maya import cmds
import pyblish.api
import openpype.api
import openpype.hosts.maya.api.action
from openpype.pipeline.publish import ValidateContentsOrder
@ -24,7 +23,7 @@ class ValidateUniqueNames(pyblish.api.Validator):
"""Returns the invalid transforms in the instance.
Returns:
list: Non unique name transforms
list: Non-unique name transforms.
"""

View file

@ -1,5 +1,5 @@
import os
from openpype.api import get_project_settings
from openpype.settings import get_project_settings
from openpype.pipeline import install_host
from openpype.hosts.maya.api import MayaHost
from maya import cmds

View file

@ -2,7 +2,7 @@ import os
import re
import nuke
from openpype.api import Logger
from openpype.lib import Logger
log = Logger.get_logger(__name__)

View file

@ -563,7 +563,15 @@ def get_node_path(path, padding=4):
def get_nuke_imageio_settings():
return get_anatomy_settings(Context.project_name)["imageio"]["nuke"]
project_imageio = get_project_settings(
Context.project_name)["nuke"]["imageio"]
# backward compatibility for project started before 3.10
# those are still having `__legacy__` knob types
if not project_imageio["enabled"]:
return get_anatomy_settings(Context.project_name)["imageio"]["nuke"]
return get_project_settings(Context.project_name)["nuke"]["imageio"]
def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
@ -2922,3 +2930,47 @@ def get_nodes_by_names(names):
nuke.toNode(name)
for name in names
]
def get_viewer_config_from_string(input_string):
"""Convert string to display and viewer string
Args:
input_string (str): string with viewer
Raises:
IndexError: if more than one slash in input string
IndexError: if missing closing bracket
Returns:
tuple[str]: display, viewer
"""
display = None
viewer = input_string
# check if () or / or \ in name
if "/" in viewer:
split = viewer.split("/")
# raise if there is more than one slash
if len(split) > 2:
raise IndexError((
"Viewer Input string is not correct. "
"More than one `/` slash! {}"
).format(input_string))
viewer = split[1]
display = split[0]
elif "(" in viewer:
pattern = r"([\w\d\s]+).*[(](.*)[)]"
result = re.findall(pattern, viewer)
try:
result = result.pop()
display = str(result[1]).rstrip()
viewer = str(result[0]).rstrip()
except IndexError:
raise IndexError((
"Viewer Input string is not correct. "
"Missing bracket! {}"
).format(input_string))
return (display, viewer)
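A hedged usage sketch of the function above (the import path is assumed from the surrounding Nuke api module; input strings are illustrative):

```python
from openpype.hosts.nuke.api.lib import get_viewer_config_from_string  # assumed import path

print(get_viewer_config_from_string("ACES/sRGB"))    # -> ("ACES", "sRGB")
print(get_viewer_config_from_string("sRGB (ACES)"))  # -> ("ACES", "sRGB")
print(get_viewer_config_from_string("sRGB"))         # -> (None, "sRGB")
```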

View file

@ -7,11 +7,8 @@ import nuke
import pyblish.api
import openpype
from openpype.api import (
Logger,
get_current_project_settings
)
from openpype.lib import register_event_callback
from openpype.settings import get_current_project_settings
from openpype.lib import register_event_callback, Logger
from openpype.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
@ -69,7 +66,6 @@ def reload_config():
"""
for module in (
"openpype.api",
"openpype.hosts.nuke.api.actions",
"openpype.hosts.nuke.api.menu",
"openpype.hosts.nuke.api.plugin",

View file

@ -6,7 +6,7 @@ from abc import abstractmethod
import nuke
from openpype.api import get_current_project_settings
from openpype.settings import get_current_project_settings
from openpype.pipeline import (
LegacyCreator,
LoaderPlugin,
@ -19,7 +19,8 @@ from .lib import (
add_publish_knob,
get_nuke_imageio_settings,
set_node_knobs_from_settings,
get_view_process_node
get_view_process_node,
get_viewer_config_from_string
)
@ -190,7 +191,20 @@ class ExporterReview(object):
if "#" in self.fhead:
self.fhead = self.fhead.replace("#", "")[:-1]
def get_representation_data(self, tags=None, range=False):
def get_representation_data(
self, tags=None, range=False,
custom_tags=None
):
""" Add representation data to self.data
Args:
tags (list[str], optional): list of defined tags.
Defaults to None.
range (bool, optional): flag for adding ranges.
Defaults to False.
custom_tags (list[str], optional): user-defined custom tags.
Defaults to None.
"""
add_tags = tags or []
repre = {
"name": self.name,
@ -200,6 +214,9 @@ class ExporterReview(object):
"tags": [self.name.replace("_", "-")] + add_tags
}
if custom_tags:
repre["custom_tags"] = custom_tags
if range:
repre.update({
"frameStart": self.first_frame,
@ -312,7 +329,8 @@ class ExporterReviewLut(ExporterReview):
dag_node.setInput(0, self.previous_node)
self._temp_nodes.append(dag_node)
self.previous_node = dag_node
self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
self.log.debug(
"OCIODisplay... `{}`".format(self._temp_nodes))
# GenerateLUT
gen_lut_node = nuke.createNode("GenerateLUT")
@ -415,6 +433,7 @@ class ExporterReviewMov(ExporterReview):
return path
def generate_mov(self, farm=False, **kwargs):
add_tags = []
self.publish_on_farm = farm
read_raw = kwargs["read_raw"]
reformat_node_add = kwargs["reformat_node_add"]
@ -433,10 +452,10 @@ class ExporterReviewMov(ExporterReview):
self.log.debug(">> baking_view_profile `{}`".format(
baking_view_profile))
add_tags = kwargs.get("add_tags", [])
add_custom_tags = kwargs.get("add_custom_tags", [])
self.log.info(
"__ add_tags: `{0}`".format(add_tags))
"__ add_custom_tags: `{0}`".format(add_custom_tags))
subset = self.instance.data["subset"]
self._temp_nodes[subset] = []
@ -491,7 +510,15 @@ class ExporterReviewMov(ExporterReview):
if not self.viewer_lut_raw:
# OCIODisplay
dag_node = nuke.createNode("OCIODisplay")
dag_node["view"].setValue(str(baking_view_profile))
display, viewer = get_viewer_config_from_string(
str(baking_view_profile)
)
if display:
dag_node["display"].setValue(display)
# assign viewer
dag_node["view"].setValue(viewer)
# connect
dag_node.setInput(0, self.previous_node)
@ -542,6 +569,7 @@ class ExporterReviewMov(ExporterReview):
# ---------- generate representation data
self.get_representation_data(
tags=["review", "delete"] + add_tags,
custom_tags=add_custom_tags,
range=True
)

View file

@ -1,7 +1,7 @@
import os
import nuke
from openpype.api import resources
from openpype import resources
from .lib import maintained_selection

View file

@ -1,4 +1,4 @@
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import InventoryAction
from openpype.hosts.nuke.api.lib import set_avalon_knob_data

View file

@ -425,7 +425,7 @@ class LoadClip(plugin.NukeLoader):
colorspace = repre_data.get("colorspace")
colorspace = colorspace or version_data.get("colorspace")
# colorspace from `project_anatomy/imageio/nuke/regexInputs`
# colorspace from `project_settings/nuke/imageio/regexInputs`
iio_colorspace = get_imageio_input_colorspace(path)
# Set colorspace defined in version data

View file

@ -3,7 +3,8 @@ import os
import nuke
import pyblish.api
import openpype.api as pype
from openpype.lib import get_version_from_path
from openpype.hosts.nuke.api.lib import (
add_publish_knob,
get_avalon_knob_data
@ -74,7 +75,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
"fps": root['fps'].value(),
"currentFile": current_file,
"version": int(pype.get_version_from_path(current_file)),
"version": int(get_version_from_path(current_file)),
"host": pyblish.api.current_host(),
"hostVersion": nuke.NUKE_VERSION_STRING

View file

@ -77,11 +77,14 @@ class ValidateNukeWriteNode(pyblish.api.InstancePlugin):
# fix type differences
if type(node_value) in (int, float):
if isinstance(value, list):
value = color_gui_to_int(value)
else:
value = float(value)
node_value = float(node_value)
try:
if isinstance(value, list):
value = color_gui_to_int(value)
else:
value = float(value)
node_value = float(node_value)
except ValueError:
value = str(value)
else:
value = str(value)
node_value = str(node_value)
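A simplified sketch of the tolerant comparison (the color-knob list handling from above is omitted); non-numeric expected values no longer raise, they simply fall back to string form:

```python
def normalize_pair(value, node_value):
    """Coerce an expected value and a knob value into comparable types."""
    if type(node_value) in (int, float):
        try:
            value = float(value)
            node_value = float(node_value)
        except ValueError:
            value = str(value)
    else:
        value = str(value)
        node_value = str(node_value)
    return value, node_value

print(normalize_pair("16", 16))            # -> (16.0, 16.0)
print(normalize_pair("[expression]", 16))  # -> ("[expression]", 16)
```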

View file

@ -1,7 +1,7 @@
import nuke
import os
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import install_host
from openpype.hosts.nuke import api
from openpype.hosts.nuke.api.lib import (

View file

@ -10,7 +10,7 @@ from wsrpc_aiohttp import (
from Qt import QtCore
from openpype.api import Logger
from openpype.lib import Logger
from openpype.pipeline import legacy_io
from openpype.tools.utils import host_tools
from openpype.tools.adobe_webserver.app import WebServerTool

View file

@ -3,8 +3,7 @@ from Qt import QtWidgets
import pyblish.api
from openpype.api import Logger
from openpype.lib import register_event_callback
from openpype.lib import register_event_callback, Logger
from openpype.pipeline import (
legacy_io,
register_loader_plugin_path,

View file

@ -7,16 +7,21 @@ class CollectVersion(pyblish.api.InstancePlugin):
Used to synchronize version from workfile to all publishable instances:
- image (manually created or color coded)
- review
- workfile
Dev comment:
Explicit collector created to control this from a single place instead of
three different plugins.
Workfile is set here explicitly as its version might need to be forced to
latest + 1 because of Webpublisher.
(This plugin must run after CollectPublishedVersion!)
"""
order = pyblish.api.CollectorOrder + 0.200
label = 'Collect Version'
hosts = ["photoshop"]
families = ["image", "review"]
families = ["image", "review", "workfile"]
def process(self, instance):
workfile_version = instance.context.data["version"]

View file

@ -49,7 +49,7 @@ class ExtractReview(publish.Extractor):
if self.make_image_sequence and len(layers) > 1:
self.log.info("Extract layers to image sequence.")
img_list = self._saves_sequences_layers(staging_dir, layers)
img_list = self._save_sequence_images(staging_dir, layers)
instance.data["representations"].append({
"name": "jpg",
@ -64,7 +64,7 @@ class ExtractReview(publish.Extractor):
processed_img_names = img_list
else:
self.log.info("Extract layers to flatten image.")
img_list = self._saves_flattened_layers(staging_dir, layers)
img_list = self._save_flatten_image(staging_dir, layers)
instance.data["representations"].append({
"name": "jpg",
@ -84,6 +84,67 @@ class ExtractReview(publish.Extractor):
source_files_pattern = self._check_and_resize(processed_img_names,
source_files_pattern,
staging_dir)
self._generate_thumbnail(ffmpeg_path, instance, source_files_pattern,
staging_dir)
no_of_frames = len(processed_img_names)
if no_of_frames > 1:
self._generate_mov(ffmpeg_path, instance, fps, no_of_frames,
source_files_pattern, staging_dir)
self.log.info(f"Extracted {instance} to {staging_dir}")
def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames,
source_files_pattern, staging_dir):
"""Generates .mov to upload to Ftrack.
Args:
ffmpeg_path (str): path to ffmpeg
instance (Pyblish Instance)
fps (str): frame rate of the source sequence
no_of_frames (int): number of collected source frames
source_files_pattern (str): name of source file
staging_dir (str): temporary location to store the .mov
Updates:
instance - adds representation portion
"""
# Generate mov.
mov_path = os.path.join(staging_dir, "review.mov")
self.log.info(f"Generate mov review: {mov_path}")
args = [
ffmpeg_path,
"-y",
"-i", source_files_pattern,
"-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
"-vframes", str(no_of_frames),
mov_path
]
self.log.debug("mov args:: {}".format(args))
_output = run_subprocess(args)
instance.data["representations"].append({
"name": "mov",
"ext": "mov",
"files": os.path.basename(mov_path),
"stagingDir": staging_dir,
"frameStart": 1,
"frameEnd": no_of_frames,
"fps": fps,
"preview": True,
"tags": self.mov_options['tags']
})
def _generate_thumbnail(self, ffmpeg_path, instance, source_files_pattern,
staging_dir):
"""Generates scaled down thumbnail and adds it as representation.
Args:
ffmpeg_path (str): path to ffmpeg
instance (Pyblish Instance)
source_files_pattern (str): name of source file
staging_dir (str): temporary location to store thumbnail
Updates:
instance - adds representation portion
"""
# Generate thumbnail
thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg")
self.log.info(f"Generate thumbnail {thumbnail_path}")
@ -96,50 +157,16 @@ class ExtractReview(publish.Extractor):
thumbnail_path
]
self.log.debug("thumbnail args:: {}".format(args))
output = run_subprocess(args)
_output = run_subprocess(args)
instance.data["representations"].append({
"name": "thumbnail",
"ext": "jpg",
"outputName": "thumb",
"files": os.path.basename(thumbnail_path),
"stagingDir": staging_dir,
"tags": ["thumbnail"]
"tags": ["thumbnail", "delete"]
})
# Generate mov.
mov_path = os.path.join(staging_dir, "review.mov")
self.log.info(f"Generate mov review: {mov_path}")
img_number = len(img_list)
args = [
ffmpeg_path,
"-y",
"-i", source_files_pattern,
"-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
"-vframes", str(img_number),
mov_path
]
self.log.debug("mov args:: {}".format(args))
output = run_subprocess(args)
self.log.debug(output)
instance.data["representations"].append({
"name": "mov",
"ext": "mov",
"files": os.path.basename(mov_path),
"stagingDir": staging_dir,
"frameStart": 1,
"frameEnd": img_number,
"fps": fps,
"preview": True,
"tags": self.mov_options['tags']
})
# Required for extract_review plugin (L222 onwards).
instance.data["frameStart"] = 1
instance.data["frameEnd"] = img_number
instance.data["fps"] = 25
self.log.info(f"Extracted {instance} to {staging_dir}")
def _check_and_resize(self, processed_img_names, source_files_pattern,
staging_dir):
"""Check if saved image could be used in ffmpeg.
@ -168,37 +195,12 @@ class ExtractReview(publish.Extractor):
return source_files_pattern
def _get_image_path_from_instances(self, instance):
img_list = []
for instance in sorted(instance.context):
if instance.data["family"] != "image":
continue
for rep in instance.data["representations"]:
img_path = os.path.join(
rep["stagingDir"],
rep["files"]
)
img_list.append(img_path)
return img_list
def _copy_image_to_staging_dir(self, staging_dir, img_list):
copy_files = []
for i, img_src in enumerate(img_list):
img_filename = self.output_seq_filename % i
img_dst = os.path.join(staging_dir, img_filename)
self.log.debug(
"Copying file .. {} -> {}".format(img_src, img_dst)
)
shutil.copy(img_src, img_dst)
copy_files.append(img_filename)
return copy_files
def _get_layers_from_image_instances(self, instance):
"""Collect all layers from 'instance'.
Returns:
(list) of PSItem
"""
layers = []
for image_instance in instance.context:
if image_instance.data["family"] != "image":
@ -210,7 +212,12 @@ class ExtractReview(publish.Extractor):
return sorted(layers)
def _saves_flattened_layers(self, staging_dir, layers):
def _save_flatten_image(self, staging_dir, layers):
"""Creates flat image from 'layers' into 'staging_dir'.
Returns:
(str): path to new image
"""
img_filename = self.output_seq_filename % 0
output_image_path = os.path.join(staging_dir, img_filename)
stub = photoshop.stub()
@ -224,7 +231,13 @@ class ExtractReview(publish.Extractor):
return img_filename
def _saves_sequences_layers(self, staging_dir, layers):
def _save_sequence_images(self, staging_dir, layers):
"""Creates separate flat images from 'layers' into 'staging_dir'.
Used as the source for a multi-frame .mov review.
Returns:
(list): paths to new images
"""
stub = photoshop.stub()
list_img_filename = []

View file

@ -1,22 +1,24 @@
#### Basic setup
## Basic setup
- Install [latest DaVinci Resolve](https://sw.blackmagicdesign.com/DaVinciResolve/v16.2.8/DaVinci_Resolve_Studio_16.2.8_Windows.zip?Key-Pair-Id=APKAJTKA3ZJMJRQITVEA&Signature=EcFuwQFKHZIBu2zDj5LTCQaQDXcKOjhZY7Fs07WGw24xdDqfwuALOyKu+EVzDX2Tik0cWDunYyV0r7hzp+mHmczp9XP4YaQXHdyhD/2BGWDgiMsiTQbNkBgbfy5MsAMFY8FHCl724Rxm8ke1foWeUVyt/Cdkil+ay+9sL72yFhaSV16sncko1jCIlCZeMkHhbzqPwyRuqLGmxmp8ey9KgBhI3wGFFPN201VMaV+RHrpX+KAfaR6p6dwo3FrPbRHK9TvMI1RA/1lJ3fVtrkDW69LImIKAWmIxgcStUxR9/taqLOD66FNiflHd1tufHv3FBa9iYQsjb3VLMPx7OCwLyg==&Expires=1608308139)
- add absolute path to ffmpeg into openpype settings
![image](https://user-images.githubusercontent.com/40640033/102630786-43294f00-414d-11eb-98de-f0ae51f62077.png)
- install Python 3.6 into `%LOCALAPPDATA%/Programs/Python/Python36` (only respected path by Resolve)
- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. I was building it on Win10 machine with Visual Studio Community 2019 and
- Currently supported versions are up to v18
- install Python 3.6.2 (latest version tested with Resolve v17) or up to 3.9.13 (latest tested with v18)
- pip install PySide2:
- Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install PySide2`
- pip install OpenTimelineIO:
- Python 3.9.*: open terminal and go to python.exe directory, then `python -m pip install OpenTimelineIO`
- Python 3.6: open a terminal, go to the python.exe directory, then run `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move the built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. It was built on a Win10 machine with Visual Studio Community 2019 and
![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with CMake installed and available in PATH.
- install PySide2 for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install PySide2`
- make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6
![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png)
- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the Python 3 path to `RESOLVE_PYTHON3_HOME` for the relevant platform.
#### Editorial setup
## Editorial setup
This is how it looks on my testing project timeline
![image](https://user-images.githubusercontent.com/40640033/102637638-96ec6600-4156-11eb-9656-6e8e3ce4baf8.png)
Notice that the tracks were renamed to `main` (holding metadata markers) and `review` (used for generating review data with ffmpeg conversion to a jpg sequence).
1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/**__OpenPype_Menu__**
1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__**
2. then select any clips in `main` track and change their color to `Chocolate`
3. in OpenPype Menu select `Create`
4. in Creator select `Create Publishable Clip [New]` (temporary name)

View file

@ -1,189 +0,0 @@
Updated as of 08 March 2019
--------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples.
Overview
--------
As with Blackmagic Design Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page, or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when allowing scripting access from outside of the Resolve application.
Using a script
--------------
DaVinci Resolve needs to be running for a script to be invoked.
For a Resolve script to be executed from an external folder, the script needs to know of the API location.
You may need to set these environment variables to allow for your Python installation to pick up the appropriate dependencies as shown below:
Mac OS X:
RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting/"
RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so"
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
Windows:
RESOLVE_SCRIPT_API="%PROGRAMDATA%\\Blackmagic Design\\DaVinci Resolve\\Support\\Developer\\Scripting\\"
RESOLVE_SCRIPT_LIB="C:\\Program Files\\Blackmagic Design\\DaVinci Resolve\\fusionscript.dll"
PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\\Modules\\"
Linux:
RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting/"
RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so"
PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/"
(Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve)
As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console.
On startup, DaVinci Resolve scans the Utility Scripts directory and enumerates the scripts found in the Script application menu. Placing your script in this folder and invoking it from this menu is the easiest way to use scripts. The Utility Scripts folder is located in:
Mac OS X: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp/
Windows: %APPDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts\Comp\
Linux: /opt/resolve/Fusion/Scripts/Comp/ (or /home/resolve/Fusion/Scripts/Comp/ depending on installation)
The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6 and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual.
This example Python script creates a simple project:
#!/usr/bin/env python
import DaVinciResolveScript as dvr_script
resolve = dvr_script.scriptapp("Resolve")
fusion = resolve.Fusion()
projectManager = resolve.GetProjectManager()
projectManager.CreateProject("Hello World")
The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and `getmetatable` in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality.
Running DaVinci Resolve in headless mode
----------------------------------------
DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled. However, the various scripting APIs will continue to work as expected.
Basic Resolve API
-----------------
Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions.
Resolve
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
GetMediaStorage() --> MediaStorage # Returns media storage object to query and act on media locations.
GetProjectManager() --> ProjectManager # Returns project manager object for currently open database.
OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "edit", "fusion", "color", "fairlight", "deliver").
ProjectManager
CreateProject(projectName) --> Project # Creates and returns a project if projectName (text) is unique, and None if it is not.
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (text) if there is a match found, and None if there is no matching Project.
GetCurrentProject() --> Project # Returns the currently loaded Resolve project.
SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful.
CreateFolder(folderName) --> Bool # Creates a folder if folderName (text) is unique.
GetProjectsInCurrentFolder() --> [project names...] # Returns an array of project names in current folder.
GetFoldersInCurrentFolder() --> [folder names...] # Returns an array of folder names in current folder.
GotoRootFolder() --> Bool # Opens root folder in database.
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
OpenFolder(folderName) --> Bool # Opens folder under given name.
ImportProject(filePath) --> Bool # Imports a project under given file path. Returns true in case of success.
ExportProject(projectName, filePath) --> Bool # Exports a project based on given name into provided file path. Returns true in case of success.
RestoreProject(filePath) --> Bool # Restores a project under given backup file path. Returns true in case of success.
Project
GetMediaPool() --> MediaPool # Returns the Media Pool object.
GetTimelineCount() --> int # Returns the number of timelines currently present in the project.
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
GetName() --> string # Returns project name.
SetName(projectName) --> Bool # Sets project name if given projectname (text) is unique.
GetPresets() --> [presets...] # Returns a table of presets and their information.
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
GetRenderJobs() --> [render jobs...] # Returns a table of render jobs and their information.
GetRenderPresets() --> [presets...] # Returns a table of render presets and their information.
StartRendering(index1, index2, ...) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs.
StartRendering([idxs...]) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs.
StopRendering() --> None # Stops rendering for all render jobs.
IsRenderingInProgress() --> Bool # Returns true if rendering is in progress.
AddRenderJob() --> Bool # Adds render job to render queue.
DeleteRenderJobByIndex(idx) --> Bool # Deletes render job based on given job index (int).
DeleteAllRenderJobs() --> Bool # Deletes all render jobs.
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (text) exists.
SaveAsNewRenderPreset(presetName) --> Bool # Creates a new render preset by given name if presetName(text) is unique.
SetRenderSettings([settings map]) --> Bool # Sets given settings for rendering. Settings map is a map, keys of map are: "SelectAllFrames", "MarkIn", "MarkOut", "TargetDir", "CustomName".
GetRenderJobStatus(idx) --> [status info] # Returns job status and completion rendering percentage of the job by given job index (int).
GetSetting(settingName) --> string # Returns setting value by given settingName (string) if the setting exist. With empty settingName the function returns a full list of settings.
SetSetting(settingName, settingValue) --> Bool # Sets project setting base on given name (string) and value (string).
GetRenderFormats() --> [render formats...]# Returns a list of available render formats.
GetRenderCodecs(renderFormat) --> [render codecs...] # Returns a list of available codecs for given render format (string).
GetCurrentRenderFormatAndCodec() --> [format, codec] # Returns currently selected render format and render codec.
SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering.
MediaStorage
GetMountedVolumes() --> [paths...] # Returns an array of folder paths corresponding to mounted volumes displayed in Resolves Media Storage.
GetSubFolders(folderPath) --> [paths...] # Returns an array of folder paths in the given absolute folder path.
GetFiles(folderPath) --> [paths...] # Returns an array of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
RevealInStorage(path) --> None # Expands and displays a given file/folder path in Resolves Media Storage.
AddItemsToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is one or more file/folder paths.
AddItemsToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is an array of file/folder paths.
MediaPool
GetRootFolder() --> Folder # Returns the root Folder of Media Pool
AddSubFolder(folder, name) --> Folder # Adds a new subfolder under specified Folder object with the given name.
CreateEmptyTimeline(name) --> Timeline # Adds a new timeline with given name.
AppendToTimeline(clip1, clip2...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
CreateTimelineFromClips(name, clip1, clip2, ...)--> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects.
ImportTimelineFromFile(filePath) --> Timeline # Creates timeline based on parameters within given file.
GetCurrentFolder() --> Folder # Returns currently selected Folder.
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
Folder
GetClips() --> [clips...] # Returns a list of clips (items) within the folder.
GetName() --> string # Returns user-defined name of the folder.
GetSubFolders() --> [folders...] # Returns a list of subfolders in the folder.
MediaPoolItem
GetMetadata(metadataType) --> [[types],[values]] # Returns a value of metadataType. If parameter is not specified returns all set metadata parameters.
SetMetadata(metadataType, metadataValue) --> Bool # Sets metadata by given type and value. Returns True if successful.
GetMediaId() --> string # Returns a unique ID name related to MediaPoolItem.
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
AddFlag(color) --> Bool # Adds a flag with given color (text).
GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item.
GetClipColor() --> string # Returns an item color as a string.
GetClipProperty(propertyName) --> [[types],[values]] # Returns property value related to the item based on given propertyName (string). if propertyName is empty then it returns a full list of properties.
SetClipProperty(propertyName, propertyValue) --> Bool # Sets into given propertyName (string) propertyValue (string).
Timeline
GetName() --> string # Returns user-defined name of the timeline.
SetName(timelineName) --> Bool # Sets timeline name if timelineName (text) is unique.
GetStartFrame() --> int # Returns frame number at the start of timeline.
GetEndFrame() --> int # Returns frame number at the end of timeline.
GetTrackCount(trackType) --> int # Returns a number of track based on specified track type ("audio", "video" or "subtitle").
GetItemsInTrack(trackType, index) --> [items...] # Returns an array of Timeline items on the video or audio track (based on trackType) at specified index. 1 <= index <= GetTrackCount(trackType).
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
GetCurrentTimecode() --> string # Returns a string representing a timecode for current position of the timeline, while on Cut, Edit, Color and Deliver page.
GetCurrentVideoItem() --> item # Returns current video timeline item.
GetCurrentClipThumbnailImage() --> [width, height, format, data] # Returns raw thumbnail image data (This image data is encoded in base 64 format and the image format is RGB 8 bit) for the current media in the Color Page in the format of dictionary (in Python) and table (in Lua). Information return are "width", "height", "format" and "data". Example is provided in 6_get_current_media_thumbnail.py in Example folder.
TimelineItem
GetName() --> string # Returns a name of the item.
GetDuration() --> int # Returns a duration of item.
GetEnd() --> int # Returns a position of end frame.
GetFusionCompCount() --> int # Returns the number of Fusion compositions associated with the timeline item.
GetFusionCompByIndex(compIndex) --> fusionComp # Returns Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount()
GetFusionCompNames() --> [names...] # Returns a list of Fusion composition names associated with the timeline item.
GetFusionCompByName(compName) --> fusionComp # Returns Fusion composition object based on given name.
GetLeftOffset() --> int # Returns a maximum extension by frame for clip from left side.
GetRightOffset() --> int # Returns a maximum extension by frame for clip from right side.
GetStart() --> int # Returns a position of first frame.
AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information.
GetMarkers() --> [markers...] # Returns a list of all markers and their information.
GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item.
GetClipColor() --> string # Returns an item color as a string.
AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item.
ImportFusionComp(path) --> fusionComp # Imports Fusion composition from given file path by creating and adding a new composition for the item.
ExportFusionComp(path, compIndex) --> Bool # Exports Fusion composition based on given index into provided file name path.
DeleteFusionCompByName(compName) --> Bool # Deletes Fusion composition by provided name.
LoadFusionCompByName(compName) --> fusionComp # Loads Fusion composition by provided name and sets it as active composition.
RenameFusionCompByName(oldName, newName) --> Bool # Renames Fusion composition by provided name with new given name.
AddVersion(versionName, versionType) --> Bool # Adds a new Version associated with the timeline item. versionType: 0 - local, 1 - remote.
DeleteVersionByName(versionName, versionType) --> Bool # Deletes Version by provided name. versionType: 0 - local, 1 - remote.
LoadVersionByName(versionName, versionType) --> Bool # Loads Version by provided name and sets it as active Version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames Version by provided name with new given name. versionType: 0 - local, 1 - remote.
GetMediaPoolItem() --> MediaPoolItem # Returns a corresponding to the timeline item media pool item if it exists.
GetVersionNames(versionType) --> [strings...] # Returns a list of version names by provided versionType: 0 - local, 1 - remote.
GetStereoConvergenceValues() --> [offset, value] # Returns a table of keyframe offsets and respective convergence values
GetStereoLeftFloatingWindowParams() --> [offset, value] # For the LEFT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values
GetStereoRightFloatingWindowParams() --> [offset, value] # For the RIGHT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values

View file

@ -1,5 +1,5 @@
Updated as of 20 October 2020
-----------------------------
Updated as of 9 May 2022
----------------------------
In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import
modules for scripting access (DaVinciResolve.py) and some representative examples.
@ -89,12 +89,25 @@ Resolve
Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts.
GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations.
GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database.
OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver").
GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None).
GetProductName() --> string # Returns product name.
GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format.
GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format.
LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'.
UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout.
ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'.
DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'.
SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'.
ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename.
Quit() --> None # Quits the Resolve App.
ProjectManager
ArchiveProject(projectName,
filePath,
isArchiveSrcMedia=True,
isArchiveRenderCache=True,
isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments
CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not.
DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded
LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project.
@ -109,9 +122,9 @@ ProjectManager
GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent.
GetCurrentFolder() --> string # Returns the current folder name.
OpenFolder(folderName) --> Bool # Opens folder under given name.
ImportProject(filePath) --> Bool # Imports a project from the file path provided. Returns True if successful.
ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful.
ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success.
RestoreProject(filePath) --> Bool # Restores a project from the file path provided. Returns True if successful.
RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful.
GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection
GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve
SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project.
@ -125,8 +138,9 @@ Project
GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount()
GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline.
SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful.
GetGallery() --> Gallery # Returns the Gallery object.
GetName() --> string # Returns project name.
SetName(projectName) --> Bool # Sets project name if given projectname (string) is unique.
SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique.
GetPresetList() --> [presets...] # Returns a list of presets and their information.
SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project.
AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job.
@ -144,27 +158,7 @@ Project
LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists.
SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique.
SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys:
# "SelectAllFrames": Bool
# "MarkIn": int
# "MarkOut": int
# "TargetDir": string
# "CustomName": string
# "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
# "ExportVideo": Bool
# "ExportAudio": Bool
# "FormatWidth": int
# "FormatHeight": int
# "FrameRate": float (examples: 23.976, 24)
# "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
# "VideoQuality" possible values for current codec (if applicable):
# 0 (int) - will set quality to automatic
# [1 -> MAX] (int) - will set input bit rate
# ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level
# "AudioCodec": string (example: "aac")
# "AudioBitDepth": int
# "AudioSampleRate": int
# "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign")
# "GammaTag" : string (example: "Same as Project", "ACEScct")
# Refer to "Looking up render settings" section for information for supported settings
GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string).
GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information.
SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). Check the section below for more information.
@ -176,12 +170,13 @@ Project
SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip.
GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height".
RefreshLUTList() --> Bool # Refreshes LUT List
GetUniqueId() --> string # Returns a unique ID for the project item
MediaStorage
GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve's Media Storage.
GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path.
GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries.
RevealInStorage(path) --> None # Expands and displays given file/folder path in Resolves Media Storage.
RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve's Media Storage.
AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created.
AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful.
@ -190,10 +185,11 @@ MediaStorage
MediaPool
GetRootFolder() --> Folder # Returns root Folder of Media Pool
AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name.
RefreshFolders() --> Bool # Updates the folders in collaboration mode
CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name.
AppendToTimeline(clip1, clip2, ...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful.
AppendToTimeline([{clipInfo}, ...]) --> Bool # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int).
AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems.
AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only). Returns the list of appended timelineItems.
CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects.
CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int).
@ -202,6 +198,8 @@ MediaPool
# "importSourceClips": Bool, specifies whether source clips should be imported, True by default
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True
# "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False
# "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import
DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool.
GetCurrentFolder() --> Folder # Returns currently selected Folder.
SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder.
DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool
@ -214,19 +212,26 @@ MediaPool
RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path.
UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips.
ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created.
ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created.
# Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on.
# Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx".
ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format.
# If no clips are specified, all clips from media pool will be used.
GetUniqueId() --> string # Returns a unique ID for the media pool
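A minimal sketch of importing media and appending it to the current timeline, assuming the standard DaVinciResolveScript bootstrap; the file paths and frame range are illustrative only:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()
    media_pool = project.GetMediaPool()

    # Import two hypothetical files into the current Media Pool folder.
    items = media_pool.ImportMedia(["/shots/sh010.mov", "/shots/sh020.mov"])

    # Append whole clips ...
    appended = media_pool.AppendToTimeline(items)
    # ... or append only frames 10-50 of the first clip, video only (mediaType 1).
    partial = media_pool.AppendToTimeline([
        {"mediaPoolItem": items[0], "startFrame": 10, "endFrame": 50, "mediaType": 1}
    ])
    print(len(appended), len(partial))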
Folder
GetClipList() --> [clips...] # Returns a list of clips (items) within the folder.
GetName() --> string # Returns the media folder name.
GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder.
GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise
GetUniqueId() --> string # Returns a unique ID for the media pool folder
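A short sketch that walks the folder hierarchy recursively, assuming the same bootstrap as the sketches above; it only prints folder names and clip counts:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    media_pool = resolve.GetProjectManager().GetCurrentProject().GetMediaPool()

    def walk_folder(folder, depth=0):
        # Print this folder's name and clip count, then recurse into subfolders.
        print("  " * depth + folder.GetName(), len(folder.GetClipList()), "clips")
        for sub_folder in folder.GetSubFolderList():
            walk_folder(sub_folder, depth + 1)

    walk_folder(media_pool.GetRootFolder())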
MediaPoolItem
GetName() --> string # Returns the clip name.
GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'.
# If no argument is specified, a dict of all set metadata properties is returned.
SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful.
SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful.
GetMediaId() --> string # Returns the unique ID for the MediaPoolItem.
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
customData)
@ -248,15 +253,18 @@ MediaPoolItem
GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'.
# If no argument is specified, a dict of all clip properties is returned. Check the section below for more information.
SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information.
LinkProxyMedia(propertyName) --> Bool # Links proxy media (absolute path) with the current clip.
LinkProxyMedia(proxyMediaFilePath) --> Bool # Links the proxy media located at the path specified by 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be an absolute path.
UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip.
ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path.
GetUniqueId() --> string # Returns a unique ID for the media pool item
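A minimal sketch of reading and writing clip metadata and properties; the metadata keys ("Shot", "Scene"), the "Resolution" property name and the proxy path are illustrative and not taken from this section:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    media_pool = resolve.GetProjectManager().GetCurrentProject().GetMediaPool()

    clip = media_pool.GetRootFolder().GetClipList()[0]  # assumes the root bin has clips
    print(clip.GetName(), clip.GetClipProperty("Resolution"))

    # Batch-set metadata with a dict, then read everything back.
    clip.SetMetadata({"Shot": "sh010", "Scene": "001"})
    print(clip.GetMetadata())

    # Link a proxy rendered elsewhere (hypothetical absolute path).
    clip.LinkProxyMedia("/proxies/sh010_proxy.mov")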
Timeline
GetName() --> string # Returns the timeline name.
SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful.
GetStartFrame() --> int # Returns the frame number at the start of timeline.
GetEndFrame() --> int # Returns the frame number at the end of timeline.
SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise.
GetStartTimecode() --> string # Returns the start timecode for the timeline.
GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle").
GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType).
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
@ -271,7 +279,8 @@ Timeline
DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData.
ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned".
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color and Deliver pages.
GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages.
SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages.
GetCurrentVideoItem() --> item # Returns the current video timeline item.
GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page.
# An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder.
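A minimal sketch of decoding the thumbnail dict, assuming the script runs while the Color page is active; only the standard library is used for the base64 step:

    import base64
    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    thumb = timeline.GetCurrentClipThumbnailImage()
    if thumb:
        raw = base64.b64decode(thumb["data"])
        # For RGB 8-bit data this should be width * height * 3 bytes.
        print(thumb["format"], thumb["width"], "x", thumb["height"], len(raw))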
@ -280,37 +289,30 @@ Timeline
DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success.
CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item.
CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item.
ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys:
# "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default
# "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default
# "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default
# "useSizingInfo": Bool, specifies if sizing information should be used, False by default
# "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default
# "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default
# "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False
# "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True
# "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder
Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format.
# exportType can be one of the following constants:
# resolve.EXPORT_AAF
# resolve.EXPORT_DRT
# resolve.EXPORT_EDL
# resolve.EXPORT_FCP_7_XML
# resolve.EXPORT_FCPXML_1_3
# resolve.EXPORT_FCPXML_1_4
# resolve.EXPORT_FCPXML_1_5
# resolve.EXPORT_FCPXML_1_6
# resolve.EXPORT_FCPXML_1_7
# resolve.EXPORT_FCPXML_1_8
# resolve.EXPORT_HDR_10_PROFILE_A
# resolve.EXPORT_HDR_10_PROFILE_B
# resolve.EXPORT_TEXT_CSV
# resolve.EXPORT_TEXT_TAB
# resolve.EXPORT_DOLBY_VISION_VER_2_9
# resolve.EXPORT_DOLBY_VISION_VER_4_0
# exportSubtype can be one of the following enums:
# resolve.EXPORT_NONE
# resolve.EXPORT_AAF_NEW
# resolve.EXPORT_AAF_EXISTING
# resolve.EXPORT_CDL
# resolve.EXPORT_SDL
# resolve.EXPORT_MISSING_CLIPS
# Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the other exportType values, exportSubtype is ignored.
# When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
# When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
# Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used.
# Refer to section "Looking up timeline exports properties" for information on the parameters.
GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information.
SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information.
InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline.
InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline.
InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline.
InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline.
InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline.
InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline.
GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object.
GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects.
GetUniqueId() --> string # Returns a unique ID for the timeline
TimelineItem
GetName() --> string # Returns the item name.
@ -323,6 +325,10 @@ TimelineItem
GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side.
GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side.
GetStart() --> int # Returns the start frame position on the timeline.
SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue"
# Refer to "Looking up Timeline item properties" for more information
GetProperty(propertyKey) --> int/[key:value] # Returns the value of the specified key.
# If no key is specified, the method returns a dictionary (Python) or table (Lua) of all supported keys.
AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
customData)
GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
@ -345,7 +351,8 @@ TimelineItem
DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition.
LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition.
RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName.
AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote).
GetCurrentVersion() --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType (0 - local, 1 - remote).
DeleteVersionByName(versionName, versionType) --> Bool # Deletes a color version by name and versionType (0 - local, 1 - remote).
LoadVersionByName(versionName, versionType) --> Bool # Loads a named color version as the active version. versionType: 0 - local, 1 - remote.
RenameVersionByName(oldName, newName, versionType)--> Bool # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
@ -354,12 +361,14 @@ TimelineItem
GetStereoConvergenceValues() --> {keyframes...} # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
GetStereoLeftFloatingWindowParams() --> {keyframes...} # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
GetStereoRightFloatingWindowParams() --> {keyframes...} # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
GetNumNodes() --> int # Returns the number of nodes in the current graph for the timeline item
SetLUT(nodeIndex, lutPath) --> Bool # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
# The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
# The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
GetLUT(nodeIndex) --> String # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
SetCDL([CDL map]) --> Bool # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
# Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
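A minimal sketch applying a LUT and a CDL on a timeline item's nodes; the LUT path is illustrative and must already be known to Resolve (see Project.RefreshLUTList):

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    item = timeline.GetItemListInTrack("video", 1)[0]  # assumes track 1 has items
    last_node = item.GetNumNodes()

    item.SetLUT(last_node, "Film Looks/Some Look.cube")  # relative LUT path (illustrative)
    item.SetCDL({"NodeIndex": "1", "Slope": "0.9 1.0 1.1", "Offset": "0.0 0.0 0.0",
                 "Power": "1.0 1.0 1.0", "Saturation": "1.0"})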
AddTake(mediaPoolItem, startFrame=0, endFrame=0) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the whole clip is added. startFrame and endFrame can be specified as extents.
AddTake(mediaPoolItem, startFrame, endFrame) --> Bool # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents are added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
GetSelectedTakeIndex() --> int # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
GetTakesCount() --> int # Returns the number of takes in take selector, or 0 if the clip is not a take selector.
GetTakeByIndex(idx) --> {takeInfo...} # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index.
@ -367,7 +376,24 @@ TimelineItem
SelectTakeByIndex(idx) --> Bool # Selects a take by index, 1 <= idx <= number of takes.
FinalizeTake() --> Bool # Finalizes take selection.
CopyGrades([tgtTimelineItems]) --> Bool # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred.
UpdateSidecar() --> Bool # Updates sidecar file for BRAW clips or RMD file for R3D clips.
GetUniqueId() --> string # Returns a unique ID for the timeline item
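A minimal sketch of working with takes, assuming the current timeline has an item on video track 1 and the root bin holds an alternate clip; the bin, indices and clip choice are illustrative:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()
    timeline = project.GetCurrentTimeline()

    item = timeline.GetItemListInTrack("video", 1)[0]
    alt_clip = project.GetMediaPool().GetRootFolder().GetClipList()[1]

    # Register the alternate clip as a take, switch to it, then bake the choice in.
    item.AddTake(alt_clip)
    item.SelectTakeByIndex(item.GetTakesCount())
    item.FinalizeTake()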
Gallery
GetAlbumName(galleryStillAlbum) --> string # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
SetAlbumName(galleryStillAlbum, albumName) --> Bool # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
GetCurrentStillAlbum() --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object.
SetCurrentStillAlbum(galleryStillAlbum) --> Bool # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'.
GetGalleryStillAlbums() --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects.
GalleryStillAlbum
GetStills() --> [galleryStill] # Returns the list of GalleryStill objects in the album.
GetLabel(galleryStill) --> string # Returns the label of the galleryStill.
SetLabel(galleryStill, label) --> Bool # Sets the new 'label' to GalleryStill object 'galleryStill'.
ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm).
DeleteStills([galleryStill]) --> Bool # Deletes specified list of GalleryStill objects '[galleryStill]'.
GalleryStill # This class does not provide any API functions but the object type is used by functions in other classes.
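A minimal sketch that grabs a still from the current clip and exports it from the current album; Project.GetGallery() is documented elsewhere in this README, and the album name, output folder and prefix are hypothetical:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    project = resolve.GetProjectManager().GetCurrentProject()
    timeline = project.GetCurrentTimeline()
    gallery = project.GetGallery()

    still = timeline.GrabStill()
    album = gallery.GetCurrentStillAlbum()
    gallery.SetAlbumName(album, "Script grabs")
    album.ExportStills([still], "/tmp/stills", "sh010", "png")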
List and Dict Data Structures
-----------------------------
@ -375,7 +401,6 @@ Beside primitive data types, Resolve's Python API mainly uses list and dict data
As Lua does not support list and dict data structures, the Lua API implements "list" as a table with indices, e.g. { [1] = listValue1, [2] = listValue2, ... }.
Similarly, the Lua API implements "dict" as a table keyed by the dictionary keys, e.g. { [dictKey1] = dictValue1, [dictKey2] = dictValue2, ... }.
Looking up Project and Clip properties
--------------------------------------
This section covers additional notes for the functions "Project:GetSetting", "Project:SetSetting", "Timeline:GetSetting", "Timeline:SetSetting", "MediaPoolItem:GetClipProperty" and
@ -412,6 +437,179 @@ Affects:
• x = MediaPoolItem:GetClipProperty('Super Scale') and MediaPoolItem:SetClipProperty('Super Scale', x)
Looking up Render Settings
--------------------------
This section covers the supported settings for the method SetRenderSettings({settings}).
The settings parameter is a dictionary that can contain the following keys (a usage sketch follows the list):
- "SelectAllFrames": Bool (when set True, the settings MarkIn and MarkOut are ignored)
- "MarkIn": int
- "MarkOut": int
- "TargetDir": string
- "CustomName": string
- "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix.
- "ExportVideo": Bool
- "ExportAudio": Bool
- "FormatWidth": int
- "FormatHeight": int
- "FrameRate": float (examples: 23.976, 24)
- "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope")
- "VideoQuality" possible values for current codec (if applicable):
- 0 (int) - will set quality to automatic
- [1 -> MAX] (int) - will set input bit rate
- ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level
- "AudioCodec": string (example: "aac")
- "AudioBitDepth": int
- "AudioSampleRate": int
- "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign")
- "GammaTag" : string (example: "Same as Project", "ACEScct")
- "ExportAlpha": Bool
- "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265.
- "MultiPassEncode": Bool. Can only be set for H.264.
- "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true.
- "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats.
Looking up timeline export properties
-------------------------------------
This section covers the parameters for the method Export(fileName, exportType, exportSubtype); a usage sketch follows at the end of this section.
exportType can be one of the following constants:
- resolve.EXPORT_AAF
- resolve.EXPORT_DRT
- resolve.EXPORT_EDL
- resolve.EXPORT_FCP_7_XML
- resolve.EXPORT_FCPXML_1_3
- resolve.EXPORT_FCPXML_1_4
- resolve.EXPORT_FCPXML_1_5
- resolve.EXPORT_FCPXML_1_6
- resolve.EXPORT_FCPXML_1_7
- resolve.EXPORT_FCPXML_1_8
- resolve.EXPORT_FCPXML_1_9
- resolve.EXPORT_FCPXML_1_10
- resolve.EXPORT_HDR_10_PROFILE_A
- resolve.EXPORT_HDR_10_PROFILE_B
- resolve.EXPORT_TEXT_CSV
- resolve.EXPORT_TEXT_TAB
- resolve.EXPORT_DOLBY_VISION_VER_2_9
- resolve.EXPORT_DOLBY_VISION_VER_4_0
exportSubtype can be one of the following enums:
- resolve.EXPORT_NONE
- resolve.EXPORT_AAF_NEW
- resolve.EXPORT_AAF_EXISTING
- resolve.EXPORT_CDL
- resolve.EXPORT_SDL
- resolve.EXPORT_MISSING_CLIPS
Please note that exportSubtype is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For the other exportType values, exportSubtype is ignored.
When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING.
When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE.
Note: Replace the 'resolve.' prefix in the constants above if a different variable name is used for the Resolve scripting object.
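A minimal sketch of two exports using the constants above, with hypothetical output paths:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()

    # AAF export requires a subtype; for CSV the subtype is ignored, so EXPORT_NONE is fine.
    timeline.Export("/exports/cut_v001.aaf", resolve.EXPORT_AAF, resolve.EXPORT_AAF_NEW)
    timeline.Export("/exports/cut_v001.csv", resolve.EXPORT_TEXT_CSV, resolve.EXPORT_NONE)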
Looking up Timeline item properties
-----------------------------------
This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions get and set the properties listed below (a usage sketch follows the list).
The supported keys with their accepted values are:
"Pan" : floating point values from -4.0*width to 4.0*width
"Tilt" : floating point values from -4.0*height to 4.0*height
"ZoomX" : floating point values from 0.0 to 100.0
"ZoomY" : floating point values from 0.0 to 100.0
"ZoomGang" : a boolean value
"RotationAngle" : floating point values from -360.0 to 360.0
"AnchorPointX" : floating point values from -4.0*width to 4.0*width
"AnchorPointY" : floating point values from -4.0*height to 4.0*height
"Pitch" : floating point values from -1.5 to 1.5
"Yaw" : floating point values from -1.5 to 1.5
"FlipX" : boolean value for flipping horizontally
"FlipY" : boolean value for flipping vertically
"CropLeft" : floating point values from 0.0 to width
"CropRight" : floating point values from 0.0 to width
"CropTop" : floating point values from 0.0 to height
"CropBottom" : floating point values from 0.0 to height
"CropSoftness" : floating point values from -100.0 to 100.0
"CropRetain" : boolean value for "Retain Image Position" checkbox
"DynamicZoomEase" : A value from the following constants
- DYNAMIC_ZOOM_EASE_LINEAR = 0
- DYNAMIC_ZOOM_EASE_IN
- DYNAMIC_ZOOM_EASE_OUT
- DYNAMIC_ZOOM_EASE_IN_AND_OUT
"CompositeMode" : A value from the following constants
- COMPOSITE_NORMAL = 0
- COMPOSITE_ADD
- COMPOSITE_SUBTRACT
- COMPOSITE_DIFF
- COMPOSITE_MULTIPLY
- COMPOSITE_SCREEN
- COMPOSITE_OVERLAY
- COMPOSITE_HARDLIGHT
- COMPOSITE_SOFTLIGHT
- COMPOSITE_DARKEN
- COMPOSITE_LIGHTEN
- COMPOSITE_COLOR_DODGE
- COMPOSITE_COLOR_BURN
- COMPOSITE_EXCLUSION
- COMPOSITE_HUE
- COMPOSITE_SATURATE
- COMPOSITE_COLORIZE
- COMPOSITE_LUMA_MASK
- COMPOSITE_DIVIDE
- COMPOSITE_LINEAR_DODGE
- COMPOSITE_LINEAR_BURN
- COMPOSITE_LINEAR_LIGHT
- COMPOSITE_VIVID_LIGHT
- COMPOSITE_PIN_LIGHT
- COMPOSITE_HARD_MIX
- COMPOSITE_LIGHTER_COLOR
- COMPOSITE_DARKER_COLOR
- COMPOSITE_FOREGROUND
- COMPOSITE_ALPHA
- COMPOSITE_INVERTED_ALPHA
- COMPOSITE_LUM
- COMPOSITE_INVERTED_LUM
"Opacity" : floating point value from 0.0 to 100.0
"Distortion" : floating point value from -1.0 to 1.0
"RetimeProcess" : A value from the following constants
- RETIME_USE_PROJECT = 0
- RETIME_NEAREST
- RETIME_FRAME_BLEND
- RETIME_OPTICAL_FLOW
"MotionEstimation" : A value from the following constants
- MOTION_EST_USE_PROJECT = 0
- MOTION_EST_STANDARD_FASTER
- MOTION_EST_STANDARD_BETTER
- MOTION_EST_ENHANCED_FASTER
- MOTION_EST_ENHANCED_BETTER
- MOTION_EST_SPEED_WRAP
"Scaling" : A value from the following constants
- SCALE_USE_PROJECT = 0
- SCALE_CROP
- SCALE_FIT
- SCALE_FILL
- SCALE_STRETCH
"ResizeFilter" : A value from the following constants
- RESIZE_FILTER_USE_PROJECT = 0
- RESIZE_FILTER_SHARPER
- RESIZE_FILTER_SMOOTHER
- RESIZE_FILTER_BICUBIC
- RESIZE_FILTER_BILINEAR
- RESIZE_FILTER_BESSEL
- RESIZE_FILTER_BOX
- RESIZE_FILTER_CATMULL_ROM
- RESIZE_FILTER_CUBIC
- RESIZE_FILTER_GAUSSIAN
- RESIZE_FILTER_LANCZOS
- RESIZE_FILTER_MITCHELL
- RESIZE_FILTER_NEAREST_NEIGHBOR
- RESIZE_FILTER_QUADRATIC
- RESIZE_FILTER_SINC
- RESIZE_FILTER_LINEAR
Values outside the listed ranges are clipped.
The width and height used in the ranges above match the UI maximum limits.
The arguments can be passed as a key/value pair, or grouped together into a dictionary (for Python) or table (for Lua) and passed as a single argument.
Getting the value of a key that uses constants returns the numeric value of that constant.
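A minimal sketch of reading and writing timeline item properties, assuming the current timeline has an item on video track 1; the key names and values are taken from the list above, the item choice is illustrative:

    import DaVinciResolveScript as dvr_script

    resolve = dvr_script.scriptapp("Resolve")
    timeline = resolve.GetProjectManager().GetCurrentProject().GetCurrentTimeline()
    item = timeline.GetItemListInTrack("video", 1)[0]

    # Set properties one at a time, or grouped in a single dict.
    item.SetProperty("ZoomX", 1.2)
    item.SetProperty({"ZoomY": 1.2, "Opacity": 80.0})
    # Keys that use constants are returned as their numeric value.
    print(item.GetProperty("CompositeMode"))
    print(item.GetProperty())  # dict of all supported keys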
Deprecated Resolve API Functions
--------------------------------
The following API functions are deprecated.
@ -450,12 +648,12 @@ TimelineItem
Unsupported Resolve API Functions
---------------------------------
The following API (functions and parameters) are no longer supported. Use job IDs instead of indices.
Project
StartRendering(index1, index2, ...) --> Bool # Please use unique job ids (string) instead of indices.
StartRendering([idxs...]) --> Bool # Please use unique job ids (string) instead of indices.
DeleteRenderJobByIndex(idx) --> Bool # Please use unique job ids (string) instead of indices.
GetRenderJobStatus(idx) --> {status info} # Please use unique job ids (string) instead of indices.
GetSetting and SetSetting --> {} # settingName "videoMonitorUseRec601For422SDI" is no longer supported.
# Please use "videoMonitorUseMatrixOverrideFor422SDI" and "videoMonitorMatrixOverrideFor422SDI" instead.
GetSetting and SetSetting --> {} # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI.
# settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available.
View file
@ -1,10 +1,6 @@
"""
resolve api
"""
bmdvr = None
bmdvf = None
from .utils import (
    get_resolve_module
)
@ -70,6 +66,9 @@ from .workio import (
from .testing_utils import TestGUI
bmdvr = None
bmdvf = None
__all__ = [
"bmdvr",
"bmdvf",
View file
@ -54,15 +54,15 @@ class OpenPypeMenu(QtWidgets.QWidget):
)
self.setWindowTitle("OpenPype")
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
inventory_btn = QtWidgets.QPushButton("Inventory...", self)
subsetm_btn = QtWidgets.QPushButton("Subset Manager...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self)
create_btn = QtWidgets.QPushButton("Create ...", self)
publish_btn = QtWidgets.QPushButton("Publish ...", self)
load_btn = QtWidgets.QPushButton("Load ...", self)
inventory_btn = QtWidgets.QPushButton("Manager ...", self)
subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self)
libload_btn = QtWidgets.QPushButton("Library ...", self)
experimental_btn = QtWidgets.QPushButton(
    "Experimental tools...", self
    "Experimental tools ...", self
)
# rename_btn = QtWidgets.QPushButton("Rename", self)
# set_colorspace_btn = QtWidgets.QPushButton(
Some files were not shown because too many files have changed in this diff.